std::unique_ptr<frame_muxer> muxer_;
const boost::rational<int> framerate_;
- const uint32_t start_;
- const uint32_t length_;
const bool thumbnail_mode_;
core::draw_frame last_frame_;
const std::wstring& url_or_file,
const std::wstring& filter,
bool loop,
- uint32_t start,
- uint32_t length,
+ uint32_t in,
+ uint32_t out,
bool thumbnail_mode,
const std::wstring& custom_channel_order,
const ffmpeg_options& vid_params)
: filename_(url_or_file)
, frame_factory_(frame_factory)
, initial_logger_disabler_(temporary_enable_quiet_logging_for_thread(thumbnail_mode))
- , input_(graph_, url_or_file, loop, start, length, thumbnail_mode, vid_params)
+ , input_(graph_, url_or_file, loop, in, out, thumbnail_mode, vid_params)
, framerate_(read_framerate(*input_.context(), format_desc.framerate))
- , start_(start)
- , length_(length)
, thumbnail_mode_(thumbnail_mode)
, last_frame_(core::draw_frame::empty())
- , frame_number_(0)
{
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
graph_->set_color("underflow", diagnostics::color(0.6f, 0.3f, 0.9f));
CASPAR_THROW_EXCEPTION(averror_stream_not_found() << msg_info("No streams found"));
muxer_.reset(new frame_muxer(framerate_, std::move(audio_input_pads), frame_factory, format_desc, channel_layout, filter, true));
+
+ if (auto nb_frames = file_nb_frames())
+ {
+ out = std::min(out, nb_frames);
+ input_.out(out);
+ }
}
// frame_producer
// therefore no seeking should be necessary for the first frame.
{
input_.seek(file_position > 1 ? file_position - 2: file_position).get();
- boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
}
for (int i = 0; i < NUM_RETRIES; ++i)
{
- boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
auto frame = render_frame();
{
CASPAR_LOG(trace) << print() << L" adjusting to " << adjusted_seek;
input_.seek(static_cast<uint32_t>(adjusted_seek) - 1).get();
- boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
+ boost::this_thread::sleep_for(boost::chrono::milliseconds(40));
}
else
return frame.first;
if (is_url() || input_.loop())
return std::numeric_limits<uint32_t>::max();
- uint32_t nb_frames = file_nb_frames();
-
- nb_frames = std::min(length_, nb_frames - start_);
- nb_frames = muxer_->calc_nb_frames(nb_frames);
+ auto nb_frames = std::min(input_.out(), file_nb_frames());
+ if (nb_frames >= input_.in())
+ nb_frames -= input_.in();
+ else
+ nb_frames = 0;
- return nb_frames;
+ return muxer_->calc_nb_frames(nb_frames);
}
uint32_t file_nb_frames() const
{
- uint32_t file_nb_frames = 0;
- file_nb_frames = std::max(file_nb_frames, video_decoder_ ? video_decoder_->nb_frames() : 0);
- return file_nb_frames;
+ // Total frame count as reported by the video decoder; 0 when no video stream is present.
+ return video_decoder_ ? video_decoder_->nb_frames() : 0;
}
std::future<std::wstring> call(const std::vector<std::wstring>& params) override
{
- static const boost::wregex loop_exp(LR"(LOOP\s*(?<VALUE>\d?)?)", boost::regex::icase);
- static const boost::wregex seek_exp(LR"(SEEK\s+(?<VALUE>(\+|-)?\d+)(\s+(?<WHENCE>REL|END))?)", boost::regex::icase);
- static const boost::wregex length_exp(LR"(LENGTH\s+(?<VALUE>\d+)?)", boost::regex::icase);
- static const boost::wregex start_exp(LR"(START\s+(?<VALUE>\d+)?)", boost::regex::icase);
-
- auto param = boost::algorithm::join(params, L" ");
-
std::wstring result;
- boost::wsmatch what;
- if(boost::regex_match(param, what, loop_exp))
+ std::wstring cmd = params.at(0);
+ std::wstring value;
+ if (params.size() > 1)
+ value = params.at(1);
+
+ if (boost::iequals(cmd, L"loop"))
{
- auto value = what["VALUE"].str();
if (!value.empty())
input_.loop(boost::lexical_cast<bool>(value));
result = boost::lexical_cast<std::wstring>(input_.loop());
}
- else if(boost::regex_match(param, what, seek_exp))
+ else if (boost::iequals(cmd, L"in") || boost::iequals(cmd, L"start"))
{
- auto value = boost::lexical_cast<int64_t>(what["VALUE"].str());
- auto whence = what["WHENCE"].str();
- auto total = file_nb_frames();
-
- if(boost::iequals(whence, L"REL"))
- value = file_frame_number() + value;
- else if(boost::iequals(whence, L"END"))
- value = total - value;
-
- if(value < 0)
- value = 0;
- else if(value >= total)
- value = total - 1;
-
- input_.seek(static_cast<uint32_t>(value));
+ if (!value.empty())
+ input_.in(boost::lexical_cast<uint32_t>(value));
+ result = boost::lexical_cast<std::wstring>(input_.in());
}
- else if(boost::regex_match(param, what, length_exp))
+ else if (boost::iequals(cmd, L"out"))
{
- auto value = what["VALUE"].str();
- if(!value.empty())
+ if (!value.empty())
+ input_.out(boost::lexical_cast<uint32_t>(value));
+ result = boost::lexical_cast<std::wstring>(input_.out());
+ }
+ else if (boost::iequals(cmd, L"length"))
+ {
+ if (!value.empty())
input_.length(boost::lexical_cast<uint32_t>(value));
result = boost::lexical_cast<std::wstring>(input_.length());
}
- else if(boost::regex_match(param, what, start_exp))
+ else if (boost::iequals(cmd, L"seek") && !value.empty())
{
- auto value = what["VALUE"].str();
- if(!value.empty())
- input_.start(boost::lexical_cast<uint32_t>(value));
- result = boost::lexical_cast<std::wstring>(input_.start());
+ auto nb_frames = file_nb_frames();
+
+ int64_t seek;
+ if (boost::iequals(value, L"rel"))
+ seek = file_frame_number();
+ else if (boost::iequals(value, L"in"))
+ seek = input_.in();
+ else if (boost::iequals(value, L"out"))
+ seek = input_.out();
+ else if (boost::iequals(value, L"end"))
+ seek = nb_frames;
+ else
+ seek = boost::lexical_cast<int64_t>(value);
+
+ if (params.size() > 2)
+ seek += boost::lexical_cast<int64_t>(params.at(2));
+
+ if (seek < 0)
+ seek = 0;
+ else if (seek >= nb_frames)
+ seek = nb_frames - 1;
+
+ input_.seek(static_cast<uint32_t>(seek));
}
else
CASPAR_THROW_EXCEPTION(invalid_argument());
void describe_producer(core::help_sink& sink, const core::help_repository& repo)
{
sink.short_description(L"A producer for playing media files supported by FFmpeg.");
- sink.syntax(L"[clip,url:string] {[loop:LOOP]} {SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
+ sink.syntax(L"[clip,url:string] {[loop:LOOP]} {IN,SEEK [in:int]} {OUT [out:int] | LENGTH [length:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
sink.para()
->text(L"The FFmpeg Producer can play all media that FFmpeg can play, which includes many ")
->text(L"QuickTime video codec such as Animation, PNG, PhotoJPEG, MotionJPEG, as well as ")
sink.definitions()
->item(L"clip", L"The file without the file extension to play. It should reside under the media folder.")
->item(L"url", L"If clip contains :// it is instead treated as the URL parameter. The URL can either be any streaming protocol supported by FFmpeg, dshow://video={webcam_name} or v4l2://{video device}.")
- ->item(L"loop", L"Will cause the media file to loop between start and start + length")
- ->item(L"start", L"Optionally sets the start frame. 0 by default. If loop is specified this will be the frame where it starts over again.")
- ->item(L"length", L"Optionally sets the length of the clip. If not specified the clip will be played to the end. If loop is specified the file will jump to start position once this number of frames has been played.")
+ ->item(L"loop", L"Will cause the media file to loop between in and out.")
+ ->item(L"in", L"Optionally sets the first frame. 0 by default. If loop is specified, this will be the frame where it starts over again.")
+ ->item(L"out", L"Optionally sets the last frame. If not specified the clip will be played to the end. If loop is specified, the file will jump to start position once it reaches the last frame.")
+ ->item(L"length", L"Optionally sets the length of the clip. Equivalent to specifying OUT as IN + LENGTH.")
->item(L"filter", L"If specified, will be used as an FFmpeg video filter.")
->item(L"channel_layout",
- L"Optionally override the automatically deduced audio channel layout. "
+ L"Optionally override the automatically deduced audio channel layout. "
L"Either a named layout as specified in casparcg.config or in the format [type:string]:[channel_order:string] for a custom layout.");
sink.para()->text(L"Examples:");
sink.example(L">> PLAY 1-10 folder/clip", L"to play all frames in a clip and stop at the last frame.");
sink.example(L">> PLAY 1-10 folder/clip LOOP", L"to loop a clip between the first frame and the last frame.");
- sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10", L"to loop a clip between frame 10 and the last frame.");
- sink.example(L">> PLAY 1-10 folder/clip LOOP SEEK 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
- sink.example(L">> PLAY 1-10 folder/clip SEEK 10 LENGTH 50", L"to play frames 10-60 in a clip and stop.");
+ sink.example(L">> PLAY 1-10 folder/clip LOOP IN 10", L"to loop a clip between frame 10 and the last frame.");
+ sink.example(L">> PLAY 1-10 folder/clip LOOP IN 10 LENGTH 50", L"to loop a clip between frame 10 and frame 60.");
+ sink.example(L">> PLAY 1-10 folder/clip IN 10 OUT 60", L"to play frames 10-60 in a clip and stop.");
sink.example(L">> PLAY 1-10 folder/clip FILTER yadif=1,-1", L"to deinterlace the video.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT film", L"given the defaults in casparcg.config this will specifies that the clip has 6 audio channels of the type 5.1 and that they are in the order FL FC FR BL BR LFE regardless of what ffmpeg says.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT \"5.1:LFE FL FC FR BL BR\"", L"specifies that the clip has 6 audio channels of the type 5.1 and that they are in the specified order regardless of what ffmpeg says.");
sink.example(L">> PLAY 1-10 v4l2:///dev/video0", L"to use a web camera as video input on Linux.");
sink.para()->text(L"The FFmpeg producer also supports changing some of the settings via ")->code(L"CALL")->text(L":");
sink.example(L">> CALL 1-10 LOOP 1");
- sink.example(L">> CALL 1-10 START 10");
+ sink.example(L">> CALL 1-10 IN 10");
+ sink.example(L">> CALL 1-10 OUT 60");
sink.example(L">> CALL 1-10 LENGTH 50");
sink.example(L">> CALL 1-10 SEEK 30");
core::describe_framerate_producer(sink);
if (file_or_url.empty())
return core::frame_producer::empty();
+ constexpr auto uint32_max = std::numeric_limits<uint32_t>::max();
+
auto loop = contains_param(L"LOOP", params);
- auto start = get_param(L"SEEK", params, static_cast<uint32_t>(0));
- auto length = get_param(L"LENGTH", params, std::numeric_limits<uint32_t>::max());
+
+ auto in = get_param(L"SEEK", params, static_cast<uint32_t>(0)); // compatibility
+ in = get_param(L"IN", params, in);
+
+ auto out = get_param(L"LENGTH", params, uint32_max);
+ if (out < uint32_max - in)
+ out += in;
+ else
+ out = uint32_max;
+ out = get_param(L"OUT", params, out);
+
auto filter_str = get_param(L"FILTER", params, L"");
auto custom_channel_order = get_param(L"CHANNEL_LAYOUT", params, L"");
file_or_url,
filter_str,
loop,
- start,
- length,
+ in,
+ out,
false,
custom_channel_order,
vid_params);
return core::draw_frame::empty();
auto loop = false;
- auto start = 0;
- auto length = std::numeric_limits<uint32_t>::max();
+ auto in = 0;
+ auto out = std::numeric_limits<uint32_t>::max();
auto filter_str = L"";
ffmpeg_options vid_params;
filename,
filter_str,
loop,
- start,
- length,
+ in,
+ out,
true,
L"",
vid_params);
#include <tbb/atomic.h>
#include <tbb/recursive_mutex.h>
-#include <boost/range/algorithm.hpp>
-#include <boost/thread/condition_variable.hpp>
-#include <boost/thread/mutex.hpp>
-#include <boost/thread/thread.hpp>
-
#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
static const size_t MAX_BUFFER_SIZE = 64 * 1000000;
namespace caspar { namespace ffmpeg {
-struct input::implementation : boost::noncopyable
+struct input::impl : boost::noncopyable
{
const spl::shared_ptr<diagnostics::graph> graph_;
const int default_stream_index_ = av_find_default_stream_index(format_context_.get());
const std::wstring filename_;
- tbb::atomic<uint32_t> start_;
- tbb::atomic<uint32_t> length_;
+ tbb::atomic<uint32_t> in_;
+ tbb::atomic<uint32_t> out_;
const bool thumbnail_mode_;
tbb::atomic<bool> loop_;
- uint32_t frame_number_ = 0;
+ uint32_t file_frame_number_ = 0;
tbb::concurrent_bounded_queue<std::shared_ptr<AVPacket>> buffer_;
tbb::atomic<size_t> buffer_size_;
executor executor_;
- explicit implementation(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
+ explicit impl(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& url_or_file, bool loop, uint32_t in, uint32_t out, bool thumbnail_mode, const ffmpeg_options& vid_params)
: graph_(graph)
, format_context_(open_input(url_or_file, vid_params))
, filename_(url_or_file)
enable_quiet_logging_for_thread();
});
- start_ = start;
- length_ = length;
+ in_ = in;
+ out_ = out;
loop_ = loop;
buffer_size_ = 0;
- if(start_ > 0)
- queued_seek(start_);
+ if(in_ > 0)
+ queued_seek(in_);
graph_->set_color("seek", diagnostics::color(1.0f, 0.5f, 0.0f));
graph_->set_color("buffer-count", diagnostics::color(0.7f, 0.4f, 0.4f));
if(is_eof(ret))
{
- frame_number_ = 0;
+ file_frame_number_ = 0;
if(loop_)
{
- queued_seek(start_);
+ queued_seek(in_);
graph_->set_tag(diagnostics::tag_severity::INFO, "seek");
CASPAR_LOG(trace) << print() << " Looping.";
}
THROW_ON_ERROR(ret, "av_read_frame", print());
if(packet->stream_index == default_stream_index_)
- ++frame_number_;
+ ++file_frame_number_;
THROW_ON_ERROR2(av_dup_packet(packet.get()), print());
auto stream = format_context_->streams[default_stream_index_];
-
auto fps = read_fps(*format_context_, 0.0);
THROW_ON_ERROR2(avformat_seek_file(
std::numeric_limits<int64_t>::max(),
0), print());
+ file_frame_number_ = target;
+
auto flush_packet = create_packet();
flush_packet->data = nullptr;
flush_packet->size = 0;
if(ret == AVERROR_EOF)
CASPAR_LOG(trace) << print() << " Received EOF. ";
- return ret == AVERROR_EOF || ret == AVERROR(EIO) || frame_number_ >= length_; // av_read_frame doesn't always correctly return AVERROR_EOF;
+ return ret == AVERROR_EOF || ret == AVERROR(EIO) || file_frame_number_ >= out_; // av_read_frame doesn't always correctly return AVERROR_EOF;
}
int num_audio_streams() const
}
};
-input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
- : impl_(new implementation(graph, url_or_file, loop, start, length, thumbnail_mode, vid_params)){}
+input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& url_or_file, bool loop, uint32_t in, uint32_t out, bool thumbnail_mode, const ffmpeg_options& vid_params)
+ : impl_(new impl(graph, url_or_file, loop, in, out, thumbnail_mode, vid_params)){}
bool input::eof() const {return !impl_->executor_.is_running();}
bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}
spl::shared_ptr<AVFormatContext> input::context(){return impl_->format_context_;}
-void input::start(uint32_t value){impl_->start_ = value;}
-uint32_t input::start() const{return impl_->start_;}
-void input::length(uint32_t value){impl_->length_ = value;}
-uint32_t input::length() const{return impl_->length_;}
+void input::in(uint32_t value){impl_->in_ = value;}
+uint32_t input::in() const{return impl_->in_;}
+void input::out(uint32_t value){impl_->out_ = value;}
+uint32_t input::out() const{return impl_->out_;}
+// LENGTH is expressed relative to IN. Clamp instead of wrapping: uint32_t max is
+// the conventional "play to the end" length and must map to out_ == max, not in_ - 1.
+void input::length(uint32_t value){impl_->out_ = value > std::numeric_limits<uint32_t>::max() - impl_->in_ ? std::numeric_limits<uint32_t>::max() : impl_->in_ + value;}
+// Guard against underflow when IN has been moved past OUT (possible via CALL IN).
+uint32_t input::length() const{return impl_->out_ > impl_->in_ ? impl_->out_ - impl_->in_ : 0;}
void input::loop(bool value){impl_->loop_ = value;}
bool input::loop() const{return impl_->loop_;}
int input::num_audio_streams() const { return impl_->num_audio_streams(); }