*/
#include "../StdAfx.h"
-
+
#include "decklink_consumer.h"
#include "../util/util.h"
#include <boost/lexical_cast.hpp>
#include <boost/circular_buffer.hpp>
#include <boost/property_tree/ptree.hpp>
+#include <boost/thread/mutex.hpp>
#include <future>
-namespace caspar { namespace decklink {
-
+namespace caspar { namespace decklink {
+
struct configuration
{
enum class keyer_t
bool key_only = false;
int base_buffer_depth = 3;
core::audio_channel_layout out_channel_layout = core::audio_channel_layout::invalid();
-
+
int buffer_depth() const
{
return base_buffer_depth + (latency == latency_t::low_latency ? 0 : 1);
{
BOOL value = true;
if (SUCCEEDED(attributes->GetFlag(BMDDeckLinkSupportsInternalKeying, &value)) && !value)
- CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";
+ CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";
else if (FAILED(decklink_keyer->Enable(FALSE)))
- CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";
+ CASPAR_LOG(error) << print << L" Failed to enable internal keyer.";
else if (FAILED(decklink_keyer->SetLevel(255)))
CASPAR_LOG(error) << print << L" Failed to set key-level to max.";
else
- CASPAR_LOG(info) << print << L" Enabled internal keyer.";
+ CASPAR_LOG(info) << print << L" Enabled internal keyer.";
}
else if (keyer == configuration::keyer_t::external_keyer)
{
BOOL value = true;
if (SUCCEEDED(attributes->GetFlag(BMDDeckLinkSupportsExternalKeying, &value)) && !value)
- CASPAR_LOG(error) << print << L" Failed to enable external keyer.";
- else if (FAILED(decklink_keyer->Enable(TRUE)))
- CASPAR_LOG(error) << print << L" Failed to enable external keyer.";
+ CASPAR_LOG(error) << print << L" Failed to enable external keyer.";
+ else if (FAILED(decklink_keyer->Enable(TRUE)))
+ CASPAR_LOG(error) << print << L" Failed to enable external keyer.";
else if (FAILED(decklink_keyer->SetLevel(255)))
CASPAR_LOG(error) << print << L" Failed to set key-level to max.";
else
- CASPAR_LOG(info) << print << L" Enabled external keyer.";
+ CASPAR_LOG(info) << print << L" Enabled external keyer.";
}
}
needs_to_copy_ = will_attempt_dma && dma_transfer_from_gl_buffer_impossible;
}
-
+
// IUnknown
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID*)
{
return E_NOINTERFACE;
}
-
+
virtual ULONG STDMETHODCALLTYPE AddRef()
{
return ++ref_count_;
virtual long STDMETHODCALLTYPE GetRowBytes() {return static_cast<long>(format_desc_.width*4);}
virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat() {return bmdFormat8BitBGRA;}
virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags() {return bmdFrameFlagDefault;}
-
+
virtual HRESULT STDMETHODCALLTYPE GetBytes(void** buffer)
{
try
return S_OK;
}
-
+
virtual HRESULT STDMETHODCALLTYPE GetTimecode(BMDTimecodeFormat format, IDeckLinkTimecode** timecode) {return S_FALSE;}
virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary** ancillary) {return S_FALSE;}
- // decklink_frame
+ // decklink_frame
const core::audio_buffer& audio_data()
{
template <typename Configuration>
struct decklink_consumer : public IDeckLinkVideoOutputCallback, boost::noncopyable
-{
+{
const int channel_index_;
const configuration config_;
std::exception_ptr exception_;
tbb::atomic<bool> is_running_;
-
+
const std::wstring model_name_ = get_model_name(decklink_);
bool will_attempt_dma_;
const core::video_format_desc format_desc_;
long long audio_scheduled_ = 0;
int preroll_count_ = 0;
-
+
boost::circular_buffer<std::vector<int32_t>> audio_container_ { buffer_size_ + 1 };
tbb::concurrent_bounded_queue<core::const_frame> frame_buffer_;
-
+
spl::shared_ptr<diagnostics::graph> graph_;
caspar::timer tick_timer_;
+ boost::mutex send_completion_mutex_;
std::packaged_task<bool ()> send_completion_;
reference_signal_detector reference_signal_detector_ { output_ };
tbb::atomic<int64_t> current_presentation_delay_;
const configuration& config,
const core::video_format_desc& format_desc,
const core::audio_channel_layout& in_channel_layout,
- int channel_index)
+ int channel_index)
: channel_index_(channel_index)
, config_(config)
, format_desc_(format_desc)
is_running_ = true;
current_presentation_delay_ = 0;
scheduled_frames_completed_ = 0;
-
+
frame_buffer_.set_capacity(1);
if (config.keyer == configuration::keyer_t::external_separate_device_keyer)
key_context_.reset(new key_video_context<Configuration>(config, print()));
- graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
+ graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
graph_->set_color("late-frame", diagnostics::color(0.6f, 0.3f, 0.3f));
graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
graph_->set_color("flushed-frame", diagnostics::color(0.4f, 0.3f, 0.8f));
graph_->set_text(print());
diagnostics::register_graph(graph_);
-
+
enable_video(get_display_mode(output_, format_desc_.format, bmdFormat8BitBGRA, bmdVideoOutputFlagDefault, will_attempt_dma_));
-
+
if(config.embedded_audio)
enable_audio();
-
- set_latency(configuration_, config.latency, print());
+
+ set_latency(configuration_, config.latency, print());
set_keyer(attributes_, keyer_, config.keyer, print());
if(config.embedded_audio)
- output_->BeginAudioPreroll();
-
+ output_->BeginAudioPreroll();
+
for (int n = 0; n < buffer_size_; ++n)
{
if (config.embedded_audio)
}
~decklink_consumer()
- {
+ {
is_running_ = false;
frame_buffer_.try_push(core::const_frame::empty());
- if(output_ != nullptr)
+ if(output_ != nullptr)
{
output_->StopScheduledPlayback(0, nullptr, 0);
if(config_.embedded_audio)
output_->DisableVideoOutput();
}
}
-
+
void enable_audio()
{
if(FAILED(output_->EnableAudioOutput(bmdAudioSampleRate48kHz, bmdAudioSampleType32bitInteger, out_channel_layout_.num_channels, bmdAudioOutputStreamTimestamped)))
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not enable audio output."));
-
+
CASPAR_LOG(info) << print() << L" Enabled embedded-audio.";
}
void enable_video(BMDDisplayMode display_mode)
{
- if(FAILED(output_->EnableVideoOutput(display_mode, bmdVideoOutputFlagDefault)))
+ if(FAILED(output_->EnableVideoOutput(display_mode, bmdVideoOutputFlagDefault)))
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Could not enable fill video output."));
-
+
if(FAILED(output_->SetScheduledFrameCompletionCallback(this)))
- CASPAR_THROW_EXCEPTION(caspar_exception()
+ CASPAR_THROW_EXCEPTION(caspar_exception()
<< msg_info(print() + L" Failed to set fill playback completion callback.")
<< boost::errinfo_api_function("SetScheduledFrameCompletionCallback"));
void start_playback()
{
- if(FAILED(output_->StartScheduledPlayback(0, format_desc_.time_scale, 1.0)))
+ if(FAILED(output_->StartScheduledPlayback(0, format_desc_.time_scale, 1.0)))
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to schedule fill playback."));
if (key_context_ && FAILED(key_context_->output_->StartScheduledPlayback(0, format_desc_.time_scale, 1.0)))
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Failed to schedule key playback."));
}
-
+
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID, LPVOID*) {return E_NOINTERFACE;}
virtual ULONG STDMETHODCALLTYPE AddRef() {return 1;}
virtual ULONG STDMETHODCALLTYPE Release() {return 1;}
-
+
virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped()
{
is_running_ = false;
{
if(!is_running_)
return E_FAIL;
-
+
try
{
auto tick_time = tick_timer_.elapsed()*format_desc_.fps * 0.5;
frame_buffer_.pop(frame);
- if (send_completion_.valid())
{
- send_completion_();
- send_completion_ = std::packaged_task<bool ()>();
+ boost::lock_guard<boost::mutex> lock(send_completion_mutex_);
+
+ if (send_completion_.valid())
+ {
+ send_completion_();
+ send_completion_ = std::packaged_task<bool()>();
+ }
}
if (config_.embedded_audio)
audio_scheduled_ += sample_frame_count;
}
-
+
void schedule_next_video(core::const_frame frame)
{
if (key_context_)
});
if(exception != nullptr)
- std::rethrow_exception(exception);
+ std::rethrow_exception(exception);
if(!is_running_)
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info(print() + L" Is not running."));
-
+
if (frame_buffer_.try_push(frame))
return make_ready_future(true);
+ boost::lock_guard<boost::mutex> lock(send_completion_mutex_);
+
send_completion_ = std::packaged_task<bool ()>([frame, this] () mutable -> bool
{
frame_buffer_.push(frame);
return send_completion_.get_future();
}
-
+
std::wstring print() const
{
if (config_.keyer == configuration::keyer_t::external_separate_device_keyer)
}
// frame_consumer
-
+
void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int channel_index) override
{
format_desc_ = format_desc;
executor_.invoke([=]
{
consumer_.reset();
- consumer_.reset(new decklink_consumer<Configuration>(config_, format_desc, channel_layout, channel_index));
+ consumer_.reset(new decklink_consumer<Configuration>(config_, format_desc, channel_layout, channel_index));
});
}
-
+
std::future<bool> send(core::const_frame frame) override
{
return consumer_->send(frame);
}
-
+
std::wstring print() const override
{
return consumer_ ? consumer_->print() : L"[decklink_consumer]";
- }
+ }
std::wstring name() const override
{
{
if (params.size() < 1 || !boost::iequals(params.at(0), L"DECKLINK"))
return core::frame_consumer::empty();
-
+
configuration config;
-
+
if (params.size() > 1)
config.device_index = boost::lexical_cast<int>(params.at(1));
-
+
if (contains_param(L"INTERNAL_KEY", params))
config.keyer = configuration::keyer_t::internal_keyer;
else if (contains_param(L"EXTERNAL_KEY", params))
-----------------------------------------------------------------------------
-Thanks for your inquiry. The minimum number of frames that you can preroll
-for scheduled playback is three frames for video and four frames for audio.
+Thanks for your inquiry. The minimum number of frames that you can preroll
+for scheduled playback is three frames for video and four frames for audio.
As you mentioned if you preroll less frames then playback will not start or
-playback will be very sporadic. From our experience with Media Express, we
-recommended that at least seven frames are prerolled for smooth playback.
+playback will be very sporadic. From our experience with Media Express, we
+recommended that at least seven frames are prerolled for smooth playback.
Regarding the bmdDeckLinkConfigLowLatencyVideoOutput flag:
There can be around 3 frames worth of latency on scheduled output.
When the bmdDeckLinkConfigLowLatencyVideoOutput flag is used this latency is
-reduced or removed for scheduled playback. If the DisplayVideoFrameSync()
-method is used, the bmdDeckLinkConfigLowLatencyVideoOutput setting will
-guarantee that the provided frame will be output as soon the previous
+reduced or removed for scheduled playback. If the DisplayVideoFrameSync()
+method is used, the bmdDeckLinkConfigLowLatencyVideoOutput setting will
+guarantee that the provided frame will be output as soon as the previous
frame output has been completed.
################################################################################
*/
-----------------------------------------------------------------------------
-Thanks for your inquiry. You could try subclassing IDeckLinkMutableVideoFrame
-and providing a pointer to your video buffer when GetBytes() is called.
-This may help to keep copying to a minimum. Please ensure that the pixel
-format is in bmdFormat10BitYUV, otherwise the DeckLink API / driver will
+Thanks for your inquiry. You could try subclassing IDeckLinkMutableVideoFrame
+and providing a pointer to your video buffer when GetBytes() is called.
+This may help to keep copying to a minimum. Please ensure that the pixel
+format is in bmdFormat10BitYUV, otherwise the DeckLink API / driver will
have to colourspace convert which may result in additional copying.
################################################################################
*/
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#endif
namespace caspar { namespace ffmpeg {
-
+
struct audio_decoder::implementation : boost::noncopyable
-{
+{
int index_ = -1;
const spl::shared_ptr<AVCodecContext> codec_context_;
const int out_samplerate_;
-
+
cache_aligned_vector<int32_t> buffer_;
std::queue<spl::shared_ptr<AVPacket>> packets_;
: codec_context_(open_codec(*context, AVMEDIA_TYPE_AUDIO, index_, false))
, out_samplerate_(out_samplerate)
, buffer_(10 * out_samplerate_ * codec_context_->channels) // 10 seconds of audio
- {
+ {
if(!swr_)
- BOOST_THROW_EXCEPTION(bad_alloc());
-
+ CASPAR_THROW_EXCEPTION(bad_alloc());
+
THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");
codec_context_->refcounted_frames = 1;
}
void push(const std::shared_ptr<AVPacket>& packet)
- {
+ {
if(!packet)
return;
if(packet->stream_index == index_ || packet->data == nullptr)
packets_.push(spl::make_shared_ptr(packet));
- }
-
+ }
+
std::shared_ptr<core::mutable_audio_buffer> poll()
{
if(packets_.empty())
return nullptr;
-
+
auto packet = packets_.front();
if(packet->data == nullptr)
auto audio = decode(*packet);
- if(packet->size == 0)
+ if(packet->size == 0)
packets_.pop();
return audio;
}
std::wstring print() const
- {
+ {
return L"[audio-decoder] " + u16(codec_context_->codec->long_name);
}
};
const std::wstring filename_;
const std::wstring path_relative_to_media_ = get_relative_or_original(filename_, env::media_folder());
- FFMPEG_Resource resource_type_;
-
const spl::shared_ptr<diagnostics::graph> graph_;
timer frame_timer_;
explicit ffmpeg_producer(
const spl::shared_ptr<core::frame_factory>& frame_factory,
const core::video_format_desc& format_desc,
- const std::wstring& filename,
- FFMPEG_Resource resource_type,
+ const std::wstring& url_or_file,
const std::wstring& filter,
bool loop,
uint32_t start,
bool thumbnail_mode,
const std::wstring& custom_channel_order,
const ffmpeg_options& vid_params)
- : filename_(filename)
- , resource_type_(resource_type)
+ : filename_(url_or_file)
, frame_factory_(frame_factory)
, initial_logger_disabler_(temporary_enable_quiet_logging_for_thread(thumbnail_mode))
- , input_(graph_, filename_, resource_type, loop, start, length, thumbnail_mode, vid_params)
+ , input_(graph_, url_or_file, loop, start, length, thumbnail_mode, vid_params)
, framerate_(read_framerate(*input_.context(), format_desc.framerate))
, start_(start)
, length_(length)
send_osc();
return std::make_pair(last_frame(), -1);
}
- else if (resource_type_ == FFMPEG_Resource::FFMPEG_FILE)
+ else if (!is_url())
{
graph_->set_tag(diagnostics::tag_severity::WARNING, "underflow");
send_osc();
- return std::make_pair(core::draw_frame::late(), -1);
+ return std::make_pair(last_frame_, -1);
}
else
{
send_osc();
- return std::make_pair(last_frame(), -1);
+ return std::make_pair(last_frame_, -1);
}
}
return frame;
}
+ bool is_url() const
+ {
+ return boost::contains(filename_, L"://");
+ }
+
void send_osc()
{
double fps = static_cast<double>(framerate_.numerator()) / static_cast<double>(framerate_.denominator());
if (grid < 1)
{
CASPAR_LOG(error) << L"configuration/thumbnails/video-grid cannot be less than 1";
- BOOST_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
+ CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("configuration/thumbnails/video-grid cannot be less than 1"));
}
if (grid == 1)
uint32_t nb_frames() const override
{
- if (resource_type_ == FFMPEG_Resource::FFMPEG_DEVICE || resource_type_ == FFMPEG_Resource::FFMPEG_STREAM || input_.loop())
+ if (is_url() || input_.loop())
return std::numeric_limits<uint32_t>::max();
uint32_t nb_frames = file_nb_frames();
std::wstring print() const override
{
- return L"ffmpeg[" + boost::filesystem::path(filename_).filename().wstring() + L"|"
+ return L"ffmpeg[" + (is_url() ? filename_ : boost::filesystem::path(filename_).filename().wstring()) + L"|"
+ print_mode() + L"|"
+ boost::lexical_cast<std::wstring>(file_frame_number_) + L"/" + boost::lexical_cast<std::wstring>(file_nb_frames()) + L"]";
}
void describe_producer(core::help_sink& sink, const core::help_repository& repo)
{
sink.short_description(L"A producer for playing media files supported by FFmpeg.");
- sink.syntax(L"[clip:string] {[loop:LOOP]} {SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
+ sink.syntax(L"[clip,url:string] {[loop:LOOP]} {SEEK [start:int]} {LENGTH [start:int]} {FILTER [filter:string]} {CHANNEL_LAYOUT [channel_layout:string]}");
sink.para()
->text(L"The FFmpeg Producer can play all media that FFmpeg can play, which includes many ")
->text(L"QuickTime video codec such as Animation, PNG, PhotoJPEG, MotionJPEG, as well as ")
->text(L"H.264, FLV, WMV and several audio codecs as well as uncompressed audio.");
sink.definitions()
->item(L"clip", L"The file without the file extension to play. It should reside under the media folder.")
+ ->item(L"url", L"If clip contains :// it is instead treated as the URL parameter. The URL can either be any streaming protocol supported by FFmpeg or dshow://video={webcam_name}.")
->item(L"loop", L"Will cause the media file to loop between start and start + length")
->item(L"start", L"Optionally sets the start frame. 0 by default. If loop is specified this will be the frame where it starts over again.")
->item(L"length", L"Optionally sets the length of the clip. If not specified the clip will be played to the end. If loop is specified the file will jump to start position once this number of frames has been played.")
sink.example(L">> PLAY 1-10 folder/clip FILTER yadif=1,-1", L"to deinterlace the video.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT film", L"given the defaults in casparcg.config this will specifies that the clip has 6 audio channels of the type 5.1 and that they are in the order FL FC FR BL BR LFE regardless of what ffmpeg says.");
sink.example(L">> PLAY 1-10 folder/clip CHANNEL_LAYOUT \"5.1:LFE FL FC FR BL BR\"", L"specifies that the clip has 6 audio channels of the type 5.1 and that they are in the specified order regardless of what ffmpeg says.");
+ sink.example(L">> PLAY 1-10 rtmp://example.com/live/stream", L"to play an RTMP stream.");
+ sink.example(L">> PLAY 1-10 \"dshow://video=Live! Cam Chat HD VF0790\"", L"to use a web camera as video input.");
sink.para()->text(L"The FFmpeg producer also supports changing some of the settings via ")->code(L"CALL")->text(L":");
sink.example(L">> CALL 1-10 LOOP 1");
sink.example(L">> CALL 1-10 START 10");
const std::vector<std::wstring>& params,
const spl::shared_ptr<core::media_info_repository>& info_repo)
{
- // Infer the resource type from the resource_name
- auto resource_type = FFMPEG_Resource::FFMPEG_FILE;
- auto tokens = protocol_split(params.at(0));
- auto filename = params.at(0);
+ auto file_or_url = params.at(0);
- if (!tokens[0].empty())
- {
- if (tokens[0] == L"dshow")
- {
- // Camera
- resource_type = FFMPEG_Resource::FFMPEG_DEVICE;
- filename = tokens[1];
- }
- else
- {
- // Stream
- resource_type = FFMPEG_Resource::FFMPEG_STREAM;
- filename = params.at(0);
- }
- }
- else
+ if (!boost::contains(file_or_url, L"://"))
{
// File
- resource_type = FFMPEG_Resource::FFMPEG_FILE;
- filename = probe_stem(env::media_folder() + L"/" + params.at(0), false);
+ file_or_url = probe_stem(env::media_folder() + L"/" + file_or_url, false);
}
- if (filename.empty())
+ if (file_or_url.empty())
return core::frame_producer::empty();
auto loop = contains_param(L"LOOP", params);
auto producer = spl::make_shared<ffmpeg_producer>(
dependencies.frame_factory,
dependencies.format_desc,
- filename,
- resource_type,
+ file_or_url,
filter_str,
loop,
start,
dependencies.frame_factory,
dependencies.format_desc,
filename,
- FFMPEG_Resource::FFMPEG_FILE,
filter_str,
loop,
start,
#include <common/executor.h>
#include <common/except.h>
#include <common/os/general_protection_fault.h>
+#include <common/param.h>
+#include <common/scope_exit.h>
#include <tbb/concurrent_queue.h>
#include <tbb/atomic.h>
executor executor_;
- explicit implementation(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& filename, FFMPEG_Resource resource_type, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
+ explicit implementation(const spl::shared_ptr<diagnostics::graph> graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
: graph_(graph)
- , format_context_(open_input(filename, resource_type, vid_params))
- , filename_(filename)
+ , format_context_(open_input(url_or_file, vid_params))
+ , filename_(url_or_file)
, thumbnail_mode_(thumbnail_mode)
, executor_(print())
{
});
}
- spl::shared_ptr<AVFormatContext> open_input(const std::wstring resource_name, FFMPEG_Resource resource_type, const ffmpeg_options& vid_params)
+ spl::shared_ptr<AVFormatContext> open_input(const std::wstring& url_or_file, const ffmpeg_options& vid_params)
{
- AVFormatContext* weak_context = nullptr;
+ AVDictionary* format_options = nullptr;
- switch (resource_type) {
- case FFMPEG_Resource::FFMPEG_FILE:
- THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), nullptr, nullptr), resource_name);
- break;
- case FFMPEG_Resource::FFMPEG_DEVICE:
- {
- AVDictionary* format_options = NULL;
- for (auto& option : vid_params)
- {
- av_dict_set(&format_options, option.first.c_str(), option.second.c_str(), 0);
- }
- AVInputFormat* input_format = av_find_input_format("dshow");
- THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), input_format, &format_options), resource_name);
- if (format_options != nullptr)
- {
- std::string unsupported_tokens = "";
- AVDictionaryEntry *t = NULL;
- while ((t = av_dict_get(format_options, "", t, AV_DICT_IGNORE_SUFFIX)) != nullptr)
- {
- if (!unsupported_tokens.empty())
- unsupported_tokens += ", ";
- unsupported_tokens += t->key;
- }
- avformat_close_input(&weak_context);
- BOOST_THROW_EXCEPTION(ffmpeg_error() << msg_info(unsupported_tokens));
- }
- av_dict_free(&format_options);
- }
- break;
- case FFMPEG_Resource::FFMPEG_STREAM:
- {
- AVDictionary* format_options = NULL;
- for (auto& option : vid_params)
- {
- av_dict_set(&format_options, option.first.c_str(), option.second.c_str(), 0);
- }
- THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), nullptr, &format_options), resource_name);
- if (format_options != nullptr)
- {
- std::string unsupported_tokens = "";
- AVDictionaryEntry *t = NULL;
- while ((t = av_dict_get(format_options, "", t, AV_DICT_IGNORE_SUFFIX)) != nullptr)
- {
- if (!unsupported_tokens.empty())
- unsupported_tokens += ", ";
- unsupported_tokens += t->key;
- }
- avformat_close_input(&weak_context);
- BOOST_THROW_EXCEPTION(ffmpeg_error() << msg_info(unsupported_tokens));
- }
+ CASPAR_SCOPE_EXIT
+ {
+ if (format_options)
av_dict_free(&format_options);
- }
- break;
};
- spl::shared_ptr<AVFormatContext> context(weak_context, [](AVFormatContext* p)
+
+ for (auto& option : vid_params)
+ av_dict_set(&format_options, option.first.c_str(), option.second.c_str(), 0);
+
+ auto resource_name = std::wstring();
+ auto parts = caspar::protocol_split(url_or_file);
+ AVInputFormat* input_format = nullptr;
+
+ if (parts.at(0).empty())
+ resource_name = parts.at(1);
+ else if (parts.at(0) == L"dshow")
+ {
+ input_format = av_find_input_format("dshow");
+ resource_name = parts.at(1);
+ }
+ else
+ resource_name = parts.at(0) + L"://" + parts.at(1);
+
+ AVFormatContext* weak_context = nullptr;
+ THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(resource_name).c_str(), input_format, &format_options), resource_name);
+
+ spl::shared_ptr<AVFormatContext> context(weak_context, [](AVFormatContext* ptr)
{
- avformat_close_input(&p);
+ avformat_close_input(&ptr);
});
- THROW_ON_ERROR2(avformat_find_stream_info(weak_context, nullptr), resource_name);
+
+ if (format_options)
+ {
+ std::string unsupported_tokens = "";
+ AVDictionaryEntry *t = NULL;
+ while ((t = av_dict_get(format_options, "", t, AV_DICT_IGNORE_SUFFIX)) != nullptr)
+ {
+ if (!unsupported_tokens.empty())
+ unsupported_tokens += ", ";
+ unsupported_tokens += t->key;
+ }
+ CASPAR_THROW_EXCEPTION(user_error() << msg_info(unsupported_tokens));
+ }
+
+ THROW_ON_ERROR2(avformat_find_stream_info(context.get(), nullptr), resource_name);
fix_meta_data(*context);
return context;
}
}
};
-input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, FFMPEG_Resource resource_type, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
- : impl_(new implementation(graph, filename, resource_type, loop, start, length, thumbnail_mode, vid_params)){}
+input::input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params)
+ : impl_(new implementation(graph, url_or_file, loop, start, length, thumbnail_mode, vid_params)){}
bool input::eof() const {return !impl_->executor_.is_running();}
bool input::try_pop(std::shared_ptr<AVPacket>& packet){return impl_->try_pop(packet);}
spl::shared_ptr<AVFormatContext> input::context(){return impl_->format_context_;}
class graph;
}
-
+
namespace ffmpeg {
class input : boost::noncopyable
{
public:
- explicit input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& filename, FFMPEG_Resource resource_type, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params);
+ explicit input(const spl::shared_ptr<diagnostics::graph>& graph, const std::wstring& url_or_file, bool loop, uint32_t start, uint32_t length, bool thumbnail_mode, const ffmpeg_options& vid_params);
bool try_pop(std::shared_ptr<AVPacket>& packet);
bool eof() const;
std::shared_ptr<implementation> impl_;
};
-
+
}}
}
if (audio_streams_.back().size() > 32 * audio_cadence_.front() * audio_channel_layout_.num_channels)
- BOOST_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("audio-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));
+ CASPAR_THROW_EXCEPTION(invalid_operation() << source_info("frame_muxer") << msg_info("audio-stream overflow. This can be caused by incorrect frame-rate. Check clip meta-data."));
}
bool video_ready() const
const core::audio_channel_layout& channel_layout,
const std::wstring& filter,
bool multithreaded_filter);
-
+
void push(const std::shared_ptr<AVFrame>& video_frame);
void push(const std::shared_ptr<core::mutable_audio_buffer>& audio_samples);
-
+
bool video_ready() const;
bool audio_ready() const;
spl::shared_ptr<impl> impl_;
};
-}}
\ No newline at end of file
+}}
namespace caspar { namespace ffmpeg {
-enum class FFMPEG_Resource {
- FFMPEG_FILE,
- FFMPEG_DEVICE,
- FFMPEG_STREAM
-};
-
typedef std::vector<std::pair<std::string, std::string>> ffmpeg_options;
// Utils
packets_.pop();
file_frame_number_ = static_cast<uint32_t>(packet->pos);
avcodec_flush_buffers(codec_context_.get());
- return flush_video();
+ return flush_video();
}
packets_.pop();
<!--\r
<log-level> info [trace|debug|info|warning|error|fatal]</log-level>\r
<log-categories> communication [calltrace|communication|calltrace,communication]</log-categories>\r
-<force-deinterlace> false [true|false]</force-deinterlacing>\r
+<force-deinterlace> false [true|false]</force-deinterlace>\r
<channel-grid> false [true|false]</channel-grid>\r
<mixer>\r
<blend-modes> false [true|false]</blend-modes>\r