auto num_samples = input.size() / input_layout_.num_channels;
auto expected_output_size = num_samples * output_layout_.num_channels;
- auto input_frame = std::shared_ptr<AVFrame>(av_frame_alloc(), [](AVFrame* p)
- {
- if (p)
- av_frame_free(&p);
- });
+ auto input_frame = ffmpeg::create_frame();
input_frame->channels = input_layout_.num_channels;
input_frame->channel_layout = ffmpeg::create_channel_layout_bitmask(input_layout_.num_channels);
*
* Author: Robert Nagy, ronag89@gmail.com
*/
-
+
#include "../StdAfx.h"
#include "../ffmpeg_error.h"
#include "ffmpeg_consumer.h"
#include "../producer/tbb_avcodec.h"
+#include "../producer/util/util.h"
#include <core/frame/frame.h>
#include <core/frame/audio_channel_layout.h>
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
#endif
namespace caspar { namespace ffmpeg {
-
+
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
{
AVClass* av_class = *(AVClass**)obj;
if((strcmp(name, "pix_fmt") == 0 || strcmp(name, "pixel_format") == 0) && strcmp(av_class->class_name, "AVCodecContext") == 0)
{
- AVCodecContext* c = (AVCodecContext*)obj;
+ AVCodecContext* c = (AVCodecContext*)obj;
auto pix_fmt = av_get_pix_fmt(val);
if(pix_fmt == PIX_FMT_NONE)
- return -1;
+ return -1;
c->pix_fmt = pix_fmt;
return 0;
}
//if((strcmp(name, "r") == 0 || strcmp(name, "frame_rate") == 0) && strcmp(av_class->class_name, "AVCodecContext") == 0)
//{
- // AVCodecContext* c = (AVCodecContext*)obj;
+ // AVCodecContext* c = (AVCodecContext*)obj;
// if(c->codec_type != AVMEDIA_TYPE_VIDEO)
// return -1;
{
}
};
-
+
struct output_format
{
AVOutputFormat* format;
{
return set_opt(o.name, o.value);
});
-
+
if(vcodec == CODEC_ID_NONE && format)
vcodec = format->video_codec;
if(acodec == CODEC_ID_NONE && format)
acodec = format->audio_codec;
-
+
if(vcodec == CODEC_ID_NONE)
vcodec = CODEC_ID_H264;
-
+
if(acodec == CODEC_ID_NONE)
acodec = CODEC_ID_PCM_S16LE;
}
-
+
bool set_opt(const std::string& name, const std::string& value)
{
//if(name == "target")
- //{
+ //{
// enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
- //
+ //
// if(name.find("pal-") != std::string::npos)
// norm = PAL;
// else if(name.find("ntsc-") != std::string::npos)
// if(norm == UNKNOWN)
// CASPAR_THROW_EXCEPTION(invalid_argument() << arg_name_info("target"));
- //
- // if (name.find("-dv") != std::string::npos)
+ //
+ // if (name.find("-dv") != std::string::npos)
// {
// set_opt("f", "dv");
// if(norm == PAL)
// }
// }
// set_opt("s", norm == PAL ? "720x576" : "720x480");
- // }
+ // }
// return true;
//}
- //else
+ //else
if(name == "f")
{
format = av_guess_format(value.c_str(), nullptr, nullptr);
{
if(av_parse_video_size(&width, &height, value.c_str()) < 0)
CASPAR_THROW_EXCEPTION(user_error() << msg_info("Unknown video size " + value));
-
+
return true;
}
else if(name == "croptop")
return true;
}
-
+
return false;
}
};
typedef cache_aligned_vector<uint8_t> byte_vector;
struct ffmpeg_consumer : boost::noncopyable
-{
+{
const spl::shared_ptr<diagnostics::graph> graph_;
const std::string filename_;
const std::string full_filename_ = u8(env::media_folder()) + filename_;
const core::audio_channel_layout channel_layout_;
core::monitor::subject monitor_subject_;
-
+
tbb::spin_mutex exception_mutex_;
std::exception_ptr exception_;
-
+
std::shared_ptr<AVStream> audio_st_;
std::shared_ptr<AVStream> video_st_;
-
+
byte_vector picture_buffer_;
byte_vector key_picture_buf_;
byte_vector audio_buffer_;
executor_.set_capacity(8);
oc_->oformat = output_format_.format;
-
+
std::strcpy(oc_->filename, full_filename_.c_str());
-
+
// Add the audio and video streams using the default format codecs and initialize the codecs.
video_st_ = add_video_stream(options);
if (!key_only)
audio_st_ = add_audio_stream(options);
-
+
av_dump_format(oc_.get(), 0, full_filename_.c_str(), 1);
-
+
// Open the output ffmpeg, if needed.
- if (!(oc_->oformat->flags & AVFMT_NOFILE))
+ if (!(oc_->oformat->flags & AVFMT_NOFILE))
THROW_ON_ERROR2(avio_open(&oc_->pb, full_filename_.c_str(), AVIO_FLAG_WRITE), "[ffmpeg_consumer]");
-
+
THROW_ON_ERROR2(avformat_write_header(oc_.get(), nullptr), "[ffmpeg_consumer]");
if(options.size() > 0)
}
~ffmpeg_consumer()
- {
+ {
try
{
executor_.wait();
}
LOG_ON_ERROR2(av_write_trailer(oc_.get()), "[ffmpeg_consumer]");
-
+
if (!key_only_)
audio_st_.reset();
video_st_.reset();
-
- if (!(oc_->oformat->flags & AVFMT_NOFILE))
+
+ if (!(oc_->oformat->flags & AVFMT_NOFILE))
LOG_ON_ERROR2(avio_close(oc_->pb), "[ffmpeg_consumer]");
}
-
+
// frame_consumer
void send(core::const_frame& frame)
std::rethrow_exception(exception);
executor_.begin_invoke([=]
- {
+ {
encode(frame);
current_encoding_delay_ = frame.get_age_millis();
});
{
return L"ffmpeg[" + u16(filename_) + L"]";
}
-
+
core::monitor::subject& monitor_output()
{
return monitor_subject_;
private:
std::shared_ptr<AVStream> add_video_stream(std::vector<option>& options)
- {
+ {
if(output_format_.vcodec == CODEC_ID_NONE)
return nullptr;
auto st = avformat_new_stream(oc_.get(), 0);
- if (!st)
- CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("av_new_stream"));
+ if (!st)
+ CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("av_new_stream"));
auto encoder = avcodec_find_encoder(output_format_.vcodec);
if (!encoder)
auto c = st->codec;
avcodec_get_context_defaults3(c, encoder);
-
+
c->codec_id = output_format_.vcodec;
c->codec_type = AVMEDIA_TYPE_VIDEO;
c->width = output_format_.width;
c->pix_fmt = c->pix_fmt != PIX_FMT_NONE ? c->pix_fmt : PIX_FMT_YUV420P;
if(c->codec_id == CODEC_ID_PRORES)
- {
+ {
c->bit_rate = output_format_.width < 1280 ? 63*1000000 : 220*1000000;
c->pix_fmt = PIX_FMT_YUV422P10;
}
else if(c->codec_id == CODEC_ID_DVVIDEO)
{
c->width = c->height == 1280 ? 960 : c->width;
-
+
if(format_desc_.format == core::video_format::ntsc)
{
c->pix_fmt = PIX_FMT_YUV411P;
c->pix_fmt = PIX_FMT_YUV420P;
else // dv50
c->pix_fmt = PIX_FMT_YUV422P;
-
- if(format_desc_.duration == 1001)
- c->width = c->height == 1080 ? 1280 : c->width;
+
+ if(format_desc_.duration == 1001)
+ c->width = c->height == 1080 ? 1280 : c->width;
else
- c->width = c->height == 1080 ? 1440 : c->width;
+ c->width = c->height == 1080 ? 1440 : c->width;
}
else if(c->codec_id == CODEC_ID_H264)
- {
- c->pix_fmt = PIX_FMT_YUV420P;
+ {
+ c->pix_fmt = PIX_FMT_YUV420P;
av_opt_set(c->priv_data, "preset", "ultrafast", 0);
av_opt_set(c->priv_data, "tune", "fastdecode", 0);
av_opt_set(c->priv_data, "crf", "5", 0);
{
c->pix_fmt = PIX_FMT_ARGB;
}
-
+
boost::range::remove_erase_if(options, [&](const option& o)
{
return o.name.at(0) != 'a' && ffmpeg::av_opt_set(c, o.name.c_str(), o.value.c_str(), AV_OPT_SEARCH_CHILDREN) > -1;
});
-
+
if(output_format_.format->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
+
THROW_ON_ERROR2(tbb_avcodec_open(c, encoder, false), "[ffmpeg_consumer]");
return std::shared_ptr<AVStream>(st, [](AVStream* st)
LOG_ON_ERROR2(tbb_avcodec_close(st->codec), "[ffmpeg_consumer]");
});
}
-
+
std::shared_ptr<AVStream> add_audio_stream(std::vector<option>& options)
{
if(output_format_.acodec == CODEC_ID_NONE)
auto st = avformat_new_stream(oc_.get(), nullptr);
if(!st)
- CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate audio-stream") << boost::errinfo_api_function("av_new_stream"));
-
+ CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate audio-stream") << boost::errinfo_api_function("av_new_stream"));
+
auto encoder = avcodec_find_encoder(output_format_.acodec);
if (!encoder)
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("codec not found"));
-
+
auto c = st->codec;
avcodec_get_context_defaults3(c, encoder);
c->time_base.num = 1;
c->time_base.den = c->sample_rate;
- if(output_format_.vcodec == CODEC_ID_FLV1)
- c->sample_rate = 44100;
+ if(output_format_.vcodec == CODEC_ID_FLV1)
+ c->sample_rate = 44100;
if(output_format_.format->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
+
boost::range::remove_erase_if(options, [&](const option& o)
{
return ffmpeg::av_opt_set(c, o.name.c_str(), o.value.c_str(), AV_OPT_SEARCH_CHILDREN) > -1;
LOG_ON_ERROR2(avcodec_close(st->codec), "[ffmpeg_consumer]");
});
}
-
+
void encode_video_frame(core::const_frame frame)
- {
+ {
if(!video_st_)
return;
-
+
auto enc = video_st_->codec;
-
+
auto av_frame = convert_video(frame, enc);
av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;
av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper;
if(!got_packet)
return;
-
+
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, enc->time_base, video_st_->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, enc->time_base, video_st_->time_base);
-
+
pkt.stream_index = video_st_->index;
-
+
THROW_ON_ERROR2(av_interleaved_write_frame(oc_.get(), &pkt), "[ffmpeg_consumer]");
}
-
+
// Resolve a usable channel-layout bitmask for the given codec context:
// prefer dec->channel_layout when it is non-zero AND consistent with
// dec->channels (as reported by av_get_channel_layout_nb_channels);
// otherwise fall back to FFmpeg's default layout for that channel count.
uint64_t get_channel_layout(AVCodecContext* dec)
{
auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
return layout;
}
-
+
void encode_audio_frame(core::const_frame frame)
- {
+ {
if(!audio_st_)
return;
-
+
auto enc = audio_st_->codec;
boost::push_back(audio_buffer_, convert_audio(frame, enc));
-
+
auto frame_size = enc->frame_size != 0 ? enc->frame_size * enc->channels * av_get_bytes_per_sample(enc->sample_fmt) : static_cast<int>(audio_buffer_.size());
-
+
while(audio_buffer_.size() >= frame_size)
- {
- std::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [=](AVFrame* p) { av_frame_free(&p); });
- avcodec_get_frame_defaults(av_frame.get());
+ {
+ auto av_frame = create_frame();
av_frame->nb_samples = frame_size / (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
AVPacket pkt;
av_init_packet(&pkt);
pkt.data = nullptr;
- pkt.size = 0;
-
+ pkt.size = 0;
+
THROW_ON_ERROR2(avcodec_fill_audio_frame(av_frame.get(), enc->channels, enc->sample_fmt, audio_buffer_.data(), frame_size, 1), "[ffmpeg_consumer]");
int got_packet = 0;
THROW_ON_ERROR2(avcodec_encode_audio2(enc, &pkt, av_frame.get(), &got_packet), "[ffmpeg_consumer]");
std::shared_ptr<AVPacket> guard(&pkt, av_free_packet);
-
+
audio_buffer_.erase(audio_buffer_.begin(), audio_buffer_.begin() + frame_size);
if(!got_packet)
return;
-
+
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(pkt.pts, enc->time_base, audio_st_->time_base);
if (pkt.dts != AV_NOPTS_VALUE)
pkt.dts = av_rescale_q(pkt.dts, enc->time_base, audio_st_->time_base);
if (pkt.duration > 0)
pkt.duration = static_cast<int>(av_rescale_q(pkt.duration, enc->time_base, audio_st_->time_base));
-
+
pkt.stream_index = audio_st_->index;
-
+
THROW_ON_ERROR2(av_interleaved_write_frame(oc_.get(), &pkt), "[ffmpeg_consumer]");
}
- }
-
+ }
+
std::shared_ptr<AVFrame> convert_video(core::const_frame frame, AVCodecContext* c)
{
- if(!sws_)
+ if(!sws_)
{
- sws_.reset(sws_getContext(format_desc_.width,
- format_desc_.height - output_format_.croptop - output_format_.cropbot,
+ sws_.reset(sws_getContext(format_desc_.width,
+ format_desc_.height - output_format_.croptop - output_format_.cropbot,
PIX_FMT_BGRA,
c->width,
- c->height,
- c->pix_fmt,
- SWS_BICUBIC, nullptr, nullptr, nullptr),
+ c->height,
+ c->pix_fmt,
+ SWS_BICUBIC, nullptr, nullptr, nullptr),
sws_freeContext);
- if (sws_ == nullptr)
+ if (sws_ == nullptr)
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Cannot initialize the conversion context"));
}
std::shared_ptr<AVFrame> in_frame(avcodec_alloc_frame(), av_free);
auto in_picture = reinterpret_cast<AVPicture*>(in_frame.get());
-
+
if (key_only_)
{
key_picture_buf_.resize(frame.image_data().size());
// crop-top
- for(int n = 0; n < 4; ++n)
- in_frame->data[n] += in_frame->linesize[n] * output_format_.croptop;
-
+ for(int n = 0; n < 4; ++n)
+ in_frame->data[n] += in_frame->linesize[n] * output_format_.croptop;
+
// #out_frame
std::shared_ptr<AVFrame> out_frame(avcodec_alloc_frame(), av_free);
-
+
av_image_fill_linesizes(out_frame->linesize, c->pix_fmt, c->width);
for(int n = 0; n < 4; ++n)
out_frame->linesize[n] += 32 - (out_frame->linesize[n] % 32); // align
picture_buffer_.resize(av_image_fill_pointers(out_frame->data, c->pix_fmt, c->height, nullptr, out_frame->linesize));
av_image_fill_pointers(out_frame->data, c->pix_fmt, c->height, picture_buffer_.data(), out_frame->linesize);
-
+
// #scale
- sws_scale(sws_.get(),
- in_frame->data,
+ sws_scale(sws_.get(),
+ in_frame->data,
in_frame->linesize,
- 0,
- format_desc_.height - output_format_.cropbot - output_format_.croptop,
- out_frame->data,
+ 0,
+ format_desc_.height - output_format_.cropbot - output_format_.croptop,
+ out_frame->data,
out_frame->linesize);
out_frame->format = c->pix_fmt;
return out_frame;
}
-
+
byte_vector convert_audio(core::const_frame& frame, AVCodecContext* c)
{
- if(!swr_)
+ if(!swr_)
{
swr_ = std::shared_ptr<SwrContext>(swr_alloc_set_opts(nullptr,
get_channel_layout(c), c->sample_fmt, c->sample_rate,
THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");
}
-
+
byte_vector buffer(48000);
const uint8_t* in[] = {reinterpret_cast<const uint8_t*>(frame.audio_data().data())};
uint8_t* out[] = {buffer.data()};
- auto channel_samples = swr_convert(swr_.get(),
- out, static_cast<int>(buffer.size()) / c->channels / av_get_bytes_per_sample(c->sample_fmt),
+ auto channel_samples = swr_convert(swr_.get(),
+ out, static_cast<int>(buffer.size()) / c->channels / av_get_bytes_per_sample(c->sample_fmt),
in, static_cast<int>(frame.audio_data().size()/channel_layout_.num_channels));
- buffer.resize(channel_samples * c->channels * av_get_bytes_per_sample(c->sample_fmt));
+ buffer.resize(channel_samples * c->channels * av_get_bytes_per_sample(c->sample_fmt));
return buffer;
}
graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);
}
catch(...)
- {
+ {
lock(exception_mutex_, [&]
{
exception_ = std::current_exception();
, separate_key_(separate_key)
{
}
-
+
void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout& channel_layout, int) override
{
if(consumer_)
std::future<bool> send(core::const_frame frame) override
{
bool ready_for_frame = consumer_->ready_for_frame();
-
+
if (ready_for_frame && separate_key_)
ready_for_frame = ready_for_frame && key_only_consumer_->ready_for_frame();
if (ready_for_frame)
{
consumer_->send(frame);
-
+
if (separate_key_)
key_only_consumer_->send(frame);
}
else
{
consumer_->mark_dropped();
-
+
if (separate_key_)
key_only_consumer_->mark_dropped();
}
-
+
return make_ready_future(true);
}
-
+
std::wstring print() const override
{
return consumer_ ? consumer_->print() : L"[ffmpeg_consumer]";
info.add(L"separate_key", separate_key_);
return info;
}
-
+
// Always false: this consumer does not act as the channel's timing
// (synchronization) clock — presumably pacing is left to other consumers
// or the encoder itself. TODO(review): confirm against the frame_consumer
// interface contract.
bool has_synchronization_clock() const override
{
return false;
}
auto str = std::accumulate(params2.begin(), params2.end(), std::wstring(), [](const std::wstring& lhs, const std::wstring& rhs) {return lhs + L" " + rhs;});
-
+
boost::wregex path_exp(LR"(\s*FILE(\s(?<PATH>.+\.[^\s]+))?.*)", boost::regex::icase);
boost::wsmatch path;
if(!boost::regex_match(str, path, path_exp))
return core::frame_consumer::empty();
-
- boost::wregex opt_exp(LR"(-((?<NAME>[^\s]+)\s+(?<VALUE>[^\s]+)))");
-
+
+ boost::wregex opt_exp(LR"(-((?<NAME>[^\s]+)\s+(?<VALUE>[^\s]+)))");
+
std::vector<option> options;
for(boost::wsregex_iterator it(str.begin(), str.end(), opt_exp); it != boost::wsregex_iterator(); ++it)
{
auto name = u8(boost::trim_copy(boost::to_lower_copy((*it)["NAME"].str())));
auto value = u8(boost::trim_copy(boost::to_lower_copy((*it)["VALUE"].str())));
-
+
if(value == "h264")
value = "libx264";
else if(value == "dvcpro")
options.push_back(option(name, value));
}
-
+
return spl::make_shared<ffmpeg_consumer_proxy>(path["PATH"].str(), options, separate_key);
}
std::vector<option> options;
options.push_back(option("vcodec", u8(codec)));
-
+
return spl::make_shared<ffmpeg_consumer_proxy>(filename, options, separate_key);
}
#include "ffmpeg_consumer.h"
#include "../ffmpeg_error.h"
+#include "../producer/util/util.h"
#include <common/except.h>
#include <common/executor.h>
#pragma warning(push)
#pragma warning(disable: 4244)
-extern "C"
+extern "C"
{
#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
{
public:
// Static Members
-
+
private:
core::monitor::subject subject_;
boost::filesystem::path path_;
std::map<std::string, std::string> options_;
bool compatibility_mode_;
-
+
core::video_format_desc in_video_format_;
core::audio_channel_layout in_channel_layout_ = core::audio_channel_layout::invalid();
std::shared_ptr<AVFormatContext> oc_;
tbb::atomic<bool> abort_request_;
-
+
std::shared_ptr<AVStream> video_st_;
std::shared_ptr<AVStream> audio_st_;
std::int64_t video_pts_;
std::int64_t audio_pts_;
-
- AVFilterContext* audio_graph_in_;
- AVFilterContext* audio_graph_out_;
- std::shared_ptr<AVFilterGraph> audio_graph_;
- std::shared_ptr<AVBitStreamFilterContext> audio_bitstream_filter_;
-
- AVFilterContext* video_graph_in_;
- AVFilterContext* video_graph_out_;
- std::shared_ptr<AVFilterGraph> video_graph_;
+
+ AVFilterContext* audio_graph_in_;
+ AVFilterContext* audio_graph_out_;
+ std::shared_ptr<AVFilterGraph> audio_graph_;
+ std::shared_ptr<AVBitStreamFilterContext> audio_bitstream_filter_;
+
+ AVFilterContext* video_graph_in_;
+ AVFilterContext* video_graph_out_;
+ std::shared_ptr<AVFilterGraph> video_graph_;
std::shared_ptr<AVBitStreamFilterContext> video_bitstream_filter_;
-
+
executor executor_;
executor video_encoder_executor_;
tbb::atomic<int64_t> current_encoding_delay_;
executor write_executor_;
-
+
public:
streaming_consumer(
- std::string path,
+ std::string path,
std::string options,
bool compatibility_mode)
: path_(path)
, audio_encoder_executor_(print() + L" audio_encoder")
, video_encoder_executor_(print() + L" video_encoder")
, write_executor_(print() + L" io")
- {
+ {
abort_request_ = false;
current_encoding_delay_ = 0;
- for(auto it =
+ for(auto it =
boost::sregex_iterator(
- options.begin(),
- options.end(),
- boost::regex("-(?<NAME>[^-\\s]+)(\\s+(?<VALUE>[^\\s]+))?"));
- it != boost::sregex_iterator();
+ options.begin(),
+ options.end(),
+ boost::regex("-(?<NAME>[^-\\s]+)(\\s+(?<VALUE>[^\\s]+))?"));
+ it != boost::sregex_iterator();
++it)
- {
+ {
options_[(*it)["NAME"].str()] = (*it)["VALUE"].matched ? (*it)["VALUE"].str() : "";
}
-
+
if (options_.find("threads") == options_.end())
options_["threads"] = "auto";
- tokens_ =
+ tokens_ =
std::max(
- 1,
+ 1,
try_remove_arg<int>(
- options_,
- boost::regex("tokens")).get_value_or(2));
+ options_,
+ boost::regex("tokens")).get_value_or(2));
}
-
+
~streaming_consumer()
{
if(oc_)
int channel_index) override
{
try
- {
+ {
static boost::regex prot_exp("^.+:.*" );
-
- const auto overwrite =
+
+ const auto overwrite =
try_remove_arg<std::string>(
options_,
boost::regex("y")) != boost::none;
if(!boost::regex_match(
- path_.string(),
+ path_.string(),
prot_exp))
{
if(!path_.is_complete())
{
- path_ =
+ path_ =
u8(
- env::media_folder()) +
+ env::media_folder()) +
path_.string();
}
-
+
if(boost::filesystem::exists(path_))
{
if(!overwrite && !compatibility_mode_)
BOOST_THROW_EXCEPTION(invalid_argument() << msg_info("File exists"));
-
+
boost::filesystem::remove(path_);
}
}
-
- const auto oformat_name =
+
+ const auto oformat_name =
try_remove_arg<std::string>(
- options_,
+ options_,
boost::regex("^f|format$"));
-
+
AVFormatContext* oc;
FF(avformat_alloc_output_context2(
- &oc,
- nullptr,
- oformat_name && !oformat_name->empty() ? oformat_name->c_str() : nullptr,
+ &oc,
+ nullptr,
+ oformat_name && !oformat_name->empty() ? oformat_name->c_str() : nullptr,
path_.string().c_str()));
oc_.reset(
- oc,
+ oc,
avformat_free_context);
-
+
CASPAR_VERIFY(oc_->oformat);
oc_->interrupt_callback.callback = streaming_consumer::interrupt_cb;
- oc_->interrupt_callback.opaque = this;
+ oc_->interrupt_callback.opaque = this;
CASPAR_VERIFY(format_desc.format != core::video_format::invalid);
in_video_format_ = format_desc;
in_channel_layout_ = channel_layout;
-
+
CASPAR_VERIFY(oc_->oformat);
-
- const auto video_codec_name =
+
+ const auto video_codec_name =
try_remove_arg<std::string>(
- options_,
+ options_,
boost::regex("^c:v|codec:v|vcodec$"));
- const auto video_codec =
- video_codec_name
+ const auto video_codec =
+ video_codec_name
? avcodec_find_encoder_by_name(video_codec_name->c_str())
: avcodec_find_encoder(oc_->oformat->video_codec);
-
- const auto audio_codec_name =
+
+ const auto audio_codec_name =
try_remove_arg<std::string>(
- options_,
+ options_,
boost::regex("^c:a|codec:a|acodec$"));
-
- const auto audio_codec =
- audio_codec_name
+
+ const auto audio_codec =
+ audio_codec_name
? avcodec_find_encoder_by_name(audio_codec_name->c_str())
: avcodec_find_encoder(oc_->oformat->audio_codec);
-
+
if (!video_codec)
CASPAR_THROW_EXCEPTION(user_error() << msg_info(
"Failed to find video codec " + (video_codec_name
? *audio_codec_name
: "with id " + boost::lexical_cast<std::string>(
oc_->oformat->audio_codec))));
-
+
// Filters
{
configure_video_filters(
- *video_codec,
- try_remove_arg<std::string>(options_,
+ *video_codec,
+ try_remove_arg<std::string>(options_,
boost::regex("vf|f:v|filter:v")).get_value_or(""));
configure_audio_filters(
- *audio_codec,
+ *audio_codec,
try_remove_arg<std::string>(options_,
boost::regex("af|f:a|filter:a")).get_value_or(""));
}
auto audio_options = options_;
video_st_ = open_encoder(
- *video_codec,
+ *video_codec,
video_options);
audio_st_ = open_encoder(
- *audio_codec,
+ *audio_codec,
audio_options);
auto it = options_.begin();
AVDictionary* av_opts = nullptr;
to_dict(
- &av_opts,
+ &av_opts,
std::move(options_));
CASPAR_SCOPE_EXIT
av_dict_free(&av_opts);
};
- if (!(oc_->oformat->flags & AVFMT_NOFILE))
+ if (!(oc_->oformat->flags & AVFMT_NOFILE))
{
FF(avio_open2(
- &oc_->pb,
- path_.string().c_str(),
- AVIO_FLAG_WRITE,
- &oc_->interrupt_callback,
+ &oc_->pb,
+ path_.string().c_str(),
+ AVIO_FLAG_WRITE,
+ &oc_->interrupt_callback,
&av_opts));
}
-
+
FF(avformat_write_header(
- oc_.get(),
+ oc_.get(),
&av_opts));
-
+
options_ = to_map(av_opts);
}
// Dump Info
-
+
av_dump_format(
- oc_.get(),
- 0,
- oc_->filename,
- 1);
+ oc_.get(),
+ 0,
+ oc_->filename,
+ 1);
for (const auto& option : options_)
{
- CASPAR_LOG(warning)
- << L"Invalid option: -"
- << u16(option.first)
- << L" "
+ CASPAR_LOG(warning)
+ << L"Invalid option: -"
+ << u16(option.first)
+ << L" "
<< u16(option.second);
}
}
}
std::future<bool> send(core::const_frame frame) override
- {
+ {
CASPAR_VERIFY(in_video_format_.format != core::video_format::invalid);
-
+
--tokens_;
std::shared_ptr<void> token(
- nullptr,
+ nullptr,
[this, frame](void*)
{
++tokens_;
video_encoder_executor_.begin_invoke([=]() mutable
{
encode_video(
- frame,
+ frame,
token);
});
-
+
audio_encoder_executor_.begin_invoke([=]() mutable
{
encode_audio(
- frame,
+ frame,
token);
});
-
+
return true;
});
}
{
return L"streaming_consumer[" + u16(path_.string()) + L"]";
}
-
+
virtual boost::property_tree::wptree info() const override
{
boost::property_tree::wptree info;
static int interrupt_cb(void* ctx)
{
CASPAR_ASSERT(ctx);
- return reinterpret_cast<streaming_consumer*>(ctx)->abort_request_;
+ return reinterpret_cast<streaming_consumer*>(ctx)->abort_request_;
}
-
+
std::shared_ptr<AVStream> open_encoder(
const AVCodec& codec,
std::map<std::string,
std::string>& options)
- {
- auto st =
+ {
+ auto st =
avformat_new_stream(
- oc_.get(),
+ oc_.get(),
&codec);
- if (!st)
+ if (!st)
CASPAR_THROW_EXCEPTION(caspar_exception() << msg_info("Could not allocate video-stream.") << boost::errinfo_api_function("av_new_stream"));
auto enc = st->codec;
-
+
CASPAR_VERIFY(enc);
-
+
switch(enc->codec_type)
{
case AVMEDIA_TYPE_VIDEO:
enc->width = video_graph_out_->inputs[0]->w;
enc->height = video_graph_out_->inputs[0]->h;
enc->bit_rate_tolerance = 400 * 1000000;
-
+
break;
}
case AVMEDIA_TYPE_AUDIO:
enc->sample_rate = audio_graph_out_->inputs[0]->sample_rate;
enc->channel_layout = audio_graph_out_->inputs[0]->channel_layout;
enc->channels = audio_graph_out_->inputs[0]->channels;
-
+
break;
}
}
-
+
if(oc_->oformat->flags & AVFMT_GLOBALHEADER)
enc->flags |= CODEC_FLAG_GLOBAL_HEADER;
-
+
static const std::array<std::string, 4> char_id_map = {{"v", "a", "d", "s"}};
const auto char_id = char_id_map.at(enc->codec_type);
-
- const auto codec_opts =
+
+ const auto codec_opts =
remove_options(
- options,
+ options,
boost::regex("^(" + char_id + "?[^:]+):" + char_id + "$"));
-
+
AVDictionary* av_codec_opts = nullptr;
to_dict(
- &av_codec_opts,
+ &av_codec_opts,
options);
to_dict(
codec_opts);
options.clear();
-
+
FF(avcodec_open2(
- enc,
- &codec,
- av_codec_opts ? &av_codec_opts : nullptr));
+ enc,
+ &codec,
+ av_codec_opts ? &av_codec_opts : nullptr));
if(av_codec_opts)
{
- auto t =
+ auto t =
av_dict_get(
- av_codec_opts,
- "",
- nullptr,
+ av_codec_opts,
+ "",
+ nullptr,
AV_DICT_IGNORE_SUFFIX);
while(t)
options[t->key + (codec_opts.find(t->key) != codec_opts.end() ? ":" + char_id : "")] = t->value;
t = av_dict_get(
- av_codec_opts,
- "",
- t,
+ av_codec_opts,
+ "",
+ t,
AV_DICT_IGNORE_SUFFIX);
}
av_dict_free(&av_codec_opts);
}
-
+
if(enc->codec_type == AVMEDIA_TYPE_AUDIO && !(codec.capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
{
CASPAR_ASSERT(enc->frame_size > 0);
- av_buffersink_set_frame_size(audio_graph_out_,
+ av_buffersink_set_frame_size(audio_graph_out_,
enc->frame_size);
}
-
+
return std::shared_ptr<AVStream>(st, [this](AVStream* st)
{
avcodec_close(st->codec);
void configue_audio_bistream_filters(
std::map<std::string, std::string>& options)
{
- const auto audio_bitstream_filter_str =
+ const auto audio_bitstream_filter_str =
try_remove_arg<std::string>(
- options,
+ options,
boost::regex("^bsf:a|absf$"));
- const auto audio_bitstream_filter =
- audio_bitstream_filter_str
- ? av_bitstream_filter_init(audio_bitstream_filter_str->c_str())
+ const auto audio_bitstream_filter =
+ audio_bitstream_filter_str
+ ? av_bitstream_filter_init(audio_bitstream_filter_str->c_str())
: nullptr;
CASPAR_VERIFY(!audio_bitstream_filter_str || audio_bitstream_filter);
if(audio_bitstream_filter)
{
audio_bitstream_filter_.reset(
- audio_bitstream_filter,
+ audio_bitstream_filter,
av_bitstream_filter_close);
}
-
+
if(audio_bitstream_filter_str && !audio_bitstream_filter_)
options["bsf:a"] = *audio_bitstream_filter_str;
}
-
+
void configue_video_bistream_filters(
std::map<std::string, std::string>& options)
{
- const auto video_bitstream_filter_str =
+ const auto video_bitstream_filter_str =
try_remove_arg<std::string>(
- options,
+ options,
boost::regex("^bsf:v|vbsf$"));
- const auto video_bitstream_filter =
- video_bitstream_filter_str
- ? av_bitstream_filter_init(video_bitstream_filter_str->c_str())
+ const auto video_bitstream_filter =
+ video_bitstream_filter_str
+ ? av_bitstream_filter_init(video_bitstream_filter_str->c_str())
: nullptr;
CASPAR_VERIFY(!video_bitstream_filter_str || video_bitstream_filter);
if(video_bitstream_filter)
{
video_bitstream_filter_.reset(
- video_bitstream_filter,
+ video_bitstream_filter,
av_bitstream_filter_close);
}
-
+
if(video_bitstream_filter_str && !video_bitstream_filter_)
options["bsf:v"] = *video_bitstream_filter_str;
}
-
+
void configure_video_filters(
const AVCodec& codec,
const std::string& filtergraph)
{
video_graph_.reset(
- avfilter_graph_alloc(),
+ avfilter_graph_alloc(),
[](AVFilterGraph* p)
{
avfilter_graph_free(&p);
});
-
+
video_graph_->nb_threads = boost::thread::hardware_concurrency()/2;
video_graph_->thread_type = AVFILTER_THREAD_SLICE;
boost::rational<int>(
in_video_format_.width,
in_video_format_.height);
-
+
const auto vsrc_options = (boost::format("video_size=%1%x%2%:pix_fmt=%3%:time_base=%4%/%5%:pixel_aspect=%6%/%7%:frame_rate=%8%/%9%")
% in_video_format_.width % in_video_format_.height
% AV_PIX_FMT_BGRA
% in_video_format_.duration % in_video_format_.time_scale
% sample_aspect_ratio.numerator() % sample_aspect_ratio.denominator()
% in_video_format_.time_scale % in_video_format_.duration).str();
-
- AVFilterContext* filt_vsrc = nullptr;
+
+ AVFilterContext* filt_vsrc = nullptr;
FF(avfilter_graph_create_filter(
&filt_vsrc,
- avfilter_get_by_name("buffer"),
+ avfilter_get_by_name("buffer"),
"ffmpeg_consumer_buffer",
- vsrc_options.c_str(),
- nullptr,
+ vsrc_options.c_str(),
+ nullptr,
video_graph_.get()));
-
+
AVFilterContext* filt_vsink = nullptr;
FF(avfilter_graph_create_filter(
&filt_vsink,
- avfilter_get_by_name("buffersink"),
+ avfilter_get_by_name("buffersink"),
"ffmpeg_consumer_buffersink",
- nullptr,
- nullptr,
+ nullptr,
+ nullptr,
video_graph_.get()));
-
+
#pragma warning (push)
#pragma warning (disable : 4245)
FF(av_opt_set_int_list(
- filt_vsink,
- "pix_fmts",
- codec.pix_fmts,
+ filt_vsink,
+ "pix_fmts",
+ codec.pix_fmts,
-1,
AV_OPT_SEARCH_CHILDREN));
#pragma warning (pop)
-
+
configure_filtergraph(
- *video_graph_,
+ *video_graph_,
filtergraph,
*filt_vsrc,
*filt_vsink);
video_graph_in_ = filt_vsrc;
video_graph_out_ = filt_vsink;
-
+
CASPAR_LOG(info)
- << u16(std::string("\n")
+ << u16(std::string("\n")
+ avfilter_graph_dump(
- video_graph_.get(),
+ video_graph_.get(),
nullptr));
}
const std::string& filtergraph)
{
audio_graph_.reset(
- avfilter_graph_alloc(),
+ avfilter_graph_alloc(),
[](AVFilterGraph* p)
{
avfilter_graph_free(&p);
});
-
+
audio_graph_->nb_threads = boost::thread::hardware_concurrency()/2;
audio_graph_->thread_type = AVFILTER_THREAD_SLICE;
-
+
const auto asrc_options = (boost::format("sample_rate=%1%:sample_fmt=%2%:channels=%3%:time_base=%4%/%5%:channel_layout=%6%")
% in_video_format_.audio_sample_rate
% av_get_sample_fmt_name(AV_SAMPLE_FMT_S32)
% in_channel_layout_.num_channels
% 1 % in_video_format_.audio_sample_rate
% boost::io::group(
- std::hex,
- std::showbase,
+ std::hex,
+ std::showbase,
av_get_default_channel_layout(in_channel_layout_.num_channels))).str();
AVFilterContext* filt_asrc = nullptr;
FF(avfilter_graph_create_filter(
&filt_asrc,
- avfilter_get_by_name("abuffer"),
+ avfilter_get_by_name("abuffer"),
"ffmpeg_consumer_abuffer",
- asrc_options.c_str(),
- nullptr,
+ asrc_options.c_str(),
+ nullptr,
audio_graph_.get()));
-
+
AVFilterContext* filt_asink = nullptr;
FF(avfilter_graph_create_filter(
&filt_asink,
- avfilter_get_by_name("abuffersink"),
+ avfilter_get_by_name("abuffersink"),
"ffmpeg_consumer_abuffersink",
- nullptr,
- nullptr,
+ nullptr,
+ nullptr,
audio_graph_.get()));
-
+
#pragma warning (push)
#pragma warning (disable : 4245)
FF(av_opt_set_int(
- filt_asink,
+ filt_asink,
"all_channel_counts",
- 1,
+ 1,
AV_OPT_SEARCH_CHILDREN));
FF(av_opt_set_int_list(
- filt_asink,
- "sample_fmts",
- codec.sample_fmts,
- -1,
+ filt_asink,
+ "sample_fmts",
+ codec.sample_fmts,
+ -1,
AV_OPT_SEARCH_CHILDREN));
FF(av_opt_set_int_list(
filt_asink,
- "channel_layouts",
- codec.channel_layouts,
- -1,
+ "channel_layouts",
+ codec.channel_layouts,
+ -1,
AV_OPT_SEARCH_CHILDREN));
FF(av_opt_set_int_list(
- filt_asink,
- "sample_rates" ,
- codec.supported_samplerates,
- -1,
+ filt_asink,
+ "sample_rates" ,
+ codec.supported_samplerates,
+ -1,
AV_OPT_SEARCH_CHILDREN));
#pragma warning (pop)
-
+
configure_filtergraph(
- *audio_graph_,
- filtergraph,
- *filt_asrc,
+ *audio_graph_,
+ filtergraph,
+ *filt_asrc,
*filt_asink);
audio_graph_in_ = filt_asrc;
audio_graph_out_ = filt_asink;
- CASPAR_LOG(info)
- << u16(std::string("\n")
+ CASPAR_LOG(info)
+ << u16(std::string("\n")
+ avfilter_graph_dump(
- audio_graph_.get(),
+ audio_graph_.get(),
nullptr));
}
inputs->next = nullptr;
FF(avfilter_graph_parse(
- &graph,
- filtergraph.c_str(),
+ &graph,
+ filtergraph.c_str(),
inputs,
outputs,
nullptr));
- }
- else
+ }
+ else
{
FF(avfilter_link(
- &source_ctx,
- 0,
- &sink_ctx,
+ &source_ctx,
+ 0,
+ &sink_ctx,
0));
}
FF(avfilter_graph_config(
- &graph,
+ &graph,
nullptr));
}
catch(...)
throw;
}
}
-
+
void encode_video(core::const_frame frame_ptr, std::shared_ptr<void> token)
- {
+ {
if(!video_st_)
return;
auto enc = video_st_->codec;
-
- std::shared_ptr<AVFrame> src_av_frame;
if(frame_ptr != core::const_frame::empty())
{
- src_av_frame.reset(
- av_frame_alloc(),
- [frame_ptr](AVFrame* frame)
- {
- av_frame_free(&frame);
- });
+ auto src_av_frame = create_frame();
- avcodec_get_frame_defaults(src_av_frame.get());
-
- const auto sample_aspect_ratio =
+ const auto sample_aspect_ratio =
boost::rational<int>(
- in_video_format_.square_width,
+ in_video_format_.square_width,
in_video_format_.square_height) /
boost::rational<int>(
- in_video_format_.width,
+ in_video_format_.width,
in_video_format_.height);
src_av_frame->format = AV_PIX_FMT_BGRA;
src_av_frame->data,
src_av_frame->linesize,
frame_ptr.image_data().begin(),
- static_cast<AVPixelFormat>(src_av_frame->format),
- in_video_format_.width,
- in_video_format_.height,
+ static_cast<AVPixelFormat>(src_av_frame->format),
+ in_video_format_.width,
+ in_video_format_.height,
1));
FF(av_buffersrc_add_frame(
- video_graph_in_,
+ video_graph_in_,
src_av_frame.get()));
- }
+ }
int ret = 0;
while(ret >= 0)
{
- std::shared_ptr<AVFrame> filt_frame(
- av_frame_alloc(),
- [](AVFrame* p)
- {
- av_frame_free(&p);
- });
+ auto filt_frame = create_frame();
ret = av_buffersink_get_frame(
- video_graph_out_,
+ video_graph_out_,
filt_frame.get());
-
+
video_encoder_executor_.begin_invoke([=]
{
if(ret == AVERROR_EOF)
if(enc->codec->capabilities & CODEC_CAP_DELAY)
{
while(encode_av_frame(
- *video_st_,
+ *video_st_,
video_bitstream_filter_.get(),
- avcodec_encode_video2,
+ avcodec_encode_video2,
nullptr, token))
{
boost::this_thread::yield(); // TODO:
}
- }
+ }
}
else if(ret != AVERROR(EAGAIN))
{
FF_RET(ret, "av_buffersink_get_frame");
-
- if (filt_frame->interlaced_frame)
+
+ if (filt_frame->interlaced_frame)
{
if (enc->codec->id == AV_CODEC_ID_MJPEG)
enc->field_order = filt_frame->top_field_first ? AV_FIELD_TT : AV_FIELD_BB;
else
enc->field_order = filt_frame->top_field_first ? AV_FIELD_TB : AV_FIELD_BT;
- }
+ }
else
enc->field_order = AV_FIELD_PROGRESSIVE;
if (!enc->me_threshold)
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
-
+
encode_av_frame(
*video_st_,
video_bitstream_filter_.get(),
avcodec_encode_video2,
- filt_frame,
+ filt_frame,
token);
boost::this_thread::yield(); // TODO:
});
}
}
-
+
void encode_audio(core::const_frame frame_ptr, std::shared_ptr<void> token)
- {
+ {
if(!audio_st_)
return;
-
+
auto enc = audio_st_->codec;
-
- std::shared_ptr<AVFrame> src_av_frame;
if(frame_ptr != core::const_frame::empty())
{
- src_av_frame.reset(
- av_frame_alloc(),
- [](AVFrame* p)
- {
- av_frame_free(&p);
- });
-
+ auto src_av_frame = create_frame();
+
src_av_frame->channels = in_channel_layout_.num_channels;
src_av_frame->channel_layout = av_get_default_channel_layout(in_channel_layout_.num_channels);
src_av_frame->sample_rate = in_video_format_.audio_sample_rate;
src_av_frame->nb_samples,
static_cast<AVSampleFormat>(src_av_frame->format),
16));
-
+
FF(av_buffersrc_add_frame(
- audio_graph_in_,
+ audio_graph_in_,
src_av_frame.get()));
}
while(ret >= 0)
{
- std::shared_ptr<AVFrame> filt_frame(
- av_frame_alloc(),
- [](AVFrame* p)
- {
- av_frame_free(&p);
- });
+ auto filt_frame = create_frame();
ret = av_buffersink_get_frame(
- audio_graph_out_,
+ audio_graph_out_,
filt_frame.get());
-
+
audio_encoder_executor_.begin_invoke([=]
- {
+ {
if(ret == AVERROR_EOF)
{
if(enc->codec->capabilities & CODEC_CAP_DELAY)
{
while(encode_av_frame(
- *audio_st_,
- audio_bitstream_filter_.get(),
- avcodec_encode_audio2,
- nullptr,
+ *audio_st_,
+ audio_bitstream_filter_.get(),
+ avcodec_encode_audio2,
+ nullptr,
token))
{
boost::this_thread::yield(); // TODO:
else if(ret != AVERROR(EAGAIN))
{
FF_RET(
- ret,
+ ret,
"av_buffersink_get_frame");
encode_av_frame(
- *audio_st_,
- audio_bitstream_filter_.get(),
- avcodec_encode_audio2,
- filt_frame,
+ *audio_st_,
+ audio_bitstream_filter_.get(),
+ avcodec_encode_audio2,
+ filt_frame,
token);
boost::this_thread::yield(); // TODO:
});
}
}
-
+
template<typename F>
bool encode_av_frame(
AVStream& st,
- AVBitStreamFilterContext* bsfc,
- const F& func,
- const std::shared_ptr<AVFrame>& src_av_frame,
+ AVBitStreamFilterContext* bsfc,
+ const F& func,
+ const std::shared_ptr<AVFrame>& src_av_frame,
std::shared_ptr<void> token)
{
AVPacket pkt = {};
int got_packet = 0;
FF(func(
- st.codec,
- &pkt,
- src_av_frame.get(),
+ st.codec,
+ &pkt,
+ src_av_frame.get(),
&got_packet));
-
+
if(!got_packet || pkt.size <= 0)
return false;
pkt.stream_index = st.index;
-
+
if(bsfc)
{
auto new_pkt = pkt;
pkt.size,
pkt.flags & AV_PKT_FLAG_KEY);
- if(a == 0 && new_pkt.data != pkt.data && new_pkt.destruct)
+ if(a == 0 && new_pkt.data != pkt.data && new_pkt.destruct)
{
auto t = reinterpret_cast<std::uint8_t*>(av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE));
- if(t)
+ if(t)
{
memcpy(
- t,
+ t,
new_pkt.data,
new_pkt.size);
memset(
- t + new_pkt.size,
- 0,
+ t + new_pkt.size,
+ 0,
FF_INPUT_BUFFER_PADDING_SIZE);
new_pkt.data = t;
new_pkt.buf = nullptr;
- }
+ }
else
a = AVERROR(ENOMEM);
}
av_free_packet(&pkt);
FF_RET(
- a,
+ a,
"av_bitstream_filter_filter");
new_pkt.buf =
av_buffer_create(
- new_pkt.data,
+ new_pkt.data,
new_pkt.size,
- av_buffer_default_free,
- nullptr,
+ av_buffer_default_free,
+ nullptr,
0);
CASPAR_VERIFY(new_pkt.buf);
pkt = new_pkt;
}
-
+
if (pkt.pts != AV_NOPTS_VALUE)
{
- pkt.pts =
+ pkt.pts =
av_rescale_q(
pkt.pts,
- st.codec->time_base,
+ st.codec->time_base,
st.time_base);
}
if (pkt.dts != AV_NOPTS_VALUE)
{
- pkt.dts =
+ pkt.dts =
av_rescale_q(
- pkt.dts,
- st.codec->time_base,
+ pkt.dts,
+ st.codec->time_base,
st.time_base);
}
-
- pkt.duration =
+
+ pkt.duration =
static_cast<int>(
av_rescale_q(
- pkt.duration,
+ pkt.duration,
st.codec->time_base, st.time_base));
write_packet(
std::shared_ptr<AVPacket>(
- new AVPacket(pkt),
+ new AVPacket(pkt),
[](AVPacket* p)
{
- av_free_packet(p);
+ av_free_packet(p);
delete p;
}), token);
void write_packet(
const std::shared_ptr<AVPacket>& pkt_ptr,
std::shared_ptr<void> token)
- {
+ {
write_executor_.begin_invoke([this, pkt_ptr, token]() mutable
{
FF(av_interleaved_write_frame(
- oc_.get(),
+ oc_.get(),
pkt_ptr.get()));
- });
- }
-
+ });
+ }
+
template<typename T>
static boost::optional<T> try_remove_arg(
- std::map<std::string, std::string>& options,
+ std::map<std::string, std::string>& options,
const boost::regex& expr)
{
for(auto it = options.begin(); it != options.end(); ++it)
- {
+ {
if(boost::regex_search(it->first, expr))
{
auto arg = it->second;
return boost::optional<T>();
}
-
+
static std::map<std::string, std::string> remove_options(
- std::map<std::string, std::string>& options,
+ std::map<std::string, std::string>& options,
const boost::regex& expr)
{
std::map<std::string, std::string> result;
-
+
auto it = options.begin();
while(it != options.end())
- {
+ {
boost::smatch what;
if(boost::regex_search(it->first, what, expr))
{
result[
- what.size() > 0 && what[1].matched
- ? what[1].str()
+ what.size() > 0 && what[1].matched
+ ? what[1].str()
: it->first] = it->second;
it = options.erase(it);
}
return result;
}
-
+
static void to_dict(AVDictionary** dest, const std::map<std::string, std::string>& c)
- {
+ {
for (const auto& entry : c)
{
av_dict_set(
- dest,
- entry.first.c_str(),
+ dest,
+ entry.first.c_str(),
entry.second.c_str(), 0);
}
}
static std::map<std::string, std::string> to_map(AVDictionary* dict)
{
std::map<std::string, std::string> result;
-
- for(auto t = dict
+
+ for(auto t = dict
? av_dict_get(
- dict,
- "",
- nullptr,
- AV_DICT_IGNORE_SUFFIX)
+ dict,
+ "",
+ nullptr,
+ AV_DICT_IGNORE_SUFFIX)
: nullptr;
- t;
+ t;
t = av_dict_get(
- dict,
- "",
+ dict,
+ "",
t,
AV_DICT_IGNORE_SUFFIX))
{
spl::shared_ptr<core::frame_consumer> create_streaming_consumer(
const std::vector<std::wstring>& params, core::interaction_sink*)
-{
+{
if (params.size() < 1 || (!boost::iequals(params.at(0), L"STREAM") && !boost::iequals(params.at(0), L"FILE")))
return core::frame_consumer::empty();
spl::shared_ptr<core::frame_consumer> create_preconfigured_streaming_consumer(
const boost::property_tree::wptree& ptree, core::interaction_sink*)
-{
+{
return spl::make_shared<streaming_consumer>(
- u8(ptree_get<std::wstring>(ptree, L"path")),
+ u8(ptree_get<std::wstring>(ptree, L"path")),
u8(ptree.get<std::wstring>(L"args", L"")),
false);
}
#include "../../ffmpeg_error.h"
#include "../../ffmpeg.h"
+#include "../util/util.h"
#include <common/assert.h>
#include <common/except.h>
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
boost::join(complete_filter_graph, ";"),
audio_graph_inputs_,
audio_graph_outputs_);
-
+
if (is_logging_quiet_for_thread())
CASPAR_LOG(trace)
- << u16(std::string("\n")
+ << u16(std::string("\n")
+ avfilter_graph_dump(
- audio_graph_.get(),
+ audio_graph_.get(),
nullptr));
else
CASPAR_LOG(debug)
audio_graph_.get(),
nullptr));
}
-
+
void configure_filtergraph(
AVFilterGraph& graph,
const std::string& filtergraph,
sink_contexts.push_back(iter->filter_ctx);
FF(avfilter_graph_config(
- &graph,
+ &graph,
nullptr));
}
void push(int input_pad_id, const std::shared_ptr<AVFrame>& src_av_frame)
- {
+ {
FF(av_buffersrc_add_frame(
audio_graph_inputs_.at(input_pad_id),
src_av_frame.get()));
std::shared_ptr<AVFrame> poll(int output_pad_id)
{
- std::shared_ptr<AVFrame> filt_frame(
- av_frame_alloc(),
- [](AVFrame* p)
- {
- av_frame_free(&p);
- });
-
+ auto filt_frame = create_frame();
+
const auto ret = av_buffersink_get_frame(
audio_graph_outputs_.at(output_pad_id),
filt_frame.get());
-
+
if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
return nullptr;
-
+
FF_RET(ret, "poll");
return filt_frame;
std::shared_ptr<AVFrame> audio_filter::poll(int output_pad_id){return impl_->poll(output_pad_id);}
std::wstring audio_filter::filter_str() const{return u16(impl_->filtergraph_);}
std::vector<spl::shared_ptr<AVFrame>> audio_filter::poll_all(int output_pad_id)
-{
+{
std::vector<spl::shared_ptr<AVFrame>> frames;
for(auto frame = poll(output_pad_id); frame; frame = poll(output_pad_id))
frames.push_back(spl::make_shared_ptr(frame));
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#endif
namespace caspar { namespace ffmpeg {
-
+
struct video_decoder::implementation : boost::noncopyable
{
int index_ = -1;
const spl::shared_ptr<AVCodecContext> codec_context_;
std::queue<spl::shared_ptr<AVPacket>> packets_;
-
+
const uint32_t nb_frames_;
const int width_ = codec_context_->width;
}
std::shared_ptr<AVFrame> poll()
- {
+ {
if(packets_.empty())
return nullptr;
-
+
auto packet = packets_.front();
-
+
if(packet->data == nullptr)
- {
+ {
if(codec_context_->codec->capabilities & CODEC_CAP_DELAY)
{
auto video = decode(packet);
if(video)
return video;
}
-
+
packets_.pop();
file_frame_number_ = static_cast<uint32_t>(packet->pos);
avcodec_flush_buffers(codec_context_.get());
return flush_video();
}
-
+
packets_.pop();
return decode(packet);
}
std::shared_ptr<AVFrame> decode(spl::shared_ptr<AVPacket> pkt)
{
- auto decoded_frame = std::shared_ptr<AVFrame>(av_frame_alloc(), [](AVFrame* frame)
- {
- av_frame_free(&frame);
- });
-
+ auto decoded_frame = create_frame();
+
int frame_finished = 0;
THROW_ON_ERROR2(avcodec_decode_video2(codec_context_.get(), decoded_frame.get(), &frame_finished, pkt.get()), "[video_decoder]");
-
+
// If a decoder consumes less then the whole packet then something is wrong
// that might be just harmless padding at the end, or a problem with the
// AVParser or demuxer which puted more then one frame in a AVPacket.
- if(frame_finished == 0)
+ if(frame_finished == 0)
return nullptr;
is_progressive_ = !decoded_frame->interlaced_frame;
if(decoded_frame->repeat_pict > 0)
CASPAR_LOG(warning) << "[video_decoder] Field repeat_pict not implemented.";
-
+
++file_frame_number_;
// This ties the life of the decoded_frame to the packet that it came from. For the
// owned by the packet.
return std::shared_ptr<AVFrame>(decoded_frame.get(), [decoded_frame, pkt](AVFrame*){});
}
-
+
bool ready() const
{
return packets_.size() >= 8;
}
std::wstring print() const
- {
+ {
return L"[video-decoder] " + u16(codec_context_->codec->long_name);
}
};
//#include <windows.h>
#include <ffmpeg/producer/filter/filter.h>
+#include <ffmpeg/producer/util/util.h>
#include <core/video_format.h>
#include <core/frame/frame.h>
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
#endif
namespace caspar { namespace screen {
-
+
enum class stretch
{
none,
aspect_16_9,
aspect_invalid,
};
-
+
std::wstring name = L"Screen consumer";
int screen_index = 0;
screen::stretch stretch = screen::stretch::fill;
GLuint texture_ = 0;
std::vector<GLuint> pbos_ = std::vector<GLuint> { 0, 0 };
-
+
float width_;
float height_;
int screen_x_;
const configuration& config,
const core::video_format_desc& format_desc,
int channel_index,
- core::interaction_sink* sink)
+ core::interaction_sink* sink)
: config_(config)
, format_desc_(format_desc)
, channel_index_(channel_index)
, pts_(0)
, sink_(sink)
, filter_([&]() -> ffmpeg::filter
- {
- const auto sample_aspect_ratio =
+ {
+ const auto sample_aspect_ratio =
boost::rational<int>(
- format_desc.square_width,
+ format_desc.square_width,
format_desc.square_height) /
boost::rational<int>(
- format_desc.width,
+ format_desc.width,
format_desc.height);
return ffmpeg::filter(
{ AV_PIX_FMT_BGRA },
format_desc.field_mode == core::field_mode::progressive || !config.auto_deinterlace ? "" : "format=pix_fmts=gbrp,YADIF=1:-1");
}())
- {
+ {
if (format_desc_.format == core::video_format::ntsc && config_.aspect == configuration::aspect_ratio::aspect_4_3)
{
// Use default values which are 4:3.
}
frame_buffer_.set_capacity(1);
-
- graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
+
+ graph_->set_color("tick-time", diagnostics::color(0.0f, 0.6f, 0.9f));
graph_->set_color("frame-time", diagnostics::color(0.1f, 1.0f, 0.1f));
graph_->set_color("dropped-frame", diagnostics::color(0.3f, 0.6f, 0.3f));
graph_->set_text(print());
diagnostics::register_graph(graph_);
-
+
/*DISPLAY_DEVICE d_device = {sizeof(d_device), 0};
std::vector<DISPLAY_DEVICE> displayDevices;
for(int n = 0; EnumDisplayDevices(NULL, n, &d_device, NULL); ++n)
if(config_.screen_index >= displayDevices.size())
CASPAR_LOG(warning) << print() << L" Invalid screen-index: " << config_.screen_index;
-
+
DEVMODE devmode = {};
if(!EnumDisplaySettings(displayDevices[config_.screen_index].DeviceName, ENUM_CURRENT_SETTINGS, &devmode))
CASPAR_LOG(warning) << print() << L" Could not find display settings for screen-index: " << config_.screen_index;
-
+
screen_x_ = devmode.dmPosition.x;
screen_y_ = devmode.dmPosition.y;
screen_width_ = config_.windowed ? square_width_ : devmode.dmPelsWidth;
screen_y_ = 0;
screen_width_ = square_width_;
screen_height_ = square_height_;
-
+
polling_event_ = false;
is_running_ = true;
current_presentation_age_ = 0;
thread_ = boost::thread([this]{run();});
}
-
+
~screen_consumer()
{
is_running_ = false;
window_.setPosition(sf::Vector2i(screen_x_, screen_y_));
window_.setSize(sf::Vector2u(screen_width_, screen_height_));
window_.setActive();
-
+
if(!GLEW_VERSION_2_1 && glewInit() != GLEW_OK)
CASPAR_THROW_EXCEPTION(gl::ogl_exception() << msg_info("Failed to initialize GLEW."));
CASPAR_THROW_EXCEPTION(not_supported() << msg_info("Missing OpenGL 2.1 support."));
GL(glEnable(GL_TEXTURE_2D));
- GL(glDisable(GL_DEPTH_TEST));
+ GL(glDisable(GL_DEPTH_TEST));
GL(glClearColor(0.0, 0.0, 0.0, 0.0));
GL(glViewport(0, 0, format_desc_.width, format_desc_.height));
GL(glLoadIdentity());
-
+
calculate_aspect();
-
+
GL(glGenTextures(1, &texture_));
GL(glBindTexture(GL_TEXTURE_2D, texture_));
GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
GL(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP));
GL(glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, format_desc_.width, format_desc_.height, 0, GL_BGRA, GL_UNSIGNED_BYTE, 0));
GL(glBindTexture(GL_TEXTURE_2D, 0));
-
+
GL(glGenBuffers(2, pbos_.data()));
-
+
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbos_[0]);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, format_desc_.size, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbos_[1]);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, format_desc_.size, 0, GL_STREAM_DRAW_ARB);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
-
+
window_.setVerticalSyncEnabled(config_.vsync);
if (config_.vsync)
}
void uninit()
- {
+ {
if(texture_)
glDeleteTextures(1, &texture_);
init();
while(is_running_)
- {
+ {
try
{
auto poll_event = [this](sf::Event& e)
}
}
}
-
+
core::const_frame frame;
frame_buffer_.pop(frame);
render_and_draw_frame(frame);
-
+
/*perf_timer_.restart();
render(frame);
- graph_->set_value("frame-time", perf_timer_.elapsed()*format_desc_.fps*0.5);
+ graph_->set_value("frame-time", perf_timer_.elapsed()*format_desc_.fps*0.5);
window_.Display();*/
current_presentation_age_ = frame.get_age_millis();
- graph_->set_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);
+ graph_->set_value("tick-time", tick_timer_.elapsed()*format_desc_.fps*0.5);
tick_timer_.restart();
}
catch(...)
}
spl::shared_ptr<AVFrame> get_av_frame()
- {
- spl::shared_ptr<AVFrame> av_frame(av_frame_alloc(), [](AVFrame* p) { av_frame_free(&p); });
- avcodec_get_frame_defaults(av_frame.get());
-
- av_frame->linesize[0] = format_desc_.width*4;
+ {
+ auto av_frame = ffmpeg::create_frame();
+
+ av_frame->linesize[0] = format_desc_.width*4;
av_frame->format = PIX_FMT_BGRA;
av_frame->width = format_desc_.width;
av_frame->height = format_desc_.height;
if(screen_width_ == 0 && screen_height_ == 0)
return;
-
+
perf_timer_.restart();
auto av_frame = get_av_frame();
av_frame->data[0] = const_cast<uint8_t*>(input_frame.image_data().begin());
{
fast_memcpy(ptr, av_frame->data[0], format_desc_.size);
}
-
+
GL(glUnmapBuffer(GL_PIXEL_UNPACK_BUFFER)); // release the mapped buffer
}
GL(glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0));
-
- GL(glClear(GL_COLOR_BUFFER_BIT));
+
+ GL(glClear(GL_COLOR_BUFFER_BIT));
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 1.0f); glVertex2f(-width_, -height_);
glTexCoord2f(1.0f, 1.0f); glVertex2f( width_, -height_);
glTexCoord2f(1.0f, 0.0f); glVertex2f( width_, height_);
glTexCoord2f(0.0f, 0.0f); glVertex2f(-width_, height_);
glEnd();
-
+
GL(glBindTexture(GL_TEXTURE_2D, 0));
std::rotate(pbos_.begin(), pbos_.begin() + 1, pbos_.end());
}
std::wstring print() const
- {
+ {
return config_.name + L" " + channel_and_format();
}
-
+
void calculate_aspect()
{
if(config_.windowed)
screen_height_ = window_.getSize().y;
screen_width_ = window_.getSize().x;
}
-
+
GL(glViewport(0, 0, screen_width_, screen_height_));
std::pair<float, float> target_ratio = None();
width_ = target_ratio.first;
height_ = target_ratio.second;
}
-
+
std::pair<float, float> None()
{
float width = static_cast<float>(square_width_)/static_cast<float>(screen_width_);
, sink_(sink)
{
}
-
+
// frame_consumer
void initialize(const core::video_format_desc& format_desc, const core::audio_channel_layout&, int channel_index) override
{
return consumer_->send(frame);
}
-
+
std::wstring print() const override
{
return consumer_ ? consumer_->print() : L"[screen_consumer]";
{
return false;
}
-
+
int buffer_depth() const override
{
return 1;
{
return monitor_subject_;
}
-};
+};
void describe_consumer(core::help_sink& sink, const core::help_repository& repo)
{
{
if (params.size() < 1 || !boost::iequals(params.at(0), L"SCREEN"))
return core::frame_consumer::empty();
-
+
configuration config;
-
+
if (params.size() > 1)
config.screen_index = boost::lexical_cast<int>(params.at(1));
-
+
config.windowed = !contains_param(L"FULLSCREEN", params);
config.key_only = contains_param(L"KEY_ONLY", params);
config.interactive = !contains_param(L"NON_INTERACTIVE", params);
return spl::make_shared<screen_consumer_proxy>(config, sink);
}
-spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink* sink)
+spl::shared_ptr<core::frame_consumer> create_preconfigured_consumer(const boost::property_tree::wptree& ptree, core::interaction_sink* sink)
{
configuration config;
config.name = ptree.get(L"name", config.name);
config.aspect = configuration::aspect_ratio::aspect_16_9;
else if(aspect_str == L"4:3")
config.aspect = configuration::aspect_ratio::aspect_4_3;
-
+
return spl::make_shared<screen_consumer_proxy>(config, sink);
}