c->pix_fmt = pix_fmt;\r
return 0;\r
}\r
- if((strcmp(name, "r") == 0 || strcmp(name, "frame_rate") == 0) && strcmp(av_class->class_name, "AVCodecContext") == 0)\r
- {\r
- AVCodecContext* c = (AVCodecContext*)obj; \r
+ //if((strcmp(name, "r") == 0 || strcmp(name, "frame_rate") == 0) && strcmp(av_class->class_name, "AVCodecContext") == 0)\r
+ //{\r
+ // AVCodecContext* c = (AVCodecContext*)obj; \r
\r
- if(c->codec_type != AVMEDIA_TYPE_VIDEO)\r
- return -1;\r
+ // if(c->codec_type != AVMEDIA_TYPE_VIDEO)\r
+ // return -1;\r
\r
- AVRational rate;\r
- int ret = av_parse_video_rate(&rate, val);\r
- if(ret < 0)\r
- return ret;\r
+ // AVRational rate;\r
+ // int ret = av_parse_video_rate(&rate, val);\r
+ // if(ret < 0)\r
+ // return ret;\r
\r
- c->time_base.num = rate.den;\r
- c->time_base.den = rate.num;\r
- return 0;\r
- }\r
+ // c->time_base.num = rate.den;\r
+ // c->time_base.den = rate.num;\r
+ // return 0;\r
+ //}\r
\r
return ::av_opt_set(obj, name, val, search_flags);\r
}\r
\r
const spl::shared_ptr<diagnostics::graph> graph_;\r
\r
- executor encode_executor_;\r
- executor write_executor_;\r
\r
std::shared_ptr<AVStream> audio_st_;\r
std::shared_ptr<AVStream> video_st_;\r
\r
- byte_vector audio_outbuf_;\r
- byte_vector audio_buf_;\r
- byte_vector video_outbuf_;\r
- byte_vector picture_buf_;\r
+ byte_vector picture_buffer_;\r
+ byte_vector audio_buffer_;\r
std::shared_ptr<SwrContext> swr_;\r
std::shared_ptr<SwsContext> sws_;\r
\r
- int64_t in_frame_number_;\r
- int64_t out_frame_number_;\r
+ int64_t frame_number_;\r
\r
output_format output_format_;\r
\r
+ executor executor_;\r
public:\r
ffmpeg_consumer(const std::string& filename, const core::video_format_desc& format_desc, std::vector<option> options)\r
: filename_(filename)\r
- , video_outbuf_(1920*1080*8)\r
- , audio_outbuf_(10000)\r
, oc_(avformat_alloc_context(), av_free)\r
, format_desc_(format_desc)\r
- , encode_executor_(print())\r
- , write_executor_(print() + L"/output")\r
- , in_frame_number_(0)\r
- , out_frame_number_(0)\r
+ , frame_number_(0)\r
, output_format_(format_desc, filename, options)\r
+ , executor_(print())\r
{\r
// TODO: Ask stakeholders about case where file already exists.\r
boost::filesystem::remove(boost::filesystem::path(env::media_folder() + u16(filename))); // Delete the file if it exists\r
graph_->set_text(print());\r
diagnostics::register_graph(graph_);\r
\r
- encode_executor_.set_capacity(8);\r
- write_executor_.set_capacity(8);\r
+ executor_.set_capacity(8);\r
\r
oc_->oformat = output_format_.format;\r
\r
\r
~ffmpeg_consumer()\r
{ \r
- encode_executor_.wait();\r
- write_executor_.wait();\r
+ executor_.wait();\r
\r
LOG_ON_ERROR2(av_write_trailer(oc_.get()), "[ffmpeg_consumer]");\r
\r
c->time_base.num = format_desc_.duration;\r
c->gop_size = 25;\r
c->flags |= format_desc_.field_mode == core::field_mode::progressive ? 0 : (CODEC_FLAG_INTERLACED_ME | CODEC_FLAG_INTERLACED_DCT);\r
- if(c->pix_fmt == PIX_FMT_NONE)\r
- c->pix_fmt = PIX_FMT_YUV420P;\r
+ c->pix_fmt = c->pix_fmt != PIX_FMT_NONE ? c->pix_fmt : PIX_FMT_YUV420P;\r
\r
if(c->codec_id == CODEC_ID_PRORES)\r
{ \r
else if(c->codec_id == CODEC_ID_H264)\r
{ \r
c->pix_fmt = PIX_FMT_YUV420P; \r
- if(options.empty())\r
- {\r
- av_opt_set(c->priv_data, "preset", "ultrafast", 0);\r
- av_opt_set(c->priv_data, "tune", "fastdecode", 0);\r
- av_opt_set(c->priv_data, "crf", "5", 0);\r
- }\r
+ av_opt_set(c->priv_data, "preset", "ultrafast", 0);\r
+ av_opt_set(c->priv_data, "tune", "fastdecode", 0);\r
+ av_opt_set(c->priv_data, "crf", "5", 0);\r
}\r
else if(c->codec_id == CODEC_ID_QTRLE)\r
{\r
c->pix_fmt = PIX_FMT_ARGB;\r
}\r
- \r
- c->max_b_frames = 0; // b-frames not supported.\r
- \r
+ \r
boost::range::remove_erase_if(options, [&](const option& o)\r
{\r
return o.name.at(0) != 'a' && ffmpeg::av_opt_set(c, o.name.c_str(), o.value.c_str(), AV_OPT_SEARCH_CHILDREN) > -1;\r
if(output_format_.format->flags & AVFMT_GLOBALHEADER)\r
c->flags |= CODEC_FLAG_GLOBAL_HEADER;\r
\r
- //c->thread_count = boost::thread::hardware_concurrency();\r
THROW_ON_ERROR2(tbb_avcodec_open(c, encoder), "[ffmpeg_consumer]");\r
\r
return std::shared_ptr<AVStream>(st, [](AVStream* st)\r
c->sample_rate = 48000;\r
c->channels = 2;\r
c->sample_fmt = AV_SAMPLE_FMT_S16;\r
+ c->time_base.num = 1;\r
+ c->time_base.den = c->sample_rate;\r
\r
if(output_format_.vcodec == CODEC_ID_FLV1) \r
c->sample_rate = 44100; \r
av_freep(&st);\r
});\r
}\r
+ \r
+	// Encodes one video frame and interleaves the resulting packet into the
+	// output container. No-op when the container has no video stream.
+	// Errors are logged and stop the executor instead of propagating.
+	void encode_video_frame(core::const_frame frame)
+	{ 
+		if(!video_st_)
+			return;
+		
+		auto enc = video_st_->codec;
+
+		try
+		{ 
+			// Convert to the encoder's pixel format/size and stamp field order.
+			auto av_frame = convert_video(frame, enc);
+			av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;
+			av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper;
+			av_frame->pts = frame_number_++; // pts counted in encoder time_base units
+
+			AVPacket pkt;
+			av_init_packet(&pkt);
+			pkt.data = nullptr; // let the encoder allocate the payload
+			pkt.size = 0;
+
+			int got_packet = 0;
+			THROW_ON_ERROR2(avcodec_encode_video2(enc, &pkt, av_frame.get(), &got_packet), "[ffmpeg_consumer]");
+			std::shared_ptr<AVPacket> guard(&pkt, av_free_packet); // frees pkt.data on scope exit
+
+			if(!got_packet) // encoder is buffering; nothing to write yet
+				return;
+
+			// Rescale timestamps from codec time_base to stream time_base.
+			if (pkt.pts != AV_NOPTS_VALUE)
+				pkt.pts = av_rescale_q(pkt.pts, enc->time_base, video_st_->time_base);
+			if (pkt.dts != AV_NOPTS_VALUE)
+				pkt.dts = av_rescale_q(pkt.dts, enc->time_base, video_st_->time_base);
+		
+			pkt.stream_index = video_st_->index;
+		
+			av_interleaved_write_frame(oc_.get(), &pkt);
+		}
+		catch(...)
+		{
+			CASPAR_LOG_CURRENT_EXCEPTION();
+			executor_.stop(); // abort further encoding after an error
+		}
+	}
+ \r
+	// Channel layout for the codec context: use the declared layout when it is
+	// set and consistent with the channel count, otherwise derive FFmpeg's
+	// default layout from the count.
+	uint64_t get_channel_layout(AVCodecContext* dec)
+	{
+		if(dec->channel_layout && av_get_channel_layout_nb_channels(dec->channel_layout) == dec->channels)
+			return dec->channel_layout;
+
+		return av_get_default_channel_layout(dec->channels);
+	}
+ \r
+	// Appends |frame|'s audio to the pending buffer, then encodes and muxes as
+	// many full codec frames as the buffer holds. No-op when the container has
+	// no audio stream. Errors are logged and stop the executor.
+	void encode_audio_frame(core::const_frame frame)
+	{ 
+		if(!audio_st_)
+			return;
+		
+		auto enc = audio_st_->codec;
+
+		try
+		{
+			boost::push_back(audio_buffer_, convert_audio(frame, enc));
+			
+			// Bytes per encoded frame. Codecs with frame_size == 0 accept
+			// arbitrary amounts, so consume everything buffered in one go.
+			// std::size_t avoids the signed/unsigned comparison below.
+			auto frame_size = enc->frame_size != 0 ? static_cast<std::size_t>(enc->frame_size) * enc->channels * av_get_bytes_per_sample(enc->sample_fmt) : audio_buffer_.size();
+			
+			// frame_size > 0 guard prevents an endless loop once the buffer has
+			// been drained ("0 >= 0" would otherwise keep the loop alive when
+			// enc->frame_size == 0 made frame_size equal the initial size).
+			while(frame_size > 0 && audio_buffer_.size() >= frame_size)
+			{ 
+				std::shared_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
+				avcodec_get_frame_defaults(av_frame.get());
+				av_frame->nb_samples = static_cast<int>(frame_size / (enc->channels * av_get_bytes_per_sample(enc->sample_fmt)));
+
+				AVPacket pkt;
+				av_init_packet(&pkt);
+				pkt.data = nullptr; // encoder allocates the payload
+				pkt.size = 0; 
+				
+				THROW_ON_ERROR2(avcodec_fill_audio_frame(av_frame.get(), enc->channels, enc->sample_fmt, audio_buffer_.data(), static_cast<int>(frame_size), 1), "[ffmpeg_consumer]");
+
+				int got_packet = 0;
+				THROW_ON_ERROR2(avcodec_encode_audio2(enc, &pkt, av_frame.get(), &got_packet), "[ffmpeg_consumer]");
+				std::shared_ptr<AVPacket> guard(&pkt, av_free_packet); // frees pkt.data on scope exit
+				
+				audio_buffer_.erase(audio_buffer_.begin(), audio_buffer_.begin() + frame_size);
+
+				if(!got_packet) // encoder is buffering; wait for more input
+					return;
+
+				// Rescale timestamps from codec time_base to stream time_base.
+				if (pkt.pts != AV_NOPTS_VALUE)
+					pkt.pts = av_rescale_q(pkt.pts, enc->time_base, audio_st_->time_base);
+				if (pkt.dts != AV_NOPTS_VALUE)
+					pkt.dts = av_rescale_q(pkt.dts, enc->time_base, audio_st_->time_base);
+				if (pkt.duration > 0)
+					pkt.duration = static_cast<int>(av_rescale_q(pkt.duration, enc->time_base, audio_st_->time_base));
+				
+				pkt.stream_index = audio_st_->index;
+				
+				av_interleaved_write_frame(oc_.get(), &pkt);
+			}
+		}
+		catch(...)
+		{
+			CASPAR_LOG_CURRENT_EXCEPTION();
+			executor_.stop(); // abort further encoding after an error
+		}
+	} 
\r
	// Converts a host video frame into an AVFrame matching the encoder's
	// pix_fmt/width/height. NOTE(review): the middle of this hunk is trimmed
	// in the patch view — in_frame is built in lines not shown here.
	std::shared_ptr<AVFrame> convert_video(core::const_frame frame, AVCodecContext* c)
	{
		
		for(int n = 0; n < 4; ++n) 
			in_frame->data[n] += in_frame->linesize[n] * output_format_.croptop; 
-		
-		std::shared_ptr<AVFrame> out_frame(avcodec_alloc_frame(), av_free);
-
-		picture_buf_.resize(avpicture_get_size(c->pix_fmt, 
-							  c->width,
-							  c->height));
+		
+		// Backing storage for the output picture, sized for the encoder's
+		// format. NOTE(review): the buffer is reused across frames — assumes
+		// the previous packet no longer references it; confirm with encoder.
+		picture_buffer_.resize(avpicture_get_size(c->pix_fmt, c->width, c->height));
		
+		std::shared_ptr<AVFrame> out_frame(avcodec_alloc_frame(), av_free);
+		
+		// Point out_frame's planes at picture_buffer_ (no pixel copy here).
		avpicture_fill(reinterpret_cast<AVPicture*>(out_frame.get()),
-				picture_buf_.data(), 
+				picture_buffer_.data(), 
				c->pix_fmt, 
				c->width, 
				c->height);
		
		return out_frame;
	}
- \r
- std::shared_ptr<AVPacket> encode_video_frame(core::const_frame frame)\r
- { \r
- auto c = video_st_->codec;\r
- \r
- auto in_time = static_cast<double>(in_frame_number_) / format_desc_.fps;\r
- auto out_time = static_cast<double>(out_frame_number_) / (static_cast<double>(c->time_base.den) / static_cast<double>(c->time_base.num));\r
- \r
- in_frame_number_++;\r
-\r
- if(out_time - in_time > 0.01)\r
- return nullptr;\r
- \r
- auto av_frame = convert_video(frame, c);\r
- av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;\r
- av_frame->top_field_first = format_desc_.field_mode == core::field_mode::upper;\r
- av_frame->pts = out_frame_number_++;\r
-\r
- int out_size = THROW_ON_ERROR2(avcodec_encode_video(c, video_outbuf_.data(), static_cast<int>(video_outbuf_.size()), av_frame.get()), "[ffmpeg_consumer]");\r
- if(out_size > 0)\r
- {\r
- spl::shared_ptr<AVPacket> pkt(new AVPacket, [](AVPacket* p)\r
- {\r
- av_free_packet(p);\r
- delete p;\r
- });\r
- av_init_packet(pkt.get());\r
- \r
- if (c->coded_frame->pts != AV_NOPTS_VALUE)\r
- pkt->pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st_->time_base);\r
-\r
- if(c->coded_frame->key_frame)\r
- pkt->flags |= AV_PKT_FLAG_KEY;\r
-\r
- pkt->stream_index = video_st_->index;\r
- pkt->data = video_outbuf_.data();\r
- pkt->size = out_size;\r
- \r
- av_dup_packet(pkt.get());\r
- return pkt;\r
- } \r
- return nullptr;\r
- }\r
-
- uint64_t get_channel_layout(AVCodecContext* dec)\r
- {\r
- auto layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);\r
- return layout;\r
- }\r
-\r
+ \r
	// Resamples the frame's audio (host layout, S32, format_desc_ rate) into
	// the encoder's sample format/rate/layout and returns the raw bytes.
	// NOTE(review): part of this hunk is trimmed in the patch view (the body
	// of the inner "if(!swr_)" failure check is not shown).
	byte_vector convert_audio(core::const_frame& frame, AVCodecContext* c)
	{
		if(!swr_) 
		{
-			swr_ = std::shared_ptr<SwrContext>(swr_alloc_set_opts(nullptr,
-											get_channel_layout(c), c->sample_fmt, c->sample_rate,
-											av_get_default_channel_layout(format_desc_.audio_channels), AV_SAMPLE_FMT_S32, format_desc_.audio_sample_rate,
+			// Lazily create the resampler: out = codec layout/format/rate,
+			// in = host default layout, S32, format_desc_ sample rate.
+			swr_ = std::shared_ptr<SwrContext>(swr_alloc_set_opts(nullptr,
+											get_channel_layout(c), c->sample_fmt, c->sample_rate,
+											av_get_default_channel_layout(format_desc_.audio_channels), AV_SAMPLE_FMT_S32, format_desc_.audio_sample_rate,
											0, nullptr), [](SwrContext* p){swr_free(&p);});
		
		if(!swr_)
		
		THROW_ON_ERROR2(swr_init(swr_.get()), "[audio_decoder]");
	}
-
-	byte_vector buffer(48000);
-
-	const uint8_t *in[] = {reinterpret_cast<const uint8_t*>(frame.audio_data().data())};
-	uint8_t* out[] = {buffer.data()};
-
+		
+		byte_vector buffer(48000); // scratch output, generously sized for one frame
+
+		const uint8_t *in[] = {reinterpret_cast<const uint8_t*>(frame.audio_data().data())};
+		uint8_t* out[] = {buffer.data()};
+
		auto channel_samples = swr_convert(swr_.get(), 
						out, static_cast<int>(buffer.size()) / c->channels / av_get_bytes_per_sample(c->sample_fmt), 
						in, static_cast<int>(frame.audio_data().size()/format_desc_.audio_channels));
		
		return buffer;
	}
- \r
- std::shared_ptr<AVPacket> encode_audio_frame(core::const_frame frame)\r
- { \r
- // TODO: Sometimes audio is missing towards end of resulting file.\r
-\r
- auto c = audio_st_->codec;\r
-\r
- boost::range::push_back(audio_buf_, convert_audio(frame, c));\r
- \r
- std::size_t frame_size = c->frame_size;\r
- \r
- spl::shared_ptr<AVPacket> pkt(new AVPacket, [](AVPacket* p)\r
- {\r
- av_free_packet(p);\r
- delete p;\r
- });\r
- av_init_packet(pkt.get());\r
-\r
- if(frame_size > 1)\r
- { \r
- auto input_audio_size = frame_size * av_get_bytes_per_sample(c->sample_fmt) * c->channels;\r
- \r
- if(audio_buf_.size() < input_audio_size)\r
- return nullptr;\r
- \r
- pkt->size = avcodec_encode_audio(c, audio_outbuf_.data(), static_cast<int>(audio_outbuf_.size()), reinterpret_cast<short*>(audio_buf_.data()));\r
- audio_buf_.erase(audio_buf_.begin(), audio_buf_.begin() + input_audio_size);\r
- }\r
- else\r
- {\r
- audio_outbuf_ = std::move(audio_buf_); \r
- audio_buf_.clear();\r
- pkt->size = static_cast<int>(audio_outbuf_.size());\r
- pkt->data = audio_outbuf_.data();\r
- }\r
- \r
- if(pkt->size == 0)\r
- return nullptr;\r
\r
- if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)\r
- pkt->pts = av_rescale_q(c->coded_frame->pts, c->time_base, audio_st_->time_base);\r
-\r
- pkt->flags |= AV_PKT_FLAG_KEY;\r
- pkt->stream_index = audio_st_->index;\r
- pkt->data = reinterpret_cast<uint8_t*>(audio_outbuf_.data());\r
- \r
- av_dup_packet(pkt.get());\r
- return pkt;\r
- }\r
- \r
	// Queues encoding of one frame on the consumer's executor and returns
	// immediately; the frame is captured by value so it stays alive until the
	// task runs.
	void send(core::const_frame& frame)
	{
-		encode_executor_.begin_invoke([=]
+		executor_.begin_invoke([=]
		{ 
			boost::timer frame_timer;
		
-			auto video = encode_video_frame(frame);
-			auto audio = encode_audio_frame(frame);
+			// Encode and write synchronously on this executor; writing is no
+			// longer deferred to a second (write) executor.
+			encode_video_frame(frame);
+			encode_audio_frame(frame);
		
			graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);
-			
-			write_executor_.begin_invoke([=]
-			{
-				if(video)
-					av_interleaved_write_frame(oc_.get(), video.get());
-				if(audio)
-					av_interleaved_write_frame(oc_.get(), audio.get());
-			});
		});
	}
};\r