const safe_ptr<diagnostics::graph> graph_;\r
\r
executor encode_executor_;\r
- executor write_executor_;\r
\r
std::shared_ptr<AVStream> audio_st_;\r
std::shared_ptr<AVStream> video_st_;\r
, oc_(avformat_alloc_context(), av_free)\r
, format_desc_(format_desc)\r
, encode_executor_(print())\r
- , write_executor_(print() + L"/output")\r
, in_frame_number_(0)\r
, out_frame_number_(0)\r
, output_format_(format_desc, filename, options)\r
diagnostics::register_graph(graph_);\r
\r
encode_executor_.set_capacity(8);\r
- write_executor_.set_capacity(8);\r
\r
oc_->oformat = output_format_.format;\r
\r
// Tears down the consumer: drains the (only remaining) encode executor, then
// finalizes the container with av_write_trailer.
// NOTE(review): unresolved diff markers resolved — the removed lines
// stopped/joined write_executor_, which the diff eliminates.
~ffmpeg_consumer()
{
	encode_executor_.stop();
	encode_executor_.join();

	LOG_ON_ERROR2(av_write_trailer(oc_.get()), "[ffmpeg_consumer]");

// NOTE(review): hunk boundary — the lines below belong to a different
// function whose surrounding context was elided from this fragment.
	return out_frame;
}
\r
- std::shared_ptr<AVPacket> encode_video_frame(core::read_frame& frame)\r
+ void encode_video_frame(core::read_frame& frame)\r
{ \r
auto c = video_st_->codec;\r
\r
in_frame_number_++;\r
\r
if(out_time - in_time > 0.01)\r
- return nullptr;\r
+ return;\r
\r
auto av_frame = convert_video(frame, c);\r
av_frame->interlaced_frame = format_desc_.field_mode != core::field_mode::progressive;\r
av_frame->pts = out_frame_number_++;\r
\r
int out_size = THROW_ON_ERROR2(avcodec_encode_video(c, video_outbuf_.data(), video_outbuf_.size(), av_frame.get()), "[ffmpeg_consumer]");\r
- if(out_size > 0)\r
+ if(out_size == 0)\r
+ return;\r
+ \r
+ safe_ptr<AVPacket> pkt(new AVPacket, [](AVPacket* p)\r
{\r
- safe_ptr<AVPacket> pkt(new AVPacket, [](AVPacket* p)\r
- {\r
- av_free_packet(p);\r
- delete p;\r
- });\r
- av_init_packet(pkt.get());\r
+ av_free_packet(p);\r
+ delete p;\r
+ });\r
+ av_init_packet(pkt.get());\r
\r
- if (c->coded_frame->pts != AV_NOPTS_VALUE)\r
- pkt->pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st_->time_base);\r
+ if (c->coded_frame->pts != AV_NOPTS_VALUE)\r
+ pkt->pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st_->time_base);\r
\r
- if(c->coded_frame->key_frame)\r
- pkt->flags |= AV_PKT_FLAG_KEY;\r
+ if(c->coded_frame->key_frame)\r
+ pkt->flags |= AV_PKT_FLAG_KEY;\r
\r
- pkt->stream_index = video_st_->index;\r
- pkt->data = video_outbuf_.data();\r
- pkt->size = out_size;\r
- \r
- av_dup_packet(pkt.get());\r
- return pkt;\r
- } \r
- return nullptr;\r
+ pkt->stream_index = video_st_->index;\r
+ pkt->data = video_outbuf_.data();\r
+ pkt->size = out_size;\r
+ \r
+ av_interleaved_write_frame(oc_.get(), pkt.get()); \r
}\r
\r
byte_vector convert_audio(core::read_frame& frame, AVCodecContext* c)\r
return byte_vector(audio_resample_buffer.begin(), audio_resample_buffer.end());\r
}\r
\r
- std::shared_ptr<AVPacket> encode_audio_frame(core::read_frame& frame)\r
+ void encode_audio_frame(core::read_frame& frame)\r
{ \r
auto c = audio_st_->codec;\r
\r
boost::range::push_back(audio_buf_, convert_audio(frame, c));\r
\r
std::size_t frame_size = c->frame_size;\r
+ auto input_audio_size = frame_size * av_get_bytes_per_sample(c->sample_fmt) * c->channels;\r
\r
- safe_ptr<AVPacket> pkt(new AVPacket, [](AVPacket* p)\r
+ while(audio_buf_.size() >= input_audio_size)\r
{\r
- av_free_packet(p);\r
- delete p;\r
- });\r
- av_init_packet(pkt.get());\r
+ safe_ptr<AVPacket> pkt(new AVPacket, [](AVPacket* p)\r
+ {\r
+ av_free_packet(p);\r
+ delete p;\r
+ });\r
+ av_init_packet(pkt.get());\r
\r
- if(frame_size > 1)\r
- { \r
- auto input_audio_size = frame_size * av_get_bytes_per_sample(c->sample_fmt) * c->channels;\r
- \r
- if(audio_buf_.size() < input_audio_size)\r
- return nullptr;\r
- \r
- pkt->size = avcodec_encode_audio(c, audio_outbuf_.data(), audio_outbuf_.size(), reinterpret_cast<short*>(audio_buf_.data()));\r
- audio_buf_.erase(audio_buf_.begin(), audio_buf_.begin() + input_audio_size);\r
- }\r
- else\r
- {\r
- audio_outbuf_ = std::move(audio_buf_); \r
- audio_buf_.clear();\r
- pkt->size = audio_outbuf_.size();\r
- pkt->data = audio_outbuf_.data();\r
- }\r
+ if(frame_size > 1)\r
+ { \r
+ pkt->size = avcodec_encode_audio(c, audio_outbuf_.data(), audio_outbuf_.size(), reinterpret_cast<short*>(audio_buf_.data()));\r
+ audio_buf_.erase(audio_buf_.begin(), audio_buf_.begin() + input_audio_size);\r
+ }\r
+ else\r
+ {\r
+ audio_outbuf_ = std::move(audio_buf_); \r
+ audio_buf_.clear();\r
+ pkt->size = audio_outbuf_.size();\r
+ pkt->data = audio_outbuf_.data();\r
+ }\r
\r
- if(pkt->size == 0)\r
- return nullptr;\r
+ if(pkt->size == 0)\r
+ return;\r
\r
- if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)\r
- pkt->pts = av_rescale_q(c->coded_frame->pts, c->time_base, audio_st_->time_base);\r
+ if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)\r
+ pkt->pts = av_rescale_q(c->coded_frame->pts, c->time_base, audio_st_->time_base);\r
\r
- pkt->flags |= AV_PKT_FLAG_KEY;\r
- pkt->stream_index = audio_st_->index;\r
- pkt->data = reinterpret_cast<uint8_t*>(audio_outbuf_.data());\r
+ pkt->flags |= AV_PKT_FLAG_KEY;\r
+ pkt->stream_index = audio_st_->index;\r
+ pkt->data = reinterpret_cast<uint8_t*>(audio_outbuf_.data());\r
\r
- av_dup_packet(pkt.get());\r
- return pkt;\r
+ av_interleaved_write_frame(oc_.get(), pkt.get());\r
+ }\r
}\r
\r
void send(const safe_ptr<core::read_frame>& frame)\r
{ \r
boost::timer frame_timer;\r
\r
- auto video = encode_video_frame(*frame);\r
- auto audio = encode_audio_frame(*frame);\r
+ encode_video_frame(*frame);\r
+ encode_audio_frame(*frame);\r
\r
- graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);\r
- \r
- write_executor_.begin_invoke([=]\r
- {\r
- if(video)\r
- av_interleaved_write_frame(oc_.get(), video.get());\r
- if(audio)\r
- av_interleaved_write_frame(oc_.get(), audio.get());\r
- });\r
+ graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5); \r
});\r
}\r
};\r