1 #include "../StdAfx.h"
\r
3 #include "frame_muxer.h"
\r
5 #include "filter/filter.h"
\r
9 #include <core/producer/frame/basic_frame.h>
\r
10 #include <core/producer/frame/image_transform.h>
\r
11 #include <core/producer/frame/pixel_format.h>
\r
12 #include <core/producer/frame/frame_factory.h>
\r
13 #include <core/mixer/write_frame.h>
\r
15 #include <common/env.h>
\r
16 #include <common/log/log.h>
\r
18 #include <boost/range/algorithm_ext/push_back.hpp>
\r
20 using namespace caspar::core;
\r
// Converts a display_mode::type value to a human-readable wide string for
// logging (used by push() when the display mode is first chosen).
// NOTE(review): the embedded line numbering jumps (38 -> 45 -> 49 ...), so the
// enclosing switch statement and several case labels/returns were lost in this
// extract; only some branches are visible below.
38 static std::wstring print(display_mode::type value)
\r
45 return L"duplicate";
\r
49 return L"interlace";
\r
50 case deinterlace_bob:
\r
51 return L"deinterlace_bob";
\r
53 return L"deinterlace";
\r
\r
// Decides how input frames must be adapted to the output channel, based on
// the input/output scan modes (progressive vs. interlaced) and frame rates:
//   - in_fps ~= out_fps       : pass through ("simple"), deinterlacing first
//                               if the source is interlaced but the output is
//                               progressive.
//   - in_fps ~= 2 * out_fps   : weave two progressive frames into one field
//                               pair ("interlace"), or drop every other frame
//                               ("half") for a progressive output.
//   - in_fps ~= out_fps / 2   : bob-deinterlace an interlaced source into two
//                               progressive frames ("deinterlace_bob"), or
//                               duplicate each progressive frame.
// Returns display_mode::invalid when no supported conversion exists.
60 display_mode::type get_display_mode(const core::video_mode::type in_mode, double in_fps, const core::video_mode::type out_mode, double out_fps)
\r
62 if(in_mode == core::video_mode::invalid || out_mode == core::video_mode::invalid)
\r
63 return display_mode::invalid;
\r
// Frame rates are considered "equal" within a 2.0 fps tolerance, e.g. so
// that 25 vs. 23.976/29.97-style rates still match their nearest family.
65 static const auto epsilon = 2.0;
\r
67 if(std::abs(in_fps - out_fps) < epsilon)
\r
69 if(in_mode != core::video_mode::progressive && out_mode == core::video_mode::progressive)
\r
70 return display_mode::deinterlace;
\r
71 //else if(in_mode == core::video_mode::progressive && out_mode != core::video_mode::progressive)
\r
72 // simple(); // interlace_duplicate();
\r
74 return display_mode::simple;
\r
76 else if(std::abs(in_fps/2.0 - out_fps) < epsilon)
\r
// Input runs at double the output rate: only a progressive source can be
// folded down (two frames -> one field pair, or drop every other frame).
78 if(in_mode != core::video_mode::progressive)
\r
79 return display_mode::invalid;
\r
81 if(out_mode != core::video_mode::progressive)
\r
82 return display_mode::interlace;
\r
84 return display_mode::half;
\r
86 else if(std::abs(in_fps - out_fps/2.0) < epsilon)
\r
// Input runs at half the output rate: only a progressive output can absorb
// the doubled frame count (bob-deinterlace or plain duplication).
88 if(out_mode != core::video_mode::progressive)
\r
89 return display_mode::invalid;
\r
91 if(in_mode != core::video_mode::progressive)
\r
92 return display_mode::deinterlace_bob;
\r
94 return display_mode::duplicate;
\r
97 return display_mode::invalid;
\r
\r
// Pimpl state for frame_muxer: pairs queued video frames with queued audio
// samples and emits muxed basic_frames according to display_mode_.
// NOTE(review): the struct's closing brace and some members (e.g. auto_mode_,
// which the constructor initializes) are not visible in this extract.
100 struct frame_muxer::implementation : boost::noncopyable
\r
// One FIFO of video frames per input segment; pushing a null frame starts a
// new segment (see push(video_frame)). Audio is segmented the same way.
102 std::deque<std::queue<safe_ptr<write_frame>>> video_streams_;
\r
103 std::deque<std::vector<int16_t>> audio_streams_;
\r
// Fully muxed frames ready for pop().
104 std::deque<safe_ptr<basic_frame>> frame_buffer_;
\r
105 display_mode::type display_mode_;
\r
106 const double in_fps_;
\r
107 const video_format_desc format_desc_;
\r
// Per-segment counters, reset when a null frame/chunk rotates the streams.
110 size_t audio_sample_count_;
\r
111 size_t video_frame_count_;
\r
113 size_t processed_audio_sample_count_;
\r
114 size_t processed_video_frame_count_;
\r
// Lazily created ffmpeg filter (yadif) used by the deinterlacing paths.
116 std::unique_ptr<filter> filter_;
\r
117 safe_ptr<core::frame_factory> frame_factory_;
\r
\r
// Constructs the muxer state; display mode stays `invalid` until the first
// video frame arrives (see push(video_frame)).
// NOTE(review): the const member in_fps_ has no visible initializer here --
// the embedded numbering jumps 120 -> 122, so the `, in_fps_(in_fps)` line
// (and likewise the processed_* counter initializers) appears to have been
// lost in this extract; a const double member must be initialized to compile.
// NOTE(review): C++ initializes members in declaration order, not in the
// order written in this initializer list.
119 implementation(double in_fps, const video_format_desc& format_desc, const safe_ptr<core::frame_factory>& frame_factory)
\r
120 : display_mode_(display_mode::invalid)
\r
122 , format_desc_(format_desc)
\r
// auto-mode toggles frame-rate/scan adaptation; read once from config.
123 , auto_mode_(env::properties().get("configuration.auto-mode", false))
\r
124 , audio_sample_count_(0)
\r
125 , video_frame_count_(0)
\r
126 , frame_factory_(frame_factory)
\r
// Start with one (empty) video and audio stream segment.
127 , video_streams_(1)
\r
128 , audio_streams_(1)
\r
\r
// Queues one incoming video frame and runs the muxing step.
// NOTE(review): the embedded numbering jumps 132 -> 136, so the null-frame
// guard (`if(!video_frame) ...`) that makes lines 136-138 the end-of-stream
// path appears to have been lost in this extract; as visible below, a null
// push logs the segment's frame count and starts a fresh video segment.
132 void push(const std::shared_ptr<write_frame>& video_frame)
\r
136 CASPAR_LOG(debug) << L"video-frame-count: " << static_cast<float>(video_frame_count_);
\r
137 video_frame_count_ = 0;
\r
138 video_streams_.push_back(std::queue<safe_ptr<write_frame>>());
\r
// Lazily choose the display mode from the first real frame's scan type.
142 if(display_mode_ == display_mode::invalid)
\r
144 display_mode_ = auto_mode_ ? get_display_mode(video_frame->get_type(), in_fps_, format_desc_.mode, format_desc_.fps) : display_mode::simple;
\r
145 CASPAR_LOG(info) << L"display_mode: " << display_mode::print(display_mode_);
\r
148 ++video_frame_count_;
\r
150 // Fix field-order if needed
\r
// Shift the fill by half a source line up/down so that a lower-field-first
// source lands correctly in an upper-field-first output (and vice versa).
151 if(video_frame->get_type() == core::video_mode::lower && format_desc_.mode == core::video_mode::upper)
\r
152 video_frame->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(video_frame->get_pixel_format_desc().planes[0].height));
\r
153 else if(video_frame->get_type() == core::video_mode::upper && format_desc_.mode == core::video_mode::lower)
\r
154 video_frame->get_image_transform().set_fill_translation(0.0f, -0.5/static_cast<double>(video_frame->get_pixel_format_desc().planes[0].height));
\r
156 video_streams_.back().push(make_safe(video_frame));
\r
// Try to emit any frames that are now complete.
158 process(frame_buffer_);
\r
160 video_frame->commit();
\r
\r
// Queues one incoming chunk of interleaved 16-bit audio samples and runs
// the muxing step. A null chunk marks end-of-stream for the current audio
// segment: log the chunk count and start a fresh segment.
163 void push(const std::shared_ptr<std::vector<int16_t>>& audio_samples)
\r
165 if(!audio_samples)
\r
167 CASPAR_LOG(debug) << L"audio-chunk-count: " << audio_sample_count_/format_desc_.audio_samples_per_frame;
\r
168 audio_streams_.push_back(std::vector<int16_t>());
\r
169 audio_sample_count_ = 0;
\r
173 audio_sample_count_ += audio_samples->size();
\r
// Append the samples to the current segment and try to emit frames.
175 boost::range::push_back(audio_streams_.back(), *audio_samples);
\r
176 process(frame_buffer_);
\r
\r
// Removes and returns the oldest fully muxed frame. Caller must ensure
// size() > 0. NOTE(review): the `return frame;` line appears to have been
// lost in this extract (embedded numbering jumps 182 -> 186).
179 safe_ptr<basic_frame> pop()
\r
181 auto frame = frame_buffer_.front();
\r
182 frame_buffer_.pop_front();
\r
\r
// Number of fully muxed frames ready for pop().
186 size_t size() const
\r
188 return frame_buffer_.size();
\r
\r
// Removes and returns the oldest queued video frame of the current segment.
// NOTE(review): the `return frame;` line appears to have been lost in this
// extract (embedded numbering jumps 194 -> 199).
191 safe_ptr<core::write_frame> pop_video()
\r
193 auto frame = video_streams_.front().front();
\r
194 video_streams_.front().pop();
\r
\r
// Removes and returns exactly one output frame's worth of audio samples
// from the current segment. NOTE(review): the `return samples;` line appears
// to have been lost in this extract (embedded numbering jumps 207 -> 212).
199 std::vector<int16_t> pop_audio()
\r
201 CASPAR_VERIFY(audio_streams_.front().size() >= format_desc_.audio_samples_per_frame);
\r
203 auto begin = audio_streams_.front().begin();
\r
204 auto end = begin + format_desc_.audio_samples_per_frame;
\r
206 auto samples = std::vector<int16_t>(begin, end);
\r
207 audio_streams_.front().erase(begin, end);
\r
\r
// True when more video may be pushed safely: the current segment holds more
// than one frame and video is not lagging behind audio segment-wise.
212 bool video_ready() const
\r
214 return video_frames() > 1 && video_streams_.size() >= audio_streams_.size();
\r
\r
// True when more audio may be pushed safely: the current segment holds more
// than one frame's worth of samples and audio is not lagging behind video.
217 bool audio_ready() const
\r
219 return audio_chunks() > 1 && audio_streams_.size() >= video_streams_.size();
\r
\r
// Number of video frames queued in the current (newest) segment.
222 size_t video_frames() const
\r
224 return video_streams_.back().size();
\r
\r
// Number of whole output-frame-sized audio chunks queued in the current
// (newest) segment.
227 size_t audio_chunks() const
\r
229 return audio_streams_.back().size() / format_desc_.audio_samples_per_frame;
\r
\r
// One muxing step: drops exhausted front segments (truncating whatever
// unpaired video/audio remains in them), then dispatches to the handler for
// the active display mode, which appends completed frames to `dest`.
// NOTE(review): line 244 appears to be an early-out when there is not enough
// data to produce a frame; its `return;` body seems to have been lost in this
// extract (embedded numbering jumps 244 -> 247).
232 void process(std::deque<safe_ptr<basic_frame>>& dest)
\r
// When both the video and audio front segments have a newer segment queued
// and at least one of them is drained, retire the front pair together so
// the streams stay aligned segment-by-segment.
234 if(video_streams_.size() > 1 && audio_streams_.size() > 1 &&
\r
235 (video_streams_.front().empty() || audio_streams_.front().empty()))
\r
237 if(!video_streams_.front().empty() || !audio_streams_.front().empty())
\r
238 CASPAR_LOG(debug) << " Truncating: " << video_streams_.front().size() << L" video-frames, " << audio_streams_.front().size() << L" audio-samples.";
\r
240 video_streams_.pop_front();
\r
241 audio_streams_.pop_front();
\r
244 if(video_streams_.front().empty() || audio_streams_.front().size() < format_desc_.audio_samples_per_frame)
\r
247 switch(display_mode_)
\r
249 case display_mode::simple: return simple(dest);
\r
250 case display_mode::duplicate: return duplicate(dest);
\r
251 case display_mode::half: return half(dest);
\r
252 case display_mode::interlace: return interlace(dest);
\r
253 case display_mode::deinterlace_bob: return deinterlace_bob(dest);
\r
254 case display_mode::deinterlace: return deinterlace(dest);
\r
255 default: BOOST_THROW_EXCEPTION(invalid_operation());
\r
\r
// Pass-through path (1 input frame -> 1 output frame). Special case: when an
// interlaced source at a different height goes to an interlaced output, the
// frame will be scaled, so it must be bob-deinterlaced (yadif) and the two
// resulting frames re-woven into one field pair to avoid field artifacts.
// NOTE(review): several lines are missing from this extract (embedded
// numbering jumps 266 -> 268, 274 -> 277) -- most likely a `if(!filter_)`
// guard around the filter construction and a bail-out/log when yadif yields
// fewer than two frames; the logic below should be read with that in mind.
259 void simple(std::deque<safe_ptr<basic_frame>>& dest)
\r
// Not enough data for one output frame yet.
261 if(video_streams_.front().empty() || audio_streams_.front().size() < format_desc_.audio_samples_per_frame)
\r
264 if(video_streams_.front().front()->get_type() != core::video_mode::progressive && format_desc_.mode != core::video_mode::progressive &&
\r
265 video_streams_.front().front()->get_pixel_format_desc().planes.at(0).height != format_desc_.height )
\r
266 { // The frame will most likely be scaled, we need to deinterlace->reinterlace
\r
// yadif mode 1 = bob (one frame per field), parity -1 = auto.
268 filter_.reset(new filter(L"YADIF=1:-1"));
\r
270 auto frame = pop_video();
\r
272 auto av_frames = filter_->execute(as_av_frame(frame));
\r
274 if(av_frames.size() < 2)
\r
277 auto frame1 = make_write_frame(frame->tag(), av_frames[0], frame_factory_);
\r
279 auto frame2 = make_write_frame(frame->tag(), av_frames[1], frame_factory_);
\r
// Audio rides on the first field-frame only; the pair is re-interlaced.
282 frame1->audio_data() = pop_audio();
\r
283 dest.push_back(core::basic_frame::interlace(frame1, frame2, format_desc_.mode));
\r
// Plain path: forward one frame with one frame's worth of audio.
287 auto frame1 = pop_video();
\r
289 frame1->audio_data() = pop_audio();
\r
291 dest.push_back(frame1);
\r
\r
// Frame-doubling path (1 input frame -> 2 output frames) for sources at half
// the output rate. Requires two output frames' worth of audio per input
// frame; each duplicate receives its own audio chunk.
295 void duplicate(std::deque<safe_ptr<basic_frame>>& dest)
\r
297 if(video_streams_.front().empty() || audio_streams_.front().size()/2 < format_desc_.audio_samples_per_frame)
\r
300 auto frame = pop_video();
\r
303 auto frame1 = make_safe<core::write_frame>(*frame); // make a copy
\r
304 frame1->audio_data() = pop_audio();
\r
306 auto frame2 = frame;
\r
307 frame2->audio_data() = pop_audio();
\r
309 dest.push_back(frame1);
\r
310 dest.push_back(frame2);
\r
\r
// Frame-dropping path (2 input frames -> 1 output frame) for progressive
// sources at twice the output rate: keep the first frame (with its audio)
// and discard the second.
313 void half(std::deque<safe_ptr<basic_frame>>& dest)
\r
315 if(video_streams_.front().size() < 2 || audio_streams_.front().size() < format_desc_.audio_samples_per_frame)
\r
318 auto frame1 = pop_video();
\r
320 frame1->audio_data() = pop_audio();
\r
322 video_streams_.front().pop(); // Throw away
\r
324 dest.push_back(frame1);
\r
\r
// Weave path (2 input frames -> 1 interlaced output frame) for progressive
// sources at twice the output rate: two consecutive frames become the two
// fields of one output frame; audio rides on the first.
327 void interlace(std::deque<safe_ptr<basic_frame>>& dest)
\r
329 if(video_streams_.front().size() < 2 || audio_streams_.front().size() < format_desc_.audio_samples_per_frame)
\r
332 auto frame1 = pop_video();
\r
335 frame1->audio_data() = pop_audio();
\r
337 auto frame2 = pop_video();
\r
339 dest.push_back(core::basic_frame::interlace(frame1, frame2, format_desc_.mode));
\r
\r
// Bob-deinterlace path (1 interlaced input frame -> 2 progressive output
// frames) using yadif mode 1 (one frame per field, parity auto). Requires
// two output frames' worth of audio per input frame.
// NOTE(review): lines are missing from this extract (embedded numbering
// jumps 344 -> 348 and 354 -> 357) -- most likely a `if(!filter_)` guard
// around the filter construction and a bail-out when yadif yields fewer
// than two frames.
342 void deinterlace_bob(std::deque<safe_ptr<basic_frame>>& dest)
\r
344 if(video_streams_.front().empty() || audio_streams_.front().size()/2 < format_desc_.audio_samples_per_frame)
\r
348 filter_.reset(new filter(L"YADIF=1:-1"));
\r
350 auto frame = pop_video();
\r
352 auto av_frames = filter_->execute(as_av_frame(frame));
\r
354 if(av_frames.size() < 2)
\r
357 auto frame1 = make_write_frame(frame->tag(), av_frames.at(0), frame_factory_);
\r
359 frame1->audio_data() = pop_audio();
\r
361 auto frame2 = make_write_frame(frame->tag(), av_frames.at(1), frame_factory_);
\r
363 frame2->audio_data() = pop_audio();
\r
365 dest.push_back(frame1);
\r
366 dest.push_back(frame2);
\r
\r
// Deinterlace path (1 interlaced input frame -> 1 progressive output frame)
// using yadif mode 0 (one frame per frame-pair, parity auto).
// NOTE(review): lines are missing from this extract (embedded numbering
// jumps 371 -> 375 and 381 -> 384) -- most likely a `if(!filter_)` guard
// around the filter construction and a bail-out when yadif yields no frame.
369 void deinterlace(std::deque<safe_ptr<basic_frame>>& dest)
\r
371 if(video_streams_.front().empty() || audio_streams_.front().size() < format_desc_.audio_samples_per_frame)
\r
375 filter_.reset(new filter(L"YADIF=0:-1"));
\r
377 auto frame = pop_video();
\r
379 auto av_frames = filter_->execute(as_av_frame(frame));
\r
381 if(av_frames.empty())
\r
384 auto frame1 = make_write_frame(frame->tag(), av_frames.at(0), frame_factory_);
\r
386 frame1->audio_data() = pop_audio();
\r
388 dest.push_back(frame1);
\r
\r
// Public facade: thin pimpl forwarders to frame_muxer::implementation.
392 frame_muxer::frame_muxer(double in_fps, const video_format_desc& format_desc, const safe_ptr<core::frame_factory>& frame_factory)
\r
393 : impl_(new implementation(in_fps, format_desc, frame_factory)){}
\r
394 void frame_muxer::push(const std::shared_ptr<write_frame>& video_frame){impl_->push(video_frame);}
\r
395 void frame_muxer::push(const std::shared_ptr<std::vector<int16_t>>& audio_samples){return impl_->push(audio_samples);}
\r
396 safe_ptr<basic_frame> frame_muxer::pop(){return impl_->pop();}
\r
397 size_t frame_muxer::size() const {return impl_->size();}
\r
// Convenience: empty iff no muxed frames are buffered.
398 bool frame_muxer::empty() const {return impl_->size() == 0;}
\r
399 bool frame_muxer::video_ready() const{return impl_->video_ready();}
\r
400 bool frame_muxer::audio_ready() const{return impl_->audio_ready();}
\r
\r