/*
* Copyright (c) 2010 Sveriges Television AB <info@casparcg.com>
*
* This file is part of CasparCG.
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*/
\r
20 #include "../../stdafx.h"
\r
22 #include "video_decoder.h"
\r
23 #include "pix_fmt.h"
\r
25 #include "../../ffmpeg_error.h"
\r
26 #include "../filter/filter.h"
\r
28 #include <common/memory/memcpy.h>
\r
30 #include <core/video_format.h>
\r
31 #include <core/producer/frame/basic_frame.h>
\r
32 #include <core/mixer/write_frame.h>
\r
33 #include <core/producer/frame/image_transform.h>
\r
34 #include <core/producer/frame/pixel_format.h>
\r
35 #include <core/producer/frame/frame_factory.h>
\r
37 #include <tbb/parallel_for.h>
\r
39 #include <boost/range/algorithm_ext.hpp>
\r
41 #if defined(_MSC_VER)
\r
42 #pragma warning (push)
\r
43 #pragma warning (disable : 4244)
\r
47 #define __STDC_CONSTANT_MACROS
\r
48 #define __STDC_LIMIT_MACROS
\r
49 #include <libswscale/swscale.h>
\r
50 #include <libavformat/avformat.h>
\r
51 #include <libavcodec/avcodec.h>
\r
53 #if defined(_MSC_VER)
\r
54 #pragma warning (pop)
\r
// PIMPL for video_decoder (public wrappers are at the bottom of the file):
// pulls encoded packets from the input, decodes them with libavcodec, and
// optionally runs them through an ffmpeg filter graph.
// NOTE(review): this copy is garbled — the original line numbers are fused
// into each line and interleaved lines are missing (braces, the `input_`
// member, the `eof_count_` declaration, the ctor body). Bytes kept as-is.
59 struct video_decoder::implementation : boost::noncopyable

// Software-scaling context, created lazily in make_write_frame() only when
// the decoded pixel format has no accelerated path; null otherwise.
62 std::shared_ptr<SwsContext> sws_context_;

// Factory used to allocate core::write_frame objects for decoded images.
63 const std::shared_ptr<core::frame_factory> frame_factory_;

// Borrowed codec context owned by the input (bound in the ctor below).
64 AVCodecContext& codec_context_;

// Running index of produced frames; wrapped modulo eof_count_ in decode().
65 size_t frame_number_;

// Optional filter graph; null when filter_str is empty (see ctor).
67 std::shared_ptr<filter> filter_;

// Number of frames the filter graph delays output by; added to eof_count_
// in flush(). NOTE(review): where this is updated is not visible here.
68 size_t filter_delay_;

71 std::string filter_str_;

// Ctor: wires the decoder to the input's video codec context and builds
// the filter graph when a non-empty filter string is given.
74 explicit implementation(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str)

76 , frame_factory_(frame_factory)

// Dereference assumes the input has a video stream — presumably validated
// by the caller; TODO confirm.
77 , codec_context_(*input_.get_video_codec_context())

79 , filter_(filter_str.empty() ? nullptr : new filter(filter_str))

81 , filter_str_(filter_str)

// "No EOF seen yet" sentinel; flush() lowers this to the real frame count.
82 , eof_count_(std::numeric_limits<int>::max())
\r
86 std::deque<std::pair<int, safe_ptr<core::write_frame>>> receive()
\r
88 std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;
\r
90 std::shared_ptr<AVPacket> pkt;
\r
91 for(int n = 0; n < 32 && result.empty() && input_.try_pop_video_packet(pkt); ++n)
\r
92 boost::range::push_back(result, decode(pkt));
\r
// Decodes one packet into zero or more (frame-number, write_frame) pairs.
// A null packet signals end-of-stream. NOTE(review): structural lines are
// missing from this copy (the branch condition selecting the filtered vs.
// direct path — presumably `if(filter_)`, TODO confirm — plus lambda
// brackets and the return statement). Bytes kept as-is.
97 std::deque<std::pair<int, safe_ptr<core::write_frame>>> decode(const std::shared_ptr<AVPacket>& video_packet)

99 if(!video_packet) // eof

102 std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;

// Wrap the counter so numbering restarts after EOF (looping playback).
104 frame_number_ = frame_number_ % eof_count_;

108 std::shared_ptr<AVFrame> frame;

// Filtered path: decode the next frame while draining already-filtered
// frames in parallel, then feed the new frame into the filter graph.
110 tbb::parallel_invoke(

113 frame = decode_frame(video_packet);

117 result = poll_filter_frames();

120 push_filter_frames(make_safe(frame));

// Direct path: decode, convert and number the frame immediately.
124 auto frame = decode_frame(video_packet);

127 result.push_back(std::make_pair(frame_number_++, make_write_frame(make_safe(frame))));
\r
// End-of-stream handling: records the true frame count (so decode() can
// wrap frame_number_ for looping) and resets libavcodec's internal state.
// NOTE(review): the guard presumably surrounding the filter_delay_
// addition (likely `if(filter_)`; TODO confirm) and the return statement
// are missing from this copy. Bytes kept as-is.
133 std::deque<std::pair<int, safe_ptr<core::write_frame>>> flush()

135 std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;

137 eof_count_ = frame_number_;

// The filter graph holds frames back; account for them in the total.
140 eof_count_ += filter_delay_;

// Drop any buffered reference frames so the next stream starts clean.
142 avcodec_flush_buffers(&codec_context_);
\r
147 void push_filter_frames(const safe_ptr<AVFrame>& frame)
\r
149 filter_->push(frame);
\r
// Drains all frames currently available from the filter graph and converts
// each into a (frame-number, write_frame) pair. NOTE(review): lines are
// missing from this copy — the early-return after the readiness check, the
// lambda's closing brackets, and the body of the !frames.empty() branch
// (presumably where frame_number_ advances; TODO confirm). Bytes kept as-is.
152 std::deque<std::pair<int, safe_ptr<core::write_frame>>> poll_filter_frames()

154 std::deque<std::pair<int, safe_ptr<core::write_frame>>> result;

// Nothing to do until the graph has buffered enough input.
156 if(!filter_->is_ready())

159 auto frames = filter_->poll();

// Every frame polled in one batch shares the same frame_number_.
161 boost::range::transform(frames, std::back_inserter(result), [&](const safe_ptr<AVFrame>& frame)

163 return std::make_pair(frame_number_, make_write_frame(frame));

166 if(!frames.empty())
\r
174 std::shared_ptr<AVFrame> decode_frame(const std::shared_ptr<AVPacket>& video_packet)
\r
176 std::shared_ptr<AVFrame> decoded_frame(avcodec_alloc_frame(), av_free);
\r
178 int frame_finished = 0;
\r
179 const int errn = avcodec_decode_video2(&codec_context_, decoded_frame.get(), &frame_finished, video_packet.get());
\r
183 BOOST_THROW_EXCEPTION(
\r
184 invalid_operation() <<
\r
185 msg_info(av_error_str(errn)) <<
\r
186 boost::errinfo_api_function("avcodec_decode_video") <<
\r
187 boost::errinfo_errno(AVUNERROR(errn)));
\r
190 if(frame_finished == 0)
\r
191 decoded_frame = nullptr;
\r
193 return decoded_frame;
\r
// Converts a decoded AVFrame into a core::write_frame: uses a parallel
// per-plane copy when the source pixel format is supported natively,
// otherwise falls back to an sws_scale conversion into BGRA. Also nudges
// the image half a line to compensate field order for interlaced material.
// NOTE(review): this copy is garbled — original line numbers are fused
// into each line, and structural lines (braces, else branches, `break`s in
// the switch, the final return) are missing. Bytes kept as-is.
196 safe_ptr<core::write_frame> make_write_frame(safe_ptr<AVFrame> decoded_frame)

198 // We don't know what the filter output might give until we received the first frame. Initialize everything on first frame.

199 auto width = decoded_frame->width;

200 auto height = decoded_frame->height;

201 auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);

202 auto desc = get_pixel_format_desc(pix_fmt, width, height);

// Fallback path: the format has no accelerated transform, so set up a
// one-time swscale conversion to BGRA.
204 if(desc.pix_fmt == core::pixel_format::invalid)

206 CASPAR_VERIFY(!sws_context_); // Initialize only once. Nothing should change while running;

207 CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";

209 desc = get_pixel_format_desc(PIX_FMT_BGRA, width, height);

// NOTE(review): "¶m" is a mis-encoded "&param" (an "&para;" entity was
// collapsed) — restore the original argument when repairing this file.
211 sws_context_.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, ¶m), sws_freeContext);

// Presumably guarded by a null-check of the freshly created context (the
// condition line is missing here; TODO confirm).
213 BOOST_THROW_EXCEPTION(operation_failed() <<

214 msg_info("Could not create software scaling context.") <<

215 boost::errinfo_api_function("sws_getContext"));

218 auto write = frame_factory_->create_frame(this, desc);

219 write->set_is_interlaced(decoded_frame->interlaced_frame != 0);

// Fast path: no scaler was needed, copy each plane straight across.
221 if(sws_context_ == nullptr)

223 tbb::parallel_for(0, static_cast<int>(desc.planes.size()), 1, [&](int n)

225 auto plane = desc.planes[n];

226 auto result = write->image_data(n).begin();

227 auto decoded = decoded_frame->data[n];

228 auto decoded_linesize = decoded_frame->linesize[n];

230 // Copy line by line since ffmpeg sometimes pads each line.

231 tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)

233 for(size_t y = r.begin(); y != r.end(); ++y)

234 memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);

242 // Use sws_scale when provided colorspace has no hw-accel.

// Wrap the write_frame's BGRA buffer in an AVFrame so sws_scale can
// write into it directly (no intermediate allocation).
243 safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);

244 avcodec_get_frame_defaults(av_frame.get());

245 avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);

247 sws_scale(sws_context_.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);

252 // Fix field-order if needed. DVVIDEO is in lower field. Make it upper field if needed.

253 if(decoded_frame->interlaced_frame)

255 switch(frame_factory_->get_video_format_desc().mode)

// Output wants upper-field-first but the frame is lower-field-first:
// shift down half a line.
257 case core::video_mode::upper:

258 if(!decoded_frame->top_field_first)

259 write->get_image_transform().set_fill_translation(0.0f, 0.5/static_cast<double>(height));

// Output wants lower-field-first but the frame is upper-field-first:
// shift up half a line.
261 case core::video_mode::lower:

262 if(decoded_frame->top_field_first)

263 write->get_image_transform().set_fill_translation(0.0f, -0.5/static_cast<double>(height));
\r
272 video_decoder::video_decoder(input& input, const safe_ptr<core::frame_factory>& frame_factory, const std::string& filter_str) : impl_(new implementation(input, frame_factory, filter_str)){}
\r
273 std::deque<std::pair<int, safe_ptr<core::write_frame>>> video_decoder::receive(){return impl_->receive();}
\r