#include "../../stdafx.h"

#include "util.h"

#include "format/flv.h"

#include <tbb/concurrent_unordered_map.h>
#include <tbb/concurrent_queue.h>

#include <core/producer/frame/frame_transform.h>
#include <core/producer/frame/frame_factory.h>
#include <core/producer/frame_producer.h>
#include <core/mixer/write_frame.h>

#include <common/exception/exceptions.h>

#include <tbb/parallel_for.h>

#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>

#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
	#include <libswscale/swscale.h>
	#include <libavcodec/avcodec.h>
	#include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar { namespace ffmpeg {

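// Derives the CasparCG field mode from an AVFrame's interlacing flags:
// progressive frames map to field_mode::progressive, interlaced frames to
// upper or lower depending on top_field_first.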
core::field_mode::type get_mode(AVFrame& frame)
{
	if(!frame.interlaced_frame)
		return core::field_mode::progressive;

	return frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
}

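// Maps an FFmpeg pixel format to the corresponding CasparCG pixel format.
// All planar YUV layouts collapse to ycbcr/ycbcra here; the per-plane
// dimensions are recovered later in get_pixel_format_desc(). Formats that
// cannot be uploaded directly yield pixel_format::invalid, which triggers
// the swscale fallback in make_write_frame().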
core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)
{
	switch(pix_fmt)
	{
	case CASPAR_PIX_FMT_LUMA:	return core::pixel_format::luma;
	case PIX_FMT_GRAY8:			return core::pixel_format::gray;
	case PIX_FMT_BGRA:			return core::pixel_format::bgra;
	case PIX_FMT_ARGB:			return core::pixel_format::argb;
	case PIX_FMT_RGBA:			return core::pixel_format::rgba;
	case PIX_FMT_ABGR:			return core::pixel_format::abgr;
	case PIX_FMT_YUV444P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV422P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV420P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV411P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV410P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUVA420P:		return core::pixel_format::ycbcra;
	default:					return core::pixel_format::invalid;
	}
}

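// Builds a pixel_format_desc whose planes match FFmpeg's memory layout for
// the given format and size. avpicture_fill() is called with a null buffer,
// so dummy_pict.linesize[] holds the per-plane strides and data[] holds the
// plane offsets; the chroma plane height can then be derived as
// (offset of plane 2 - offset of plane 1) / chroma linesize.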
core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)
{
	// Get linesizes
	AVPicture dummy_pict;
	avpicture_fill(&dummy_pict, nullptr, pix_fmt == CASPAR_PIX_FMT_LUMA ? PIX_FMT_GRAY8 : pix_fmt, width, height);

	core::pixel_format_desc desc;
	desc.pix_fmt = get_pixel_format(pix_fmt);

	switch(desc.pix_fmt)
	{
	case core::pixel_format::gray:
	case core::pixel_format::luma:
		{
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
			return desc;
		}
	case core::pixel_format::bgra:
	case core::pixel_format::argb:
	case core::pixel_format::rgba:
	case core::pixel_format::abgr:
		{
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));
			return desc;
		}
	case core::pixel_format::ycbcr:
	case core::pixel_format::ycbcra:
		{
			// Find chroma height
			size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];
			size_t h2 = size2/dummy_pict.linesize[1];

			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));

			if(desc.pix_fmt == core::pixel_format::ycbcra)
				desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));
			return desc;
		}
	default:
		desc.pix_fmt = core::pixel_format::invalid;
		return desc;
	}
}

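// Substitutes a pixel format suitable for extracting an alpha/key signal
// from a frame (used when a producer passes ALPHA_HINT). Planar YCbCr
// formats are mapped to CASPAR_PIX_FMT_LUMA, presumably so that only the
// luma plane is uploaded and interpreted as the key; other formats fall
// back to PIX_FMT_GRAY8.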
int make_alpha_format(int format)
{
	switch(get_pixel_format(static_cast<PixelFormat>(format)))
	{
	case core::pixel_format::luma:
	case core::pixel_format::gray:
	case core::pixel_format::invalid:
		return format;
	case core::pixel_format::ycbcr:
	case core::pixel_format::ycbcra:
		return CASPAR_PIX_FMT_LUMA;
	default:
		return PIX_FMT_GRAY8;
	}
}

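// Converts a decoded AVFrame into a core::write_frame obtained from the
// given frame_factory. Formats the mixer understands are copied plane by
// plane (line by line, since FFmpeg may pad each line); anything else is
// converted to BGRA in software via swscale. SwsContext objects are pooled
// in a lock-free map keyed by width, height and pixel format packed into a
// single size_t (width << 20 | height << 8 | pix_fmt), so contexts are
// reused across frames with the same geometry.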
safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int hints)
{
	static tbb::concurrent_unordered_map<size_t, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;

	const auto width  = decoded_frame->width;
	const auto height = decoded_frame->height;
	auto desc		  = get_pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);

	if(hints & core::frame_producer::ALPHA_HINT)
		desc = get_pixel_format_desc(static_cast<PixelFormat>(make_alpha_format(decoded_frame->format)), width, height);

	if(desc.pix_fmt == core::pixel_format::invalid)
	{
		auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);

		auto write = frame_factory->create_frame(tag, get_pixel_format_desc(PIX_FMT_BGRA, width, height));
		write->set_type(get_mode(*decoded_frame));

		std::shared_ptr<SwsContext> sws_context;

		//CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";

		size_t key = width << 20 | height << 8 | pix_fmt;

		auto& pool = sws_contexts_[key];

		if(!pool.try_pop(sws_context))
		{
			double param;
			sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
		}

		if(!sws_context)
		{
			BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") <<
									boost::errinfo_api_function("sws_getContext"));
		}

		// Use sws_scale when provided colorspace has no hw-accel.
		safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
		avcodec_get_frame_defaults(av_frame.get());
		avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);

		sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);
		pool.push(sws_context);

		write->commit();

		return write;
	}
	else
	{
		auto write = frame_factory->create_frame(tag, desc);
		write->set_type(get_mode(*decoded_frame));

		for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)
		{
			auto plane            = desc.planes[n];
			auto result           = write->image_data(n).begin();
			auto decoded          = decoded_frame->data[n];
			auto decoded_linesize = decoded_frame->linesize[n];

			// Copy line by line since ffmpeg sometimes pads each line.
			tbb::affinity_partitioner ap;
			tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)
			{
				for(size_t y = r.begin(); y != r.end(); ++y)
					memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);
			}, ap);

			write->commit(n);
		}

		return write;
	}
}

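// Heuristic: treats the inverse of the given time base (den/num) as a frame
// rate and accepts it only if it falls between 20 and 65 fps, which roughly
// covers the broadcast rates handled elsewhere (23.976 up to 60 fps).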
bool is_sane_fps(AVRational time_base)
{
	double fps = static_cast<double>(time_base.den) / static_cast<double>(time_base.num);
	return fps > 20.0 && fps < 65.0;
}

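// Patches up unreliable container metadata on the best video stream so that
// the rest of the producer can trust time_base and nb_frames:
//  - .flv files: frame rate and frame count are taken from the FLV metadata
//    tag instead of the demuxer.
//  - other files: nb_frames falls back to the stream duration, obviously
//    bogus time bases are repaired (if possible from the audio duration),
//    and a frame rate that is double what the frame count implies is halved.
// Finally the frame rate is snapped to the closest rate known to
// core::video_format_desc and written back as time_base.num = 1000000,
// time_base.den = closest_fps * 1000000.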
void fix_meta_data(AVFormatContext& context)
{
	auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);
	auto audio_index = av_find_best_stream(&context, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);

	if(video_index < 0)
		return;

	auto& video_context = *context.streams[video_index]->codec;
	auto& video_stream  = *context.streams[video_index];

	if(boost::filesystem2::path(context.filename).extension() == ".flv")
	{
		try
		{
			auto meta = read_flv_meta_info(context.filename);
			double fps = boost::lexical_cast<double>(meta["framerate"]);
			video_context.time_base.num = 1000000;
			video_context.time_base.den = static_cast<int>(fps*1000000.0);
			video_stream.nb_frames = static_cast<int64_t>(boost::lexical_cast<double>(meta["duration"])*fps);
		}
		catch(...){}
	}
	else
	{
		if(video_stream.nb_frames == 0)
			video_stream.nb_frames = video_stream.duration;

		if(!is_sane_fps(video_context.time_base))
		{
			if(video_context.time_base.num == 1)
				video_context.time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(video_context.time_base.den)))-1));

			if(!is_sane_fps(video_context.time_base) && audio_index > -1)
			{
				auto& audio_context = *context.streams[audio_index]->codec;
				auto& audio_stream  = *context.streams[audio_index];

				double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);

				video_context.time_base.num = static_cast<int>(duration_sec*100000.0);
				video_context.time_base.den = static_cast<int>(video_stream.nb_frames*100000);
			}
		}

		if(audio_index > -1) // Check for invalid double frame-rate
		{
			auto& audio_context = *context.streams[audio_index]->codec;
			auto& audio_stream  = *context.streams[audio_index];

			double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);
			double fps = static_cast<double>(video_context.time_base.den) / static_cast<double>(video_context.time_base.num);

			double fps_nb_frames	= static_cast<double>(duration_sec*fps);
			double stream_nb_frames = static_cast<double>(video_stream.nb_frames);
			double diff = std::abs(fps_nb_frames - stream_nb_frames*2.0);
			if(diff < fps_nb_frames*0.05)
				video_context.time_base.num *= 2;
		}
	}

	double fps = static_cast<double>(video_context.time_base.den) / static_cast<double>(video_context.time_base.num);

	double closest_fps = 0.0;
	for(int n = 0; n < core::video_format::count; ++n)
	{
		auto format = core::video_format_desc::get(static_cast<core::video_format::type>(n));

		double diff1 = std::abs(format.fps - fps);
		double diff2 = std::abs(closest_fps - fps);

		if(diff1 < diff2)
			closest_fps = format.fps;
	}

	video_context.time_base.num = 1000000;
	video_context.time_base.den = static_cast<int>(closest_fps*1000000.0);
}

}}