#include "../../stdafx.h"

#include "util.h"

#include "format/flv.h"

#include <tbb/concurrent_unordered_map.h>
#include <tbb/concurrent_queue.h>

#include <core/producer/frame/image_transform.h>
#include <core/producer/frame/frame_factory.h>
#include <core/producer/frame_producer.h>
#include <core/mixer/write_frame.h>

#include <common/exception/exceptions.h>

#include <tbb/parallel_for.h>

#include <cmath>

#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>

#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
	#include <libswscale/swscale.h>
	#include <libavcodec/avcodec.h>
	#include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar {

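// Maps the interlacing flags of a decoded AVFrame to the corresponding CasparCG video mode.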
core::video_mode::type get_mode(AVFrame& frame)
{
	if(!frame.interlaced_frame)
		return core::video_mode::progressive;

	return frame.top_field_first ? core::video_mode::upper : core::video_mode::lower;
}

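// Translates an FFmpeg pixel format into the corresponding CasparCG pixel format.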
core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)
{
	switch(pix_fmt)
	{
	case CASPAR_PIX_FMT_LUMA:	return core::pixel_format::luma;
	case PIX_FMT_GRAY8:			return core::pixel_format::gray;
	case PIX_FMT_BGRA:			return core::pixel_format::bgra;
	case PIX_FMT_ARGB:			return core::pixel_format::argb;
	case PIX_FMT_RGBA:			return core::pixel_format::rgba;
	case PIX_FMT_ABGR:			return core::pixel_format::abgr;
	case PIX_FMT_YUV444P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV422P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV420P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV411P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUV410P:		return core::pixel_format::ycbcr;
	case PIX_FMT_YUVA420P:		return core::pixel_format::ycbcra;
	default:					return core::pixel_format::invalid;
	}
}

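// Builds a CasparCG pixel format description (plane count, linesizes and heights) for an FFmpeg pixel format at the given dimensions.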
core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)
{
	// Use a dummy picture to get the linesize of each plane.
	AVPicture dummy_pict;
	avpicture_fill(&dummy_pict, nullptr, pix_fmt == CASPAR_PIX_FMT_LUMA ? PIX_FMT_GRAY8 : pix_fmt, width, height);

	core::pixel_format_desc desc;
	desc.pix_fmt = get_pixel_format(pix_fmt);

	switch(desc.pix_fmt)
	{
	case core::pixel_format::gray:
	case core::pixel_format::luma:
		{
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
			return desc;
		}
	case core::pixel_format::bgra:
	case core::pixel_format::argb:
	case core::pixel_format::rgba:
	case core::pixel_format::abgr:
		{
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));
			return desc;
		}
	case core::pixel_format::ycbcr:
	case core::pixel_format::ycbcra:
		{
			// Find the chroma height from the spacing between the chroma plane pointers.
			size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];
			size_t h2 = size2/dummy_pict.linesize[1];

			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));
			desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));

			if(desc.pix_fmt == core::pixel_format::ycbcra)
				desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));
			return desc;
		}
	default:
		desc.pix_fmt = core::pixel_format::invalid;
		return desc;
	}
}

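// Picks the pixel format used when only the alpha (key) channel of a frame is requested, see the ALPHA_HINT handling below.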
int make_alpha_format(int format)
{
	switch(get_pixel_format(static_cast<PixelFormat>(format)))
	{
	case core::pixel_format::luma:
	case core::pixel_format::gray:
	case core::pixel_format::invalid:
		return format;
	case core::pixel_format::ycbcr:
	case core::pixel_format::ycbcra:
		return CASPAR_PIX_FMT_LUMA;
	default:
		return PIX_FMT_GRAY8;
	}
}

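// Converts a decoded AVFrame into a CasparCG write_frame; supported pixel formats are copied plane by plane,
// anything else is converted to BGRA with swscale.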
safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int hints)
{
	// Pool of software scaler contexts, keyed on (width, height, source pixel format).
	static tbb::concurrent_unordered_map<size_t, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;

	const auto width  = decoded_frame->width;
	const auto height = decoded_frame->height;
	auto desc		  = get_pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);

	if(hints & core::frame_producer::ALPHA_HINT)
		desc = get_pixel_format_desc(static_cast<PixelFormat>(make_alpha_format(decoded_frame->format)), width, height);

	if(desc.pix_fmt == core::pixel_format::invalid)
	{
		auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);

		auto write = frame_factory->create_frame(tag, desc.pix_fmt != core::pixel_format::invalid ? desc : get_pixel_format_desc(PIX_FMT_BGRA, width, height));
		write->set_type(get_mode(*decoded_frame));

		std::shared_ptr<SwsContext> sws_context;

		//CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";

		size_t key = width << 20 | height << 8 | pix_fmt;

		auto& pool = sws_contexts_[key];

		if(!pool.try_pop(sws_context))
		{
			double param;
			sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
		}

		if(!sws_context)
		{
			BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") <<
									boost::errinfo_api_function("sws_getContext"));
		}

		// Use sws_scale when the provided colorspace has no hw-accel.
		safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
		avcodec_get_frame_defaults(av_frame.get());
		avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);

		sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);
		pool.push(sws_context);

		write->commit();

		return write;
	}
	else
	{
		auto write = frame_factory->create_frame(tag, desc);
		write->set_type(get_mode(*decoded_frame));

		for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)
		{
			auto plane            = desc.planes[n];
			auto result           = write->image_data(n).begin();
			auto decoded          = decoded_frame->data[n];
			auto decoded_linesize = decoded_frame->linesize[n];

			// Copy line by line since ffmpeg sometimes pads each line.
			tbb::affinity_partitioner ap;
			tbb::parallel_for(tbb::blocked_range<size_t>(0, static_cast<int>(desc.planes[n].height)), [&](const tbb::blocked_range<size_t>& r)
			{
				for(size_t y = r.begin(); y != r.end(); ++y)
					memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);
			}, ap);

			write->commit(n);
		}

		return write;
	}
}

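// Placeholder: currently accepts any time base as a sane frame rate.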
bool is_sane_frame_rate(AVRational time_base)
{
	return true;
}

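// Patches up video stream metadata (time base and frame count) that some demuxers report incompletely.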
//TODO: Not finished
void fix_meta_data(AVFormatContext& context)
{
	auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);
	auto audio_index = av_find_best_stream(&context, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);

	if(video_index > -1)
	{
		auto& video_stream = *context.streams[video_index];

		// Heuristic: a numerator of 1 usually means the time base is in raw ticks,
		// so scale it up, e.g. 1/90000 becomes 1000/90000.
		if(video_stream.time_base.num == 1)
			video_stream.time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(video_stream.time_base.den)))-1));

		if(boost::filesystem2::path(context.filename).extension() == ".flv")
		{
			try
			{
				//auto meta = read_flv_meta_info(context.filename);
				//fps_ = boost::lexical_cast<double>(meta["framerate"]);
				//video_stream.nb_frames = static_cast<int64_t>(boost::lexical_cast<double>(meta["duration"])*fps_);
			}
			catch(...){}
		}
		else
		{
			// Fall back to the stream duration when the demuxer does not report a frame count.
			if(video_stream.nb_frames == 0)
				video_stream.nb_frames = video_stream.duration;
		}

		if(!is_sane_frame_rate(video_stream.time_base))
		{
			if(audio_index > -1)
			{
				video_stream.time_base.num = video_stream.nb_frames;
				video_stream.time_base.den = context.streams[audio_index]->duration / context.streams[audio_index]->codec->sample_rate;
			}
		}
	}
}

}