/*
* Copyright 2013 Sveriges Television AB http://casparcg.com/
*
* This file is part of CasparCG (www.casparcg.com).
*
* CasparCG is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CasparCG is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
*
* Author: Robert Nagy, ronag89@gmail.com
*/

#include "../../stdafx.h"

#include "util.h"

#include "flv.h"

#include "../tbb_avcodec.h"
#include "../../ffmpeg_error.h"

#include <tbb/concurrent_unordered_map.h>
#include <tbb/concurrent_queue.h>

#include <core/producer/frame/frame_transform.h>
#include <core/producer/frame/frame_factory.h>
#include <core/producer/frame_producer.h>
#include <core/mixer/write_frame.h>
#include <core/mixer/audio/audio_util.h>

#include <common/exception/exceptions.h>
#include <common/utility/assert.h>
#include <common/memory/memcpy.h>

#include <tbb/parallel_for.h>

#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string.hpp>
// Used by helpers below (boost::assign::list_of, std::ifstream, std::setprecision,
// std::pow); these may already be provided indirectly via stdafx.h.
#include <boost/assign/list_of.hpp>

#include <cmath>
#include <fstream>
#include <iomanip>
#include <sstream>

#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
    #include <libswscale/swscale.h>
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar { namespace ffmpeg {

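// Shared sentinel buffers. These singletons are handed through the producer pipeline
// so that downstream code can (presumably) recognise "flush" and "no data" frames by
// pointer identity instead of allocating empty buffers for every frame.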
std::shared_ptr<core::audio_buffer> flush_audio()
{
    static std::shared_ptr<core::audio_buffer> audio(new core::audio_buffer());
    return audio;
}

std::shared_ptr<core::audio_buffer> empty_audio()
{
    static std::shared_ptr<core::audio_buffer> audio(new core::audio_buffer());
    return audio;
}

std::shared_ptr<AVFrame> flush_video()
{
    static std::shared_ptr<AVFrame> video(avcodec_alloc_frame(), av_free);
    return video;
}

std::shared_ptr<AVFrame> empty_video()
{
    static std::shared_ptr<AVFrame> video(avcodec_alloc_frame(), av_free);
    return video;
}

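// Derives the CasparCG field mode from FFmpeg's interlacing flags on a decoded frame.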
core::field_mode::type get_mode(const AVFrame& frame)
{
    if(!frame.interlaced_frame)
        return core::field_mode::progressive;

    return frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
}

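// Maps an FFmpeg pixel format onto the formats the CasparCG mixer understands;
// anything unsupported is reported as invalid and later converted via swscale in
// make_write_frame().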
core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)
{
    switch(pix_fmt)
    {
    case CASPAR_PIX_FMT_LUMA:   return core::pixel_format::luma;
    case PIX_FMT_GRAY8:         return core::pixel_format::gray;
    case PIX_FMT_BGRA:          return core::pixel_format::bgra;
    case PIX_FMT_ARGB:          return core::pixel_format::argb;
    case PIX_FMT_RGBA:          return core::pixel_format::rgba;
    case PIX_FMT_ABGR:          return core::pixel_format::abgr;
    case PIX_FMT_YUV444P:       return core::pixel_format::ycbcr;
    case PIX_FMT_YUV422P:       return core::pixel_format::ycbcr;
    case PIX_FMT_YUV420P:       return core::pixel_format::ycbcr;
    case PIX_FMT_YUV411P:       return core::pixel_format::ycbcr;
    case PIX_FMT_YUV410P:       return core::pixel_format::ycbcr;
    case PIX_FMT_YUVA420P:      return core::pixel_format::ycbcra;
    default:                    return core::pixel_format::invalid;
    }
}

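// Builds the plane layout (linesize, height, channel count) for a frame of the given
// format and size. avpicture_fill() with a null buffer is only used to obtain
// FFmpeg's linesizes and plane offsets; no pixel memory is allocated or touched.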
core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)
{
    // Get linesizes
    AVPicture dummy_pict;
    avpicture_fill(&dummy_pict, nullptr, pix_fmt == CASPAR_PIX_FMT_LUMA ? PIX_FMT_GRAY8 : pix_fmt, width, height);

    core::pixel_format_desc desc;
    desc.pix_fmt = get_pixel_format(pix_fmt);

    switch(desc.pix_fmt)
    {
    case core::pixel_format::gray:
    case core::pixel_format::luma:
        {
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
            return desc;
        }
    case core::pixel_format::bgra:
    case core::pixel_format::argb:
    case core::pixel_format::rgba:
    case core::pixel_format::abgr:
        {
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));
            return desc;
        }
    case core::pixel_format::ycbcr:
    case core::pixel_format::ycbcra:
        {
            // Find chroma height
            size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];
            size_t h2 = size2/dummy_pict.linesize[1];

            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));

            if(desc.pix_fmt == core::pixel_format::ycbcra)
                desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));
            return desc;
        }
    default:
        desc.pix_fmt = core::pixel_format::invalid;
        return desc;
    }
}

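// When only the key/alpha is of interest, planar YCbCr formats are reinterpreted as a
// single luma plane (CASPAR_PIX_FMT_LUMA) so that the first plane can be read as a
// grayscale image.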
int make_alpha_format(int format)
{
    switch(get_pixel_format(static_cast<PixelFormat>(format)))
    {
    case core::pixel_format::ycbcr:
    case core::pixel_format::ycbcra:
        return CASPAR_PIX_FMT_LUMA;
    default:
        return format;
    }
}

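// Converts a decoded AVFrame into a CasparCG write_frame.
//
// Two paths:
//  * If the source pixel format maps onto a mixer format, each plane is copied
//    directly (line by line when FFmpeg has padded the linesize).
//  * Otherwise the frame is converted with swscale into a suitable target format
//    (BGRA by default, or a planar YUV format for the packed/10-bit cases listed
//    below). SwsContexts are pooled per (width, height, src format, dst format)
//    key so they can be reused across frames and threads.
//
// The tail of the function compensates for NTSC DV geometry and field-order
// mismatches by adjusting the frame's fill transform.
//
// Rough caller-side sketch (names are illustrative, not from this file):
//
//   std::shared_ptr<AVFrame> av_frame = decode_next_video(packet);
//   auto frame = make_write_frame(this, make_safe_ptr(av_frame), frame_factory_, hints_, channel_layout_);
//   frame_buffer_.push(frame);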
safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int hints, const core::channel_layout& audio_channel_layout)
{
    static tbb::concurrent_unordered_map<int64_t, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;

    if(decoded_frame->width < 1 || decoded_frame->height < 1)
        return make_safe<core::write_frame>(tag, audio_channel_layout);

    const auto width  = decoded_frame->width;
    const auto height = decoded_frame->height;
    auto desc         = get_pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);

    if(hints & core::frame_producer::ALPHA_HINT)
        desc = get_pixel_format_desc(static_cast<PixelFormat>(make_alpha_format(decoded_frame->format)), width, height);

    std::shared_ptr<core::write_frame> write;

    if(desc.pix_fmt == core::pixel_format::invalid)
    {
        auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);
        auto target_pix_fmt = PIX_FMT_BGRA;

        if(pix_fmt == PIX_FMT_UYVY422)
            target_pix_fmt = PIX_FMT_YUV422P;
        else if(pix_fmt == PIX_FMT_YUYV422)
            target_pix_fmt = PIX_FMT_YUV422P;
        else if(pix_fmt == PIX_FMT_UYYVYY411)
            target_pix_fmt = PIX_FMT_YUV411P;
        else if(pix_fmt == PIX_FMT_YUV420P10)
            target_pix_fmt = PIX_FMT_YUV420P;
        else if(pix_fmt == PIX_FMT_YUV422P10)
            target_pix_fmt = PIX_FMT_YUV422P;
        else if(pix_fmt == PIX_FMT_YUV444P10)
            target_pix_fmt = PIX_FMT_YUV444P;

        auto target_desc = get_pixel_format_desc(target_pix_fmt, width, height);

        write = frame_factory->create_frame(tag, target_desc, audio_channel_layout);
        write->set_type(get_mode(*decoded_frame));

        std::shared_ptr<SwsContext> sws_context;

        //CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";

        int64_t key = ((static_cast<int64_t>(width)          << 32) & 0xFFFF00000000) |
                      ((static_cast<int64_t>(height)         << 16) & 0xFFFF0000) |
                      ((static_cast<int64_t>(pix_fmt)        <<  8) & 0xFF00) |
                      ((static_cast<int64_t>(target_pix_fmt) <<  0) & 0xFF);

        auto& pool = sws_contexts_[key];

        if(!pool.try_pop(sws_context))
        {
            double param;
            sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, target_pix_fmt, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
        }

        if(!sws_context)
        {
            BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") <<
                                    boost::errinfo_api_function("sws_getContext"));
        }

        safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
        avcodec_get_frame_defaults(av_frame.get());
        if(target_pix_fmt == PIX_FMT_BGRA)
        {
            auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);
            CASPAR_VERIFY(size == write->image_data().size());
        }
        else
        {
            av_frame->width  = width;
            av_frame->height = height;
            for(size_t n = 0; n < target_desc.planes.size(); ++n)
            {
                av_frame->data[n]     = write->image_data(n).begin();
                av_frame->linesize[n] = target_desc.planes[n].linesize;
            }
        }

        sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);
        pool.push(sws_context);

        write->commit();
    }
    else
    {
        write = frame_factory->create_frame(tag, desc, audio_channel_layout);
        write->set_type(get_mode(*decoded_frame));

        for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)
        {
            auto plane            = desc.planes[n];
            auto result           = write->image_data(n).begin();
            auto decoded          = decoded_frame->data[n];
            auto decoded_linesize = decoded_frame->linesize[n];

            CASPAR_ASSERT(decoded);
            CASPAR_ASSERT(write->image_data(n).begin());

            if(decoded_linesize != static_cast<int>(plane.linesize))
            {
                // Copy line by line since ffmpeg sometimes pads each line.
                tbb::parallel_for<size_t>(0, desc.planes[n].height, [&](size_t y)
                {
                    fast_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);
                });
            }
            else
            {
                fast_memcpy(result, decoded, plane.size);
            }

            write->commit(n);
        }
    }

    if(decoded_frame->height == 480) // NTSC DV
    {
        write->get_frame_transform().fill_translation[1] += 2.0/static_cast<double>(frame_factory->get_video_format_desc().height);
        write->get_frame_transform().fill_scale[1] = 1.0 - 6.0*1.0/static_cast<double>(frame_factory->get_video_format_desc().height);
    }

    // Fix field-order if needed
    if(write->get_type() == core::field_mode::lower && frame_factory->get_video_format_desc().field_mode == core::field_mode::upper)
        write->get_frame_transform().fill_translation[1] += 1.0/static_cast<double>(frame_factory->get_video_format_desc().height);
    else if(write->get_type() == core::field_mode::upper && frame_factory->get_video_format_desc().field_mode == core::field_mode::lower)
        write->get_frame_transform().fill_translation[1] -= 1.0/static_cast<double>(frame_factory->get_video_format_desc().height);

    return make_safe_ptr(write);
}

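// A time base is considered sane if the resulting frame rate falls within roughly
// 20-65 fps, i.e. the range of common broadcast frame rates.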
bool is_sane_fps(AVRational time_base)
{
    double fps = static_cast<double>(time_base.den) / static_cast<double>(time_base.num);
    return fps > 20.0 && fps < 65.0;
}

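// Heuristic repair of suspicious codec time bases: a numerator of 1 is scaled up to a
// power of ten (e.g. 1/50000 becomes 1000/50000 = 50 fps), and if the rate is still
// out of range it is halved once, in case the container reported a field rate rather
// than a frame rate.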
AVRational fix_time_base(AVRational time_base)
{
    if(time_base.num == 1)
        time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(time_base.den)))-1));

    if(!is_sane_fps(time_base))
    {
        auto tmp = time_base;
        tmp.den /= 2;
        if(is_sane_fps(tmp))
            time_base = tmp;
    }

    return time_base;
}

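// Determines the frame rate of a file by trying increasingly indirect sources: the
// stream's average frame rate, FLV metadata, the codec time base scaled by
// ticks_per_frame, and finally the audio duration divided by the video frame count.
// The result is snapped to the closest frame rate among the supported video formats.
// Returns fail_value when no video stream is present.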
double read_fps(AVFormatContext& context, double fail_value)
{
    auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);
    auto audio_index = av_find_best_stream(&context, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);

    if(video_index > -1)
    {
        const auto video_context = context.streams[video_index]->codec;
        const auto video_stream  = context.streams[video_index];

        auto frame_rate_time_base = video_stream->avg_frame_rate;
        std::swap(frame_rate_time_base.num, frame_rate_time_base.den);

        if(is_sane_fps(frame_rate_time_base))
        {
            return static_cast<double>(frame_rate_time_base.den) / static_cast<double>(frame_rate_time_base.num);
        }

        AVRational time_base = video_context->time_base;

        if(boost::filesystem2::path(context.filename).extension() == ".flv")
        {
            try
            {
                auto meta = read_flv_meta_info(context.filename);
                return boost::lexical_cast<double>(meta["framerate"]);
            }
            catch(...)
            {
                return 0.0;
            }
        }
        else
        {
            time_base.num *= video_context->ticks_per_frame;

            if(!is_sane_fps(time_base))
            {
                time_base = fix_time_base(time_base);

                if(!is_sane_fps(time_base) && audio_index > -1)
                {
                    auto& audio_context = *context.streams[audio_index]->codec;
                    auto& audio_stream  = *context.streams[audio_index];

                    double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);

                    time_base.num = static_cast<int>(duration_sec*100000.0);
                    time_base.den = static_cast<int>(video_stream->nb_frames*100000);
                }
            }
        }

        double fps = static_cast<double>(time_base.den) / static_cast<double>(time_base.num);

        double closest_fps = 0.0;
        for(int n = 0; n < core::video_format::count; ++n)
        {
            auto format = core::video_format_desc::get(static_cast<core::video_format::type>(n));

            double diff1 = std::abs(format.fps - fps);
            double diff2 = std::abs(closest_fps - fps);

            if(diff1 < diff2)
                closest_fps = format.fps;
        }

        return closest_fps;
    }

    return fail_value;
}

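// Allocates an AVPacket managed by a safe_ptr: the payload is released with
// av_free_packet() and the struct itself is deleted with the last reference.
// Typical read loop (caller-side sketch, names are illustrative):
//
//   auto packet = create_packet();
//   if(av_read_frame(format_context.get(), packet.get()) >= 0)
//       packet_queue.push(packet);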
safe_ptr<AVPacket> create_packet()
{
    safe_ptr<AVPacket> packet(new AVPacket, [](AVPacket* p)
    {
        av_free_packet(p);
        delete p;
    });

    av_init_packet(packet.get());
    return packet;
}

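// Finds the best stream of the requested type, opens its codec through the
// TBB-threaded wrapper and returns it with a deleter that closes the codec.
// The stream index is reported through the out parameter.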
safe_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index)
{
    AVCodec* decoder;
    index = THROW_ON_ERROR2(av_find_best_stream(&context, type, -1, -1, &decoder, 0), "");
    //if(strcmp(decoder->name, "prores") == 0 && decoder->next && strcmp(decoder->next->name, "prores_lgpl") == 0)
    //    decoder = decoder->next;

    THROW_ON_ERROR2(tbb_avcodec_open(context.streams[index]->codec, decoder), "");
    return safe_ptr<AVCodecContext>(context.streams[index]->codec, tbb_avcodec_close);
}

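// Formats a video mode string, doubling the rate for interlaced material, e.g.
// print_mode(1920, 1080, 25.0, true) yields L"1920x1080i50.00".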
std::wstring print_mode(size_t width, size_t height, double fps, bool interlaced)
{
    std::wostringstream fps_ss;
    fps_ss << std::fixed << std::setprecision(2) << (!interlaced ? fps : 2.0 * fps);

    return boost::lexical_cast<std::wstring>(width) + L"x" + boost::lexical_cast<std::wstring>(height) + (!interlaced ? L"p" : L"i") + fps_ss.str();
}

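// Decides whether a file is something the ffmpeg producer should try to open: known
// extensions are accepted or rejected outright, anything else is probed by handing
// the first 2 KB of the file to av_probe_input_format2().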
bool is_valid_file(const std::wstring filename, const std::vector<std::wstring>& invalid_exts)
{
    static std::vector<std::wstring> valid_exts = boost::assign::list_of(L".m2t")(L".mov")(L".mp4")(L".dv")(L".flv")(L".mpg")(L".wav")(L".mp3")(L".dnxhd")(L".h264")(L".prores");

    auto ext = boost::to_lower_copy(boost::filesystem::wpath(filename).extension());

    if(std::find(invalid_exts.begin(), invalid_exts.end(), ext) != invalid_exts.end())
        return false;

    if(std::find(valid_exts.begin(), valid_exts.end(), ext) != valid_exts.end())
        return true;

    auto filename2 = narrow(filename);

    if(boost::filesystem::path(filename2).extension() == ".m2t")
        return true;

    std::ifstream file(filename);

    std::vector<unsigned char> buf;
    for(auto file_it = std::istreambuf_iterator<char>(file); file_it != std::istreambuf_iterator<char>() && buf.size() < 2048; ++file_it)
        buf.push_back(*file_it);

    if(buf.empty())
        return false;

    AVProbeData pb;
    pb.filename = filename2.c_str();
    pb.buf      = buf.data();
    pb.buf_size = buf.size();

    int score = 0;
    return av_probe_input_format2(&pb, true, &score) != nullptr;
}

bool is_valid_file(const std::wstring filename)
{
    static const std::vector<std::wstring> invalid_exts = boost::assign::list_of(L".png")(L".tga")(L".bmp")(L".jpg")(L".jpeg")(L".gif")(L".tiff")(L".tif")(L".jp2")(L".jpx")(L".j2k")(L".j2c")(L".swf")(L".ct");

    return is_valid_file(filename, invalid_exts);
}

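// Reads the container duration and converts it into a frame count and time base
// using the detected frame rate. probesize and max_analyze_duration are reduced to
// keep the probe cheap. Returns false if the file cannot be opened or no stream
// info is found.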
bool try_get_duration(const std::wstring filename, std::int64_t& duration, boost::rational<std::int64_t>& time_base)
{
    AVFormatContext* weak_context = nullptr;
    if(avformat_open_input(&weak_context, narrow(filename).c_str(), nullptr, nullptr) < 0)
        return false;

    std::shared_ptr<AVFormatContext> context(weak_context, av_close_input_file);

    context->probesize = context->probesize / 10;
    context->max_analyze_duration = context->probesize / 10;

    if(avformat_find_stream_info(context.get(), nullptr) < 0)
        return false;

    const auto fps = read_fps(*context, 1.0);

    const auto rational_fps = boost::rational<std::int64_t>(static_cast<int>(fps * AV_TIME_BASE), AV_TIME_BASE);

    duration = boost::rational_cast<std::int64_t>(context->duration * rational_fps / AV_TIME_BASE);
    time_base = 1/rational_fps;

    return true;
}

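// Searches the parent directory of 'stem' for a file with the same basename and a
// playable extension, returning the full path of the first match or an empty string.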
std::wstring probe_stem(const std::wstring stem, const std::vector<std::wstring>& invalid_exts)
{
    auto stem2 = boost::filesystem2::wpath(stem);
    auto dir = stem2.parent_path();
    for(auto it = boost::filesystem2::wdirectory_iterator(dir); it != boost::filesystem2::wdirectory_iterator(); ++it)
    {
        if(boost::iequals(it->path().stem(), stem2.filename()) && is_valid_file(it->path().file_string(), invalid_exts))
            return it->path().file_string();
    }
    return L"";
}

std::wstring probe_stem(const std::wstring stem)
{
    auto stem2 = boost::filesystem2::wpath(stem);
    auto dir = stem2.parent_path();
    for(auto it = boost::filesystem2::wdirectory_iterator(dir); it != boost::filesystem2::wdirectory_iterator(); ++it)
    {
        if(boost::iequals(it->path().stem(), stem2.filename()) && is_valid_file(it->path().file_string()))
            return it->path().file_string();
    }
    return L"";
}

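// Resolves the CasparCG channel layout for an audio stream: an explicit custom
// channel order takes precedence; otherwise a few common FFmpeg layouts are mapped
// to named presets and everything else falls back to an unspecified layout with the
// same channel count.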
core::channel_layout get_audio_channel_layout(
        const AVCodecContext& context, const std::wstring& custom_channel_order)
{
    if (!custom_channel_order.empty())
    {
        auto layout = core::create_custom_channel_layout(
                custom_channel_order,
                core::default_channel_layout_repository());

        layout.num_channels = context.channels;

        return layout;
    }

    int64_t ch_layout = context.channel_layout;

    if (ch_layout == 0)
        ch_layout = av_get_default_channel_layout(context.channels);

    switch (ch_layout) // TODO: refine this auto-detection
    {
    case AV_CH_LAYOUT_MONO:
        return core::default_channel_layout_repository().get_by_name(L"MONO");
    case AV_CH_LAYOUT_STEREO:
        return core::default_channel_layout_repository().get_by_name(L"STEREO");
    case AV_CH_LAYOUT_5POINT1:
    case AV_CH_LAYOUT_5POINT1_BACK:
        return core::default_channel_layout_repository().get_by_name(L"SMPTE");
    case AV_CH_LAYOUT_7POINT1:
        return core::default_channel_layout_repository().get_by_name(L"DOLBYE");
    }

    return core::create_unspecified_layout(context.channels);
}

//
//void av_dup_frame(AVFrame* frame)
//{
//    AVFrame* new_frame = avcodec_alloc_frame();
//
//
//    const uint8_t *src_data[4] = {0};
//    memcpy(const_cast<uint8_t**>(&src_data[0]), frame->data, 4);
//    const int src_linesizes[4] = {0};
//    memcpy(const_cast<int*>(&src_linesizes[0]), frame->linesize, 4);
//
//    av_image_alloc(new_frame->data, new_frame->linesize, new_frame->width, new_frame->height, frame->format, 16);
//
//    av_image_copy(new_frame->data, new_frame->linesize, src_data, src_linesizes, frame->format, new_frame->width, new_frame->height);
//
//    frame =
//}

}}