/*
 * Copyright (c) 2011 Sveriges Television AB <info@casparcg.com>
 *
 * This file is part of CasparCG (www.casparcg.com).
 *
 * CasparCG is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * CasparCG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with CasparCG. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Robert Nagy, ronag89@gmail.com
 */

#include "../../stdafx.h"

#include "util.h"

#include "flv.h"

#include "../tbb_avcodec.h"
#include "../../ffmpeg_error.h"

#include <tbb/concurrent_unordered_map.h>
#include <tbb/concurrent_queue.h>

#include <core/frame/frame_transform.h>
#include <core/frame/frame_factory.h>
#include <core/frame/frame.h>
#include <core/producer/frame_producer.h>

#include <common/except.h>
#include <common/array.h>

#include <tbb/parallel_for.h>

#include <common/assert.h>
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>

#include <asmlib.h>

#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
    #include <libswscale/swscale.h>
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar { namespace ffmpeg {

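// The following helpers return process-wide singleton buffers/frames/packets.
// They are presumably used as sentinel values (compared by pointer identity)
// to signal flush and end-of-stream conditions through the decoder pipelines.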
std::shared_ptr<core::audio_buffer> flush_audio()
{
    static std::shared_ptr<core::audio_buffer> audio(new core::audio_buffer());
    return audio;
}

std::shared_ptr<core::audio_buffer> empty_audio()
{
    static std::shared_ptr<core::audio_buffer> audio(new core::audio_buffer());
    return audio;
}

std::shared_ptr<AVFrame> flush_video()
{
    static std::shared_ptr<AVFrame> video(avcodec_alloc_frame(), av_free);
    return video;
}

std::shared_ptr<AVFrame> empty_video()
{
    static std::shared_ptr<AVFrame> video(avcodec_alloc_frame(), av_free);
    return video;
}

spl::shared_ptr<AVPacket> flush_packet()
{
    static spl::shared_ptr<AVPacket> pkt(new AVPacket());
    return pkt;
}

spl::shared_ptr<AVPacket> null_packet()
{
    static spl::shared_ptr<AVPacket> pkt(new AVPacket());
    return pkt;
}

core::field_mode get_mode(const AVFrame& frame)
{
    if(!frame.interlaced_frame)
        return core::field_mode::progressive;

    return frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;
}

core::pixel_format get_pixel_format(PixelFormat pix_fmt)
{
    switch(pix_fmt)
    {
    case PIX_FMT_GRAY8:    return core::pixel_format::gray;
    case PIX_FMT_RGB24:    return core::pixel_format::rgb;
    case PIX_FMT_BGR24:    return core::pixel_format::bgr;
    case PIX_FMT_BGRA:     return core::pixel_format::bgra;
    case PIX_FMT_ARGB:     return core::pixel_format::argb;
    case PIX_FMT_RGBA:     return core::pixel_format::rgba;
    case PIX_FMT_ABGR:     return core::pixel_format::abgr;
    case PIX_FMT_YUV444P:  return core::pixel_format::ycbcr;
    case PIX_FMT_YUV422P:  return core::pixel_format::ycbcr;
    case PIX_FMT_YUV420P:  return core::pixel_format::ycbcr;
    case PIX_FMT_YUV411P:  return core::pixel_format::ycbcr;
    case PIX_FMT_YUV410P:  return core::pixel_format::ycbcr;
    case PIX_FMT_YUVA420P: return core::pixel_format::ycbcra;
    default:               return core::pixel_format::invalid;
    }
}

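// pixel_format_desc() maps an FFmpeg pixel format onto the core description by
// letting avpicture_fill() compute the plane linesizes into a dummy AVPicture
// (with a null buffer) and, for planar YUV, deriving the chroma height from the
// distance between the U and V plane pointers. Worked example (illustrative,
// not from the source): for PIX_FMT_YUV420P at 1920x1080 this gives linesizes
// 1920/960/960 and a chroma height of 518400/960 = 540, i.e. planes of
// 1920x1080, 960x540 and 960x540.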
core::pixel_format_desc pixel_format_desc(PixelFormat pix_fmt, int width, int height)
{
    // Get linesizes
    AVPicture dummy_pict;
    avpicture_fill(&dummy_pict, nullptr, pix_fmt, width, height);

    core::pixel_format_desc desc = get_pixel_format(pix_fmt);

    switch(desc.format.value())
    {
    case core::pixel_format::gray:
    case core::pixel_format::luma:
        {
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
            return desc;
        }
    case core::pixel_format::bgr:
    case core::pixel_format::rgb:
        {
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/3, height, 3));
            return desc;
        }
    case core::pixel_format::bgra:
    case core::pixel_format::argb:
    case core::pixel_format::rgba:
    case core::pixel_format::abgr:
        {
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));
            return desc;
        }
    case core::pixel_format::ycbcr:
    case core::pixel_format::ycbcra:
        {
            // Find chroma height
            int size2 = static_cast<int>(dummy_pict.data[2] - dummy_pict.data[1]);
            int h2 = size2/dummy_pict.linesize[1];

            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));
            desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));

            if(desc.format == core::pixel_format::ycbcra)
                desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));
            return desc;
        }
    default:
        desc.format = core::pixel_format::invalid;
        return desc;
    }
}

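// make_frame() turns a decoded AVFrame into a core::mutable_frame. Formats that
// map directly onto a core::pixel_format are copied plane by plane (in parallel,
// one line at a time, since FFmpeg may pad each line). Everything else is
// converted with libswscale to BGRA or to a close planar YUV format; the
// SwsContexts are pooled per conversion so they can be reused across frames.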
core::mutable_frame make_frame(const void* tag, const spl::shared_ptr<AVFrame>& decoded_frame, double fps, core::frame_factory& frame_factory)
{
    static tbb::concurrent_unordered_map<int64_t, tbb::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;

    if(decoded_frame->width < 1 || decoded_frame->height < 1)
        return frame_factory.create_frame(tag, core::pixel_format_desc(core::pixel_format::invalid));

    const auto width  = decoded_frame->width;
    const auto height = decoded_frame->height;
    auto desc         = pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);

    if(desc.format == core::pixel_format::invalid)
    {
        auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);
        auto target_pix_fmt = PIX_FMT_BGRA;

        if(pix_fmt == PIX_FMT_UYVY422)
            target_pix_fmt = PIX_FMT_YUV422P;
        else if(pix_fmt == PIX_FMT_YUYV422)
            target_pix_fmt = PIX_FMT_YUV422P;
        else if(pix_fmt == PIX_FMT_UYYVYY411)
            target_pix_fmt = PIX_FMT_YUV411P;
        else if(pix_fmt == PIX_FMT_YUV420P10)
            target_pix_fmt = PIX_FMT_YUV420P;
        else if(pix_fmt == PIX_FMT_YUV422P10)
            target_pix_fmt = PIX_FMT_YUV422P;
        else if(pix_fmt == PIX_FMT_YUV444P10)
            target_pix_fmt = PIX_FMT_YUV444P;

        auto target_desc = pixel_format_desc(target_pix_fmt, width, height);

        auto write = frame_factory.create_frame(tag, target_desc);

        std::shared_ptr<SwsContext> sws_context;

        //CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";

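        // The SwsContext pool is keyed on a 64-bit value packing the conversion
        // parameters: bits 32-47 hold the width, bits 16-31 the height,
        // bits 8-15 the source pixel format and bits 0-7 the target pixel format.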
        int64_t key = ((static_cast<int64_t>(width)          << 32) & 0xFFFF00000000) |
                      ((static_cast<int64_t>(height)         << 16) & 0xFFFF0000) |
                      ((static_cast<int64_t>(pix_fmt)        <<  8) & 0xFF00) |
                      ((static_cast<int64_t>(target_pix_fmt) <<  0) & 0xFF);

        auto& pool = sws_contexts_[key];

        if(!pool.try_pop(sws_context))
        {
            double param;
            sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, target_pix_fmt, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);
        }

        if(!sws_context)
        {
            CASPAR_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") <<
                                   boost::errinfo_api_function("sws_getContext"));
        }

        spl::shared_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
        avcodec_get_frame_defaults(av_frame.get());
        if(target_pix_fmt == PIX_FMT_BGRA)
        {
            auto size = avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write.image_data(0).begin(), PIX_FMT_BGRA, width, height);
            CASPAR_VERIFY(size == write.image_data(0).size());
        }
        else
        {
            av_frame->width  = width;
            av_frame->height = height;
            for(int n = 0; n < static_cast<int>(target_desc.planes.size()); ++n)
            {
                av_frame->data[n]     = write.image_data(n).begin();
                av_frame->linesize[n] = target_desc.planes[n].linesize;
            }
        }

        sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);
        pool.push(sws_context);

        return std::move(write);
    }
    else
    {
        auto write = frame_factory.create_frame(tag, desc);

        for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)
        {
            auto plane            = desc.planes[n];
            auto result           = write.image_data(n).begin();
            auto decoded          = decoded_frame->data[n];
            auto decoded_linesize = decoded_frame->linesize[n];

            CASPAR_ASSERT(decoded);
            CASPAR_ASSERT(write.image_data(n).begin());

            // Copy line by line since ffmpeg sometimes pads each line.
            tbb::affinity_partitioner ap;
            tbb::parallel_for(tbb::blocked_range<int>(0, desc.planes[n].height), [&](const tbb::blocked_range<int>& r)
            {
                for(int y = r.begin(); y != r.end(); ++y)
                    A_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);
            }, ap);
        }

        return std::move(write);
    }
}

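// The make_av_frame() overloads wrap existing image planes in an AVFrame without
// copying: the AVFrame only borrows the data pointers and linesizes, so it should
// not outlive the frame that owns the memory. For ycbcr input the FFmpeg pixel
// format is deduced from the luma/chroma plane geometry.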
spl::shared_ptr<AVFrame> make_av_frame(core::mutable_frame& frame)
{
    std::array<uint8_t*, 4> data = {};
    for(int n = 0; n < static_cast<int>(frame.pixel_format_desc().planes.size()); ++n)
        data[n] = frame.image_data(n).begin();

    return make_av_frame(data, frame.pixel_format_desc());
}

spl::shared_ptr<AVFrame> make_av_frame(std::array<uint8_t*, 4> data, const core::pixel_format_desc& pix_desc)
{
    spl::shared_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);
    avcodec_get_frame_defaults(av_frame.get());

    auto planes = pix_desc.planes;
    auto format = pix_desc.format.value();

    av_frame->width  = planes[0].width;
    av_frame->height = planes[0].height;
    for(int n = 0; n < static_cast<int>(planes.size()); ++n)
    {
        av_frame->data[n]     = data[n];
        av_frame->linesize[n] = planes[n].linesize;
    }

    switch(format)
    {
    case core::pixel_format::rgb:
        av_frame->format = PIX_FMT_RGB24;
        break;
    case core::pixel_format::bgr:
        av_frame->format = PIX_FMT_BGR24;
        break;
    case core::pixel_format::rgba:
        av_frame->format = PIX_FMT_RGBA;
        break;
    case core::pixel_format::argb:
        av_frame->format = PIX_FMT_ARGB;
        break;
    case core::pixel_format::bgra:
        av_frame->format = PIX_FMT_BGRA;
        break;
    case core::pixel_format::abgr:
        av_frame->format = PIX_FMT_ABGR;
        break;
    case core::pixel_format::gray:
        av_frame->format = PIX_FMT_GRAY8;
        break;
    case core::pixel_format::ycbcr:
    {
        // Deduce chroma subsampling from the plane dimensions.
        int y_w = planes[0].width;
        int y_h = planes[0].height;
        int c_w = planes[1].width;
        int c_h = planes[1].height;

        if(c_h == y_h && c_w == y_w)
            av_frame->format = PIX_FMT_YUV444P;
        else if(c_h == y_h && c_w*2 == y_w)
            av_frame->format = PIX_FMT_YUV422P;
        else if(c_h == y_h && c_w*4 == y_w)
            av_frame->format = PIX_FMT_YUV411P;
        else if(c_h*2 == y_h && c_w*2 == y_w)
            av_frame->format = PIX_FMT_YUV420P;
        else if(c_h*2 == y_h && c_w*4 == y_w)
            av_frame->format = PIX_FMT_YUV410P;

        break;
    }
    case core::pixel_format::ycbcra:
        av_frame->format = PIX_FMT_YUVA420P;
        break;
    }
    return av_frame;
}

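// is_sane_fps() treats den/num as a frame rate and accepts the 20-65 fps range.
// fix_time_base() repairs degenerate time bases: a numerator of 1 is rescaled to
// a power of ten one order of magnitude below the denominator, and the
// denominator is halved if that brings the rate into range. Worked example
// (illustrative): {1, 50000} becomes {1000, 50000}, i.e. 50 fps.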
bool is_sane_fps(AVRational time_base)
{
    double fps = static_cast<double>(time_base.den) / static_cast<double>(time_base.num);
    return fps > 20.0 && fps < 65.0;
}

AVRational fix_time_base(AVRational time_base)
{
    if(time_base.num == 1)
        time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(time_base.den)))-1));

    if(!is_sane_fps(time_base))
    {
        auto tmp = time_base;
        tmp.den /= 2;
        if(is_sane_fps(tmp))
            time_base = tmp;
    }

    return time_base;
}

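// read_fps() answers .flv files from the FLV metadata; for everything else it
// starts from the codec time base (scaled by ticks_per_frame), repairs it with
// fix_time_base() and, as a last resort, estimates it from the audio duration
// and the video frame count. The result is snapped to the closest frame rate
// among the known core::video_format descriptions; fail_value is returned only
// when no video stream is present.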
double read_fps(AVFormatContext& context, double fail_value)
{
    auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);
    auto audio_index = av_find_best_stream(&context, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);

    if(video_index > -1)
    {
        const auto video_context = context.streams[video_index]->codec;
        const auto video_stream  = context.streams[video_index];

        AVRational time_base = video_context->time_base;

        if(boost::filesystem::path(context.filename).extension().string() == ".flv")
        {
            try
            {
                auto meta = read_flv_meta_info(context.filename);
                return boost::lexical_cast<double>(meta["framerate"]);
            }
            catch(...)
            {
                return 0.0;
            }
        }
        else
        {
            time_base.num *= video_context->ticks_per_frame;

            if(!is_sane_fps(time_base))
            {
                time_base = fix_time_base(time_base);

                if(!is_sane_fps(time_base) && audio_index > -1)
                {
                    auto& audio_context = *context.streams[audio_index]->codec;
                    auto& audio_stream  = *context.streams[audio_index];

                    double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);

                    time_base.num = static_cast<int>(duration_sec*100000.0);
                    time_base.den = static_cast<int>(video_stream->nb_frames*100000);
                }
            }
        }

        double fps = static_cast<double>(time_base.den) / static_cast<double>(time_base.num);

        double closest_fps = 0.0;
        for(int n = 0; n < core::video_format::count; ++n)
        {
            auto format = core::video_format_desc(core::video_format(n));

            double diff1 = std::abs(format.fps - fps);
            double diff2 = std::abs(closest_fps - fps);

            if(diff1 < diff2)
                closest_fps = format.fps;
        }

        return closest_fps;
    }

    return fail_value;
}

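// fix_meta_data() fills in video_stream->nb_frames when the container did not
// provide it: for .flv from duration x framerate in the FLV metadata, otherwise
// as the stream duration divided by the frame duration
// (codec time base x ticks_per_frame), converted via the stream time base.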
void fix_meta_data(AVFormatContext& context)
{
    auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);

    if(video_index > -1)
    {
        auto video_stream  = context.streams[video_index];
        auto video_context = context.streams[video_index]->codec;

        if(boost::filesystem::path(context.filename).extension().string() == ".flv")
        {
            try
            {
                auto meta = read_flv_meta_info(context.filename);
                double fps = boost::lexical_cast<double>(meta["framerate"]);
                video_stream->nb_frames = static_cast<int64_t>(boost::lexical_cast<double>(meta["duration"])*fps);
            }
            catch(...){}
        }
        else
        {
            auto stream_time = video_stream->time_base;
            auto duration    = video_stream->duration;
            auto codec_time  = video_context->time_base;
            auto ticks       = video_context->ticks_per_frame;

            if(video_stream->nb_frames == 0)
                video_stream->nb_frames = (duration*stream_time.num*codec_time.den)/(stream_time.den*codec_time.num*ticks);
        }
    }
}

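// create_packet() returns an AVPacket whose deleter releases the packet payload
// with av_free_packet() before freeing the struct itself, so packets filled by
// av_read_frame() are cleaned up automatically when the last reference goes away.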
spl::shared_ptr<AVPacket> create_packet()
{
    spl::shared_ptr<AVPacket> packet(new AVPacket, [](AVPacket* p)
    {
        av_free_packet(p);
        delete p;
    });

    av_init_packet(packet.get());
    return packet;
}

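// open_codec() picks the best stream of the requested type, opens its codec via
// tbb_avcodec_open() and returns the codec context with tbb_avcodec_close() as
// deleter. The context itself is still owned by the AVFormatContext; only the
// open/close lifetime is managed here. Minimal usage sketch (illustrative, file
// name assumed):
//
//   int video_index = -1;
//   auto format_ctx = open_input(L"clip.mov");
//   auto codec_ctx  = open_codec(*format_ctx, AVMEDIA_TYPE_VIDEO, video_index);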
spl::shared_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index)
{
    AVCodec* decoder;
    index = THROW_ON_ERROR2(av_find_best_stream(&context, type, -1, -1, &decoder, 0), "");
    //if(strcmp(decoder->name, "prores") == 0 && decoder->next && strcmp(decoder->next->name, "prores_lgpl") == 0)
    //    decoder = decoder->next;

    THROW_ON_ERROR2(tbb_avcodec_open(context.streams[index]->codec, decoder), "");
    return spl::shared_ptr<AVCodecContext>(context.streams[index]->codec, tbb_avcodec_close);
}

spl::shared_ptr<AVFormatContext> open_input(const std::wstring& filename)
{
    AVFormatContext* weak_context = nullptr;
    THROW_ON_ERROR2(avformat_open_input(&weak_context, u8(filename).c_str(), nullptr, nullptr), filename);
    spl::shared_ptr<AVFormatContext> context(weak_context, av_close_input_file);
    THROW_ON_ERROR2(avformat_find_stream_info(weak_context, nullptr), filename);
    fix_meta_data(*context);
    return context;
}

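// Example (illustrative): print_mode(1920, 1080, 25.0, true) yields
// L"1920x1080i50.00" -- interlaced modes are reported at field rate.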
std::wstring print_mode(int width, int height, double fps, bool interlaced)
{
    std::wostringstream fps_ss;
    fps_ss << std::fixed << std::setprecision(2) << (!interlaced ? fps : 2.0 * fps);

    return boost::lexical_cast<std::wstring>(width) + L"x" + boost::lexical_cast<std::wstring>(height) + (!interlaced ? L"p" : L"i") + fps_ss.str();
}

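// is_valid_file() rejects known still-image extensions, accepts known media
// extensions, and otherwise asks libavformat: first av_probe_input_format2()
// on the file name alone, then on the first 1 KiB of the file contents.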
bool is_valid_file(const std::wstring filename)
{
    static const std::vector<std::wstring> invalid_exts = boost::assign::list_of(L".png")(L".tga")(L".bmp")(L".jpg")(L".jpeg")(L".gif")(L".tiff")(L".tif")(L".jp2")(L".jpx")(L".j2k")(L".j2c");
    static const std::vector<std::wstring> valid_exts   = boost::assign::list_of(L".m2t")(L".mov")(L".mp4")(L".dv")(L".flv")(L".mpg")(L".wav")(L".mp3")(L".dnxhd")(L".h264")(L".prores");

    auto ext = boost::to_lower_copy(boost::filesystem::path(filename).extension().wstring());

    if(std::find(valid_exts.begin(), valid_exts.end(), ext) != valid_exts.end())
        return true;

    if(std::find(invalid_exts.begin(), invalid_exts.end(), ext) != invalid_exts.end())
        return false;

    auto u8filename = u8(filename);

    int score = 0;
    AVProbeData pb = {};
    pb.filename = u8filename.c_str();

    if(av_probe_input_format2(&pb, false, &score) != nullptr)
        return true;

    std::ifstream file(filename);

    std::vector<unsigned char> buf;
    for(auto file_it = std::istreambuf_iterator<char>(file); file_it != std::istreambuf_iterator<char>() && buf.size() < 1024; ++file_it)
        buf.push_back(*file_it);

    if(buf.empty())
        return false;

    pb.buf      = buf.data();
    pb.buf_size = static_cast<int>(buf.size());

    return av_probe_input_format2(&pb, true, &score) != nullptr;
}

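// probe_stem() resolves a path given without its extension to an actual media
// file by scanning the parent directory for a case-insensitive stem match that
// passes is_valid_file(); it returns an empty string when nothing matches.
// Illustrative call (file name assumed): probe_stem(L"media/AMB") could return
// L"media/AMB.mov".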
std::wstring probe_stem(const std::wstring stem)
{
    auto stem2 = boost::filesystem::path(stem);
    auto dir = stem2.parent_path();
    for(auto it = boost::filesystem::directory_iterator(dir); it != boost::filesystem::directory_iterator(); ++it)
    {
        if(boost::iequals(it->path().stem().wstring(), stem2.filename().wstring()) && is_valid_file(it->path().wstring()))
            return it->path().wstring();
    }
    return L"";
}

//void av_dup_frame(AVFrame* frame)
//{
//    AVFrame* new_frame = avcodec_alloc_frame();
//
//    const uint8_t *src_data[4] = {0};
//    memcpy(const_cast<uint8_t**>(&src_data[0]), frame->data, 4);
//    const int src_linesizes[4] = {0};
//    memcpy(const_cast<int*>(&src_linesizes[0]), frame->linesize, 4);
//
//    av_image_alloc(new_frame->data, new_frame->linesize, new_frame->width, new_frame->height, frame->format, 16);
//
//    av_image_copy(new_frame->data, new_frame->linesize, src_data, src_linesizes, frame->format, new_frame->width, new_frame->height);
//
//    frame =
//}

}}