git.sesse.net Git - casparcg/commitdiff
git-svn-id: https://casparcg.svn.sourceforge.net/svnroot/casparcg/server/branches...
author ronag <ronag@362d55ac-95cf-4e76-9f9a-cbaa9c17b72d>
Thu, 27 Oct 2011 21:10:55 +0000 (21:10 +0000)
committer ronag <ronag@362d55ac-95cf-4e76-9f9a-cbaa9c17b72d>
Thu, 27 Oct 2011 21:10:55 +0000 (21:10 +0000)
modules/ffmpeg/producer/util/flv.cpp [new file with mode: 0644]
modules/ffmpeg/producer/util/flv.h [new file with mode: 0644]
modules/ffmpeg/producer/util/util.cpp [new file with mode: 0644]
modules/ffmpeg/producer/util/util.h [new file with mode: 0644]

diff --git a/modules/ffmpeg/producer/util/flv.cpp b/modules/ffmpeg/producer/util/flv.cpp
new file mode 100644 (file)
index 0000000..37b1977
--- /dev/null
@@ -0,0 +1,85 @@
+#include "../../stdafx.h"\r
+\r
+#include "flv.h"\r
+\r
+#include <common/exception/exceptions.h>\r
+\r
+#include <boost/filesystem.hpp>\r
+\r
+#include <boost/lexical_cast.hpp>\r
+\r
+#include <algorithm>\r
+#include <fstream>\r
+#include <map>\r
+#include <string>\r
+#include <vector>\r
+\r
+namespace caspar { namespace ffmpeg {\r
+       \r
+std::map<std::string, std::string> read_flv_meta_info(const std::string& filename)\r
+{\r
+       std::map<std::string, std::string>  values;\r
+\r
+       if(boost::filesystem2::path(filename).extension() != ".flv")\r
+               return values;\r
+       \r
+       try\r
+       {\r
+               if(!boost::filesystem2::exists(filename))\r
+                       BOOST_THROW_EXCEPTION(caspar_exception());\r
+       \r
+               std::fstream fileStream(filename, std::fstream::in | std::fstream::binary);\r
+               \r
+               std::vector<char> bytes2(256);\r
+               fileStream.read(bytes2.data(), bytes2.size());\r
+\r
+               auto ptr = bytes2.data();\r
+               \r
+               ptr += 27;\r
+                                               \r
+               if(std::string(ptr, ptr+10) == "onMetaData")\r
+               {\r
+                       ptr += 16;\r
+\r
+                       for(int n = 0; n < 16; ++n)\r
+                       {\r
+                               char name_size = *ptr++;\r
+\r
+                               if(name_size == 0)\r
+                                       break;\r
+\r
+                               auto name = std::string(ptr, ptr + name_size);\r
+                               ptr += name_size;\r
+\r
+                               char data_type = *ptr++;\r
+                               switch(data_type)\r
+                               {\r
+                               case 0: // double\r
+                                       {\r
+                                               static_assert(sizeof(double) == 8, "");\r
+                                               std::reverse(ptr, ptr+8);\r
+                                               values[name] = boost::lexical_cast<std::string>(*(double*)(ptr));\r
+                                               ptr += 9;\r
+\r
+                                               break;\r
+                                       }\r
+                               case 1: // bool\r
+                                       {\r
+                                               values[name] = boost::lexical_cast<std::string>(*ptr != 0);\r
+                                               ptr += 2;\r
+\r
+                                               break;\r
+                                       }\r
+                               }\r
+                       }\r
+               }\r
+       }\r
+       catch(...)\r
+       {\r
+               CASPAR_LOG_CURRENT_EXCEPTION();\r
+       }\r
+\r
+    return values;\r
+}\r
+\r
+}}
\ No newline at end of file
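
The magic numbers in read_flv_meta_info() above follow directly from the fixed FLV/AMF0 layout. A sketch of that arithmetic, assuming a standard 9-byte FLV header and property names shorter than 256 characters (the named constants are illustrative, not part of the commit):

    // Offsets assumed by read_flv_meta_info() (illustrative only):
    const int flv_header_size        = 9;  // "FLV", version, flags, DataOffset
    const int previous_tag_size0     = 4;  // PreviousTagSize0
    const int tag_header_size        = 11; // header of the first (script-data) tag
    const int amf_string_header_size = 3;  // AMF0 string marker 0x02 + 2-byte length
    // 9 + 4 + 11 + 3 = 27, hence ptr += 27 before comparing against "onMetaData".
    //
    // ptr += 16 then skips "onMetaData" (10 bytes), the ECMA-array marker (1),
    // the 4-byte property count and the high byte of the first name length, so
    // the loop reads the low byte as name_size. Per property, a double is 8
    // data bytes plus the next name's high length byte (ptr += 9); a bool is
    // 1 data byte plus that high byte (ptr += 2).
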
diff --git a/modules/ffmpeg/producer/util/flv.h b/modules/ffmpeg/producer/util/flv.h
new file mode 100644 (file)
index 0000000..c104f5b
--- /dev/null
@@ -0,0 +1,10 @@
+#pragma once\r
+\r
+#include <map>\r
+#include <string>\r
+\r
+namespace caspar { namespace ffmpeg {\r
+       \r
+std::map<std::string, std::string> read_flv_meta_info(const std::string& filename);\r
+\r
+}}
\ No newline at end of file
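
A minimal usage sketch for the function declared above (the print_flv_info helper is hypothetical; "framerate" and "duration" are the keys that fix_meta_data() in util.cpp below actually looks up, and keys missing from the file simply read back as empty strings):

    #include "flv.h"

    #include <iostream>
    #include <string>

    void print_flv_info(const std::string& filename)
    {
        auto meta = caspar::ffmpeg::read_flv_meta_info(filename);
        std::cout << "framerate: " << meta["framerate"] << "\n"
                  << "duration:  " << meta["duration"]  << "\n";
    }
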
diff --git a/modules/ffmpeg/producer/util/util.cpp b/modules/ffmpeg/producer/util/util.cpp
new file mode 100644 (file)
index 0000000..1f31c96
--- /dev/null
@@ -0,0 +1,401 @@
+#include "../../stdafx.h"\r
+\r
+#include "util.h"\r
+\r
+#include "../ffmpeg_error.h"\r
+#include "flv.h"\r
+\r
+#include <concurrent_unordered_map.h>\r
+#include <concurrent_queue.h>\r
+\r
+#include <core/producer/frame/frame_transform.h>\r
+#include <core/producer/frame/frame_factory.h>\r
+#include <core/producer/frame_producer.h>\r
+#include <core/mixer/write_frame.h>\r
+\r
+#include <common/exception/exceptions.h>\r
+#include <common/utility/assert.h>\r
+#include <common/memory/memcpy.h>\r
+\r
+#include <ppl.h>\r
+\r
+#include <boost/filesystem.hpp>\r
+#include <boost/lexical_cast.hpp>\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning (push)\r
+#pragma warning (disable : 4244)\r
+#endif\r
+extern "C" \r
+{\r
+       #include <libswscale/swscale.h>\r
+       #include <libavcodec/avcodec.h>\r
+       #include <libavformat/avformat.h>\r
+}\r
+#if defined(_MSC_VER)\r
+#pragma warning (pop)\r
+#endif\r
+\r
+namespace caspar { namespace ffmpeg {\r
+\r
+core::field_mode::type get_mode(AVFrame& frame)\r
+{\r
+       if(!frame.interlaced_frame)\r
+               return core::field_mode::progressive;\r
+\r
+       return frame.top_field_first ? core::field_mode::upper : core::field_mode::lower;\r
+}\r
+\r
+core::pixel_format::type get_pixel_format(PixelFormat pix_fmt)\r
+{\r
+       switch(pix_fmt)\r
+       {\r
+       case CASPAR_PIX_FMT_LUMA:       return core::pixel_format::luma;\r
+       case PIX_FMT_GRAY8:                     return core::pixel_format::gray;\r
+       case PIX_FMT_BGRA:                      return core::pixel_format::bgra;\r
+       case PIX_FMT_ARGB:                      return core::pixel_format::argb;\r
+       case PIX_FMT_RGBA:                      return core::pixel_format::rgba;\r
+       case PIX_FMT_ABGR:                      return core::pixel_format::abgr;\r
+       case PIX_FMT_YUV444P:           return core::pixel_format::ycbcr;\r
+       case PIX_FMT_YUV422P:           return core::pixel_format::ycbcr;\r
+       case PIX_FMT_YUV420P:           return core::pixel_format::ycbcr;\r
+       case PIX_FMT_YUV411P:           return core::pixel_format::ycbcr;\r
+       case PIX_FMT_YUV410P:           return core::pixel_format::ycbcr;\r
+       case PIX_FMT_YUVA420P:          return core::pixel_format::ycbcra;\r
+       default:                                        return core::pixel_format::invalid;\r
+       }\r
+}\r
+\r
+core::pixel_format_desc get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height)\r
+{\r
+       // Get linesizes\r
+       AVPicture dummy_pict;   \r
+       avpicture_fill(&dummy_pict, nullptr, pix_fmt == CASPAR_PIX_FMT_LUMA ? PIX_FMT_GRAY8 : pix_fmt, width, height);\r
+\r
+       core::pixel_format_desc desc;\r
+       desc.pix_fmt = get_pixel_format(pix_fmt);\r
+               \r
+       switch(desc.pix_fmt)\r
+       {\r
+       case core::pixel_format::gray:\r
+       case core::pixel_format::luma:\r
+               {\r
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));                                               \r
+                       return desc;\r
+               }\r
+       case core::pixel_format::bgra:\r
+       case core::pixel_format::argb:\r
+       case core::pixel_format::rgba:\r
+       case core::pixel_format::abgr:\r
+               {\r
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0]/4, height, 4));                                             \r
+                       return desc;\r
+               }\r
+       case core::pixel_format::ycbcr:\r
+       case core::pixel_format::ycbcra:\r
+               {               \r
+                       // Find chroma height\r
+                       size_t size2 = dummy_pict.data[2] - dummy_pict.data[1];\r
+                       size_t h2 = size2/dummy_pict.linesize[1];                       \r
+\r
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[0], height, 1));\r
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[1], h2, 1));\r
+                       desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[2], h2, 1));\r
+\r
+                       if(desc.pix_fmt == core::pixel_format::ycbcra)                                          \r
+                               desc.planes.push_back(core::pixel_format_desc::plane(dummy_pict.linesize[3], height, 1));       \r
+                       return desc;\r
+               }               \r
+       default:                \r
+               desc.pix_fmt = core::pixel_format::invalid;\r
+               return desc;\r
+       }\r
+}\r
+\r
+int make_alpha_format(int format)\r
+{\r
+       switch(get_pixel_format(static_cast<PixelFormat>(format)))\r
+       {\r
+       case core::pixel_format::luma:\r
+       case core::pixel_format::gray:\r
+       case core::pixel_format::invalid:\r
+               return format;\r
+       case core::pixel_format::ycbcr:\r
+       case core::pixel_format::ycbcra:\r
+               return CASPAR_PIX_FMT_LUMA;\r
+       default:\r
+       return PIX_FMT_GRAY8;\r
+       }\r
+}\r
+\r
+safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int hints)\r
+{                      \r
+       static Concurrency::concurrent_unordered_map<size_t, Concurrency::concurrent_queue<std::shared_ptr<SwsContext>>> sws_contexts_;\r
+       \r
+       if(decoded_frame->width < 1 || decoded_frame->height < 1)\r
+               return make_safe<core::write_frame>(tag);\r
+\r
+       const auto width  = decoded_frame->width;\r
+       const auto height = decoded_frame->height;\r
+       auto desc                 = get_pixel_format_desc(static_cast<PixelFormat>(decoded_frame->format), width, height);\r
+       \r
+       if(hints & core::frame_producer::ALPHA_HINT)\r
+               desc = get_pixel_format_desc(static_cast<PixelFormat>(make_alpha_format(decoded_frame->format)), width, height);\r
+\r
+       std::shared_ptr<core::write_frame> write;\r
+\r
+       if(desc.pix_fmt == core::pixel_format::invalid)\r
+       {\r
+               auto pix_fmt = static_cast<PixelFormat>(decoded_frame->format);\r
+\r
+               write = frame_factory->create_frame(tag, get_pixel_format_desc(PIX_FMT_BGRA, width, height));\r
+               write->set_type(get_mode(*decoded_frame));\r
+\r
+               std::shared_ptr<SwsContext> sws_context;\r
+\r
+               //CASPAR_LOG(warning) << "Hardware accelerated color transform not supported.";\r
+\r
+               size_t key = width << 20 | height << 8 | pix_fmt;\r
+                       \r
+               auto& pool = sws_contexts_[key];\r
+                                               \r
+               if(!pool.try_pop(sws_context))\r
+               {\r
+                       double param;\r
+                       sws_context.reset(sws_getContext(width, height, pix_fmt, width, height, PIX_FMT_BGRA, SWS_BILINEAR, nullptr, nullptr, &param), sws_freeContext);\r
+               }\r
+                       \r
+               if(!sws_context)\r
+               {\r
+                       BOOST_THROW_EXCEPTION(operation_failed() << msg_info("Could not create software scaling context.") << \r
+                                                                       boost::errinfo_api_function("sws_getContext"));\r
+               }       \r
+\r
+               // Use sws_scale when the provided colorspace has no hw-accel support.\r
+               safe_ptr<AVFrame> av_frame(avcodec_alloc_frame(), av_free);     \r
+               avcodec_get_frame_defaults(av_frame.get());                     \r
+               avpicture_fill(reinterpret_cast<AVPicture*>(av_frame.get()), write->image_data().begin(), PIX_FMT_BGRA, width, height);\r
+                \r
+               sws_scale(sws_context.get(), decoded_frame->data, decoded_frame->linesize, 0, height, av_frame->data, av_frame->linesize);      \r
+               pool.push(sws_context);\r
+\r
+               write->commit();\r
+       }\r
+       else\r
+       {\r
+               write = frame_factory->create_frame(tag, desc);\r
+               write->set_type(get_mode(*decoded_frame));\r
+\r
+               for(int n = 0; n < static_cast<int>(desc.planes.size()); ++n)\r
+               {\r
+                       auto plane            = desc.planes[n];\r
+                       auto result           = write->image_data(n).begin();\r
+                       auto decoded          = decoded_frame->data[n];\r
+                       auto decoded_linesize = decoded_frame->linesize[n];\r
+                       \r
+                       CASPAR_ASSERT(decoded);\r
+                       CASPAR_ASSERT(write->image_data(n).begin());\r
+\r
+                       if(decoded_linesize != static_cast<int>(plane.width))\r
+                       {\r
+                               // Copy line by line since ffmpeg sometimes pads each line.\r
+                               Concurrency::parallel_for<size_t>(0, desc.planes[n].height, [&](size_t y)\r
+                               {\r
+                                       fast_memcpy(result + y*plane.linesize, decoded + y*decoded_linesize, plane.linesize);\r
+                               });\r
+                       }\r
+                       else\r
+                       {\r
+                               fast_memcpy(result, decoded, plane.size);\r
+                       }\r
+\r
+                       write->commit(n);\r
+               }\r
+       }\r
+       \r
+       // Fix field-order if needed\r
+       if(write->get_type() == core::field_mode::lower && frame_factory->get_video_format_desc().field_mode == core::field_mode::upper)\r
+               write->get_frame_transform().fill_translation[1] += 1.0/static_cast<double>(frame_factory->get_video_format_desc().height);\r
+       else if(write->get_type() == core::field_mode::upper && frame_factory->get_video_format_desc().field_mode == core::field_mode::lower)\r
+               write->get_frame_transform().fill_translation[1] -= 1.0/static_cast<double>(frame_factory->get_video_format_desc().height);\r
+\r
+       return make_safe_ptr(write);\r
+}\r
+\r
+bool is_sane_fps(AVRational time_base)\r
+{\r
+       double fps = static_cast<double>(time_base.den) / static_cast<double>(time_base.num);\r
+       return fps > 20.0 && fps < 65.0;\r
+}\r
+\r
+void fix_meta_data(AVFormatContext& context)\r
+{\r
+       auto video_index = av_find_best_stream(&context, AVMEDIA_TYPE_VIDEO, -1, -1, 0, 0);\r
+       auto audio_index = av_find_best_stream(&context, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0);\r
+\r
+       if(video_index < 0)\r
+               return;\r
+\r
+       auto& video_context = *context.streams[video_index]->codec;\r
+       auto& video_stream  = *context.streams[video_index];\r
+                                               \r
+       if(boost::filesystem2::path(context.filename).extension() == ".flv")\r
+       {\r
+               try\r
+               {\r
+                       auto meta = read_flv_meta_info(context.filename);\r
+                       double fps = boost::lexical_cast<double>(meta["framerate"]);\r
+                       video_context.time_base.num = 1000000;\r
+                       video_context.time_base.den = static_cast<int>(fps*1000000.0);\r
+                       video_stream.nb_frames = static_cast<int64_t>(boost::lexical_cast<double>(meta["duration"])*fps);\r
+               }\r
+               catch(...){}\r
+       }\r
+       else\r
+       {\r
+               if(video_stream.nb_frames == 0)\r
+                       video_stream.nb_frames = video_stream.duration;\r
+\r
+               if(!is_sane_fps(video_context.time_base))\r
+               {                       \r
+                       if(video_context.time_base.num == 1)\r
+                               video_context.time_base.num = static_cast<int>(std::pow(10.0, static_cast<int>(std::log10(static_cast<float>(video_context.time_base.den)))-1));        \r
+\r
+                       if(!is_sane_fps(video_context.time_base) && audio_index > -1)\r
+                       {\r
+                               auto& audio_context = *context.streams[audio_index]->codec;\r
+                               auto& audio_stream  = *context.streams[audio_index];\r
+\r
+                               double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);\r
+                                                               \r
+                               video_context.time_base.num = static_cast<int>(duration_sec*100000.0);\r
+                               video_context.time_base.den = static_cast<int>(video_stream.nb_frames*100000);\r
+                       }\r
+               }\r
+\r
+               if(audio_index > -1) // Check for invalid double frame-rate\r
+               {\r
+                       auto& audio_context = *context.streams[audio_index]->codec;\r
+                       auto& audio_stream  = *context.streams[audio_index];\r
+\r
+                       double duration_sec = audio_stream.duration / static_cast<double>(audio_context.sample_rate);\r
+                       double fps = static_cast<double>(video_context.time_base.den) / static_cast<double>(video_context.time_base.num);\r
+\r
+                       double fps_nb_frames    = static_cast<double>(duration_sec*fps);\r
+                       double stream_nb_frames =  static_cast<double>(video_stream.nb_frames);\r
+                       double diff = std::abs(fps_nb_frames - stream_nb_frames*2.0);\r
+                       if(diff < fps_nb_frames*0.05)\r
+                               video_context.time_base.num *= 2;\r
+               }\r
+       }\r
+\r
+       double fps = static_cast<double>(video_context.time_base.den) / static_cast<double>(video_context.time_base.num);\r
+\r
+       double closest_fps = 0.0;\r
+       for(int n = 0; n < core::video_format::count; ++n)\r
+       {\r
+               auto format = core::video_format_desc::get(static_cast<core::video_format::type>(n));\r
+\r
+               double diff1 = std::abs(format.fps - fps);\r
+               double diff2 = std::abs(closest_fps - fps);\r
+\r
+               if(diff1 < diff2)\r
+                       closest_fps = format.fps;\r
+       }\r
+       \r
+       video_context.time_base.num = 1000000;\r
+       video_context.time_base.den = static_cast<int>(closest_fps*1000000.0);\r
+}\r
+\r
+safe_ptr<AVPacket> create_packet()\r
+{\r
+       safe_ptr<AVPacket> packet(new AVPacket, [](AVPacket* p)\r
+       {\r
+               av_free_packet(p);\r
+               delete p;\r
+       });\r
+       \r
+       av_init_packet(packet.get());\r
+       return packet;\r
+}\r
+\r
+safe_ptr<AVPacket> flush_packet(int index)\r
+{\r
+       static safe_ptr<AVPacket> packets[] = {create_packet(), create_packet(), create_packet(), create_packet(), create_packet(), create_packet()};\r
+       \r
+       auto& packet = packets[index];\r
+       packet->stream_index = index;\r
+\r
+       return packet;\r
+}\r
+\r
+safe_ptr<AVPacket> eof_packet(int index)\r
+{\r
+       static safe_ptr<AVPacket> packets[] = {create_packet(), create_packet(), create_packet(), create_packet(), create_packet(), create_packet()};\r
+       \r
+       auto& packet = packets[index];\r
+       packet->stream_index = index;\r
+\r
+       return packet;\r
+}\r
+\r
+safe_ptr<AVFrame> flush_video()\r
+{\r
+       static auto frame1 = safe_ptr<AVFrame>(avcodec_alloc_frame(), av_free);\r
+       return frame1;\r
+}\r
+\r
+safe_ptr<AVFrame> eof_video()\r
+{\r
+       static auto frame2 = safe_ptr<AVFrame>(avcodec_alloc_frame(), av_free);\r
+       return frame2;\r
+}\r
+\r
+safe_ptr<core::audio_buffer> flush_audio()\r
+{\r
+       static auto audio1 = safe_ptr<core::audio_buffer>();\r
+       return audio1;\r
+}\r
+\r
+safe_ptr<core::audio_buffer> eof_audio()\r
+{\r
+       static auto audio2 = safe_ptr<core::audio_buffer>();\r
+       return audio2;\r
+}\r
+\r
+safe_ptr<AVCodecContext> open_codec(AVFormatContext& context, enum AVMediaType type, int& index)\r
+{      \r
+       AVCodec* decoder;\r
+       index = THROW_ON_ERROR2(av_find_best_stream(&context, type, -1, -1, &decoder, 0), "");\r
+       THROW_ON_ERROR2(avcodec_open(context.streams[index]->codec, decoder), "");\r
+       return safe_ptr<AVCodecContext>(context.streams[index]->codec, avcodec_close);\r
+}\r
+\r
+safe_ptr<AVFormatContext> open_input(const std::wstring& filename)\r
+{\r
+       AVFormatContext* weak_context = nullptr;\r
+       THROW_ON_ERROR2(avformat_open_input(&weak_context, narrow(filename).c_str(), nullptr, nullptr), filename);\r
+       safe_ptr<AVFormatContext> context(weak_context, av_close_input_file);                   \r
+       THROW_ON_ERROR2(avformat_find_stream_info(weak_context, nullptr), filename);\r
+       fix_meta_data(*context);\r
+       return context;\r
+}\r
+//\r
+//void av_dup_frame(AVFrame* frame)\r
+//{\r
+//     AVFrame* new_frame = avcodec_alloc_frame();\r
+//\r
+//\r
+//     const uint8_t *src_data[4] = {0};\r
+//     memcpy(const_cast<uint8_t**>(&src_data[0]), frame->data, 4);\r
+//     const int src_linesizes[4] = {0};\r
+//     memcpy(const_cast<int*>(&src_linesizes[0]), frame->linesize, 4);\r
+//\r
+//     av_image_alloc(new_frame->data, new_frame->linesize, new_frame->width, new_frame->height, frame->format, 16);\r
+//\r
+//     av_image_copy(new_frame->data, new_frame->linesize, src_data, src_linesizes, frame->format, new_frame->width, new_frame->height);\r
+//\r
+//     frame =\r
+//}\r
+\r
+}}
\ No newline at end of file
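
The frame-rate repair in fix_meta_data() above reduces to simple arithmetic on the codec time base. A standalone sketch of that logic (plain C++, no ffmpeg calls; the sample values are made up):

    #include <cstdio>

    struct rational { int num, den; };

    // Mirrors is_sane_fps(): den/num must land inside the (20, 65) fps window.
    bool sane_fps(rational tb)
    {
        double fps = static_cast<double>(tb.den) / tb.num;
        return fps > 20.0 && fps < 65.0;
    }

    int main()
    {
        rational pal     = { 1, 25 };    // 25 fps                         -> sane
        rational mpeg_ts = { 1, 90000 }; // 90 kHz clock, not a frame rate -> not sane
        std::printf("%d %d\n", sane_fps(pal), sane_fps(mpeg_ts));

        // fix_meta_data() finally snaps to the closest CasparCG format rate and
        // stores it as a microsecond time base, e.g. 25 fps -> 1000000/25000000.
        double closest_fps = 25.0; // whatever the search over video_format_desc found
        rational fixed = { 1000000, static_cast<int>(closest_fps * 1000000.0) };
        std::printf("%d/%d\n", fixed.num, fixed.den);
        return 0;
    }
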
diff --git a/modules/ffmpeg/producer/util/util.h b/modules/ffmpeg/producer/util/util.h
new file mode 100644 (file)
index 0000000..7d76f35
--- /dev/null
@@ -0,0 +1,67 @@
+#pragma once\r
+\r
+#include <common/memory/safe_ptr.h>\r
+\r
+#include <core/video_format.h>\r
+#include <core/producer/frame/pixel_format.h>\r
+#include <core/mixer/audio/audio_mixer.h>\r
+\r
+\r
+#if defined(_MSC_VER)\r
+#pragma warning (push)\r
+#pragma warning (disable : 4244)\r
+#endif\r
+extern "C" \r
+{\r
+       #include <libavutil/pixfmt.h>\r
+       #include <libavcodec/avcodec.h>\r
+}\r
+#if defined(_MSC_VER)\r
+#pragma warning (pop)\r
+#endif\r
+\r
+struct AVFrame;\r
+struct AVFormatContext;\r
+struct AVPacket;\r
+\r
+namespace caspar {\r
+\r
+namespace core {\r
+\r
+struct pixel_format_desc;\r
+class write_frame;\r
+struct frame_factory;\r
+\r
+}\r
+\r
+namespace ffmpeg {\r
+       \r
+// Dataflow\r
+       \r
+safe_ptr<AVPacket>                             flush_packet(int index);\r
+safe_ptr<AVPacket>                             eof_packet(int index);\r
+safe_ptr<AVFrame>                              flush_video();\r
+safe_ptr<AVFrame>                              eof_video();\r
+safe_ptr<core::audio_buffer>   flush_audio();\r
+safe_ptr<core::audio_buffer>   eof_audio();\r
+\r
+// Utils\r
+\r
+static const PixelFormat       CASPAR_PIX_FMT_LUMA = PIX_FMT_MONOBLACK; // Just hijack some unusual pixel format.\r
+\r
+core::field_mode::type         get_mode(AVFrame& frame);\r
+core::pixel_format::type       get_pixel_format(PixelFormat pix_fmt);\r
+core::pixel_format_desc                get_pixel_format_desc(PixelFormat pix_fmt, size_t width, size_t height);\r
+int                                                    make_alpha_format(int format); // NOTE: Be careful about CASPAR_PIX_FMT_LUMA, change it to PIX_FMT_GRAY8 if you want to use the frame inside some ffmpeg function.\r
+safe_ptr<core::write_frame> make_write_frame(const void* tag, const safe_ptr<AVFrame>& decoded_frame, const safe_ptr<core::frame_factory>& frame_factory, int hints);\r
+\r
+void fix_meta_data(AVFormatContext& context);\r
+\r
+safe_ptr<AVPacket> create_packet();\r
+\r
+safe_ptr<AVCodecContext> open_codec(AVFormatContext& context,  enum AVMediaType type, int& index);\r
+safe_ptr<AVFormatContext> open_input(const std::wstring& filename);\r
+\r
+//void av_dup_frame(AVFrame* frame);\r
+\r
+}}
\ No newline at end of file
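
Taken together, the helpers declared above are meant to be combined roughly as follows (a sketch only, assuming safe_ptr dereferences like a smart pointer as it does elsewhere in the commit; the real producer code adds threading, the flush/eof sentinels and error logging on top of this):

    #include "util.h"

    void probe_file(const std::wstring& filename)
    {
        // open_input() also runs fix_meta_data(), so the time base is already sane here.
        auto format_context = caspar::ffmpeg::open_input(filename);

        int video_index = -1;
        auto video_codec = caspar::ffmpeg::open_codec(*format_context, AVMEDIA_TYPE_VIDEO, video_index);

        // Packets read with av_read_frame() feed the decoder; make_write_frame()
        // then turns each decoded AVFrame into a core::write_frame for the mixer.
        auto packet = caspar::ffmpeg::create_packet();
        av_read_frame(&*format_context, &*packet); // < 0 means end-of-file or error
    }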