2.0. ffmpeg: Header file optimization and fixes.
[casparcg] / modules / ffmpeg / producer / filter / filter.cpp
#include "../../stdafx.h"

#include "filter.h"

#include "parallel_yadif.h"

#include "../../ffmpeg_error.h"

#include <boost/assign.hpp>

#include <cstdio>
#include <sstream>
#if defined(_MSC_VER)
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
extern "C"
{
    #define __STDC_CONSTANT_MACROS
    #define __STDC_LIMIT_MACROS
    #include <libavutil/avutil.h>
    #include <libavutil/imgutils.h>
    #include <libavfilter/avfilter.h>
    #include <libavfilter/avcodec.h>
    #include <libavfilter/avfiltergraph.h>
    #include <libavfilter/vsink_buffer.h>
    #include <libavfilter/vsrc_buffer.h>
}
#if defined(_MSC_VER)
#pragma warning (pop)
#endif

namespace caspar {
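
// Pixel formats the buffersink is allowed to produce; the array is passed to
// the sink filter below and is terminated by PIX_FMT_NONE.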
PixelFormat pix_fmts[] =
{
    PIX_FMT_YUV420P,
    PIX_FMT_YUVA420P,
    PIX_FMT_YUV422P,
    PIX_FMT_YUV444P,
    PIX_FMT_YUV411P,
    PIX_FMT_ARGB,
    PIX_FMT_RGBA,
    PIX_FMT_ABGR,
    PIX_FMT_GRAY8,
    PIX_FMT_NONE
};
struct filter::implementation
{
    std::string                     filters_;
    std::shared_ptr<AVFilterGraph>  graph_;
    AVFilterContext*                buffersink_ctx_;
    AVFilterContext*                buffersrc_ctx_;
    std::shared_ptr<void>           parallel_yadif_ctx_;

    implementation(const std::wstring& filters)
        : filters_(narrow(filters))
        , parallel_yadif_ctx_(nullptr)
    {
        std::transform(filters_.begin(), filters_.end(), filters_.begin(), ::tolower);
    }
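
    // Runs a decoded frame through the filter chain. An empty filter string
    // acts as a pass-through; a null frame yields no output.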
    std::vector<safe_ptr<AVFrame>> execute(const std::shared_ptr<AVFrame>& frame)
    {
        if(!frame)
            return std::vector<safe_ptr<AVFrame>>();

        if(filters_.empty())
            return boost::assign::list_of(frame);

        push(frame);
        return poll();
    }
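
    // Lazily builds the filter graph from the first frame's width, height and
    // pixel format, then queues the frame on the buffer source.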
    void push(const std::shared_ptr<AVFrame>& frame)
    {
        if(!graph_)
        {
            graph_.reset(avfilter_graph_alloc(), [](AVFilterGraph* p){avfilter_graph_free(&p);});

            // Input
            std::stringstream args;
            args << frame->width << ":" << frame->height << ":" << frame->format << ":" << 0 << ":" << 0 << ":" << 0 << ":" << 0; // don't care about time base and aspect ratio
            THROW_ON_ERROR2(avfilter_graph_create_filter(&buffersrc_ctx_, avfilter_get_by_name("buffer"), "src", args.str().c_str(), NULL, graph_.get()), "[filter]");

            // Output
            THROW_ON_ERROR2(avfilter_graph_create_filter(&buffersink_ctx_, avfilter_get_by_name("buffersink"), "out", NULL, pix_fmts, graph_.get()), "[filter]");

            AVFilterInOut* outputs = avfilter_inout_alloc();
            AVFilterInOut* inputs  = avfilter_inout_alloc();

            outputs->name       = av_strdup("in");
            outputs->filter_ctx = buffersrc_ctx_;
            outputs->pad_idx    = 0;
            outputs->next       = NULL;

            inputs->name        = av_strdup("out");
            inputs->filter_ctx  = buffersink_ctx_;
            inputs->pad_idx     = 0;
            inputs->next        = NULL;

            THROW_ON_ERROR2(avfilter_graph_parse(graph_.get(), filters_.c_str(), &inputs, &outputs, NULL), "[filter]");

            avfilter_inout_free(&inputs);
            avfilter_inout_free(&outputs);

            THROW_ON_ERROR2(avfilter_graph_config(graph_.get(), NULL), "[filter]");

            // Hand any yadif instances in the graph to the parallel yadif helper (see parallel_yadif.h).
            for(size_t n = 0; n < graph_->filter_count; ++n)
            {
                auto filter_name = graph_->filters[n]->name;
                if(strstr(filter_name, "yadif") != 0)
                    parallel_yadif_ctx_ = make_parallel_yadif(graph_->filters[n]);
            }
        }

        THROW_ON_ERROR2(av_vsrc_buffer_add_frame(buffersrc_ctx_, frame.get(), 0), "[filter]");
    }
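
    // Drains all frames currently available on the buffersink and returns them
    // as reference-counted AVFrames.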
    std::vector<safe_ptr<AVFrame>> poll()
    {
        std::vector<safe_ptr<AVFrame>> result;

        if(!graph_)
            return result;

        while(avfilter_poll_frame(buffersink_ctx_->inputs[0]))
        {
            AVFilterBufferRef* picref;
            THROW_ON_ERROR2(av_vsink_buffer_get_video_buffer_ref(buffersink_ctx_, &picref, 0), "[filter]");

            if(picref)
            {
                // Wrap the buffer ref in an AVFrame; the custom deleter frees the
                // frame and releases the underlying buffer ref together.
                safe_ptr<AVFrame> frame(avcodec_alloc_frame(), [=](AVFrame* p)
                {
                    av_free(p);
                    avfilter_unref_buffer(picref);
                });

                avcodec_get_frame_defaults(frame.get());

                memcpy(frame->data,     picref->data,     sizeof(frame->data));
                memcpy(frame->linesize, picref->linesize, sizeof(frame->linesize));
                frame->format              = picref->format;
                frame->width               = picref->video->w;
                frame->height              = picref->video->h;
                frame->pkt_pos             = picref->pos;
                frame->interlaced_frame    = picref->video->interlaced;
                frame->top_field_first     = picref->video->top_field_first;
                frame->key_frame           = picref->video->key_frame;
                frame->pict_type           = picref->video->pict_type;
                frame->sample_aspect_ratio = picref->video->sample_aspect_ratio;

                result.push_back(frame);
            }
        }

        return result;
    }
};

filter::filter(const std::wstring& filters) : impl_(new implementation(filters)){}
filter::filter(filter&& other) : impl_(std::move(other.impl_)){}
filter& filter::operator=(filter&& other){impl_ = std::move(other.impl_); return *this;}
std::vector<safe_ptr<AVFrame>> filter::execute(const std::shared_ptr<AVFrame>& frame) {return impl_->execute(frame);}

}
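
// Usage sketch, illustrative only: given a decode loop that yields
// std::shared_ptr<AVFrame>, the filter wrapper could be driven as below.
// decode_frame() and render() are hypothetical placeholders, and
// "yadif=1:-1" is just an example libavfilter chain string.
//
//    caspar::filter deinterlacer(L"yadif=1:-1");
//    while(auto av_frame = decode_frame())
//    {
//        for(auto& filtered : deinterlacer.execute(av_frame))
//            render(filtered);
//    }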