#include "../../ffmpeg_error.h"
#include "../../ffmpeg.h"
+#include "../util/util.h"
#include <common/assert.h>
#include <common/except.h>
#pragma warning (push)
#pragma warning (disable : 4244)
#endif
-extern "C"
+extern "C"
{
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
boost::join(complete_filter_graph, ";"),
audio_graph_inputs_,
audio_graph_outputs_);
-
+
if (is_logging_quiet_for_thread())
CASPAR_LOG(trace)
- << u16(std::string("\n")
+ << u16(std::string("\n")
+ avfilter_graph_dump(
- audio_graph_.get(),
+ audio_graph_.get(),
nullptr));
else
CASPAR_LOG(debug)
audio_graph_.get(),
nullptr));
}
-
+
// Parses `filtergraph` into `graph` and collects the unconnected audio
// source ("abuffer") and sink ("abuffersink") filter contexts so the
// caller can push/pull frames through them.
//
// @param graph            graph to populate (must outlive the returned
//                         contexts, which it owns).
// @param filtergraph      avfilter graph description string.
// @param source_contexts  receives the input ("abuffer") contexts.
// @param sink_contexts    receives the output ("abuffersink") contexts.
// @throws via FF() / FF_RET on any libavfilter error.
//
// NOTE: avfilter_graph_parse2() hands ownership of the in/out lists to
// the caller; they must be released with avfilter_inout_free(). A
// previous revision leaked them (the frees were commented out in a
// catch block). The filter contexts themselves are owned by the graph,
// so freeing the lists after harvesting the pointers is safe.
void configure_filtergraph(
	AVFilterGraph& graph,
	const std::string& filtergraph,
	std::vector<AVFilterContext*>& source_contexts,
	std::vector<AVFilterContext*>& sink_contexts)
{
	AVFilterInOut* outputs = nullptr;
	AVFilterInOut* inputs  = nullptr;

	FF(avfilter_graph_parse2(
			&graph,
			filtergraph.c_str(),
			&inputs,
			&outputs));

	// Take ownership immediately so the lists are freed on every exit
	// path, including exceptions thrown by FF() below. (On parse
	// failure avfilter_graph_parse2 frees the lists itself, so the
	// guards only need to cover the success path onward.)
	struct inout_deleter
	{
		void operator()(AVFilterInOut* p) const { avfilter_inout_free(&p); }
	};
	const std::unique_ptr<AVFilterInOut, inout_deleter> inputs_owner(inputs);
	const std::unique_ptr<AVFilterInOut, inout_deleter> outputs_owner(outputs);

	// Workaround because outputs and inputs are not filled in for some reason
	for (unsigned i = 0; i < graph.nb_filters; ++i)
	{
		auto filter = graph.filters[i];

		if (std::string(filter->filter->name) == "abuffer")
			source_contexts.push_back(filter);

		if (std::string(filter->filter->name) == "abuffersink")
			sink_contexts.push_back(filter);
	}

	for (AVFilterInOut* iter = inputs; iter; iter = iter->next)
		source_contexts.push_back(iter->filter_ctx);

	for (AVFilterInOut* iter = outputs; iter; iter = iter->next)
		sink_contexts.push_back(iter->filter_ctx);

	FF(avfilter_graph_config(
			&graph,
			nullptr));
}
void push(int input_pad_id, const std::shared_ptr<AVFrame>& src_av_frame)
- {
+ {
FF(av_buffersrc_add_frame(
audio_graph_inputs_.at(input_pad_id),
src_av_frame.get()));
std::shared_ptr<AVFrame> poll(int output_pad_id)
{
- std::shared_ptr<AVFrame> filt_frame(
- av_frame_alloc(),
- [](AVFrame* p)
- {
- av_frame_free(&p);
- });
-
+ auto filt_frame = create_frame();
+
const auto ret = av_buffersink_get_frame(
audio_graph_outputs_.at(output_pad_id),
filt_frame.get());
-
+
if(ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
return nullptr;
-
+
FF_RET(ret, "poll");
return filt_frame;
// Pimpl forwarder: delegates to the implementation's poll().
std::shared_ptr<AVFrame> audio_filter::poll(int output_pad_id)
{
	return impl_->poll(output_pad_id);
}
// Pimpl forwarder: returns the stored filtergraph description,
// converted to a wide string via u16().
std::wstring audio_filter::filter_str() const
{
	return u16(impl_->filtergraph_);
}
std::vector<spl::shared_ptr<AVFrame>> audio_filter::poll_all(int output_pad_id)
-{
+{
std::vector<spl::shared_ptr<AVFrame>> frames;
for(auto frame = poll(output_pad_id); frame; frame = poll(output_pad_id))
frames.push_back(spl::make_shared_ptr(frame));