3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/atomic.h"
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/buffer.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/common.h"
28 #include "libavutil/eval.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/rational.h"
35 #include "libavutil/samplefmt.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Exported string identifying the FFmpeg version this libavfilter was built from. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log the contents of a frame reference: buffer/data pointers, the four
 * linesizes, pts and pkt_pos, then video-specific fields (aspect, size,
 * interlacing flag, keyframe, picture type) or, when nb_samples is set,
 * audio-specific fields.  'end' selects whether a trailing newline is printed.
 * NOTE(review): this dump is missing lines (stray leading line numbers,
 * absent braces) — consult the pristine source before editing. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
/* Return the library version; the assert documents that FFmpeg builds
 * always carry a micro version >= 100 (vs. Libav's < 100). */
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
/* Return the compile-time configuration string of the build. */
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
/* Return the license string; the pointer arithmetic skips the
 * "libavfilter license: " prefix baked into the literal. */
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Pop the head element of the filter's command queue, freeing its command
 * string and advancing the list head.
 * NOTE(review): freeing of the AVFilterCommand node itself is not visible in
 * this dump — confirm against the pristine source. */
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
/* Insert a new pad at position idx (clamped to *count) into the pads and
 * links arrays, growing both by one element.  Existing entries at and after
 * idx are shifted up, the new link slot is NULL'ed, and the pad index stored
 * inside each following link (at byte offset padidx_off) is incremented.
 * Returns 0 on success, AVERROR(ENOMEM) if either reallocation failed. */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
/* fix the pad-index back-references of all links after the insertion point */
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Link output pad srcpad of src to input pad dstpad of dst.  Both filters
 * must belong to the same graph, the pads must exist, be unlinked, and have
 * matching media types.  Allocates the AVFilterLink, stores it on both
 * sides, and initializes its frame FIFO against the graph's queue pool.
 * Returns 0 on success, AVERROR(EINVAL) on bad pads / type mismatch,
 * AVERROR(ENOMEM) on allocation failure. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
/* guarantees that format == -1 (from av_mallocz) means "unset" for both media types */
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Free a link's owned resources: the partial audio buffer, the frame FIFO
 * and the frame pool.  NOTE(review): freeing of *link itself and the NULL
 * check are not visible in this dump — confirm against the pristine source. */
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
/* Accessor for the negotiated channel count of an (audio) link. */
186 int avfilter_link_get_channels(AVFilterLink *link)
188 return link->channels;
/* Raise the filter's scheduling priority; monotonic, never lowers it. */
191 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
193 filter->ready = FFMAX(filter->ready, priority);
197 * Clear frame_blocked_in on all outputs.
198 * This is necessary whenever something changes on input.
200 static void filter_unblock(AVFilterContext *filter)
204 for (i = 0; i < filter->nb_outputs; i++)
205 filter->outputs[i]->frame_blocked_in = 0;
/* Set the status (e.g. EOF/error) on the input side of a link.  A repeated
 * identical status is ignored; otherwise the link must not yet have one.
 * Clears pending frame requests/blocks, unblocks the destination's outputs
 * and schedules the destination filter at priority 200. */
209 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
211 if (link->status_in == status)
213 av_assert0(!link->status_in);
214 link->status_in = status;
215 link->status_in_pts = pts;
216 link->frame_wanted_out = 0;
217 link->frame_blocked_in = 0;
218 filter_unblock(link->dst);
219 ff_filter_set_ready(link->dst, 200);
/* Set the status on the output side of the link (as seen by the destination);
 * updates the link's current pts when one is supplied and wakes the source. */
222 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
224 av_assert0(!link->frame_wanted_out);
225 av_assert0(!link->status_out);
226 link->status_out = status;
227 if (pts != AV_NOPTS_VALUE)
228 ff_update_link_current_pts(link, pts);
229 filter_unblock(link->dst);
230 ff_filter_set_ready(link->src, 200);
/* Public wrapper: closing a link maps to an AVERROR_EOF out-status. */
233 void avfilter_link_set_closed(AVFilterLink *link, int closed)
235 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Splice filter 'filt' into an existing link: the old link now feeds
 * filt's input pad filt_srcpad_idx, and filt's output pad filt_dstpad_idx is
 * newly linked to the old destination.  On failure to create the new link
 * the original connection is restored.  Any format/samplerate/channel-layout
 * negotiation state already attached to the link is moved to the new link so
 * negotiation results are preserved. */
238 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
239 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
242 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
244 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
245 "between the filter '%s' and the filter '%s'\n",
246 filt->name, link->src->name, link->dst->name);
248 link->dst->inputs[dstpad_idx] = NULL;
249 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
250 /* failed to link output filter to new filter */
251 link->dst->inputs[dstpad_idx] = link;
255 /* re-hookup the link to the new destination filter we inserted */
257 link->dstpad = &filt->input_pads[filt_srcpad_idx];
258 filt->inputs[filt_srcpad_idx] = link;
260 /* if any information on supported media formats already exists on the
261 * link, we need to preserve that */
262 if (link->out_formats)
263 ff_formats_changeref(&link->out_formats,
264 &filt->outputs[filt_dstpad_idx]->out_formats);
265 if (link->out_samplerates)
266 ff_formats_changeref(&link->out_samplerates,
267 &filt->outputs[filt_dstpad_idx]->out_samplerates);
268 if (link->out_channel_layouts)
269 ff_channel_layouts_changeref(&link->out_channel_layouts,
270 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Recursively configure all input links of 'filter': for each link, first
 * configure the source filter's links, run the source pad's config_props
 * callback (mandatory for sources / multi-input filters), fill in defaulted
 * properties (time base, SAR, frame rate for video; time base for audio),
 * propagate hw_frames_ctx from non-hwframe-aware sources, then run the
 * destination pad's config_props.  AVLINK_STARTINIT is used as an in-progress
 * marker to detect circular filter chains.
 * NOTE(review): several lines (returns, case labels, closing braces) are
 * missing from this dump — do not edit without the pristine source. */
275 int avfilter_config_links(AVFilterContext *filter)
277 int (*config_link)(AVFilterLink *);
281 for (i = 0; i < filter->nb_inputs; i ++) {
282 AVFilterLink *link = filter->inputs[i];
283 AVFilterLink *inlink;
286 if (!link->src || !link->dst) {
287 av_log(filter, AV_LOG_ERROR,
288 "Not all input and output are properly linked (%d).\n", i);
289 return AVERROR(EINVAL);
292 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
294 link->current_pts_us = AV_NOPTS_VALUE;
296 switch (link->init_state) {
/* a link already in STARTINIT means we re-entered while configuring it */
299 case AVLINK_STARTINIT:
300 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
303 link->init_state = AVLINK_STARTINIT;
305 if ((ret = avfilter_config_links(link->src)) < 0)
308 if (!(config_link = link->srcpad->config_props)) {
309 if (link->src->nb_inputs != 1) {
310 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
311 "with more than one input "
312 "must set config_props() "
313 "callbacks on all outputs\n");
314 return AVERROR(EINVAL);
316 } else if ((ret = config_link(link)) < 0) {
317 av_log(link->src, AV_LOG_ERROR,
318 "Failed to configure output pad on %s\n",
323 switch (link->type) {
324 case AVMEDIA_TYPE_VIDEO:
/* default unset properties from the source filter's first input */
325 if (!link->time_base.num && !link->time_base.den)
326 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
328 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
329 link->sample_aspect_ratio = inlink ?
330 inlink->sample_aspect_ratio : (AVRational){1,1};
333 if (!link->frame_rate.num && !link->frame_rate.den)
334 link->frame_rate = inlink->frame_rate;
339 } else if (!link->w || !link->h) {
340 av_log(link->src, AV_LOG_ERROR,
341 "Video source filters must set their output link's "
342 "width and height\n");
343 return AVERROR(EINVAL);
347 case AVMEDIA_TYPE_AUDIO:
349 if (!link->time_base.num && !link->time_base.den)
350 link->time_base = inlink->time_base;
/* last resort for audio: 1/sample_rate */
353 if (!link->time_base.num && !link->time_base.den)
354 link->time_base = (AVRational) {1, link->sample_rate};
/* pass hw frames context through for filters that are unaware of it */
357 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
358 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
359 av_assert0(!link->hw_frames_ctx &&
360 "should not be set by non-hwframe-aware filter");
361 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
362 if (!link->hw_frames_ctx)
363 return AVERROR(ENOMEM);
366 if ((config_link = link->dstpad->config_props))
367 if ((ret = config_link(link)) < 0) {
368 av_log(link->dst, AV_LOG_ERROR,
369 "Failed to configure input pad on %s\n",
374 link->init_state = AVLINK_INIT;
/* Trace-log a link's negotiated properties: size and pixel format for video,
 * sample rate / channel layout / sample format for audio, plus the names of
 * the source and destination filters.  'end' appends a newline when set. */
381 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
383 if (link->type == AVMEDIA_TYPE_VIDEO) {
385 "link[%p s:%dx%d fmt:%s %s->%s]%s",
386 link, link->w, link->h,
387 av_get_pix_fmt_name(link->format),
388 link->src ? link->src->filter->name : "",
389 link->dst ? link->dst->filter->name : "",
393 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
396 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
397 link, (int)link->sample_rate, buf,
398 av_get_sample_fmt_name(link->format),
399 link->src ? link->src->filter->name : "",
400 link->dst ? link->dst->filter->name : "",
/* Request a frame on a link, for filters using the legacy (non-activate)
 * API only (asserted).  If the output side already has a status, return it;
 * if the input side has a status and the FIFO is empty, forward it to the
 * output side; otherwise mark the frame as wanted and wake the source
 * filter at priority 100. */
405 int ff_request_frame(AVFilterLink *link)
407 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
409 av_assert1(!link->dst->filter->activate);
410 if (link->status_out)
411 return link->status_out;
412 if (link->status_in) {
413 if (ff_framequeue_queued_frames(&link->fifo)) {
414 av_assert1(!link->frame_wanted_out);
415 av_assert1(link->dst->ready >= 300);
418 /* Acknowledge status change. Filters using ff_request_frame() will
419 handle the change automatically. Filters can also check the
420 status directly but none do yet. */
421 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
422 return link->status_out;
425 link->frame_wanted_out = 1;
426 ff_filter_set_ready(link->src, 100);
/* Guess the timestamp at which a status change (typically EOF) happened on
 * the outputs of 'ctx': the minimum current_pts over inputs whose out-status
 * matches, falling back to the minimum status_in_pts with a warning.
 * Returns AV_NOPTS_VALUE when no estimate is available.
 * NOTE(review): the early-return between the two loops is missing from this
 * dump — confirm the exact fallback condition in the pristine source. */
430 static int64_t guess_status_pts(AVFilterContext *ctx, int status)
433 int64_t r = INT64_MAX;
435 for (i = 0; i < ctx->nb_inputs; i++)
436 if (ctx->inputs[i]->status_out == status)
437 r = FFMIN(r, ctx->inputs[i]->current_pts);
440 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
441 for (i = 0; i < ctx->nb_inputs; i++)
442 r = FFMIN(r, ctx->inputs[i]->status_in_pts);
445 return AV_NOPTS_VALUE;
/* Forward a frame request into the source filter of a link: call the source
 * pad's request_frame() if present, otherwise recurse into the source's
 * first input.  The link is marked blocked up front; the callee clears it if
 * it can produce.  A terminal error (not EAGAIN and not the existing input
 * status) is converted into an in-status with an estimated pts. */
448 static int ff_request_frame_to_filter(AVFilterLink *link)
452 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
453 /* Assume the filter is blocked, let the method clear it if not */
454 link->frame_blocked_in = 1;
455 if (link->srcpad->request_frame)
456 ret = link->srcpad->request_frame(link);
457 else if (link->src->inputs[0])
458 ret = ff_request_frame(link->src->inputs[0]);
460 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
461 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret));
462 if (ret == AVERROR_EOF)
/* Poll how many frames are available on a link: delegate to the source
 * pad's poll_frame() if defined, otherwise recurse over all inputs of the
 * source filter and take the minimum.  An unlinked input is AVERROR(EINVAL). */
468 int ff_poll_frame(AVFilterLink *link)
470 int i, min = INT_MAX;
472 if (link->srcpad->poll_frame)
473 return link->srcpad->poll_frame(link);
475 for (i = 0; i < link->src->nb_inputs; i++) {
477 if (!link->src->inputs[i])
478 return AVERROR(EINVAL);
479 val = ff_poll_frame(link->src->inputs[i]);
480 min = FFMIN(min, val);
/* Names of the variables usable in the timeline 'enable' expression.
 * NOTE(review): the array contents are missing from this dump. */
486 static const char *const var_names[] = {
/* Parse and install a new timeline 'enable' expression on the filter.
 * Rejects filters without AVFILTER_FLAG_SUPPORT_TIMELINE, lazily allocates
 * the VAR_VARS_NB variable array, and on success replaces enable_str with a
 * duplicate of 'expr'.  Returns 0 or a negative AVERROR code.
 * NOTE(review): cleanup of 'old'/'expr_dup' on the error paths is not
 * visible in this dump. */
504 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
508 AVExpr *old = ctx->enable;
510 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
511 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
512 "with filter '%s'\n", ctx->filter->name);
513 return AVERROR_PATCHWELCOME;
516 expr_dup = av_strdup(expr);
518 return AVERROR(ENOMEM);
520 if (!ctx->var_values) {
521 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
522 if (!ctx->var_values) {
524 return AVERROR(ENOMEM);
528 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
529 NULL, NULL, NULL, NULL, 0, ctx->priv);
531 av_log(ctx->priv, AV_LOG_ERROR,
532 "Error when evaluating the expression '%s' for enable\n",
539 av_free(ctx->enable_str);
540 ctx->enable_str = expr_dup;
/* Record the latest timestamp seen on a link, both in link time base and
 * rescaled to microseconds, then update the filtergraph's age heap so
 * "oldest link" scheduling stays correct.  AV_NOPTS_VALUE is ignored. */
544 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
546 if (pts == AV_NOPTS_VALUE)
548 link->current_pts = pts;
549 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
550 /* TODO use duration */
551 if (link->graph && link->age_index >= 0)
552 ff_avfilter_graph_update_heap(link->graph, link);
/* Dispatch a runtime command to a filter instance.  "ping" answers with a
 * pong string (logged if the caller supplied no result buffer), "enable"
 * re-parses the timeline expression, anything else is forwarded to the
 * filter's process_command() callback or fails with AVERROR(ENOSYS). */
555 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
557 if(!strcmp(cmd, "ping")){
558 char local_res[256] = {0};
562 res_len = sizeof(local_res);
564 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
565 if (res == local_res)
566 av_log(filter, AV_LOG_INFO, "%s", res);
568 }else if(!strcmp(cmd, "enable")) {
569 return set_enable_expr(filter, arg);
570 }else if(filter->filter->process_command) {
571 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
573 return AVERROR(ENOSYS);
/* Global registry of filters: a singly linked list threaded through
 * AVFilter.next, with last_filter caching the tail's next-pointer slot. */
576 static AVFilter *first_filter;
577 static AVFilter **last_filter = &first_filter;
579 #if !FF_API_NOCONST_GET_NAME
/* Linear search of the registry by filter name; NULL if not found. */
582 AVFilter *avfilter_get_by_name(const char *name)
584 const AVFilter *f = NULL;
589 while ((f = avfilter_next(f)))
590 if (!strcmp(f->name, name))
591 return (AVFilter *)f;
/* Append a filter to the registry using an atomic compare-and-swap on the
 * tail slot, so concurrent registrations do not lose entries. */
596 int avfilter_register(AVFilter *filter)
598 AVFilter **f = last_filter;
600 /* the filter must select generic or internal exclusively */
601 av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
605 while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
607 last_filter = &filter->next;
/* Iterate the registry: NULL starts from the head. */
612 const AVFilter *avfilter_next(const AVFilter *prev)
614 return prev ? prev->next : first_filter;
617 #if FF_API_OLD_FILTER_REGISTER
/* Deprecated double-pointer iteration API kept for ABI compatibility. */
618 AVFilter **av_filter_next(AVFilter **filter)
620 return filter ? &(*filter)->next : &first_filter;
/* Deprecated no-op kept for API compatibility. */
623 void avfilter_uninit(void)
/* Count the pads in a NULL-name-terminated AVFilterPad array.
 * NOTE(review): the loop body (pads++) and the NULL check are not visible
 * in this dump. */
628 int avfilter_pad_count(const AVFilterPad *pads)
635 for (count = 0; pads->name; count++)
/* AVClass item_name callback: the instance name, or the filter name when
 * the instance is unnamed. */
640 static const char *default_filter_name(void *filter_ctx)
642 AVFilterContext *ctx = filter_ctx;
643 return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the priv context (when it has its own
 * AVClass) as the single child of the filter context. */
646 static void *filter_child_next(void *obj, void *prev)
648 AVFilterContext *ctx = obj;
649 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
/* AVClass child_class_next callback: walk the filter registry to find the
 * class following 'prev' among filters that define a priv_class; with
 * prev == NULL, return the first such class.
 * NOTE(review): loop bodies/breaks are missing from this dump. */
654 static const AVClass *filter_child_class_next(const AVClass *prev)
656 const AVFilter *f = NULL;
658 /* find the filter that corresponds to prev */
659 while (prev && (f = avfilter_next(f)))
660 if (f->priv_class == prev)
663 /* could not find filter corresponding to prev */
667 /* find next filter with specific options */
668 while ((f = avfilter_next(f)))
670 return f->priv_class;
/* Generic per-instance options shared by every filter (threading mode,
 * timeline 'enable' expression, thread count), and the AVClass that exposes
 * them together with the child-class machinery above. */
675 #define OFFSET(x) offsetof(AVFilterContext, x)
676 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
677 static const AVOption avfilter_options[] = {
678 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
679 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
680 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
681 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
682 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
683 { .i64 = 0 }, 0, INT_MAX, FLAGS },
687 static const AVClass avfilter_class = {
688 .class_name = "AVFilter",
689 .item_name = default_filter_name,
690 .version = LIBAVUTIL_VERSION_INT,
691 .category = AV_CLASS_CATEGORY_FILTER,
692 .child_next = filter_child_next,
693 .child_class_next = filter_child_class_next,
694 .option = avfilter_options,
/* Fallback execute callback when slice threading is unavailable: run the
 * jobs serially in the calling thread, collecting per-job results in 'ret'. */
697 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
698 int *ret, int nb_jobs)
702 for (i = 0; i < nb_jobs; i++) {
703 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize a filter instance: context, private
 * data (with defaults from the priv_class), internal state with the serial
 * execute callback, and copies of the static input/output pad descriptions
 * plus NULL'ed link arrays.  Runs the filter's preinit() callback if any.
 * Returns NULL on any allocation or preinit failure; the error path frees
 * everything allocated so far.
 * NOTE(review): several gotos/returns and the final cleanup label are
 * missing from this dump. */
710 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
712 AVFilterContext *ret;
718 ret = av_mallocz(sizeof(AVFilterContext));
722 ret->av_class = &avfilter_class;
723 ret->filter = filter;
724 ret->name = inst_name ? av_strdup(inst_name) : NULL;
725 if (filter->priv_size) {
726 ret->priv = av_mallocz(filter->priv_size);
730 if (filter->preinit) {
731 if (filter->preinit(ret) < 0)
736 av_opt_set_defaults(ret);
737 if (filter->priv_class) {
738 *(const AVClass**)ret->priv = filter->priv_class;
739 av_opt_set_defaults(ret->priv);
742 ret->internal = av_mallocz(sizeof(*ret->internal));
745 ret->internal->execute = default_execute;
747 ret->nb_inputs = avfilter_pad_count(filter->inputs);
748 if (ret->nb_inputs ) {
749 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
750 if (!ret->input_pads)
752 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
753 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
758 ret->nb_outputs = avfilter_pad_count(filter->outputs);
759 if (ret->nb_outputs) {
760 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
761 if (!ret->output_pads)
763 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
764 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* error path: undo every allocation made above */
774 av_freep(&ret->inputs);
775 av_freep(&ret->input_pads);
777 av_freep(&ret->outputs);
778 av_freep(&ret->output_pads);
780 av_freep(&ret->priv);
781 av_freep(&ret->internal);
786 #if FF_API_AVFILTER_OPEN
/* Deprecated wrapper around ff_filter_alloc(); maps NULL to AVERROR(ENOMEM). */
787 int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
789 *filter_ctx = ff_filter_alloc(filter, inst_name);
790 return *filter_ctx ? 0 : AVERROR(ENOMEM);
/* Detach a link from both endpoint filters, release its hw frames context
 * and all format-negotiation lists, then free the link itself. */
794 static void free_link(AVFilterLink *link)
800 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
802 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
804 av_buffer_unref(&link->hw_frames_ctx);
806 ff_formats_unref(&link->in_formats);
807 ff_formats_unref(&link->out_formats);
808 ff_formats_unref(&link->in_samplerates);
809 ff_formats_unref(&link->out_samplerates);
810 ff_channel_layouts_unref(&link->in_channel_layouts);
811 ff_channel_layouts_unref(&link->out_channel_layouts);
812 avfilter_link_free(&link);
/* Destroy a filter instance: remove it from its graph, run the filter's
 * uninit() callback, free every input/output link, release option storage,
 * the hw device context, pads, links, private data, the pending command
 * queue, the timeline expression and internal state.
 * NOTE(review): the final free of 'filter' itself is not visible in this
 * dump — confirm against the pristine source. */
815 void avfilter_free(AVFilterContext *filter)
823 ff_filter_graph_remove_filter(filter->graph, filter);
825 if (filter->filter->uninit)
826 filter->filter->uninit(filter);
828 for (i = 0; i < filter->nb_inputs; i++) {
829 free_link(filter->inputs[i]);
831 for (i = 0; i < filter->nb_outputs; i++) {
832 free_link(filter->outputs[i]);
835 if (filter->filter->priv_class)
836 av_opt_free(filter->priv);
838 av_buffer_unref(&filter->hw_device_ctx);
840 av_freep(&filter->name);
841 av_freep(&filter->input_pads);
842 av_freep(&filter->output_pads);
843 av_freep(&filter->inputs);
844 av_freep(&filter->outputs);
845 av_freep(&filter->priv);
846 while(filter->command_queue){
847 ff_command_queue_pop(filter);
850 av_expr_free(filter->enable);
851 filter->enable = NULL;
852 av_freep(&filter->var_values);
853 av_freep(&filter->internal);
/* Effective thread count for a filter: the per-filter limit capped by the
 * graph-wide limit, or the graph's count when no per-filter limit is set. */
857 int ff_filter_get_nb_threads(AVFilterContext *ctx)
859 if (ctx->nb_threads > 0)
860 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
861 return ctx->graph->nb_threads;
/* Parse a filter argument string of key=value pairs separated by ':' into
 * options.  Positional (shorthand) values are matched against the priv
 * class's option order until the first named key appears.  Keys recognized
 * on the generic filter context are set there directly; unknown keys are
 * stashed in *options for the caller while still being tried on the priv
 * context.  Finishes by compiling any 'enable' timeline expression.
 * NOTE(review): significant parsing lines are missing from this dump —
 * treat the above summary as partial. */
864 static int process_options(AVFilterContext *ctx, AVDictionary **options,
867 const AVOption *o = NULL;
869 char *av_uninit(parsed_key), *av_uninit(value);
877 const char *shorthand = NULL;
879 o = av_opt_next(ctx->priv, o);
881 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
887 ret = av_opt_get_key_value(&args, "=", ":",
888 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
889 &parsed_key, &value);
891 if (ret == AVERROR(EINVAL))
892 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
894 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
902 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
907 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
909 if (av_opt_find(ctx, key, NULL, 0, 0)) {
910 ret = av_opt_set(ctx, key, value, 0);
917 av_dict_set(options, key, value, 0);
918 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
919 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
920 if (ret == AVERROR_OPTION_NOT_FOUND)
921 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
934 if (ctx->enable_str) {
935 ret = set_enable_expr(ctx, ctx->enable_str);
942 #if FF_API_AVFILTER_INIT_FILTER
/* Deprecated entry point; 'opaque' is ignored, delegates to avfilter_init_str(). */
943 int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
945 return avfilter_init_str(filter, args);
/* Initialize a filter from an options dictionary: apply the generic filter
 * options, decide whether slice threading is actually usable (filter flag,
 * graph thread type and a thread_execute implementation must all agree,
 * otherwise threading is disabled), apply the remaining options to the
 * private context, then call the filter's init_opaque/init/init_dict. */
949 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
953 ret = av_opt_set_dict(ctx, options);
955 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
959 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
960 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
961 ctx->graph->internal->thread_execute) {
962 ctx->thread_type = AVFILTER_THREAD_SLICE;
963 ctx->internal->execute = ctx->graph->internal->thread_execute;
965 ctx->thread_type = 0;
968 if (ctx->filter->priv_class) {
969 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
971 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
976 if (ctx->filter->init_opaque)
977 ret = ctx->filter->init_opaque(ctx, NULL);
978 else if (ctx->filter->init)
979 ret = ctx->filter->init(ctx);
980 else if (ctx->filter->init_dict)
981 ret = ctx->filter->init_dict(ctx, options);
/* Initialize a filter from an argument string.  Filters without a
 * priv_class reject any arguments.  For a hard-coded list of filters
 * ("format", "frei0r", "pan", ...) a compatibility shim rewrites the old
 * colon-separated list syntax into the '|'-separated form (deprecated with
 * a warning, or an outright error once FF_API_OLD_FILTER_OPTS is gone),
 * with special-casing for leading positional colons and for aevalsrc's
 * channel expressions.  The parsed options are passed to
 * avfilter_init_dict(); leftover entries mean an unknown option.
 * NOTE(review): many lines of the compat shim are missing from this dump —
 * do not modify without the pristine source. */
986 int avfilter_init_str(AVFilterContext *filter, const char *args)
988 AVDictionary *options = NULL;
989 AVDictionaryEntry *e;
993 if (!filter->filter->priv_class) {
994 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
995 "options, but options were provided: %s.\n", args);
996 return AVERROR(EINVAL);
999 #if FF_API_OLD_FILTER_OPTS || FF_API_OLD_FILTER_OPTS_ERROR
1000 if ( !strcmp(filter->filter->name, "format") ||
1001 !strcmp(filter->filter->name, "noformat") ||
1002 !strcmp(filter->filter->name, "frei0r") ||
1003 !strcmp(filter->filter->name, "frei0r_src") ||
1004 !strcmp(filter->filter->name, "ocv") ||
1005 !strcmp(filter->filter->name, "pan") ||
1006 !strcmp(filter->filter->name, "pp") ||
1007 !strcmp(filter->filter->name, "aevalsrc")) {
1008 /* a hack for compatibility with the old syntax
1009 * replace colons with |s */
1010 char *copy = av_strdup(args);
1012 int nb_leading = 0; // number of leading colons to skip
1016 ret = AVERROR(ENOMEM);
1020 if (!strcmp(filter->filter->name, "frei0r") ||
1021 !strcmp(filter->filter->name, "ocv"))
1023 else if (!strcmp(filter->filter->name, "frei0r_src"))
1026 while (nb_leading--) {
1029 p = copy + strlen(copy);
1035 deprecated = strchr(p, ':') != NULL;
1037 if (!strcmp(filter->filter->name, "aevalsrc")) {
1039 while ((p = strchr(p, ':')) && p[1] != ':') {
1040 const char *epos = strchr(p + 1, '=');
1041 const char *spos = strchr(p + 1, ':');
1042 const int next_token_is_opt = epos && (!spos || epos < spos);
1043 if (next_token_is_opt) {
1047 /* next token does not contain a '=', assume a channel expression */
1051 if (p && *p == ':') { // double sep '::' found
1053 memmove(p, p + 1, strlen(p));
1056 while ((p = strchr(p, ':')))
1059 #if FF_API_OLD_FILTER_OPTS
1061 av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
1062 "'|' to separate the list items.\n");
1064 av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
1065 ret = process_options(filter, &options, copy);
1068 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1069 "'|' to separate the list items ('%s' instead of '%s')\n",
1071 ret = AVERROR(EINVAL);
1073 ret = process_options(filter, &options, copy);
1083 ret = process_options(filter, &options, args);
1089 ret = avfilter_init_dict(filter, &options);
/* anything left in the dictionary was not consumed -> unknown option */
1093 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1094 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1095 ret = AVERROR_OPTION_NOT_FOUND;
1100 av_dict_free(&options);
/* Public accessors for pad metadata, by index. */
1105 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1107 return pads[pad_idx].name;
1110 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1112 return pads[pad_idx].type;
/* Default filter_frame callback: pass the frame through to output 0. */
1115 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1117 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to the destination pad's filter_frame callback (falling
 * back to pass-through).  Makes the frame writable when the pad requires
 * it, processes queued commands scheduled at or before the frame's time,
 * evaluates the timeline 'enable' expression to set is_disabled (disabled
 * generic-timeline filters just pass the frame through), and counts the
 * frame on the link.  On error the frame is freed here. */
1120 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1122 int (*filter_frame)(AVFilterLink *, AVFrame *);
1123 AVFilterContext *dstctx = link->dst;
1124 AVFilterPad *dst = link->dstpad;
1127 if (!(filter_frame = dst->filter_frame))
1128 filter_frame = default_filter_frame;
1130 if (dst->needs_writable) {
1131 ret = ff_inlink_make_frame_writable(link, &frame);
1136 ff_inlink_process_commands(link, frame);
1137 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1139 if (dstctx->is_disabled &&
1140 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1141 filter_frame = default_filter_frame;
1142 ret = filter_frame(link, frame);
1143 link->frame_count_out++;
1147 av_frame_free(&frame);
/* Send a frame down a link.  Debug consistency checks: video frames must
 * match the link's format/size (a short whitelist of filters is exempt),
 * audio frames must match channels, layout and sample rate — a mismatch is
 * an unsupported mid-stream change.  On success the frame is queued on the
 * link FIFO, blocked/wanted flags are cleared, the destination's outputs
 * are unblocked and the destination is scheduled at priority 300.  On any
 * error the frame is freed.
 * NOTE(review): the error gotos between the checks are missing from this
 * dump; AVERROR_PATCHWELCOME appears to be the mismatch error path. */
1151 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1154 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1156 /* Consistency checks */
1157 if (link->type == AVMEDIA_TYPE_VIDEO) {
1158 if (strcmp(link->dst->filter->name, "buffersink") &&
1159 strcmp(link->dst->filter->name, "format") &&
1160 strcmp(link->dst->filter->name, "idet") &&
1161 strcmp(link->dst->filter->name, "null") &&
1162 strcmp(link->dst->filter->name, "scale")) {
1163 av_assert1(frame->format == link->format);
1164 av_assert1(frame->width == link->w);
1165 av_assert1(frame->height == link->h);
1168 if (frame->format != link->format) {
1169 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1172 if (frame->channels != link->channels) {
1173 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1176 if (frame->channel_layout != link->channel_layout) {
1177 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1180 if (frame->sample_rate != link->sample_rate) {
1181 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1186 link->frame_blocked_in = link->frame_wanted_out = 0;
1187 link->frame_count_in++;
1188 filter_unblock(link->dst);
1189 ret = ff_framequeue_add(&link->fifo, frame);
1191 av_frame_free(&frame);
1194 ff_filter_set_ready(link->dst, 300);
1198 av_frame_free(&frame);
1199 return AVERROR_PATCHWELCOME;
/* True when the link FIFO holds frames and at least 'min' samples (or — per
 * the truncated condition — some other terminating situation; the last
 * clause of the || is missing from this dump). */
1202 static int samples_ready(AVFilterLink *link, unsigned min)
1204 return ff_framequeue_queued_frames(&link->fifo) &&
1205 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into a
 * single frame.  Fast path: if the first queued frame alone fits the
 * [min, max] range (and no samples were skipped), hand it out directly.
 * Otherwise count how many whole frames fit under max, allocate one output
 * buffer, copy the whole frames into it, and partially consume the next
 * frame (ff_framequeue_skip_samples) for the remainder.  Props and pts are
 * taken from the first source frame.  Returns 0 or AVERROR(ENOMEM)/copy
 * errors.  Relies on no format changes on the link (asserted via
 * samples_ready). */
1209 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1212 AVFrame *frame0, *frame, *buf;
1213 unsigned nb_samples, nb_frames, i, p;
1216 /* Note: this function relies on no format changes and must only be
1217 called with enough samples. */
1218 av_assert1(samples_ready(link, link->min_samples));
1219 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1220 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1221 *rframe = ff_framequeue_take(&link->fifo);
/* count whole frames that fit below 'max' */
1227 if (nb_samples + frame->nb_samples > max) {
1228 if (nb_samples < min)
1232 nb_samples += frame->nb_samples;
1234 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1236 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1239 buf = ff_get_audio_buffer(link, nb_samples);
1241 return AVERROR(ENOMEM);
1242 ret = av_frame_copy_props(buf, frame0);
1244 av_frame_free(&buf);
1247 buf->pts = frame0->pts;
1250 for (i = 0; i < nb_frames; i++) {
1251 frame = ff_framequeue_take(&link->fifo);
1252 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1253 frame->nb_samples, link->channels, link->format);
1254 p += frame->nb_samples;
1255 av_frame_free(&frame);
/* partial tail: copy the first n samples of the next frame and skip them */
1257 if (p < nb_samples) {
1258 unsigned n = nb_samples - p;
1259 frame = ff_framequeue_peek(&link->fifo, 0);
1260 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1261 link->channels, link->format);
1262 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Pull one frame (or a min/max-sized batch of audio samples) from the link
 * FIFO and run it through the destination filter.  Unblocks the
 * destination's outputs first, compensates frame_count_out (which the
 * framed path re-increments), converts a new error into an out-status, and
 * re-schedules the destination to drain any further queued frames. */
1269 static int ff_filter_frame_to_filter(AVFilterLink *link)
1271 AVFrame *frame = NULL;
1272 AVFilterContext *dst = link->dst;
1275 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1276 ret = link->min_samples ?
1277 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1278 ff_inlink_consume_frame(link, &frame);
1284 /* The filter will soon have received a new frame, that may allow it to
1285 produce one or more: unblock its outputs. */
1286 filter_unblock(dst);
1287 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1288 before the frame; ff_filter_frame_framed() will re-increment it. */
1289 link->frame_count_out--;
1290 ret = ff_filter_frame_framed(link, frame);
1291 if (ret < 0 && ret != link->status_out) {
1292 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1294 /* Run once again, to see if several frames were available, or if
1295 the input status has also changed, or any other reason. */
1296 ff_filter_set_ready(dst, 300);
/* Propagate an input status change (typically EOF on 'in') by requesting
 * frames on the filter's outputs round-robin until either the input's
 * out-status gets set, or every output is already closed (in which case the
 * input status is acknowledged directly).  Sinks (no outputs) need no
 * forwarding.  The filter is re-scheduled afterwards. */
1301 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1303 unsigned out = 0, progress = 0;
1306 av_assert0(!in->status_out);
1307 if (!filter->nb_outputs) {
1308 /* not necessary with the current API and sinks */
1311 while (!in->status_out) {
1312 if (!filter->outputs[out]->status_in) {
1314 ret = ff_request_frame_to_filter(filter->outputs[out]);
1318 if (++out == filter->nb_outputs) {
1320 /* Every output already closed: input no longer interesting
1321 (example: overlay in shortest mode, other input closed). */
1322 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1329 ff_filter_set_ready(filter, 200);
/* Default activate callback, in priority order: (1) feed the first input
 * with enough queued samples/frames into the filter; (2) forward a pending
 * input status change whose FIFO has drained; (3) forward frame requests
 * from outputs that want a frame and are not blocked.  Otherwise the filter
 * has nothing to do: FFERROR_NOT_READY. */
1333 static int ff_filter_activate_default(AVFilterContext *filter)
1337 for (i = 0; i < filter->nb_inputs; i++) {
1338 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1339 return ff_filter_frame_to_filter(filter->inputs[i]);
1342 for (i = 0; i < filter->nb_inputs; i++) {
1343 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1344 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1345 return forward_status_change(filter, filter->inputs[i]);
1348 for (i = 0; i < filter->nb_outputs; i++) {
1349 if (filter->outputs[i]->frame_wanted_out &&
1350 !filter->outputs[i]->frame_blocked_in) {
1351 return ff_request_frame_to_filter(filter->outputs[i]);
1354 return FFERROR_NOT_READY;
1358 Filter scheduling and activation
1360 When a filter is activated, it must:
1361 - if possible, output a frame;
1362 - else, if relevant, forward the input status change;
1363 - else, check outputs for wanted frames and forward the requests.
1365 The following AVFilterLink fields are used for activation:
1369 This field indicates if a frame is needed on this input of the
1370 destination filter. A positive value indicates that a frame is needed
1371 to process queued frames or internal data or to satisfy the
1372 application; a zero value indicates that a frame is not especially
1373 needed but could be processed anyway; a negative value indicates that a
1374 frame would just be queued.
1376 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1377 when requested by the application through a specific API or when it is
1378 set on one of the outputs.
1380 It is cleared when a frame is sent from the source using
1383 It is also cleared when a status change is sent from the source using
1384 ff_avfilter_link_set_in_status().
1388 This field means that the source filter can not generate a frame as is.
1389 Its goal is to avoid repeatedly calling the request_frame() method on
1392 It is set by the framework on all outputs of a filter before activating it.
1394 It is automatically cleared by ff_filter_frame().
1396 It is also automatically cleared by ff_avfilter_link_set_in_status().
1398 It is also cleared on all outputs (using filter_unblock()) when
1399 something happens on an input: processing a frame or changing the
1404 Contains the frames queued on a filter input. If it contains frames and
1405 frame_wanted_out is not set, then the filter can be activated. If that
1406 results in the filter not being able to use these frames, the filter must set
1407 frame_wanted_out to ask for more frames.
1409 - status_in and status_in_pts:
1411 Status (EOF or error code) of the link and timestamp of the status
1412 change (in link time base, same as frames) as seen from the input of
1413 the link. The status change is considered happening after the frames
1416 It is set by the source filter using ff_avfilter_link_set_in_status().
1420 Status of the link as seen from the output of the link. The status
1421 change is considered to have already happened.
1423 It is set by the destination filter using
1424 ff_avfilter_link_set_out_status().
1426 Filters are activated according to the ready field, set using the
1427 ff_filter_set_ready(). Eventually, a priority queue will be used.
1428 ff_filter_set_ready() is called whenever anything could cause progress to
1429 be possible. Marking a filter ready when it is not is not a problem,
1430 except for the small overhead it causes.
1432 Conditions that cause a filter to be marked ready are:
1434 - frames added on an input link;
1436 - changes in the input or output status of an input link;
1438 - requests for a frame on an output link;
1440 - after any actual processing using the legacy methods (filter_frame(),
1441 and request_frame() to acknowledge status changes), to run once more
1442 and check if enough input was present for several frames.
1444 Examples of scenarios to consider:
1446 - buffersrc: activate if frame_wanted_out to notify the application;
1447 activate when the application adds a frame to push it immediately.
1449 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1451 - concat (not at stitch points): can process a frame on any output.
1452 Activate if frame_wanted_out on output to forward on the corresponding
1453 input. Activate when a frame is present on input to process it
1456 - framesync: needs at least one frame on each input; extra frames on the
1457 wrong input will accumulate. When a frame is first added on one input,
1458 set frame_wanted_out<0 on it to avoid getting more (would trigger
1459 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1461 Activation of old filters:
1463 In order to activate a filter implementing the legacy filter_frame() and
1464 request_frame() methods, perform the first possible of the following
1467 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1468 frame and call filter_frame().
1470 Rationale: filter frames as soon as possible instead of leaving them
1471 queued; frame_wanted_out < 0 is not possible since the old API does not
1472 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1473 when min_samples > 0 and there are not enough samples queued.
1475 - If an input has status_in set but not status_out, try to call
1476 request_frame() on one of the outputs in the hope that it will trigger
1477 request_frame() on the input with status_in and acknowledge it. This is
1478 awkward and fragile, filters with several inputs or outputs should be
1479 updated to direct activation as soon as possible.
1481 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1484 Rationale: checking frame_blocked_in is necessary to avoid requesting
1485 repeatedly on a blocked input if another is not blocked (example:
1486 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1488 TODO: respect needs_fifo and remove auto-inserted fifos.
1492 int ff_filter_activate(AVFilterContext *filter)
1496 /* Generic timeline support is not yet implemented but should be easy */
1497 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1498 filter->filter->activate));
1500 ret = filter->filter->activate ? filter->filter->activate(filter) :
1501 ff_filter_activate_default(filter);
1502 if (ret == FFERROR_NOT_READY)
1507 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1509 *rpts = link->current_pts;
1510 if (ff_framequeue_queued_frames(&link->fifo))
1511 return *rstatus = 0;
1512 if (link->status_out)
1513 return *rstatus = link->status_out;
1514 if (!link->status_in)
1515 return *rstatus = 0;
1516 *rstatus = link->status_out = link->status_in;
1517 ff_update_link_current_pts(link, link->status_in_pts);
1518 *rpts = link->current_pts;
1522 int ff_inlink_check_available_frame(AVFilterLink *link)
1524 return ff_framequeue_queued_frames(&link->fifo) > 0;
1527 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1529 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1531 return samples >= min || (link->status_in && samples);
1534 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1536 ff_update_link_current_pts(link, frame->pts);
1537 ff_inlink_process_commands(link, frame);
1538 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1539 link->frame_count_out++;
1542 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1547 if (!ff_inlink_check_available_frame(link))
1550 if (link->fifo.samples_skipped) {
1551 frame = ff_framequeue_peek(&link->fifo, 0);
1552 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1555 frame = ff_framequeue_take(&link->fifo);
1556 consume_update(link, frame);
1561 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1569 if (!ff_inlink_check_available_samples(link, min))
1571 if (link->status_in)
1572 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1573 ret = take_samples(link, min, link->max_samples, &frame);
1576 consume_update(link, frame);
1581 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1583 AVFrame *frame = *rframe;
1587 if (av_frame_is_writable(frame))
1589 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1591 switch (link->type) {
1592 case AVMEDIA_TYPE_VIDEO:
1593 out = ff_get_video_buffer(link, link->w, link->h);
1595 case AVMEDIA_TYPE_AUDIO:
1596 out = ff_get_audio_buffer(link, frame->nb_samples);
1599 return AVERROR(EINVAL);
1602 return AVERROR(ENOMEM);
1604 ret = av_frame_copy_props(out, frame);
1606 av_frame_free(&out);
1610 switch (link->type) {
1611 case AVMEDIA_TYPE_VIDEO:
1612 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1613 frame->format, frame->width, frame->height);
1615 case AVMEDIA_TYPE_AUDIO:
1616 av_samples_copy(out->extended_data, frame->extended_data,
1617 0, 0, frame->nb_samples,
1622 av_assert0(!"reached");
1625 av_frame_free(&frame);
1630 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1632 AVFilterCommand *cmd = link->dst->command_queue;
1634 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1635 av_log(link->dst, AV_LOG_DEBUG,
1636 "Processing command time:%f command:%s arg:%s\n",
1637 cmd->time, cmd->command, cmd->arg);
1638 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1639 ff_command_queue_pop(link->dst);
1640 cmd= link->dst->command_queue;
1645 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1647 AVFilterContext *dstctx = link->dst;
1648 int64_t pts = frame->pts;
1649 int64_t pos = frame->pkt_pos;
1651 if (!dstctx->enable_str)
1654 dstctx->var_values[VAR_N] = link->frame_count_out;
1655 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1656 dstctx->var_values[VAR_W] = link->w;
1657 dstctx->var_values[VAR_H] = link->h;
1658 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1660 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1663 void ff_inlink_request_frame(AVFilterLink *link)
1665 av_assert1(!link->status_in);
1666 av_assert1(!link->status_out);
1667 link->frame_wanted_out = 1;
1668 ff_filter_set_ready(link->src, 100);
1671 void ff_inlink_set_status(AVFilterLink *link, int status)
1673 if (link->status_out)
1675 link->frame_wanted_out = 0;
1676 link->frame_blocked_in = 0;
1677 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1678 while (ff_framequeue_queued_frames(&link->fifo)) {
1679 AVFrame *frame = ff_framequeue_take(&link->fifo);
1680 av_frame_free(&frame);
1682 if (!link->status_in)
1683 link->status_in = status;
1686 int ff_outlink_get_status(AVFilterLink *link)
1688 return link->status_in;
1691 const AVClass *avfilter_get_class(void)
1693 return &avfilter_class;