3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/atomic.h"
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/buffer.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/common.h"
28 #include "libavutil/eval.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/rational.h"
35 #include "libavutil/samplefmt.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Exported version string: embeds the FFmpeg version in the binary so that
 * external tools can identify which FFmpeg this libavfilter was built from. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log the main properties of frame 'ref': buffer/data pointers,
 * linesizes, pts/pos, aspect ratio, dimensions, interlacing flag and picture
 * type, plus audio fields when nb_samples is set.  'end' appends a newline.
 * NOTE(review): this extract is missing several original lines (braces and
 * some ff_tlog arguments); visible lines kept verbatim.
 * NOTE(review): the " cl:%"PRId64"d" format appears to contain a stray 'd'
 * (trace output only) — confirm against upstream before changing. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, av_frame_get_pkt_pos(ref));
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Detach the head entry of the filter's pending command queue and free its
 * payload.  NOTE(review): the free of the arg string and of 'c' itself falls
 * on lines elided from this extract. */
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
/* Insert 'newpad' at position idx (clamped to *count) into the pads/links
 * arrays, growing both by one.  Existing entries at and after idx are shifted
 * up, and for each shifted link the pad index stored at byte offset
 * padidx_off inside the link is incremented so it keeps pointing at the
 * right pad.  Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): the 'newpad' parameter and the *pads/*links/(*count)++
 * updates fall on lines elided from this extract. */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Create a link from src's output pad srcpad to dst's input pad dstpad.
 * Fails with EINVAL when a pad index is out of range, a pad is already
 * linked, or the two pads have different media types.  On success the new
 * link is stored in both filters' link arrays and its type/pad pointers and
 * frame FIFO are initialized.  Both filters must belong to the same graph.
 * NOTE(review): the 'link' declaration, src/dst assignments and the final
 * return fall on lines elided from this extract. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Free *link and everything it owns (partial audio buffer, frame FIFO,
 * frame pool), then reset the caller's pointer.  NOTE(review): the NULL
 * check and the av_freep(link) fall on lines elided from this extract. */
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
186 int avfilter_link_get_channels(AVFilterLink *link)
188 return link->channels;
191 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
193 filter->ready = FFMAX(filter->ready, priority);
197 * Clear frame_blocked_in on all outputs.
198 * This is necessary whenever something changes on input.
200 static void filter_unblock(AVFilterContext *filter)
204 for (i = 0; i < filter->nb_outputs; i++)
205 filter->outputs[i]->frame_blocked_in = 0;
/* Set 'status' (e.g. AVERROR_EOF) on the input side of the link, with the
 * timestamp of the change.  Idempotent for the same status; asserts the
 * status was not already set to something else.  Clears pending frame
 * requests/blocks and wakes the destination filter. */
209 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
211 if (link->status_in == status)
213 av_assert0(!link->status_in);
214 link->status_in = status;
215 link->status_in_pts = pts;
216 link->frame_wanted_out = 0;
217 link->frame_blocked_in = 0;
218 filter_unblock(link->dst);
219 ff_filter_set_ready(link->dst, 200);
/* Set 'status' on the output side of the link (the change is considered to
 * have already happened).  Must not be called while a frame is still wanted
 * or a status is already set.  Updates the link's current pts when given,
 * unblocks the destination's outputs and wakes the source filter. */
222 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
224 av_assert0(!link->frame_wanted_out);
225 av_assert0(!link->status_out);
226 link->status_out = status;
227 if (pts != AV_NOPTS_VALUE)
228 ff_update_link_current_pts(link, pts);
229 filter_unblock(link->dst);
230 ff_filter_set_ready(link->src, 200);
233 void avfilter_link_set_closed(AVFilterLink *link, int closed)
235 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Splice 'filt' into an existing link: the old link now feeds filt's input
 * pad filt_srcpad_idx, and a new link connects filt's output pad
 * filt_dstpad_idx to the old destination.  Any format/samplerate/channel
 * layout references already negotiated on the link are moved to the new
 * downstream link so negotiation state is preserved.
 * NOTE(review): the 'ret' declaration, error return and final return fall on
 * lines elided from this extract. */
238 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
239 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
242 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
244 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
245 "between the filter '%s' and the filter '%s'\n",
246 filt->name, link->src->name, link->dst->name);
248 link->dst->inputs[dstpad_idx] = NULL;
249 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
250 /* failed to link output filter to new filter */
251 link->dst->inputs[dstpad_idx] = link;
255 /* re-hookup the link to the new destination filter we inserted */
257 link->dstpad = &filt->input_pads[filt_srcpad_idx];
258 filt->inputs[filt_srcpad_idx] = link;
260 /* if any information on supported media formats already exists on the
261 * link, we need to preserve that */
262 if (link->out_formats)
263 ff_formats_changeref(&link->out_formats,
264 &filt->outputs[filt_dstpad_idx]->out_formats);
265 if (link->out_samplerates)
266 ff_formats_changeref(&link->out_samplerates,
267 &filt->outputs[filt_dstpad_idx]->out_samplerates);
268 if (link->out_channel_layouts)
269 ff_channel_layouts_changeref(&link->out_channel_layouts,
270 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Recursively configure all input links of 'filter': walks upstream
 * (depth-first via link->src), calls the source pad's config_props() when
 * present, fills in defaults inherited from the first upstream input
 * (time base, SAR, frame rate, hw_frames_ctx), validates that video sources
 * set w/h, then calls the destination pad's config_props().  init_state on
 * each link guards against re-entry and detects circular chains.
 * NOTE(review): several lines (case labels for AVLINK_INIT/AVLINK_UNINIT,
 * some error returns and closing braces) are elided from this extract;
 * visible lines kept verbatim. */
275 int avfilter_config_links(AVFilterContext *filter)
277 int (*config_link)(AVFilterLink *);
281 for (i = 0; i < filter->nb_inputs; i ++) {
282 AVFilterLink *link = filter->inputs[i];
283 AVFilterLink *inlink;
286 if (!link->src || !link->dst) {
287 av_log(filter, AV_LOG_ERROR,
288 "Not all input and output are properly linked (%d).\n", i);
289 return AVERROR(EINVAL);
292 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
294 link->current_pts_us = AV_NOPTS_VALUE;
296 switch (link->init_state) {
299 case AVLINK_STARTINIT:
300 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
303 link->init_state = AVLINK_STARTINIT;
305 if ((ret = avfilter_config_links(link->src)) < 0)
308 if (!(config_link = link->srcpad->config_props)) {
309 if (link->src->nb_inputs != 1) {
310 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
311 "with more than one input "
312 "must set config_props() "
313 "callbacks on all outputs\n");
314 return AVERROR(EINVAL);
316 } else if ((ret = config_link(link)) < 0) {
317 av_log(link->src, AV_LOG_ERROR,
318 "Failed to configure output pad on %s\n",
323 switch (link->type) {
324 case AVMEDIA_TYPE_VIDEO:
325 if (!link->time_base.num && !link->time_base.den)
326 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
328 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
329 link->sample_aspect_ratio = inlink ?
330 inlink->sample_aspect_ratio : (AVRational){1,1};
333 if (!link->frame_rate.num && !link->frame_rate.den)
334 link->frame_rate = inlink->frame_rate;
339 } else if (!link->w || !link->h) {
340 av_log(link->src, AV_LOG_ERROR,
341 "Video source filters must set their output link's "
342 "width and height\n");
343 return AVERROR(EINVAL);
347 case AVMEDIA_TYPE_AUDIO:
349 if (!link->time_base.num && !link->time_base.den)
350 link->time_base = inlink->time_base;
353 if (!link->time_base.num && !link->time_base.den)
354 link->time_base = (AVRational) {1, link->sample_rate};
357 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
358 !link->hw_frames_ctx) {
359 AVHWFramesContext *input_ctx = (AVHWFramesContext*)link->src->inputs[0]->hw_frames_ctx->data;
361 if (input_ctx->format == link->format) {
362 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
363 if (!link->hw_frames_ctx)
364 return AVERROR(ENOMEM);
368 if ((config_link = link->dstpad->config_props))
369 if ((ret = config_link(link)) < 0) {
370 av_log(link->dst, AV_LOG_ERROR,
371 "Failed to configure input pad on %s\n",
376 link->init_state = AVLINK_INIT;
/* Trace-log the main properties of a link: dimensions and pixel format for
 * video, sample rate / channel layout / sample format for audio, plus the
 * names of the filters at both ends.  'end' appends a newline.
 * NOTE(review): the ff_tlog() call heads, the audio 'else' branch opener and
 * the buf declaration are on lines elided from this extract. */
383 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
385 if (link->type == AVMEDIA_TYPE_VIDEO) {
387 "link[%p s:%dx%d fmt:%s %s->%s]%s",
388 link, link->w, link->h,
389 av_get_pix_fmt_name(link->format),
390 link->src ? link->src->filter->name : "",
391 link->dst ? link->dst->filter->name : "",
395 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
398 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399 link, (int)link->sample_rate, buf,
400 av_get_sample_fmt_name(link->format),
401 link->src ? link->src->filter->name : "",
402 link->dst ? link->dst->filter->name : "",
/* Request a frame on 'link' (legacy, non-activate filters only).  If an
 * output status is already set it is returned; if the input side has
 * reached a status and no frames are queued, the status change is
 * acknowledged on the output side.  Otherwise frame_wanted_out is raised and
 * the source filter is woken. */
407 int ff_request_frame(AVFilterLink *link)
409 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
411 av_assert1(!link->dst->filter->activate);
412 if (link->status_out)
413 return link->status_out;
414 if (link->status_in) {
415 if (ff_framequeue_queued_frames(&link->fifo)) {
416 av_assert1(!link->frame_wanted_out);
417 av_assert1(link->dst->ready >= 300);
420 /* Acknowledge status change. Filters using ff_request_frame() will
421 handle the change automatically. Filters can also check the
422 status directly but none do yet. */
423 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
424 return link->status_out;
427 link->frame_wanted_out = 1;
428 ff_filter_set_ready(link->src, 100);
/* Forward a frame request to the filter feeding 'link': call the source
 * pad's request_frame() when present, otherwise propagate the request to the
 * source filter's first input.  The link is pre-marked blocked; any error
 * other than EAGAIN (or a repeat of the current status) becomes the link's
 * input status.  NOTE(review): the EOF-to-0 translation after the AVERROR_EOF
 * check falls on a line elided from this extract. */
432 static int ff_request_frame_to_filter(AVFilterLink *link)
436 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
437 /* Assume the filter is blocked, let the method clear it if not */
438 link->frame_blocked_in = 1;
439 if (link->srcpad->request_frame)
440 ret = link->srcpad->request_frame(link);
441 else if (link->src->inputs[0])
442 ret = ff_request_frame(link->src->inputs[0]);
444 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
445 ff_avfilter_link_set_in_status(link, ret, AV_NOPTS_VALUE);
446 if (ret == AVERROR_EOF)
/* Poll how many frames are available on 'link': delegate to the source
 * pad's poll_frame() when present, otherwise recurse over all of the source
 * filter's inputs and take the minimum.  NOTE(review): the 'val' declaration
 * and the final return of 'min' fall on lines elided from this extract. */
452 int ff_poll_frame(AVFilterLink *link)
454 int i, min = INT_MAX;
456 if (link->srcpad->poll_frame)
457 return link->srcpad->poll_frame(link);
459 for (i = 0; i < link->src->nb_inputs; i++) {
461 if (!link->src->inputs[i])
462 return AVERROR(EINVAL);
463 val = ff_poll_frame(link->src->inputs[i]);
464 min = FFMIN(min, val);
/* Names of the variables available in the timeline 'enable' expression
 * (paired with the VAR_* enum used to index ctx->var_values).
 * NOTE(review): the array entries are on lines elided from this extract. */
470 static const char *const var_names[] = {
/* Parse 'expr' as the timeline 'enable' expression for ctx: rejects filters
 * without AVFILTER_FLAG_SUPPORT_TIMELINE, lazily allocates the VAR_VARS_NB
 * variable array, parses into ctx->enable and replaces ctx->enable_str with
 * a duplicate of the expression.  NOTE(review): the expr_dup/ret
 * declarations, the freeing of 'old' and the error-path cleanup fall on
 * lines elided from this extract. */
488 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
492 AVExpr *old = ctx->enable;
494 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
495 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
496 "with filter '%s'\n", ctx->filter->name);
497 return AVERROR_PATCHWELCOME;
500 expr_dup = av_strdup(expr);
502 return AVERROR(ENOMEM);
504 if (!ctx->var_values) {
505 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
506 if (!ctx->var_values) {
508 return AVERROR(ENOMEM);
512 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
513 NULL, NULL, NULL, NULL, 0, ctx->priv);
515 av_log(ctx->priv, AV_LOG_ERROR,
516 "Error when evaluating the expression '%s' for enable\n",
523 av_free(ctx->enable_str);
524 ctx->enable_str = expr_dup;
528 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
530 if (pts == AV_NOPTS_VALUE)
532 link->current_pts = pts;
533 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
534 /* TODO use duration */
535 if (link->graph && link->age_index >= 0)
536 ff_avfilter_graph_update_heap(link->graph, link);
/* Dispatch a runtime command to a filter: "ping" answers with a pong string
 * (into res, or a local buffer that is logged when res is NULL), "enable"
 * re-parses the timeline expression, anything else goes to the filter's own
 * process_command() callback; ENOSYS when unsupported.
 * NOTE(review): the res-NULL fallback assignment and the return 0 of the
 * ping branch fall on lines elided from this extract. */
539 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
541 if(!strcmp(cmd, "ping")){
542 char local_res[256] = {0};
546 res_len = sizeof(local_res);
548 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
549 if (res == local_res)
550 av_log(filter, AV_LOG_INFO, "%s", res);
552 }else if(!strcmp(cmd, "enable")) {
553 return set_enable_expr(filter, arg);
554 }else if(filter->filter->process_command) {
555 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
557 return AVERROR(ENOSYS);
/* Global singly-linked registry of filters: head pointer, and a pointer to
 * the tail's 'next' slot so registration appends in O(1). */
560 static AVFilter *first_filter;
561 static AVFilter **last_filter = &first_filter;
/* Look up a registered filter by name via linear scan of the registry.
 * The non-const return type is kept for ABI compatibility (see
 * FF_API_NOCONST_GET_NAME).  NOTE(review): the const-qualified prototype
 * variant and the NULL-name guard fall on lines elided from this extract. */
563 #if !FF_API_NOCONST_GET_NAME
566 AVFilter *avfilter_get_by_name(const char *name)
568 const AVFilter *f = NULL;
573 while ((f = avfilter_next(f)))
574 if (!strcmp(f->name, name))
575 return (AVFilter *)f;
/* Append 'filter' to the global registry.  The lock-free CAS loop retries
 * from the (possibly advanced) tail slot until the append succeeds, making
 * concurrent registration safe. */
580 int avfilter_register(AVFilter *filter)
582 AVFilter **f = last_filter;
584 /* the filter must select generic or internal exclusively */
585 av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
589 while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
591 last_filter = &filter->next;
596 const AVFilter *avfilter_next(const AVFilter *prev)
598 return prev ? prev->next : first_filter;
/* Deprecated registry iteration API (pointer-to-pointer variant) and the
 * no-op avfilter_uninit(), both kept only under FF_API_OLD_FILTER_REGISTER.
 * NOTE(review): the bodies' braces and the #endif fall on lines elided from
 * this extract. */
601 #if FF_API_OLD_FILTER_REGISTER
602 AVFilter **av_filter_next(AVFilter **filter)
604 return filter ? &(*filter)->next : &first_filter;
607 void avfilter_uninit(void)
/* Count the pads in a NULL-name-terminated AVFilterPad array.
 * NOTE(review): the NULL-array guard, the pads advance inside the loop and
 * the return of 'count' fall on lines elided from this extract — presumably
 * the loop steps 'pads' each iteration; confirm against upstream. */
612 int avfilter_pad_count(const AVFilterPad *pads)
619 for (count = 0; pads->name; count++)
624 static const char *default_filter_name(void *filter_ctx)
626 AVFilterContext *ctx = filter_ctx;
627 return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the filter's private context (when it
 * has a priv_class) as the single option child.  NOTE(review): the returned
 * value and the terminating NULL return fall on lines elided from this
 * extract. */
630 static void *filter_child_next(void *obj, void *prev)
632 AVFilterContext *ctx = obj;
633 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
/* AVClass child_class_next callback: given the previous priv_class (or
 * NULL), return the priv_class of the next registered filter that has one.
 * Used so av_opt can enumerate every filter's private options.
 * NOTE(review): the loop-body breaks/advances and the final NULL return fall
 * on lines elided from this extract. */
638 static const AVClass *filter_child_class_next(const AVClass *prev)
640 const AVFilter *f = NULL;
642 /* find the filter that corresponds to prev */
643 while (prev && (f = avfilter_next(f)))
644 if (f->priv_class == prev)
647 /* could not find filter corresponding to prev */
651 /* find next filter with specific options */
652 while ((f = avfilter_next(f)))
654 return f->priv_class;
/* Generic options shared by every AVFilterContext: threading type, timeline
 * 'enable' expression and thread count.  NOTE(review): the terminating
 * { NULL } sentinel falls on a line elided from this extract. */
659 #define OFFSET(x) offsetof(AVFilterContext, x)
660 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
661 static const AVOption avfilter_options[] = {
662 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
663 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
664 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
665 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
666 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
667 { .i64 = 0 }, 0, INT_MAX, FLAGS },
/* The AVClass shared by all filter instances; wires in the naming, option
 * and child-class callbacks defined above. */
671 static const AVClass avfilter_class = {
672 .class_name = "AVFilter",
673 .item_name = default_filter_name,
674 .version = LIBAVUTIL_VERSION_INT,
675 .category = AV_CLASS_CATEGORY_FILTER,
676 .child_next = filter_child_next,
677 .child_class_next = filter_child_class_next,
678 .option = avfilter_options,
/* Fallback execute callback used when slice threading is unavailable: run
 * the jobs serially.  NOTE(review): the storing of 'r' into ret[i] and the
 * return fall on lines elided from this extract. */
681 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
682 int *ret, int nb_jobs)
686 for (i = 0; i < nb_jobs; i++) {
687 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for 'filter' with
 * instance name 'inst_name': private data (with option defaults applied when
 * a priv_class exists), internal state with the serial execute callback, and
 * the input/output pad and link arrays copied/sized from the filter
 * definition.  Returns NULL on allocation failure, after rolling everything
 * back (the 'err' label section at the end).
 * NOTE(review): the NULL-filter guard, several goto-err branches and the
 * return statements fall on lines elided from this extract. */
694 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
696 AVFilterContext *ret;
701 ret = av_mallocz(sizeof(AVFilterContext));
705 ret->av_class = &avfilter_class;
706 ret->filter = filter;
707 ret->name = inst_name ? av_strdup(inst_name) : NULL;
708 if (filter->priv_size) {
709 ret->priv = av_mallocz(filter->priv_size);
714 av_opt_set_defaults(ret);
715 if (filter->priv_class) {
716 *(const AVClass**)ret->priv = filter->priv_class;
717 av_opt_set_defaults(ret->priv);
720 ret->internal = av_mallocz(sizeof(*ret->internal));
723 ret->internal->execute = default_execute;
725 ret->nb_inputs = avfilter_pad_count(filter->inputs);
726 if (ret->nb_inputs ) {
727 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
728 if (!ret->input_pads)
730 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
731 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
736 ret->nb_outputs = avfilter_pad_count(filter->outputs);
737 if (ret->nb_outputs) {
738 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
739 if (!ret->output_pads)
741 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
742 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
750 av_freep(&ret->inputs);
751 av_freep(&ret->input_pads);
753 av_freep(&ret->outputs);
754 av_freep(&ret->output_pads);
756 av_freep(&ret->priv);
757 av_freep(&ret->internal);
/* Deprecated public wrapper around ff_filter_alloc(), kept under
 * FF_API_AVFILTER_OPEN.  NOTE(review): the #endif falls on a line elided
 * from this extract. */
762 #if FF_API_AVFILTER_OPEN
763 int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
765 *filter_ctx = ff_filter_alloc(filter, inst_name);
766 return *filter_ctx ? 0 : AVERROR(ENOMEM);
/* Detach 'link' from both endpoint filters, drop its hardware frames
 * context and all negotiated format/samplerate/channel-layout references,
 * then free the link itself.  NOTE(review): the NULL guards around the
 * src/dst detach fall on lines elided from this extract. */
770 static void free_link(AVFilterLink *link)
776 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
778 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
780 av_buffer_unref(&link->hw_frames_ctx);
782 ff_formats_unref(&link->in_formats);
783 ff_formats_unref(&link->out_formats);
784 ff_formats_unref(&link->in_samplerates);
785 ff_formats_unref(&link->out_samplerates);
786 ff_channel_layouts_unref(&link->in_channel_layouts);
787 ff_channel_layouts_unref(&link->out_channel_layouts);
788 avfilter_link_free(&link);
/* Fully tear down a filter instance: remove it from its graph, run the
 * filter's uninit callback, free all links on both sides, release private
 * options, hw device context, pads, name, the pending command queue, the
 * enable expression state and the internal struct, then the context itself.
 * NOTE(review): the NULL guard and the final av_free(filter) fall on lines
 * elided from this extract. */
791 void avfilter_free(AVFilterContext *filter)
799 ff_filter_graph_remove_filter(filter->graph, filter);
801 if (filter->filter->uninit)
802 filter->filter->uninit(filter);
804 for (i = 0; i < filter->nb_inputs; i++) {
805 free_link(filter->inputs[i]);
807 for (i = 0; i < filter->nb_outputs; i++) {
808 free_link(filter->outputs[i]);
811 if (filter->filter->priv_class)
812 av_opt_free(filter->priv);
814 av_buffer_unref(&filter->hw_device_ctx);
816 av_freep(&filter->name);
817 av_freep(&filter->input_pads);
818 av_freep(&filter->output_pads);
819 av_freep(&filter->inputs);
820 av_freep(&filter->outputs);
821 av_freep(&filter->priv);
822 while(filter->command_queue){
823 ff_command_queue_pop(filter);
826 av_expr_free(filter->enable);
827 filter->enable = NULL;
828 av_freep(&filter->var_values);
829 av_freep(&filter->internal);
833 int ff_filter_get_nb_threads(AVFilterContext *ctx)
835 if (ctx->nb_threads > 0)
836 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
837 return ctx->graph->nb_threads;
/* Parse a filter argument string of "key=value:key=value" pairs (with
 * positional shorthand for the first options) into the filter's generic and
 * private options; unmatched keys are stored into *options for the caller.
 * Finally compiles the 'enable' expression when one was set.
 * NOTE(review): many lines (the args parameter, 'count', the shorthand key
 * selection, loop structure, error returns and frees) are elided from this
 * extract; visible lines kept verbatim. */
840 static int process_options(AVFilterContext *ctx, AVDictionary **options,
843 const AVOption *o = NULL;
845 char *av_uninit(parsed_key), *av_uninit(value);
853 const char *shorthand = NULL;
855 o = av_opt_next(ctx->priv, o);
857 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
863 ret = av_opt_get_key_value(&args, "=", ":",
864 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
865 &parsed_key, &value);
867 if (ret == AVERROR(EINVAL))
868 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
870 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
878 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
883 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
885 if (av_opt_find(ctx, key, NULL, 0, 0)) {
886 ret = av_opt_set(ctx, key, value, 0);
893 av_dict_set(options, key, value, 0);
894 if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
895 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
896 if (ret == AVERROR_OPTION_NOT_FOUND)
897 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
910 if (ctx->enable_str) {
911 ret = set_enable_expr(ctx, ctx->enable_str);
/* Deprecated init entry point (FF_API_AVFILTER_INIT_FILTER): 'opaque' is
 * ignored, delegates to avfilter_init_str().  NOTE(review): the #endif falls
 * on a line elided from this extract. */
918 #if FF_API_AVFILTER_INIT_FILTER
919 int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
921 return avfilter_init_str(filter, args);
/* Initialize a filter from an options dictionary: apply generic options,
 * decide whether slice threading can be used (filter flag AND graph setting
 * AND a thread_execute callback present), apply private options, then call
 * the filter's init/init_opaque/init_dict callback (in that precedence).
 * Consumed entries are removed from *options.
 * NOTE(review): the 'ret' declaration, some early returns and the final
 * return fall on lines elided from this extract. */
925 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
929 ret = av_opt_set_dict(ctx, options);
931 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
935 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
936 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
937 ctx->graph->internal->thread_execute) {
938 ctx->thread_type = AVFILTER_THREAD_SLICE;
939 ctx->internal->execute = ctx->graph->internal->thread_execute;
941 ctx->thread_type = 0;
944 if (ctx->filter->priv_class) {
945 ret = av_opt_set_dict(ctx->priv, options);
947 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
952 if (ctx->filter->init_opaque)
953 ret = ctx->filter->init_opaque(ctx, NULL);
954 else if (ctx->filter->init)
955 ret = ctx->filter->init(ctx);
956 else if (ctx->filter->init_dict)
957 ret = ctx->filter->init_dict(ctx, options);
/* Initialize a filter from an option string.  Contains a large
 * compatibility shim (FF_API_OLD_FILTER_OPTS*) for a handful of filters
 * whose old syntax used ':' as the list separator: leading positional
 * fields are skipped, then remaining ':' are rewritten to '|' (with special
 * handling of aevalsrc's channel expressions and '::' separators) before the
 * normal option parsing.  Leftover dictionary entries after
 * avfilter_init_dict() are reported as unknown options.
 * NOTE(review): numerous lines (the args NULL checks, 'copy'/'p' handling,
 * frees and gotos) are elided from this extract; visible lines kept
 * verbatim. */
962 int avfilter_init_str(AVFilterContext *filter, const char *args)
964 AVDictionary *options = NULL;
965 AVDictionaryEntry *e;
969 if (!filter->filter->priv_class) {
970 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
971 "options, but options were provided: %s.\n", args);
972 return AVERROR(EINVAL);
975 #if FF_API_OLD_FILTER_OPTS || FF_API_OLD_FILTER_OPTS_ERROR
976 if ( !strcmp(filter->filter->name, "format") ||
977 !strcmp(filter->filter->name, "noformat") ||
978 !strcmp(filter->filter->name, "frei0r") ||
979 !strcmp(filter->filter->name, "frei0r_src") ||
980 !strcmp(filter->filter->name, "ocv") ||
981 !strcmp(filter->filter->name, "pan") ||
982 !strcmp(filter->filter->name, "pp") ||
983 !strcmp(filter->filter->name, "aevalsrc")) {
984 /* a hack for compatibility with the old syntax
985 * replace colons with |s */
986 char *copy = av_strdup(args);
988 int nb_leading = 0; // number of leading colons to skip
992 ret = AVERROR(ENOMEM);
996 if (!strcmp(filter->filter->name, "frei0r") ||
997 !strcmp(filter->filter->name, "ocv"))
999 else if (!strcmp(filter->filter->name, "frei0r_src"))
1002 while (nb_leading--) {
1005 p = copy + strlen(copy);
1011 deprecated = strchr(p, ':') != NULL;
1013 if (!strcmp(filter->filter->name, "aevalsrc")) {
1015 while ((p = strchr(p, ':')) && p[1] != ':') {
1016 const char *epos = strchr(p + 1, '=');
1017 const char *spos = strchr(p + 1, ':');
1018 const int next_token_is_opt = epos && (!spos || epos < spos);
1019 if (next_token_is_opt) {
1023 /* next token does not contain a '=', assume a channel expression */
1027 if (p && *p == ':') { // double sep '::' found
1029 memmove(p, p + 1, strlen(p));
1032 while ((p = strchr(p, ':')))
1035 #if FF_API_OLD_FILTER_OPTS
1037 av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
1038 "'|' to separate the list items.\n");
1040 av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
1041 ret = process_options(filter, &options, copy);
1044 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1045 "'|' to separate the list items ('%s' instead of '%s')\n",
1047 ret = AVERROR(EINVAL);
1049 ret = process_options(filter, &options, copy);
1059 ret = process_options(filter, &options, args);
1065 ret = avfilter_init_dict(filter, &options);
1069 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1070 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1071 ret = AVERROR_OPTION_NOT_FOUND;
1076 av_dict_free(&options);
1081 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1083 return pads[pad_idx].name;
1086 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1088 return pads[pad_idx].type;
1091 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1093 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to the destination pad's filter_frame callback
 * (defaulting to pass-through): makes the frame writable when the pad
 * requires it, processes queued commands, evaluates the timeline 'enable'
 * expression (disabled filters fall back to pass-through when they support
 * generic timeline), then counts the frame out.  The trailing
 * av_frame_free() is the error path.  NOTE(review): the 'ret' declaration,
 * some returns and the fail label fall on lines elided from this extract. */
1096 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1098 int (*filter_frame)(AVFilterLink *, AVFrame *);
1099 AVFilterContext *dstctx = link->dst;
1100 AVFilterPad *dst = link->dstpad;
1103 if (!(filter_frame = dst->filter_frame))
1104 filter_frame = default_filter_frame;
1106 if (dst->needs_writable) {
1107 ret = ff_inlink_make_frame_writable(link, &frame);
1112 ff_inlink_process_commands(link, frame);
1113 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1115 if (dstctx->is_disabled &&
1116 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1117 filter_frame = default_filter_frame;
1118 ret = filter_frame(link, frame);
1119 link->frame_count_out++;
1123 av_frame_free(&frame);
/* Queue a frame on 'link' for the destination filter.  Performs consistency
 * checks first: for video, asserts format/size match the link (except for a
 * few filters that legitimately change them); for audio, rejects format,
 * channel count/layout and sample rate changes (the error branch at the end
 * frees the frame and returns AVERROR_PATCHWELCOME).  On success the frame
 * goes into the link FIFO, in/out-blocking state is cleared and the
 * destination is made ready.  NOTE(review): the audio 'else' opener, goto
 * error statements and some returns fall on lines elided from this
 * extract. */
1127 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1130 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1132 /* Consistency checks */
1133 if (link->type == AVMEDIA_TYPE_VIDEO) {
1134 if (strcmp(link->dst->filter->name, "buffersink") &&
1135 strcmp(link->dst->filter->name, "format") &&
1136 strcmp(link->dst->filter->name, "idet") &&
1137 strcmp(link->dst->filter->name, "null") &&
1138 strcmp(link->dst->filter->name, "scale")) {
1139 av_assert1(frame->format == link->format);
1140 av_assert1(frame->width == link->w);
1141 av_assert1(frame->height == link->h);
1144 if (frame->format != link->format) {
1145 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1148 if (av_frame_get_channels(frame) != link->channels) {
1149 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1152 if (frame->channel_layout != link->channel_layout) {
1153 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1156 if (frame->sample_rate != link->sample_rate) {
1157 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1162 link->frame_blocked_in = link->frame_wanted_out = 0;
1163 link->frame_count_in++;
1164 filter_unblock(link->dst);
1165 ret = ff_framequeue_add(&link->fifo, frame);
1167 av_frame_free(&frame);
1170 ff_filter_set_ready(link->dst, 300);
1174 av_frame_free(&frame);
1175 return AVERROR_PATCHWELCOME;
1178 static int samples_ready(AVFilterLink *link, unsigned min)
1180 return ff_framequeue_queued_frames(&link->fifo) &&
1181 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into
 * *rframe.  Fast path: if the first queued frame alone fits the window it
 * is returned as-is.  Otherwise whole frames are gathered (stopping before
 * exceeding max once min is reached), copied into one freshly allocated
 * buffer with props from the first frame, and a partial tail frame is
 * consumed via ff_framequeue_skip_samples().
 * NOTE(review): the early returns, the pts adjustment of the remaining
 * partial frame and the final *rframe assignment fall on lines elided from
 * this extract. */
1185 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1188 AVFrame *frame0, *frame, *buf;
1189 unsigned nb_samples, nb_frames, i, p;
1192 /* Note: this function relies on no format changes and must only be
1193 called with enough samples. */
1194 av_assert1(samples_ready(link, link->min_samples));
1195 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1196 if (frame->nb_samples >= min && frame->nb_samples < max) {
1197 *rframe = ff_framequeue_take(&link->fifo);
1203 if (nb_samples + frame->nb_samples > max) {
1204 if (nb_samples < min)
1208 nb_samples += frame->nb_samples;
1210 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1212 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1215 buf = ff_get_audio_buffer(link, nb_samples);
1217 return AVERROR(ENOMEM);
1218 ret = av_frame_copy_props(buf, frame0);
1220 av_frame_free(&buf);
1223 buf->pts = frame0->pts;
1226 for (i = 0; i < nb_frames; i++) {
1227 frame = ff_framequeue_take(&link->fifo);
1228 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1229 frame->nb_samples, link->channels, link->format);
1230 p += frame->nb_samples;
1231 av_frame_free(&frame);
1233 if (p < nb_samples) {
1234 unsigned n = nb_samples - p;
1235 frame = ff_framequeue_peek(&link->fifo, 0);
1236 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1237 link->channels, link->format);
1238 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Pop one frame (or a min/max-sized batch of audio samples) from the link
 * FIFO and feed it to the destination filter via ff_filter_frame_framed().
 * frame_count_out is decremented first because the framed path re-increments
 * it and legacy filter_frame callbacks expect the pre-frame value.  Errors
 * that differ from the current output status become the new output status.
 * Finally the destination is re-marked ready to drain any remaining frames.
 * NOTE(review): the 'ret' declaration, the consume-error return and the
 * final return fall on lines elided from this extract. */
1245 static int ff_filter_frame_to_filter(AVFilterLink *link)
1247 AVFrame *frame = NULL;
1248 AVFilterContext *dst = link->dst;
1251 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1252 ret = link->min_samples ?
1253 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1254 ff_inlink_consume_frame(link, &frame);
1260 /* The filter will soon have received a new frame, that may allow it to
1261 produce one or more: unblock its outputs. */
1262 filter_unblock(dst);
1263 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1264 before the frame; ff_filter_frame_framed() will re-increment it. */
1265 link->frame_count_out--;
1266 ret = ff_filter_frame_framed(link, frame);
1267 if (ret < 0 && ret != link->status_out) {
1268 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1270 /* Run once again, to see if several frames were available, or if
1271 the input status has also changed, or any other reason. */
1272 ff_filter_set_ready(dst, 300);
/* Propagate an input status change (EOF/error on 'in') through 'filter':
 * repeatedly request frames on each not-yet-closed output until the input
 * status is acknowledged; if every output is already closed the input is
 * closed directly.  The 'progress' counter detects a full unproductive pass
 * over the outputs.  NOTE(review): the ret handling inside the loop, the
 * progress reset and the returns fall on lines elided from this extract. */
1277 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1279 unsigned out = 0, progress = 0;
1282 av_assert0(!in->status_out);
1283 if (!filter->nb_outputs) {
1284 /* not necessary with the current API and sinks */
1287 while (!in->status_out) {
1288 if (!filter->outputs[out]->status_in) {
1290 ret = ff_request_frame_to_filter(filter->outputs[out]);
1294 if (++out == filter->nb_outputs) {
1296 /* Every output already closed: input no longer interesting
1297 (example: overlay in shortest mode, other input closed). */
1298 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1305 ff_filter_set_ready(filter, 200);
/* FFERROR_NOT_READY: internal sentinel meaning "activated but nothing to do
 * right now". */
1309 #define FFERROR_NOT_READY FFERRTAG('N','R','D','Y')
/* Default activate callback for legacy filters, trying in priority order:
 * (1) deliver a queued frame from an input with enough samples, (2) forward
 * a pending input status change, (3) forward a frame request from a wanted,
 * unblocked output.  Returns FFERROR_NOT_READY when none applies. */
1311 static int ff_filter_activate_default(AVFilterContext *filter)
1315 for (i = 0; i < filter->nb_inputs; i++) {
1316 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1317 return ff_filter_frame_to_filter(filter->inputs[i]);
1320 for (i = 0; i < filter->nb_inputs; i++) {
1321 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1322 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1323 return forward_status_change(filter, filter->inputs[i]);
1326 for (i = 0; i < filter->nb_outputs; i++) {
1327 if (filter->outputs[i]->frame_wanted_out &&
1328 !filter->outputs[i]->frame_blocked_in) {
1329 return ff_request_frame_to_filter(filter->outputs[i]);
1332 return FFERROR_NOT_READY;
1336 Filter scheduling and activation
1338 When a filter is activated, it must:
1339 - if possible, output a frame;
1340 - else, if relevant, forward the input status change;
1341 - else, check outputs for wanted frames and forward the requests.
1343 The following AVFilterLink fields are used for activation:
1347 This field indicates if a frame is needed on this input of the
1348 destination filter. A positive value indicates that a frame is needed
1349 to process queued frames or internal data or to satisfy the
1350 application; a zero value indicates that a frame is not especially
1351 needed but could be processed anyway; a negative value indicates that a
1352 frame would just be queued.
1354 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1355 when requested by the application through a specific API or when it is
1356 set on one of the outputs.
1358 It is cleared when a frame is sent from the source using
1361 It is also cleared when a status change is sent from the source using
1362 ff_avfilter_link_set_in_status().
1366 This field means that the source filter cannot generate a frame as is.
1367 Its goal is to avoid repeatedly calling the request_frame() method on
1370 It is set by the framework on all outputs of a filter before activating it.
1372 It is automatically cleared by ff_filter_frame().
1374 It is also automatically cleared by ff_avfilter_link_set_in_status().
1376 It is also cleared on all outputs (using filter_unblock()) when
1377 something happens on an input: processing a frame or changing the
1382 Contains the frames queued on a filter input. If it contains frames and
1383 frame_wanted_out is not set, then the filter can be activated. If that
1384 results in the filter not being able to use these frames, the filter must set
1385 frame_wanted_out to ask for more frames.
1387 - status_in and status_in_pts:
1389 Status (EOF or error code) of the link and timestamp of the status
1390 change (in link time base, same as frames) as seen from the input of
1391 the link. The status change is considered happening after the frames
1394 It is set by the source filter using ff_avfilter_link_set_in_status().
1398 Status of the link as seen from the output of the link. The status
1399 change is considered to have already happened.
1401 It is set by the destination filter using
1402 ff_avfilter_link_set_out_status().
1404 Filters are activated according to the ready field, set using the
1405 ff_filter_set_ready(). Eventually, a priority queue will be used.
1406 ff_filter_set_ready() is called whenever anything could cause progress to
1407 be possible. Marking a filter ready when it is not is not a problem,
1408 except for the small overhead it causes.
1410 Conditions that cause a filter to be marked ready are:
1412 - frames added on an input link;
1414 - changes in the input or output status of an input link;
1416 - requests for a frame on an output link;
1418 - after any actual processing using the legacy methods (filter_frame(),
1419 and request_frame() to acknowledge status changes), to run once more
1420 and check if enough input was present for several frames.
1422 Examples of scenarios to consider:
1424 - buffersrc: activate if frame_wanted_out to notify the application;
1425 activate when the application adds a frame to push it immediately.
1427 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1429 - concat (not at stitch points): can process a frame on any output.
1430 Activate if frame_wanted_out on output to forward on the corresponding
1431 input. Activate when a frame is present on input to process it
1434 - framesync: needs at least one frame on each input; extra frames on the
1435 wrong input will accumulate. When a frame is first added on one input,
1436 set frame_wanted_out<0 on it to avoid getting more (would trigger
1437 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1439 Activation of old filters:
1441 In order to activate a filter implementing the legacy filter_frame() and
1442 request_frame() methods, perform the first possible of the following
1445 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1446 frame and call filter_frame().
1448 Rationale: filter frames as soon as possible instead of leaving them
1449 queued; frame_wanted_out < 0 is not possible since the old API does not
1450 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1451 when min_samples > 0 and there are not enough samples queued.
1453 - If an input has status_in set but not status_out, try to call
1454 request_frame() on one of the outputs in the hope that it will trigger
1455 request_frame() on the input with status_in and acknowledge it. This is
1456 awkward and fragile, filters with several inputs or outputs should be
1457 updated to direct activation as soon as possible.
1459 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1462 Rationale: checking frame_blocked_in is necessary to avoid requesting
1463 repeatedly on a blocked input if another is not blocked (example:
1464 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1466 TODO: respect needs_fifo and remove auto-inserted fifos.
/* Activate a filter: dispatch to its activate() callback if it has one,
 * otherwise fall back to the default legacy-API activation.
 * FFERROR_NOT_READY ("nothing to do") is handled specially.
 * NOTE(review): partial view — some body lines are not visible here. */
1470 int ff_filter_activate(AVFilterContext *filter)
1474 /* Generic timeline support is not yet implemented but should be easy */
1475 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1476 filter->filter->activate));
1478 ret = filter->filter->activate ? filter->filter->activate(filter) :
1479 ff_filter_activate_default(filter);
1480 if (ret == FFERROR_NOT_READY)
/* Report and acknowledge the status of an input link to the destination
 * filter. Returns (via *rstatus) 0 while frames are still queued or no
 * status change is pending; otherwise copies status_in to status_out
 * (acknowledging it) and updates the link's current_pts.
 * *rpts always receives the link's current pts.
 * NOTE(review): partial view — the final return is not visible here. */
1485 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1487 *rpts = link->current_pts;
/* Queued frames must be consumed before the status change applies. */
1488 if (ff_framequeue_queued_frames(&link->fifo))
1489 return *rstatus = 0;
/* Already acknowledged earlier: just report it again. */
1490 if (link->status_out)
1491 return *rstatus = link->status_out;
1492 if (!link->status_in)
1493 return *rstatus = 0;
/* Acknowledge the pending status change now. */
1494 *rstatus = link->status_out = link->status_in;
1495 ff_update_link_current_pts(link, link->status_in_pts);
1496 *rpts = link->current_pts;
/* Test if at least one frame is queued on the input link's fifo. */
1500 int ff_inlink_check_available_frame(AVFilterLink *link)
1502 return ff_framequeue_queued_frames(&link->fifo) > 0;
/* Test if at least min samples are queued on the input link. If the link
 * has a pending input status change (e.g. EOF), any nonzero amount of
 * queued samples counts as available. */
1505 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1507 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1509 return samples >= min || (link->status_in && samples);
/* Bookkeeping common to consuming a frame from an input link: update the
 * link's current pts, run queued process commands due at this frame,
 * evaluate the timeline enable expression (setting is_disabled on the
 * destination filter), and bump the output frame counter. */
1512 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1514 ff_update_link_current_pts(link, frame->pts);
1515 ff_inlink_process_commands(link, frame);
1516 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1517 link->frame_count_out++;
/* Take the next queued frame from the input link, if any, and perform the
 * consume-side bookkeeping (pts, commands, timeline, frame count).
 * NOTE(review): partial view — the returns and *rframe assignment are not
 * visible here. */
1520 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1525 if (!ff_inlink_check_available_frame(link))
1527 frame = ff_framequeue_take(&link->fifo);
1528 consume_update(link, frame);
/* Take between min and max queued audio samples from the input link.
 * When the input status is set (e.g. EOF), min is clamped to what is
 * actually queued so the tail of the stream can still be consumed.
 * NOTE(review): partial view — parameter list continuation and returns
 * are not visible here. */
1533 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1541 if (!ff_inlink_check_available_samples(link, min))
/* At EOF, accept fewer than min samples rather than stalling. */
1543 if (link->status_in)
1544 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1545 ret = take_samples(link, min, link->max_samples, &frame);
1546 consume_update(link, frame);
/* Ensure the frame in *rframe is writable; if it is not, allocate a new
 * buffer from the link, copy the properties and the data (video or audio),
 * free the original frame and replace *rframe with the copy.
 * Returns AVERROR(EINVAL) for unsupported link types and AVERROR(ENOMEM)
 * on allocation failure.
 * NOTE(review): partial view — breaks, returns and *rframe assignment are
 * not visible here. */
1553 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1555 AVFrame *frame = *rframe;
/* Already writable: nothing to do. */
1559 if (av_frame_is_writable(frame))
1561 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
/* Allocate a replacement buffer of the proper media type. */
1563 switch (link->type) {
1564 case AVMEDIA_TYPE_VIDEO:
1565 out = ff_get_video_buffer(link, link->w, link->h);
1567 case AVMEDIA_TYPE_AUDIO:
1568 out = ff_get_audio_buffer(link, frame->nb_samples);
1571 return AVERROR(EINVAL);
1574 return AVERROR(ENOMEM);
/* Copy metadata/side data, then the payload itself. */
1576 ret = av_frame_copy_props(out, frame);
1578 av_frame_free(&out);
1582 switch (link->type) {
1583 case AVMEDIA_TYPE_VIDEO:
1584 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1585 frame->format, frame->width, frame->height);
1587 case AVMEDIA_TYPE_AUDIO:
1588 av_samples_copy(out->extended_data, frame->extended_data,
1589 0, 0, frame->nb_samples,
1590 av_frame_get_channels(frame),
/* Link type was validated above; other values are unreachable. */
1594 av_assert0(!"reached");
/* Drop the original, non-writable frame. */
1597 av_frame_free(&frame);
/* Execute all queued process commands on the destination filter whose
 * scheduled time is at or before this frame's pts (converted to seconds
 * via the link time base), popping each one after it runs.
 * NOTE(review): partial view — the final return is not visible here. */
1602 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1604 AVFilterCommand *cmd = link->dst->command_queue;
1606 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1607 av_log(link->dst, AV_LOG_DEBUG,
1608 "Processing command time:%f command:%s arg:%s\n",
1609 cmd->time, cmd->command, cmd->arg);
1610 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1611 ff_command_queue_pop(link->dst);
/* Popping may have advanced the queue head; re-read it. */
1612 cmd= link->dst->command_queue;
/* Evaluate the destination filter's timeline "enable" expression at this
 * frame: bind N (output frame count), T (pts in seconds, NaN if unknown),
 * W/H (link dimensions) and POS (packet position, NaN if unknown), then
 * treat |result| >= 0.5 as enabled.
 * NOTE(review): partial view — the early return for a missing enable
 * expression is not visible here. */
1617 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1619 AVFilterContext *dstctx = link->dst;
1620 int64_t pts = frame->pts;
1621 int64_t pos = av_frame_get_pkt_pos(frame);
/* No enable expression set: nothing to evaluate. */
1623 if (!dstctx->enable_str)
1626 dstctx->var_values[VAR_N] = link->frame_count_out;
1627 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1628 dstctx->var_values[VAR_W] = link->w;
1629 dstctx->var_values[VAR_H] = link->h;
1630 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1632 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
/* Mark that a frame is wanted on this input link and schedule the source
 * filter to run. Must not be called once a status change (EOF/error) has
 * been seen or acknowledged on the link. */
1635 void ff_inlink_request_frame(AVFilterLink *link)
1637 av_assert1(!link->status_in);
1638 av_assert1(!link->status_out);
1639 link->frame_wanted_out = 1;
1640 ff_filter_set_ready(link->src, 100);
1643 const AVClass *avfilter_get_class(void)
1645 return &avfilter_class;