3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/atomic.h"
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/buffer.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/common.h"
28 #include "libavutil/eval.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/rational.h"
35 #include "libavutil/samplefmt.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
/* Exported version string so the libavfilter build version can be found in
   the compiled binary (e.g. with strings/grep). */
46 #include "libavutil/ffversion.h"
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log an AVFrame reference: buffer pointers, linesizes and timestamps,
   then video-specific fields (aspect, size, interlacing, picture type) and,
   when nb_samples is set, audio-specific fields.
   NOTE(review): interior lines (braces, ff_tlog() call openers) are missing
   from this excerpt; comments only, code left untouched. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
/* Video fields: aspect ratio, dimensions, interlacing flag, picture type. */
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
/* Audio fields, only printed for frames that carry samples. */
67 if (ref->nb_samples) {
/* NOTE(review): the stray 'd' after PRId64 makes the format "%lldd", so a
   literal 'd' is printed after the channel-layout value — harmless in trace
   output but almost certainly a typo; fix when the full file is available. */
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
/* Library introspection: version, build configuration and license strings. */
77 unsigned avfilter_version(void)
/* FFmpeg-proper builds always have MICRO >= 100 (distinguishes from forks). */
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
/* Skip past the prefix so only the license name is returned; sizeof counts
   the trailing NUL, hence the -1. */
91 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Pop and free the head of the filter's pending-command queue.
   NOTE(review): trailing lines of this function are missing from this view;
   presumably the AVFilterCommand itself (and its arg) is freed there too —
   confirm against the full file. */
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
/* Unlink the head before it is destroyed. */
99 filter->command_queue= c->next;
/* Insert a new pad (and a matching NULL link slot) at position idx in the
   filter's pad/link arrays, shifting existing entries up by one.
   padidx_off is the byte offset of the pad-index field inside AVFilterLink
   (srcpad/dstpad index) so the shifted links can be re-pointed.
   Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
/* Clamp idx so insertion past the end simply appends. */
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
/* Shift the tail one slot up, then drop the new pad into the gap. */
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
/* Every link that moved now sits one pad further along: bump the pad index
   stored inside the link (located at padidx_off bytes into the struct). */
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Create a link from output pad srcpad of src to input pad dstpad of dst.
   Both filters must belong to the same graph, the pads must exist, be
   unconnected and carry the same media type.
   Returns 0 on success, AVERROR(EINVAL) on bad pads / type mismatch,
   AVERROR(ENOMEM) on allocation failure. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
/* Reject out-of-range pad indices and pads that are already connected. */
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
/* One shared AVFilterLink object is referenced from both endpoints. */
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
/* The "unset" format value must be -1 for both pixel and sample formats,
   since link->format is shared between the two media types. */
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Free an AVFilterLink and the resources it owns (partial audio buffer,
   queued frames, frame pool); *link is set to NULL by the missing trailing
   av_freep() — confirm against the full file. */
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
/* Accessor for the negotiated channel count of an (audio) link. */
186 int avfilter_link_get_channels(AVFilterLink *link)
188 return link->channels;
/* Mark a filter as ready for activation; priorities only ever increase,
   the scheduler resets the field when the filter is actually activated. */
191 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
193 filter->ready = FFMAX(filter->ready, priority);
197 * Clear frame_blocked_in on all outputs.
198 * This is necessary whenever something changes on input.
200 static void filter_unblock(AVFilterContext *filter)
204 for (i = 0; i < filter->nb_outputs; i++)
205 filter->outputs[i]->frame_blocked_in = 0;
/* Set the status (EOF or error) on the *input* side of a link, record the
   timestamp of the change, and wake up the destination filter.  Setting the
   same status twice is a no-op; changing an already-set status is a bug. */
209 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
211 if (link->status_in == status)
213 av_assert0(!link->status_in);
214 link->status_in = status;
215 link->status_in_pts = pts;
/* No more frames will be wanted or produced across this link. */
216 link->frame_wanted_out = 0;
217 link->frame_blocked_in = 0;
218 filter_unblock(link->dst);
219 ff_filter_set_ready(link->dst, 200);
/* Set the status on the *output* side of a link (as seen by the destination)
   and wake up the source so it can stop producing. */
222 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
224 av_assert0(!link->frame_wanted_out);
225 av_assert0(!link->status_out);
226 link->status_out = status;
227 if (pts != AV_NOPTS_VALUE)
228 ff_update_link_current_pts(link, pts);
229 filter_unblock(link->dst);
230 ff_filter_set_ready(link->src, 200);
/* Deprecated public wrapper: "closed" maps to an out-status of AVERROR_EOF. */
233 void avfilter_link_set_closed(AVFilterLink *link, int closed)
235 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Splice filter filt into an existing link: the old link is re-pointed to
   filt's input pad filt_srcpad_idx, and a new link is created from filt's
   output pad filt_dstpad_idx to the original destination.  Any format
   negotiation state already attached to the link is moved to the new link
   so negotiation results are preserved. */
238 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
239 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
242 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
244 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
245 "between the filter '%s' and the filter '%s'\n",
246 filt->name, link->src->name, link->dst->name);
/* Temporarily detach the destination so avfilter_link() sees a free pad. */
248 link->dst->inputs[dstpad_idx] = NULL;
249 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
250 /* failed to link output filter to new filter */
251 link->dst->inputs[dstpad_idx] = link;
255 /* re-hookup the link to the new destination filter we inserted */
257 link->dstpad = &filt->input_pads[filt_srcpad_idx];
258 filt->inputs[filt_srcpad_idx] = link;
260 /* if any information on supported media formats already exists on the
261 * link, we need to preserve that */
262 if (link->out_formats)
263 ff_formats_changeref(&link->out_formats,
264 &filt->outputs[filt_dstpad_idx]->out_formats);
265 if (link->out_samplerates)
266 ff_formats_changeref(&link->out_samplerates,
267 &filt->outputs[filt_dstpad_idx]->out_samplerates);
268 if (link->out_channel_layouts)
269 ff_channel_layouts_changeref(&link->out_channel_layouts,
270 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Recursively configure all links feeding this filter, depth-first from the
   sources: run each link's config_props() callbacks, then fill in defaults
   (time base, aspect ratio, frame rate, hw frames context) inherited from
   the source filter's first input.  Uses link->init_state to detect cycles.
   Returns 0 on success or a negative AVERROR code.
   NOTE(review): several interior lines (braces, case labels, error paths)
   are missing from this excerpt. */
275 int avfilter_config_links(AVFilterContext *filter)
277 int (*config_link)(AVFilterLink *);
281 for (i = 0; i < filter->nb_inputs; i ++) {
282 AVFilterLink *link = filter->inputs[i];
283 AVFilterLink *inlink;
286 if (!link->src || !link->dst) {
287 av_log(filter, AV_LOG_ERROR,
288 "Not all input and output are properly linked (%d).\n", i);
289 return AVERROR(EINVAL);
/* Properties default to those of the source filter's own first input. */
292 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
294 link->current_pts_us = AV_NOPTS_VALUE;
296 switch (link->init_state) {
/* STARTINIT seen again while still configuring => cycle in the graph. */
299 case AVLINK_STARTINIT:
300 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
303 link->init_state = AVLINK_STARTINIT;
/* Configure upstream first (depth-first recursion). */
305 if ((ret = avfilter_config_links(link->src)) < 0)
308 if (!(config_link = link->srcpad->config_props)) {
309 if (link->src->nb_inputs != 1) {
310 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
311 "with more than one input "
312 "must set config_props() "
313 "callbacks on all outputs\n");
314 return AVERROR(EINVAL);
316 } else if ((ret = config_link(link)) < 0) {
317 av_log(link->src, AV_LOG_ERROR,
318 "Failed to configure output pad on %s\n",
/* Fill in media-type-specific defaults the callback left unset. */
323 switch (link->type) {
324 case AVMEDIA_TYPE_VIDEO:
325 if (!link->time_base.num && !link->time_base.den)
326 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
328 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
329 link->sample_aspect_ratio = inlink ?
330 inlink->sample_aspect_ratio : (AVRational){1,1};
333 if (!link->frame_rate.num && !link->frame_rate.den)
334 link->frame_rate = inlink->frame_rate;
/* Source filters have no inlink to inherit from: dimensions are mandatory. */
339 } else if (!link->w || !link->h) {
340 av_log(link->src, AV_LOG_ERROR,
341 "Video source filters must set their output link's "
342 "width and height\n");
343 return AVERROR(EINVAL);
347 case AVMEDIA_TYPE_AUDIO:
349 if (!link->time_base.num && !link->time_base.den)
350 link->time_base = inlink->time_base;
/* Still unset (audio source): fall back to 1/sample_rate. */
353 if (!link->time_base.num && !link->time_base.den)
354 link->time_base = (AVRational) {1, link->sample_rate};
/* Propagate the hw frames context through filters that are not hwframe
   aware; hwframe-aware filters manage link->hw_frames_ctx themselves. */
357 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
358 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
359 av_assert0(!link->hw_frames_ctx &&
360 "should not be set by non-hwframe-aware filter");
361 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
362 if (!link->hw_frames_ctx)
363 return AVERROR(ENOMEM);
/* Finally let the destination pad validate/consume the configuration. */
366 if ((config_link = link->dstpad->config_props))
367 if ((ret = config_link(link)) < 0) {
368 av_log(link->dst, AV_LOG_ERROR,
369 "Failed to configure input pad on %s\n",
374 link->init_state = AVLINK_INIT;
/* Trace-log a link: size/format for video, rate/layout/format for audio,
   plus the names of the filters at each end.  end selects a trailing
   newline.  NOTE(review): the ff_tlog() call openers and braces are
   missing from this excerpt. */
381 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
383 if (link->type == AVMEDIA_TYPE_VIDEO) {
385 "link[%p s:%dx%d fmt:%s %s->%s]%s",
386 link, link->w, link->h,
387 av_get_pix_fmt_name(link->format),
388 link->src ? link->src->filter->name : "",
389 link->dst ? link->dst->filter->name : "",
/* Audio branch: render the channel layout into a local buffer first. */
393 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
396 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
397 link, (int)link->sample_rate, buf,
398 av_get_sample_fmt_name(link->format),
399 link->src ? link->src->filter->name : "",
400 link->dst ? link->dst->filter->name : "",
/* Request a frame on a link: if the link already carries a status, forward
   it to the output side; otherwise flag frame_wanted_out and schedule the
   source filter.  Must not be used by filters implementing activate(). */
405 int ff_request_frame(AVFilterLink *link)
407 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
409 av_assert1(!link->dst->filter->activate);
410 if (link->status_out)
411 return link->status_out;
412 if (link->status_in) {
/* Frames are still queued ahead of the status change: the destination is
   already scheduled (ready >= 300) to consume them first. */
413 if (ff_framequeue_queued_frames(&link->fifo)) {
414 av_assert1(!link->frame_wanted_out);
415 av_assert1(link->dst->ready >= 300);
418 /* Acknowledge status change. Filters using ff_request_frame() will
419 handle the change automatically. Filters can also check the
420 status directly but none do yet. */
421 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
422 return link->status_out;
/* Normal path: ask the source for a frame, priority 100. */
425 link->frame_wanted_out = 1;
426 ff_filter_set_ready(link->src, 100);
/* Deliver a frame request to the source filter of a link, either through
   its request_frame() callback or by recursing into its first input.
   A failure status (other than EAGAIN) is recorded on the link as the
   new input status. */
430 static int ff_request_frame_to_filter(AVFilterLink *link)
434 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
435 /* Assume the filter is blocked, let the method clear it if not */
436 link->frame_blocked_in = 1;
437 if (link->srcpad->request_frame)
438 ret = link->srcpad->request_frame(link);
439 else if (link->src->inputs[0])
440 ret = ff_request_frame(link->src->inputs[0]);
/* EAGAIN means "try later"; an identical status means "already known". */
442 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
443 ff_avfilter_link_set_in_status(link, ret, AV_NOPTS_VALUE);
444 if (ret == AVERROR_EOF)
/* Poll how many frames are immediately available on a link: delegate to the
   source pad's poll_frame() if present, otherwise recurse over all of the
   source filter's inputs and report the minimum across them. */
450 int ff_poll_frame(AVFilterLink *link)
452 int i, min = INT_MAX;
454 if (link->srcpad->poll_frame)
455 return link->srcpad->poll_frame(link);
457 for (i = 0; i < link->src->nb_inputs; i++) {
459 if (!link->src->inputs[i])
460 return AVERROR(EINVAL);
461 val = ff_poll_frame(link->src->inputs[i]);
462 min = FFMIN(min, val);
/* Names of the variables available inside timeline 'enable' expressions
   (array body is outside this excerpt). */
468 static const char *const var_names[] = {
/* Parse and install a new timeline 'enable' expression on the filter.
   Rejects filters without AVFILTER_FLAG_SUPPORT_TIMELINE, lazily allocates
   the per-filter variable array, and on success replaces ctx->enable and
   ctx->enable_str (the old expression is freed on the missing lines —
   confirm against the full file). */
486 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
490 AVExpr *old = ctx->enable;
492 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
493 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
494 "with filter '%s'\n", ctx->filter->name);
495 return AVERROR_PATCHWELCOME;
/* Duplicate first so the caller's string is never stored directly. */
498 expr_dup = av_strdup(expr);
500 return AVERROR(ENOMEM);
502 if (!ctx->var_values) {
503 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
504 if (!ctx->var_values) {
506 return AVERROR(ENOMEM);
510 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
511 NULL, NULL, NULL, NULL, 0, ctx->priv);
513 av_log(ctx->priv, AV_LOG_ERROR,
514 "Error when evaluating the expression '%s' for enable\n",
/* Success: swap in the new expression string. */
521 av_free(ctx->enable_str);
522 ctx->enable_str = expr_dup;
/* Record the latest timestamp seen on a link, both in link time base and in
   microseconds, and re-sort the graph's latency heap accordingly. */
526 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
528 if (pts == AV_NOPTS_VALUE)
530 link->current_pts = pts;
531 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
532 /* TODO use duration */
/* age_index < 0 means the link is not tracked by the graph's heap. */
533 if (link->graph && link->age_index >= 0)
534 ff_avfilter_graph_update_heap(link->graph, link);
/* Dispatch a runtime command to a filter: "ping" is answered generically,
   "enable" re-parses the timeline expression, anything else is forwarded to
   the filter's own process_command() callback; ENOSYS if unsupported. */
537 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
539 if(!strcmp(cmd, "ping")){
/* Fallback buffer used when the caller did not supply one. */
540 char local_res[256] = {0};
544 res_len = sizeof(local_res);
546 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
547 if (res == local_res)
548 av_log(filter, AV_LOG_INFO, "%s", res);
550 }else if(!strcmp(cmd, "enable")) {
551 return set_enable_expr(filter, arg);
552 }else if(filter->filter->process_command) {
553 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
555 return AVERROR(ENOSYS);
/* Global singly-linked registry of filters; last_filter caches the tail
   pointer so registration is O(1). */
558 static AVFilter *first_filter;
559 static AVFilter **last_filter = &first_filter;
561 #if !FF_API_NOCONST_GET_NAME
/* Linear lookup of a registered filter by name; NULL if not found
   (the NULL-name guard lines are outside this excerpt). */
564 AVFilter *avfilter_get_by_name(const char *name)
566 const AVFilter *f = NULL;
571 while ((f = avfilter_next(f)))
572 if (!strcmp(f->name, name))
573 return (AVFilter *)f;
/* Append a filter to the registry.  Lock-free: CAS the tail's next pointer;
   on contention, re-walk from the observed node until the CAS succeeds. */
578 int avfilter_register(AVFilter *filter)
580 AVFilter **f = last_filter;
582 /* the filter must select generic or internal exclusively */
583 av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
587 while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
589 last_filter = &filter->next;
/* Iterate over registered filters; NULL prev restarts from the head. */
594 const AVFilter *avfilter_next(const AVFilter *prev)
596 return prev ? prev->next : first_filter;
599 #if FF_API_OLD_FILTER_REGISTER
/* Deprecated pointer-to-pointer iteration API. */
600 AVFilter **av_filter_next(AVFilter **filter)
602 return filter ? &(*filter)->next : &first_filter;
/* Deprecated no-op kept for ABI compatibility. */
605 void avfilter_uninit(void)
/* Count the entries of a NULL-name-terminated AVFilterPad array. */
610 int avfilter_pad_count(const AVFilterPad *pads)
617 for (count = 0; pads->name; count++)
/* AVClass item_name callback: prefer the instance name, fall back to the
   filter's type name. */
622 static const char *default_filter_name(void *filter_ctx)
624 AVFilterContext *ctx = filter_ctx;
625 return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the filter's private context (which
   carries its own AVClass/options) as the single child object. */
628 static void *filter_child_next(void *obj, void *prev)
630 AVFilterContext *ctx = obj;
631 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
/* AVClass child_class_next callback: iterate over the priv_classes of all
   registered filters, resuming after the class 'prev' (NULL starts over). */
636 static const AVClass *filter_child_class_next(const AVClass *prev)
638 const AVFilter *f = NULL;
640 /* find the filter that corresponds to prev */
641 while (prev && (f = avfilter_next(f)))
642 if (f->priv_class == prev)
645 /* could not find filter corresponding to prev */
649 /* find next filter with specific options */
650 while ((f = avfilter_next(f)))
652 return f->priv_class;
/* Generic per-filter AVOptions (thread type, timeline 'enable' expression,
   thread count) and the AVClass that ties them to AVFilterContext. */
657 #define OFFSET(x) offsetof(AVFilterContext, x)
658 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
659 static const AVOption avfilter_options[] = {
660 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
661 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
662 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
663 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
/* threads == 0 means "use the graph default". */
664 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
665 { .i64 = 0 }, 0, INT_MAX, FLAGS },
669 static const AVClass avfilter_class = {
670 .class_name = "AVFilter",
671 .item_name = default_filter_name,
672 .version = LIBAVUTIL_VERSION_INT,
673 .category = AV_CLASS_CATEGORY_FILTER,
674 .child_next = filter_child_next,
675 .child_class_next = filter_child_class_next,
676 .option = avfilter_options,
/* Fallback execute callback when slice threading is unavailable: run the
   jobs sequentially, storing each job's return value in ret[] if given. */
679 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
680 int *ret, int nb_jobs)
684 for (i = 0; i < nb_jobs; i++) {
685 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for the given filter:
   private context with defaults applied, internal state with the sequential
   execute callback, and pad/link arrays copied from the filter definition.
   Returns NULL on allocation failure after freeing everything allocated so
   far (cleanup label lines at the end of this excerpt). */
692 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
694 AVFilterContext *ret;
699 ret = av_mallocz(sizeof(AVFilterContext));
703 ret->av_class = &avfilter_class;
704 ret->filter = filter;
705 ret->name = inst_name ? av_strdup(inst_name) : NULL;
706 if (filter->priv_size) {
707 ret->priv = av_mallocz(filter->priv_size);
/* Apply generic option defaults, then the filter's own option defaults
   (the priv context's first member must be its AVClass pointer). */
712 av_opt_set_defaults(ret);
713 if (filter->priv_class) {
714 *(const AVClass**)ret->priv = filter->priv_class;
715 av_opt_set_defaults(ret->priv);
718 ret->internal = av_mallocz(sizeof(*ret->internal));
721 ret->internal->execute = default_execute;
/* Copy the pad templates and allocate the (initially NULL) link slots. */
723 ret->nb_inputs = avfilter_pad_count(filter->inputs);
724 if (ret->nb_inputs ) {
725 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
726 if (!ret->input_pads)
728 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
729 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
734 ret->nb_outputs = avfilter_pad_count(filter->outputs);
735 if (ret->nb_outputs) {
736 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
737 if (!ret->output_pads)
739 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
740 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* Error path: undo every allocation made above. */
748 av_freep(&ret->inputs);
749 av_freep(&ret->input_pads);
751 av_freep(&ret->outputs);
752 av_freep(&ret->output_pads);
754 av_freep(&ret->priv);
755 av_freep(&ret->internal);
/* Deprecated public wrapper around ff_filter_alloc(); opaque is unused. */
760 #if FF_API_AVFILTER_OPEN
761 int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
763 *filter_ctx = ff_filter_alloc(filter, inst_name);
764 return *filter_ctx ? 0 : AVERROR(ENOMEM);
/* Detach a link from both of its endpoint filters, release its negotiation
   state and hardware frames context, then free the link itself. */
768 static void free_link(AVFilterLink *link)
/* Clear the back-pointers in the source/destination filters' link arrays
   (pad pointer arithmetic recovers the pad index). */
774 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
776 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
778 av_buffer_unref(&link->hw_frames_ctx);
780 ff_formats_unref(&link->in_formats);
781 ff_formats_unref(&link->out_formats);
782 ff_formats_unref(&link->in_samplerates);
783 ff_formats_unref(&link->out_samplerates);
784 ff_channel_layouts_unref(&link->in_channel_layouts);
785 ff_channel_layouts_unref(&link->out_channel_layouts);
786 avfilter_link_free(&link);
/* Destroy a filter instance: remove it from its graph, run uninit(), free
   all links, options, pads, command queue and internal state.
   NOTE(review): the NULL-check guard lines and final av_free(filter) are
   outside this excerpt. */
789 void avfilter_free(AVFilterContext *filter)
797 ff_filter_graph_remove_filter(filter->graph, filter);
/* Let the filter release its own private resources first. */
799 if (filter->filter->uninit)
800 filter->filter->uninit(filter);
802 for (i = 0; i < filter->nb_inputs; i++) {
803 free_link(filter->inputs[i]);
805 for (i = 0; i < filter->nb_outputs; i++) {
806 free_link(filter->outputs[i]);
/* av_opt_free releases option-allocated strings inside the priv context. */
809 if (filter->filter->priv_class)
810 av_opt_free(filter->priv);
812 av_buffer_unref(&filter->hw_device_ctx);
814 av_freep(&filter->name);
815 av_freep(&filter->input_pads);
816 av_freep(&filter->output_pads);
817 av_freep(&filter->inputs);
818 av_freep(&filter->outputs);
819 av_freep(&filter->priv);
820 while(filter->command_queue){
821 ff_command_queue_pop(filter);
824 av_expr_free(filter->enable);
825 filter->enable = NULL;
826 av_freep(&filter->var_values);
827 av_freep(&filter->internal);
/* Effective thread count for a filter: its own limit (if set) capped by the
   graph's limit, otherwise the graph default. */
831 int ff_filter_get_nb_threads(AVFilterContext *ctx)
833 if (ctx->nb_threads > 0)
834 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
835 return ctx->graph->nb_threads;
/* Parse a filter argument string of key=value pairs separated by ':',
   supporting shorthand (positional) notation for the leading options.
   Generic options are applied to ctx directly; everything else is applied
   to the private context and also collected into *options for the caller.
   NOTE(review): many interior lines (loop heads, cleanup, returns) are
   missing from this excerpt. */
838 static int process_options(AVFilterContext *ctx, AVDictionary **options,
841 const AVOption *o = NULL;
843 char *av_uninit(parsed_key), *av_uninit(value);
851 const char *shorthand = NULL;
/* Advance to the next positional option; consts and duplicates are
   skipped so shorthand maps onto real, distinct options. */
853 o = av_opt_next(ctx->priv, o);
855 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
861 ret = av_opt_get_key_value(&args, "=", ":",
862 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
863 &parsed_key, &value);
865 if (ret == AVERROR(EINVAL))
866 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
868 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* An explicit key=value ends shorthand mode for the rest of the string. */
876 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
881 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
/* Generic (AVFilterContext-level) options are set immediately. */
883 if (av_opt_find(ctx, key, NULL, 0, 0)) {
884 ret = av_opt_set(ctx, key, value, 0);
/* Filter-private options: record in the dict and set on the priv ctx. */
891 av_dict_set(options, key, value, 0);
892 if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
893 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
894 if (ret == AVERROR_OPTION_NOT_FOUND)
895 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
/* 'enable' may have been set as a plain string above; compile it now. */
908 if (ctx->enable_str) {
909 ret = set_enable_expr(ctx, ctx->enable_str);
/* Deprecated wrapper: opaque was never used, forward to avfilter_init_str. */
916 #if FF_API_AVFILTER_INIT_FILTER
917 int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
919 return avfilter_init_str(filter, args);
/* Initialize a filter from an options dictionary: apply generic options,
   decide on slice threading, apply private options, then run the filter's
   init callback (init_opaque / init / init_dict, in that priority). */
923 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
927 ret = av_opt_set_dict(ctx, options);
929 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
/* Slice threading only if the filter supports it, both the filter and the
   graph allow it, and the graph provides a threaded execute callback. */
933 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
934 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
935 ctx->graph->internal->thread_execute) {
936 ctx->thread_type = AVFILTER_THREAD_SLICE;
937 ctx->internal->execute = ctx->graph->internal->thread_execute;
939 ctx->thread_type = 0;
942 if (ctx->filter->priv_class) {
943 ret = av_opt_set_dict(ctx->priv, options);
945 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
950 if (ctx->filter->init_opaque)
951 ret = ctx->filter->init_opaque(ctx, NULL);
952 else if (ctx->filter->init)
953 ret = ctx->filter->init(ctx);
954 else if (ctx->filter->init_dict)
955 ret = ctx->filter->init_dict(ctx, options);
/* Initialize a filter from an argument string.  Parses the string into an
   options dictionary (with a large compatibility shim translating the old
   ':'-separated syntax into '|'-separated lists for a fixed set of filters),
   calls avfilter_init_dict(), and errors out on unconsumed options.
   NOTE(review): many interior lines (braces, gotos, cleanup) are missing
   from this excerpt. */
960 int avfilter_init_str(AVFilterContext *filter, const char *args)
962 AVDictionary *options = NULL;
963 AVDictionaryEntry *e;
/* Args given but the filter declares no options: hard error. */
967 if (!filter->filter->priv_class) {
968 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
969 "options, but options were provided: %s.\n", args);
970 return AVERROR(EINVAL);
973 #if FF_API_OLD_FILTER_OPTS || FF_API_OLD_FILTER_OPTS_ERROR
/* Compatibility shim: these filters historically took ':'-separated lists. */
974 if ( !strcmp(filter->filter->name, "format") ||
975 !strcmp(filter->filter->name, "noformat") ||
976 !strcmp(filter->filter->name, "frei0r") ||
977 !strcmp(filter->filter->name, "frei0r_src") ||
978 !strcmp(filter->filter->name, "ocv") ||
979 !strcmp(filter->filter->name, "pan") ||
980 !strcmp(filter->filter->name, "pp") ||
981 !strcmp(filter->filter->name, "aevalsrc")) {
982 /* a hack for compatibility with the old syntax
983 * replace colons with |s */
984 char *copy = av_strdup(args);
986 int nb_leading = 0; // number of leading colons to skip
990 ret = AVERROR(ENOMEM);
/* Some filters have leading positional args whose colons must be kept. */
994 if (!strcmp(filter->filter->name, "frei0r") ||
995 !strcmp(filter->filter->name, "ocv"))
997 else if (!strcmp(filter->filter->name, "frei0r_src"))
1000 while (nb_leading--) {
1003 p = copy + strlen(copy);
1009 deprecated = strchr(p, ':') != NULL;
1011 if (!strcmp(filter->filter->name, "aevalsrc")) {
/* aevalsrc: channel expressions are ':'-separated until the first token
   containing '=', which starts the named-options part. */
1013 while ((p = strchr(p, ':')) && p[1] != ':') {
1014 const char *epos = strchr(p + 1, '=');
1015 const char *spos = strchr(p + 1, ':');
1016 const int next_token_is_opt = epos && (!spos || epos < spos);
1017 if (next_token_is_opt) {
1021 /* next token does not contain a '=', assume a channel expression */
1025 if (p && *p == ':') { // double sep '::' found
1027 memmove(p, p + 1, strlen(p));
/* Translate every remaining ':' into the new '|' list separator. */
1030 while ((p = strchr(p, ':')))
1033 #if FF_API_OLD_FILTER_OPTS
1035 av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
1036 "'|' to separate the list items.\n");
1038 av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
1039 ret = process_options(filter, &options, copy);
/* FF_API_OLD_FILTER_OPTS_ERROR branch: old syntax is a hard error. */
1042 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1043 "'|' to separate the list items ('%s' instead of '%s')\n",
1045 ret = AVERROR(EINVAL);
1047 ret = process_options(filter, &options, copy);
/* Normal (non-compat) path. */
1057 ret = process_options(filter, &options, args);
1063 ret = avfilter_init_dict(filter, &options);
/* Anything left in the dictionary was not consumed by any option. */
1067 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1068 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1069 ret = AVERROR_OPTION_NOT_FOUND;
1074 av_dict_free(&options);
/* Accessors for pad metadata (pad_idx must be in range). */
1079 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1081 return pads[pad_idx].name;
1084 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1086 return pads[pad_idx].type;
/* Default filter_frame callback: pass the frame straight through to the
   destination filter's first output. */
1089 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1091 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to a destination pad: make the frame writable if the
   pad requires it, process queued commands, evaluate the timeline 'enable'
   expression, and invoke the pad's filter_frame callback (or the default
   passthrough when the filter is disabled and supports timeline generically). */
1094 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1096 int (*filter_frame)(AVFilterLink *, AVFrame *);
1097 AVFilterContext *dstctx = link->dst;
1098 AVFilterPad *dst = link->dstpad;
1101 if (!(filter_frame = dst->filter_frame))
1102 filter_frame = default_filter_frame;
1104 if (dst->needs_writable) {
1105 ret = ff_inlink_make_frame_writable(link, &frame);
1110 ff_inlink_process_commands(link, frame);
/* is_disabled reflects the timeline expression at this frame's timestamp. */
1111 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1113 if (dstctx->is_disabled &&
1114 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1115 filter_frame = default_filter_frame;
1116 ret = filter_frame(link, frame);
1117 link->frame_count_out++;
/* Error path: the frame is owned here and must be released. */
1121 av_frame_free(&frame);
/* Submit a frame on a link: after consistency checks against the link's
   negotiated parameters, queue it in the link FIFO and schedule the
   destination filter (priority 300).  Takes ownership of the frame; it is
   freed on any error path. */
1125 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1128 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1130 /* Consistency checks */
1131 if (link->type == AVMEDIA_TYPE_VIDEO) {
/* A few filters legitimately accept mid-stream parameter changes and are
   exempted from the assertions below. */
1132 if (strcmp(link->dst->filter->name, "buffersink") &&
1133 strcmp(link->dst->filter->name, "format") &&
1134 strcmp(link->dst->filter->name, "idet") &&
1135 strcmp(link->dst->filter->name, "null") &&
1136 strcmp(link->dst->filter->name, "scale")) {
1137 av_assert1(frame->format == link->format);
1138 av_assert1(frame->width == link->w);
1139 av_assert1(frame->height == link->h);
/* Audio branch: parameter changes are rejected with an error. */
1142 if (frame->format != link->format) {
1143 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1146 if (frame->channels != link->channels) {
1147 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1150 if (frame->channel_layout != link->channel_layout) {
1151 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1154 if (frame->sample_rate != link->sample_rate) {
1155 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
/* A frame arriving satisfies any pending request and unblocks outputs. */
1160 link->frame_blocked_in = link->frame_wanted_out = 0;
1161 link->frame_count_in++;
1162 filter_unblock(link->dst);
1163 ret = ff_framequeue_add(&link->fifo, frame);
1165 av_frame_free(&frame);
1168 ff_filter_set_ready(link->dst, 300);
/* Unsupported-change error path: drop the frame. */
1172 av_frame_free(&frame);
1173 return AVERROR_PATCHWELCOME;
/* True when the link FIFO holds frames and at least min queued samples
   (the additional condition on the trailing line is outside this excerpt). */
1176 static int samples_ready(AVFilterLink *link, unsigned min)
1178 return ff_framequeue_queued_frames(&link->fifo) &&
1179 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into *rframe.
   Fast path: if the first queued frame already fits the bounds, hand it out
   directly.  Otherwise allocate a new buffer, concatenate whole queued
   frames into it, and partially consume the next frame if needed. */
1183 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1186 AVFrame *frame0, *frame, *buf;
1187 unsigned nb_samples, nb_frames, i, p;
1190 /* Note: this function relies on no format changes and must only be
1191 called with enough samples. */
1192 av_assert1(samples_ready(link, link->min_samples));
1193 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
/* Fast path: first frame fits [min, max] and nothing was partially read. */
1194 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1195 *rframe = ff_framequeue_take(&link->fifo);
/* Count how many whole frames fit under max while reaching at least min. */
1201 if (nb_samples + frame->nb_samples > max) {
1202 if (nb_samples < min)
1206 nb_samples += frame->nb_samples;
1208 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1210 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1213 buf = ff_get_audio_buffer(link, nb_samples);
1215 return AVERROR(ENOMEM);
/* Properties (and pts) are taken from the first source frame. */
1216 ret = av_frame_copy_props(buf, frame0);
1218 av_frame_free(&buf);
1221 buf->pts = frame0->pts;
/* Copy the whole frames, consuming them from the FIFO. */
1224 for (i = 0; i < nb_frames; i++) {
1225 frame = ff_framequeue_take(&link->fifo);
1226 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1227 frame->nb_samples, link->channels, link->format);
1228 p += frame->nb_samples;
1229 av_frame_free(&frame);
/* Partially consume the next frame to reach exactly nb_samples. */
1231 if (p < nb_samples) {
1232 unsigned n = nb_samples - p;
1233 frame = ff_framequeue_peek(&link->fifo, 0);
1234 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1235 link->channels, link->format);
1236 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Take one frame (or sample batch) from a link's FIFO and run it through
   the destination filter via ff_filter_frame_framed(), then reschedule the
   destination so any further queued frames are processed. */
1243 static int ff_filter_frame_to_filter(AVFilterLink *link)
1245 AVFrame *frame = NULL;
1246 AVFilterContext *dst = link->dst;
1249 av_assert1(ff_framequeue_queued_frames(&link->fifo));
/* min_samples set => consume a sample-bounded batch, else a whole frame. */
1250 ret = link->min_samples ?
1251 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1252 ff_inlink_consume_frame(link, &frame);
1258 /* The filter will soon have received a new frame, that may allow it to
1259 produce one or more: unblock its outputs. */
1260 filter_unblock(dst);
1261 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1262 before the frame; ff_filter_frame_framed() will re-increment it. */
1263 link->frame_count_out--;
1264 ret = ff_filter_frame_framed(link, frame);
/* A new error becomes the link's output status. */
1265 if (ret < 0 && ret != link->status_out) {
1266 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1268 /* Run once again, to see if several frames were available, or if
1269 the input status has also changed, or any other reason. */
1270 ff_filter_set_ready(dst, 300);
/* Propagate an input status change (EOF/error) through a filter by
   repeatedly requesting frames on its outputs until the input's out-status
   gets set, cycling over outputs whose own input status is still clear.
   If every output is already closed, the input is closed directly. */
1275 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1277 unsigned out = 0, progress = 0;
1280 av_assert0(!in->status_out);
1281 if (!filter->nb_outputs) {
1282 /* not necessary with the current API and sinks */
1285 while (!in->status_out) {
1286 if (!filter->outputs[out]->status_in) {
1288 ret = ff_request_frame_to_filter(filter->outputs[out]);
/* One full pass over all outputs without progress => stop. */
1292 if (++out == filter->nb_outputs) {
1294 /* Every output already closed: input no longer interesting
1295 (example: overlay in shortest mode, other input closed). */
1296 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1303 ff_filter_set_ready(filter, 200);
/* Default activate() for filters using the filter_frame/request_frame API,
   in priority order: (1) push a queued input frame into the filter,
   (2) forward a pending input status change, (3) forward output frame
   requests upstream; otherwise report not ready. */
1307 static int ff_filter_activate_default(AVFilterContext *filter)
1311 for (i = 0; i < filter->nb_inputs; i++) {
1312 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1313 return ff_filter_frame_to_filter(filter->inputs[i]);
1316 for (i = 0; i < filter->nb_inputs; i++) {
1317 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
/* A status change is only forwarded once the FIFO has drained. */
1318 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1319 return forward_status_change(filter, filter->inputs[i]);
1322 for (i = 0; i < filter->nb_outputs; i++) {
1323 if (filter->outputs[i]->frame_wanted_out &&
1324 !filter->outputs[i]->frame_blocked_in) {
1325 return ff_request_frame_to_filter(filter->outputs[i]);
1328 return FFERROR_NOT_READY;
1332 Filter scheduling and activation
1334 When a filter is activated, it must:
1335 - if possible, output a frame;
1336 - else, if relevant, forward the input status change;
1337 - else, check outputs for wanted frames and forward the requests.
1339 The following AVFilterLink fields are used for activation:
1343 This field indicates if a frame is needed on this input of the
1344 destination filter. A positive value indicates that a frame is needed
1345 to process queued frames or internal data or to satisfy the
1346 application; a zero value indicates that a frame is not especially
1347 needed but could be processed anyway; a negative value indicates that a
1348 frame would just be queued.
1350 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1351 when requested by the application through a specific API or when it is
1352 set on one of the outputs.
1354 It is cleared when a frame is sent from the source using
1357 It is also cleared when a status change is sent from the source using
1358 ff_avfilter_link_set_in_status().
1362 This field means that the source filter can not generate a frame as is.
1363 Its goal is to avoid repeatedly calling the request_frame() method on
1366 It is set by the framework on all outputs of a filter before activating it.
1368 It is automatically cleared by ff_filter_frame().
1370 It is also automatically cleared by ff_avfilter_link_set_in_status().
1372 It is also cleared on all outputs (using filter_unblock()) when
1373 something happens on an input: processing a frame or changing the
1378 Contains the frames queued on a filter input. If it contains frames and
1379 frame_wanted_out is not set, then the filter can be activated. If that
1380 results in the filter not being able to use these frames, the filter must set
1381 frame_wanted_out to ask for more frames.
1383 - status_in and status_in_pts:
1385 Status (EOF or error code) of the link and timestamp of the status
1386 change (in link time base, same as frames) as seen from the input of
1387 the link. The status change is considered happening after the frames
1390 It is set by the source filter using ff_avfilter_link_set_in_status().
1394 Status of the link as seen from the output of the link. The status
1395 change is considered having already happened.
1397 It is set by the destination filter using
1398 ff_avfilter_link_set_out_status().
1400 Filters are activated according to the ready field, set using the
1401 ff_filter_set_ready(). Eventually, a priority queue will be used.
1402 ff_filter_set_ready() is called whenever anything could cause progress to
1403 be possible. Marking a filter ready when it is not is not a problem,
1404 except for the small overhead it causes.
1406 Conditions that cause a filter to be marked ready are:
1408 - frames added on an input link;
1410 - changes in the input or output status of an input link;
1412 - requests for a frame on an output link;
1414 - after any actual processing using the legacy methods (filter_frame(),
1415 and request_frame() to acknowledge status changes), to run once more
1416 and check if enough input was present for several frames.
1418 Examples of scenarios to consider:
1420 - buffersrc: activate if frame_wanted_out to notify the application;
1421 activate when the application adds a frame to push it immediately.
1423 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1425 - concat (not at stitch points): can process a frame on any output.
1426 Activate if frame_wanted_out on output to forward on the corresponding
1427 input. Activate when a frame is present on input to process it
1430 - framesync: needs at least one frame on each input; extra frames on the
1431 wrong input will accumulate. When a frame is first added on one input,
1432 set frame_wanted_out<0 on it to avoid getting more (would trigger
1433 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1435 Activation of old filters:
1437 In order to activate a filter implementing the legacy filter_frame() and
1438 request_frame() methods, perform the first possible of the following
1441 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1442 frame and call filter_frame().
1444 Rationale: filter frames as soon as possible instead of leaving them
1445 queued; frame_wanted_out < 0 is not possible since the old API does not
1446 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1447 when min_samples > 0 and there are not enough samples queued.
1449 - If an input has status_in set but not status_out, try to call
1450 request_frame() on one of the outputs in the hope that it will trigger
1451 request_frame() on the input with status_in and acknowledge it. This is
1452 awkward and fragile, filters with several inputs or outputs should be
1453 updated to direct activation as soon as possible.
1455 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1458 Rationale: checking frame_blocked_in is necessary to avoid requesting
1459 repeatedly on a blocked input if another is not blocked (example:
1460 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1462 TODO: respect needs_fifo and remove auto-inserted fifos.
/* Activate a filter: dispatch to its activate() callback when it has one,
 * otherwise fall back to the legacy default strategy above.
 * NOTE(review): the 'ret' declaration and the tail of the function
 * (handling of FFERROR_NOT_READY and the final return) are elided in this
 * excerpt. */
1466 int ff_filter_activate(AVFilterContext *filter)
1470 /* Generic timeline support is not yet implemented but should be easy */
/* a filter may not combine generic timeline support with activate() */
1471 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1472 filter->filter->activate));
1474 ret = filter->filter->activate ? filter->filter->activate(filter) :
1475 ff_filter_activate_default(filter);
1476 if (ret == FFERROR_NOT_READY)
/* Test and acknowledge a status change on an input link.  Sets *rstatus
 * to 0 while frames are still queued or no status change is pending, to
 * the already-acknowledged status if there is one, or acknowledges a
 * pending status_in by copying it to status_out and updating the link pts.
 * NOTE(review): the final return of the acknowledge path is elided in
 * this excerpt. */
1481 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1483 *rpts = link->current_pts;
/* queued frames must be consumed before the status change is visible */
1484 if (ff_framequeue_queued_frames(&link->fifo))
1485 return *rstatus = 0;
/* already acknowledged earlier: report the same status again */
1486 if (link->status_out)
1487 return *rstatus = link->status_out;
1488 if (!link->status_in)
1489 return *rstatus = 0;
/* acknowledge: propagate input status to the output side of the link */
1490 *rstatus = link->status_out = link->status_in;
1491 ff_update_link_current_pts(link, link->status_in_pts);
1492 *rpts = link->current_pts;
/* Return nonzero if at least one frame is queued on the input link. */
1496 int ff_inlink_check_available_frame(AVFilterLink *link)
1498 return ff_framequeue_queued_frames(&link->fifo) > 0;
/* Return nonzero if at least 'min' samples are queued on the input link,
 * or if the link has reached EOF/error (status_in set) and any samples
 * remain — a final short frame must still be consumable. */
1501 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1503 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1505 return samples >= min || (link->status_in && samples);
/* Per-frame bookkeeping when a frame is consumed from an input link:
 * advance the link pts, run queued process_command entries due at this
 * frame, re-evaluate the timeline enable expression and update the
 * destination's is_disabled flag, and count the frame as output. */
1508 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1510 ff_update_link_current_pts(link, frame->pts);
1511 ff_inlink_process_commands(link, frame);
1512 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1513 link->frame_count_out++;
/* Take the next whole frame from the input link's queue into *rframe.
 * NOTE(review): several lines (early returns, the 'frame' declaration and
 * the final return) are elided in this excerpt. */
1516 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1521 if (!ff_inlink_check_available_frame(link))
/* if partial frames were already taken from the queue, go through the
 * sample-accurate path so accounting stays consistent */
1524 if (link->fifo.samples_skipped) {
1525 frame = ff_framequeue_peek(&link->fifo, 0);
1526 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1529 frame = ff_framequeue_take(&link->fifo);
1530 consume_update(link, frame);
/* Take a frame of between 'min' and 'max' audio samples from the input
 * link's queue.  At EOF, 'min' is clamped down to what is actually queued
 * so the final short frame can still be delivered.
 * NOTE(review): the parameter list continuation, early returns and the
 * 'ret'/'frame' declarations are elided in this excerpt.  Also note the
 * visible call passes link->max_samples to take_samples(), not the 'max'
 * argument — TODO confirm against the full source. */
1535 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1543 if (!ff_inlink_check_available_samples(link, min))
1545 if (link->status_in)
1546 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1547 ret = take_samples(link, min, link->max_samples, &frame);
1550 consume_update(link, frame);
/* Ensure *rframe is writable: return early if it already is, otherwise
 * allocate a fresh buffer of the right media type, copy the frame
 * properties and the pixel/sample data into it, free the original frame
 * and replace *rframe with the copy.
 * Errors: AVERROR(EINVAL) for an unknown link type, AVERROR(ENOMEM) when
 * allocation fails.
 * NOTE(review): 'out'/'ret' declarations, 'break' statements, null checks
 * and the final assignment/return are elided in this excerpt. */
1555 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1557 AVFrame *frame = *rframe;
/* nothing to do if the frame is already exclusively owned */
1561 if (av_frame_is_writable(frame))
1563 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
/* allocate a replacement buffer matching the link's media type */
1565 switch (link->type) {
1566 case AVMEDIA_TYPE_VIDEO:
1567 out = ff_get_video_buffer(link, link->w, link->h);
1569 case AVMEDIA_TYPE_AUDIO:
1570 out = ff_get_audio_buffer(link, frame->nb_samples);
1573 return AVERROR(EINVAL);
1576 return AVERROR(ENOMEM);
/* copy metadata (pts, flags, side data, ...) before the payload */
1578 ret = av_frame_copy_props(out, frame);
1580 av_frame_free(&out);
/* copy the actual payload */
1584 switch (link->type) {
1585 case AVMEDIA_TYPE_VIDEO:
1586 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1587 frame->format, frame->width, frame->height);
1589 case AVMEDIA_TYPE_AUDIO:
1590 av_samples_copy(out->extended_data, frame->extended_data,
1591 0, 0, frame->nb_samples,
/* both earlier switches returned EINVAL for other types */
1596 av_assert0(!"reached");
1599 av_frame_free(&frame);
/* Execute all queued commands on the destination filter whose scheduled
 * time (in seconds) is not later than this frame's pts converted to
 * seconds, popping each one after it runs.
 * NOTE(review): the closing brace and return are elided in this excerpt. */
1604 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1606 AVFilterCommand *cmd = link->dst->command_queue;
/* cmd->time is in seconds; compare against pts scaled by the time base */
1608 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1609 av_log(link->dst, AV_LOG_DEBUG,
1610 "Processing command time:%f command:%s arg:%s\n",
1611 cmd->time, cmd->command, cmd->arg);
1612 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1613 ff_command_queue_pop(link->dst);
1614 cmd= link->dst->command_queue;
/* Evaluate the destination filter's timeline 'enable' expression for this
 * frame.  Returns nonzero (enabled) when the expression's absolute value
 * is at least 0.5; filters with no enable_str are implicitly enabled.
 * NOTE(review): the early-return body after the enable_str check is
 * elided in this excerpt. */
1619 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1621 AVFilterContext *dstctx = link->dst;
1622 int64_t pts = frame->pts;
1623 int64_t pos = frame->pkt_pos;
/* no timeline expression: the filter is always enabled */
1625 if (!dstctx->enable_str)
/* expose per-frame variables to the expression evaluator */
1628 dstctx->var_values[VAR_N] = link->frame_count_out;
1629 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1630 dstctx->var_values[VAR_W] = link->w;
1631 dstctx->var_values[VAR_H] = link->h;
/* pkt_pos of -1 means unknown byte position */
1632 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1634 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
/* Mark that a frame is wanted on this input link and schedule the source
 * filter for activation.  Must not be called once the link has seen a
 * status change on either side (asserted below). */
1637 void ff_inlink_request_frame(AVFilterLink *link)
1639 av_assert1(!link->status_in);
1640 av_assert1(!link->status_out);
1641 link->frame_wanted_out = 1;
/* priority 100: lower than frame/status processing (200/300) */
1642 ff_filter_set_ready(link->src, 100);
1645 const AVClass *avfilter_get_class(void)
1647 return &avfilter_class;