3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/atomic.h"
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/buffer.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/common.h"
28 #include "libavutil/eval.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/rational.h"
35 #include "libavutil/samplefmt.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Version identification string compiled into the library binary. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log the contents of an AVFrame reference: buffer/data pointers,
 * linesizes, pts and packet position, then video geometry with field order
 * (P/T/B) or, for audio frames, sample info.  Debug aid only.
 * NOTE(review): interior lines of this function are elided in this extract. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, av_frame_get_pkt_pos(ref));
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
/* Public API: runtime library version.  The assert guarantees this is an
 * FFmpeg build (micro version >= 100 distinguishes it from Libav). */
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
/* Public API: the configure-time options string of this build. */
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
/* Public API: license of the library.  The pointer arithmetic skips past the
 * "libavfilter license: " prefix of the concatenated string literal, so only
 * the license name itself is returned. */
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Pop the head entry of the filter's timed-command queue and free it
 * (the command string and, on the elided lines, the entry itself). */
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
/* Insert a new pad (and a NULL link slot) at position idx of the filter's
 * pads/links arrays, growing both by one.  padidx_off is the byte offset
 * inside AVFilterLink of the pad-index field (srcpad/dstpad) that must be
 * bumped for every link shifted to a higher index.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
/* clamp idx so the insertion point is always inside [0, *count] */
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
/* shift the tail up by one slot, then drop the new pad into the gap */
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
/* fix up the pad index stored inside each shifted link */
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Public API: connect output pad srcpad of src to input pad dstpad of dst.
 * Both filters must already belong to the same graph, the pads must exist,
 * be unconnected, and carry the same media type.  Allocates the AVFilterLink
 * and hooks it into both filters; initializes the link's frame FIFO.
 * Returns 0 on success or a negative AVERROR code. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
/* reject out-of-range pads and pads that are already connected */
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
/* the same link object is referenced from both endpoints */
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
/* format fields are shared between pixel and sample formats; both "none"
 * sentinels must be -1 for that to work */
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Public API: free a link and the resources it owns (partial audio buffer,
 * frame FIFO, frame pool); *link is freed and reset on the elided lines. */
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
/* Public accessor: negotiated channel count of the link. */
186 int avfilter_link_get_channels(AVFilterLink *link)
188 return link->channels;
/* Raise the filter's scheduling priority to at least `priority`;
 * never lowers an already-higher readiness value. */
191 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
193 filter->ready = FFMAX(filter->ready, priority);
197 * Clear frame_blocked_in on all outputs.
198 * This is necessary whenever something changes on input.
200 static void filter_unblock(AVFilterContext *filter)
204 for (i = 0; i < filter->nb_outputs; i++)
205 filter->outputs[i]->frame_blocked_in = 0;
/* Set the input-side status (EOF or error) of a link, as seen from the
 * source.  Idempotent for a repeated identical status; asserts the status is
 * only set once.  Unblocks and wakes the destination filter. */
209 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
211 if (link->status_in == status)
213 av_assert0(!link->status_in);
214 link->status_in = status;
215 link->status_in_pts = pts;
216 link->frame_wanted_out = 0;
217 link->frame_blocked_in = 0;
218 filter_unblock(link->dst);
219 ff_filter_set_ready(link->dst, 200);
/* Set the output-side status of a link (destination has acknowledged EOF /
 * error); updates the link's current pts and wakes the source filter. */
222 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
224 av_assert0(!link->frame_wanted_out);
225 av_assert0(!link->status_out);
226 link->status_out = status;
227 if (pts != AV_NOPTS_VALUE)
228 ff_update_link_current_pts(link, pts);
229 filter_unblock(link->dst);
230 ff_filter_set_ready(link->src, 200);
/* Public (legacy) API: mark a link closed (EOF) or open from the output side. */
233 void avfilter_link_set_closed(AVFilterLink *link, int closed)
235 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Public API: splice filter `filt` into an existing link, so the stream now
 * flows src -> filt -> dst.  On failure to create the new downstream link the
 * original connection is restored.  Any format lists already negotiated on
 * the link are moved to the new downstream link to preserve them. */
238 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
239 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
242 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
244 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
245 "between the filter '%s' and the filter '%s'\n",
246 filt->name, link->src->name, link->dst->name);
248 link->dst->inputs[dstpad_idx] = NULL;
249 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
250 /* failed to link output filter to new filter */
251 link->dst->inputs[dstpad_idx] = link;
255 /* re-hookup the link to the new destination filter we inserted */
257 link->dstpad = &filt->input_pads[filt_srcpad_idx];
258 filt->inputs[filt_srcpad_idx] = link;
260 /* if any information on supported media formats already exists on the
261 * link, we need to preserve that */
262 if (link->out_formats)
263 ff_formats_changeref(&link->out_formats,
264 &filt->outputs[filt_dstpad_idx]->out_formats);
265 if (link->out_samplerates)
266 ff_formats_changeref(&link->out_samplerates,
267 &filt->outputs[filt_dstpad_idx]->out_samplerates);
268 if (link->out_channel_layouts)
269 ff_channel_layouts_changeref(&link->out_channel_layouts,
270 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Public API: configure all links feeding `filter`, recursing upstream first.
 * For each unconfigured input link: run the source pad's config_props (or
 * require one for multi-input/source filters), fill in defaults inherited
 * from the source's first input (time base, SAR, frame rate), propagate
 * hw_frames_ctx when formats match, then run the destination pad's
 * config_props.  AVLINK_STARTINIT marks in-progress links to detect cycles.
 * NOTE(review): several interior lines (declarations, case labels, error
 * returns) are elided in this extract. */
275 int avfilter_config_links(AVFilterContext *filter)
277 int (*config_link)(AVFilterLink *);
281 for (i = 0; i < filter->nb_inputs; i ++) {
282 AVFilterLink *link = filter->inputs[i];
283 AVFilterLink *inlink;
286 if (!link->src || !link->dst) {
287 av_log(filter, AV_LOG_ERROR,
288 "Not all input and output are properly linked (%d).\n", i);
289 return AVERROR(EINVAL);
/* defaults for this link are inherited from the source's first input, if any */
292 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
294 link->current_pts_us = AV_NOPTS_VALUE;
296 switch (link->init_state) {
299 case AVLINK_STARTINIT:
300 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
303 link->init_state = AVLINK_STARTINIT;
/* configure upstream before this link */
305 if ((ret = avfilter_config_links(link->src)) < 0)
308 if (!(config_link = link->srcpad->config_props)) {
309 if (link->src->nb_inputs != 1) {
310 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
311 "with more than one input "
312 "must set config_props() "
313 "callbacks on all outputs\n");
314 return AVERROR(EINVAL);
316 } else if ((ret = config_link(link)) < 0) {
317 av_log(link->src, AV_LOG_ERROR,
318 "Failed to configure output pad on %s\n",
323 switch (link->type) {
324 case AVMEDIA_TYPE_VIDEO:
325 if (!link->time_base.num && !link->time_base.den)
326 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
328 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
329 link->sample_aspect_ratio = inlink ?
330 inlink->sample_aspect_ratio : (AVRational){1,1};
333 if (!link->frame_rate.num && !link->frame_rate.den)
334 link->frame_rate = inlink->frame_rate;
339 } else if (!link->w || !link->h) {
340 av_log(link->src, AV_LOG_ERROR,
341 "Video source filters must set their output link's "
342 "width and height\n");
343 return AVERROR(EINVAL);
347 case AVMEDIA_TYPE_AUDIO:
349 if (!link->time_base.num && !link->time_base.den)
350 link->time_base = inlink->time_base;
/* still unset: fall back to 1/sample_rate */
353 if (!link->time_base.num && !link->time_base.den)
354 link->time_base = (AVRational) {1, link->sample_rate};
/* propagate hardware frames context downstream when the format matches */
357 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
358 !link->hw_frames_ctx) {
359 AVHWFramesContext *input_ctx = (AVHWFramesContext*)link->src->inputs[0]->hw_frames_ctx->data;
361 if (input_ctx->format == link->format) {
362 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
363 if (!link->hw_frames_ctx)
364 return AVERROR(ENOMEM);
368 if ((config_link = link->dstpad->config_props))
369 if ((ret = config_link(link)) < 0) {
370 av_log(link->dst, AV_LOG_ERROR,
371 "Failed to configure input pad on %s\n",
376 link->init_state = AVLINK_INIT;
/* Trace-log a link's negotiated properties: size and pixel format for video,
 * sample rate / channel layout / sample format for audio, plus the names of
 * the filters at both ends.  Debug aid only. */
383 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
385 if (link->type == AVMEDIA_TYPE_VIDEO) {
387 "link[%p s:%dx%d fmt:%s %s->%s]%s",
388 link, link->w, link->h,
389 av_get_pix_fmt_name(link->format),
390 link->src ? link->src->filter->name : "",
391 link->dst ? link->dst->filter->name : "",
395 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
398 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399 link, (int)link->sample_rate, buf,
400 av_get_sample_fmt_name(link->format),
401 link->src ? link->src->filter->name : "",
402 link->dst ? link->dst->filter->name : "",
/* Request a frame on a link from the destination side.  If the input side
 * already reached EOF/error and the FIFO is empty, acknowledge the status on
 * the output side; otherwise flag the link as wanting a frame and schedule
 * the source filter. */
407 int ff_request_frame(AVFilterLink *link)
409 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
411 if (link->status_out)
412 return link->status_out;
413 if (link->status_in) {
414 if (ff_framequeue_queued_frames(&link->fifo)) {
415 av_assert1(!link->frame_wanted_out);
416 av_assert1(link->dst->ready >= 300);
419 /* Acknowledge status change. Filters using ff_request_frame() will
420 handle the change automatically. Filters can also check the
421 status directly but none do yet. */
422 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
423 return link->status_out;
426 link->frame_wanted_out = 1;
427 ff_filter_set_ready(link->src, 100);
/* Forward a frame request into the source filter of `link`: call its
 * request_frame callback, or fall back to requesting from the filter's own
 * first input.  A resulting EOF/error (other than EAGAIN) is recorded as the
 * link's input status. */
431 int ff_request_frame_to_filter(AVFilterLink *link)
435 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
436 /* Assume the filter is blocked, let the method clear it if not */
437 link->frame_blocked_in = 1;
438 if (link->srcpad->request_frame)
439 ret = link->srcpad->request_frame(link);
440 else if (link->src->inputs[0])
441 ret = ff_request_frame(link->src->inputs[0]);
443 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
444 ff_avfilter_link_set_in_status(link, ret, AV_NOPTS_VALUE);
445 if (ret == AVERROR_EOF)
/* Legacy polling: ask the source pad how many frames are available, or, if
 * it has no poll_frame callback, recurse into all of the source filter's
 * inputs and return the minimum across them. */
451 int ff_poll_frame(AVFilterLink *link)
453 int i, min = INT_MAX;
455 if (link->srcpad->poll_frame)
456 return link->srcpad->poll_frame(link);
458 for (i = 0; i < link->src->nb_inputs; i++) {
460 if (!link->src->inputs[i])
461 return AVERROR(EINVAL);
462 val = ff_poll_frame(link->src->inputs[i]);
463 min = FFMIN(min, val);
/* Variable names available inside the timeline 'enable' expression
 * (entries elided in this extract; indexed by the VAR_* enum). */
469 static const char *const var_names[] = {
/* Parse `expr` as the filter's timeline 'enable' expression and install it.
 * Rejects filters without AVFILTER_FLAG_SUPPORT_TIMELINE, lazily allocates
 * the per-filter variable array, and on success replaces the previous
 * expression and stored expression string. */
487 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
491 AVExpr *old = ctx->enable;
493 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
494 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
495 "with filter '%s'\n", ctx->filter->name);
496 return AVERROR_PATCHWELCOME;
499 expr_dup = av_strdup(expr);
501 return AVERROR(ENOMEM);
503 if (!ctx->var_values) {
504 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
505 if (!ctx->var_values) {
507 return AVERROR(ENOMEM);
511 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
512 NULL, NULL, NULL, NULL, 0, ctx->priv);
514 av_log(ctx->priv, AV_LOG_ERROR,
515 "Error when evaluating the expression '%s' for enable\n",
/* success: the old expression (freed on elided lines) is replaced */
522 av_free(ctx->enable_str);
523 ctx->enable_str = expr_dup;
/* Record the latest timestamp seen on a link, both in link time base and in
 * microseconds, and re-sort the graph's request heap accordingly. */
527 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
529 if (pts == AV_NOPTS_VALUE)
531 link->current_pts = pts;
532 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
533 /* TODO use duration */
534 if (link->graph && link->age_index >= 0)
535 ff_avfilter_graph_update_heap(link->graph, link);
/* Public API: send a command to a filter instance.  "ping" answers with a
 * pong string, "enable" re-parses the timeline expression; anything else is
 * delegated to the filter's own process_command callback, or ENOSYS. */
538 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
540 if(!strcmp(cmd, "ping")){
541 char local_res[256] = {0};
545 res_len = sizeof(local_res);
547 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
548 if (res == local_res)
549 av_log(filter, AV_LOG_INFO, "%s", res);
551 }else if(!strcmp(cmd, "enable")) {
552 return set_enable_expr(filter, arg);
553 }else if(filter->filter->process_command) {
554 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
556 return AVERROR(ENOSYS);
/* Global singly-linked registry of all registered filters.  last_filter
 * always points at the `next` field to append to. */
559 static AVFilter *first_filter;
560 static AVFilter **last_filter = &first_filter;
562 #if !FF_API_NOCONST_GET_NAME
/* Public API: linear lookup of a registered filter by name; NULL-returning
 * path is on elided lines. */
565 AVFilter *avfilter_get_by_name(const char *name)
567 const AVFilter *f = NULL;
572 while ((f = avfilter_next(f)))
573 if (!strcmp(f->name, name))
574 return (AVFilter *)f;
/* Public API: append a filter to the registry.  Lock-free: the CAS retries
 * until the tail `next` pointer is updated atomically. */
579 int avfilter_register(AVFilter *filter)
581 AVFilter **f = last_filter;
583 /* the filter must select generic or internal exclusively */
584 av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
588 while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
590 last_filter = &filter->next;
/* Public API: iterate the registry; NULL starts from the head. */
595 const AVFilter *avfilter_next(const AVFilter *prev)
597 return prev ? prev->next : first_filter;
600 #if FF_API_OLD_FILTER_REGISTER
/* Deprecated iteration API kept for ABI compatibility. */
601 AVFilter **av_filter_next(AVFilter **filter)
603 return filter ? &(*filter)->next : &first_filter;
/* Deprecated no-op kept for ABI compatibility. */
606 void avfilter_uninit(void)
/* Public API: count the pads in a NULL-name-terminated AVFilterPad array. */
611 int avfilter_pad_count(const AVFilterPad *pads)
618 for (count = 0; pads->name; count++)
/* AVClass item_name callback: instance name if set, else the filter's name. */
623 static const char *default_filter_name(void *filter_ctx)
625 AVFilterContext *ctx = filter_ctx;
626 return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the filter's private context (if it has
 * an AVClass) as the single option child. */
629 static void *filter_child_next(void *obj, void *prev)
631 AVFilterContext *ctx = obj;
632 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
/* AVClass child_class_next callback: iterate the priv_classes of all
 * registered filters, resuming after `prev`. */
637 static const AVClass *filter_child_class_next(const AVClass *prev)
639 const AVFilter *f = NULL;
641 /* find the filter that corresponds to prev */
642 while (prev && (f = avfilter_next(f)))
643 if (f->priv_class == prev)
646 /* could not find filter corresponding to prev */
650 /* find next filter with specific options */
651 while ((f = avfilter_next(f)))
653 return f->priv_class;
/* Generic AVOptions shared by every filter instance (thread type, timeline
 * 'enable' expression, thread count). */
658 #define OFFSET(x) offsetof(AVFilterContext, x)
659 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
660 static const AVOption avfilter_options[] = {
661 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
662 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
663 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
664 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
665 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
666 { .i64 = 0 }, 0, INT_MAX, FLAGS },
/* AVClass for AVFilterContext: wires logging names, option children and the
 * generic option table above. */
670 static const AVClass avfilter_class = {
671 .class_name = "AVFilter",
672 .item_name = default_filter_name,
673 .version = LIBAVUTIL_VERSION_INT,
674 .category = AV_CLASS_CATEGORY_FILTER,
675 .child_next = filter_child_next,
676 .child_class_next = filter_child_class_next,
677 .option = avfilter_options,
/* Default (single-threaded) execute callback: run the slice function for
 * every job sequentially, collecting per-job return codes in `ret`. */
680 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
681 int *ret, int nb_jobs)
685 for (i = 0; i < nb_jobs; i++) {
686 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for `filter`:
 * private context with defaults from its AVClass, internal state with the
 * single-threaded execute callback, and pad/link arrays copied from the
 * filter definition.  Returns NULL on allocation failure (cleanup labels are
 * on elided lines). */
693 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
695 AVFilterContext *ret;
700 ret = av_mallocz(sizeof(AVFilterContext));
704 ret->av_class = &avfilter_class;
705 ret->filter = filter;
706 ret->name = inst_name ? av_strdup(inst_name) : NULL;
707 if (filter->priv_size) {
708 ret->priv = av_mallocz(filter->priv_size);
713 av_opt_set_defaults(ret);
714 if (filter->priv_class) {
/* first member of the private context must be the AVClass pointer */
715 *(const AVClass**)ret->priv = filter->priv_class;
716 av_opt_set_defaults(ret->priv);
719 ret->internal = av_mallocz(sizeof(*ret->internal));
722 ret->internal->execute = default_execute;
724 ret->nb_inputs = avfilter_pad_count(filter->inputs);
725 if (ret->nb_inputs ) {
726 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
727 if (!ret->input_pads)
729 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
730 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
735 ret->nb_outputs = avfilter_pad_count(filter->outputs);
736 if (ret->nb_outputs) {
737 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
738 if (!ret->output_pads)
740 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
741 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* error path: release everything allocated so far */
749 av_freep(&ret->inputs);
750 av_freep(&ret->input_pads);
752 av_freep(&ret->outputs);
753 av_freep(&ret->output_pads);
755 av_freep(&ret->priv);
756 av_freep(&ret->internal);
761 #if FF_API_AVFILTER_OPEN
/* Deprecated public API: thin wrapper around ff_filter_alloc(). */
762 int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
764 *filter_ctx = ff_filter_alloc(filter, inst_name);
765 return *filter_ctx ? 0 : AVERROR(ENOMEM);
/* Detach a link from both endpoint filters, release its hardware frames
 * reference and negotiated format/samplerate/channel-layout lists, then free
 * the link itself. */
769 static void free_link(AVFilterLink *link)
775 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
777 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
779 av_buffer_unref(&link->hw_frames_ctx);
781 ff_formats_unref(&link->in_formats);
782 ff_formats_unref(&link->out_formats);
783 ff_formats_unref(&link->in_samplerates);
784 ff_formats_unref(&link->out_samplerates);
785 ff_channel_layouts_unref(&link->in_channel_layouts);
786 ff_channel_layouts_unref(&link->out_channel_layouts);
787 avfilter_link_free(&link);
/* Public API: destroy a filter instance — detach it from its graph, run the
 * filter's uninit callback, free all links, options, the command queue, the
 * timeline expression and all allocated arrays. */
790 void avfilter_free(AVFilterContext *filter)
798 ff_filter_graph_remove_filter(filter->graph, filter);
800 if (filter->filter->uninit)
801 filter->filter->uninit(filter);
803 for (i = 0; i < filter->nb_inputs; i++) {
804 free_link(filter->inputs[i]);
806 for (i = 0; i < filter->nb_outputs; i++) {
807 free_link(filter->outputs[i]);
810 if (filter->filter->priv_class)
811 av_opt_free(filter->priv);
813 av_buffer_unref(&filter->hw_device_ctx);
815 av_freep(&filter->name);
816 av_freep(&filter->input_pads);
817 av_freep(&filter->output_pads);
818 av_freep(&filter->inputs);
819 av_freep(&filter->outputs);
820 av_freep(&filter->priv);
821 while(filter->command_queue){
822 ff_command_queue_pop(filter);
825 av_expr_free(filter->enable);
826 filter->enable = NULL;
827 av_freep(&filter->var_values);
828 av_freep(&filter->internal);
/* Effective thread count for a filter: its own nb_threads option capped by
 * the graph's thread count, or the graph's count when unset. */
832 int ff_filter_get_nb_threads(AVFilterContext *ctx)
834 if (ctx->nb_threads > 0)
835 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
836 return ctx->graph->nb_threads;
/* Parse a filter argument string ("key=value:key=value", with shorthand
 * positional values consumed in private-option declaration order).  Generic
 * AVFilterContext options are applied directly; everything else is applied to
 * the private context and also collected into *options.  Finally re-parses
 * the 'enable' expression if one was set.
 * NOTE(review): loop headers and several branches are elided in this extract. */
839 static int process_options(AVFilterContext *ctx, AVDictionary **options,
842 const AVOption *o = NULL;
844 char *av_uninit(parsed_key), *av_uninit(value);
852 const char *shorthand = NULL;
854 o = av_opt_next(ctx->priv, o);
/* skip constants and options aliasing the same field when picking shorthand */
856 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
862 ret = av_opt_get_key_value(&args, "=", ":",
863 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
864 &parsed_key, &value);
866 if (ret == AVERROR(EINVAL))
867 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
869 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* an explicit key ends shorthand mode */
877 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
882 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
884 if (av_opt_find(ctx, key, NULL, 0, 0)) {
885 ret = av_opt_set(ctx, key, value, 0);
892 av_dict_set(options, key, value, 0);
893 if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
894 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
895 if (ret == AVERROR_OPTION_NOT_FOUND)
896 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
909 if (ctx->enable_str) {
910 ret = set_enable_expr(ctx, ctx->enable_str);
917 #if FF_API_AVFILTER_INIT_FILTER
/* Deprecated public API: `opaque` is ignored; forwards to avfilter_init_str(). */
918 int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
920 return avfilter_init_str(filter, args);
/* Public API: initialize a filter from an options dictionary.  Applies the
 * generic options, selects slice threading when the filter, the graph and
 * the option flags all allow it, applies the private options, then calls the
 * filter's init/init_opaque/init_dict callback (in that priority order). */
924 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
928 ret = av_opt_set_dict(ctx, options);
930 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
934 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
935 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
936 ctx->graph->internal->thread_execute) {
937 ctx->thread_type = AVFILTER_THREAD_SLICE;
938 ctx->internal->execute = ctx->graph->internal->thread_execute;
940 ctx->thread_type = 0;
943 if (ctx->filter->priv_class) {
944 ret = av_opt_set_dict(ctx->priv, options);
946 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
951 if (ctx->filter->init_opaque)
952 ret = ctx->filter->init_opaque(ctx, NULL);
953 else if (ctx->filter->init)
954 ret = ctx->filter->init(ctx);
955 else if (ctx->filter->init_dict)
956 ret = ctx->filter->init_dict(ctx, options);
/* Public API: initialize a filter from an argument string.  For a fixed list
 * of legacy filters it rewrites the deprecated ':'-separated syntax into the
 * '|'-separated one before parsing; otherwise the string is parsed directly
 * by process_options() and applied via avfilter_init_dict().  Leftover
 * dictionary entries mean an unknown option and cause failure. */
961 int avfilter_init_str(AVFilterContext *filter, const char *args)
963 AVDictionary *options = NULL;
964 AVDictionaryEntry *e;
968 if (!filter->filter->priv_class) {
969 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
970 "options, but options were provided: %s.\n", args);
971 return AVERROR(EINVAL);
974 #if FF_API_OLD_FILTER_OPTS || FF_API_OLD_FILTER_OPTS_ERROR
975 if ( !strcmp(filter->filter->name, "format") ||
976 !strcmp(filter->filter->name, "noformat") ||
977 !strcmp(filter->filter->name, "frei0r") ||
978 !strcmp(filter->filter->name, "frei0r_src") ||
979 !strcmp(filter->filter->name, "ocv") ||
980 !strcmp(filter->filter->name, "pan") ||
981 !strcmp(filter->filter->name, "pp") ||
982 !strcmp(filter->filter->name, "aevalsrc")) {
983 /* a hack for compatibility with the old syntax
984 * replace colons with |s */
985 char *copy = av_strdup(args);
987 int nb_leading = 0; // number of leading colons to skip
991 ret = AVERROR(ENOMEM);
995 if (!strcmp(filter->filter->name, "frei0r") ||
996 !strcmp(filter->filter->name, "ocv"))
998 else if (!strcmp(filter->filter->name, "frei0r_src"))
1001 while (nb_leading--) {
1004 p = copy + strlen(copy);
1010 deprecated = strchr(p, ':') != NULL;
1012 if (!strcmp(filter->filter->name, "aevalsrc")) {
/* aevalsrc: only colons before the "::" options separator are channel
 * expression separators and must become '|' */
1014 while ((p = strchr(p, ':')) && p[1] != ':') {
1015 const char *epos = strchr(p + 1, '=');
1016 const char *spos = strchr(p + 1, ':');
1017 const int next_token_is_opt = epos && (!spos || epos < spos);
1018 if (next_token_is_opt) {
1022 /* next token does not contain a '=', assume a channel expression */
1026 if (p && *p == ':') { // double sep '::' found
1028 memmove(p, p + 1, strlen(p));
1031 while ((p = strchr(p, ':')))
1034 #if FF_API_OLD_FILTER_OPTS
1036 av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
1037 "'|' to separate the list items.\n");
1039 av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
1040 ret = process_options(filter, &options, copy);
1043 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1044 "'|' to separate the list items ('%s' instead of '%s')\n",
1046 ret = AVERROR(EINVAL);
1048 ret = process_options(filter, &options, copy);
1058 ret = process_options(filter, &options, args);
1064 ret = avfilter_init_dict(filter, &options);
/* any entry left in the dictionary was not consumed by any option */
1068 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1069 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1070 ret = AVERROR_OPTION_NOT_FOUND;
1075 av_dict_free(&options);
/* Public accessor: name of the pad at pad_idx. */
1080 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1082 return pads[pad_idx].name;
/* Public accessor: media type of the pad at pad_idx. */
1085 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1087 return pads[pad_idx].type;
/* Fallback filter_frame callback: pass the frame straight to output 0. */
1090 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1092 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame into the destination pad of `link`: make it writable if
 * the pad requires it, process pending timed commands, evaluate the timeline
 * 'enable' expression (a disabled generic-timeline filter just passes the
 * frame through), then invoke the pad's filter_frame callback and update the
 * link's frame count and current pts. */
1095 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1097 int (*filter_frame)(AVFilterLink *, AVFrame *);
1098 AVFilterContext *dstctx = link->dst;
1099 AVFilterPad *dst = link->dstpad;
1102 if (!(filter_frame = dst->filter_frame))
1103 filter_frame = default_filter_frame;
1105 if (dst->needs_writable) {
1106 ret = ff_inlink_make_frame_writable(link, &frame);
1111 ff_inlink_process_commands(link, frame);
1112 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1115 if (dstctx->is_disabled &&
1116 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1117 filter_frame = default_filter_frame;
1118 ret = filter_frame(link, frame);
1119 link->frame_count_out++;
1120 ff_update_link_current_pts(link, frame->pts);
/* error path: the frame is owned here and must be released */
1124 av_frame_free(&frame);
/* Send a frame from the source side of a link: after consistency checks
 * (format/size for video with a whitelist of filters allowed to see changes;
 * format, channels, layout and rate for audio), queue the frame in the link
 * FIFO and schedule the destination filter.  Takes ownership of the frame;
 * it is freed on every failure path. */
1128 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1131 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1133 /* Consistency checks */
1134 if (link->type == AVMEDIA_TYPE_VIDEO) {
1135 if (strcmp(link->dst->filter->name, "buffersink") &&
1136 strcmp(link->dst->filter->name, "format") &&
1137 strcmp(link->dst->filter->name, "idet") &&
1138 strcmp(link->dst->filter->name, "null") &&
1139 strcmp(link->dst->filter->name, "scale")) {
1140 av_assert1(frame->format == link->format);
1141 av_assert1(frame->width == link->w);
1142 av_assert1(frame->height == link->h);
1145 if (frame->format != link->format) {
1146 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1149 if (av_frame_get_channels(frame) != link->channels) {
1150 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1153 if (frame->channel_layout != link->channel_layout) {
1154 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1157 if (frame->sample_rate != link->sample_rate) {
1158 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
/* a frame arrived: the link is neither blocked nor starved any more */
1163 link->frame_blocked_in = link->frame_wanted_out = 0;
1164 link->frame_count_in++;
1165 filter_unblock(link->dst);
1166 ret = ff_framequeue_add(&link->fifo, frame);
1168 av_frame_free(&frame);
1171 ff_filter_set_ready(link->dst, 300);
/* consistency-check failure path */
1175 av_frame_free(&frame);
1176 return AVERROR_PATCHWELCOME;
/* True when the link FIFO holds frames and at least `min` samples (the
 * remainder of the condition is on elided lines). */
1179 static int samples_ready(AVFilterLink *link, unsigned min)
1181 return ff_framequeue_queued_frames(&link->fifo) &&
1182 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into *rframe.
 * Fast path: the first queued frame alone satisfies the bounds and is handed
 * out as-is.  Otherwise whole frames are concatenated into a freshly
 * allocated buffer, and the last contributing frame is split in place: its
 * leading samples are copied out, the remainder shifted down and its pts
 * advanced accordingly. */
1186 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1189 AVFrame *frame0, *frame, *buf;
1190 unsigned nb_samples, nb_frames, i, p;
1193 /* Note: this function relies on no format changes and must only be
1194 called with enough samples. */
1195 av_assert1(samples_ready(link, link->min_samples));
1196 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1197 if (frame->nb_samples >= min && frame->nb_samples < max) {
1198 *rframe = ff_framequeue_take(&link->fifo);
/* count how many whole frames fit under max */
1204 if (nb_samples + frame->nb_samples > max) {
1205 if (nb_samples < min)
1209 nb_samples += frame->nb_samples;
1211 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1213 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1216 buf = ff_get_audio_buffer(link, nb_samples);
1218 return AVERROR(ENOMEM);
1219 ret = av_frame_copy_props(buf, frame0);
1221 av_frame_free(&buf);
/* output inherits the pts of the first source frame */
1224 buf->pts = frame0->pts;
1227 for (i = 0; i < nb_frames; i++) {
1228 frame = ff_framequeue_take(&link->fifo);
1229 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1230 frame->nb_samples, link->channels, link->format);
1231 p += frame->nb_samples;
1232 av_frame_free(&frame);
1234 if (p < nb_samples) {
1235 unsigned n = nb_samples - p;
1236 frame = ff_framequeue_peek(&link->fifo, 0);
1237 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1238 link->channels, link->format);
1239 frame->nb_samples -= n;
/* shift the remaining samples of the partially-consumed frame forward */
1240 av_samples_copy(frame->extended_data, frame->extended_data, 0, n,
1241 frame->nb_samples, link->channels, link->format);
1242 if (frame->pts != AV_NOPTS_VALUE)
1243 frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate), link->time_base);
1244 ff_framequeue_update_peeked(&link->fifo, 0);
1245 ff_framequeue_skip_samples(&link->fifo, n);
/* Take one frame (or min..max samples for audio) from the link FIFO and
 * deliver it to the destination filter via ff_filter_frame_framed().  An
 * error from the destination is recorded as the link's output status; the
 * filter is re-scheduled afterwards in case more work is available. */
1252 int ff_filter_frame_to_filter(AVFilterLink *link)
1254 AVFrame *frame = NULL;
1255 AVFilterContext *dst = link->dst;
1258 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1259 ret = link->min_samples ?
1260 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1261 ff_inlink_consume_frame(link, &frame);
1267 /* The filter will soon have received a new frame, that may allow it to
1268 produce one or more: unblock its outputs. */
1269 filter_unblock(dst);
1270 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1271 before the frame; ff_filter_frame_framed() will re-increment it. */
1272 link->frame_count_out--;
1273 ret = ff_filter_frame_framed(link, frame);
1274 if (ret < 0 && ret != link->status_out) {
1275 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1277 /* Run once again, to see if several frames were available, or if
1278 the input status has also changed, or any other reason. */
1279 ff_filter_set_ready(dst, 300);
/* Propagate an input status change (EOF/error on `in`) through `filter` by
 * requesting frames on its outputs until the input status is acknowledged or
 * every output is already closed — in which case the input is closed too. */
1284 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1286 unsigned out = 0, progress = 0;
1289 av_assert0(!in->status_out);
1290 if (!filter->nb_outputs) {
1291 /* not necessary with the current API and sinks */
1294 while (!in->status_out) {
1295 if (!filter->outputs[out]->status_in) {
1297 ret = ff_request_frame_to_filter(filter->outputs[out]);
1301 if (++out == filter->nb_outputs) {
1303 /* Every output already closed: input no longer interesting
1304 (example: overlay in shortest mode, other input closed). */
1305 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1312 ff_filter_set_ready(filter, 200);
/* Internal sentinel: the filter had nothing to do on this activation. */
1316 #define FFERROR_NOT_READY FFERRTAG('N','R','D','Y')
/* Default activation strategy, in priority order: (1) feed a queued input
 * frame to the filter, (2) forward a pending input status change,
 * (3) forward output frame requests upstream; otherwise report not-ready. */
1318 static int ff_filter_activate_default(AVFilterContext *filter)
1322 for (i = 0; i < filter->nb_inputs; i++) {
1323 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1324 return ff_filter_frame_to_filter(filter->inputs[i]);
1327 for (i = 0; i < filter->nb_inputs; i++) {
1328 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1329 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1330 return forward_status_change(filter, filter->inputs[i]);
1333 for (i = 0; i < filter->nb_outputs; i++) {
1334 if (filter->outputs[i]->frame_wanted_out &&
1335 !filter->outputs[i]->frame_blocked_in) {
1336 return ff_request_frame_to_filter(filter->outputs[i]);
1339 return FFERROR_NOT_READY;
1343 Filter scheduling and activation
1345 When a filter is activated, it must:
1346 - if possible, output a frame;
1347 - else, if relevant, forward the input status change;
1348 - else, check outputs for wanted frames and forward the requests.
1350 The following AVFilterLink fields are used for activation:
1354 This field indicates if a frame is needed on this input of the
1355 destination filter. A positive value indicates that a frame is needed
1356 to process queued frames or internal data or to satisfy the
1357 application; a zero value indicates that a frame is not especially
1358 needed but could be processed anyway; a negative value indicates that a
1359 frame would just be queued.
1361 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1362 when requested by the application through a specific API or when it is
1363 set on one of the outputs.
1365 It is cleared when a frame is sent from the source using
1368 It is also cleared when a status change is sent from the source using
1369 ff_avfilter_link_set_in_status().
1373 This field means that the source filter cannot generate a frame as is.
1374 Its goal is to avoid repeatedly calling the request_frame() method on
1377 It is set by the framework on all outputs of a filter before activating it.
1379 It is automatically cleared by ff_filter_frame().
1381 It is also automatically cleared by ff_avfilter_link_set_in_status().
1383 It is also cleared on all outputs (using filter_unblock()) when
1384 something happens on an input: processing a frame or changing the
1389 Contains the frames queued on a filter input. If it contains frames and
1390 frame_wanted_out is not set, then the filter can be activated. If that
1391 results in the filter not being able to use these frames, the filter must set
1392 frame_wanted_out to ask for more frames.
1394 - status_in and status_in_pts:
1396 Status (EOF or error code) of the link and timestamp of the status
1397 change (in link time base, same as frames) as seen from the input of
1398 the link. The status change is considered happening after the frames
1401 It is set by the source filter using ff_avfilter_link_set_in_status().
1405 Status of the link as seen from the output of the link. The status
1406 change is considered having already happened.
1408 It is set by the destination filter using
1409 ff_avfilter_link_set_out_status().
1411 Filters are activated according to the ready field, set using the
1412 ff_filter_set_ready(). Eventually, a priority queue will be used.
1413 ff_filter_set_ready() is called whenever anything could cause progress to
1414 be possible. Marking a filter ready when it is not is not a problem,
1415 except for the small overhead it causes.
1417 Conditions that cause a filter to be marked ready are:
1419 - frames added on an input link;
1421 - changes in the input or output status of an input link;
1423 - requests for a frame on an output link;
1425 - after any actual processing using the legacy methods (filter_frame(),
1426 and request_frame() to acknowledge status changes), to run once more
1427 and check if enough input was present for several frames.
1429 Examples of scenarios to consider:
1431 - buffersrc: activate if frame_wanted_out to notify the application;
1432 activate when the application adds a frame to push it immediately.
1434 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1436 - concat (not at stitch points): can process a frame on any output.
1437 Activate if frame_wanted_out on output to forward on the corresponding
1438 input. Activate when a frame is present on input to process it
1441 - framesync: needs at least one frame on each input; extra frames on the
1442 wrong input will accumulate. When a frame is first added on one input,
1443 set frame_wanted_out<0 on it to avoid getting more (would trigger
1444 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1446 Activation of old filters:
1448 In order to activate a filter implementing the legacy filter_frame() and
1449 request_frame() methods, perform the first possible of the following
1452 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1453 frame and call filter_frame().
1455 Rationale: filter frames as soon as possible instead of leaving them
1456 queued; frame_wanted_out < 0 is not possible since the old API does not
1457 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1458 when min_samples > 0 and there are not enough samples queued.
1460 - If an input has status_in set but not status_out, try to call
1461 request_frame() on one of the outputs in the hope that it will trigger
1462 request_frame() on the input with status_in and acknowledge it. This is
1463 awkward and fragile, filters with several inputs or outputs should be
1464 updated to direct activation as soon as possible.
1466 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1469 Rationale: checking frame_blocked_in is necessary to avoid requesting
1470 repeatedly on a blocked input if another is not blocked (example:
1471 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1473 TODO: respect needs_fifo and remove auto-inserted fifos.
1477 int ff_filter_activate(AVFilterContext *filter)
1481 /* Generic timeline support is not yet implemented but should be easy */
1482 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1483 filter->filter->activate));
1485 ret = filter->filter->activate ? filter->filter->activate(filter) :
1486 ff_filter_activate_default(filter);
1487 if (ret == FFERROR_NOT_READY)
1492 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1494 *rpts = link->current_pts;
1495 if (ff_framequeue_queued_frames(&link->fifo))
1496 return *rstatus = 0;
1497 if (link->status_out)
1498 return *rstatus = link->status_out;
1499 if (!link->status_in)
1500 return *rstatus = 0;
1501 *rstatus = link->status_out = link->status_in;
1502 ff_update_link_current_pts(link, link->status_in_pts);
1503 *rpts = link->current_pts;
1507 int ff_inlink_check_available_frame(AVFilterLink *link)
1509 return ff_framequeue_queued_frames(&link->fifo) > 0;
1512 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1514 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1516 return samples >= min || (link->status_in && samples);
1519 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1521 ff_inlink_process_commands(link, frame);
1522 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1523 link->frame_count_out++;
1526 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1531 if (!ff_inlink_check_available_frame(link))
1533 frame = ff_framequeue_take(&link->fifo);
1534 consume_update(link, frame);
1539 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1547 if (!ff_inlink_check_available_samples(link, min))
1549 if (link->status_in)
1550 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1551 ret = take_samples(link, min, link->max_samples, &frame);
1554 consume_update(link, frame);
1559 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1561 AVFrame *frame = *rframe;
1565 if (av_frame_is_writable(frame))
1567 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1569 switch (link->type) {
1570 case AVMEDIA_TYPE_VIDEO:
1571 out = ff_get_video_buffer(link, link->w, link->h);
1573 case AVMEDIA_TYPE_AUDIO:
1574 out = ff_get_audio_buffer(link, frame->nb_samples);
1577 return AVERROR(EINVAL);
1580 return AVERROR(ENOMEM);
1582 ret = av_frame_copy_props(out, frame);
1584 av_frame_free(&out);
1588 switch (link->type) {
1589 case AVMEDIA_TYPE_VIDEO:
1590 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1591 frame->format, frame->width, frame->height);
1593 case AVMEDIA_TYPE_AUDIO:
1594 av_samples_copy(out->extended_data, frame->extended_data,
1595 0, 0, frame->nb_samples,
1596 av_frame_get_channels(frame),
1600 av_assert0(!"reached");
1603 av_frame_free(&frame);
1608 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1610 AVFilterCommand *cmd = link->dst->command_queue;
1612 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1613 av_log(link->dst, AV_LOG_DEBUG,
1614 "Processing command time:%f command:%s arg:%s\n",
1615 cmd->time, cmd->command, cmd->arg);
1616 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1617 ff_command_queue_pop(link->dst);
1618 cmd= link->dst->command_queue;
1623 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1625 AVFilterContext *dstctx = link->dst;
1626 int64_t pts = frame->pts;
1627 int64_t pos = av_frame_get_pkt_pos(frame);
1629 if (!dstctx->enable_str)
1632 dstctx->var_values[VAR_N] = link->frame_count_out;
1633 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1634 dstctx->var_values[VAR_W] = link->w;
1635 dstctx->var_values[VAR_H] = link->h;
1636 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1638 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1641 const AVClass *avfilter_get_class(void)
1643 return &avfilter_class;