3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/atomic.h"
23 #include "libavutil/avassert.h"
24 #include "libavutil/avstring.h"
25 #include "libavutil/buffer.h"
26 #include "libavutil/channel_layout.h"
27 #include "libavutil/common.h"
28 #include "libavutil/eval.h"
29 #include "libavutil/hwcontext.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/internal.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/pixdesc.h"
34 #include "libavutil/rational.h"
35 #include "libavutil/samplefmt.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Version string embedded in the built library so binaries can be identified
 * (e.g. with `strings`). FFMPEG_VERSION comes from libavutil/ffversion.h. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Forward declaration: delivers one frame to the destination filter's
 * filter_frame callback (defined further down in this file). */
49 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame);
/* Trace-log helper: dump the salient fields of an AVFrame — buffer/data
 * pointers, linesizes, pts/pkt_pos, then video-specific (aspect, size,
 * interlacing, picture type) and audio-specific (layout, samples, rate)
 * fields. `end` selects whether a trailing newline is printed.
 * NOTE(review): this listing elides some physical lines of the function
 * (braces, ff_tlog() call heads); only comments were added here. */
51 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
53 av_unused char buf[16];
55 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
56 ref, ref->buf, ref->data[0],
57 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
58 ref->pts, av_frame_get_pkt_pos(ref));
/* video-only fields */
61 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
62 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
63 ref->width, ref->height,
64 !ref->interlaced_frame ? 'P' : /* Progressive */
65 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
67 av_get_picture_type_char(ref->pict_type));
/* audio-only fields */
69 if (ref->nb_samples) {
70 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
76 ff_tlog(ctx, "]%s", end ? "\n" : "");
79 unsigned avfilter_version(void)
81 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
82 return LIBAVFILTER_VERSION_INT;
85 const char *avfilter_configuration(void)
87 return FFMPEG_CONFIGURATION;
90 const char *avfilter_license(void)
92 #define LICENSE_PREFIX "libavfilter license: "
93 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
96 void ff_command_queue_pop(AVFilterContext *filter)
98 AVFilterCommand *c= filter->command_queue;
100 av_freep(&c->command);
101 filter->command_queue= c->next;
/* Insert a new pad (plus an empty link slot) at position idx into a
 * filter's pads/links arrays, growing both arrays by one, shifting the
 * tail entries up, and incrementing the pad index stored (at byte offset
 * padidx_off) in each link that was shifted.
 * NOTE(review): this listing elides some physical lines (the final
 * parameter, local declarations, braces, assignments of the realloc'd
 * arrays, and the return); only comments were added here. */
105 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
106 AVFilterPad **pads, AVFilterLink ***links,
109 AVFilterLink **newlinks;
110 AVFilterPad *newpads;
/* clamp the insertion point so idx == *count appends at the end */
113 idx = FFMIN(idx, *count);
115 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
116 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
121 if (!newpads || !newlinks)
122 return AVERROR(ENOMEM);
/* shift the tail up by one slot, then drop the new pad in place */
124 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
125 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
126 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
127 (*links)[idx] = NULL;
/* fix up the pad index cached inside each shifted link */
130 for (i = idx + 1; i < *count; i++)
132 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Link output pad srcpad of filter src to input pad dstpad of filter dst.
 * Both filters must belong to the same graph, the pad indices must be in
 * range, both pads must still be unconnected, and the media types must
 * match. Allocates the AVFilterLink, wires it into both filters and
 * initializes its frame FIFO.
 * NOTE(review): this listing elides some physical lines (local `link`
 * declaration, braces, src/dst assignments, return); comments only added. */
137 int avfilter_link(AVFilterContext *src, unsigned srcpad,
138 AVFilterContext *dst, unsigned dstpad)
142 av_assert0(src->graph);
143 av_assert0(dst->graph);
144 av_assert0(src->graph == dst->graph);
/* reject out-of-range pads and pads that are already connected */
146 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
147 src->outputs[srcpad] || dst->inputs[dstpad])
148 return AVERROR(EINVAL);
150 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
151 av_log(src, AV_LOG_ERROR,
152 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
153 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
154 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
155 return AVERROR(EINVAL);
158 link = av_mallocz(sizeof(*link));
160 return AVERROR(ENOMEM);
/* the same link object is shared by the source output and dest input */
162 src->outputs[srcpad] = dst->inputs[dstpad] = link;
166 link->srcpad = &src->output_pads[srcpad];
167 link->dstpad = &dst->input_pads[dstpad];
168 link->type = src->output_pads[srcpad].type;
/* link->format relies on the "none" values of both enums being -1 */
169 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
171 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
176 void avfilter_link_free(AVFilterLink **link)
181 av_frame_free(&(*link)->partial_buf);
182 ff_framequeue_free(&(*link)->fifo);
183 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
188 int avfilter_link_get_channels(AVFilterLink *link)
190 return link->channels;
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
195 filter->ready = FFMAX(filter->ready, priority);
199 * Clear frame_blocked_in on all outputs.
200 * This is necessary whenever something changes on input.
202 static void filter_unblock(AVFilterContext *filter)
206 for (i = 0; i < filter->nb_outputs; i++)
207 filter->outputs[i]->frame_blocked_in = 0;
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
213 if (link->status_in == status)
215 av_assert0(!link->status_in);
216 link->status_in = status;
217 link->status_in_pts = pts;
218 link->frame_wanted_out = 0;
219 link->frame_blocked_in = 0;
220 filter_unblock(link->dst);
221 ff_filter_set_ready(link->dst, 200);
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
226 av_assert0(!link->frame_wanted_out);
227 av_assert0(!link->status_out);
228 link->status_out = status;
229 if (pts != AV_NOPTS_VALUE)
230 ff_update_link_current_pts(link, pts);
231 filter_unblock(link->dst);
232 ff_filter_set_ready(link->src, 200);
235 void avfilter_link_set_closed(AVFilterLink *link, int closed)
237 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Splice filter filt into an existing link: the original link is re-pointed
 * to feed filt's input filt_srcpad_idx, and a new link is created from
 * filt's output filt_dstpad_idx to the original destination pad. Any format
 * lists already negotiated on the link are migrated to the new downstream
 * link so negotiation state is preserved.
 * NOTE(review): this listing elides some physical lines (ret declaration,
 * braces, return paths); only comments were added here. */
240 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
241 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
244 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
246 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
247 "between the filter '%s' and the filter '%s'\n",
248 filt->name, link->src->name, link->dst->name);
/* detach the old destination, then link filt's output to it */
250 link->dst->inputs[dstpad_idx] = NULL;
251 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
252 /* failed to link output filter to new filter */
253 link->dst->inputs[dstpad_idx] = link;
257 /* re-hookup the link to the new destination filter we inserted */
259 link->dstpad = &filt->input_pads[filt_srcpad_idx];
260 filt->inputs[filt_srcpad_idx] = link;
262 /* if any information on supported media formats already exists on the
263 * link, we need to preserve that */
264 if (link->out_formats)
265 ff_formats_changeref(&link->out_formats,
266 &filt->outputs[filt_dstpad_idx]->out_formats);
267 if (link->out_samplerates)
268 ff_formats_changeref(&link->out_samplerates,
269 &filt->outputs[filt_dstpad_idx]->out_samplerates);
270 if (link->out_channel_layouts)
271 ff_channel_layouts_changeref(&link->out_channel_layouts,
272 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Recursively configure all input links of a filter: walk upstream
 * (configuring source filters first), run the source pad's config_props
 * callback (or require one for multi-input/source filters), fill in
 * defaults inherited from the upstream link (time base, aspect ratio,
 * frame rate for video; time base for audio), propagate hw_frames_ctx when
 * formats match, then run the destination pad's config_props callback.
 * Uses link->init_state to detect circular graphs.
 * NOTE(review): this listing elides many physical lines (ret declaration,
 * braces, case labels, return paths); only comments were added here. */
277 int avfilter_config_links(AVFilterContext *filter)
279 int (*config_link)(AVFilterLink *);
283 for (i = 0; i < filter->nb_inputs; i ++) {
284 AVFilterLink *link = filter->inputs[i];
285 AVFilterLink *inlink;
288 if (!link->src || !link->dst) {
289 av_log(filter, AV_LOG_ERROR,
290 "Not all input and output are properly linked (%d).\n", i);
291 return AVERROR(EINVAL);
/* the "reference" upstream link defaults are inherited from */
294 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
296 link->current_pts_us = AV_NOPTS_VALUE;
298 switch (link->init_state) {
/* STARTINIT seen again while recursing => cycle in the graph */
301 case AVLINK_STARTINIT:
302 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
305 link->init_state = AVLINK_STARTINIT;
/* configure everything upstream of this link first */
307 if ((ret = avfilter_config_links(link->src)) < 0)
310 if (!(config_link = link->srcpad->config_props)) {
311 if (link->src->nb_inputs != 1) {
312 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
313 "with more than one input "
314 "must set config_props() "
315 "callbacks on all outputs\n");
316 return AVERROR(EINVAL);
318 } else if ((ret = config_link(link)) < 0) {
319 av_log(link->src, AV_LOG_ERROR,
320 "Failed to configure output pad on %s\n",
325 switch (link->type) {
326 case AVMEDIA_TYPE_VIDEO:
/* inherit unset properties from the upstream link */
327 if (!link->time_base.num && !link->time_base.den)
328 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
330 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
331 link->sample_aspect_ratio = inlink ?
332 inlink->sample_aspect_ratio : (AVRational){1,1};
335 if (!link->frame_rate.num && !link->frame_rate.den)
336 link->frame_rate = inlink->frame_rate;
341 } else if (!link->w || !link->h) {
342 av_log(link->src, AV_LOG_ERROR,
343 "Video source filters must set their output link's "
344 "width and height\n");
345 return AVERROR(EINVAL);
349 case AVMEDIA_TYPE_AUDIO:
351 if (!link->time_base.num && !link->time_base.den)
352 link->time_base = inlink->time_base;
/* last resort: 1/sample_rate as the audio time base */
355 if (!link->time_base.num && !link->time_base.den)
356 link->time_base = (AVRational) {1, link->sample_rate};
/* pass the upstream hw frames context through when the format matches */
359 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
360 !link->hw_frames_ctx) {
361 AVHWFramesContext *input_ctx = (AVHWFramesContext*)link->src->inputs[0]->hw_frames_ctx->data;
363 if (input_ctx->format == link->format) {
364 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
365 if (!link->hw_frames_ctx)
366 return AVERROR(ENOMEM);
370 if ((config_link = link->dstpad->config_props))
371 if ((ret = config_link(link)) < 0) {
372 av_log(link->dst, AV_LOG_ERROR,
373 "Failed to configure input pad on %s\n",
378 link->init_state = AVLINK_INIT;
/* Trace-log helper: dump a link's negotiated properties — size and pixel
 * format for video, rate/channel layout/sample format for audio — plus the
 * names of the filters at both ends. `end` selects a trailing newline.
 * NOTE(review): this listing elides some physical lines (braces, the
 * ff_tlog() call heads, the else branch header and buf declaration);
 * only comments were added here. */
385 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
387 if (link->type == AVMEDIA_TYPE_VIDEO) {
389 "link[%p s:%dx%d fmt:%s %s->%s]%s",
390 link, link->w, link->h,
391 av_get_pix_fmt_name(link->format),
392 link->src ? link->src->filter->name : "",
393 link->dst ? link->dst->filter->name : "",
/* audio branch: render the channel layout into a local buffer */
397 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
400 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
401 link, (int)link->sample_rate, buf,
402 av_get_sample_fmt_name(link->format),
403 link->src ? link->src->filter->name : "",
404 link->dst ? link->dst->filter->name : "",
/* Request a frame on a link from the destination side. If an output status
 * is already set it is returned immediately. If an input status (EOF/error)
 * is pending and no frames remain queued, the status change is acknowledged
 * and forwarded to the output side. Otherwise the link is flagged as
 * wanting a frame and the source filter is scheduled.
 * NOTE(review): this listing elides some physical lines (braces, return 0
 * paths); only comments were added here. */
409 int ff_request_frame(AVFilterLink *link)
411 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
413 if (link->status_out)
414 return link->status_out;
415 if (link->status_in) {
416 if (ff_framequeue_queued_frames(&link->fifo)) {
/* frames still queued: destination must consume them first */
417 av_assert1(!link->frame_wanted_out);
418 av_assert1(link->dst->ready >= 300);
421 /* Acknowledge status change. Filters using ff_request_frame() will
422 handle the change automatically. Filters can also check the
423 status directly but none do yet. */
424 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
425 return link->status_out;
/* nothing pending: ask the source to produce a frame */
428 link->frame_wanted_out = 1;
429 ff_filter_set_ready(link->src, 100);
/* Forward a frame request into the source filter of a link: call the pad's
 * request_frame callback if present, otherwise recurse on the source
 * filter's first input. Marks the link blocked beforehand (the callback is
 * expected to clear it if it can produce), and converts a returned error
 * (other than EAGAIN) into an input status change on the link.
 * NOTE(review): this listing elides some physical lines (ret declaration,
 * the EOF-to-0 translation, return); only comments were added here. */
433 int ff_request_frame_to_filter(AVFilterLink *link)
437 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
438 /* Assume the filter is blocked, let the method clear it if not */
439 link->frame_blocked_in = 1;
440 if (link->srcpad->request_frame)
441 ret = link->srcpad->request_frame(link);
442 else if (link->src->inputs[0])
443 ret = ff_request_frame(link->src->inputs[0]);
445 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
446 ff_avfilter_link_set_in_status(link, ret, AV_NOPTS_VALUE);
447 if (ret == AVERROR_EOF)
/* Poll how many frames are immediately available on a link: delegate to the
 * source pad's poll_frame callback if set, otherwise recurse over all of
 * the source filter's inputs and report the minimum across them.
 * NOTE(review): this listing elides some physical lines (val declaration,
 * braces, the final return of min); only comments were added here. */
453 int ff_poll_frame(AVFilterLink *link)
455 int i, min = INT_MAX;
457 if (link->srcpad->poll_frame)
458 return link->srcpad->poll_frame(link);
460 for (i = 0; i < link->src->nb_inputs; i++) {
462 if (!link->src->inputs[i])
463 return AVERROR(EINVAL);
464 val = ff_poll_frame(link->src->inputs[i]);
465 min = FFMIN(min, val);
/* Names of the variables available in the timeline 'enable' expression
 * (indexed by the VAR_* enum used below in set_enable_expr() and
 * ff_filter_frame_framed()).
 * NOTE(review): the array entries are elided in this listing. */
471 static const char *const var_names[] = {
/* Parse and install a new timeline 'enable' expression on a filter
 * context. Rejects filters without AVFILTER_FLAG_SUPPORT_TIMELINE,
 * lazily allocates the variable-value array, and on success replaces the
 * previous expression and stores a duplicate of the expression string.
 * NOTE(review): this listing elides some physical lines (ret declaration,
 * error-path frees of expr_dup, av_expr_free(old), return); comments only
 * were added here. */
489 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
493 AVExpr *old = ctx->enable;
495 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
496 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
497 "with filter '%s'\n", ctx->filter->name);
498 return AVERROR_PATCHWELCOME;
/* keep our own copy so the caller's string can go away */
501 expr_dup = av_strdup(expr);
503 return AVERROR(ENOMEM);
505 if (!ctx->var_values) {
506 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
507 if (!ctx->var_values) {
509 return AVERROR(ENOMEM);
513 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
514 NULL, NULL, NULL, NULL, 0, ctx->priv);
516 av_log(ctx->priv, AV_LOG_ERROR,
517 "Error when evaluating the expression '%s' for enable\n",
/* success: swap in the new expression string */
524 av_free(ctx->enable_str);
525 ctx->enable_str = expr_dup;
529 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
531 if (pts == AV_NOPTS_VALUE)
533 link->current_pts = pts;
534 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
535 /* TODO use duration */
536 if (link->graph && link->age_index >= 0)
537 ff_avfilter_graph_update_heap(link->graph, link);
/* Dispatch a runtime command to a filter instance: "ping" is answered
 * directly (into res, or a local buffer that is logged), "enable" updates
 * the timeline expression, anything else goes to the filter's own
 * process_command callback; ENOSYS if no handler exists.
 * NOTE(review): this listing elides some physical lines (braces, res
 * fallback assignment, return 0); only comments were added here. */
540 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
542 if(!strcmp(cmd, "ping")){
543 char local_res[256] = {0};
547 res_len = sizeof(local_res);
549 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
550 if (res == local_res)
551 av_log(filter, AV_LOG_INFO, "%s", res);
553 }else if(!strcmp(cmd, "enable")) {
554 return set_enable_expr(filter, arg);
555 }else if(filter->filter->process_command) {
556 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
558 return AVERROR(ENOSYS);
/* Singly-linked list of all registered filters; last_filter points at the
 * `next` slot to fill on the next avfilter_register() call. */
561 static AVFilter *first_filter;
562 static AVFilter **last_filter = &first_filter;
/* Look up a registered filter by name via linear search of the registry.
 * The non-const return type is kept for API compatibility (see
 * FF_API_NOCONST_GET_NAME).
 * NOTE(review): this listing elides some physical lines (the const
 * variant's signature, NULL-name guard, final return NULL); comments only
 * were added here. */
564 #if !FF_API_NOCONST_GET_NAME
567 AVFilter *avfilter_get_by_name(const char *name)
569 const AVFilter *f = NULL;
574 while ((f = avfilter_next(f)))
575 if (!strcmp(f->name, name))
576 return (AVFilter *)f;
/* Append a filter to the global registry. Uses an atomic compare-and-swap
 * on the tail `next` pointer so concurrent registrations do not corrupt
 * the list; on contention the loop advances to the new tail and retries.
 * NOTE(review): this listing elides some physical lines (the loop body
 * advancing f, and the return); only comments were added here. */
581 int avfilter_register(AVFilter *filter)
583 AVFilter **f = last_filter;
585 /* the filter must select generic or internal exclusively */
586 av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
590 while(*f || avpriv_atomic_ptr_cas((void * volatile *)f, NULL, filter))
592 last_filter = &filter->next;
597 const AVFilter *avfilter_next(const AVFilter *prev)
599 return prev ? prev->next : first_filter;
/* Deprecated registration-era API kept under FF_API_OLD_FILTER_REGISTER:
 * av_filter_next() exposes the raw list pointers, avfilter_uninit() is a
 * no-op. avfilter_pad_count() counts pads by walking until the
 * NULL-name sentinel entry.
 * NOTE(review): this listing elides some physical lines (braces, #endif,
 * the pads++ loop body and returns); only comments were added here. */
602 #if FF_API_OLD_FILTER_REGISTER
603 AVFilter **av_filter_next(AVFilter **filter)
605 return filter ? &(*filter)->next : &first_filter;
608 void avfilter_uninit(void)
613 int avfilter_pad_count(const AVFilterPad *pads)
620 for (count = 0; pads->name; count++)
625 static const char *default_filter_name(void *filter_ctx)
627 AVFilterContext *ctx = filter_ctx;
628 return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child traversal callbacks used for AVOption handling:
 * filter_child_next() exposes the filter's private context as the single
 * child object; filter_child_class_next() iterates over the priv_class of
 * every registered filter, resuming after `prev`.
 * NOTE(review): this listing elides some physical lines (returns, braces,
 * the priv_class filter inside the final loop); comments only added. */
631 static void *filter_child_next(void *obj, void *prev)
633 AVFilterContext *ctx = obj;
634 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
639 static const AVClass *filter_child_class_next(const AVClass *prev)
641 const AVFilter *f = NULL;
643 /* find the filter that corresponds to prev */
644 while (prev && (f = avfilter_next(f)))
645 if (f->priv_class == prev)
648 /* could not find filter corresponding to prev */
652 /* find next filter with specific options */
653 while ((f = avfilter_next(f)))
655 return f->priv_class;
/* Generic AVOptions shared by every filter instance: thread type flags,
 * the timeline 'enable' expression string, and the per-filter thread
 * count limit (0 = inherit from the graph).
 * NOTE(review): the terminating { NULL } sentinel entry is elided in this
 * listing; only comments were added here. */
660 #define OFFSET(x) offsetof(AVFilterContext, x)
661 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
662 static const AVOption avfilter_options[] = {
663 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
664 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
665 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
666 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
667 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
668 { .i64 = 0 }, 0, INT_MAX, FLAGS },
/* The AVClass shared by all AVFilterContext instances: naming, option
 * table and child traversal for the generic filter options above.
 * NOTE(review): the closing "};" of this initializer is elided in this
 * listing; only comments were added here. */
672 static const AVClass avfilter_class = {
673 .class_name = "AVFilter",
674 .item_name = default_filter_name,
675 .version = LIBAVUTIL_VERSION_INT,
676 .category = AV_CLASS_CATEGORY_FILTER,
677 .child_next = filter_child_next,
678 .child_class_next = filter_child_class_next,
679 .option = avfilter_options,
/* Default (single-threaded) execute callback: run the job function
 * sequentially for each job index, optionally recording per-job return
 * codes in ret[].
 * NOTE(review): this listing elides some physical lines (braces, the
 * ret[i] = r store, return 0); only comments were added here. */
682 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
683 int *ret, int nb_jobs)
687 for (i = 0; i < nb_jobs; i++) {
688 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for `filter`:
 * private data, option defaults (generic and priv_class), the internal
 * struct with the default execute callback, and the input/output pad and
 * link arrays sized from the filter's pad lists. On any allocation
 * failure everything allocated so far is freed and NULL is returned.
 * NOTE(review): this listing elides some physical lines (NULL checks,
 * braces, goto err labels, return statements); comments only added. */
695 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
697 AVFilterContext *ret;
702 ret = av_mallocz(sizeof(AVFilterContext));
706 ret->av_class = &avfilter_class;
707 ret->filter = filter;
708 ret->name = inst_name ? av_strdup(inst_name) : NULL;
709 if (filter->priv_size) {
710 ret->priv = av_mallocz(filter->priv_size);
/* generic options first, then the filter's own option defaults */
715 av_opt_set_defaults(ret);
716 if (filter->priv_class) {
717 *(const AVClass**)ret->priv = filter->priv_class;
718 av_opt_set_defaults(ret->priv);
721 ret->internal = av_mallocz(sizeof(*ret->internal));
724 ret->internal->execute = default_execute;
726 ret->nb_inputs = avfilter_pad_count(filter->inputs);
727 if (ret->nb_inputs ) {
728 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
729 if (!ret->input_pads)
731 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
732 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
737 ret->nb_outputs = avfilter_pad_count(filter->outputs);
738 if (ret->nb_outputs) {
739 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
740 if (!ret->output_pads)
742 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
743 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* error path: undo all partial allocations */
751 av_freep(&ret->inputs);
752 av_freep(&ret->input_pads);
754 av_freep(&ret->outputs);
755 av_freep(&ret->output_pads);
757 av_freep(&ret->priv);
758 av_freep(&ret->internal);
/* Deprecated public wrapper (FF_API_AVFILTER_OPEN) around
 * ff_filter_alloc(): stores the new context in *filter_ctx and maps an
 * allocation failure to ENOMEM.
 * NOTE(review): braces and #endif are elided in this listing. */
763 #if FF_API_AVFILTER_OPEN
764 int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
766 *filter_ctx = ff_filter_alloc(filter, inst_name);
767 return *filter_ctx ? 0 : AVERROR(ENOMEM);
/* Detach a link from both of its endpoint filters, release the resources
 * it references (hw frames context, all negotiated format/rate/layout
 * lists), then free the link itself via avfilter_link_free().
 * NOTE(review): this listing elides some physical lines (NULL guards and
 * braces around the src/dst detach); only comments were added here. */
771 static void free_link(AVFilterLink *link)
/* clear the slots pointing at this link on both endpoint filters */
777 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
779 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
781 av_buffer_unref(&link->hw_frames_ctx);
783 ff_formats_unref(&link->in_formats);
784 ff_formats_unref(&link->out_formats);
785 ff_formats_unref(&link->in_samplerates);
786 ff_formats_unref(&link->out_samplerates);
787 ff_channel_layouts_unref(&link->in_channel_layouts);
788 ff_channel_layouts_unref(&link->out_channel_layouts);
789 avfilter_link_free(&link);
/* Free a filter context: remove it from its graph, run the filter's
 * uninit callback, free all links, the option/private state, pads, any
 * queued commands, the enable expression, and finally the context itself.
 * NOTE(review): this listing elides some physical lines (NULL guard,
 * braces, the final av_free(filter)); only comments were added here. */
792 void avfilter_free(AVFilterContext *filter)
800 ff_filter_graph_remove_filter(filter->graph, filter);
802 if (filter->filter->uninit)
803 filter->filter->uninit(filter);
805 for (i = 0; i < filter->nb_inputs; i++) {
806 free_link(filter->inputs[i]);
808 for (i = 0; i < filter->nb_outputs; i++) {
809 free_link(filter->outputs[i]);
/* free options set on the private context before freeing it */
812 if (filter->filter->priv_class)
813 av_opt_free(filter->priv);
815 av_buffer_unref(&filter->hw_device_ctx);
817 av_freep(&filter->name);
818 av_freep(&filter->input_pads);
819 av_freep(&filter->output_pads);
820 av_freep(&filter->inputs);
821 av_freep(&filter->outputs);
822 av_freep(&filter->priv);
/* drain any commands that were never delivered */
823 while(filter->command_queue){
824 ff_command_queue_pop(filter);
827 av_expr_free(filter->enable);
828 filter->enable = NULL;
829 av_freep(&filter->var_values);
830 av_freep(&filter->internal);
834 int ff_filter_get_nb_threads(AVFilterContext *ctx)
836 if (ctx->nb_threads > 0)
837 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
838 return ctx->graph->nb_threads;
/* Parse a filter argument string into AVOptions: positional (shorthand)
 * values are matched against the priv_class option order, key=value pairs
 * are applied to the generic context options when recognized there,
 * otherwise to the private context, and unknown keys are collected into
 * *options for the caller to report. Finally compiles the 'enable'
 * expression if one was set.
 * NOTE(review): this listing is heavily elided (args parameter, loop
 * structure, key selection, frees of parsed_key/value, returns); only
 * comments were added here. */
841 static int process_options(AVFilterContext *ctx, AVDictionary **options,
844 const AVOption *o = NULL;
846 char *av_uninit(parsed_key), *av_uninit(value);
854 const char *shorthand = NULL;
/* advance to the next option eligible for positional (shorthand) use */
856 o = av_opt_next(ctx->priv, o);
858 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
864 ret = av_opt_get_key_value(&args, "=", ":",
865 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
866 &parsed_key, &value);
868 if (ret == AVERROR(EINVAL))
869 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
871 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* an explicit key ends positional matching for all later tokens */
879 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
884 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
/* generic AVFilterContext options take precedence */
886 if (av_opt_find(ctx, key, NULL, 0, 0)) {
887 ret = av_opt_set(ctx, key, value, 0);
894 av_dict_set(options, key, value, 0);
895 if ((ret = av_opt_set(ctx->priv, key, value, 0)) < 0) {
896 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
897 if (ret == AVERROR_OPTION_NOT_FOUND)
898 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
911 if (ctx->enable_str) {
912 ret = set_enable_expr(ctx, ctx->enable_str);
/* Deprecated wrapper (FF_API_AVFILTER_INIT_FILTER): the opaque parameter
 * is ignored and initialization is delegated to avfilter_init_str().
 * NOTE(review): braces and #endif are elided in this listing. */
919 #if FF_API_AVFILTER_INIT_FILTER
920 int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque)
922 return avfilter_init_str(filter, args);
/* Initialize a filter from an options dictionary: apply generic context
 * options, enable slice threading when the filter, the graph and the
 * thread_type option all allow it, apply private options, then run the
 * filter's init (or init_opaque/init_dict) callback.
 * NOTE(review): this listing elides some physical lines (ret declaration,
 * braces, return paths); only comments were added here. */
926 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
930 ret = av_opt_set_dict(ctx, options);
932 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
/* slice threading needs agreement from filter flags, graph settings
 * and an available thread_execute implementation */
936 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
937 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
938 ctx->graph->internal->thread_execute) {
939 ctx->thread_type = AVFILTER_THREAD_SLICE;
940 ctx->internal->execute = ctx->graph->internal->thread_execute;
942 ctx->thread_type = 0;
945 if (ctx->filter->priv_class) {
946 ret = av_opt_set_dict(ctx->priv, options);
948 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
/* exactly one of the init callbacks is invoked, in this priority order */
953 if (ctx->filter->init_opaque)
954 ret = ctx->filter->init_opaque(ctx, NULL);
955 else if (ctx->filter->init)
956 ret = ctx->filter->init(ctx);
957 else if (ctx->filter->init_dict)
958 ret = ctx->filter->init_dict(ctx, options);
/* Initialize a filter from an option string. Includes a compatibility
 * shim for a handful of filters (format, frei0r, pan, pp, aevalsrc, ...)
 * whose old syntax used ':' as the list separator: their colons are
 * rewritten to '|' (with care for leading positional arguments and for
 * aevalsrc's expression/option split) before normal option parsing.
 * Unknown options left over after parsing are reported as errors.
 * NOTE(review): this listing is heavily elided (ret/deprecated/p
 * declarations, braces, gotos, #endif, cleanup); comments only added. */
963 int avfilter_init_str(AVFilterContext *filter, const char *args)
965 AVDictionary *options = NULL;
966 AVDictionaryEntry *e;
/* a filter without a priv_class accepts no options at all */
970 if (!filter->filter->priv_class) {
971 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
972 "options, but options were provided: %s.\n", args);
973 return AVERROR(EINVAL);
976 #if FF_API_OLD_FILTER_OPTS || FF_API_OLD_FILTER_OPTS_ERROR
977 if ( !strcmp(filter->filter->name, "format") ||
978 !strcmp(filter->filter->name, "noformat") ||
979 !strcmp(filter->filter->name, "frei0r") ||
980 !strcmp(filter->filter->name, "frei0r_src") ||
981 !strcmp(filter->filter->name, "ocv") ||
982 !strcmp(filter->filter->name, "pan") ||
983 !strcmp(filter->filter->name, "pp") ||
984 !strcmp(filter->filter->name, "aevalsrc")) {
985 /* a hack for compatibility with the old syntax
986 * replace colons with |s */
987 char *copy = av_strdup(args);
989 int nb_leading = 0; // number of leading colons to skip
993 ret = AVERROR(ENOMEM);
/* these filters have positional arguments before the option list */
997 if (!strcmp(filter->filter->name, "frei0r") ||
998 !strcmp(filter->filter->name, "ocv"))
1000 else if (!strcmp(filter->filter->name, "frei0r_src"))
1003 while (nb_leading--) {
1006 p = copy + strlen(copy);
1012 deprecated = strchr(p, ':') != NULL;
1014 if (!strcmp(filter->filter->name, "aevalsrc")) {
/* separate the channel expressions (':'-joined) from the trailing
 * '::'-introduced option list */
1016 while ((p = strchr(p, ':')) && p[1] != ':') {
1017 const char *epos = strchr(p + 1, '=');
1018 const char *spos = strchr(p + 1, ':');
1019 const int next_token_is_opt = epos && (!spos || epos < spos);
1020 if (next_token_is_opt) {
1024 /* next token does not contain a '=', assume a channel expression */
1028 if (p && *p == ':') { // double sep '::' found
1030 memmove(p, p + 1, strlen(p));
1033 while ((p = strchr(p, ':')))
1036 #if FF_API_OLD_FILTER_OPTS
1038 av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
1039 "'|' to separate the list items.\n");
1041 av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
1042 ret = process_options(filter, &options, copy);
/* FF_API_OLD_FILTER_OPTS_ERROR branch: hard error instead of warning */
1045 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1046 "'|' to separate the list items ('%s' instead of '%s')\n",
1048 ret = AVERROR(EINVAL);
1050 ret = process_options(filter, &options, copy);
/* normal (non-compat) path */
1060 ret = process_options(filter, &options, args);
1066 ret = avfilter_init_dict(filter, &options);
/* anything still left in the dict was not consumed by any option */
1070 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1071 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1072 ret = AVERROR_OPTION_NOT_FOUND;
1077 av_dict_free(&options);
1082 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1084 return pads[pad_idx].name;
1087 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1089 return pads[pad_idx].type;
1092 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1094 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to the destination filter of a link: make the frame
 * writable if the pad requires it, execute any queued commands whose time
 * has arrived, evaluate the timeline 'enable' expression (possibly
 * bypassing the filter via default_filter_frame when disabled), then call
 * the pad's filter_frame callback and update frame counters and pts.
 * NOTE(review): this listing elides some physical lines (ret/pts
 * declarations, braces, the error path label, return); comments only
 * were added here. */
1097 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1099 int (*filter_frame)(AVFilterLink *, AVFrame *);
1100 AVFilterContext *dstctx = link->dst;
1101 AVFilterPad *dst = link->dstpad;
1103 AVFilterCommand *cmd= link->dst->command_queue;
1106 if (!(filter_frame = dst->filter_frame))
1107 filter_frame = default_filter_frame;
1109 if (dst->needs_writable) {
1110 ret = ff_inlink_make_frame_writable(link, &frame);
/* run queued commands whose scheduled time has been reached */
1115 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1116 av_log(link->dst, AV_LOG_DEBUG,
1117 "Processing command time:%f command:%s arg:%s\n",
1118 cmd->time, cmd->command, cmd->arg);
1119 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1120 ff_command_queue_pop(link->dst);
1121 cmd= link->dst->command_queue;
/* timeline support: evaluate 'enable' with the current frame's vars */
1125 if (dstctx->enable_str) {
1126 int64_t pos = av_frame_get_pkt_pos(frame);
1127 dstctx->var_values[VAR_N] = link->frame_count_out;
1128 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1129 dstctx->var_values[VAR_W] = link->w;
1130 dstctx->var_values[VAR_H] = link->h;
1131 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1133 dstctx->is_disabled = fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) < 0.5;
1134 if (dstctx->is_disabled &&
1135 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1136 filter_frame = default_filter_frame;
1138 ret = filter_frame(link, frame);
1139 link->frame_count_out++;
1140 ff_update_link_current_pts(link, pts);
/* error path: the frame is consumed either way */
1144 av_frame_free(&frame);
/* Queue a frame on a link for later delivery to the destination filter.
 * Performs consistency checks (asserted format/size match for video
 * except for a few format-changing filters; hard errors on audio format,
 * channel, layout or rate changes), clears the blocked/wanted flags,
 * pushes the frame into the link FIFO and schedules the destination.
 * NOTE(review): this listing elides some physical lines (ret declaration,
 * braces, the audio else-branch header, goto error, return paths);
 * only comments were added here. */
1148 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1151 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1153 /* Consistency checks */
1154 if (link->type == AVMEDIA_TYPE_VIDEO) {
/* these filters legitimately change video properties mid-stream */
1155 if (strcmp(link->dst->filter->name, "buffersink") &&
1156 strcmp(link->dst->filter->name, "format") &&
1157 strcmp(link->dst->filter->name, "idet") &&
1158 strcmp(link->dst->filter->name, "null") &&
1159 strcmp(link->dst->filter->name, "scale")) {
1160 av_assert1(frame->format == link->format);
1161 av_assert1(frame->width == link->w);
1162 av_assert1(frame->height == link->h);
/* audio: mid-stream parameter changes are not supported at all */
1165 if (frame->format != link->format) {
1166 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1169 if (av_frame_get_channels(frame) != link->channels) {
1170 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1173 if (frame->channel_layout != link->channel_layout) {
1174 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1177 if (frame->sample_rate != link->sample_rate) {
1178 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1183 link->frame_blocked_in = link->frame_wanted_out = 0;
1184 link->frame_count_in++;
1185 filter_unblock(link->dst);
1186 ret = ff_framequeue_add(&link->fifo, frame);
1188 av_frame_free(&frame);
/* priority 300: the destination has data to consume */
1191 ff_filter_set_ready(link->dst, 300);
1195 av_frame_free(&frame);
1196 return AVERROR_PATCHWELCOME;
/* Return nonzero when the link FIFO holds enough audio to process: at
 * least one frame queued, and either min_samples accumulated or another
 * condition (elided in this listing) satisfied.
 * NOTE(review): the tail of the condition and braces are elided here. */
1199 static int samples_ready(AVFilterLink *link)
1201 return ff_framequeue_queued_frames(&link->fifo) &&
1202 (ff_framequeue_queued_samples(&link->fifo) >= link->min_samples ||
/* Extract between min and max audio samples from the link FIFO into
 * *rframe. Fast path: the first queued frame alone fits the window and is
 * returned as-is. Otherwise whole frames are gathered (up to max), merged
 * into a newly allocated buffer, and if needed the next queued frame is
 * split: its leading samples are copied out, the remainder shifted down
 * in place and its pts advanced accordingly.
 * NOTE(review): this listing elides some physical lines (braces, the
 * accumulation-loop header, *rframe = buf and return); comments only
 * were added here. */
1206 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1209 AVFrame *frame0, *frame, *buf;
1210 unsigned nb_samples, nb_frames, i, p;
1213 /* Note: this function relies on no format changes and must only be
1214 called with enough samples. */
1215 av_assert1(samples_ready(link));
1216 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
/* fast path: first frame alone satisfies the window */
1217 if (frame->nb_samples >= min && frame->nb_samples < max) {
1218 *rframe = ff_framequeue_take(&link->fifo);
/* count how many whole frames fit below max */
1224 if (nb_samples + frame->nb_samples > max) {
1225 if (nb_samples < min)
1229 nb_samples += frame->nb_samples;
1231 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1233 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1236 buf = ff_get_audio_buffer(link, nb_samples);
1238 return AVERROR(ENOMEM);
1239 ret = av_frame_copy_props(buf, frame0);
1241 av_frame_free(&buf);
1244 buf->pts = frame0->pts;
/* copy the whole frames, consuming them from the FIFO */
1247 for (i = 0; i < nb_frames; i++) {
1248 frame = ff_framequeue_take(&link->fifo);
1249 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1250 frame->nb_samples, link->channels, link->format);
1251 p += frame->nb_samples;
1252 av_frame_free(&frame);
/* partial copy from the next frame; shift its remainder down in place */
1254 if (p < nb_samples) {
1255 unsigned n = nb_samples - p;
1256 frame = ff_framequeue_peek(&link->fifo, 0);
1257 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1258 link->channels, link->format);
1259 frame->nb_samples -= n;
1260 av_samples_copy(frame->extended_data, frame->extended_data, 0, n,
1261 frame->nb_samples, link->channels, link->format);
1262 if (frame->pts != AV_NOPTS_VALUE)
1263 frame->pts += av_rescale_q(n, av_make_q(1, link->sample_rate), link->time_base);
1264 ff_framequeue_update_peeked(&link->fifo, 0);
1265 ff_framequeue_skip_samples(&link->fifo, n);
/* Take one frame (or, for audio links with min_samples, a merged batch of
 * samples) out of the link FIFO and deliver it to the destination filter,
 * unblocking its outputs first. A delivery error (other than the existing
 * output status) is converted to an output status change, and the filter
 * is rescheduled to process any remaining input.
 * NOTE(review): this listing elides some physical lines (frame/ret
 * declarations, braces, return); only comments were added here. */
1272 int ff_filter_frame_to_filter(AVFilterLink *link)
1275 AVFilterContext *dst = link->dst;
1278 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1279 if (link->min_samples) {
1280 int min = link->min_samples;
/* at EOF, accept whatever is left even if below min_samples */
1281 if (link->status_in)
1282 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1283 ret = take_samples(link, min, link->max_samples, &frame);
1287 frame = ff_framequeue_take(&link->fifo);
1289 /* The filter will soon have received a new frame, that may allow it to
1290 produce one or more: unblock its outputs. */
1291 filter_unblock(dst);
1292 ret = ff_filter_frame_framed(link, frame);
1293 if (ret < 0 && ret != link->status_out) {
1294 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1296 /* Run once again, to see if several frames were available, or if
1297 the input status has also changed, or any other reason. */
1298 ff_filter_set_ready(dst, 300);
/* Propagate an input status change (EOF/error on link `in`) by requesting
 * frames on the filter's outputs round-robin until either the input's
 * output status gets set, or every output is itself closed — in which case
 * the input is acknowledged directly. Finally reschedules the filter.
 * NOTE(review): this listing elides some physical lines (ret declaration,
 * braces, progress bookkeeping, returns); only comments were added. */
1303 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1305 unsigned out = 0, progress = 0;
1308 av_assert0(!in->status_out);
1309 if (!filter->nb_outputs) {
1310 /* not necessary with the current API and sinks */
1313 while (!in->status_out) {
1314 if (!filter->outputs[out]->status_in) {
1316 ret = ff_request_frame_to_filter(filter->outputs[out]);
1320 if (++out == filter->nb_outputs) {
1322 /* Every output already closed: input no longer interesting
1323 (example: overlay in shortest mode, other input closed). */
1324 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1331 ff_filter_set_ready(filter, 200);
/* Sentinel error meaning "nothing to do right now" for the activation
 * loop. */
1335 #define FFERROR_NOT_READY FFERRTAG('N','R','D','Y')
/* Default activation strategy, in priority order: (1) deliver queued
 * input frames, (2) forward pending input status changes once the queue
 * is drained, (3) forward output frame requests upstream; otherwise
 * report FFERROR_NOT_READY.
 * NOTE(review): this listing elides some physical lines (i declaration,
 * braces); only comments were added here. */
1337 static int ff_filter_activate_default(AVFilterContext *filter)
1341 for (i = 0; i < filter->nb_inputs; i++) {
1342 if (samples_ready(filter->inputs[i])) {
1343 return ff_filter_frame_to_filter(filter->inputs[i]);
1346 for (i = 0; i < filter->nb_inputs; i++) {
1347 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1348 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1349 return forward_status_change(filter, filter->inputs[i]);
1352 for (i = 0; i < filter->nb_outputs; i++) {
1353 if (filter->outputs[i]->frame_wanted_out &&
1354 !filter->outputs[i]->frame_blocked_in) {
1355 return ff_request_frame_to_filter(filter->outputs[i]);
1358 return FFERROR_NOT_READY;
1362 Filter scheduling and activation
1364 When a filter is activated, it must:
1365 - if possible, output a frame;
1366 - else, if relevant, forward the input status change;
1367 - else, check outputs for wanted frames and forward the requests.
1369 The following AVFilterLink fields are used for activation:
1373 This field indicates if a frame is needed on this input of the
1374 destination filter. A positive value indicates that a frame is needed
1375 to process queued frames or internal data or to satisfy the
1376 application; a zero value indicates that a frame is not especially
1377 needed but could be processed anyway; a negative value indicates that a
1378 frame would just be queued.
1380 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1381 when requested by the application through a specific API or when it is
1382 set on one of the outputs.
1384 It is cleared when a frame is sent from the source using
1387 It is also cleared when a status change is sent from the source using
1388 ff_avfilter_link_set_in_status().
1392 This field means that the source filter can not generate a frame as is.
1393 Its goal is to avoid repeatedly calling the request_frame() method on
1396 It is set by the framework on all outputs of a filter before activating it.
1398 It is automatically cleared by ff_filter_frame().
1400 It is also automatically cleared by ff_avfilter_link_set_in_status().
1402 It is also cleared on all outputs (using filter_unblock()) when
1403 something happens on an input: processing a frame or changing the
1408 Contains the frames queued on a filter input. If it contains frames and
1409 frame_wanted_out is not set, then the filter can be activated. If that
1410 results in the filter not being able to use these frames, the filter must
1411 set frame_wanted_out to ask for more frames.
1413 - status_in and status_in_pts:
1415 Status (EOF or error code) of the link and timestamp of the status
1416 change (in link time base, same as frames) as seen from the input of
1417 the link. The status change is considered happening after the frames
1420 It is set by the source filter using ff_avfilter_link_set_in_status().
1424 Status of the link as seen from the output of the link. The status
1425 change is considered having already happened.
1427 It is set by the destination filter using
1428 ff_avfilter_link_set_out_status().
1430 Filters are activated according to the ready field, set using the
1431 ff_filter_set_ready(). Eventually, a priority queue will be used.
1432 ff_filter_set_ready() is called whenever anything could cause progress to
1433 be possible. Marking a filter ready when it is not is not a problem,
1434 except for the small overhead it causes.
1436 Conditions that cause a filter to be marked ready are:
1438 - frames added on an input link;
1440 - changes in the input or output status of an input link;
1442 - requests for a frame on an output link;
1444 - after any actual processing using the legacy methods (filter_frame(),
1445 and request_frame() to acknowledge status changes), to run once more
1446 and check if enough input was present for several frames.
1448 Examples of scenarios to consider:
1450 - buffersrc: activate if frame_wanted_out to notify the application;
1451 activate when the application adds a frame to push it immediately.
1453 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1455 - concat (not at stitch points): can process a frame on any output.
1456 Activate if frame_wanted_out on output to forward on the corresponding
1457 input. Activate when a frame is present on input to process it
1460 - framesync: needs at least one frame on each input; extra frames on the
1461 wrong input will accumulate. When a frame is first added on one input,
1462 set frame_wanted_out<0 on it to avoid getting more (would trigger
1463 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1465 Activation of old filters:
1467 In order to activate a filter implementing the legacy filter_frame() and
1468 request_frame() methods, perform the first possible of the following
1471 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1472 frame and call filter_frame().
1474 Rationale: filter frames as soon as possible instead of leaving them
1475 queued; frame_wanted_out < 0 is not possible since the old API does not
1476 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1477 when min_samples > 0 and there are not enough samples queued.
1479 - If an input has status_in set but not status_out, try to call
1480 request_frame() on one of the outputs in the hope that it will trigger
1481 request_frame() on the input with status_in and acknowledge it. This is
1482 awkward and fragile, filters with several inputs or outputs should be
1483 updated to direct activation as soon as possible.
1485 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1488 Rationale: checking frame_blocked_in is necessary to avoid requesting
1489 repeatedly on a blocked input if another is not blocked (example:
1490 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1492 TODO: respect needs_fifo and remove auto-inserted fifos.
/* Public activation entry point: run the default activation and map the
 * internal FFERROR_NOT_READY sentinel to "no error, nothing to do".
 * NOTE(review): this excerpt is elided — the declaration of ret, the
 * body braces and the function tail are missing from view. */
1496 int ff_filter_activate(AVFilterContext *filter)
1501 ret = ff_filter_activate_default(filter);
1502 if (ret == FFERROR_NOT_READY)
/* Test and, if needed, acknowledge a status change on the link: a status
 * change only becomes visible on the output side (status_out) once all
 * queued frames have been consumed. Writes the status (0 if none) to
 * *rstatus and the timestamp of the change to *rpts.
 * NOTE(review): this excerpt is elided — braces and the final return are
 * missing from view. */
1507 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1509 *rpts = link->current_pts;
/* Frames still queued: the status change is not yet reached. */
1510 if (ff_framequeue_queued_frames(&link->fifo))
1511 return *rstatus = 0;
/* Already acknowledged on a previous call: report the same status. */
1512 if (link->status_out)
1513 return *rstatus = link->status_out;
/* No pending status change from the source side. */
1514 if (!link->status_in)
1515 return *rstatus = 0;
/* Acknowledge now: propagate status_in to status_out and update the
 * link's current timestamp from the recorded status pts. */
1516 *rstatus = link->status_out = link->status_in;
1517 ff_update_link_current_pts(link, link->status_in_pts);
1518 *rpts = link->current_pts;
/* Ensure the frame in *rframe is writable, replacing it with a copy in a
 * freshly allocated buffer when it is not. On the copy path the original
 * (non-writable) frame is freed and *rframe is presumably updated to the
 * new frame — TODO confirm, the assignment is not visible in this view.
 * NOTE(review): this excerpt is elided — allocation/error checks, break
 * statements and braces are missing from view. */
1522 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1524 AVFrame *frame = *rframe;
/* Fast path: nothing to do when the frame is already writable. */
1528 if (av_frame_is_writable(frame))
1530 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
/* Allocate a replacement buffer of matching geometry per media type. */
1532 switch (link->type) {
1533 case AVMEDIA_TYPE_VIDEO:
1534 out = ff_get_video_buffer(link, link->w, link->h);
1536 case AVMEDIA_TYPE_AUDIO:
1537 out = ff_get_audio_buffer(link, frame->nb_samples);
/* Unsupported media type. */
1540 return AVERROR(EINVAL);
/* Buffer allocation failed. */
1543 return AVERROR(ENOMEM);
/* Copy metadata / side data first; free the new frame on failure. */
1545 ret = av_frame_copy_props(out, frame);
1547 av_frame_free(&out);
/* Copy the actual pixel or sample data into the writable buffer. */
1551 switch (link->type) {
1552 case AVMEDIA_TYPE_VIDEO:
1553 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1554 frame->format, frame->width, frame->height);
1556 case AVMEDIA_TYPE_AUDIO:
1557 av_samples_copy(out->extended_data, frame->extended_data,
1558 0, 0, frame->nb_samples,
1559 av_frame_get_channels(frame),
/* Unreachable: media type was validated by the first switch. */
1563 av_assert0(!"reached");
/* Release the original, non-writable frame. */
1566 av_frame_free(&frame);
1571 const AVClass *avfilter_get_class(void)
1573 return &avfilter_class;