3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Version string embedded in the binary so tools can identify the build. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log a frame reference: pointers, linesizes, pts/pos, then
 * video- or audio-specific properties.
 * NOTE(review): this copy is truncated — the gaps in the embedded line
 * numbers (50, 52, 57-58, 64, 66, 69-73) indicate missing lines (braces,
 * the leading ff_tlog() call, the audio argument list); restore from the
 * canonical source before compiling. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
/* Video-specific fields: aspect ratio, size, interlacing, picture type. */
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
/* Audio-specific fields: channel layout, sample count, sample rate. */
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
/* Insert a new pad (and its link slot) at position idx of a filter's
 * pad/link arrays, growing both arrays by one and fixing up the pad index
 * stored inside every link that moved.
 * NOTE(review): truncated copy — the signature tail (the newpad parameter),
 * the loop index declaration, braces and returns are missing (embedded
 * numbering gaps 105-106, 109-110, 112, 115-118, 121, 126-127, 129, 131+). */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
/* Clamp so insertion at the end is allowed but never past it. */
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
/* Shift the tail up by one slot to make room at idx. */
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
/* Links after the insertion point store their own pad index (at byte
 * offset padidx_off inside AVFilterLink); bump each by one. */
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Connect output pad srcpad of src to input pad dstpad of dst with a newly
 * allocated AVFilterLink. Both filters must belong to the same graph, the
 * pads must exist, be unconnected, and carry the same media type.
 * NOTE(review): truncated copy — the link declaration, braces, the link
 * field assignments for src/dst and format init, and the final return are
 * missing (embedded numbering gaps 137-139, 143, 147, 154-155, 157, 159,
 * 161-163, 168, 170+). */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
/* Reject out-of-range pads and pads that are already connected. */
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
/* The same link object is referenced from both ends. */
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
/* The code relies on "none" format sentinels being -1 for both media types. */
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
#if FF_API_FILTER_GET_SET
/**
 * Deprecated accessor: return the channel count of a link.
 * Kept under FF_API_FILTER_GET_SET for ABI compatibility.
 */
int avfilter_link_get_channels(AVFilterLink *link)
{
    return link->channels;
}
#endif
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
195 filter->ready = FFMAX(filter->ready, priority);
199 * Clear frame_blocked_in on all outputs.
200 * This is necessary whenever something changes on input.
202 static void filter_unblock(AVFilterContext *filter)
206 for (i = 0; i < filter->nb_outputs; i++)
207 filter->outputs[i]->frame_blocked_in = 0;
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
213 if (link->status_in == status)
215 av_assert0(!link->status_in);
216 link->status_in = status;
217 link->status_in_pts = pts;
218 link->frame_wanted_out = 0;
219 link->frame_blocked_in = 0;
220 filter_unblock(link->dst);
221 ff_filter_set_ready(link->dst, 200);
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
226 av_assert0(!link->frame_wanted_out);
227 av_assert0(!link->status_out);
228 link->status_out = status;
229 if (pts != AV_NOPTS_VALUE)
230 ff_update_link_current_pts(link, pts);
231 filter_unblock(link->dst);
232 ff_filter_set_ready(link->src, 200);
235 void avfilter_link_set_closed(AVFilterLink *link, int closed)
237 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Splice filter filt into an existing link: the link's destination becomes
 * filt's input filt_srcpad_idx, and filt's output filt_dstpad_idx is linked
 * to the original destination.
 * NOTE(review): truncated copy — the ret declaration, braces, the error
 * return, and the format-reference merge tail are missing (embedded
 * numbering gaps 242-243, 245, 249, 254-256, 258, 261, 273+). */
240 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
241 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
/* Recover the numeric pad index from the pad pointer. */
244 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
246 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
247 "between the filter '%s' and the filter '%s'\n",
248 filt->name, link->src->name, link->dst->name);
/* Temporarily detach so avfilter_link() sees the pad as free. */
250 link->dst->inputs[dstpad_idx] = NULL;
251 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
252 /* failed to link output filter to new filter */
253 link->dst->inputs[dstpad_idx] = link;
257 /* re-hookup the link to the new destination filter we inserted */
259 link->dstpad = &filt->input_pads[filt_srcpad_idx];
260 filt->inputs[filt_srcpad_idx] = link;
262 /* if any information on supported media formats already exists on the
263 * link, we need to preserve that */
264 if (link->out_formats)
265 ff_formats_changeref(&link->out_formats,
266 &filt->outputs[filt_dstpad_idx]->out_formats);
267 if (link->out_samplerates)
268 ff_formats_changeref(&link->out_samplerates,
269 &filt->outputs[filt_dstpad_idx]->out_samplerates);
270 if (link->out_channel_layouts)
271 ff_channel_layouts_changeref(&link->out_channel_layouts,
272 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Recursively configure all links feeding this filter: run each source
 * pad's config_props() (or require it for multi-input/source filters),
 * propagate time base / SAR / frame rate / hw_frames_ctx defaults from the
 * upstream link, then run the destination pad's config_props().
 * NOTE(review): truncated copy — braces, several case labels, returns and
 * the AVLINK_INIT early-continue are missing throughout (see the gaps in
 * the embedded numbering, e.g. 278, 280-282, 286-287, 292-293, 295, 297,
 * 299-300, 303-304, 306, 308-309, 317, 321-324, 329, 333-334, 337-340,
 * 346-348, 350, 353-354, 357-358, 366-367, 372-375, 377+). */
277 int avfilter_config_links(AVFilterContext *filter)
279 int (*config_link)(AVFilterLink *);
283 for (i = 0; i < filter->nb_inputs; i ++) {
284 AVFilterLink *link = filter->inputs[i];
285 AVFilterLink *inlink;
288 if (!link->src || !link->dst) {
289 av_log(filter, AV_LOG_ERROR,
290 "Not all input and output are properly linked (%d).\n", i);
291 return AVERROR(EINVAL);
/* First input of the upstream filter, used as the source of defaults. */
294 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
296 link->current_pts_us = AV_NOPTS_VALUE;
298 switch (link->init_state) {
/* STARTINIT while still configuring means we looped back onto ourselves. */
301 case AVLINK_STARTINIT:
302 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
305 link->init_state = AVLINK_STARTINIT;
/* Configure upstream first so its output properties are final. */
307 if ((ret = avfilter_config_links(link->src)) < 0)
310 if (!(config_link = link->srcpad->config_props)) {
311 if (link->src->nb_inputs != 1) {
312 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
313 "with more than one input "
314 "must set config_props() "
315 "callbacks on all outputs\n");
316 return AVERROR(EINVAL);
318 } else if ((ret = config_link(link)) < 0) {
319 av_log(link->src, AV_LOG_ERROR,
320 "Failed to configure output pad on %s\n",
/* Fill in properties the config_props callback left unset. */
325 switch (link->type) {
326 case AVMEDIA_TYPE_VIDEO:
327 if (!link->time_base.num && !link->time_base.den)
328 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
330 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
331 link->sample_aspect_ratio = inlink ?
332 inlink->sample_aspect_ratio : (AVRational){1,1};
335 if (!link->frame_rate.num && !link->frame_rate.den)
336 link->frame_rate = inlink->frame_rate;
341 } else if (!link->w || !link->h) {
342 av_log(link->src, AV_LOG_ERROR,
343 "Video source filters must set their output link's "
344 "width and height\n");
345 return AVERROR(EINVAL);
349 case AVMEDIA_TYPE_AUDIO:
351 if (!link->time_base.num && !link->time_base.den)
352 link->time_base = inlink->time_base;
/* Fallback: 1/sample_rate when no upstream time base exists. */
355 if (!link->time_base.num && !link->time_base.den)
356 link->time_base = (AVRational) {1, link->sample_rate};
/* Pass hw frames context through filters that are not hw-aware. */
359 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
360 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
361 av_assert0(!link->hw_frames_ctx &&
362 "should not be set by non-hwframe-aware filter");
363 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
364 if (!link->hw_frames_ctx)
365 return AVERROR(ENOMEM);
368 if ((config_link = link->dstpad->config_props))
369 if ((ret = config_link(link)) < 0) {
370 av_log(link->dst, AV_LOG_ERROR,
371 "Failed to configure input pad on %s\n",
376 link->init_state = AVLINK_INIT;
/* Trace-log a link: size/format for video, rate/layout/format for audio.
 * NOTE(review): truncated copy — the leading ff_tlog() calls, the audio
 * buf declaration, the else branch and the trailing end-marker arguments
 * are missing (embedded numbering gaps 384, 386, 392-394, 396-397, 403+). */
383 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
385 if (link->type == AVMEDIA_TYPE_VIDEO) {
387 "link[%p s:%dx%d fmt:%s %s->%s]%s",
388 link, link->w, link->h,
389 av_get_pix_fmt_name(link->format),
390 link->src ? link->src->filter->name : "",
391 link->dst ? link->dst->filter->name : "",
/* Audio branch: render the channel layout into a local string first. */
395 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
398 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399 link, (int)link->sample_rate, buf,
400 av_get_sample_fmt_name(link->format),
401 link->src ? link->src->filter->name : "",
402 link->dst ? link->dst->filter->name : "",
/* Legacy API: request a frame on a link. Returns a pending output status,
 * acknowledges a pending input status (when no frames remain queued), or
 * marks the link as wanting a frame and wakes the source filter.
 * NOTE(review): truncated copy — braces and the returns at 418-419/425-426/
 * 429-430 are missing per the embedded numbering gaps. */
407 int ff_request_frame(AVFilterLink *link)
409 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
/* This legacy entry point must not be mixed with the activate() API. */
411 av_assert1(!link->dst->filter->activate);
412 if (link->status_out)
413 return link->status_out;
414 if (link->status_in) {
415 if (ff_framequeue_queued_frames(&link->fifo)) {
416 av_assert1(!link->frame_wanted_out);
417 av_assert1(link->dst->ready >= 300);
420 /* Acknowledge status change. Filters using ff_request_frame() will
421 handle the change automatically. Filters can also check the
422 status directly but none do yet. */
423 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
424 return link->status_out;
427 link->frame_wanted_out = 1;
428 ff_filter_set_ready(link->src, 100);
/* Estimate a timestamp (in link_time_base) for a status change on an
 * output, from the pts of inputs that already carry the same status;
 * falls back to status_in_pts of all inputs with a warning.
 * NOTE(review): truncated copy — the loop index declaration, braces and
 * the "return r" path are missing (embedded numbering gaps 433-434, 436,
 * 440-441, 445-446). AV_NOPTS_VALUE at 447 looks like the no-estimate
 * fallback, not the only return. */
432 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
435 int64_t r = INT64_MAX;
437 for (i = 0; i < ctx->nb_inputs; i++)
438 if (ctx->inputs[i]->status_out == status)
439 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
442 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
443 for (i = 0; i < ctx->nb_inputs; i++)
444 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
447 return AV_NOPTS_VALUE;
/* Forward a frame request into the source filter of a link, via its
 * request_frame() callback or, failing that, by recursing into the source
 * filter's first input. A non-EAGAIN error becomes the link's input status.
 * NOTE(review): truncated copy — the ret declaration, braces and the final
 * return are missing (embedded numbering gaps 451-453, 461, 465+). */
450 static int ff_request_frame_to_filter(AVFilterLink *link)
454 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
455 /* Assume the filter is blocked, let the method clear it if not */
456 link->frame_blocked_in = 1;
457 if (link->srcpad->request_frame)
458 ret = link->srcpad->request_frame(link);
459 else if (link->src->inputs[0])
460 ret = ff_request_frame(link->src->inputs[0]);
462 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
463 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
464 if (ret == AVERROR_EOF)
/* Names of the variables usable in 'enable' timeline expressions.
 * NOTE(review): the initializer entries and closing "};" are missing from
 * this truncated copy (embedded numbering jumps from 470 to 488). */
470 static const char *const var_names[] = {
/* Parse and install a new 'enable' timeline expression on a filter,
 * lazily allocating the variable array and replacing the previous
 * expression and string on success.
 * NOTE(review): truncated copy — the ret/expr_dup declarations, braces,
 * the parse-error cleanup and the final return are missing (embedded
 * numbering gaps 489-491, 493, 498-499, 501, 503, 507, 509-511, 514,
 * 517-522, 525+). The saved 'old' expression is presumably freed in the
 * missing tail — verify against the canonical source. */
488 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
492 AVExpr *old = ctx->enable;
494 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
495 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
496 "with filter '%s'\n", ctx->filter->name);
497 return AVERROR_PATCHWELCOME;
500 expr_dup = av_strdup(expr);
502 return AVERROR(ENOMEM);
504 if (!ctx->var_values) {
505 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
506 if (!ctx->var_values) {
508 return AVERROR(ENOMEM);
512 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
513 NULL, NULL, NULL, NULL, 0, ctx->priv);
515 av_log(ctx->priv, AV_LOG_ERROR,
516 "Error when evaluating the expression '%s' for enable\n",
523 av_free(ctx->enable_str);
524 ctx->enable_str = expr_dup;
528 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
530 if (pts == AV_NOPTS_VALUE)
532 link->current_pts = pts;
533 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
534 /* TODO use duration */
535 if (link->graph && link->age_index >= 0)
536 ff_avfilter_graph_update_heap(link->graph, link);
/* Public command dispatcher: "ping" is answered directly, "enable"
 * re-parses the timeline expression, everything else is forwarded to the
 * filter's process_command() callback if it has one.
 * NOTE(review): truncated copy — braces, the res==NULL fallback to
 * local_res and the returns of the ping branch are missing (embedded
 * numbering gaps 540, 543-545, 547, 551, 556, 558+). */
539 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
541 if(!strcmp(cmd, "ping")){
542 char local_res[256] = {0};
546 res_len = sizeof(local_res);
548 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
/* When we substituted our own buffer, still surface the pong in the log. */
549 if (res == local_res)
550 av_log(filter, AV_LOG_INFO, "%s", res);
552 }else if(!strcmp(cmd, "enable")) {
553 return set_enable_expr(filter, arg);
554 }else if(filter->filter->process_command) {
555 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
557 return AVERROR(ENOSYS);
560 int avfilter_pad_count(const AVFilterPad *pads)
567 for (count = 0; pads->name; count++)
572 static const char *default_filter_name(void *filter_ctx)
574 AVFilterContext *ctx = filter_ctx;
575 return ctx->name ? ctx->name : ctx->filter->name;
578 static void *filter_child_next(void *obj, void *prev)
580 AVFilterContext *ctx = obj;
581 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
586 static const AVClass *filter_child_class_next(const AVClass *prev)
589 const AVFilter *f = NULL;
591 /* find the filter that corresponds to prev */
592 while (prev && (f = av_filter_iterate(&opaque)))
593 if (f->priv_class == prev)
596 /* could not find filter corresponding to prev */
600 /* find next filter with specific options */
601 while ((f = av_filter_iterate(&opaque)))
603 return f->priv_class;
608 #define OFFSET(x) offsetof(AVFilterContext, x)
609 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
610 static const AVOption avfilter_options[] = {
611 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
612 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
613 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
614 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
615 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
616 { .i64 = 0 }, 0, INT_MAX, FLAGS },
617 { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
618 OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
622 static const AVClass avfilter_class = {
623 .class_name = "AVFilter",
624 .item_name = default_filter_name,
625 .version = LIBAVUTIL_VERSION_INT,
626 .category = AV_CLASS_CATEGORY_FILTER,
627 .child_next = filter_child_next,
628 .child_class_next = filter_child_class_next,
629 .option = avfilter_options,
632 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
633 int *ret, int nb_jobs)
637 for (i = 0; i < nb_jobs; i++) {
638 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and pre-initialize an AVFilterContext for the given filter:
 * private data, option defaults, internal state, and copies of the
 * filter's input/output pad arrays plus NULL link arrays.
 * NOTE(review): truncated copy — the NULL checks after each allocation,
 * braces, the success return and the top of the error/cleanup label are
 * missing (embedded numbering gaps 646, 648-652, 654-656, 662-664,
 * 667-670, 675-676, 678-679, 681, 686, 689-692, 697, 700-708, 711, 714,
 * 717+); lines 709-716 are clearly the error path freeing everything. */
645 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
647 AVFilterContext *ret;
653 ret = av_mallocz(sizeof(AVFilterContext));
657 ret->av_class = &avfilter_class;
658 ret->filter = filter;
659 ret->name = inst_name ? av_strdup(inst_name) : NULL;
660 if (filter->priv_size) {
661 ret->priv = av_mallocz(filter->priv_size);
665 if (filter->preinit) {
666 if (filter->preinit(ret) < 0)
/* Apply generic defaults, then the filter's own option defaults. */
671 av_opt_set_defaults(ret);
672 if (filter->priv_class) {
673 *(const AVClass**)ret->priv = filter->priv_class;
674 av_opt_set_defaults(ret->priv);
677 ret->internal = av_mallocz(sizeof(*ret->internal));
680 ret->internal->execute = default_execute;
682 ret->nb_inputs = avfilter_pad_count(filter->inputs);
683 if (ret->nb_inputs ) {
684 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
685 if (!ret->input_pads)
687 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
688 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
693 ret->nb_outputs = avfilter_pad_count(filter->outputs);
694 if (ret->nb_outputs) {
695 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
696 if (!ret->output_pads)
698 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
699 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* Error path: release everything allocated so far. */
709 av_freep(&ret->inputs);
710 av_freep(&ret->input_pads);
712 av_freep(&ret->outputs);
713 av_freep(&ret->output_pads);
715 av_freep(&ret->priv);
716 av_freep(&ret->internal);
721 static void free_link(AVFilterLink *link)
727 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
729 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
731 av_buffer_unref(&link->hw_frames_ctx);
733 ff_formats_unref(&link->in_formats);
734 ff_formats_unref(&link->out_formats);
735 ff_formats_unref(&link->in_samplerates);
736 ff_formats_unref(&link->out_samplerates);
737 ff_channel_layouts_unref(&link->in_channel_layouts);
738 ff_channel_layouts_unref(&link->out_channel_layouts);
739 avfilter_link_free(&link);
/* Free a filter context: detach it from its graph, run uninit(), free all
 * links, options, pads, private data and the command queue, then the
 * context itself.
 * NOTE(review): truncated copy — the NULL guard on filter, the loop index
 * declaration, braces and the final av_free(filter) are missing (embedded
 * numbering gaps 743-749, 751, 754, 757, 760-761, 764, 766, 775-776,
 * 781+). */
742 void avfilter_free(AVFilterContext *filter)
750 ff_filter_graph_remove_filter(filter->graph, filter);
752 if (filter->filter->uninit)
753 filter->filter->uninit(filter);
755 for (i = 0; i < filter->nb_inputs; i++) {
756 free_link(filter->inputs[i]);
758 for (i = 0; i < filter->nb_outputs; i++) {
759 free_link(filter->outputs[i]);
/* Only filters with a priv_class have AVOptions to free in priv. */
762 if (filter->filter->priv_class)
763 av_opt_free(filter->priv);
765 av_buffer_unref(&filter->hw_device_ctx);
767 av_freep(&filter->name);
768 av_freep(&filter->input_pads);
769 av_freep(&filter->output_pads);
770 av_freep(&filter->inputs);
771 av_freep(&filter->outputs);
772 av_freep(&filter->priv);
773 while(filter->command_queue){
774 ff_command_queue_pop(filter);
777 av_expr_free(filter->enable);
778 filter->enable = NULL;
779 av_freep(&filter->var_values);
780 av_freep(&filter->internal);
784 int ff_filter_get_nb_threads(AVFilterContext *ctx)
786 if (ctx->nb_threads > 0)
787 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
788 return ctx->graph->nb_threads;
/* Parse a key=value:key=value option string for a filter, supporting
 * shorthand (positional) options taken from the priv_class option order;
 * generic AVFilterContext options are applied directly, everything else is
 * set on the private context and mirrored into *options.
 * NOTE(review): heavily truncated copy — the args parameter, declarations
 * (ret, count, key, offset), the parse loop structure, braces and returns
 * are missing (see embedded numbering gaps 792-793, 795, 797-803, 805,
 * 807, 809-813, 817, 820, 822-828, 830-833, 835, 838-843, 849-860,
 * 863+). */
791 static int process_options(AVFilterContext *ctx, AVDictionary **options,
794 const AVOption *o = NULL;
796 char *av_uninit(parsed_key), *av_uninit(value);
804 const char *shorthand = NULL;
/* Walk the priv_class options in order to find the next shorthand name. */
806 o = av_opt_next(ctx->priv, o);
808 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
814 ret = av_opt_get_key_value(&args, "=", ":",
815 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
816 &parsed_key, &value);
818 if (ret == AVERROR(EINVAL))
819 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
821 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* An explicit key ends shorthand mode for the rest of the string. */
829 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
834 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
/* Generic AVFilterContext options are applied immediately. */
836 if (av_opt_find(ctx, key, NULL, 0, 0)) {
837 ret = av_opt_set(ctx, key, value, 0);
844 av_dict_set(options, key, value, 0);
845 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
846 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
847 if (ret == AVERROR_OPTION_NOT_FOUND)
848 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
/* The enable expression is compiled once all options are parsed. */
861 if (ctx->enable_str) {
862 ret = set_enable_expr(ctx, ctx->enable_str);
869 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
870 const char *arg, char *res, int res_len, int flags)
874 if (!ctx->filter->priv_class)
876 o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
878 return AVERROR(ENOSYS);
879 return av_opt_set(ctx->priv, cmd, arg, 0);
/* Initialize a filter from an options dictionary: apply generic options,
 * decide whether slice threading is usable, apply private options, then
 * call the filter's init callback (init_opaque / init / init_dict).
 * NOTE(review): truncated copy — the ret declaration, braces, the error
 * returns and the final return are missing (embedded numbering gaps
 * 883-885, 887, 889-891, 897, 899-900, 903, 905-908, 915+). */
882 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
886 ret = av_opt_set_dict(ctx, options);
888 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
/* Slice threading requires the filter flag, the graph's permission and a
 * thread_execute callback provided by the graph. */
892 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
893 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
894 ctx->graph->internal->thread_execute) {
895 ctx->thread_type = AVFILTER_THREAD_SLICE;
896 ctx->internal->execute = ctx->graph->internal->thread_execute;
898 ctx->thread_type = 0;
901 if (ctx->filter->priv_class) {
902 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
904 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
909 if (ctx->filter->init_opaque)
910 ret = ctx->filter->init_opaque(ctx, NULL);
911 else if (ctx->filter->init)
912 ret = ctx->filter->init(ctx);
913 else if (ctx->filter->init_dict)
914 ret = ctx->filter->init_dict(ctx, options);
/* Initialize a filter from an option string, with a compatibility shim
 * (under FF_API_OLD_FILTER_OPTS_ERROR) that detects the old ':'-separated
 * list syntax for a fixed set of filters and rejects it with a hint to use
 * '|' instead.
 * NOTE(review): heavily truncated copy — ret/p/deprecated declarations,
 * braces, several returns and the end of the #if block are missing (see
 * the many gaps in the embedded numbering: 920, 923-925, 930-931, 944,
 * 946-948, 950-952, 955, 957-958, 960-961, 963-967, 969, 971, 977-979,
 * 981-983, 985, 987-988, 990-992, 995, 997, 999-1006, 1008-1012,
 * 1014-1016, 1020-1023, 1025+). */
919 int avfilter_init_str(AVFilterContext *filter, const char *args)
921 AVDictionary *options = NULL;
922 AVDictionaryEntry *e;
926 if (!filter->filter->priv_class) {
927 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
928 "options, but options were provided: %s.\n", args);
929 return AVERROR(EINVAL);
932 #if FF_API_OLD_FILTER_OPTS_ERROR
/* Filters that historically took ':'-separated value lists. */
933 if ( !strcmp(filter->filter->name, "format") ||
934 !strcmp(filter->filter->name, "noformat") ||
935 !strcmp(filter->filter->name, "frei0r") ||
936 !strcmp(filter->filter->name, "frei0r_src") ||
937 !strcmp(filter->filter->name, "ocv") ||
938 !strcmp(filter->filter->name, "pan") ||
939 !strcmp(filter->filter->name, "pp") ||
940 !strcmp(filter->filter->name, "aevalsrc")) {
941 /* a hack for compatibility with the old syntax
942 * replace colons with |s */
943 char *copy = av_strdup(args);
945 int nb_leading = 0; // number of leading colons to skip
949 ret = AVERROR(ENOMEM);
953 if (!strcmp(filter->filter->name, "frei0r") ||
954 !strcmp(filter->filter->name, "ocv"))
956 else if (!strcmp(filter->filter->name, "frei0r_src"))
959 while (nb_leading--) {
962 p = copy + strlen(copy);
968 deprecated = strchr(p, ':') != NULL;
970 if (!strcmp(filter->filter->name, "aevalsrc")) {
/* Scan ':'-separated tokens until the '::' option separator. */
972 while ((p = strchr(p, ':')) && p[1] != ':') {
973 const char *epos = strchr(p + 1, '=');
974 const char *spos = strchr(p + 1, ':');
975 const int next_token_is_opt = epos && (!spos || epos < spos);
976 if (next_token_is_opt) {
980 /* next token does not contain a '=', assume a channel expression */
984 if (p && *p == ':') { // double sep '::' found
986 memmove(p, p + 1, strlen(p));
989 while ((p = strchr(p, ':')))
993 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
994 "'|' to separate the list items ('%s' instead of '%s')\n",
996 ret = AVERROR(EINVAL);
998 ret = process_options(filter, &options, copy);
1007 ret = process_options(filter, &options, args);
1013 ret = avfilter_init_dict(filter, &options);
/* Any leftover entries in options are unknown option names. */
1017 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1018 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1019 ret = AVERROR_OPTION_NOT_FOUND;
1024 av_dict_free(&options);
1029 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1031 return pads[pad_idx].name;
1034 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1036 return pads[pad_idx].type;
1039 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1041 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to the destination pad's filter_frame callback,
 * making the frame writable if the pad requires it, processing queued
 * commands and evaluating the timeline 'enable' expression first.
 * NOTE(review): truncated copy — the ret declaration, braces, the
 * writable-failure goto, the return and the fail label are missing
 * (embedded numbering gaps 1045, 1049-1050, 1053, 1056-1059, 1062,
 * 1068-1070, 1072+). */
1044 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1046 int (*filter_frame)(AVFilterLink *, AVFrame *);
1047 AVFilterContext *dstctx = link->dst;
1048 AVFilterPad *dst = link->dstpad;
1051 if (!(filter_frame = dst->filter_frame))
1052 filter_frame = default_filter_frame;
1054 if (dst->needs_writable) {
1055 ret = ff_inlink_make_frame_writable(link, &frame);
1060 ff_inlink_process_commands(link, frame);
1061 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
/* A disabled timeline-generic filter passes frames through untouched. */
1063 if (dstctx->is_disabled &&
1064 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1065 filter_frame = default_filter_frame;
1066 ret = filter_frame(link, frame);
1067 link->frame_count_out++;
1071 av_frame_free(&frame);
/* Send a frame to the next filter: sanity-check that the frame's
 * properties match the link (asserts for video, hard errors for audio),
 * then queue it on the link FIFO and mark the destination ready.
 * NOTE(review): truncated copy — the ret declaration, braces, the "goto
 * error" statements after each audio mismatch log, and the labeled error
 * block structure are missing (embedded numbering gaps 1076-1077, 1079,
 * 1090-1091, 1094-1095, 1098-1099, 1102-1103, 1106-1109, 1114,
 * 1116-1117, 1119-1121, 1124+). Lines 1122-1123 are clearly the error
 * path: drop the frame and return AVERROR_PATCHWELCOME. */
1075 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1078 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1080 /* Consistency checks */
1081 if (link->type == AVMEDIA_TYPE_VIDEO) {
/* A few filters legitimately change video properties mid-stream and are
 * exempted from the assertions by name. */
1082 if (strcmp(link->dst->filter->name, "buffersink") &&
1083 strcmp(link->dst->filter->name, "format") &&
1084 strcmp(link->dst->filter->name, "idet") &&
1085 strcmp(link->dst->filter->name, "null") &&
1086 strcmp(link->dst->filter->name, "scale")) {
1087 av_assert1(frame->format == link->format);
1088 av_assert1(frame->width == link->w);
1089 av_assert1(frame->height == link->h);
/* Audio: any property change is a hard error. */
1092 if (frame->format != link->format) {
1093 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1096 if (frame->channels != link->channels) {
1097 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1100 if (frame->channel_layout != link->channel_layout) {
1101 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1104 if (frame->sample_rate != link->sample_rate) {
1105 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1110 link->frame_blocked_in = link->frame_wanted_out = 0;
1111 link->frame_count_in++;
1112 filter_unblock(link->dst);
1113 ret = ff_framequeue_add(&link->fifo, frame);
1115 av_frame_free(&frame);
/* Priority 300: the destination now has data to consume. */
1118 ff_filter_set_ready(link->dst, 300);
1122 av_frame_free(&frame);
1123 return AVERROR_PATCHWELCOME;
1126 static int samples_ready(AVFilterLink *link, unsigned min)
1128 return ff_framequeue_queued_frames(&link->fifo) &&
1129 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max samples from a link's frame FIFO into
 * *rframe: fast path returns the first queued frame unchanged when it
 * already fits; otherwise whole frames are merged into a new audio buffer
 * and a trailing partial frame is consumed via skip_samples.
 * NOTE(review): truncated copy — the rframe parameter, ret declaration,
 * braces, the accumulation loop header, several returns and the final
 * "*rframe = buf; return 0;" are missing (embedded numbering gaps
 * 1134-1135, 1138-1139, 1146-1150, 1153-1155, 1157, 1159, 1161-1162,
 * 1164, 1167, 1169-1170, 1172-1173, 1180, 1187-1192). */
1133 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1136 AVFrame *frame0, *frame, *buf;
1137 unsigned nb_samples, nb_frames, i, p;
1140 /* Note: this function relies on no format changes and must only be
1141 called with enough samples. */
1142 av_assert1(samples_ready(link, link->min_samples));
1143 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
/* Fast path: hand over the first frame as-is when it fits [min, max]. */
1144 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1145 *rframe = ff_framequeue_take(&link->fifo);
/* Count how many whole frames fit under max without dropping below min. */
1151 if (nb_samples + frame->nb_samples > max) {
1152 if (nb_samples < min)
1156 nb_samples += frame->nb_samples;
1158 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1160 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1163 buf = ff_get_audio_buffer(link, nb_samples);
1165 return AVERROR(ENOMEM);
1166 ret = av_frame_copy_props(buf, frame0);
1168 av_frame_free(&buf);
1171 buf->pts = frame0->pts;
/* Copy the counted whole frames into the merged buffer. */
1174 for (i = 0; i < nb_frames; i++) {
1175 frame = ff_framequeue_take(&link->fifo);
1176 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1177 frame->nb_samples, link->channels, link->format);
1178 p += frame->nb_samples;
1179 av_frame_free(&frame);
/* Take the remainder from the next frame without consuming all of it. */
1181 if (p < nb_samples) {
1182 unsigned n = nb_samples - p;
1183 frame = ff_framequeue_peek(&link->fifo, 0);
1184 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1185 link->channels, link->format);
1186 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Pop one frame (or a min/max-sized sample chunk) from an input link and
 * feed it to the destination filter via ff_filter_frame_framed(), then
 * re-schedule the filter in case more work is possible.
 * NOTE(review): truncated copy — the ret declaration, braces, the
 * consume-failure check/return and the final return are missing (embedded
 * numbering gaps 1194, 1197-1198, 1203-1207, 1217, 1221+). */
1193 static int ff_filter_frame_to_filter(AVFilterLink *link)
1195 AVFrame *frame = NULL;
1196 AVFilterContext *dst = link->dst;
1199 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1200 ret = link->min_samples ?
1201 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1202 ff_inlink_consume_frame(link, &frame);
1208 /* The filter will soon have received a new frame, that may allow it to
1209 produce one or more: unblock its outputs. */
1210 filter_unblock(dst);
1211 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1212 before the frame; ff_filter_frame_framed() will re-increment it. */
1213 link->frame_count_out--;
1214 ret = ff_filter_frame_framed(link, frame);
1215 if (ret < 0 && ret != link->status_out) {
1216 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1218 /* Run once again, to see if several frames were available, or if
1219 the input status has also changed, or any other reason. */
1220 ff_filter_set_ready(dst, 300);
/* React to a status change on an input: keep requesting frames from the
 * filter's outputs (round-robin) until the input's output status is set,
 * or every output is itself closed.
 * NOTE(review): truncated copy — the ret declaration, braces, returns,
 * the progress bookkeeping and the EAGAIN/no-progress handling are
 * missing (embedded numbering gaps 1226, 1228-1229, 1233-1234, 1237,
 * 1239-1241, 1243, 1247-1252, 1254+). */
1225 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1227 unsigned out = 0, progress = 0;
1230 av_assert0(!in->status_out);
1231 if (!filter->nb_outputs) {
1232 /* not necessary with the current API and sinks */
1235 while (!in->status_out) {
1236 if (!filter->outputs[out]->status_in) {
1238 ret = ff_request_frame_to_filter(filter->outputs[out]);
1242 if (++out == filter->nb_outputs) {
1244 /* Every output already closed: input no longer interesting
1245 (example: overlay in shortest mode, other input closed). */
1246 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1253 ff_filter_set_ready(filter, 200);
1257 static int ff_filter_activate_default(AVFilterContext *filter)
1261 for (i = 0; i < filter->nb_inputs; i++) {
1262 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1263 return ff_filter_frame_to_filter(filter->inputs[i]);
1266 for (i = 0; i < filter->nb_inputs; i++) {
1267 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1268 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1269 return forward_status_change(filter, filter->inputs[i]);
1272 for (i = 0; i < filter->nb_outputs; i++) {
1273 if (filter->outputs[i]->frame_wanted_out &&
1274 !filter->outputs[i]->frame_blocked_in) {
1275 return ff_request_frame_to_filter(filter->outputs[i]);
1278 return FFERROR_NOT_READY;
1282 Filter scheduling and activation
1284 When a filter is activated, it must:
1285 - if possible, output a frame;
1286 - else, if relevant, forward the input status change;
1287 - else, check outputs for wanted frames and forward the requests.
1289 The following AVFilterLink fields are used for activation:
1293 This field indicates if a frame is needed on this input of the
1294 destination filter. A positive value indicates that a frame is needed
1295 to process queued frames or internal data or to satisfy the
1296 application; a zero value indicates that a frame is not especially
1297 needed but could be processed anyway; a negative value indicates that a
1298 frame would just be queued.
1300 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1301 when requested by the application through a specific API or when it is
1302 set on one of the outputs.
1304 It is cleared when a frame is sent from the source using
1307 It is also cleared when a status change is sent from the source using
1308 ff_avfilter_link_set_in_status().
1312 This field means that the source filter can not generate a frame as is.
1313 Its goal is to avoid repeatedly calling the request_frame() method on
1316 It is set by the framework on all outputs of a filter before activating it.
1318 It is automatically cleared by ff_filter_frame().
1320 It is also automatically cleared by ff_avfilter_link_set_in_status().
1322 It is also cleared on all outputs (using filter_unblock()) when
1323 something happens on an input: processing a frame or changing the
1328 Contains the frames queued on a filter input. If it contains frames and
   frame_wanted_out is not set, then the filter can be activated. If that
   results in the filter not being able to use these frames, the filter
   must set frame_wanted_out to ask for more frames.
1333 - status_in and status_in_pts:
1335 Status (EOF or error code) of the link and timestamp of the status
1336 change (in link time base, same as frames) as seen from the input of
1337 the link. The status change is considered happening after the frames
1340 It is set by the source filter using ff_avfilter_link_set_in_status().
   Status of the link as seen from the output of the link. The status
   change is considered as having already happened.
1347 It is set by the destination filter using
1348 ff_avfilter_link_set_out_status().
   Filters are activated according to the ready field, set using
   ff_filter_set_ready(). Eventually, a priority queue will be used.
1352 ff_filter_set_ready() is called whenever anything could cause progress to
1353 be possible. Marking a filter ready when it is not is not a problem,
1354 except for the small overhead it causes.
1356 Conditions that cause a filter to be marked ready are:
1358 - frames added on an input link;
1360 - changes in the input or output status of an input link;
1362 - requests for a frame on an output link;
1364 - after any actual processing using the legacy methods (filter_frame(),
1365 and request_frame() to acknowledge status changes), to run once more
1366 and check if enough input was present for several frames.
1368 Examples of scenarios to consider:
1370 - buffersrc: activate if frame_wanted_out to notify the application;
1371 activate when the application adds a frame to push it immediately.
1373 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1375 - concat (not at stitch points): can process a frame on any output.
1376 Activate if frame_wanted_out on output to forward on the corresponding
1377 input. Activate when a frame is present on input to process it
1380 - framesync: needs at least one frame on each input; extra frames on the
1381 wrong input will accumulate. When a frame is first added on one input,
1382 set frame_wanted_out<0 on it to avoid getting more (would trigger
1383 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1385 Activation of old filters:
1387 In order to activate a filter implementing the legacy filter_frame() and
1388 request_frame() methods, perform the first possible of the following
1391 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1392 frame and call filter_frame().
1394 Rationale: filter frames as soon as possible instead of leaving them
1395 queued; frame_wanted_out < 0 is not possible since the old API does not
1396 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1397 when min_samples > 0 and there are not enough samples queued.
1399 - If an input has status_in set but not status_out, try to call
1400 request_frame() on one of the outputs in the hope that it will trigger
1401 request_frame() on the input with status_in and acknowledge it. This is
1402 awkward and fragile, filters with several inputs or outputs should be
1403 updated to direct activation as soon as possible.
1405 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1408 Rationale: checking frame_blocked_in is necessary to avoid requesting
1409 repeatedly on a blocked input if another is not blocked (example:
1410 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1412 TODO: respect needs_fifo and remove auto-inserted fifos.
/**
 * Run one round of processing on a filter.
 *
 * Uses the filter's own activate() callback when present, otherwise the
 * generic ff_filter_activate_default().  FFERROR_NOT_READY is not an
 * error, merely "no progress possible right now", so it is translated
 * to 0 for the caller.
 */
int ff_filter_activate(AVFilterContext *filter)
{
    int ret;

    /* Generic timeline support is not yet implemented but should be easy */
    av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
                 filter->filter->activate));
    filter->ready = 0;
    ret = filter->filter->activate ? filter->filter->activate(filter) :
          ff_filter_activate_default(filter);
    if (ret == FFERROR_NOT_READY)
        ret = 0;
    return ret;
}
/**
 * Test and acknowledge the change of status on the link.
 *
 * The status is only reported once all queued frames have been consumed;
 * acknowledging copies status_in to status_out and advances the link's
 * current_pts to the status change timestamp.
 *
 * @param rstatus filled with the status code (0 if no status to report)
 * @param rpts    filled with the link's current pts (updated to the
 *                status change pts when the status is acknowledged)
 * @return 1 if a status change was acknowledged, 0 otherwise
 */
int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
{
    *rpts = link->current_pts;
    /* Frames still queued: they come before the status change. */
    if (ff_framequeue_queued_frames(&link->fifo))
        return *rstatus = 0;
    /* Already acknowledged earlier: report the same status again. */
    if (link->status_out)
        return *rstatus = link->status_out;
    /* No status change pending. */
    if (!link->status_in)
        return *rstatus = 0;
    /* Acknowledge: propagate status to the output side of the link. */
    *rstatus = link->status_out = link->status_in;
    ff_update_link_current_pts(link, link->status_in_pts);
    *rpts = link->current_pts;
    return 1;
}
/** Get the number of frames queued on the link input. */
size_t ff_inlink_queued_frames(AVFilterLink *link)
{
    return ff_framequeue_queued_frames(&link->fifo);
}
/** Test if at least one frame is available on the link input. */
int ff_inlink_check_available_frame(AVFilterLink *link)
{
    return ff_framequeue_queued_frames(&link->fifo) > 0;
}
/** Get the number of audio samples queued on the link input. */
int ff_inlink_queued_samples(AVFilterLink *link)
{
    return ff_framequeue_queued_samples(&link->fifo);
}
1461 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1463 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1465 return samples >= min || (link->status_in && samples);
/**
 * Bookkeeping performed when a frame is consumed from the link: advance
 * current_pts, run queued commands due at this frame's time, evaluate
 * the timeline "enable" expression, and bump the output frame counter.
 */
static void consume_update(AVFilterLink *link, const AVFrame *frame)
{
    ff_update_link_current_pts(link, frame->pts);
    ff_inlink_process_commands(link, frame);
    /* Timeline: disable the destination filter when the enable expression
       evaluates false for this frame. */
    link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
    link->frame_count_out++;
}
/**
 * Take a frame from the link's FIFO and update the link's stats.
 *
 * @param rframe set to the consumed frame (owned by the caller), or NULL
 *               if no frame was available
 * @return 1 if a frame was consumed, 0 if none available, negative on error
 */
int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
{
    AVFrame *frame;

    *rframe = NULL;
    if (!ff_inlink_check_available_frame(link))
        return 0;

    /* If samples were previously skipped on this fifo, go through the
       sample-accurate path so accounting stays consistent. */
    if (link->fifo.samples_skipped) {
        frame = ff_framequeue_peek(&link->fifo, 0);
        return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
    }

    frame = ff_framequeue_take(&link->fifo);
    consume_update(link, frame);
    *rframe = frame;
    return 1;
}
/**
 * Take samples from the link's FIFO and update the link's stats.
 *
 * Returns a frame containing between @p min and @p max samples, except
 * after input EOF where fewer than @p min may be returned (the remainder).
 *
 * @param rframe set to the consumed frame (owned by the caller), or NULL
 *               if not enough samples were available
 * @return 1 if a frame was consumed, 0 if not enough samples, negative
 *         on error
 */
int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
                              AVFrame **rframe)
{
    AVFrame *frame;
    int ret;

    av_assert1(min);
    *rframe = NULL;
    if (!ff_inlink_check_available_samples(link, min))
        return 0;
    /* At EOF, allow returning the (possibly short) remaining samples. */
    if (link->status_in)
        min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
    ret = take_samples(link, min, max, &frame);
    if (ret < 0)
        return ret;
    consume_update(link, frame);
    *rframe = frame;
    return 1;
}
/** Access a queued frame without consuming it; idx 0 is the oldest. */
AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
{
    return ff_framequeue_peek(&link->fifo, idx);
}
/**
 * Make sure a frame is writable, copying it into a freshly allocated
 * buffer if it is not.  On success *rframe points to a writable frame;
 * if a copy was made the original frame is freed and replaced.
 *
 * @return 0 on success, a negative AVERROR code on failure (the original
 *         frame is left untouched in *rframe on failure)
 */
int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
{
    AVFrame *frame = *rframe;
    AVFrame *out;
    int ret;

    /* Already exclusively owned: nothing to do. */
    if (av_frame_is_writable(frame))
        return 0;
    av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");

    /* Allocate a new buffer matching the link's media type. */
    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        out = ff_get_video_buffer(link, link->w, link->h);
        break;
    case AVMEDIA_TYPE_AUDIO:
        out = ff_get_audio_buffer(link, frame->nb_samples);
        break;
    default:
        return AVERROR(EINVAL);
    }
    if (!out)
        return AVERROR(ENOMEM);

    /* Copy metadata first; data is copied below. */
    ret = av_frame_copy_props(out, frame);
    if (ret < 0) {
        av_frame_free(&out);
        return ret;
    }

    switch (link->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
                      frame->format, frame->width, frame->height);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_samples_copy(out->extended_data, frame->extended_data,
                        0, 0, frame->nb_samples,
                        frame->channels,
                        frame->format);
        break;
    default:
        /* link->type already validated by the first switch */
        av_assert0(!"reached");
    }

    av_frame_free(&frame);
    *rframe = out;
    return 0;
}
/**
 * Process and pop all queued commands whose scheduled time is at or
 * before this frame's timestamp (in seconds).
 *
 * NOTE(review): frame->pts is assumed to be valid here; an
 * AV_NOPTS_VALUE pts would yield a meaningless comparison time —
 * confirm callers guarantee a real timestamp.
 *
 * @return 0 (always succeeds; individual command errors are not reported)
 */
int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
{
    AVFilterCommand *cmd = link->dst->command_queue;

    while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
        av_log(link->dst, AV_LOG_DEBUG,
               "Processing command time:%f command:%s arg:%s\n",
               cmd->time, cmd->command, cmd->arg);
        avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
        ff_command_queue_pop(link->dst);
        cmd= link->dst->command_queue;
    }
    return 0;
}
/**
 * Evaluate the destination filter's timeline "enable" expression for
 * this frame.
 *
 * @return 1 if the filter is enabled for this frame (or has no enable
 *         expression), 0 if it should be bypassed
 */
int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
{
    AVFilterContext *dstctx = link->dst;
    int64_t pts = frame->pts;
    int64_t pos = frame->pkt_pos;

    /* No timeline expression: always enabled. */
    if (!dstctx->enable_str)
        return 1;

    /* Expose per-frame variables: frame index, time in seconds,
       dimensions and byte position (NAN when unknown). */
    dstctx->var_values[VAR_N] = link->frame_count_out;
    dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
    dstctx->var_values[VAR_W] = link->w;
    dstctx->var_values[VAR_H] = link->h;
    dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;

    /* Treat any value with magnitude >= 0.5 as "enabled". */
    return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
}
/**
 * Mark that a frame is wanted on this link and schedule the source
 * filter for activation.  Must not be called once a status change has
 * been seen on either side of the link.
 */
void ff_inlink_request_frame(AVFilterLink *link)
{
    av_assert1(!link->status_in);
    av_assert1(!link->status_out);
    link->frame_wanted_out = 1;
    ff_filter_set_ready(link->src, 100);
}
/**
 * Set the status on an input link from the destination side, discarding
 * all queued frames.  Typically used to signal that the destination no
 * longer needs input (e.g. EOF reached on the output).
 */
void ff_inlink_set_status(AVFilterLink *link, int status)
{
    /* Already terminated from this side: nothing to do. */
    if (link->status_out)
        return;
    link->frame_wanted_out = 0;
    link->frame_blocked_in = 0;
    ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
    /* Queued frames will never be consumed: drop them. */
    while (ff_framequeue_queued_frames(&link->fifo)) {
        AVFrame *frame = ff_framequeue_take(&link->fifo);
        av_frame_free(&frame);
    }
    /* Propagate to the input side too, unless the source already set
       its own status. */
    if (!link->status_in)
        link->status_in = status;
}
/**
 * Get the status set on an output link by its destination filter.
 * (From the perspective of the filter owning the output, that is the
 * link's status_in field.)
 */
int ff_outlink_get_status(AVFilterLink *link)
{
    return link->status_in;
}
/** Return the AVClass for AVFilterContext (for AVOptions introspection). */
const AVClass *avfilter_get_class(void)
{
    return &avfilter_class;
}
1635 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1636 int default_pool_size)
1638 AVHWFramesContext *frames;
1640 // Must already be set by caller.
1641 av_assert0(link->hw_frames_ctx);
1643 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1645 if (frames->initial_pool_size == 0) {
1646 // Dynamic allocation is necessarily supported.
1647 } else if (avctx->extra_hw_frames >= 0) {
1648 frames->initial_pool_size += avctx->extra_hw_frames;
1650 frames->initial_pool_size = default_pool_size;