3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Exported version string, embedded in the binary so the libavfilter
 * build can be identified at run time (e.g. in bug reports). */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log the contents of a frame reference: buffer pointers, linesizes
 * and timestamps, then either video properties (SAR, dimensions,
 * interlacing, picture type) or, when nb_samples is set, audio properties.
 * 'end' != 0 appends a newline to close the log record.
 * NOTE(review): several lines of this function are elided in this dump;
 * comments describe only what is visible. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51     av_unused char buf[16];
53            "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54            ref, ref->buf, ref->data[0],
55            ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56            ref->pts, ref->pkt_pos);
/* Video-specific fields. */
59         ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60                 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61                 ref->width, ref->height,
62                 !ref->interlaced_frame ? 'P' :         /* Progressive  */
63                 ref->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
65                 av_get_picture_type_char(ref->pict_type));
/* Audio-specific fields (channel layout, sample count, rate). */
67     if (ref->nb_samples) {
68         ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74     ff_tlog(ctx, "]%s", end ? "\n" : "");
/* Return the LIBAVFILTER_VERSION_INT constant this library was built with.
 * The assert documents that FFmpeg (as opposed to Libav) builds always use
 * a micro version >= 100. */
77 unsigned avfilter_version(void)
79     av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80     return LIBAVFILTER_VERSION_INT;
/* Return the ./configure command line the library was built with. */
83 const char *avfilter_configuration(void)
85     return FFMPEG_CONFIGURATION;
/* Return the license string.  The string literal is built as
 * LICENSE_PREFIX + FFMPEG_LICENSE; the pointer arithmetic then skips the
 * prefix so only the license name itself is returned (deliberate idiom,
 * not a bug: the prefix exists so 'strings' on the binary shows a
 * self-describing entry). */
88 const char *avfilter_license(void)
90     #define LICENSE_PREFIX "libavfilter license: "
91     return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Remove and free the head element of the filter's command queue,
 * advancing the queue to the next entry.
 * NOTE(review): lines freeing the remaining fields of 'c' (arg, the node
 * itself) appear to be elided in this dump. */
94 void ff_command_queue_pop(AVFilterContext *filter)
96     AVFilterCommand *c= filter->command_queue;
98     av_freep(&c->command);
99     filter->command_queue= c->next;
/* Insert a new pad (a copy of *newpad) at position idx of the given pad
 * array, growing both the pad and link arrays by one and shifting the
 * existing entries up.  padidx_off is the byte offset of the pad-index
 * field inside AVFilterLink, so the indices stored in the shifted links
 * can be fixed up generically for both input and output pads.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104                   AVFilterPad **pads, AVFilterLink ***links,
107     AVFilterLink **newlinks;
108     AVFilterPad *newpads;
/* Clamp so an out-of-range idx appends at the end. */
111     idx = FFMIN(idx, *count);
113     newpads  = av_realloc_array(*pads,  *count + 1, sizeof(AVFilterPad));
114     newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119     if (!newpads || !newlinks)
120         return AVERROR(ENOMEM);
/* Shift existing pads/links up by one and drop the copy into the gap. */
122     memmove(*pads  + idx + 1, *pads  + idx, sizeof(AVFilterPad)   * (*count - idx));
123     memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124     memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125     (*links)[idx] = NULL;
/* Fix the stored pad index of every link that was shifted. */
128     for (i = idx + 1; i < *count; i++)
130             (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Create a link from output pad srcpad of src to input pad dstpad of dst.
 * Both filters must already belong to the same graph; the pads must exist,
 * be unconnected and have matching media types.
 * Returns 0 on success or a negative AVERROR code. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136                   AVFilterContext *dst, unsigned dstpad)
140     av_assert0(src->graph);
141     av_assert0(dst->graph);
142     av_assert0(src->graph == dst->graph);
/* Pad indices in range and both pads still free. */
144     if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145         src->outputs[srcpad]      || dst->inputs[dstpad])
146         return AVERROR(EINVAL);
148     if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149         av_log(src, AV_LOG_ERROR,
150                "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151                src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152                dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153         return AVERROR(EINVAL);
156     link = av_mallocz(sizeof(*link));
158         return AVERROR(ENOMEM);
/* Register the link on both sides and record its endpoints. */
160     src->outputs[srcpad] = dst->inputs[dstpad] = link;
164     link->srcpad  = &src->output_pads[srcpad];
165     link->dstpad  = &dst->input_pads[dstpad];
166     link->type    = src->output_pads[srcpad].type;
/* The elided format initialization relies on both "none" sentinels
 * being -1, which this assert guarantees. */
167     av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169     ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Free a link and everything it owns (partial buffer, frame FIFO, frame
 * pool), then free the link itself and reset *link to NULL.
 * NOTE(review): the NULL check and the final av_freep() are elided in
 * this dump. */
174 void avfilter_link_free(AVFilterLink **link)
179     av_frame_free(&(*link)->partial_buf);
180     ff_framequeue_free(&(*link)->fifo);
181     ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
/* Deprecated accessor (compiled only under FF_API_FILTER_GET_SET):
 * return the channel count negotiated on the link. */
186 #if FF_API_FILTER_GET_SET
187 int avfilter_link_get_channels(AVFilterLink *link)
189     return link->channels;
/* Mark a filter as ready to be activated, with at least the given
 * priority; never lowers an already-higher readiness. */
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
195     filter->ready = FFMAX(filter->ready, priority);
199  * Clear frame_blocked_in on all outputs.
200  * This is necessary whenever something changes on input.
202 static void filter_unblock(AVFilterContext *filter)
206     for (i = 0; i < filter->nb_outputs; i++)
207         filter->outputs[i]->frame_blocked_in = 0;
/* Set the input-side status (EOF or error) of a link, with the pts (in
 * link time base) at which the status change happens.  Idempotent for the
 * same status; asserts against changing an already-set different status.
 * Clears the wanted/blocked flags and wakes the destination filter. */
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
213     if (link->status_in == status)
215     av_assert0(!link->status_in);
216     link->status_in = status;
217     link->status_in_pts = pts;
218     link->frame_wanted_out = 0;
219     link->frame_blocked_in = 0;
220     filter_unblock(link->dst);
221     ff_filter_set_ready(link->dst, 200);
/* Set the output-side status of a link (the change is considered to have
 * already happened).  Must not be called twice, nor while a frame is
 * still wanted.  Updates current pts if one is supplied, and wakes the
 * source filter since it no longer needs to feed this link. */
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
226     av_assert0(!link->frame_wanted_out);
227     av_assert0(!link->status_out);
228     link->status_out = status;
229     if (pts != AV_NOPTS_VALUE)
230         ff_update_link_current_pts(link, pts);
231     filter_unblock(link->dst);
232     ff_filter_set_ready(link->src, 200);
/* Public wrapper: closing a link maps to an output-side EOF status
 * (0 reopens nothing — passing closed == 0 sets status 0). */
235 void avfilter_link_set_closed(AVFilterLink *link, int closed)
237     ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Splice filter 'filt' into an existing link: the original link is
 * re-pointed at filt's input pad filt_srcpad_idx, and a new link is
 * created from filt's output pad filt_dstpad_idx to the original
 * destination.  Any format lists already negotiated on the link are
 * transferred to the new downstream link so negotiation state survives. */
240 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
241                            unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
244     unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
246     av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
247            "between the filter '%s' and the filter '%s'\n",
248            filt->name, link->src->name, link->dst->name);
/* Temporarily detach the old destination so the pad is free to link. */
250     link->dst->inputs[dstpad_idx] = NULL;
251     if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
252         /* failed to link output filter to new filter */
253         link->dst->inputs[dstpad_idx] = link;
257     /* re-hookup the link to the new destination filter we inserted */
259     link->dstpad = &filt->input_pads[filt_srcpad_idx];
260     filt->inputs[filt_srcpad_idx] = link;
262     /* if any information on supported media formats already exists on the
263      * link, we need to preserve that */
264     if (link->out_formats)
265         ff_formats_changeref(&link->out_formats,
266                              &filt->outputs[filt_dstpad_idx]->out_formats);
267     if (link->out_samplerates)
268         ff_formats_changeref(&link->out_samplerates,
269                              &filt->outputs[filt_dstpad_idx]->out_samplerates);
270     if (link->out_channel_layouts)
271         ff_channel_layouts_changeref(&link->out_channel_layouts,
272                                      &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Recursively configure all links feeding into 'filter': walks upstream
 * depth-first (detecting cycles via the STARTINIT marker), runs the source
 * pad's config_props callback, fills in unset per-type defaults
 * (time base, SAR, frame rate for video; time base for audio), propagates
 * hw_frames_ctx across non-hwframe-aware filters, then runs the
 * destination pad's config_props.
 * NOTE(review): many lines (returns, case labels, closing braces) are
 * elided in this dump; comments annotate only the visible logic. */
277 int avfilter_config_links(AVFilterContext *filter)
279     int (*config_link)(AVFilterLink *);
283     for (i = 0; i < filter->nb_inputs; i ++) {
284         AVFilterLink *link = filter->inputs[i];
285         AVFilterLink *inlink;
288         if (!link->src || !link->dst) {
289             av_log(filter, AV_LOG_ERROR,
290                    "Not all input and output are properly linked (%d).\n", i);
291             return AVERROR(EINVAL);
/* First input of the upstream filter, used as the source of defaults. */
294         inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
296         link->current_pts_us = AV_NOPTS_VALUE;
298         switch (link->init_state) {
/* Seeing STARTINIT again while descending means a cycle. */
301         case AVLINK_STARTINIT:
302             av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
/* Mark in-progress, then configure everything upstream first. */
305             link->init_state = AVLINK_STARTINIT;
307             if ((ret = avfilter_config_links(link->src)) < 0)
310             if (!(config_link = link->srcpad->config_props)) {
311                 if (link->src->nb_inputs != 1) {
312                     av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
313                                                     "with more than one input "
314                                                     "must set config_props() "
315                                                     "callbacks on all outputs\n");
316                     return AVERROR(EINVAL);
318             } else if ((ret = config_link(link)) < 0) {
319                 av_log(link->src, AV_LOG_ERROR,
320                        "Failed to configure output pad on %s\n",
/* Inherit unset properties from upstream (or sane fallbacks). */
325             switch (link->type) {
326             case AVMEDIA_TYPE_VIDEO:
327                 if (!link->time_base.num && !link->time_base.den)
328                     link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
330                 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
331                     link->sample_aspect_ratio = inlink ?
332                         inlink->sample_aspect_ratio : (AVRational){1,1};
335                 if (!link->frame_rate.num && !link->frame_rate.den)
336                     link->frame_rate = inlink->frame_rate;
341                 } else if (!link->w || !link->h) {
342                     av_log(link->src, AV_LOG_ERROR,
343                            "Video source filters must set their output link's "
344                            "width and height\n");
345                     return AVERROR(EINVAL);
349             case AVMEDIA_TYPE_AUDIO:
351                     if (!link->time_base.num && !link->time_base.den)
352                         link->time_base = inlink->time_base;
/* Fallback: 1/sample_rate when nothing was inherited. */
355                 if (!link->time_base.num && !link->time_base.den)
356                     link->time_base = (AVRational) {1, link->sample_rate};
/* Propagate the hw frames context through filters that are not
 * hwframe-aware; aware filters manage it themselves. */
359             if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
360                 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
361                 av_assert0(!link->hw_frames_ctx &&
362                            "should not be set by non-hwframe-aware filter");
363                 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
364                 if (!link->hw_frames_ctx)
365                     return AVERROR(ENOMEM);
/* Finally let the consumer configure its side of the link. */
368             if ((config_link = link->dstpad->config_props))
369                 if ((ret = config_link(link)) < 0) {
370                     av_log(link->dst, AV_LOG_ERROR,
371                            "Failed to configure input pad on %s\n",
376             link->init_state = AVLINK_INIT;
/* Trace-log a link's negotiated properties: size and pixel format for
 * video, sample rate / channel layout / sample format for audio, plus the
 * names of the filters at both ends.  'end' appends a newline. */
383 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
385     if (link->type == AVMEDIA_TYPE_VIDEO) {
387                 "link[%p s:%dx%d fmt:%s %s->%s]%s",
388                 link, link->w, link->h,
389                 av_get_pix_fmt_name(link->format),
390                 link->src ? link->src->filter->name : "",
391                 link->dst ? link->dst->filter->name : "",
/* -1 channel count: derive the printable name from the layout alone. */
395         av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
398                 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399                 link, (int)link->sample_rate, buf,
400                 av_get_sample_fmt_name(link->format),
401                 link->src ? link->src->filter->name : "",
402                 link->dst ? link->dst->filter->name : "",
/* Legacy-API entry point: request a frame on 'link' on behalf of the
 * destination filter (which must not use the activate callback).  If the
 * link already carries a status, acknowledge or return it; otherwise mark
 * the frame as wanted and schedule the source filter. */
407 int ff_request_frame(AVFilterLink *link)
409     FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
/* Filters with an activate callback must use ff_request_frame_to_filter. */
411     av_assert1(!link->dst->filter->activate);
412     if (link->status_out)
413         return link->status_out;
414     if (link->status_in) {
415         if (ff_framequeue_queued_frames(&link->fifo)) {
/* Frames still queued: the destination is already scheduled to eat them. */
416             av_assert1(!link->frame_wanted_out);
417             av_assert1(link->dst->ready >= 300);
420             /* Acknowledge status change. Filters using ff_request_frame() will
421                handle the change automatically. Filters can also check the
422                status directly but none do yet. */
423             ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
424             return link->status_out;
427     link->frame_wanted_out = 1;
428     ff_filter_set_ready(link->src, 100);
/* Estimate the pts (in link_time_base) at which a status change should be
 * reported on an output: the minimum over the inputs that already carry
 * the same output status; failing that, fall back to the input-side
 * status timestamps with a warning.  Returns AV_NOPTS_VALUE when nothing
 * usable was found (the elided guard presumably checks r != INT64_MAX). */
432 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
435     int64_t r = INT64_MAX;
437     for (i = 0; i < ctx->nb_inputs; i++)
438         if (ctx->inputs[i]->status_out == status)
439             r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
442     av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
443     for (i = 0; i < ctx->nb_inputs; i++)
444         r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
447     return AV_NOPTS_VALUE;
/* Forward a frame request to the source filter of 'link': call its
 * request_frame callback if present, otherwise recurse upstream through
 * its first input.  Any definitive error (not EAGAIN) is converted into
 * an input-side status on the link with an estimated timestamp. */
450 static int ff_request_frame_to_filter(AVFilterLink *link)
454     FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
455     /* Assume the filter is blocked, let the method clear it if not */
456     link->frame_blocked_in = 1;
457     if (link->srcpad->request_frame)
458         ret = link->srcpad->request_frame(link);
459     else if (link->src->inputs[0])
460         ret = ff_request_frame(link->src->inputs[0]);
462     if (ret != AVERROR(EAGAIN) && ret != link->status_in)
463         ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
/* EOF has been recorded as a status; handling of the return value
 * continues on lines elided from this dump. */
464     if (ret == AVERROR_EOF)
/* Legacy poll: ask how many frames are available on 'link'.  Delegates to
 * the source pad's poll_frame if defined; otherwise takes the minimum of
 * recursively polling all of the source filter's inputs. */
470 int ff_poll_frame(AVFilterLink *link)
472     int i, min = INT_MAX;
474     if (link->srcpad->poll_frame)
475         return link->srcpad->poll_frame(link);
477     for (i = 0; i < link->src->nb_inputs; i++) {
479         if (!link->src->inputs[i])
480             return AVERROR(EINVAL);
481         val = ff_poll_frame(link->src->inputs[i]);
482         min = FFMIN(min, val);
/* Variable names available in 'enable' timeline expressions; the entries
 * themselves are elided in this dump.  Order must match the VAR_* enum
 * used to index ctx->var_values. */
488 static const char *const var_names[] = {
/* Parse and install a timeline 'enable' expression on the filter.  Fails
 * with AVERROR_PATCHWELCOME if the filter does not declare timeline
 * support.  Lazily allocates the variable-value array, keeps its own copy
 * of the expression string and stores it in ctx->enable_str.
 * NOTE(review): the error-path frees and old-expression handling for
 * 'old' are elided in this dump. */
506 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
510     AVExpr *old = ctx->enable;
512     if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
513         av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
514                "with filter '%s'\n", ctx->filter->name);
515         return AVERROR_PATCHWELCOME;
518     expr_dup = av_strdup(expr);
520         return AVERROR(ENOMEM);
522     if (!ctx->var_values) {
523         ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
524         if (!ctx->var_values) {
526             return AVERROR(ENOMEM);
530     ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
531                         NULL, NULL, NULL, NULL, 0, ctx->priv);
533         av_log(ctx->priv, AV_LOG_ERROR,
534                "Error when evaluating the expression '%s' for enable\n",
/* Success: replace the stored expression string with our copy. */
541     av_free(ctx->enable_str);
542     ctx->enable_str = expr_dup;
/* Record the latest timestamp seen on a link (both in link time base and
 * in microseconds) and, if the link participates in a graph heap, update
 * its position so "oldest link" queries stay correct. */
546 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
548     if (pts == AV_NOPTS_VALUE)
550     link->current_pts = pts;
551     link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
552     /* TODO use duration */
553     if (link->graph && link->age_index >= 0)
554         ff_avfilter_graph_update_heap(link->graph, link);
/* Dispatch a runtime command to a filter.  "ping" is answered generically
 * (into res, or into a local buffer that is logged when res is NULL —
 * the res==NULL fallback lines are elided here); "enable" re-parses the
 * timeline expression; anything else goes to the filter's own
 * process_command callback, or ENOSYS if it has none. */
557 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
559     if(!strcmp(cmd, "ping")){
560         char local_res[256] = {0};
564             res_len = sizeof(local_res);
566         av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
567         if (res == local_res)
568             av_log(filter, AV_LOG_INFO, "%s", res);
570     }else if(!strcmp(cmd, "enable")) {
571         return set_enable_expr(filter, arg);
572     }else if(filter->filter->process_command) {
573         return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
575     return AVERROR(ENOSYS);
/* Count the pads in a NULL-name-terminated pad array.
 * NOTE(review): the loop body advancing 'pads' is elided in this dump;
 * as shown the condition would not progress — presumably 'pads++'. */
578 int avfilter_pad_count(const AVFilterPad *pads)
585     for (count = 0; pads->name; count++)
/* AVClass item_name callback: prefer the instance name, fall back to the
 * filter's type name. */
590 static const char *default_filter_name(void *filter_ctx)
592     AVFilterContext *ctx = filter_ctx;
593     return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the filter's private context (which
 * carries its own AVClass) as the single child, so AVOptions can descend
 * into per-filter options.  The return statements are elided here. */
596 static void *filter_child_next(void *obj, void *prev)
598     AVFilterContext *ctx = obj;
599     if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
/* AVClass child_class_next callback: iterate over all registered filters
 * and return the priv_class following 'prev', skipping filters without
 * options.  Used by the AVOptions API to enumerate every filter's
 * option class. */
604 static const AVClass *filter_child_class_next(const AVClass *prev)
607     const AVFilter *f = NULL;
609     /* find the filter that corresponds to prev */
610     while (prev && (f = av_filter_iterate(&opaque)))
611         if (f->priv_class == prev)
614     /* could not find filter corresponding to prev */
618     /* find next filter with specific options */
619     while ((f = av_filter_iterate(&opaque)))
621             return f->priv_class;
/* Generic options common to every AVFilterContext, surfaced through the
 * AVClass below: threading controls, the timeline 'enable' expression and
 * extra hardware frame allocation. */
626 #define OFFSET(x) offsetof(AVFilterContext, x)
627 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
628 static const AVOption avfilter_options[] = {
629     { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
630         { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
631         { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
632     { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
633     { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
634         { .i64 = 0 }, 0, INT_MAX, FLAGS },
635     { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
636         OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
/* AVClass describing AVFilterContext for logging and the AVOptions API;
 * the child callbacks let option lookups recurse into each filter's
 * private class. */
640 static const AVClass avfilter_class = {
641     .class_name = "AVFilter",
642     .item_name  = default_filter_name,
643     .version    = LIBAVUTIL_VERSION_INT,
644     .category   = AV_CLASS_CATEGORY_FILTER,
645     .child_next = filter_child_next,
646     .child_class_next = filter_child_class_next,
647     .option           = avfilter_options,
/* Default (serial) implementation of the execute callback used when slice
 * threading is disabled: run every job in order on the calling thread.
 * The per-job return handling via 'ret' is elided in this dump. */
650 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
651                            int *ret, int nb_jobs)
655     for (i = 0; i < nb_jobs; i++) {
656         int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for 'filter':
 * private data, option defaults, internal state, and the pad/link arrays
 * copied from the filter definition.  On any failure, falls through to
 * the cleanup code at the end and returns NULL (goto targets and several
 * error checks are elided in this dump). */
663 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
665     AVFilterContext *ret;
671     ret = av_mallocz(sizeof(AVFilterContext));
675     ret->av_class = &avfilter_class;
676     ret->filter   = filter;
677     ret->name     = inst_name ? av_strdup(inst_name) : NULL;
678     if (filter->priv_size) {
679         ret->priv     = av_mallocz(filter->priv_size);
683     if (filter->preinit) {
684         if (filter->preinit(ret) < 0)
/* Generic options first, then the filter's private defaults. */
689     av_opt_set_defaults(ret);
690     if (filter->priv_class) {
691         *(const AVClass**)ret->priv = filter->priv_class;
692         av_opt_set_defaults(ret->priv);
695     ret->internal = av_mallocz(sizeof(*ret->internal));
698     ret->internal->execute = default_execute;
/* Duplicate the static pad descriptions into per-instance arrays. */
700     ret->nb_inputs = avfilter_pad_count(filter->inputs);
701     if (ret->nb_inputs ) {
702         ret->input_pads   = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
703         if (!ret->input_pads)
705         memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
706         ret->inputs       = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
711     ret->nb_outputs = avfilter_pad_count(filter->outputs);
712     if (ret->nb_outputs) {
713         ret->output_pads  = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
714         if (!ret->output_pads)
716         memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
717         ret->outputs      = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* Error path: unwind everything allocated above. */
727     av_freep(&ret->inputs);
728     av_freep(&ret->input_pads);
730     av_freep(&ret->outputs);
731     av_freep(&ret->output_pads);
733     av_freep(&ret->priv);
734     av_freep(&ret->internal);
/* Detach a link from both its endpoint filters (clearing their pointer
 * slots), drop every format/layout reference and the hw frames context,
 * then free the link itself.  NULL checks on src/dst are elided here. */
739 static void free_link(AVFilterLink *link)
745         link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
747         link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
749     av_buffer_unref(&link->hw_frames_ctx);
751     ff_formats_unref(&link->in_formats);
752     ff_formats_unref(&link->out_formats);
753     ff_formats_unref(&link->in_samplerates);
754     ff_formats_unref(&link->out_samplerates);
755     ff_channel_layouts_unref(&link->in_channel_layouts);
756     ff_channel_layouts_unref(&link->out_channel_layouts);
757     avfilter_link_free(&link);
/* Destroy a filter instance: remove it from its graph, run its uninit
 * callback, free every attached link, its options, command queue, enable
 * expression and all per-instance arrays.  The final free of 'filter'
 * itself is elided in this dump. */
760 void avfilter_free(AVFilterContext *filter)
768         ff_filter_graph_remove_filter(filter->graph, filter);
770     if (filter->filter->uninit)
771         filter->filter->uninit(filter);
773     for (i = 0; i < filter->nb_inputs; i++) {
774         free_link(filter->inputs[i]);
776     for (i = 0; i < filter->nb_outputs; i++) {
777         free_link(filter->outputs[i]);
780     if (filter->filter->priv_class)
781         av_opt_free(filter->priv);
783     av_buffer_unref(&filter->hw_device_ctx);
785     av_freep(&filter->name);
786     av_freep(&filter->input_pads);
787     av_freep(&filter->output_pads);
788     av_freep(&filter->inputs);
789     av_freep(&filter->outputs);
790     av_freep(&filter->priv);
/* Drain any queued commands. */
791     while(filter->command_queue){
792         ff_command_queue_pop(filter);
795     av_expr_free(filter->enable);
796     filter->enable = NULL;
797     av_freep(&filter->var_values);
798     av_freep(&filter->internal);
/* Effective thread count for a filter: its own limit if set (capped by
 * the graph's), otherwise the graph-wide thread count. */
802 int ff_filter_get_nb_threads(AVFilterContext *ctx)
804     if (ctx->nb_threads > 0)
805         return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
806     return ctx->graph->nb_threads;
/* Parse a filter-argument string of key=value pairs separated by ':',
 * supporting shorthand (positional) notation for the leading options.
 * Generic AVFilterContext options are applied directly; unknown keys are
 * collected into *options and also tried on the private context so a
 * proper "option not found" error can be reported.  Finally re-parses the
 * 'enable' expression if one was set.
 * NOTE(review): loop structure and several branches are elided in this
 * dump; comments follow only the visible lines. */
809 static int process_options(AVFilterContext *ctx, AVDictionary **options,
812     const AVOption *o = NULL;
814     char *av_uninit(parsed_key), *av_uninit(value);
822         const char *shorthand = NULL;
/* Advance to the next positional (shorthand) option, skipping consts
 * and duplicates. */
824         o = av_opt_next(ctx->priv, o);
826             if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
832         ret = av_opt_get_key_value(&args, "=", ":",
833                                    shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
834                                    &parsed_key, &value);
836             if (ret == AVERROR(EINVAL))
837                 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
839                 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* Once an explicit key appears, positional matching stops. */
847             while ((o = av_opt_next(ctx->priv, o)));         /* discard all remaining shorthand */
852         av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
854         if (av_opt_find(ctx, key, NULL, 0, 0)) {
855             ret = av_opt_set(ctx, key, value, 0);
/* Not a generic option: stash it and probe the private context so a
 * truly unknown key is diagnosed precisely. */
862             av_dict_set(options, key, value, 0);
863             if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
864                 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
865                     if (ret == AVERROR_OPTION_NOT_FOUND)
866                         av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
879     if (ctx->enable_str) {
880         ret = set_enable_expr(ctx, ctx->enable_str);
/* Initialize a filter from a dictionary of options: apply the generic
 * options, resolve the effective threading mode (slice threading only if
 * filter, graph and executor all support it), apply private options, then
 * call the filter's init callback (init_opaque / init / init_dict, in
 * that order of preference). */
887 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
891     ret = av_opt_set_dict(ctx, options);
893         av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
897     if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
898         ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
899         ctx->graph->internal->thread_execute) {
900         ctx->thread_type       = AVFILTER_THREAD_SLICE;
901         ctx->internal->execute = ctx->graph->internal->thread_execute;
903         ctx->thread_type = 0;
906     if (ctx->filter->priv_class) {
907         ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
909             av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
914     if (ctx->filter->init_opaque)
915         ret = ctx->filter->init_opaque(ctx, NULL);
916     else if (ctx->filter->init)
917         ret = ctx->filter->init(ctx);
918     else if (ctx->filter->init_dict)
919         ret = ctx->filter->init_dict(ctx, options);
/* Initialize a filter from an argument string.  Contains a large
 * compatibility shim (under FF_API_OLD_FILTER_OPTS_ERROR) for a handful
 * of filters that historically used ':' as a list separator: their args
 * are copied and the colons rewritten to '|', with special-case handling
 * for leading parameters (frei0r, ocv, frei0r_src) and for aevalsrc's
 * mixed option/expression syntax.  If the deprecated syntax is detected,
 * an error is raised telling the user to use '|'.  Parsed options are
 * then applied via avfilter_init_dict, and any left-over (unknown) key
 * produces AVERROR_OPTION_NOT_FOUND.
 * NOTE(review): many control-flow lines are elided in this dump. */
924 int avfilter_init_str(AVFilterContext *filter, const char *args)
926     AVDictionary *options = NULL;
927     AVDictionaryEntry *e;
/* Args supplied but the filter declares no options at all. */
931         if (!filter->filter->priv_class) {
932             av_log(filter, AV_LOG_ERROR, "This filter does not take any "
933                    "options, but options were provided: %s.\n", args);
934             return AVERROR(EINVAL);
937 #if FF_API_OLD_FILTER_OPTS_ERROR
938             if (   !strcmp(filter->filter->name, "format")     ||
939                    !strcmp(filter->filter->name, "noformat")   ||
940                    !strcmp(filter->filter->name, "frei0r")     ||
941                    !strcmp(filter->filter->name, "frei0r_src") ||
942                    !strcmp(filter->filter->name, "ocv")        ||
943                    !strcmp(filter->filter->name, "pan")        ||
944                    !strcmp(filter->filter->name, "pp")         ||
945                    !strcmp(filter->filter->name, "aevalsrc")) {
946             /* a hack for compatibility with the old syntax
947              * replace colons with |s */
948             char *copy = av_strdup(args);
950             int nb_leading = 0; // number of leading colons to skip
954                 ret = AVERROR(ENOMEM);
/* These filters take fixed leading positional params before the list. */
958             if (!strcmp(filter->filter->name, "frei0r") ||
959                 !strcmp(filter->filter->name, "ocv"))
961             else if (!strcmp(filter->filter->name, "frei0r_src"))
964             while (nb_leading--) {
967                     p = copy + strlen(copy);
/* A remaining ':' means the deprecated separator syntax was used. */
973             deprecated = strchr(p, ':') != NULL;
975             if (!strcmp(filter->filter->name, "aevalsrc")) {
/* aevalsrc: expressions are ':'-separated until the first name=value
 * token; a literal "::" marks the options section. */
977                 while ((p = strchr(p, ':')) && p[1] != ':') {
978                     const char *epos = strchr(p + 1, '=');
979                     const char *spos = strchr(p + 1, ':');
980                     const int next_token_is_opt = epos && (!spos || epos < spos);
981                     if (next_token_is_opt) {
985                     /* next token does not contain a '=', assume a channel expression */
989                 if (p && *p == ':') { // double sep '::' found
991                     memmove(p, p + 1, strlen(p));
994                 while ((p = strchr(p, ':')))
998                 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
999                        "'|' to separate the list items ('%s' instead of '%s')\n",
1001                 ret = AVERROR(EINVAL);
1003             ret = process_options(filter, &options, copy);
/* Normal path: parse the args string as-is. */
1012             ret = process_options(filter, &options, args);
1018     ret = avfilter_init_dict(filter, &options);
/* Any key still in the dictionary was not consumed by any option. */
1022     if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1023         av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1024         ret = AVERROR_OPTION_NOT_FOUND;
1029     av_dict_free(&options);
/* Accessors for pad metadata, keeping AVFilterPad opaque to callers. */
1034 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1036     return pads[pad_idx].name;
1039 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1041     return pads[pad_idx].type;
/* Default filter_frame: pass the frame through unchanged to the filter's
 * first output. */
1044 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1046     return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to the destination filter's filter_frame callback:
 * make the frame writable if the pad requires it, process queued commands
 * at the frame's pts, evaluate the timeline 'enable' expression (a
 * disabled TIMELINE_GENERIC filter passes the frame through unchanged),
 * then call the callback and count the output frame.  On the elided error
 * path the frame is freed. */
1049 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1051     int (*filter_frame)(AVFilterLink *, AVFrame *);
1052     AVFilterContext *dstctx = link->dst;
1053     AVFilterPad *dst = link->dstpad;
1056     if (!(filter_frame = dst->filter_frame))
1057         filter_frame = default_filter_frame;
1059     if (dst->needs_writable) {
1060         ret = ff_inlink_make_frame_writable(link, &frame);
1065     ff_inlink_process_commands(link, frame);
1066     dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
/* Disabled + generic timeline support: bypass instead of filtering. */
1068     if (dstctx->is_disabled &&
1069         (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1070         filter_frame = default_filter_frame;
1071     ret = filter_frame(link, frame);
1072     link->frame_count_out++;
1076     av_frame_free(&frame);
/* Send a frame across a link: after consistency checks against the
 * link's negotiated properties (asserts for video, hard errors for audio
 * format/channel/rate changes), clear the blocked/wanted flags, queue the
 * frame on the link FIFO and mark the destination ready.  Takes ownership
 * of the frame; on error paths the frame is freed.
 * NOTE(review): the 'goto error' style jumps between the checks and the
 * trailing error block are elided in this dump. */
1080 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1083     FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1085     /* Consistency checks */
1086     if (link->type == AVMEDIA_TYPE_VIDEO) {
/* A few filters legitimately accept property changes; skip asserts. */
1087         if (strcmp(link->dst->filter->name, "buffersink") &&
1088             strcmp(link->dst->filter->name, "format") &&
1089             strcmp(link->dst->filter->name, "idet") &&
1090             strcmp(link->dst->filter->name, "null") &&
1091             strcmp(link->dst->filter->name, "scale")) {
1092             av_assert1(frame->format                 == link->format);
1093             av_assert1(frame->width               == link->w);
1094             av_assert1(frame->height               == link->h);
/* Audio: property changes are not supported at all. */
1097         if (frame->format != link->format) {
1098             av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1101         if (frame->channels != link->channels) {
1102             av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1105         if (frame->channel_layout != link->channel_layout) {
1106             av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1109         if (frame->sample_rate != link->sample_rate) {
1110             av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1115     link->frame_blocked_in = link->frame_wanted_out = 0;
1116     link->frame_count_in++;
1117     filter_unblock(link->dst);
1118     ret = ff_framequeue_add(&link->fifo, frame);
1120         av_frame_free(&frame);
1123     ff_filter_set_ready(link->dst, 300);
/* Error block: free the frame and report. */
1127     av_frame_free(&frame);
1128     return AVERROR_PATCHWELCOME;
/* True when the link's FIFO holds frames and either at least 'min'
 * samples are queued or (per the elided condition, presumably an EOF /
 * status check) the remaining samples must be flushed anyway. */
1131 static int samples_ready(AVFilterLink *link, unsigned min)
1133     return ff_framequeue_queued_frames(&link->fifo) &&
1134            (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into
 * *rframe.  Fast path: the first queued frame fits the bounds and no
 * samples were skipped — hand it over as-is.  Otherwise allocate a new
 * buffer, copy whole frames into it, and partially consume the last
 * frame via ff_framequeue_skip_samples.
 * Relies on no format changes and on being called with enough samples
 * queued (asserted below). */
1138 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1141     AVFrame *frame0, *frame, *buf;
1142     unsigned nb_samples, nb_frames, i, p;
1145     /* Note: this function relies on no format changes and must only be
1146        called with enough samples. */
1147     av_assert1(samples_ready(link, link->min_samples));
1148     frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1149     if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1150         *rframe = ff_framequeue_take(&link->fifo);
/* Count how many whole frames fit within 'max' samples. */
1156         if (nb_samples + frame->nb_samples > max) {
1157             if (nb_samples < min)
1161         nb_samples += frame->nb_samples;
1163         if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1165         frame = ff_framequeue_peek(&link->fifo, nb_frames);
1168     buf = ff_get_audio_buffer(link, nb_samples);
1170         return AVERROR(ENOMEM);
1171     ret = av_frame_copy_props(buf, frame0);
1173         av_frame_free(&buf);
/* Timestamp of the aggregate is that of the first source frame. */
1176     buf->pts = frame0->pts;
/* Copy and release the fully-consumed frames. */
1179     for (i = 0; i < nb_frames; i++) {
1180         frame = ff_framequeue_take(&link->fifo);
1181         av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1182                         frame->nb_samples, link->channels, link->format);
1183         p += frame->nb_samples;
1184         av_frame_free(&frame);
/* Partially consume the next frame to reach exactly nb_samples. */
1186     if (p < nb_samples) {
1187         unsigned n = nb_samples - p;
1188         frame = ff_framequeue_peek(&link->fifo, 0);
1189         av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1190                         link->channels, link->format);
1191         ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Pull one frame (or min..max samples, for audio links with min_samples
 * set) from the link FIFO and feed it to the destination filter via
 * ff_filter_frame_framed.  Unblocks the destination's outputs first, and
 * compensates frame_count_out because the consume helpers already
 * incremented it.  A new error status is propagated to the link; the
 * filter is re-marked ready in case more input is available. */
1198 static int ff_filter_frame_to_filter(AVFilterLink *link)
1200     AVFrame *frame = NULL;
1201     AVFilterContext *dst = link->dst;
1204     av_assert1(ff_framequeue_queued_frames(&link->fifo));
1205     ret = link->min_samples ?
1206           ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1207           ff_inlink_consume_frame(link, &frame);
1213     /* The filter will soon have received a new frame, that may allow it to
1214        produce one or more: unblock its outputs. */
1215     filter_unblock(dst);
1216     /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1217        before the frame; ff_filter_frame_framed() will re-increment it. */
1218     link->frame_count_out--;
1219     ret = ff_filter_frame_framed(link, frame);
1220     if (ret < 0 && ret != link->status_out) {
1221         ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1223     /* Run once again, to see if several frames were available, or if
1224        the input status has also changed, or any other reason. */
1225     ff_filter_set_ready(dst, 300);
/* Propagate an input status change (EOF/error on 'in') to the filter's
 * outputs by repeatedly requesting frames from them until the input's
 * output-side status gets set, or every output is itself closed — in
 * which case the input is acknowledged directly (e.g. overlay in
 * shortest mode).  Re-marks the filter ready before returning. */
1230 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1232     unsigned out = 0, progress = 0;
1235     av_assert0(!in->status_out);
1236     if (!filter->nb_outputs) {
1237         /* not necessary with the current API and sinks */
1240     while (!in->status_out) {
1241         if (!filter->outputs[out]->status_in) {
1243             ret = ff_request_frame_to_filter(filter->outputs[out]);
/* Completed a full pass over the outputs. */
1247         if (++out == filter->nb_outputs) {
1249                 /* Every output already closed: input no longer interesting
1250                    (example: overlay in shortest mode, other input closed). */
1251                 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1258     ff_filter_set_ready(filter, 200);
/* Default activate callback for filters using the legacy callbacks, in
 * priority order: (1) feed a queued frame from an input that has enough
 * samples ready; (2) forward a pending input status change; (3) forward
 * a downstream frame request upstream.  If none applies, the filter was
 * not actually ready. */
1262 static int ff_filter_activate_default(AVFilterContext *filter)
1266     for (i = 0; i < filter->nb_inputs; i++) {
1267         if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1268             return ff_filter_frame_to_filter(filter->inputs[i]);
1271     for (i = 0; i < filter->nb_inputs; i++) {
1272         if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1273             av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1274             return forward_status_change(filter, filter->inputs[i]);
1277     for (i = 0; i < filter->nb_outputs; i++) {
1278         if (filter->outputs[i]->frame_wanted_out &&
1279             !filter->outputs[i]->frame_blocked_in) {
1280             return ff_request_frame_to_filter(filter->outputs[i]);
1283     return FFERROR_NOT_READY;
1287 Filter scheduling and activation
1289 When a filter is activated, it must:
1290 - if possible, output a frame;
1291 - else, if relevant, forward the input status change;
1292 - else, check outputs for wanted frames and forward the requests.
1294 The following AVFilterLink fields are used for activation:
1298 This field indicates if a frame is needed on this input of the
1299 destination filter. A positive value indicates that a frame is needed
1300 to process queued frames or internal data or to satisfy the
1301 application; a zero value indicates that a frame is not especially
1302 needed but could be processed anyway; a negative value indicates that a
1303 frame would just be queued.
1305 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1306 when requested by the application through a specific API or when it is
1307 set on one of the outputs.
1309 It is cleared when a frame is sent from the source using
1312 It is also cleared when a status change is sent from the source using
1313 ff_avfilter_link_set_in_status().
1317 This field means that the source filter can not generate a frame as is.
1318 Its goal is to avoid repeatedly calling the request_frame() method on
1321 It is set by the framework on all outputs of a filter before activating it.
1323 It is automatically cleared by ff_filter_frame().
1325 It is also automatically cleared by ff_avfilter_link_set_in_status().
1327 It is also cleared on all outputs (using filter_unblock()) when
1328 something happens on an input: processing a frame or changing the
1333 Contains the frames queued on a filter input. If it contains frames and
frame_wanted_out is not set, then the filter can be activated. If that
results in the filter not being able to use these frames, the filter must
set frame_wanted_out to ask for more frames.
1338 - status_in and status_in_pts:
1340 Status (EOF or error code) of the link and timestamp of the status
1341 change (in link time base, same as frames) as seen from the input of
1342 the link. The status change is considered happening after the frames
1345 It is set by the source filter using ff_avfilter_link_set_in_status().
Status of the link as seen from the output of the link. The status
change is considered to have already happened.
1352 It is set by the destination filter using
1353 ff_avfilter_link_set_out_status().
1355 Filters are activated according to the ready field, set using the
1356 ff_filter_set_ready(). Eventually, a priority queue will be used.
1357 ff_filter_set_ready() is called whenever anything could cause progress to
1358 be possible. Marking a filter ready when it is not is not a problem,
1359 except for the small overhead it causes.
1361 Conditions that cause a filter to be marked ready are:
1363 - frames added on an input link;
1365 - changes in the input or output status of an input link;
1367 - requests for a frame on an output link;
1369 - after any actual processing using the legacy methods (filter_frame(),
1370 and request_frame() to acknowledge status changes), to run once more
1371 and check if enough input was present for several frames.
1373 Examples of scenarios to consider:
1375 - buffersrc: activate if frame_wanted_out to notify the application;
1376 activate when the application adds a frame to push it immediately.
1378 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1380 - concat (not at stitch points): can process a frame on any output.
1381 Activate if frame_wanted_out on output to forward on the corresponding
1382 input. Activate when a frame is present on input to process it
1385 - framesync: needs at least one frame on each input; extra frames on the
1386 wrong input will accumulate. When a frame is first added on one input,
1387 set frame_wanted_out<0 on it to avoid getting more (would trigger
1388 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1390 Activation of old filters:
1392 In order to activate a filter implementing the legacy filter_frame() and
1393 request_frame() methods, perform the first possible of the following
1396 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1397 frame and call filter_frame().
1399 Rationale: filter frames as soon as possible instead of leaving them
1400 queued; frame_wanted_out < 0 is not possible since the old API does not
1401 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1402 when min_samples > 0 and there are not enough samples queued.
1404 - If an input has status_in set but not status_out, try to call
1405 request_frame() on one of the outputs in the hope that it will trigger
1406 request_frame() on the input with status_in and acknowledge it. This is
1407 awkward and fragile, filters with several inputs or outputs should be
1408 updated to direct activation as soon as possible.
1410 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1413 Rationale: checking frame_blocked_in is necessary to avoid requesting
1414 repeatedly on a blocked input if another is not blocked (example:
1415 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1417 TODO: respect needs_fifo and remove auto-inserted fifos.
1421 int ff_filter_activate(AVFilterContext *filter)
1425 /* Generic timeline support is not yet implemented but should be easy */
1426 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1427 filter->filter->activate));
1429 ret = filter->filter->activate ? filter->filter->activate(filter) :
1430 ff_filter_activate_default(filter);
1431 if (ret == FFERROR_NOT_READY)
1436 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1438 *rpts = link->current_pts;
1439 if (ff_framequeue_queued_frames(&link->fifo))
1440 return *rstatus = 0;
1441 if (link->status_out)
1442 return *rstatus = link->status_out;
1443 if (!link->status_in)
1444 return *rstatus = 0;
1445 *rstatus = link->status_out = link->status_in;
1446 ff_update_link_current_pts(link, link->status_in_pts);
1447 *rpts = link->current_pts;
1451 size_t ff_inlink_queued_frames(AVFilterLink *link)
1453 return ff_framequeue_queued_frames(&link->fifo);
1456 int ff_inlink_check_available_frame(AVFilterLink *link)
1458 return ff_framequeue_queued_frames(&link->fifo) > 0;
1461 int ff_inlink_queued_samples(AVFilterLink *link)
1463 return ff_framequeue_queued_samples(&link->fifo);
1466 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1468 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1470 return samples >= min || (link->status_in && samples);
1473 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1475 ff_update_link_current_pts(link, frame->pts);
1476 ff_inlink_process_commands(link, frame);
1477 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1478 link->frame_count_out++;
1481 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1486 if (!ff_inlink_check_available_frame(link))
1489 if (link->fifo.samples_skipped) {
1490 frame = ff_framequeue_peek(&link->fifo, 0);
1491 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1494 frame = ff_framequeue_take(&link->fifo);
1495 consume_update(link, frame);
1500 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1508 if (!ff_inlink_check_available_samples(link, min))
1510 if (link->status_in)
1511 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1512 ret = take_samples(link, min, max, &frame);
1515 consume_update(link, frame);
1520 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1522 return ff_framequeue_peek(&link->fifo, idx);
1525 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1527 AVFrame *frame = *rframe;
1531 if (av_frame_is_writable(frame))
1533 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1535 switch (link->type) {
1536 case AVMEDIA_TYPE_VIDEO:
1537 out = ff_get_video_buffer(link, link->w, link->h);
1539 case AVMEDIA_TYPE_AUDIO:
1540 out = ff_get_audio_buffer(link, frame->nb_samples);
1543 return AVERROR(EINVAL);
1546 return AVERROR(ENOMEM);
1548 ret = av_frame_copy_props(out, frame);
1550 av_frame_free(&out);
1554 switch (link->type) {
1555 case AVMEDIA_TYPE_VIDEO:
1556 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1557 frame->format, frame->width, frame->height);
1559 case AVMEDIA_TYPE_AUDIO:
1560 av_samples_copy(out->extended_data, frame->extended_data,
1561 0, 0, frame->nb_samples,
1566 av_assert0(!"reached");
1569 av_frame_free(&frame);
1574 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1576 AVFilterCommand *cmd = link->dst->command_queue;
1578 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1579 av_log(link->dst, AV_LOG_DEBUG,
1580 "Processing command time:%f command:%s arg:%s\n",
1581 cmd->time, cmd->command, cmd->arg);
1582 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1583 ff_command_queue_pop(link->dst);
1584 cmd= link->dst->command_queue;
1589 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1591 AVFilterContext *dstctx = link->dst;
1592 int64_t pts = frame->pts;
1593 int64_t pos = frame->pkt_pos;
1595 if (!dstctx->enable_str)
1598 dstctx->var_values[VAR_N] = link->frame_count_out;
1599 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1600 dstctx->var_values[VAR_W] = link->w;
1601 dstctx->var_values[VAR_H] = link->h;
1602 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1604 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1607 void ff_inlink_request_frame(AVFilterLink *link)
1609 av_assert1(!link->status_in);
1610 av_assert1(!link->status_out);
1611 link->frame_wanted_out = 1;
1612 ff_filter_set_ready(link->src, 100);
1615 void ff_inlink_set_status(AVFilterLink *link, int status)
1617 if (link->status_out)
1619 link->frame_wanted_out = 0;
1620 link->frame_blocked_in = 0;
1621 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1622 while (ff_framequeue_queued_frames(&link->fifo)) {
1623 AVFrame *frame = ff_framequeue_take(&link->fifo);
1624 av_frame_free(&frame);
1626 if (!link->status_in)
1627 link->status_in = status;
1630 int ff_outlink_get_status(AVFilterLink *link)
1632 return link->status_in;
1635 const AVClass *avfilter_get_class(void)
1637 return &avfilter_class;
1640 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1641 int default_pool_size)
1643 AVHWFramesContext *frames;
1645 // Must already be set by caller.
1646 av_assert0(link->hw_frames_ctx);
1648 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1650 if (frames->initial_pool_size == 0) {
1651 // Dynamic allocation is necessarily supported.
1652 } else if (avctx->extra_hw_frames >= 0) {
1653 frames->initial_pool_size += avctx->extra_hw_frames;
1655 frames->initial_pool_size = default_pool_size;