3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
/* Exported version string; allows tools to identify the FFmpeg build
 * this libavfilter binary came from (symbol is part of the ABI). */
46 #include "libavutil/ffversion.h"
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
186 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
188 filter->ready = FFMAX(filter->ready, priority);
192 * Clear frame_blocked_in on all outputs.
193 * This is necessary whenever something changes on input.
195 static void filter_unblock(AVFilterContext *filter)
199 for (i = 0; i < filter->nb_outputs; i++)
200 filter->outputs[i]->frame_blocked_in = 0;
204 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
206 if (link->status_in == status)
208 av_assert0(!link->status_in);
209 link->status_in = status;
210 link->status_in_pts = pts;
211 link->frame_wanted_out = 0;
212 link->frame_blocked_in = 0;
213 filter_unblock(link->dst);
214 ff_filter_set_ready(link->dst, 200);
217 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
219 av_assert0(!link->frame_wanted_out);
220 av_assert0(!link->status_out);
221 link->status_out = status;
222 if (pts != AV_NOPTS_VALUE)
223 ff_update_link_current_pts(link, pts);
224 filter_unblock(link->dst);
225 ff_filter_set_ready(link->src, 200);
228 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
229 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
232 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
234 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
235 "between the filter '%s' and the filter '%s'\n",
236 filt->name, link->src->name, link->dst->name);
238 link->dst->inputs[dstpad_idx] = NULL;
239 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
240 /* failed to link output filter to new filter */
241 link->dst->inputs[dstpad_idx] = link;
245 /* re-hookup the link to the new destination filter we inserted */
247 link->dstpad = &filt->input_pads[filt_srcpad_idx];
248 filt->inputs[filt_srcpad_idx] = link;
250 /* if any information on supported media formats already exists on the
251 * link, we need to preserve that */
252 if (link->outcfg.formats)
253 ff_formats_changeref(&link->outcfg.formats,
254 &filt->outputs[filt_dstpad_idx]->outcfg.formats);
255 if (link->outcfg.samplerates)
256 ff_formats_changeref(&link->outcfg.samplerates,
257 &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
258 if (link->outcfg.channel_layouts)
259 ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
260 &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
265 int avfilter_config_links(AVFilterContext *filter)
267 int (*config_link)(AVFilterLink *);
271 for (i = 0; i < filter->nb_inputs; i ++) {
272 AVFilterLink *link = filter->inputs[i];
273 AVFilterLink *inlink;
276 if (!link->src || !link->dst) {
277 av_log(filter, AV_LOG_ERROR,
278 "Not all input and output are properly linked (%d).\n", i);
279 return AVERROR(EINVAL);
282 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
284 link->current_pts_us = AV_NOPTS_VALUE;
286 switch (link->init_state) {
289 case AVLINK_STARTINIT:
290 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
293 link->init_state = AVLINK_STARTINIT;
295 if ((ret = avfilter_config_links(link->src)) < 0)
298 if (!(config_link = link->srcpad->config_props)) {
299 if (link->src->nb_inputs != 1) {
300 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
301 "with more than one input "
302 "must set config_props() "
303 "callbacks on all outputs\n");
304 return AVERROR(EINVAL);
306 } else if ((ret = config_link(link)) < 0) {
307 av_log(link->src, AV_LOG_ERROR,
308 "Failed to configure output pad on %s\n",
313 switch (link->type) {
314 case AVMEDIA_TYPE_VIDEO:
315 if (!link->time_base.num && !link->time_base.den)
316 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
318 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
319 link->sample_aspect_ratio = inlink ?
320 inlink->sample_aspect_ratio : (AVRational){1,1};
323 if (!link->frame_rate.num && !link->frame_rate.den)
324 link->frame_rate = inlink->frame_rate;
329 } else if (!link->w || !link->h) {
330 av_log(link->src, AV_LOG_ERROR,
331 "Video source filters must set their output link's "
332 "width and height\n");
333 return AVERROR(EINVAL);
337 case AVMEDIA_TYPE_AUDIO:
339 if (!link->time_base.num && !link->time_base.den)
340 link->time_base = inlink->time_base;
343 if (!link->time_base.num && !link->time_base.den)
344 link->time_base = (AVRational) {1, link->sample_rate};
347 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
348 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
349 av_assert0(!link->hw_frames_ctx &&
350 "should not be set by non-hwframe-aware filter");
351 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
352 if (!link->hw_frames_ctx)
353 return AVERROR(ENOMEM);
356 if ((config_link = link->dstpad->config_props))
357 if ((ret = config_link(link)) < 0) {
358 av_log(link->dst, AV_LOG_ERROR,
359 "Failed to configure input pad on %s\n",
364 link->init_state = AVLINK_INIT;
371 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
373 if (link->type == AVMEDIA_TYPE_VIDEO) {
375 "link[%p s:%dx%d fmt:%s %s->%s]%s",
376 link, link->w, link->h,
377 av_get_pix_fmt_name(link->format),
378 link->src ? link->src->filter->name : "",
379 link->dst ? link->dst->filter->name : "",
383 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
386 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
387 link, (int)link->sample_rate, buf,
388 av_get_sample_fmt_name(link->format),
389 link->src ? link->src->filter->name : "",
390 link->dst ? link->dst->filter->name : "",
395 int ff_request_frame(AVFilterLink *link)
397 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
399 av_assert1(!link->dst->filter->activate);
400 if (link->status_out)
401 return link->status_out;
402 if (link->status_in) {
403 if (ff_framequeue_queued_frames(&link->fifo)) {
404 av_assert1(!link->frame_wanted_out);
405 av_assert1(link->dst->ready >= 300);
408 /* Acknowledge status change. Filters using ff_request_frame() will
409 handle the change automatically. Filters can also check the
410 status directly but none do yet. */
411 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
412 return link->status_out;
415 link->frame_wanted_out = 1;
416 ff_filter_set_ready(link->src, 100);
420 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
423 int64_t r = INT64_MAX;
425 for (i = 0; i < ctx->nb_inputs; i++)
426 if (ctx->inputs[i]->status_out == status)
427 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
430 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
431 for (i = 0; i < ctx->nb_inputs; i++)
432 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
435 return AV_NOPTS_VALUE;
438 static int ff_request_frame_to_filter(AVFilterLink *link)
442 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
443 /* Assume the filter is blocked, let the method clear it if not */
444 link->frame_blocked_in = 1;
445 if (link->srcpad->request_frame)
446 ret = link->srcpad->request_frame(link);
447 else if (link->src->inputs[0])
448 ret = ff_request_frame(link->src->inputs[0]);
450 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
451 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
452 if (ret == AVERROR_EOF)
458 static const char *const var_names[] = {
476 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
480 AVExpr *old = ctx->enable;
482 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
483 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
484 "with filter '%s'\n", ctx->filter->name);
485 return AVERROR_PATCHWELCOME;
488 expr_dup = av_strdup(expr);
490 return AVERROR(ENOMEM);
492 if (!ctx->var_values) {
493 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
494 if (!ctx->var_values) {
496 return AVERROR(ENOMEM);
500 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
501 NULL, NULL, NULL, NULL, 0, ctx->priv);
503 av_log(ctx->priv, AV_LOG_ERROR,
504 "Error when evaluating the expression '%s' for enable\n",
511 av_free(ctx->enable_str);
512 ctx->enable_str = expr_dup;
516 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
518 if (pts == AV_NOPTS_VALUE)
520 link->current_pts = pts;
521 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
522 /* TODO use duration */
523 if (link->graph && link->age_index >= 0)
524 ff_avfilter_graph_update_heap(link->graph, link);
527 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
529 if(!strcmp(cmd, "ping")){
530 char local_res[256] = {0};
534 res_len = sizeof(local_res);
536 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
537 if (res == local_res)
538 av_log(filter, AV_LOG_INFO, "%s", res);
540 }else if(!strcmp(cmd, "enable")) {
541 return set_enable_expr(filter, arg);
542 }else if(filter->filter->process_command) {
543 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
545 return AVERROR(ENOSYS);
548 int avfilter_pad_count(const AVFilterPad *pads)
555 for (count = 0; pads->name; count++)
560 static const char *default_filter_name(void *filter_ctx)
562 AVFilterContext *ctx = filter_ctx;
563 return ctx->name ? ctx->name : ctx->filter->name;
566 static void *filter_child_next(void *obj, void *prev)
568 AVFilterContext *ctx = obj;
569 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
574 static const AVClass *filter_child_class_iterate(void **iter)
578 while ((f = av_filter_iterate(iter)))
580 return f->priv_class;
585 #define OFFSET(x) offsetof(AVFilterContext, x)
586 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
587 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
588 static const AVOption avfilter_options[] = {
589 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
590 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
591 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
592 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
593 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
594 { .i64 = 0 }, 0, INT_MAX, FLAGS },
595 { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
596 OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
600 static const AVClass avfilter_class = {
601 .class_name = "AVFilter",
602 .item_name = default_filter_name,
603 .version = LIBAVUTIL_VERSION_INT,
604 .category = AV_CLASS_CATEGORY_FILTER,
605 .child_next = filter_child_next,
606 .child_class_iterate = filter_child_class_iterate,
607 .option = avfilter_options,
610 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
611 int *ret, int nb_jobs)
615 for (i = 0; i < nb_jobs; i++) {
616 int r = func(ctx, arg, i, nb_jobs);
623 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
625 AVFilterContext *ret;
631 ret = av_mallocz(sizeof(AVFilterContext));
635 ret->av_class = &avfilter_class;
636 ret->filter = filter;
637 ret->name = inst_name ? av_strdup(inst_name) : NULL;
638 if (filter->priv_size) {
639 ret->priv = av_mallocz(filter->priv_size);
643 if (filter->preinit) {
644 if (filter->preinit(ret) < 0)
649 av_opt_set_defaults(ret);
650 if (filter->priv_class) {
651 *(const AVClass**)ret->priv = filter->priv_class;
652 av_opt_set_defaults(ret->priv);
655 ret->internal = av_mallocz(sizeof(*ret->internal));
658 ret->internal->execute = default_execute;
660 ret->nb_inputs = avfilter_pad_count(filter->inputs);
661 if (ret->nb_inputs ) {
662 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
663 if (!ret->input_pads)
665 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
666 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
671 ret->nb_outputs = avfilter_pad_count(filter->outputs);
672 if (ret->nb_outputs) {
673 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
674 if (!ret->output_pads)
676 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
677 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
687 av_freep(&ret->inputs);
688 av_freep(&ret->input_pads);
690 av_freep(&ret->outputs);
691 av_freep(&ret->output_pads);
693 av_freep(&ret->priv);
694 av_freep(&ret->internal);
699 static void free_link(AVFilterLink *link)
705 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
707 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
709 av_buffer_unref(&link->hw_frames_ctx);
711 ff_formats_unref(&link->incfg.formats);
712 ff_formats_unref(&link->outcfg.formats);
713 ff_formats_unref(&link->incfg.samplerates);
714 ff_formats_unref(&link->outcfg.samplerates);
715 ff_channel_layouts_unref(&link->incfg.channel_layouts);
716 ff_channel_layouts_unref(&link->outcfg.channel_layouts);
717 avfilter_link_free(&link);
720 void avfilter_free(AVFilterContext *filter)
728 ff_filter_graph_remove_filter(filter->graph, filter);
730 if (filter->filter->uninit)
731 filter->filter->uninit(filter);
733 for (i = 0; i < filter->nb_inputs; i++) {
734 free_link(filter->inputs[i]);
736 for (i = 0; i < filter->nb_outputs; i++) {
737 free_link(filter->outputs[i]);
740 if (filter->filter->priv_class)
741 av_opt_free(filter->priv);
743 av_buffer_unref(&filter->hw_device_ctx);
745 av_freep(&filter->name);
746 av_freep(&filter->input_pads);
747 av_freep(&filter->output_pads);
748 av_freep(&filter->inputs);
749 av_freep(&filter->outputs);
750 av_freep(&filter->priv);
751 while(filter->command_queue){
752 ff_command_queue_pop(filter);
755 av_expr_free(filter->enable);
756 filter->enable = NULL;
757 av_freep(&filter->var_values);
758 av_freep(&filter->internal);
762 int ff_filter_get_nb_threads(AVFilterContext *ctx)
764 if (ctx->nb_threads > 0)
765 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
766 return ctx->graph->nb_threads;
769 static int process_options(AVFilterContext *ctx, AVDictionary **options,
772 const AVOption *o = NULL;
774 char *av_uninit(parsed_key), *av_uninit(value);
782 const char *shorthand = NULL;
784 o = av_opt_next(ctx->priv, o);
786 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
792 ret = av_opt_get_key_value(&args, "=", ":",
793 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
794 &parsed_key, &value);
796 if (ret == AVERROR(EINVAL))
797 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
799 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
807 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
812 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
814 if (av_opt_find(ctx, key, NULL, 0, 0)) {
815 ret = av_opt_set(ctx, key, value, 0);
822 av_dict_set(options, key, value, 0);
823 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
824 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
825 if (ret == AVERROR_OPTION_NOT_FOUND)
826 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
842 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
843 const char *arg, char *res, int res_len, int flags)
847 if (!ctx->filter->priv_class)
849 o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
851 return AVERROR(ENOSYS);
852 return av_opt_set(ctx->priv, cmd, arg, 0);
855 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
859 ret = av_opt_set_dict(ctx, options);
861 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
865 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
866 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
867 ctx->graph->internal->thread_execute) {
868 ctx->thread_type = AVFILTER_THREAD_SLICE;
869 ctx->internal->execute = ctx->graph->internal->thread_execute;
871 ctx->thread_type = 0;
874 if (ctx->filter->priv_class) {
875 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
877 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
882 if (ctx->filter->init_opaque)
883 ret = ctx->filter->init_opaque(ctx, NULL);
884 else if (ctx->filter->init)
885 ret = ctx->filter->init(ctx);
886 else if (ctx->filter->init_dict)
887 ret = ctx->filter->init_dict(ctx, options);
889 if (ctx->enable_str) {
890 ret = set_enable_expr(ctx, ctx->enable_str);
898 int avfilter_init_str(AVFilterContext *filter, const char *args)
900 AVDictionary *options = NULL;
901 AVDictionaryEntry *e;
905 if (!filter->filter->priv_class) {
906 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
907 "options, but options were provided: %s.\n", args);
908 return AVERROR(EINVAL);
911 ret = process_options(filter, &options, args);
916 ret = avfilter_init_dict(filter, &options);
920 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
921 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
922 ret = AVERROR_OPTION_NOT_FOUND;
927 av_dict_free(&options);
932 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
934 return pads[pad_idx].name;
937 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
939 return pads[pad_idx].type;
942 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
944 return ff_filter_frame(link->dst->outputs[0], frame);
947 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
949 int (*filter_frame)(AVFilterLink *, AVFrame *);
950 AVFilterContext *dstctx = link->dst;
951 AVFilterPad *dst = link->dstpad;
954 if (!(filter_frame = dst->filter_frame))
955 filter_frame = default_filter_frame;
957 if (dst->needs_writable) {
958 ret = ff_inlink_make_frame_writable(link, &frame);
963 ff_inlink_process_commands(link, frame);
964 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
966 if (dstctx->is_disabled &&
967 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
968 filter_frame = default_filter_frame;
969 ret = filter_frame(link, frame);
970 link->frame_count_out++;
974 av_frame_free(&frame);
978 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
981 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
983 /* Consistency checks */
984 if (link->type == AVMEDIA_TYPE_VIDEO) {
985 if (strcmp(link->dst->filter->name, "buffersink") &&
986 strcmp(link->dst->filter->name, "format") &&
987 strcmp(link->dst->filter->name, "idet") &&
988 strcmp(link->dst->filter->name, "null") &&
989 strcmp(link->dst->filter->name, "scale")) {
990 av_assert1(frame->format == link->format);
991 av_assert1(frame->width == link->w);
992 av_assert1(frame->height == link->h);
995 if (frame->format != link->format) {
996 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
999 if (frame->channels != link->channels) {
1000 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1003 if (frame->channel_layout != link->channel_layout) {
1004 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1007 if (frame->sample_rate != link->sample_rate) {
1008 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1013 link->frame_blocked_in = link->frame_wanted_out = 0;
1014 link->frame_count_in++;
1015 filter_unblock(link->dst);
1016 ret = ff_framequeue_add(&link->fifo, frame);
1018 av_frame_free(&frame);
1021 ff_filter_set_ready(link->dst, 300);
1025 av_frame_free(&frame);
1026 return AVERROR_PATCHWELCOME;
1029 static int samples_ready(AVFilterLink *link, unsigned min)
1031 return ff_framequeue_queued_frames(&link->fifo) &&
1032 (ff_framequeue_queued_samples(&link->fifo) >= min ||
1036 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1039 AVFrame *frame0, *frame, *buf;
1040 unsigned nb_samples, nb_frames, i, p;
1043 /* Note: this function relies on no format changes and must only be
1044 called with enough samples. */
1045 av_assert1(samples_ready(link, link->min_samples));
1046 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1047 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1048 *rframe = ff_framequeue_take(&link->fifo);
1054 if (nb_samples + frame->nb_samples > max) {
1055 if (nb_samples < min)
1059 nb_samples += frame->nb_samples;
1061 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1063 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1066 buf = ff_get_audio_buffer(link, nb_samples);
1068 return AVERROR(ENOMEM);
1069 ret = av_frame_copy_props(buf, frame0);
1071 av_frame_free(&buf);
1074 buf->pts = frame0->pts;
1077 for (i = 0; i < nb_frames; i++) {
1078 frame = ff_framequeue_take(&link->fifo);
1079 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1080 frame->nb_samples, link->channels, link->format);
1081 p += frame->nb_samples;
1082 av_frame_free(&frame);
1084 if (p < nb_samples) {
1085 unsigned n = nb_samples - p;
1086 frame = ff_framequeue_peek(&link->fifo, 0);
1087 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1088 link->channels, link->format);
1089 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
1096 static int ff_filter_frame_to_filter(AVFilterLink *link)
1098 AVFrame *frame = NULL;
1099 AVFilterContext *dst = link->dst;
1102 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1103 ret = link->min_samples ?
1104 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1105 ff_inlink_consume_frame(link, &frame);
1111 /* The filter will soon have received a new frame, that may allow it to
1112 produce one or more: unblock its outputs. */
1113 filter_unblock(dst);
1114 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1115 before the frame; ff_filter_frame_framed() will re-increment it. */
1116 link->frame_count_out--;
1117 ret = ff_filter_frame_framed(link, frame);
1118 if (ret < 0 && ret != link->status_out) {
1119 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1121 /* Run once again, to see if several frames were available, or if
1122 the input status has also changed, or any other reason. */
1123 ff_filter_set_ready(dst, 300);
1128 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1130 unsigned out = 0, progress = 0;
1133 av_assert0(!in->status_out);
1134 if (!filter->nb_outputs) {
1135 /* not necessary with the current API and sinks */
1138 while (!in->status_out) {
1139 if (!filter->outputs[out]->status_in) {
1141 ret = ff_request_frame_to_filter(filter->outputs[out]);
1145 if (++out == filter->nb_outputs) {
1147 /* Every output already closed: input no longer interesting
1148 (example: overlay in shortest mode, other input closed). */
1149 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1156 ff_filter_set_ready(filter, 200);
1160 static int ff_filter_activate_default(AVFilterContext *filter)
1164 for (i = 0; i < filter->nb_inputs; i++) {
1165 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1166 return ff_filter_frame_to_filter(filter->inputs[i]);
1169 for (i = 0; i < filter->nb_inputs; i++) {
1170 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1171 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1172 return forward_status_change(filter, filter->inputs[i]);
1175 for (i = 0; i < filter->nb_outputs; i++) {
1176 if (filter->outputs[i]->frame_wanted_out &&
1177 !filter->outputs[i]->frame_blocked_in) {
1178 return ff_request_frame_to_filter(filter->outputs[i]);
1181 return FFERROR_NOT_READY;
1185 Filter scheduling and activation
1187 When a filter is activated, it must:
1188 - if possible, output a frame;
1189 - else, if relevant, forward the input status change;
1190 - else, check outputs for wanted frames and forward the requests.
1192 The following AVFilterLink fields are used for activation:
1196 This field indicates if a frame is needed on this input of the
1197 destination filter. A positive value indicates that a frame is needed
1198 to process queued frames or internal data or to satisfy the
1199 application; a zero value indicates that a frame is not especially
1200 needed but could be processed anyway; a negative value indicates that a
1201 frame would just be queued.
1203 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1204 when requested by the application through a specific API or when it is
1205 set on one of the outputs.
1207 It is cleared when a frame is sent from the source using
1210 It is also cleared when a status change is sent from the source using
1211 ff_avfilter_link_set_in_status().
1215 This field means that the source filter can not generate a frame as is.
1216 Its goal is to avoid repeatedly calling the request_frame() method on
1219 It is set by the framework on all outputs of a filter before activating it.
1221 It is automatically cleared by ff_filter_frame().
1223 It is also automatically cleared by ff_avfilter_link_set_in_status().
1225 It is also cleared on all outputs (using filter_unblock()) when
1226 something happens on an input: processing a frame or changing the
1231 Contains the frames queued on a filter input. If it contains frames and
1232 frame_wanted_out is not set, then the filter can be activated. If that
1233 results in the filter not being able to use these frames, the filter must set
1234 frame_wanted_out to ask for more frames.
1236 - status_in and status_in_pts:
1238 Status (EOF or error code) of the link and timestamp of the status
1239 change (in link time base, same as frames) as seen from the input of
1240 the link. The status change is considered happening after the frames
1243 It is set by the source filter using ff_avfilter_link_set_in_status().
1247 Status of the link as seen from the output of the link. The status
1248 change is considered having already happened.
1250 It is set by the destination filter using
1251 ff_avfilter_link_set_out_status().
1253 Filters are activated according to the ready field, set using the
1254 ff_filter_set_ready(). Eventually, a priority queue will be used.
1255 ff_filter_set_ready() is called whenever anything could cause progress to
1256 be possible. Marking a filter ready when it is not is not a problem,
1257 except for the small overhead it causes.
1259 Conditions that cause a filter to be marked ready are:
1261 - frames added on an input link;
1263 - changes in the input or output status of an input link;
1265 - requests for a frame on an output link;
1267 - after any actual processing using the legacy methods (filter_frame(),
1268 and request_frame() to acknowledge status changes), to run once more
1269 and check if enough input was present for several frames.
1271 Examples of scenarios to consider:
1273 - buffersrc: activate if frame_wanted_out to notify the application;
1274 activate when the application adds a frame to push it immediately.
1276 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1278 - concat (not at stitch points): can process a frame on any output.
1279 Activate if frame_wanted_out on output to forward on the corresponding
1280 input. Activate when a frame is present on input to process it
1283 - framesync: needs at least one frame on each input; extra frames on the
1284 wrong input will accumulate. When a frame is first added on one input,
1285 set frame_wanted_out<0 on it to avoid getting more (would trigger
1286 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1288 Activation of old filters:
1290 In order to activate a filter implementing the legacy filter_frame() and
1291 request_frame() methods, perform the first possible of the following
1294 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1295 frame and call filter_frame().
1297 Rationale: filter frames as soon as possible instead of leaving them
1298 queued; frame_wanted_out < 0 is not possible since the old API does not
1299 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1300 when min_samples > 0 and there are not enough samples queued.
1302 - If an input has status_in set but not status_out, try to call
1303 request_frame() on one of the outputs in the hope that it will trigger
1304 request_frame() on the input with status_in and acknowledge it. This is
1305 awkward and fragile, filters with several inputs or outputs should be
1306 updated to direct activation as soon as possible.
1308 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1311 Rationale: checking frame_blocked_in is necessary to avoid requesting
1312 repeatedly on a blocked input if another is not blocked (example:
1313 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1316 int ff_filter_activate(AVFilterContext *filter)
1320 /* Generic timeline support is not yet implemented but should be easy */
1321 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1322 filter->filter->activate));
1324 ret = filter->filter->activate ? filter->filter->activate(filter) :
1325 ff_filter_activate_default(filter);
1326 if (ret == FFERROR_NOT_READY)
1331 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1333 *rpts = link->current_pts;
1334 if (ff_framequeue_queued_frames(&link->fifo))
1335 return *rstatus = 0;
1336 if (link->status_out)
1337 return *rstatus = link->status_out;
1338 if (!link->status_in)
1339 return *rstatus = 0;
1340 *rstatus = link->status_out = link->status_in;
1341 ff_update_link_current_pts(link, link->status_in_pts);
1342 *rpts = link->current_pts;
1346 size_t ff_inlink_queued_frames(AVFilterLink *link)
1348 return ff_framequeue_queued_frames(&link->fifo);
1351 int ff_inlink_check_available_frame(AVFilterLink *link)
1353 return ff_framequeue_queued_frames(&link->fifo) > 0;
1356 int ff_inlink_queued_samples(AVFilterLink *link)
1358 return ff_framequeue_queued_samples(&link->fifo);
1361 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1363 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1365 return samples >= min || (link->status_in && samples);
1368 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1370 ff_update_link_current_pts(link, frame->pts);
1371 ff_inlink_process_commands(link, frame);
1372 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1373 link->frame_count_out++;
1376 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1381 if (!ff_inlink_check_available_frame(link))
1384 if (link->fifo.samples_skipped) {
1385 frame = ff_framequeue_peek(&link->fifo, 0);
1386 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1389 frame = ff_framequeue_take(&link->fifo);
1390 consume_update(link, frame);
1395 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1403 if (!ff_inlink_check_available_samples(link, min))
1405 if (link->status_in)
1406 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1407 ret = take_samples(link, min, max, &frame);
1410 consume_update(link, frame);
1415 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1417 return ff_framequeue_peek(&link->fifo, idx);
1420 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1422 AVFrame *frame = *rframe;
1426 if (av_frame_is_writable(frame))
1428 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1430 switch (link->type) {
1431 case AVMEDIA_TYPE_VIDEO:
1432 out = ff_get_video_buffer(link, link->w, link->h);
1434 case AVMEDIA_TYPE_AUDIO:
1435 out = ff_get_audio_buffer(link, frame->nb_samples);
1438 return AVERROR(EINVAL);
1441 return AVERROR(ENOMEM);
1443 ret = av_frame_copy_props(out, frame);
1445 av_frame_free(&out);
1449 switch (link->type) {
1450 case AVMEDIA_TYPE_VIDEO:
1451 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1452 frame->format, frame->width, frame->height);
1454 case AVMEDIA_TYPE_AUDIO:
1455 av_samples_copy(out->extended_data, frame->extended_data,
1456 0, 0, frame->nb_samples,
1461 av_assert0(!"reached");
1464 av_frame_free(&frame);
1469 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1471 AVFilterCommand *cmd = link->dst->command_queue;
1473 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1474 av_log(link->dst, AV_LOG_DEBUG,
1475 "Processing command time:%f command:%s arg:%s\n",
1476 cmd->time, cmd->command, cmd->arg);
1477 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1478 ff_command_queue_pop(link->dst);
1479 cmd= link->dst->command_queue;
1484 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1486 AVFilterContext *dstctx = link->dst;
1487 int64_t pts = frame->pts;
1488 int64_t pos = frame->pkt_pos;
1490 if (!dstctx->enable_str)
1493 dstctx->var_values[VAR_N] = link->frame_count_out;
1494 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1495 dstctx->var_values[VAR_W] = link->w;
1496 dstctx->var_values[VAR_H] = link->h;
1497 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1499 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1502 void ff_inlink_request_frame(AVFilterLink *link)
1504 av_assert1(!link->status_in);
1505 av_assert1(!link->status_out);
1506 link->frame_wanted_out = 1;
1507 ff_filter_set_ready(link->src, 100);
1510 void ff_inlink_set_status(AVFilterLink *link, int status)
1512 if (link->status_out)
1514 link->frame_wanted_out = 0;
1515 link->frame_blocked_in = 0;
1516 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1517 while (ff_framequeue_queued_frames(&link->fifo)) {
1518 AVFrame *frame = ff_framequeue_take(&link->fifo);
1519 av_frame_free(&frame);
1521 if (!link->status_in)
1522 link->status_in = status;
1525 int ff_outlink_get_status(AVFilterLink *link)
1527 return link->status_in;
1530 const AVClass *avfilter_get_class(void)
1532 return &avfilter_class;
1535 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1536 int default_pool_size)
1538 AVHWFramesContext *frames;
1540 // Must already be set by caller.
1541 av_assert0(link->hw_frames_ctx);
1543 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1545 if (frames->initial_pool_size == 0) {
1546 // Dynamic allocation is necessarily supported.
1547 } else if (avctx->extra_hw_frames >= 0) {
1548 frames->initial_pool_size += avctx->extra_hw_frames;
1550 frames->initial_pool_size = default_pool_size;