/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
186 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
188 filter->ready = FFMAX(filter->ready, priority);
192 * Clear frame_blocked_in on all outputs.
193 * This is necessary whenever something changes on input.
195 static void filter_unblock(AVFilterContext *filter)
199 for (i = 0; i < filter->nb_outputs; i++)
200 filter->outputs[i]->frame_blocked_in = 0;
204 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
206 if (link->status_in == status)
208 av_assert0(!link->status_in);
209 link->status_in = status;
210 link->status_in_pts = pts;
211 link->frame_wanted_out = 0;
212 link->frame_blocked_in = 0;
213 filter_unblock(link->dst);
214 ff_filter_set_ready(link->dst, 200);
217 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
219 av_assert0(!link->frame_wanted_out);
220 av_assert0(!link->status_out);
221 link->status_out = status;
222 if (pts != AV_NOPTS_VALUE)
223 ff_update_link_current_pts(link, pts);
224 filter_unblock(link->dst);
225 ff_filter_set_ready(link->src, 200);
#if FF_API_FILTER_LINK_SET_CLOSED
/** Deprecated public wrapper: close (or reopen) a link from the output side. */
void avfilter_link_set_closed(AVFilterLink *link, int closed)
{
    ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
}
#endif
234 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
235 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
238 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
240 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
241 "between the filter '%s' and the filter '%s'\n",
242 filt->name, link->src->name, link->dst->name);
244 link->dst->inputs[dstpad_idx] = NULL;
245 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
246 /* failed to link output filter to new filter */
247 link->dst->inputs[dstpad_idx] = link;
251 /* re-hookup the link to the new destination filter we inserted */
253 link->dstpad = &filt->input_pads[filt_srcpad_idx];
254 filt->inputs[filt_srcpad_idx] = link;
256 /* if any information on supported media formats already exists on the
257 * link, we need to preserve that */
258 if (link->outcfg.formats)
259 ff_formats_changeref(&link->outcfg.formats,
260 &filt->outputs[filt_dstpad_idx]->outcfg.formats);
261 if (link->outcfg.samplerates)
262 ff_formats_changeref(&link->outcfg.samplerates,
263 &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
264 if (link->outcfg.channel_layouts)
265 ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
266 &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
271 int avfilter_config_links(AVFilterContext *filter)
273 int (*config_link)(AVFilterLink *);
277 for (i = 0; i < filter->nb_inputs; i ++) {
278 AVFilterLink *link = filter->inputs[i];
279 AVFilterLink *inlink;
282 if (!link->src || !link->dst) {
283 av_log(filter, AV_LOG_ERROR,
284 "Not all input and output are properly linked (%d).\n", i);
285 return AVERROR(EINVAL);
288 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
290 link->current_pts_us = AV_NOPTS_VALUE;
292 switch (link->init_state) {
295 case AVLINK_STARTINIT:
296 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
299 link->init_state = AVLINK_STARTINIT;
301 if ((ret = avfilter_config_links(link->src)) < 0)
304 if (!(config_link = link->srcpad->config_props)) {
305 if (link->src->nb_inputs != 1) {
306 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
307 "with more than one input "
308 "must set config_props() "
309 "callbacks on all outputs\n");
310 return AVERROR(EINVAL);
312 } else if ((ret = config_link(link)) < 0) {
313 av_log(link->src, AV_LOG_ERROR,
314 "Failed to configure output pad on %s\n",
319 switch (link->type) {
320 case AVMEDIA_TYPE_VIDEO:
321 if (!link->time_base.num && !link->time_base.den)
322 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
324 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
325 link->sample_aspect_ratio = inlink ?
326 inlink->sample_aspect_ratio : (AVRational){1,1};
329 if (!link->frame_rate.num && !link->frame_rate.den)
330 link->frame_rate = inlink->frame_rate;
335 } else if (!link->w || !link->h) {
336 av_log(link->src, AV_LOG_ERROR,
337 "Video source filters must set their output link's "
338 "width and height\n");
339 return AVERROR(EINVAL);
343 case AVMEDIA_TYPE_AUDIO:
345 if (!link->time_base.num && !link->time_base.den)
346 link->time_base = inlink->time_base;
349 if (!link->time_base.num && !link->time_base.den)
350 link->time_base = (AVRational) {1, link->sample_rate};
353 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
354 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
355 av_assert0(!link->hw_frames_ctx &&
356 "should not be set by non-hwframe-aware filter");
357 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
358 if (!link->hw_frames_ctx)
359 return AVERROR(ENOMEM);
362 if ((config_link = link->dstpad->config_props))
363 if ((ret = config_link(link)) < 0) {
364 av_log(link->dst, AV_LOG_ERROR,
365 "Failed to configure input pad on %s\n",
370 link->init_state = AVLINK_INIT;
377 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
379 if (link->type == AVMEDIA_TYPE_VIDEO) {
381 "link[%p s:%dx%d fmt:%s %s->%s]%s",
382 link, link->w, link->h,
383 av_get_pix_fmt_name(link->format),
384 link->src ? link->src->filter->name : "",
385 link->dst ? link->dst->filter->name : "",
389 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
392 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
393 link, (int)link->sample_rate, buf,
394 av_get_sample_fmt_name(link->format),
395 link->src ? link->src->filter->name : "",
396 link->dst ? link->dst->filter->name : "",
401 int ff_request_frame(AVFilterLink *link)
403 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
405 av_assert1(!link->dst->filter->activate);
406 if (link->status_out)
407 return link->status_out;
408 if (link->status_in) {
409 if (ff_framequeue_queued_frames(&link->fifo)) {
410 av_assert1(!link->frame_wanted_out);
411 av_assert1(link->dst->ready >= 300);
414 /* Acknowledge status change. Filters using ff_request_frame() will
415 handle the change automatically. Filters can also check the
416 status directly but none do yet. */
417 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
418 return link->status_out;
421 link->frame_wanted_out = 1;
422 ff_filter_set_ready(link->src, 100);
426 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
429 int64_t r = INT64_MAX;
431 for (i = 0; i < ctx->nb_inputs; i++)
432 if (ctx->inputs[i]->status_out == status)
433 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
436 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
437 for (i = 0; i < ctx->nb_inputs; i++)
438 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
441 return AV_NOPTS_VALUE;
444 static int ff_request_frame_to_filter(AVFilterLink *link)
448 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
449 /* Assume the filter is blocked, let the method clear it if not */
450 link->frame_blocked_in = 1;
451 if (link->srcpad->request_frame)
452 ret = link->srcpad->request_frame(link);
453 else if (link->src->inputs[0])
454 ret = ff_request_frame(link->src->inputs[0]);
456 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
457 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
458 if (ret == AVERROR_EOF)
464 static const char *const var_names[] = {
482 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
486 AVExpr *old = ctx->enable;
488 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
489 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
490 "with filter '%s'\n", ctx->filter->name);
491 return AVERROR_PATCHWELCOME;
494 expr_dup = av_strdup(expr);
496 return AVERROR(ENOMEM);
498 if (!ctx->var_values) {
499 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
500 if (!ctx->var_values) {
502 return AVERROR(ENOMEM);
506 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
507 NULL, NULL, NULL, NULL, 0, ctx->priv);
509 av_log(ctx->priv, AV_LOG_ERROR,
510 "Error when evaluating the expression '%s' for enable\n",
517 av_free(ctx->enable_str);
518 ctx->enable_str = expr_dup;
522 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
524 if (pts == AV_NOPTS_VALUE)
526 link->current_pts = pts;
527 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
528 /* TODO use duration */
529 if (link->graph && link->age_index >= 0)
530 ff_avfilter_graph_update_heap(link->graph, link);
533 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
535 if(!strcmp(cmd, "ping")){
536 char local_res[256] = {0};
540 res_len = sizeof(local_res);
542 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
543 if (res == local_res)
544 av_log(filter, AV_LOG_INFO, "%s", res);
546 }else if(!strcmp(cmd, "enable")) {
547 return set_enable_expr(filter, arg);
548 }else if(filter->filter->process_command) {
549 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
551 return AVERROR(ENOSYS);
554 int avfilter_pad_count(const AVFilterPad *pads)
561 for (count = 0; pads->name; count++)
566 static const char *default_filter_name(void *filter_ctx)
568 AVFilterContext *ctx = filter_ctx;
569 return ctx->name ? ctx->name : ctx->filter->name;
572 static void *filter_child_next(void *obj, void *prev)
574 AVFilterContext *ctx = obj;
575 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
#if FF_API_CHILD_CLASS_NEXT
/** Deprecated AVClass child_class_next: iterate over filter private classes. */
static const AVClass *filter_child_class_next(const AVClass *prev)
{
    void *opaque = NULL;
    const AVFilter *f = NULL;

    /* find the filter that corresponds to prev */
    while (prev && (f = av_filter_iterate(&opaque)))
        if (f->priv_class == prev)
            break;

    /* could not find filter corresponding to prev */
    if (prev && !f)
        return NULL;

    /* find next filter with specific options */
    while ((f = av_filter_iterate(&opaque)))
        if (f->priv_class)
            return f->priv_class;

    return NULL;
}
#endif
604 static const AVClass *filter_child_class_iterate(void **iter)
608 while ((f = av_filter_iterate(iter)))
610 return f->priv_class;
615 #define OFFSET(x) offsetof(AVFilterContext, x)
616 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
617 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
618 static const AVOption avfilter_options[] = {
619 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
620 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
621 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
622 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
623 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
624 { .i64 = 0 }, 0, INT_MAX, FLAGS },
625 { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
626 OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
630 static const AVClass avfilter_class = {
631 .class_name = "AVFilter",
632 .item_name = default_filter_name,
633 .version = LIBAVUTIL_VERSION_INT,
634 .category = AV_CLASS_CATEGORY_FILTER,
635 .child_next = filter_child_next,
636 #if FF_API_CHILD_CLASS_NEXT
637 .child_class_next = filter_child_class_next,
639 .child_class_iterate = filter_child_class_iterate,
640 .option = avfilter_options,
643 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
644 int *ret, int nb_jobs)
648 for (i = 0; i < nb_jobs; i++) {
649 int r = func(ctx, arg, i, nb_jobs);
656 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
658 AVFilterContext *ret;
664 ret = av_mallocz(sizeof(AVFilterContext));
668 ret->av_class = &avfilter_class;
669 ret->filter = filter;
670 ret->name = inst_name ? av_strdup(inst_name) : NULL;
671 if (filter->priv_size) {
672 ret->priv = av_mallocz(filter->priv_size);
676 if (filter->preinit) {
677 if (filter->preinit(ret) < 0)
682 av_opt_set_defaults(ret);
683 if (filter->priv_class) {
684 *(const AVClass**)ret->priv = filter->priv_class;
685 av_opt_set_defaults(ret->priv);
688 ret->internal = av_mallocz(sizeof(*ret->internal));
691 ret->internal->execute = default_execute;
693 ret->nb_inputs = avfilter_pad_count(filter->inputs);
694 if (ret->nb_inputs ) {
695 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
696 if (!ret->input_pads)
698 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
699 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
704 ret->nb_outputs = avfilter_pad_count(filter->outputs);
705 if (ret->nb_outputs) {
706 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
707 if (!ret->output_pads)
709 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
710 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
720 av_freep(&ret->inputs);
721 av_freep(&ret->input_pads);
723 av_freep(&ret->outputs);
724 av_freep(&ret->output_pads);
726 av_freep(&ret->priv);
727 av_freep(&ret->internal);
732 static void free_link(AVFilterLink *link)
738 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
740 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
742 av_buffer_unref(&link->hw_frames_ctx);
744 ff_formats_unref(&link->incfg.formats);
745 ff_formats_unref(&link->outcfg.formats);
746 ff_formats_unref(&link->incfg.samplerates);
747 ff_formats_unref(&link->outcfg.samplerates);
748 ff_channel_layouts_unref(&link->incfg.channel_layouts);
749 ff_channel_layouts_unref(&link->outcfg.channel_layouts);
750 avfilter_link_free(&link);
753 void avfilter_free(AVFilterContext *filter)
761 ff_filter_graph_remove_filter(filter->graph, filter);
763 if (filter->filter->uninit)
764 filter->filter->uninit(filter);
766 for (i = 0; i < filter->nb_inputs; i++) {
767 free_link(filter->inputs[i]);
769 for (i = 0; i < filter->nb_outputs; i++) {
770 free_link(filter->outputs[i]);
773 if (filter->filter->priv_class)
774 av_opt_free(filter->priv);
776 av_buffer_unref(&filter->hw_device_ctx);
778 av_freep(&filter->name);
779 av_freep(&filter->input_pads);
780 av_freep(&filter->output_pads);
781 av_freep(&filter->inputs);
782 av_freep(&filter->outputs);
783 av_freep(&filter->priv);
784 while(filter->command_queue){
785 ff_command_queue_pop(filter);
788 av_expr_free(filter->enable);
789 filter->enable = NULL;
790 av_freep(&filter->var_values);
791 av_freep(&filter->internal);
795 int ff_filter_get_nb_threads(AVFilterContext *ctx)
797 if (ctx->nb_threads > 0)
798 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
799 return ctx->graph->nb_threads;
802 static int process_options(AVFilterContext *ctx, AVDictionary **options,
805 const AVOption *o = NULL;
807 char *av_uninit(parsed_key), *av_uninit(value);
815 const char *shorthand = NULL;
817 o = av_opt_next(ctx->priv, o);
819 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
825 ret = av_opt_get_key_value(&args, "=", ":",
826 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
827 &parsed_key, &value);
829 if (ret == AVERROR(EINVAL))
830 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
832 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
840 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
845 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
847 if (av_opt_find(ctx, key, NULL, 0, 0)) {
848 ret = av_opt_set(ctx, key, value, 0);
855 av_dict_set(options, key, value, 0);
856 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
857 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
858 if (ret == AVERROR_OPTION_NOT_FOUND)
859 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
875 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
876 const char *arg, char *res, int res_len, int flags)
880 if (!ctx->filter->priv_class)
882 o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
884 return AVERROR(ENOSYS);
885 return av_opt_set(ctx->priv, cmd, arg, 0);
888 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
892 ret = av_opt_set_dict(ctx, options);
894 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
898 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
899 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
900 ctx->graph->internal->thread_execute) {
901 ctx->thread_type = AVFILTER_THREAD_SLICE;
902 ctx->internal->execute = ctx->graph->internal->thread_execute;
904 ctx->thread_type = 0;
907 if (ctx->filter->priv_class) {
908 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
910 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
915 if (ctx->filter->init_opaque)
916 ret = ctx->filter->init_opaque(ctx, NULL);
917 else if (ctx->filter->init)
918 ret = ctx->filter->init(ctx);
919 else if (ctx->filter->init_dict)
920 ret = ctx->filter->init_dict(ctx, options);
922 if (ctx->enable_str) {
923 ret = set_enable_expr(ctx, ctx->enable_str);
931 int avfilter_init_str(AVFilterContext *filter, const char *args)
933 AVDictionary *options = NULL;
934 AVDictionaryEntry *e;
938 if (!filter->filter->priv_class) {
939 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
940 "options, but options were provided: %s.\n", args);
941 return AVERROR(EINVAL);
944 ret = process_options(filter, &options, args);
949 ret = avfilter_init_dict(filter, &options);
953 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
954 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
955 ret = AVERROR_OPTION_NOT_FOUND;
960 av_dict_free(&options);
965 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
967 return pads[pad_idx].name;
970 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
972 return pads[pad_idx].type;
975 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
977 return ff_filter_frame(link->dst->outputs[0], frame);
980 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
982 int (*filter_frame)(AVFilterLink *, AVFrame *);
983 AVFilterContext *dstctx = link->dst;
984 AVFilterPad *dst = link->dstpad;
987 if (!(filter_frame = dst->filter_frame))
988 filter_frame = default_filter_frame;
990 if (dst->needs_writable) {
991 ret = ff_inlink_make_frame_writable(link, &frame);
996 ff_inlink_process_commands(link, frame);
997 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
999 if (dstctx->is_disabled &&
1000 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1001 filter_frame = default_filter_frame;
1002 ret = filter_frame(link, frame);
1003 link->frame_count_out++;
1007 av_frame_free(&frame);
1011 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1014 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1016 /* Consistency checks */
1017 if (link->type == AVMEDIA_TYPE_VIDEO) {
1018 if (strcmp(link->dst->filter->name, "buffersink") &&
1019 strcmp(link->dst->filter->name, "format") &&
1020 strcmp(link->dst->filter->name, "idet") &&
1021 strcmp(link->dst->filter->name, "null") &&
1022 strcmp(link->dst->filter->name, "scale")) {
1023 av_assert1(frame->format == link->format);
1024 av_assert1(frame->width == link->w);
1025 av_assert1(frame->height == link->h);
1028 if (frame->format != link->format) {
1029 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1032 if (frame->channels != link->channels) {
1033 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1036 if (frame->channel_layout != link->channel_layout) {
1037 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1040 if (frame->sample_rate != link->sample_rate) {
1041 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1046 link->frame_blocked_in = link->frame_wanted_out = 0;
1047 link->frame_count_in++;
1048 filter_unblock(link->dst);
1049 ret = ff_framequeue_add(&link->fifo, frame);
1051 av_frame_free(&frame);
1054 ff_filter_set_ready(link->dst, 300);
1058 av_frame_free(&frame);
1059 return AVERROR_PATCHWELCOME;
1062 static int samples_ready(AVFilterLink *link, unsigned min)
1064 return ff_framequeue_queued_frames(&link->fifo) &&
1065 (ff_framequeue_queued_samples(&link->fifo) >= min ||
1069 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1072 AVFrame *frame0, *frame, *buf;
1073 unsigned nb_samples, nb_frames, i, p;
1076 /* Note: this function relies on no format changes and must only be
1077 called with enough samples. */
1078 av_assert1(samples_ready(link, link->min_samples));
1079 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1080 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1081 *rframe = ff_framequeue_take(&link->fifo);
1087 if (nb_samples + frame->nb_samples > max) {
1088 if (nb_samples < min)
1092 nb_samples += frame->nb_samples;
1094 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1096 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1099 buf = ff_get_audio_buffer(link, nb_samples);
1101 return AVERROR(ENOMEM);
1102 ret = av_frame_copy_props(buf, frame0);
1104 av_frame_free(&buf);
1107 buf->pts = frame0->pts;
1110 for (i = 0; i < nb_frames; i++) {
1111 frame = ff_framequeue_take(&link->fifo);
1112 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1113 frame->nb_samples, link->channels, link->format);
1114 p += frame->nb_samples;
1115 av_frame_free(&frame);
1117 if (p < nb_samples) {
1118 unsigned n = nb_samples - p;
1119 frame = ff_framequeue_peek(&link->fifo, 0);
1120 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1121 link->channels, link->format);
1122 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
1129 static int ff_filter_frame_to_filter(AVFilterLink *link)
1131 AVFrame *frame = NULL;
1132 AVFilterContext *dst = link->dst;
1135 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1136 ret = link->min_samples ?
1137 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1138 ff_inlink_consume_frame(link, &frame);
1144 /* The filter will soon have received a new frame, that may allow it to
1145 produce one or more: unblock its outputs. */
1146 filter_unblock(dst);
1147 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1148 before the frame; ff_filter_frame_framed() will re-increment it. */
1149 link->frame_count_out--;
1150 ret = ff_filter_frame_framed(link, frame);
1151 if (ret < 0 && ret != link->status_out) {
1152 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1154 /* Run once again, to see if several frames were available, or if
1155 the input status has also changed, or any other reason. */
1156 ff_filter_set_ready(dst, 300);
1161 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1163 unsigned out = 0, progress = 0;
1166 av_assert0(!in->status_out);
1167 if (!filter->nb_outputs) {
1168 /* not necessary with the current API and sinks */
1171 while (!in->status_out) {
1172 if (!filter->outputs[out]->status_in) {
1174 ret = ff_request_frame_to_filter(filter->outputs[out]);
1178 if (++out == filter->nb_outputs) {
1180 /* Every output already closed: input no longer interesting
1181 (example: overlay in shortest mode, other input closed). */
1182 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1189 ff_filter_set_ready(filter, 200);
1193 static int ff_filter_activate_default(AVFilterContext *filter)
1197 for (i = 0; i < filter->nb_inputs; i++) {
1198 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1199 return ff_filter_frame_to_filter(filter->inputs[i]);
1202 for (i = 0; i < filter->nb_inputs; i++) {
1203 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1204 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1205 return forward_status_change(filter, filter->inputs[i]);
1208 for (i = 0; i < filter->nb_outputs; i++) {
1209 if (filter->outputs[i]->frame_wanted_out &&
1210 !filter->outputs[i]->frame_blocked_in) {
1211 return ff_request_frame_to_filter(filter->outputs[i]);
1214 return FFERROR_NOT_READY;
1218 Filter scheduling and activation
1220 When a filter is activated, it must:
1221 - if possible, output a frame;
1222 - else, if relevant, forward the input status change;
1223 - else, check outputs for wanted frames and forward the requests.
1225 The following AVFilterLink fields are used for activation:
1229 This field indicates if a frame is needed on this input of the
1230 destination filter. A positive value indicates that a frame is needed
1231 to process queued frames or internal data or to satisfy the
1232 application; a zero value indicates that a frame is not especially
1233 needed but could be processed anyway; a negative value indicates that a
1234 frame would just be queued.
1236 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1237 when requested by the application through a specific API or when it is
1238 set on one of the outputs.
1240 It is cleared when a frame is sent from the source using
1243 It is also cleared when a status change is sent from the source using
1244 ff_avfilter_link_set_in_status().
1248 This field means that the source filter can not generate a frame as is.
1249 Its goal is to avoid repeatedly calling the request_frame() method on
1252 It is set by the framework on all outputs of a filter before activating it.
1254 It is automatically cleared by ff_filter_frame().
1256 It is also automatically cleared by ff_avfilter_link_set_in_status().
1258 It is also cleared on all outputs (using filter_unblock()) when
1259 something happens on an input: processing a frame or changing the
1264 Contains the frames queued on a filter input. If it contains frames and
1265 frame_wanted_out is not set, then the filter can be activated. If that
1266    results in the filter not being able to use these frames, the filter must set
1267 frame_wanted_out to ask for more frames.
1269 - status_in and status_in_pts:
1271 Status (EOF or error code) of the link and timestamp of the status
1272 change (in link time base, same as frames) as seen from the input of
1273 the link. The status change is considered happening after the frames
1276 It is set by the source filter using ff_avfilter_link_set_in_status().
1280 Status of the link as seen from the output of the link. The status
1281 change is considered having already happened.
1283 It is set by the destination filter using
1284 ff_avfilter_link_set_out_status().
1286 Filters are activated according to the ready field, set using
1287 ff_filter_set_ready(). Eventually, a priority queue will be used.
1288 ff_filter_set_ready() is called whenever anything could cause progress to
1289 be possible. Marking a filter ready when it is not is not a problem,
1290 except for the small overhead it causes.
1292 Conditions that cause a filter to be marked ready are:
1294 - frames added on an input link;
1296 - changes in the input or output status of an input link;
1298 - requests for a frame on an output link;
1300 - after any actual processing using the legacy methods (filter_frame(),
1301 and request_frame() to acknowledge status changes), to run once more
1302 and check if enough input was present for several frames.
1304 Examples of scenarios to consider:
1306 - buffersrc: activate if frame_wanted_out to notify the application;
1307 activate when the application adds a frame to push it immediately.
1309 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1311 - concat (not at stitch points): can process a frame on any output.
1312 Activate if frame_wanted_out on output to forward on the corresponding
1313 input. Activate when a frame is present on input to process it
1316 - framesync: needs at least one frame on each input; extra frames on the
1317 wrong input will accumulate. When a frame is first added on one input,
1318 set frame_wanted_out<0 on it to avoid getting more (would trigger
1319 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1321 Activation of old filters:
1323 In order to activate a filter implementing the legacy filter_frame() and
1324 request_frame() methods, perform the first possible of the following
1327 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1328 frame and call filter_frame().
1330 Rationale: filter frames as soon as possible instead of leaving them
1331 queued; frame_wanted_out < 0 is not possible since the old API does not
1332 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1333 when min_samples > 0 and there are not enough samples queued.
1335 - If an input has status_in set but not status_out, try to call
1336 request_frame() on one of the outputs in the hope that it will trigger
1337 request_frame() on the input with status_in and acknowledge it. This is
1338 awkward and fragile, filters with several inputs or outputs should be
1339 updated to direct activation as soon as possible.
1341 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1344 Rationale: checking frame_blocked_in is necessary to avoid requesting
1345 repeatedly on a blocked input if another is not blocked (example:
1346 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1349 int ff_filter_activate(AVFilterContext *filter)
1353 /* Generic timeline support is not yet implemented but should be easy */
1354 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1355 filter->filter->activate));
1357 ret = filter->filter->activate ? filter->filter->activate(filter) :
1358 ff_filter_activate_default(filter);
1359 if (ret == FFERROR_NOT_READY)
1364 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1366 *rpts = link->current_pts;
1367 if (ff_framequeue_queued_frames(&link->fifo))
1368 return *rstatus = 0;
1369 if (link->status_out)
1370 return *rstatus = link->status_out;
1371 if (!link->status_in)
1372 return *rstatus = 0;
1373 *rstatus = link->status_out = link->status_in;
1374 ff_update_link_current_pts(link, link->status_in_pts);
1375 *rpts = link->current_pts;
1379 size_t ff_inlink_queued_frames(AVFilterLink *link)
1381 return ff_framequeue_queued_frames(&link->fifo);
1384 int ff_inlink_check_available_frame(AVFilterLink *link)
1386 return ff_framequeue_queued_frames(&link->fifo) > 0;
1389 int ff_inlink_queued_samples(AVFilterLink *link)
1391 return ff_framequeue_queued_samples(&link->fifo);
1394 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1396 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1398 return samples >= min || (link->status_in && samples);
1401 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1403 ff_update_link_current_pts(link, frame->pts);
1404 ff_inlink_process_commands(link, frame);
1405 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1406 link->frame_count_out++;
1409 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1414 if (!ff_inlink_check_available_frame(link))
1417 if (link->fifo.samples_skipped) {
1418 frame = ff_framequeue_peek(&link->fifo, 0);
1419 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1422 frame = ff_framequeue_take(&link->fifo);
1423 consume_update(link, frame);
1428 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1436 if (!ff_inlink_check_available_samples(link, min))
1438 if (link->status_in)
1439 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1440 ret = take_samples(link, min, max, &frame);
1443 consume_update(link, frame);
1448 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1450 return ff_framequeue_peek(&link->fifo, idx);
1453 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1455 AVFrame *frame = *rframe;
1459 if (av_frame_is_writable(frame))
1461 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1463 switch (link->type) {
1464 case AVMEDIA_TYPE_VIDEO:
1465 out = ff_get_video_buffer(link, link->w, link->h);
1467 case AVMEDIA_TYPE_AUDIO:
1468 out = ff_get_audio_buffer(link, frame->nb_samples);
1471 return AVERROR(EINVAL);
1474 return AVERROR(ENOMEM);
1476 ret = av_frame_copy_props(out, frame);
1478 av_frame_free(&out);
1482 switch (link->type) {
1483 case AVMEDIA_TYPE_VIDEO:
1484 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1485 frame->format, frame->width, frame->height);
1487 case AVMEDIA_TYPE_AUDIO:
1488 av_samples_copy(out->extended_data, frame->extended_data,
1489 0, 0, frame->nb_samples,
1494 av_assert0(!"reached");
1497 av_frame_free(&frame);
1502 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1504 AVFilterCommand *cmd = link->dst->command_queue;
1506 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1507 av_log(link->dst, AV_LOG_DEBUG,
1508 "Processing command time:%f command:%s arg:%s\n",
1509 cmd->time, cmd->command, cmd->arg);
1510 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1511 ff_command_queue_pop(link->dst);
1512 cmd= link->dst->command_queue;
1517 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1519 AVFilterContext *dstctx = link->dst;
1520 int64_t pts = frame->pts;
1521 int64_t pos = frame->pkt_pos;
1523 if (!dstctx->enable_str)
1526 dstctx->var_values[VAR_N] = link->frame_count_out;
1527 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1528 dstctx->var_values[VAR_W] = link->w;
1529 dstctx->var_values[VAR_H] = link->h;
1530 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1532 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1535 void ff_inlink_request_frame(AVFilterLink *link)
1537 av_assert1(!link->status_in);
1538 av_assert1(!link->status_out);
1539 link->frame_wanted_out = 1;
1540 ff_filter_set_ready(link->src, 100);
1543 void ff_inlink_set_status(AVFilterLink *link, int status)
1545 if (link->status_out)
1547 link->frame_wanted_out = 0;
1548 link->frame_blocked_in = 0;
1549 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1550 while (ff_framequeue_queued_frames(&link->fifo)) {
1551 AVFrame *frame = ff_framequeue_take(&link->fifo);
1552 av_frame_free(&frame);
1554 if (!link->status_in)
1555 link->status_in = status;
1558 int ff_outlink_get_status(AVFilterLink *link)
1560 return link->status_in;
1563 const AVClass *avfilter_get_class(void)
1565 return &avfilter_class;
1568 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1569 int default_pool_size)
1571 AVHWFramesContext *frames;
1573 // Must already be set by caller.
1574 av_assert0(link->hw_frames_ctx);
1576 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1578 if (frames->initial_pool_size == 0) {
1579 // Dynamic allocation is necessarily supported.
1580 } else if (avctx->extra_hw_frames >= 0) {
1581 frames->initial_pool_size += avctx->extra_hw_frames;
1583 frames->initial_pool_size = default_pool_size;