3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Embedded version marker string for the libavfilter binary (greppable in the object file). */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log a frame reference: pointers, linesizes, pts/pos, then per-media
 * details (video aspect/size/interlacing, or audio sample count).
 * 'end' appends a newline to terminate the log record.
 * NOTE(review): this excerpt has gaps in the embedded line numbers — several
 * interior lines (braces, the leading ff_tlog() call, the video/audio
 * conditionals) appear to be missing; code is reproduced as shown. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
/* Video-specific fields: aspect ratio, dimensions, interlacing flag, picture type. */
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
/* Audio-specific fields (only when the frame carries samples). */
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
/* Pop and free the head entry of the filter's pending command queue.
 * NOTE(review): upstream also frees c->arg and the node c itself; those lines
 * appear to be missing from this excerpt (gaps in the embedded line numbers)
 * — confirm against the full source before assuming a leak. */
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
/* Insert a new pad (and its NULL link slot) at position idx in the given
 * pads/links arrays, growing both by one element and shifting the tail up.
 * padidx_off is the byte offset inside AVFilterLink of the srcpad/dstpad
 * index field, which must be re-incremented for every shifted link.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): interior lines (e.g. the assignments of the realloc'ed
 * arrays back into *pads/*links, count increment, return 0) appear to be
 * missing from this excerpt; code is reproduced as shown. */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
/* Clamp idx so insertion past the end appends. */
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
/* Shift the tail up by one slot to open a hole at idx. */
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
/* Fix up the pad-index field stored inside every link that moved. */
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Link output pad srcpad of filter src to input pad dstpad of filter dst.
 * Fails with EINVAL if either pad index is out of range, already linked, or
 * the pads' media types differ. Both filters must already belong to the same
 * graph. Returns 0 on success, a negative AVERROR otherwise.
 * NOTE(review): interior lines (the link NULL check after av_mallocz, the
 * src/dst field assignments, format/samplerate init, final return) appear to
 * be missing from this excerpt; code is reproduced as shown. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
/* Reject out-of-range pad indices and pads that are already connected. */
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
/* Media types (audio/video) of the two pads must match. */
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
/* Both endpoints share the same link object. */
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
/* Relies on the "none" sentinels being -1 so one field can hold either. */
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Free a link and everything it owns (partial buffer, frame FIFO, frame
 * pool), then clear the caller's pointer.
 * NOTE(review): the NULL guard and the final av_freep(link) appear to be
 * missing from this excerpt; code is reproduced as shown. */
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
/* Deprecated accessor (compiled only under FF_API_FILTER_GET_SET):
 * returns the channel count negotiated on the link.
 * NOTE(review): braces and the matching #endif appear to be missing from
 * this excerpt; code is reproduced as shown. */
186 #if FF_API_FILTER_GET_SET
187 int avfilter_link_get_channels(AVFilterLink *link)
189 return link->channels;
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
195 filter->ready = FFMAX(filter->ready, priority);
199 * Clear frame_blocked_in on all outputs.
200 * This is necessary whenever something changes on input.
202 static void filter_unblock(AVFilterContext *filter)
206 for (i = 0; i < filter->nb_outputs; i++)
207 filter->outputs[i]->frame_blocked_in = 0;
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
213 if (link->status_in == status)
215 av_assert0(!link->status_in);
216 link->status_in = status;
217 link->status_in_pts = pts;
218 link->frame_wanted_out = 0;
219 link->frame_blocked_in = 0;
220 filter_unblock(link->dst);
221 ff_filter_set_ready(link->dst, 200);
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
226 av_assert0(!link->frame_wanted_out);
227 av_assert0(!link->status_out);
228 link->status_out = status;
229 if (pts != AV_NOPTS_VALUE)
230 ff_update_link_current_pts(link, pts);
231 filter_unblock(link->dst);
232 ff_filter_set_ready(link->src, 200);
#if FF_API_FILTER_LINK_SET_CLOSED
/**
 * Deprecated public wrapper: translate the old boolean "closed" flag into
 * the modern out-status mechanism (AVERROR_EOF when closing, 0 to reopen).
 */
void avfilter_link_set_closed(AVFilterLink *link, int closed)
{
    ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
}
#endif
/* Splice filter filt into an existing link: the original link is re-pointed
 * at filt's input pad filt_srcpad_idx, and a new link is created from filt's
 * output pad filt_dstpad_idx to the original destination. Any format lists
 * already negotiated on the old link are moved to the new downstream link.
 * NOTE(review): interior lines (ret declaration, the avfilter_link error
 * return path, final return) appear to be missing from this excerpt; code is
 * reproduced as shown. */
241 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
242 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
/* Index of the pad this link occupies on the destination filter. */
245 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
247 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
248 "between the filter '%s' and the filter '%s'\n",
249 filt->name, link->src->name, link->dst->name);
/* Detach the destination, then try to connect filt's output to it. */
251 link->dst->inputs[dstpad_idx] = NULL;
252 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
253 /* failed to link output filter to new filter */
254 link->dst->inputs[dstpad_idx] = link;
258 /* re-hookup the link to the new destination filter we inserted */
260 link->dstpad = &filt->input_pads[filt_srcpad_idx];
261 filt->inputs[filt_srcpad_idx] = link;
263 /* if any information on supported media formats already exists on the
264 * link, we need to preserve that */
265 if (link->outcfg.formats)
266 ff_formats_changeref(&link->outcfg.formats,
267 &filt->outputs[filt_dstpad_idx]->outcfg.formats);
268 if (link->outcfg.samplerates)
269 ff_formats_changeref(&link->outcfg.samplerates,
270 &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
271 if (link->outcfg.channel_layouts)
272 ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
273 &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
/* Recursively configure all input links of a filter: for each uninitialized
 * link, first configure the source filter's own inputs, run the output pad's
 * config_props (or require one for sources/multi-input filters), fill in
 * defaulted per-media properties (time base, SAR, frame rate, dimensions,
 * sample rate), propagate hw_frames_ctx for non-hwframe-aware filters, then
 * run the input pad's config_props. Uses init_state to detect cycles.
 * NOTE(review): many interior lines (switch cases/labels, closing braces,
 * return paths) appear to be missing from this excerpt; code is reproduced
 * as shown. */
278 int avfilter_config_links(AVFilterContext *filter)
280 int (*config_link)(AVFilterLink *);
284 for (i = 0; i < filter->nb_inputs; i ++) {
285 AVFilterLink *link = filter->inputs[i];
286 AVFilterLink *inlink;
289 if (!link->src || !link->dst) {
290 av_log(filter, AV_LOG_ERROR,
291 "Not all input and output are properly linked (%d).\n", i);
292 return AVERROR(EINVAL);
/* First input of the upstream filter, used as the source of defaults. */
295 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
297 link->current_pts_us = AV_NOPTS_VALUE;
299 switch (link->init_state) {
/* STARTINIT seen again while still configuring => circular graph. */
302 case AVLINK_STARTINIT:
303 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
306 link->init_state = AVLINK_STARTINIT;
/* Configure upstream before this link. */
308 if ((ret = avfilter_config_links(link->src)) < 0)
311 if (!(config_link = link->srcpad->config_props)) {
312 if (link->src->nb_inputs != 1) {
313 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
314 "with more than one input "
315 "must set config_props() "
316 "callbacks on all outputs\n");
317 return AVERROR(EINVAL);
319 } else if ((ret = config_link(link)) < 0) {
320 av_log(link->src, AV_LOG_ERROR,
321 "Failed to configure output pad on %s\n",
/* Per-media defaults: inherit unset properties from the upstream link. */
326 switch (link->type) {
327 case AVMEDIA_TYPE_VIDEO:
328 if (!link->time_base.num && !link->time_base.den)
329 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
331 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
332 link->sample_aspect_ratio = inlink ?
333 inlink->sample_aspect_ratio : (AVRational){1,1};
336 if (!link->frame_rate.num && !link->frame_rate.den)
337 link->frame_rate = inlink->frame_rate;
342 } else if (!link->w || !link->h) {
343 av_log(link->src, AV_LOG_ERROR,
344 "Video source filters must set their output link's "
345 "width and height\n");
346 return AVERROR(EINVAL);
350 case AVMEDIA_TYPE_AUDIO:
352 if (!link->time_base.num && !link->time_base.den)
353 link->time_base = inlink->time_base;
/* Fallback audio time base: 1/sample_rate. */
356 if (!link->time_base.num && !link->time_base.den)
357 link->time_base = (AVRational) {1, link->sample_rate};
/* Propagate hardware frames context through filters that are not
 * hwframe-aware; aware filters must set it themselves. */
360 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
361 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
362 av_assert0(!link->hw_frames_ctx &&
363 "should not be set by non-hwframe-aware filter");
364 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
365 if (!link->hw_frames_ctx)
366 return AVERROR(ENOMEM);
/* Finally let the destination pad react to the configured link. */
369 if ((config_link = link->dstpad->config_props))
370 if ((ret = config_link(link)) < 0) {
371 av_log(link->dst, AV_LOG_ERROR,
372 "Failed to configure input pad on %s\n",
377 link->init_state = AVLINK_INIT;
/* Trace-log a link: size/format and endpoint filter names for video,
 * rate/channel-layout/format for audio. 'end' appends a newline.
 * NOTE(review): interior lines (the ff_tlog call openers, the else branch
 * opener, buf declaration, closing braces) appear to be missing from this
 * excerpt; code is reproduced as shown. */
384 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
386 if (link->type == AVMEDIA_TYPE_VIDEO) {
388 "link[%p s:%dx%d fmt:%s %s->%s]%s",
389 link, link->w, link->h,
390 av_get_pix_fmt_name(link->format),
391 link->src ? link->src->filter->name : "",
392 link->dst ? link->dst->filter->name : "",
/* Audio branch: render the channel layout into a local string first. */
396 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
399 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
400 link, (int)link->sample_rate, buf,
401 av_get_sample_fmt_name(link->format),
402 link->src ? link->src->filter->name : "",
403 link->dst ? link->dst->filter->name : "",
/* Legacy-API frame request on a link (only valid for filters without an
 * activate callback, as asserted). If an output status is already set it is
 * returned; if the input side has signalled a status and the FIFO is empty,
 * the status change is acknowledged by propagating it to the output side.
 * Otherwise the request is recorded and the source filter is scheduled.
 * NOTE(review): interior lines (return statements inside the nested ifs,
 * final return, closing braces) appear to be missing from this excerpt; code
 * is reproduced as shown. */
408 int ff_request_frame(AVFilterLink *link)
410 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
412 av_assert1(!link->dst->filter->activate);
413 if (link->status_out)
414 return link->status_out;
415 if (link->status_in) {
416 if (ff_framequeue_queued_frames(&link->fifo)) {
417 av_assert1(!link->frame_wanted_out);
418 av_assert1(link->dst->ready >= 300);
421 /* Acknowledge status change. Filters using ff_request_frame() will
422 handle the change automatically. Filters can also check the
423 status directly but none do yet. */
424 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
425 return link->status_out;
/* No status pending: ask the source for a frame. */
428 link->frame_wanted_out = 1;
429 ff_filter_set_ready(link->src, 100);
/* Estimate a pts (in link_time_base) for a status change on an output of
 * ctx: take the minimum current_pts over inputs that already carry the same
 * out-status; failing that, fall back to the minimum status_in_pts over all
 * inputs (with a warning since that is less reliable).
 * NOTE(review): interior lines (the "if (r < INT64_MAX) return r;" steps and
 * closing braces) appear to be missing from this excerpt; code is reproduced
 * as shown. */
433 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
436 int64_t r = INT64_MAX;
438 for (i = 0; i < ctx->nb_inputs; i++)
439 if (ctx->inputs[i]->status_out == status)
440 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
443 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
444 for (i = 0; i < ctx->nb_inputs; i++)
445 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
/* Nothing usable found. */
448 return AV_NOPTS_VALUE;
/* Forward a frame request to the source filter of a link: call its
 * request_frame callback if present, otherwise recurse on the source's first
 * input. A non-EAGAIN error result is converted into an in-status on the
 * link, with a pts guessed from the source's inputs.
 * NOTE(review): interior lines (ret declaration, the EOF-to-0 translation at
 * the end, closing brace) appear to be missing from this excerpt; code is
 * reproduced as shown. */
451 static int ff_request_frame_to_filter(AVFilterLink *link)
455 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
456 /* Assume the filter is blocked, let the method clear it if not */
457 link->frame_blocked_in = 1;
458 if (link->srcpad->request_frame)
459 ret = link->srcpad->request_frame(link);
460 else if (link->src->inputs[0])
461 ret = ff_request_frame(link->src->inputs[0]);
/* Treat real errors (not EAGAIN, not an already-known status) as a
 * status change on the link. */
463 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
464 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
465 if (ret == AVERROR_EOF)
/* Names of the variables usable in the timeline 'enable' expression.
 * NOTE(review): the array's entries and terminator are missing from this
 * excerpt; only the opening line is shown. */
471 static const char *const var_names[] = {
/* Parse and install a timeline 'enable' expression on a filter context.
 * Rejects filters without AVFILTER_FLAG_SUPPORT_TIMELINE, lazily allocates
 * the per-filter variable array, and on success replaces both the compiled
 * expression and the stored expression string.
 * NOTE(review): interior lines (expr_dup NULL check body, error cleanup,
 * freeing of the old expression, return) appear to be missing from this
 * excerpt; code is reproduced as shown. */
489 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
/* Keep the old expression so it can be restored/freed appropriately. */
493 AVExpr *old = ctx->enable;
495 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
496 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
497 "with filter '%s'\n", ctx->filter->name);
498 return AVERROR_PATCHWELCOME;
501 expr_dup = av_strdup(expr);
503 return AVERROR(ENOMEM);
/* Lazily allocate the variable value array on first use. */
505 if (!ctx->var_values) {
506 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
507 if (!ctx->var_values) {
509 return AVERROR(ENOMEM);
513 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
514 NULL, NULL, NULL, NULL, 0, ctx->priv);
516 av_log(ctx->priv, AV_LOG_ERROR,
517 "Error when evaluating the expression '%s' for enable\n",
/* Success: the duplicated string becomes the stored enable_str. */
524 av_free(ctx->enable_str);
525 ctx->enable_str = expr_dup;
529 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
531 if (pts == AV_NOPTS_VALUE)
533 link->current_pts = pts;
534 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
535 /* TODO use duration */
536 if (link->graph && link->age_index >= 0)
537 ff_avfilter_graph_update_heap(link->graph, link);
/* Public command dispatch for a filter instance: "ping" answers with a pong
 * string (into res or a local buffer), "enable" re-parses the timeline
 * expression, and anything else is forwarded to the filter's own
 * process_command callback when present; otherwise ENOSYS.
 * NOTE(review): interior lines (the res/local_res selection, return 0 paths,
 * closing braces) appear to be missing from this excerpt; code is reproduced
 * as shown. */
540 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
542 if(!strcmp(cmd, "ping")){
543 char local_res[256] = {0};
547 res_len = sizeof(local_res);
549 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
/* When only the local buffer was used, surface the pong via the log. */
550 if (res == local_res)
551 av_log(filter, AV_LOG_INFO, "%s", res);
553 }else if(!strcmp(cmd, "enable")) {
554 return set_enable_expr(filter, arg);
555 }else if(filter->filter->process_command) {
556 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
558 return AVERROR(ENOSYS);
/* Count the entries of a NULL-name-terminated AVFilterPad array.
 * NOTE(review): the NULL-pointer guard, pads++ step and return appear to be
 * missing from this excerpt; code is reproduced as shown. */
561 int avfilter_pad_count(const AVFilterPad *pads)
568 for (count = 0; pads->name; count++)
573 static const char *default_filter_name(void *filter_ctx)
575 AVFilterContext *ctx = filter_ctx;
576 return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the filter's private context (which
 * starts with an AVClass pointer) as the single child object.
 * NOTE(review): the return statements ("return ctx->priv;" / "return NULL;")
 * appear to be missing from this excerpt; code is reproduced as shown. */
579 static void *filter_child_next(void *obj, void *prev)
581 AVFilterContext *ctx = obj;
/* Only one child: returned on the first call and only when it exists. */
582 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
/* Deprecated AVClass child_class_next callback (FF_API_CHILD_CLASS_NEXT):
 * iterate over registered filters and return the priv_class following prev.
 * NOTE(review): interior lines (opaque declaration, loop bodies/breaks,
 * return NULL, closing brace, #endif) appear to be missing from this
 * excerpt; code is reproduced as shown. */
587 #if FF_API_CHILD_CLASS_NEXT
588 static const AVClass *filter_child_class_next(const AVClass *prev)
591 const AVFilter *f = NULL;
593 /* find the filter that corresponds to prev */
594 while (prev && (f = av_filter_iterate(&opaque)))
595 if (f->priv_class == prev)
598 /* could not find filter corresponding to prev */
602 /* find next filter with specific options */
603 while ((f = av_filter_iterate(&opaque)))
605 return f->priv_class;
/* AVClass child_class_iterate callback: walk registered filters via the
 * caller-held iterator and return the next one that has a priv_class.
 * NOTE(review): the f declaration, the priv_class test and the trailing
 * "return NULL;" appear to be missing from this excerpt; code is reproduced
 * as shown. */
611 static const AVClass *filter_child_class_iterate(void **iter)
615 while ((f = av_filter_iterate(iter)))
617 return f->priv_class;
/* Generic AVOptions shared by every filter instance: threading controls,
 * the timeline 'enable' expression (runtime-settable: TFLAGS), and the
 * extra hardware frame count.
 * NOTE(review): additional entries and the { NULL } terminator appear to be
 * missing from this excerpt; code is reproduced as shown. */
622 #define OFFSET(x) offsetof(AVFilterContext, x)
623 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
624 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
625 static const AVOption avfilter_options[] = {
626 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
627 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
628 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
629 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
630 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
631 { .i64 = 0 }, 0, INT_MAX, FLAGS },
632 { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
633 OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
/* The AVClass describing AVFilterContext for the AVOptions/logging system,
 * wiring in the name/child/option callbacks defined above. The deprecated
 * child_class_next member is compiled only under FF_API_CHILD_CLASS_NEXT.
 * NOTE(review): the #endif for the #if and the closing "};" appear to be
 * missing from this excerpt; code is reproduced as shown. */
637 static const AVClass avfilter_class = {
638 .class_name = "AVFilter",
639 .item_name = default_filter_name,
640 .version = LIBAVUTIL_VERSION_INT,
641 .category = AV_CLASS_CATEGORY_FILTER,
642 .child_next = filter_child_next,
643 #if FF_API_CHILD_CLASS_NEXT
644 .child_class_next = filter_child_class_next,
646 .child_class_iterate = filter_child_class_iterate,
647 .option = avfilter_options,
/* Default (single-threaded) execute callback: run func sequentially for each
 * of the nb_jobs jobs, optionally recording per-job results in ret[].
 * NOTE(review): the storing of r into ret[i] and the final return appear to
 * be missing from this excerpt; code is reproduced as shown. */
650 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
651 int *ret, int nb_jobs)
655 for (i = 0; i < nb_jobs; i++) {
656 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for the given filter:
 * class pointer, instance name, private data (with its priv_class and option
 * defaults), the internal struct with the default execute callback, and the
 * input/output pad and link arrays sized from the filter's pad lists.
 * On any allocation failure everything is torn down (tail labels) and NULL
 * is (presumably) returned.
 * NOTE(review): many interior lines (NULL checks with goto err, preinit
 * error path, return statements, the err: label itself) appear to be missing
 * from this excerpt; code is reproduced as shown. */
663 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
665 AVFilterContext *ret;
671 ret = av_mallocz(sizeof(AVFilterContext));
675 ret->av_class = &avfilter_class;
676 ret->filter = filter;
677 ret->name = inst_name ? av_strdup(inst_name) : NULL;
678 if (filter->priv_size) {
679 ret->priv = av_mallocz(filter->priv_size);
683 if (filter->preinit) {
684 if (filter->preinit(ret) < 0)
/* Apply generic option defaults, then the filter's private defaults. */
689 av_opt_set_defaults(ret);
690 if (filter->priv_class) {
691 *(const AVClass**)ret->priv = filter->priv_class;
692 av_opt_set_defaults(ret->priv);
695 ret->internal = av_mallocz(sizeof(*ret->internal));
698 ret->internal->execute = default_execute;
/* Duplicate the filter's static pad descriptions into per-instance arrays. */
700 ret->nb_inputs = avfilter_pad_count(filter->inputs);
701 if (ret->nb_inputs ) {
702 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
703 if (!ret->input_pads)
705 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
706 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
711 ret->nb_outputs = avfilter_pad_count(filter->outputs);
712 if (ret->nb_outputs) {
713 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
714 if (!ret->output_pads)
716 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
717 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* Error-path teardown of everything allocated above. */
727 av_freep(&ret->inputs);
728 av_freep(&ret->input_pads);
730 av_freep(&ret->outputs);
731 av_freep(&ret->output_pads);
733 av_freep(&ret->priv);
734 av_freep(&ret->internal);
/* Detach a link from both endpoint filters (clearing their pad slots),
 * release its hw frames context and all negotiated format lists, then free
 * the link itself via avfilter_link_free().
 * NOTE(review): the NULL guard and the "if (link->src)/if (link->dst)"
 * conditions around the detach assignments appear to be missing from this
 * excerpt; code is reproduced as shown. */
739 static void free_link(AVFilterLink *link)
745 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
747 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
749 av_buffer_unref(&link->hw_frames_ctx);
/* Drop every format/samplerate/channel-layout list still referenced. */
751 ff_formats_unref(&link->incfg.formats);
752 ff_formats_unref(&link->outcfg.formats);
753 ff_formats_unref(&link->incfg.samplerates);
754 ff_formats_unref(&link->outcfg.samplerates);
755 ff_channel_layouts_unref(&link->incfg.channel_layouts);
756 ff_channel_layouts_unref(&link->outcfg.channel_layouts);
757 avfilter_link_free(&link);
/* Destroy a filter instance: remove it from its graph, run the filter's
 * uninit callback, free all links, private options, hw device context, pad
 * and link arrays, the pending command queue, the enable expression and its
 * variables, the internal struct, and finally the context itself.
 * NOTE(review): interior lines (NULL guard, the "if (filter->graph)" test,
 * loop braces, final av_free(filter)) appear to be missing from this
 * excerpt; code is reproduced as shown. */
760 void avfilter_free(AVFilterContext *filter)
768 ff_filter_graph_remove_filter(filter->graph, filter);
770 if (filter->filter->uninit)
771 filter->filter->uninit(filter);
773 for (i = 0; i < filter->nb_inputs; i++) {
774 free_link(filter->inputs[i]);
776 for (i = 0; i < filter->nb_outputs; i++) {
777 free_link(filter->outputs[i]);
/* av_opt_free only applies when the private context carries options. */
780 if (filter->filter->priv_class)
781 av_opt_free(filter->priv);
783 av_buffer_unref(&filter->hw_device_ctx);
785 av_freep(&filter->name);
786 av_freep(&filter->input_pads);
787 av_freep(&filter->output_pads);
788 av_freep(&filter->inputs);
789 av_freep(&filter->outputs);
790 av_freep(&filter->priv);
/* Drain any commands still queued against this instance. */
791 while(filter->command_queue){
792 ff_command_queue_pop(filter);
795 av_expr_free(filter->enable);
796 filter->enable = NULL;
797 av_freep(&filter->var_values);
798 av_freep(&filter->internal);
802 int ff_filter_get_nb_threads(AVFilterContext *ctx)
804 if (ctx->nb_threads > 0)
805 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
806 return ctx->graph->nb_threads;
/* Parse a filter-argument string of key=value pairs (':' separated),
 * supporting shorthand (positional) notation for the leading private
 * options. Generic AVFilterContext options are applied directly; everything
 * else is stored into *options and also tried against the private context.
 * NOTE(review): many interior lines (args/offset parameters, the parsing
 * loop structure, key selection between parsed_key and shorthand, count and
 * return handling) appear to be missing from this excerpt; code is
 * reproduced as shown. */
809 static int process_options(AVFilterContext *ctx, AVDictionary **options,
812 const AVOption *o = NULL;
814 char *av_uninit(parsed_key), *av_uninit(value);
822 const char *shorthand = NULL;
/* Advance to the next private option usable as a shorthand key. */
824 o = av_opt_next(ctx->priv, o);
826 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
832 ret = av_opt_get_key_value(&args, "=", ":",
833 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
834 &parsed_key, &value);
836 if (ret == AVERROR(EINVAL))
837 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
839 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* An explicit key disables shorthand for the rest of the string. */
847 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
852 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
/* Generic (AVFilterContext-level) options are set directly on ctx. */
854 if (av_opt_find(ctx, key, NULL, 0, 0)) {
855 ret = av_opt_set(ctx, key, value, 0);
862 av_dict_set(options, key, value, 0);
863 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
864 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
865 if (ret == AVERROR_OPTION_NOT_FOUND)
866 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
/* Default process_command implementation for filters: treat the command as
 * a runtime-settable private AVOption and apply it; ENOSYS when the filter
 * has no private class or no matching runtime option.
 * NOTE(review): the "return AVERROR(ENOSYS);" after the priv_class check and
 * the "if (!o)" guard appear to be missing from this excerpt; code is
 * reproduced as shown. */
882 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
883 const char *arg, char *res, int res_len, int flags)
887 if (!ctx->filter->priv_class)
/* Only options flagged RUNTIME_PARAM|FILTERING_PARAM may be set live. */
889 o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
891 return AVERROR(ENOSYS);
892 return av_opt_set(ctx->priv, cmd, arg, 0);
/* Initialize a filter from an options dictionary: apply generic options,
 * decide the effective threading mode (slice threading only when the filter,
 * the graph and an available thread_execute all allow it), apply private
 * options, run the appropriate init callback (init_opaque / init /
 * init_dict), and finally install any 'enable' timeline expression.
 * NOTE(review): interior lines (ret declaration, early returns, closing
 * braces, final return) appear to be missing from this excerpt; code is
 * reproduced as shown. */
895 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
899 ret = av_opt_set_dict(ctx, options);
901 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
/* Slice threading requires agreement of filter flags, graph settings and
 * a graph-provided thread_execute callback. */
905 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
906 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
907 ctx->graph->internal->thread_execute) {
908 ctx->thread_type = AVFILTER_THREAD_SLICE;
909 ctx->internal->execute = ctx->graph->internal->thread_execute;
911 ctx->thread_type = 0;
914 if (ctx->filter->priv_class) {
915 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
917 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
/* Exactly one of the init callbacks is invoked, in this priority order. */
922 if (ctx->filter->init_opaque)
923 ret = ctx->filter->init_opaque(ctx, NULL);
924 else if (ctx->filter->init)
925 ret = ctx->filter->init(ctx);
926 else if (ctx->filter->init_dict)
927 ret = ctx->filter->init_dict(ctx, options);
929 if (ctx->enable_str) {
930 ret = set_enable_expr(ctx, ctx->enable_str);
/* Initialize a filter from an argument string: reject arguments for filters
 * without options, parse them into a dictionary via process_options(), run
 * avfilter_init_dict(), and fail with OPTION_NOT_FOUND if any entries were
 * left unconsumed. The dictionary is always freed before returning.
 * NOTE(review): interior lines (the "if (args && *args)" guard, fail label,
 * return ret) appear to be missing from this excerpt; code is reproduced as
 * shown. */
938 int avfilter_init_str(AVFilterContext *filter, const char *args)
940 AVDictionary *options = NULL;
941 AVDictionaryEntry *e;
945 if (!filter->filter->priv_class) {
946 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
947 "options, but options were provided: %s.\n", args);
948 return AVERROR(EINVAL);
951 ret = process_options(filter, &options, args);
956 ret = avfilter_init_dict(filter, &options);
/* Any entry still in the dictionary names an unknown option. */
960 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
961 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
962 ret = AVERROR_OPTION_NOT_FOUND;
967 av_dict_free(&options);
972 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
974 return pads[pad_idx].name;
977 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
979 return pads[pad_idx].type;
982 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
984 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to a destination pad: resolve the filter_frame callback
 * (default passthrough when absent), make the frame writable when the pad
 * requires it, process queued commands and the timeline 'enable' expression
 * (disabled generic-timeline filters fall back to passthrough), then invoke
 * the callback and bump frame_count_out.
 * NOTE(review): interior lines (ret declaration, the goto-fail error path
 * after the writable check, return ret, fail label) appear to be missing
 * from this excerpt; code is reproduced as shown. */
987 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
989 int (*filter_frame)(AVFilterLink *, AVFrame *);
990 AVFilterContext *dstctx = link->dst;
991 AVFilterPad *dst = link->dstpad;
994 if (!(filter_frame = dst->filter_frame))
995 filter_frame = default_filter_frame;
997 if (dst->needs_writable) {
998 ret = ff_inlink_make_frame_writable(link, &frame);
1003 ff_inlink_process_commands(link, frame);
/* Evaluate the timeline expression at this frame's position. */
1004 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1006 if (dstctx->is_disabled &&
1007 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1008 filter_frame = default_filter_frame;
1009 ret = filter_frame(link, frame);
1010 link->frame_count_out++;
/* Error path: the frame is owned here and must be released. */
1014 av_frame_free(&frame);
/* Queue a frame on a link for later delivery to the destination filter.
 * Debug consistency checks compare the frame's properties with the link's
 * negotiated ones (a handful of filters that legitimately change video
 * properties are exempted by name); mismatches on audio links are treated
 * as hard errors. On success the frame enters the link FIFO, request/block
 * state is cleared and the destination is scheduled at priority 300.
 * NOTE(review): interior lines (the audio else-branch opener, goto error
 * statements, return 0, the error label) appear to be missing from this
 * excerpt; code is reproduced as shown. */
1018 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1021 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1023 /* Consistency checks */
1024 if (link->type == AVMEDIA_TYPE_VIDEO) {
/* These filters are allowed to receive frames that differ from the
 * link's negotiated properties, so skip the asserts for them. */
1025 if (strcmp(link->dst->filter->name, "buffersink") &&
1026 strcmp(link->dst->filter->name, "format") &&
1027 strcmp(link->dst->filter->name, "idet") &&
1028 strcmp(link->dst->filter->name, "null") &&
1029 strcmp(link->dst->filter->name, "scale")) {
1030 av_assert1(frame->format == link->format);
1031 av_assert1(frame->width == link->w);
1032 av_assert1(frame->height == link->h);
/* Audio links: any property change is an error. */
1035 if (frame->format != link->format) {
1036 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1039 if (frame->channels != link->channels) {
1040 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1043 if (frame->channel_layout != link->channel_layout) {
1044 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1047 if (frame->sample_rate != link->sample_rate) {
1048 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1053 link->frame_blocked_in = link->frame_wanted_out = 0;
1054 link->frame_count_in++;
1055 filter_unblock(link->dst);
1056 ret = ff_framequeue_add(&link->fifo, frame);
1058 av_frame_free(&frame);
1061 ff_filter_set_ready(link->dst, 300);
/* Error label: release the rejected frame. */
1065 av_frame_free(&frame);
1066 return AVERROR_PATCHWELCOME;
/* True when the link FIFO holds frames and either at least min samples are
 * queued or some terminating condition holds.
 * NOTE(review): the final operand of the || expression (and the braces) is
 * missing from this excerpt — upstream it checks link->status_in; code is
 * reproduced as shown. */
1069 static int samples_ready(AVFilterLink *link, unsigned min)
1071 return ff_framequeue_queued_frames(&link->fifo) &&
1072 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into a
 * single frame. Fast path: when nothing was skipped and the first queued
 * frame already fits [min, max], it is handed out directly. Otherwise whole
 * frames are gathered until max would be exceeded, copied into a freshly
 * allocated buffer, and a final partial frame is consumed via
 * ff_framequeue_skip_samples() so the remainder stays queued.
 * NOTE(review): interior lines (ret/accumulation initialization, the
 * while-loop opener, pts adjustment of the partial tail, *rframe = buf and
 * return) appear to be missing from this excerpt; code is reproduced as
 * shown. */
1076 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1079 AVFrame *frame0, *frame, *buf;
1080 unsigned nb_samples, nb_frames, i, p;
1083 /* Note: this function relies on no format changes and must only be
1084 called with enough samples. */
1085 av_assert1(samples_ready(link, link->min_samples));
1086 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
/* Fast path: first frame alone satisfies the request. */
1087 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1088 *rframe = ff_framequeue_take(&link->fifo);
/* Gather whole frames while staying within max. */
1094 if (nb_samples + frame->nb_samples > max) {
1095 if (nb_samples < min)
1099 nb_samples += frame->nb_samples;
1101 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1103 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1106 buf = ff_get_audio_buffer(link, nb_samples);
1108 return AVERROR(ENOMEM);
1109 ret = av_frame_copy_props(buf, frame0);
1111 av_frame_free(&buf);
/* The merged frame inherits the timestamp of the first source frame. */
1114 buf->pts = frame0->pts;
1117 for (i = 0; i < nb_frames; i++) {
1118 frame = ff_framequeue_take(&link->fifo);
1119 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1120 frame->nb_samples, link->channels, link->format);
1121 p += frame->nb_samples;
1122 av_frame_free(&frame);
/* Partial tail: copy n samples and skip them in the queued frame. */
1124 if (p < nb_samples) {
1125 unsigned n = nb_samples - p;
1126 frame = ff_framequeue_peek(&link->fifo, 0);
1127 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1128 link->channels, link->format);
1129 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Pop one frame (or a min/max sample slice for audio) from the link FIFO
 * and deliver it to the destination filter through the legacy framed path.
 * frame_count_out is pre-decremented because ff_filter_frame_framed() will
 * re-increment it; errors different from the current out-status are turned
 * into an out-status change. The destination is always re-scheduled to pick
 * up further progress.
 * NOTE(review): interior lines (ret<0 handling after consume, av_assert1 on
 * frame presence, final return) appear to be missing from this excerpt;
 * code is reproduced as shown. */
1136 static int ff_filter_frame_to_filter(AVFilterLink *link)
1138 AVFrame *frame = NULL;
1139 AVFilterContext *dst = link->dst;
1142 av_assert1(ff_framequeue_queued_frames(&link->fifo));
/* Audio links with min_samples use sample-accurate consumption. */
1143 ret = link->min_samples ?
1144 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1145 ff_inlink_consume_frame(link, &frame);
1151 /* The filter will soon have received a new frame, that may allow it to
1152 produce one or more: unblock its outputs. */
1153 filter_unblock(dst);
1154 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1155 before the frame; ff_filter_frame_framed() will re-increment it. */
1156 link->frame_count_out--;
1157 ret = ff_filter_frame_framed(link, frame);
1158 if (ret < 0 && ret != link->status_out) {
1159 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1161 /* Run once again, to see if several frames were available, or if
1162 the input status has also changed, or any other reason. */
1163 ff_filter_set_ready(dst, 300);
/* React to an input status change (EOF/error) on 'in' by requesting frames
 * from the filter's outputs round-robin until either the input's out-status
 * gets set or every output is already closed (in which case the change is
 * acknowledged directly). 'progress' tracks whether any output request
 * succeeded in a full cycle.
 * NOTE(review): interior lines (ret declaration, progress updates, the
 * ret<0 handling, returns, closing braces) appear to be missing from this
 * excerpt; code is reproduced as shown. */
1168 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1170 unsigned out = 0, progress = 0;
1173 av_assert0(!in->status_out);
1174 if (!filter->nb_outputs) {
1175 /* not necessary with the current API and sinks */
1178 while (!in->status_out) {
1179 if (!filter->outputs[out]->status_in) {
1181 ret = ff_request_frame_to_filter(filter->outputs[out]);
/* Wrap around after the last output; bail out when a full cycle
 * made no progress. */
1185 if (++out == filter->nb_outputs) {
1187 /* Every output already closed: input no longer interesting
1188 (example: overlay in shortest mode, other input closed). */
1189 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1196 ff_filter_set_ready(filter, 200);
/* Default activation strategy for legacy filters, tried in priority order:
 * 1) deliver a queued frame from any ready input;
 * 2) forward a pending input status change (FIFO must be empty);
 * 3) forward a frame request from any unblocked output that wants one.
 * Returns FFERROR_NOT_READY when there is nothing to do.
 * NOTE(review): the loop/if closing braces and the "int i;" declaration
 * appear to be missing from this excerpt; code is reproduced as shown. */
1200 static int ff_filter_activate_default(AVFilterContext *filter)
1204 for (i = 0; i < filter->nb_inputs; i++) {
1205 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1206 return ff_filter_frame_to_filter(filter->inputs[i]);
1209 for (i = 0; i < filter->nb_inputs; i++) {
1210 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1211 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1212 return forward_status_change(filter, filter->inputs[i]);
1215 for (i = 0; i < filter->nb_outputs; i++) {
1216 if (filter->outputs[i]->frame_wanted_out &&
1217 !filter->outputs[i]->frame_blocked_in) {
1218 return ff_request_frame_to_filter(filter->outputs[i]);
1221 return FFERROR_NOT_READY;
1225 Filter scheduling and activation
1227 When a filter is activated, it must:
1228 - if possible, output a frame;
1229 - else, if relevant, forward the input status change;
1230 - else, check outputs for wanted frames and forward the requests.
1232 The following AVFilterLink fields are used for activation:
1236 This field indicates if a frame is needed on this input of the
1237 destination filter. A positive value indicates that a frame is needed
1238 to process queued frames or internal data or to satisfy the
1239 application; a zero value indicates that a frame is not especially
1240 needed but could be processed anyway; a negative value indicates that a
1241 frame would just be queued.
1243 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1244 when requested by the application through a specific API or when it is
1245 set on one of the outputs.
1247 It is cleared when a frame is sent from the source using
1250 It is also cleared when a status change is sent from the source using
1251 ff_avfilter_link_set_in_status().
1255 This field means that the source filter can not generate a frame as is.
1256 Its goal is to avoid repeatedly calling the request_frame() method on
1259 It is set by the framework on all outputs of a filter before activating it.
1261 It is automatically cleared by ff_filter_frame().
1263 It is also automatically cleared by ff_avfilter_link_set_in_status().
1265 It is also cleared on all outputs (using filter_unblock()) when
1266 something happens on an input: processing a frame or changing the
1271 Contains the frames queued on a filter input. If it contains frames and
1272 frame_wanted_out is not set, then the filter can be activated. If that
1273 results in the filter not being able to use these frames, the filter must set
1274 frame_wanted_out to ask for more frames.
1276 - status_in and status_in_pts:
1278 Status (EOF or error code) of the link and timestamp of the status
1279 change (in link time base, same as frames) as seen from the input of
1280 the link. The status change is considered happening after the frames
1283 It is set by the source filter using ff_avfilter_link_set_in_status().
1287 Status of the link as seen from the output of the link. The status
1288 change is considered having already happened.
1290 It is set by the destination filter using
1291 ff_avfilter_link_set_out_status().
1293 Filters are activated according to the ready field, set using
1294 ff_filter_set_ready(). Eventually, a priority queue will be used.
1295 ff_filter_set_ready() is called whenever anything could cause progress to
1296 be possible. Marking a filter ready when it is not is not a problem,
1297 except for the small overhead it causes.
1299 Conditions that cause a filter to be marked ready are:
1301 - frames added on an input link;
1303 - changes in the input or output status of an input link;
1305 - requests for a frame on an output link;
1307 - after any actual processing using the legacy methods (filter_frame(),
1308 and request_frame() to acknowledge status changes), to run once more
1309 and check if enough input was present for several frames.
1311 Examples of scenarios to consider:
1313 - buffersrc: activate if frame_wanted_out to notify the application;
1314 activate when the application adds a frame to push it immediately.
1316 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1318 - concat (not at stitch points): can process a frame on any output.
1319 Activate if frame_wanted_out on output to forward on the corresponding
1320 input. Activate when a frame is present on input to process it
1323 - framesync: needs at least one frame on each input; extra frames on the
1324 wrong input will accumulate. When a frame is first added on one input,
1325 set frame_wanted_out<0 on it to avoid getting more (would trigger
1326 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1328 Activation of old filters:
1330 In order to activate a filter implementing the legacy filter_frame() and
1331 request_frame() methods, perform the first possible of the following
1334 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1335 frame and call filter_frame().
1337 Rationale: filter frames as soon as possible instead of leaving them
1338 queued; frame_wanted_out < 0 is not possible since the old API does not
1339 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1340 when min_samples > 0 and there are not enough samples queued.
1342 - If an input has status_in set but not status_out, try to call
1343 request_frame() on one of the outputs in the hope that it will trigger
1344 request_frame() on the input with status_in and acknowledge it. This is
1345 awkward and fragile, filters with several inputs or outputs should be
1346 updated to direct activation as soon as possible.
1348 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1351 Rationale: checking frame_blocked_in is necessary to avoid requesting
1352 repeatedly on a blocked input if another is not blocked (example:
1353 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1356 int ff_filter_activate(AVFilterContext *filter)
1360 /* Generic timeline support is not yet implemented but should be easy */
1361 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1362 filter->filter->activate));
1364 ret = filter->filter->activate ? filter->filter->activate(filter) :
1365 ff_filter_activate_default(filter);
1366 if (ret == FFERROR_NOT_READY)
1371 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1373 *rpts = link->current_pts;
1374 if (ff_framequeue_queued_frames(&link->fifo))
1375 return *rstatus = 0;
1376 if (link->status_out)
1377 return *rstatus = link->status_out;
1378 if (!link->status_in)
1379 return *rstatus = 0;
1380 *rstatus = link->status_out = link->status_in;
1381 ff_update_link_current_pts(link, link->status_in_pts);
1382 *rpts = link->current_pts;
1386 size_t ff_inlink_queued_frames(AVFilterLink *link)
1388 return ff_framequeue_queued_frames(&link->fifo);
1391 int ff_inlink_check_available_frame(AVFilterLink *link)
1393 return ff_framequeue_queued_frames(&link->fifo) > 0;
1396 int ff_inlink_queued_samples(AVFilterLink *link)
1398 return ff_framequeue_queued_samples(&link->fifo);
1401 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1403 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1405 return samples >= min || (link->status_in && samples);
1408 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1410 ff_update_link_current_pts(link, frame->pts);
1411 ff_inlink_process_commands(link, frame);
1412 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1413 link->frame_count_out++;
1416 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1421 if (!ff_inlink_check_available_frame(link))
1424 if (link->fifo.samples_skipped) {
1425 frame = ff_framequeue_peek(&link->fifo, 0);
1426 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1429 frame = ff_framequeue_take(&link->fifo);
1430 consume_update(link, frame);
1435 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1443 if (!ff_inlink_check_available_samples(link, min))
1445 if (link->status_in)
1446 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1447 ret = take_samples(link, min, max, &frame);
1450 consume_update(link, frame);
1455 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1457 return ff_framequeue_peek(&link->fifo, idx);
1460 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1462 AVFrame *frame = *rframe;
1466 if (av_frame_is_writable(frame))
1468 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1470 switch (link->type) {
1471 case AVMEDIA_TYPE_VIDEO:
1472 out = ff_get_video_buffer(link, link->w, link->h);
1474 case AVMEDIA_TYPE_AUDIO:
1475 out = ff_get_audio_buffer(link, frame->nb_samples);
1478 return AVERROR(EINVAL);
1481 return AVERROR(ENOMEM);
1483 ret = av_frame_copy_props(out, frame);
1485 av_frame_free(&out);
1489 switch (link->type) {
1490 case AVMEDIA_TYPE_VIDEO:
1491 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1492 frame->format, frame->width, frame->height);
1494 case AVMEDIA_TYPE_AUDIO:
1495 av_samples_copy(out->extended_data, frame->extended_data,
1496 0, 0, frame->nb_samples,
1501 av_assert0(!"reached");
1504 av_frame_free(&frame);
1509 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1511 AVFilterCommand *cmd = link->dst->command_queue;
1513 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1514 av_log(link->dst, AV_LOG_DEBUG,
1515 "Processing command time:%f command:%s arg:%s\n",
1516 cmd->time, cmd->command, cmd->arg);
1517 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1518 ff_command_queue_pop(link->dst);
1519 cmd= link->dst->command_queue;
1524 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1526 AVFilterContext *dstctx = link->dst;
1527 int64_t pts = frame->pts;
1528 int64_t pos = frame->pkt_pos;
1530 if (!dstctx->enable_str)
1533 dstctx->var_values[VAR_N] = link->frame_count_out;
1534 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1535 dstctx->var_values[VAR_W] = link->w;
1536 dstctx->var_values[VAR_H] = link->h;
1537 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1539 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1542 void ff_inlink_request_frame(AVFilterLink *link)
1544 av_assert1(!link->status_in);
1545 av_assert1(!link->status_out);
1546 link->frame_wanted_out = 1;
1547 ff_filter_set_ready(link->src, 100);
1550 void ff_inlink_set_status(AVFilterLink *link, int status)
1552 if (link->status_out)
1554 link->frame_wanted_out = 0;
1555 link->frame_blocked_in = 0;
1556 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1557 while (ff_framequeue_queued_frames(&link->fifo)) {
1558 AVFrame *frame = ff_framequeue_take(&link->fifo);
1559 av_frame_free(&frame);
1561 if (!link->status_in)
1562 link->status_in = status;
1565 int ff_outlink_get_status(AVFilterLink *link)
1567 return link->status_in;
1570 const AVClass *avfilter_get_class(void)
1572 return &avfilter_class;
1575 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1576 int default_pool_size)
1578 AVHWFramesContext *frames;
1580 // Must already be set by caller.
1581 av_assert0(link->hw_frames_ctx);
1583 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1585 if (frames->initial_pool_size == 0) {
1586 // Dynamic allocation is necessarily supported.
1587 } else if (avctx->extra_hw_frames >= 0) {
1588 frames->initial_pool_size += avctx->extra_hw_frames;
1590 frames->initial_pool_size = default_pool_size;