3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Exported version string so the exact FFmpeg build of libavfilter can be
 * identified by inspecting the binary (e.g. with strings(1)). */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log the contents of an AVFrame (buffer pointers, linesizes, pts/pos,
 * plus video- or audio-specific fields) via ff_tlog(). "end" selects whether
 * a trailing newline is emitted.
 * NOTE(review): several lines of this function are elided in this view
 * (the opening ff_tlog() call, the video/audio branch structure and closing
 * braces); comments below describe only the visible fragments. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
/* Video-specific fields: aspect ratio, dimensions, interlacing, picture type. */
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
/* Audio-specific fields (channel layout, sample count, rate) when samples present. */
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
/* Public API: return the compile-time libavfilter version.
 * The assert enforces the FFmpeg convention of micro versions >= 100
 * (distinguishing FFmpeg from Libav builds). */
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
/* Public API: return the configure-time options this build was made with. */
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
/* Public API: return the license string. The prefix/offset trick keeps the
 * "libavfilter license: " marker embedded in the binary while returning only
 * the bare license name to the caller. */
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Remove and free the head of the filter's pending command queue.
 * NOTE(review): the frees of the arg string and of the node itself are
 * elided in this view; only the command string free and list unlink show. */
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
/* Insert a new pad (and its link slot) at position idx of a filter's pad
 * array, growing both arrays by one and shifting later entries up.
 * padidx_off is the byte offset, inside AVFilterLink, of the dstpad/srcpad
 * index field that must be bumped for every shifted link.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
/* Clamp idx so insertion past the end degrades to an append. */
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
/* Shift the tail of both arrays one slot up to open a hole at idx. */
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
/* Each shifted link still caches its pad index; bump it to match. */
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Public API: connect output pad srcpad of src to input pad dstpad of dst
 * by allocating a shared AVFilterLink. Both filters must already belong to
 * the same graph. Fails with EINVAL on out-of-range pads, already-connected
 * pads, or mismatched media types; ENOMEM on allocation failure. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
/* Refuse to link pads of different media types (e.g. video to audio). */
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
/* The same link object is referenced from both endpoints. */
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
/* format fields are initialized to -1 elsewhere; this guards the assumption
 * that -1 means "none" for both pixel and sample formats. */
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Public API: free a link and the resources it owns (partial buffer, frame
 * FIFO, frame pool) and reset *link to NULL.
 * NOTE(review): the NULL check and the final av_freep(link) are elided in
 * this view. */
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
/* Public API accessor: number of channels negotiated on this (audio) link. */
186 int avfilter_link_get_channels(AVFilterLink *link)
188 return link->channels;
/* Raise (never lower) the filter's scheduling priority; the activation loop
 * picks the highest-ready filter to run next. */
191 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
193 filter->ready = FFMAX(filter->ready, priority);
197 * Clear frame_blocked_in on all outputs.
198 * This is necessary whenever something changes on input.
200 static void filter_unblock(AVFilterContext *filter)
204 for (i = 0; i < filter->nb_outputs; i++)
205 filter->outputs[i]->frame_blocked_in = 0;
/* Record a status change (typically EOF or an error) as seen from the source
 * side of the link, with the pts of the change in link time base. Idempotent
 * for the same status; asserts no *different* status was already set.
 * Wakes the destination filter so it can process the change. */
209 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
211 if (link->status_in == status)
213 av_assert0(!link->status_in);
214 link->status_in = status;
215 link->status_in_pts = pts;
/* No more frames will arrive: cancel any pending request and unblock. */
216 link->frame_wanted_out = 0;
217 link->frame_blocked_in = 0;
218 filter_unblock(link->dst);
219 ff_filter_set_ready(link->dst, 200);
/* Record a status change as seen from the destination side of the link
 * (the change is considered to have already happened). May only be called
 * once per link, and not while a frame is still wanted on it.
 * Wakes the source filter, which may react (e.g. stop producing). */
222 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
224 av_assert0(!link->frame_wanted_out);
225 av_assert0(!link->status_out);
226 link->status_out = status;
227 if (pts != AV_NOPTS_VALUE)
228 ff_update_link_current_pts(link, pts);
229 filter_unblock(link->dst);
230 ff_filter_set_ready(link->src, 200);
/* Public (legacy) API: mark a link closed from the consumer side.
 * Thin wrapper mapping closed=1 to an AVERROR_EOF output status. */
233 void avfilter_link_set_closed(AVFilterLink *link, int closed)
235 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/* Public API: splice filter filt into an existing link, so that
 * src -> [filt] -> dst, using filt's pads filt_srcpad_idx (new input side)
 * and filt_dstpad_idx (new output side). Any format lists already negotiated
 * on the original link are moved to the newly created downstream link.
 * NOTE(review): some lines (declarations, early link of src->filt, returns)
 * are elided in this view. */
238 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
239 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
242 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
244 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
245 "between the filter '%s' and the filter '%s'\n",
246 filt->name, link->src->name, link->dst->name);
/* Temporarily detach the old destination so avfilter_link() sees a free pad. */
248 link->dst->inputs[dstpad_idx] = NULL;
249 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
250 /* failed to link output filter to new filter */
251 link->dst->inputs[dstpad_idx] = link;
255 /* re-hookup the link to the new destination filter we inserted */
257 link->dstpad = &filt->input_pads[filt_srcpad_idx];
258 filt->inputs[filt_srcpad_idx] = link;
260 /* if any information on supported media formats already exists on the
261 * link, we need to preserve that */
262 if (link->out_formats)
263 ff_formats_changeref(&link->out_formats,
264 &filt->outputs[filt_dstpad_idx]->out_formats);
265 if (link->out_samplerates)
266 ff_formats_changeref(&link->out_samplerates,
267 &filt->outputs[filt_dstpad_idx]->out_samplerates);
268 if (link->out_channel_layouts)
269 ff_channel_layouts_changeref(&link->out_channel_layouts,
270 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
/* Public API: recursively configure all input links of a filter (and,
 * transitively, everything upstream). For each link this runs the source
 * pad's config_props(), fills in defaults inherited from the source filter's
 * first input (time base, SAR, frame rate), propagates hw_frames_ctx for
 * non-hwframe-aware filters, then runs the destination pad's config_props().
 * link->init_state guards against re-entry and detects cycles.
 * NOTE(review): numerous lines (switch cases, returns, closing braces) are
 * elided in this view; comments annotate only the visible fragments. */
275 int avfilter_config_links(AVFilterContext *filter)
277 int (*config_link)(AVFilterLink *);
281 for (i = 0; i < filter->nb_inputs; i ++) {
282 AVFilterLink *link = filter->inputs[i];
283 AVFilterLink *inlink;
286 if (!link->src || !link->dst) {
287 av_log(filter, AV_LOG_ERROR,
288 "Not all input and output are properly linked (%d).\n", i);
289 return AVERROR(EINVAL);
/* inlink = the source filter's own first input, used below as the template
 * for inherited properties (NULL for source filters with no inputs). */
292 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
294 link->current_pts_us = AV_NOPTS_VALUE;
296 switch (link->init_state) {
/* Re-entering a link already being initialized means the graph has a cycle. */
299 case AVLINK_STARTINIT:
300 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
303 link->init_state = AVLINK_STARTINIT;
/* Configure everything upstream first. */
305 if ((ret = avfilter_config_links(link->src)) < 0)
308 if (!(config_link = link->srcpad->config_props)) {
309 if (link->src->nb_inputs != 1) {
310 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
311 "with more than one input "
312 "must set config_props() "
313 "callbacks on all outputs\n");
314 return AVERROR(EINVAL);
316 } else if ((ret = config_link(link)) < 0) {
317 av_log(link->src, AV_LOG_ERROR,
318 "Failed to configure output pad on %s\n",
323 switch (link->type) {
324 case AVMEDIA_TYPE_VIDEO:
/* Inherit unset video properties from the upstream input, or use defaults. */
325 if (!link->time_base.num && !link->time_base.den)
326 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
328 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
329 link->sample_aspect_ratio = inlink ?
330 inlink->sample_aspect_ratio : (AVRational){1,1};
333 if (!link->frame_rate.num && !link->frame_rate.den)
334 link->frame_rate = inlink->frame_rate;
339 } else if (!link->w || !link->h) {
340 av_log(link->src, AV_LOG_ERROR,
341 "Video source filters must set their output link's "
342 "width and height\n");
343 return AVERROR(EINVAL);
347 case AVMEDIA_TYPE_AUDIO:
/* Audio: inherit the time base, else default to 1/sample_rate. */
349 if (!link->time_base.num && !link->time_base.den)
350 link->time_base = inlink->time_base;
353 if (!link->time_base.num && !link->time_base.den)
354 link->time_base = (AVRational) {1, link->sample_rate};
/* Auto-forward the hardware frames context through filters that are not
 * hwframe-aware; such filters must never set it themselves. */
357 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
358 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
359 av_assert0(!link->hw_frames_ctx &&
360 "should not be set by non-hwframe-aware filter");
361 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
362 if (!link->hw_frames_ctx)
363 return AVERROR(ENOMEM);
366 if ((config_link = link->dstpad->config_props))
367 if ((ret = config_link(link)) < 0) {
368 av_log(link->dst, AV_LOG_ERROR,
369 "Failed to configure input pad on %s\n",
374 link->init_state = AVLINK_INIT;
/* Trace-log a link's negotiated properties: dimensions and pixel format for
 * video, sample rate / channel layout / sample format for audio, plus the
 * names of the filters at both ends. "end" controls the trailing newline
 * (visible in the elided format-string tail).
 * NOTE(review): the ff_tlog() call heads, the audio else-branch opening and
 * the buf declaration are elided in this view. */
381 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
383 if (link->type == AVMEDIA_TYPE_VIDEO) {
385 "link[%p s:%dx%d fmt:%s %s->%s]%s",
386 link, link->w, link->h,
387 av_get_pix_fmt_name(link->format),
388 link->src ? link->src->filter->name : "",
389 link->dst ? link->dst->filter->name : "",
/* Audio branch: render the channel layout into a local string first. */
393 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
396 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
397 link, (int)link->sample_rate, buf,
398 av_get_sample_fmt_name(link->format),
399 link->src ? link->src->filter->name : "",
400 link->dst ? link->dst->filter->name : "",
/* Request a frame on a link, for filters using the legacy (non-activate)
 * scheduling model. Returns an already-acknowledged output status if set;
 * otherwise acknowledges a pending input status (when the FIFO is drained),
 * or marks the link as wanting a frame and wakes the source. */
405 int ff_request_frame(AVFilterLink *link)
407 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
/* Only for legacy filters: activate-based filters must not call this. */
409 av_assert1(!link->dst->filter->activate);
410 if (link->status_out)
411 return link->status_out;
412 if (link->status_in) {
413 if (ff_framequeue_queued_frames(&link->fifo)) {
414 av_assert1(!link->frame_wanted_out);
415 av_assert1(link->dst->ready >= 300);
418 /* Acknowledge status change. Filters using ff_request_frame() will
419 handle the change automatically. Filters can also check the
420 status directly but none do yet. */
421 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
422 return link->status_out;
425 link->frame_wanted_out = 1;
426 ff_filter_set_ready(link->src, 100);
/* Estimate the pts (in link_time_base) at which a status change happened on
 * a filter, by taking the minimum over its inputs: first the current_pts of
 * inputs that already acknowledged the same status; failing that (with a
 * warning), the status_in_pts of all inputs. Returns AV_NOPTS_VALUE when no
 * usable timestamp is found.
 * NOTE(review): the guard/return lines between the two loops are elided in
 * this view. */
430 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
433 int64_t r = INT64_MAX;
435 for (i = 0; i < ctx->nb_inputs; i++)
436 if (ctx->inputs[i]->status_out == status)
437 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
440 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
441 for (i = 0; i < ctx->nb_inputs; i++)
442 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
445 return AV_NOPTS_VALUE;
/* Forward a frame request into the source filter of a link: call its
 * request_frame() callback if set, otherwise recurse onto the source's first
 * input. A failure other than EAGAIN is converted into an input status
 * change on the link (with a guessed pts). */
448 static int ff_request_frame_to_filter(AVFilterLink *link)
452 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
453 /* Assume the filter is blocked, let the method clear it if not */
454 link->frame_blocked_in = 1;
455 if (link->srcpad->request_frame)
456 ret = link->srcpad->request_frame(link);
457 else if (link->src->inputs[0])
458 ret = ff_request_frame(link->src->inputs[0]);
460 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
461 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
/* EOF is not propagated to the caller as an error here (tail elided). */
462 if (ret == AVERROR_EOF)
/* Legacy polling: how many frames can be requested without blocking.
 * Uses the source pad's poll_frame() if provided; otherwise returns the
 * minimum of recursively polling all of the source filter's inputs.
 * NOTE(review): the declaration of val and the final return are elided in
 * this view. */
468 int ff_poll_frame(AVFilterLink *link)
470 int i, min = INT_MAX;
472 if (link->srcpad->poll_frame)
473 return link->srcpad->poll_frame(link);
475 for (i = 0; i < link->src->nb_inputs; i++) {
477 if (!link->src->inputs[i])
478 return AVERROR(EINVAL);
479 val = ff_poll_frame(link->src->inputs[i]);
480 min = FFMIN(min, val);
/* Names of the variables available in the timeline 'enable' expression;
 * must stay in sync with the VAR_* enum used for ctx->var_values.
 * NOTE(review): the table entries themselves are elided in this view. */
486 static const char *const var_names[] = {
/* Parse and install a new timeline 'enable' expression on the filter.
 * Rejects filters without AVFILTER_FLAG_SUPPORT_TIMELINE; lazily allocates
 * the per-filter variable array on first use. On success, replaces both the
 * compiled expression (ctx->enable) and its source string (ctx->enable_str).
 * NOTE(review): error-path cleanup (freeing expr_dup, restoring 'old') is
 * elided in this view. */
504 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
508 AVExpr *old = ctx->enable;
510 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
511 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
512 "with filter '%s'\n", ctx->filter->name);
513 return AVERROR_PATCHWELCOME;
/* Keep a private copy so the caller's string lifetime does not matter. */
516 expr_dup = av_strdup(expr);
518 return AVERROR(ENOMEM);
520 if (!ctx->var_values) {
521 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
522 if (!ctx->var_values) {
524 return AVERROR(ENOMEM);
528 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
529 NULL, NULL, NULL, NULL, 0, ctx->priv);
531 av_log(ctx->priv, AV_LOG_ERROR,
532 "Error when evaluating the expression '%s' for enable\n",
539 av_free(ctx->enable_str);
540 ctx->enable_str = expr_dup;
/* Record the latest timestamp seen on a link, both in link time base and in
 * microseconds, and re-sort the graph's age heap accordingly (used to pick
 * the "oldest" link when draining). NOPTS values are ignored. */
544 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
546 if (pts == AV_NOPTS_VALUE)
548 link->current_pts = pts;
549 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
550 /* TODO use duration */
551 if (link->graph && link->age_index >= 0)
552 ff_avfilter_graph_update_heap(link->graph, link);
/* Public API: dispatch a runtime command to a filter. Handles the generic
 * "ping" (reply written into res, or logged when using the local buffer)
 * and "enable" (re-parse the timeline expression) commands, then defers to
 * the filter's own process_command() callback; ENOSYS if unhandled.
 * NOTE(review): the res==NULL fallback assigning local_res is elided in
 * this view. */
555 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
557 if(!strcmp(cmd, "ping")){
558 char local_res[256] = {0};
562 res_len = sizeof(local_res);
564 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
565 if (res == local_res)
566 av_log(filter, AV_LOG_INFO, "%s", res);
568 }else if(!strcmp(cmd, "enable")) {
569 return set_enable_expr(filter, arg);
570 }else if(filter->filter->process_command) {
571 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
573 return AVERROR(ENOSYS);
/* Global singly-linked registry of filters; last_filter points at the 'next'
 * slot of the tail so registration is O(1) append. */
576 static AVFilter *first_filter;
577 static AVFilter **last_filter = &first_filter;
/* Public API: linear search of the registry for a filter by exact name.
 * NOTE(review): the NULL-name guard and final return are elided in this view. */
579 const AVFilter *avfilter_get_by_name(const char *name)
581 const AVFilter *f = NULL;
586 while ((f = avfilter_next(f)))
587 if (!strcmp(f->name, name))
588 return (AVFilter *)f;
/* Serializes concurrent avfilter_register() calls. */
593 static AVMutex filter_register_mutex = AV_MUTEX_INITIALIZER;
/* Public (legacy) API: append a filter to the global registry under the
 * registration mutex.
 * NOTE(review): the lines appending to *last_filter are elided in this view;
 * only the tail-pointer update is visible. */
595 int avfilter_register(AVFilter *filter)
/* The timeline flags are mutually exclusive: a filter may support generic
 * OR internal timeline handling, not both bits at once. */
599 /* the filter must select generic or internal exclusively */
600 av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
602 ff_mutex_lock(&filter_register_mutex);
609 last_filter = &filter->next;
611 ff_mutex_unlock(&filter_register_mutex);
/* Public API: iterate the registry; NULL prev restarts from the head. */
616 const AVFilter *avfilter_next(const AVFilter *prev)
618 return prev ? prev->next : first_filter;
/* Public API: count entries in a NULL-name-terminated pad array.
 * NOTE(review): the NULL-pads guard, count declaration and return are
 * elided in this view. */
621 int avfilter_pad_count(const AVFilterPad *pads)
628 for (count = 0; pads->name; count++)
/* AVClass item_name callback: prefer the instance name, fall back to the
 * filter's type name. */
633 static const char *default_filter_name(void *filter_ctx)
635 AVFilterContext *ctx = filter_ctx;
636 return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the filter's private context (when it
 * has an AVClass) as the single child object for option handling.
 * NOTE(review): the returns (priv / NULL) are elided in this view. */
639 static void *filter_child_next(void *obj, void *prev)
641 AVFilterContext *ctx = obj;
642 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
/* AVClass child_class_next callback: iterate the distinct priv_class values
 * of registered filters, so generic option code can enumerate all filter
 * option sets. Resumes after the filter whose priv_class equals prev.
 * NOTE(review): loop-exit and return lines are elided in this view. */
647 static const AVClass *filter_child_class_next(const AVClass *prev)
649 const AVFilter *f = NULL;
651 /* find the filter that corresponds to prev */
652 while (prev && (f = avfilter_next(f)))
653 if (f->priv_class == prev)
656 /* could not find filter corresponding to prev */
660 /* find next filter with specific options */
661 while ((f = avfilter_next(f)))
663 return f->priv_class;
/* Generic per-filter-instance options shared by all filters
 * (threading mode, timeline 'enable' expression, thread count).
 * NOTE(review): the "auto"/terminator entries of the table appear to be
 * elided in this view. */
668 #define OFFSET(x) offsetof(AVFilterContext, x)
669 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
670 static const AVOption avfilter_options[] = {
671 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
672 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
673 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
674 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
675 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
676 { .i64 = 0 }, 0, INT_MAX, FLAGS },
/* AVClass for AVFilterContext: wires up naming, option table and the child
 * callbacks that expose each filter's private options to the AVOption API. */
680 static const AVClass avfilter_class = {
681 .class_name = "AVFilter",
682 .item_name = default_filter_name,
683 .version = LIBAVUTIL_VERSION_INT,
684 .category = AV_CLASS_CATEGORY_FILTER,
685 .child_next = filter_child_next,
686 .child_class_next = filter_child_class_next,
687 .option = avfilter_options,
/* Fallback execute callback when slice threading is unavailable: run all
 * jobs sequentially on the calling thread.
 * NOTE(review): the per-job ret[i] store and final return are elided in
 * this view. */
690 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
691 int *ret, int nb_jobs)
695 for (i = 0; i < nb_jobs; i++) {
696 int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for the given filter
 * type: context + private data + internal struct, option defaults, preinit
 * hook, and copies of the static pad descriptions plus NULL link arrays.
 * Returns NULL on any allocation failure after unwinding (error labels are
 * elided in this view, but the cleanup frees are visible at the end). */
703 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
705 AVFilterContext *ret;
711 ret = av_mallocz(sizeof(AVFilterContext));
715 ret->av_class = &avfilter_class;
716 ret->filter = filter;
717 ret->name = inst_name ? av_strdup(inst_name) : NULL;
718 if (filter->priv_size) {
719 ret->priv = av_mallocz(filter->priv_size);
/* preinit runs before options are applied, letting the filter set up
 * anything its option handling depends on. */
723 if (filter->preinit) {
724 if (filter->preinit(ret) < 0)
729 av_opt_set_defaults(ret);
730 if (filter->priv_class) {
731 *(const AVClass**)ret->priv = filter->priv_class;
732 av_opt_set_defaults(ret->priv);
735 ret->internal = av_mallocz(sizeof(*ret->internal));
738 ret->internal->execute = default_execute;
/* Duplicate the filter's static pad arrays so per-instance pads can be
 * modified (e.g. by ff_insert_pad) without touching the shared template. */
740 ret->nb_inputs = avfilter_pad_count(filter->inputs);
741 if (ret->nb_inputs ) {
742 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
743 if (!ret->input_pads)
745 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
746 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
751 ret->nb_outputs = avfilter_pad_count(filter->outputs);
752 if (ret->nb_outputs) {
753 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
754 if (!ret->output_pads)
756 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
757 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* Error-path cleanup: free everything allocated above in reverse order. */
767 av_freep(&ret->inputs);
768 av_freep(&ret->input_pads);
770 av_freep(&ret->outputs);
771 av_freep(&ret->output_pads);
773 av_freep(&ret->priv);
774 av_freep(&ret->internal);
/* Detach a link from both endpoint filters, drop all negotiation lists and
 * the hw frames reference, then free the link itself.
 * NOTE(review): the NULL-link guard and the if(link->src)/if(link->dst)
 * conditions around the detach lines are elided in this view. */
779 static void free_link(AVFilterLink *link)
785 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
787 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
789 av_buffer_unref(&link->hw_frames_ctx);
791 ff_formats_unref(&link->in_formats);
792 ff_formats_unref(&link->out_formats);
793 ff_formats_unref(&link->in_samplerates);
794 ff_formats_unref(&link->out_samplerates);
795 ff_channel_layouts_unref(&link->in_channel_layouts);
796 ff_channel_layouts_unref(&link->out_channel_layouts);
797 avfilter_link_free(&link);
/* Public API: destroy a filter instance — remove it from its graph, run its
 * uninit() callback, free all links, options, pads, command queue, enable
 * expression and internal state, then the context itself (final free elided
 * in this view, along with the NULL guard at entry). */
800 void avfilter_free(AVFilterContext *filter)
808 ff_filter_graph_remove_filter(filter->graph, filter);
810 if (filter->filter->uninit)
811 filter->filter->uninit(filter);
813 for (i = 0; i < filter->nb_inputs; i++) {
814 free_link(filter->inputs[i]);
816 for (i = 0; i < filter->nb_outputs; i++) {
817 free_link(filter->outputs[i]);
/* av_opt_free releases option-allocated strings/dicts in the private ctx. */
820 if (filter->filter->priv_class)
821 av_opt_free(filter->priv);
823 av_buffer_unref(&filter->hw_device_ctx);
825 av_freep(&filter->name);
826 av_freep(&filter->input_pads);
827 av_freep(&filter->output_pads);
828 av_freep(&filter->inputs);
829 av_freep(&filter->outputs);
830 av_freep(&filter->priv);
831 while(filter->command_queue){
832 ff_command_queue_pop(filter);
835 av_expr_free(filter->enable);
836 filter->enable = NULL;
837 av_freep(&filter->var_values);
838 av_freep(&filter->internal);
/* Effective thread count for a filter: its own nb_threads option (capped by
 * the graph's limit) when set, otherwise the graph-wide thread count. */
842 int ff_filter_get_nb_threads(AVFilterContext *ctx)
844 if (ctx->nb_threads > 0)
845 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
846 return ctx->graph->nb_threads;
/* Parse a "key=value:key=value" (with shorthand positional support) option
 * string for a filter. Generic AVFilterContext options are applied directly;
 * filter-private options are applied to ctx->priv and also recorded in
 * *options for the caller. Finishes by compiling any 'enable' expression.
 * NOTE(review): substantial portions (loop structure, shorthand-key
 * selection, frees of parsed_key/value, returns) are elided in this view. */
849 static int process_options(AVFilterContext *ctx, AVDictionary **options,
852 const AVOption *o = NULL;
854 char *av_uninit(parsed_key), *av_uninit(value);
862 const char *shorthand = NULL;
/* Advance to the next positional (shorthand) option, skipping constants
 * and options already consumed. */
864 o = av_opt_next(ctx->priv, o);
866 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
872 ret = av_opt_get_key_value(&args, "=", ":",
873 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
874 &parsed_key, &value);
876 if (ret == AVERROR(EINVAL))
877 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
879 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* An explicit key=value ends shorthand mode for all later tokens. */
887 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
892 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
/* Generic options (thread_type, enable, ...) live on the context itself. */
894 if (av_opt_find(ctx, key, NULL, 0, 0)) {
895 ret = av_opt_set(ctx, key, value, 0);
902 av_dict_set(options, key, value, 0);
903 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
904 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
905 if (ret == AVERROR_OPTION_NOT_FOUND)
906 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
919 if (ctx->enable_str) {
920 ret = set_enable_expr(ctx, ctx->enable_str);
/* Public API: initialize a filter from an options dictionary. Applies
 * generic options, decides whether slice threading is actually usable
 * (filter flag + graph setting + executor present), applies private
 * options, then runs the filter's init_opaque/init/init_dict callback.
 * NOTE(review): early returns after the error logs and the final return
 * are elided in this view. */
927 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
931 ret = av_opt_set_dict(ctx, options);
933 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
937 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
938 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
939 ctx->graph->internal->thread_execute) {
940 ctx->thread_type = AVFILTER_THREAD_SLICE;
941 ctx->internal->execute = ctx->graph->internal->thread_execute;
943 ctx->thread_type = 0;
946 if (ctx->filter->priv_class) {
947 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
949 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
/* Exactly one of the init callbacks is invoked, in this priority order. */
954 if (ctx->filter->init_opaque)
955 ret = ctx->filter->init_opaque(ctx, NULL);
956 else if (ctx->filter->init)
957 ret = ctx->filter->init(ctx);
958 else if (ctx->filter->init_dict)
959 ret = ctx->filter->init_dict(ctx, options);
/* Public API: initialize a filter from an option string. Contains a large
 * compatibility shim (under FF_API_OLD_FILTER_OPTS_ERROR) that rewrites the
 * deprecated colon-separated syntax of a few filters into the current
 * '|'-separated form, then parses via process_options() and finishes with
 * avfilter_init_dict(). Unknown leftover options are reported as errors.
 * NOTE(review): many connective lines (guards, gotos, frees of 'copy') are
 * elided in this view; comments annotate the visible fragments only. */
964 int avfilter_init_str(AVFilterContext *filter, const char *args)
966 AVDictionary *options = NULL;
967 AVDictionaryEntry *e;
971 if (!filter->filter->priv_class) {
972 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
973 "options, but options were provided: %s.\n", args);
974 return AVERROR(EINVAL);
977 #if FF_API_OLD_FILTER_OPTS_ERROR
/* Filters whose historical syntax used ':' as a list separator. */
978 if ( !strcmp(filter->filter->name, "format") ||
979 !strcmp(filter->filter->name, "noformat") ||
980 !strcmp(filter->filter->name, "frei0r") ||
981 !strcmp(filter->filter->name, "frei0r_src") ||
982 !strcmp(filter->filter->name, "ocv") ||
983 !strcmp(filter->filter->name, "pan") ||
984 !strcmp(filter->filter->name, "pp") ||
985 !strcmp(filter->filter->name, "aevalsrc")) {
986 /* a hack for compatibility with the old syntax
987 * replace colons with |s */
988 char *copy = av_strdup(args);
990 int nb_leading = 0; // number of leading colons to skip
994 ret = AVERROR(ENOMEM);
/* These filters take one or two positional arguments before the list. */
998 if (!strcmp(filter->filter->name, "frei0r") ||
999 !strcmp(filter->filter->name, "ocv"))
1001 else if (!strcmp(filter->filter->name, "frei0r_src"))
1004 while (nb_leading--) {
1007 p = copy + strlen(copy);
1013 deprecated = strchr(p, ':') != NULL;
1015 if (!strcmp(filter->filter->name, "aevalsrc")) {
/* aevalsrc: only colons before the '::' option separator are channel
 * expression separators; stop rewriting once an option token is seen. */
1017 while ((p = strchr(p, ':')) && p[1] != ':') {
1018 const char *epos = strchr(p + 1, '=');
1019 const char *spos = strchr(p + 1, ':');
1020 const int next_token_is_opt = epos && (!spos || epos < spos);
1021 if (next_token_is_opt) {
1025 /* next token does not contain a '=', assume a channel expression */
1029 if (p && *p == ':') { // double sep '::' found
1031 memmove(p, p + 1, strlen(p));
1034 while ((p = strchr(p, ':')))
1038 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1039 "'|' to separate the list items ('%s' instead of '%s')\n",
1041 ret = AVERROR(EINVAL);
1043 ret = process_options(filter, &options, copy);
/* Normal (non-compat) path. */
1052 ret = process_options(filter, &options, args);
1058 ret = avfilter_init_dict(filter, &options);
/* Any entry left in the dict was consumed by nobody -> unknown option. */
1062 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1063 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1064 ret = AVERROR_OPTION_NOT_FOUND;
1069 av_dict_free(&options);
/* Public API accessor: name of pad pad_idx in a pad array. */
1074 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1076 return pads[pad_idx].name;
/* Public API accessor: media type of pad pad_idx in a pad array. */
1079 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1081 return pads[pad_idx].type;
/* Default filter_frame callback: pass the frame straight through to the
 * destination filter's first output. */
1084 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1086 return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to the destination pad's filter_frame callback:
 * make the frame writable if the pad requires it, process queued commands,
 * evaluate the timeline 'enable' expression, and route through the
 * passthrough callback when the filter is disabled and supports generic
 * timeline handling. Frees the frame on the error path.
 * NOTE(review): the error return after make_writable and the final return
 * are elided in this view. */
1089 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1091 int (*filter_frame)(AVFilterLink *, AVFrame *);
1092 AVFilterContext *dstctx = link->dst;
1093 AVFilterPad *dst = link->dstpad;
1096 if (!(filter_frame = dst->filter_frame))
1097 filter_frame = default_filter_frame;
1099 if (dst->needs_writable) {
1100 ret = ff_inlink_make_frame_writable(link, &frame);
1105 ff_inlink_process_commands(link, frame);
1106 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
/* Disabled + generic timeline support: bypass the filter entirely. */
1108 if (dstctx->is_disabled &&
1109 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1110 filter_frame = default_filter_frame;
1111 ret = filter_frame(link, frame);
1112 link->frame_count_out++;
1116 av_frame_free(&frame);
/* Queue a frame on a link for later consumption by the destination filter.
 * Performs consistency checks (video geometry/format via asserts, with a few
 * known-offender filters exempted; audio format/channels/rate via hard
 * errors), clears blocking/request flags, pushes the frame into the FIFO
 * and marks the destination ready. Takes ownership of the frame (freed on
 * every error path).
 * NOTE(review): the 'goto error' lines and audio branch header are elided
 * in this view. */
1120 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1123 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1125 /* Consistency checks */
1126 if (link->type == AVMEDIA_TYPE_VIDEO) {
/* These filters legitimately accept mid-stream geometry/format changes,
 * so the asserts are skipped for them. */
1127 if (strcmp(link->dst->filter->name, "buffersink") &&
1128 strcmp(link->dst->filter->name, "format") &&
1129 strcmp(link->dst->filter->name, "idet") &&
1130 strcmp(link->dst->filter->name, "null") &&
1131 strcmp(link->dst->filter->name, "scale")) {
1132 av_assert1(frame->format == link->format);
1133 av_assert1(frame->width == link->w);
1134 av_assert1(frame->height == link->h);
1137 if (frame->format != link->format) {
1138 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1141 if (frame->channels != link->channels) {
1142 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1145 if (frame->channel_layout != link->channel_layout) {
1146 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1149 if (frame->sample_rate != link->sample_rate) {
1150 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
/* A frame arriving satisfies any pending request and unblocks outputs. */
1155 link->frame_blocked_in = link->frame_wanted_out = 0;
1156 link->frame_count_in++;
1157 filter_unblock(link->dst);
1158 ret = ff_framequeue_add(&link->fifo, frame);
1160 av_frame_free(&frame);
1163 ff_filter_set_ready(link->dst, 300);
/* Error path: drop the frame and report unsupported change. */
1167 av_frame_free(&frame);
1168 return AVERROR_PATCHWELCOME;
/* True when the link's FIFO has frames and either enough queued samples to
 * satisfy 'min' or another condition (elided in this view, e.g. pending
 * input status) that forces processing anyway. */
1171 static int samples_ready(AVFilterLink *link, unsigned min)
1173 return ff_framequeue_queued_frames(&link->fifo) &&
1174 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into *rframe.
 * Fast path: if the first queued frame alone fits [min, max] (and no samples
 * were previously skipped), hand it out directly. Otherwise allocate a new
 * buffer, copy whole frames into it, and partially consume the next frame
 * (via ff_framequeue_skip_samples) for the remainder.
 * Relies on no format changes and on enough samples being queued.
 * NOTE(review): loop headers, the nb_samples/nb_frames accounting setup and
 * the final assignment of *rframe are elided in this view. */
1178 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1181 AVFrame *frame0, *frame, *buf;
1182 unsigned nb_samples, nb_frames, i, p;
1185 /* Note: this function relies on no format changes and must only be
1186 called with enough samples. */
1187 av_assert1(samples_ready(link, link->min_samples));
1188 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1189 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1190 *rframe = ff_framequeue_take(&link->fifo);
/* Count how many whole frames fit under 'max' without dropping below 'min'. */
1196 if (nb_samples + frame->nb_samples > max) {
1197 if (nb_samples < min)
1201 nb_samples += frame->nb_samples;
1203 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1205 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1208 buf = ff_get_audio_buffer(link, nb_samples);
1210 return AVERROR(ENOMEM);
/* Properties (and pts) of the output come from the first source frame. */
1211 ret = av_frame_copy_props(buf, frame0);
1213 av_frame_free(&buf);
1216 buf->pts = frame0->pts;
1219 for (i = 0; i < nb_frames; i++) {
1220 frame = ff_framequeue_take(&link->fifo);
1221 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1222 frame->nb_samples, link->channels, link->format);
1223 p += frame->nb_samples;
1224 av_frame_free(&frame);
/* Partial tail: copy the remainder from the next frame and mark those
 * samples as skipped in the FIFO rather than consuming the frame. */
1226 if (p < nb_samples) {
1227 unsigned n = nb_samples - p;
1228 frame = ff_framequeue_peek(&link->fifo, 0);
1229 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1230 link->channels, link->format);
1231 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Pop one frame (or a min/max-sized batch of samples) from a link's FIFO
 * and feed it to the destination filter via ff_filter_frame_framed().
 * On failure, turns the error into an output status change. Always re-marks
 * the destination ready so remaining queued frames get processed.
 * NOTE(review): the early return on consume failure is elided in this view. */
1238 static int ff_filter_frame_to_filter(AVFilterLink *link)
1240 AVFrame *frame = NULL;
1241 AVFilterContext *dst = link->dst;
1244 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1245 ret = link->min_samples ?
1246 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1247 ff_inlink_consume_frame(link, &frame);
1253 /* The filter will soon have received a new frame, that may allow it to
1254 produce one or more: unblock its outputs. */
1255 filter_unblock(dst);
1256 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1257 before the frame; ff_filter_frame_framed() will re-increment it. */
1258 link->frame_count_out--;
1259 ret = ff_filter_frame_framed(link, frame);
1260 if (ret < 0 && ret != link->status_out) {
1261 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1263 /* Run once again, to see if several frames were available, or if
1264 the input status has also changed, or any other reason. */
1265 ff_filter_set_ready(dst, 300);
/* React to an input status change (e.g. EOF) on link 'in' by requesting
 * frames on the filter's outputs round-robin until either the input status
 * is acknowledged or every output is itself closed — in which case the
 * input is closed from the consumer side too.
 * NOTE(review): progress bookkeeping, error propagation and the wrap-around
 * of 'out' are partially elided in this view. */
1270 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1272 unsigned out = 0, progress = 0;
1275 av_assert0(!in->status_out);
1276 if (!filter->nb_outputs) {
1277 /* not necessary with the current API and sinks */
1280 while (!in->status_out) {
1281 if (!filter->outputs[out]->status_in) {
1283 ret = ff_request_frame_to_filter(filter->outputs[out]);
1287 if (++out == filter->nb_outputs) {
1289 /* Every output already closed: input no longer interesting
1290 (example: overlay in shortest mode, other input closed). */
1291 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1298 ff_filter_set_ready(filter, 200);
/* Default activate callback implementing the documented scheduling policy,
 * in priority order:
 *   1. an input has enough queued samples -> feed a frame to the filter;
 *   2. an input has a pending (unacknowledged) status -> forward it;
 *   3. an output wants a frame and its source isn't blocked -> request one;
 *   4. nothing to do -> FFERROR_NOT_READY. */
1302 static int ff_filter_activate_default(AVFilterContext *filter)
1306 for (i = 0; i < filter->nb_inputs; i++) {
1307 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1308 return ff_filter_frame_to_filter(filter->inputs[i]);
1311 for (i = 0; i < filter->nb_inputs; i++) {
1312 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1313 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1314 return forward_status_change(filter, filter->inputs[i]);
1317 for (i = 0; i < filter->nb_outputs; i++) {
1318 if (filter->outputs[i]->frame_wanted_out &&
1319 !filter->outputs[i]->frame_blocked_in) {
1320 return ff_request_frame_to_filter(filter->outputs[i]);
1323 return FFERROR_NOT_READY;
1327 Filter scheduling and activation
1329 When a filter is activated, it must:
1330 - if possible, output a frame;
1331 - else, if relevant, forward the input status change;
1332 - else, check outputs for wanted frames and forward the requests.
1334 The following AVFilterLink fields are used for activation:
1338 This field indicates if a frame is needed on this input of the
1339 destination filter. A positive value indicates that a frame is needed
1340 to process queued frames or internal data or to satisfy the
1341 application; a zero value indicates that a frame is not especially
1342 needed but could be processed anyway; a negative value indicates that a
1343 frame would just be queued.
1345 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1346 when requested by the application through a specific API or when it is
1347 set on one of the outputs.
1349 It is cleared when a frame is sent from the source using
1352 It is also cleared when a status change is sent from the source using
1353 ff_avfilter_link_set_in_status().
1357 This field means that the source filter cannot generate a frame as is.
1358 Its goal is to avoid repeatedly calling the request_frame() method on
1361 It is set by the framework on all outputs of a filter before activating it.
1363 It is automatically cleared by ff_filter_frame().
1365 It is also automatically cleared by ff_avfilter_link_set_in_status().
1367 It is also cleared on all outputs (using filter_unblock()) when
1368 something happens on an input: processing a frame or changing the
1373 Contains the frames queued on a filter input. If it contains frames and
1374 frame_wanted_out is not set, then the filter can be activated. If that
1375 results in the filter not being able to use these frames, the filter must set
1376 frame_wanted_out to ask for more frames.
1378 - status_in and status_in_pts:
1380 Status (EOF or error code) of the link and timestamp of the status
1381 change (in link time base, same as frames) as seen from the input of
1382 the link. The status change is considered happening after the frames
1385 It is set by the source filter using ff_avfilter_link_set_in_status().
1389 Status of the link as seen from the output of the link. The status
1390 change is considered having already happened.
1392 It is set by the destination filter using
1393 ff_avfilter_link_set_out_status().
1395 Filters are activated according to the ready field, set using
1396 ff_filter_set_ready(). Eventually, a priority queue will be used.
1397 ff_filter_set_ready() is called whenever anything could cause progress to
1398 be possible. Marking a filter ready when it is not is not a problem,
1399 except for the small overhead it causes.
1401 Conditions that cause a filter to be marked ready are:
1403 - frames added on an input link;
1405 - changes in the input or output status of an input link;
1407 - requests for a frame on an output link;
1409 - after any actual processing using the legacy methods (filter_frame(),
1410 and request_frame() to acknowledge status changes), to run once more
1411 and check if enough input was present for several frames.
1413 Examples of scenarios to consider:
1415 - buffersrc: activate if frame_wanted_out to notify the application;
1416 activate when the application adds a frame to push it immediately.
1418 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1420 - concat (not at stitch points): can process a frame on any output.
1421 Activate if frame_wanted_out on output to forward on the corresponding
1422 input. Activate when a frame is present on input to process it
1425 - framesync: needs at least one frame on each input; extra frames on the
1426 wrong input will accumulate. When a frame is first added on one input,
1427 set frame_wanted_out<0 on it to avoid getting more (would trigger
1428 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1430 Activation of old filters:
1432 In order to activate a filter implementing the legacy filter_frame() and
1433 request_frame() methods, perform the first possible of the following
1436 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1437 frame and call filter_frame().
1439 Rationale: filter frames as soon as possible instead of leaving them
1440 queued; frame_wanted_out < 0 is not possible since the old API does not
1441 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1442 when min_samples > 0 and there are not enough samples queued.
1444 - If an input has status_in set but not status_out, try to call
1445 request_frame() on one of the outputs in the hope that it will trigger
1446 request_frame() on the input with status_in and acknowledge it. This is
1447 awkward and fragile, filters with several inputs or outputs should be
1448 updated to direct activation as soon as possible.
1450 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1453 Rationale: checking frame_blocked_in is necessary to avoid requesting
1454 repeatedly on a blocked input if another is not blocked (example:
1455 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1457 TODO: respect needs_fifo and remove auto-inserted fifos.
/**
 * Run one activation round on a filter: call its activate() callback if it
 * has one, otherwise fall back to the legacy default scheduler.
 *
 * NOTE(review): the declaration of 'ret' and the function epilogue are
 * elided from this view.
 */
1461 int ff_filter_activate(AVFilterContext *filter)
1465 /* Generic timeline support is not yet implemented but should be easy */
1466 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1467 filter->filter->activate));
/* Prefer the modern activate() callback; fall back to the legacy default. */
1469 ret = filter->filter->activate ? filter->filter->activate(filter) :
1470 ff_filter_activate_default(filter);
/* FFERROR_NOT_READY is not an error, just "no progress possible now". */
1471 if (ret == FFERROR_NOT_READY)
/**
 * Test and acknowledge the status change of an input link.
 * Reports the status in *rstatus and the change timestamp in *rpts
 * (link time base). Returns 0 while frames are still queued or no change
 * is pending; otherwise acknowledges by copying status_in to status_out.
 *
 * NOTE(review): the opening/closing braces and final return are elided
 * from this view.
 */
1476 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1478 *rpts = link->current_pts;
/* Frames still queued: the change has not happened yet from this side. */
1479 if (ff_framequeue_queued_frames(&link->fifo))
1480 return *rstatus = 0;
/* Already acknowledged earlier: just report the stored status again. */
1481 if (link->status_out)
1482 return *rstatus = link->status_out;
/* No status change pending at all. */
1483 if (!link->status_in)
1484 return *rstatus = 0;
/* Acknowledge: copy the input status to the output side of the link and
   advance the link's current pts to the status-change timestamp. */
1485 *rstatus = link->status_out = link->status_in;
1486 ff_update_link_current_pts(link, link->status_in_pts);
1487 *rpts = link->current_pts;
/** Test if at least one frame is queued on the link's input FIFO. */
1491 int ff_inlink_check_available_frame(AVFilterLink *link)
1493 return ff_framequeue_queued_frames(&link->fifo) > 0;
/**
 * Test if enough audio samples are queued on the link to satisfy @min.
 * After EOF (status_in set), any nonzero remainder is also consumable.
 */
1496 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1498 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
/* At EOF a short tail of samples (fewer than min) still counts. */
1500 return samples >= min || (link->status_in && samples);
/**
 * Bookkeeping common to consuming a frame from an input link:
 * advance the link pts, process due commands, evaluate the timeline
 * 'enable' expression and count the frame.
 */
1503 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1505 ff_update_link_current_pts(link, frame->pts);
1506 ff_inlink_process_commands(link, frame);
/* Timeline support: the filter is disabled when 'enable' evaluates false. */
1507 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1508 link->frame_count_out++;
/**
 * Take a frame from the link's input FIFO and update the consumption
 * bookkeeping. Returns without a frame if none is available.
 *
 * NOTE(review): local declarations, the early-return value and the
 * epilogue storing into *rframe are elided from this view.
 */
1511 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1516 if (!ff_inlink_check_available_frame(link))
/* If samples were skipped on this FIFO, go through the sample-based path
   so accounting stays consistent: consume exactly this frame's samples. */
1519 if (link->fifo.samples_skipped) {
1520 frame = ff_framequeue_peek(&link->fifo, 0);
1521 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1524 frame = ff_framequeue_take(&link->fifo);
1525 consume_update(link, frame);
/**
 * Take between @min and @max audio samples from the link's input FIFO.
 *
 * NOTE(review): the second signature line, local declarations, asserts,
 * error handling and the epilogue storing into *rframe are elided from
 * this view.
 */
1530 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1538 if (!ff_inlink_check_available_samples(link, min))
/* At EOF, allow returning the remaining tail even if shorter than min. */
1540 if (link->status_in)
1541 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1542 ret = take_samples(link, min, max, &frame);
1545 consume_update(link, frame);
/**
 * Make sure *rframe is writable; if it is not, allocate a new buffer on
 * the link, copy properties and data into it, and replace *rframe.
 *
 * NOTE(review): local declarations ('out', 'ret'), break statements,
 * some error checks and the epilogue replacing *rframe are elided from
 * this view.
 */
1550 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1552 AVFrame *frame = *rframe;
/* Already writable: nothing to do. */
1556 if (av_frame_is_writable(frame))
1558 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
/* Allocate a fresh buffer matching the link's media type. */
1560 switch (link->type) {
1561 case AVMEDIA_TYPE_VIDEO:
1562 out = ff_get_video_buffer(link, link->w, link->h);
1564 case AVMEDIA_TYPE_AUDIO:
1565 out = ff_get_audio_buffer(link, frame->nb_samples);
/* Unsupported media type. */
1568 return AVERROR(EINVAL);
/* Allocation failed. */
1571 return AVERROR(ENOMEM);
/* Copy metadata first; free the new frame on failure. */
1573 ret = av_frame_copy_props(out, frame);
1575 av_frame_free(&out);
/* Then copy the raw data planes. */
1579 switch (link->type) {
1580 case AVMEDIA_TYPE_VIDEO:
1581 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1582 frame->format, frame->width, frame->height);
1584 case AVMEDIA_TYPE_AUDIO:
1585 av_samples_copy(out->extended_data, frame->extended_data,
1586 0, 0, frame->nb_samples,
/* Should be unreachable: link->type was validated above. */
1591 av_assert0(!"reached");
/* Drop the original (non-writable) frame. */
1594 av_frame_free(&frame);
/**
 * Process queued filter commands whose scheduled time (in seconds) is at
 * or before this frame's pts.
 *
 * NOTE(review): the opening/closing braces and final return are elided
 * from this view.
 */
1599 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1601 AVFilterCommand *cmd = link->dst->command_queue;
/* Commands are queued in time order; pop and run every one that is due. */
1603 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1604 av_log(link->dst, AV_LOG_DEBUG,
1605 "Processing command time:%f command:%s arg:%s\n",
1606 cmd->time, cmd->command, cmd->arg);
1607 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1608 ff_command_queue_pop(link->dst);
1609 cmd= link->dst->command_queue;
/**
 * Evaluate the destination filter's timeline 'enable' expression at this
 * frame. Returns nonzero when the filter should be enabled.
 *
 * NOTE(review): the opening/closing braces and the early "return 1" body
 * for the no-expression case are elided from this view.
 */
1614 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1616 AVFilterContext *dstctx = link->dst;
1617 int64_t pts = frame->pts;
1618 int64_t pos = frame->pkt_pos;
/* No timeline expression configured. */
1620 if (!dstctx->enable_str)
/* Expose per-frame variables to the expression evaluator. */
1623 dstctx->var_values[VAR_N] = link->frame_count_out;
1624 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1625 dstctx->var_values[VAR_W] = link->w;
1626 dstctx->var_values[VAR_H] = link->h;
/* pkt_pos == -1 means "byte position unknown": map it to NAN. */
1627 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
/* Any result with magnitude >= 0.5 counts as "enabled". */
1629 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
/** Mark the link as wanting a frame and wake up the source filter. */
1632 void ff_inlink_request_frame(AVFilterLink *link)
/* Requesting a frame after EOF/error would be a caller bug. */
1634 av_assert1(!link->status_in);
1635 av_assert1(!link->status_out);
1636 link->frame_wanted_out = 1;
/* Schedule the source with priority 100 (lower than status changes). */
1637 ff_filter_set_ready(link->src, 100);
/**
 * Set the status on the output side of a link (destination gives up),
 * dropping any frames still queued, and propagate the status to the
 * input side if the source has not set one already.
 *
 * NOTE(review): the opening/closing braces and the early return for the
 * already-closed case are elided from this view.
 */
1640 void ff_inlink_set_status(AVFilterLink *link, int status)
/* Already closed from the output side: nothing to do. */
1642 if (link->status_out)
1644 link->frame_wanted_out = 0;
1645 link->frame_blocked_in = 0;
1646 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
/* Drop queued frames: they will never be consumed. */
1647 while (ff_framequeue_queued_frames(&link->fifo)) {
1648 AVFrame *frame = ff_framequeue_take(&link->fifo);
1649 av_frame_free(&frame);
/* Mirror the status on the input side unless the source already set one. */
1651 if (!link->status_in)
1652 link->status_in = status;
/** Return the status set on the link (0 means the link is still open). */
1655 int ff_outlink_get_status(AVFilterLink *link)
1657 return link->status_in;
/** Public accessor for the libavfilter AVClass (used with the AVOptions API). */
1660 const AVClass *avfilter_get_class(void)
1662 return &avfilter_class;