3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Exported version string so that tools inspecting the binary can
 * identify which FFmpeg build this libavfilter came from. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/* Trace-log the contents of a frame reference: buffer pointers, linesizes
 * and timestamps, followed by video- or audio-specific fields. 'end'
 * selects whether the log line is terminated with a newline. */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51     av_unused char buf[16];
53            "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54            ref, ref->buf, ref->data[0],
55            ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56            ref->pts, ref->pkt_pos);
/* Video-specific fields: aspect ratio, dimensions, interlacing, picture type. */
59         ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60                 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61                 ref->width, ref->height,
62                 !ref->interlaced_frame ? 'P' :         /* Progressive  */
63                 ref->top_field_first   ? 'T' : 'B',    /* Top / Bottom */
65                 av_get_picture_type_char(ref->pict_type));
/* Audio-specific fields when the frame carries samples. */
67     if (ref->nb_samples) {
/* NOTE(review): stray 'd' after the PRId64 expansion in this format
 * string looks unintentional — verify against upstream. */
68         ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74     ff_tlog(ctx, "]%s", end ? "\n" : "");
/* Return the LIBAVFILTER_VERSION_INT the library was compiled with.
 * The assert guarantees this is an FFmpeg (micro >= 100) build. */
77 unsigned avfilter_version(void)
79     av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80     return LIBAVFILTER_VERSION_INT;
/* Return the build-time ./configure command line. */
83 const char *avfilter_configuration(void)
85     return FFMPEG_CONFIGURATION;
/* Return the library license string. The prefix trick skips the
 * "libavfilter license: " literal so only FFMPEG_LICENSE is returned,
 * while keeping the full tagged string present in the binary. */
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91     return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
/* Pop and free the head of the filter's pending-command queue,
 * advancing command_queue to the next entry. */
94 void ff_command_queue_pop(AVFilterContext *filter)
96     AVFilterCommand *c= filter->command_queue;
98     av_freep(&c->command);
99     filter->command_queue= c->next;
/* Insert a new pad (copied from *newpad) at position idx into the given
 * pads/links arrays, growing both by one element and shifting the tail.
 * padidx_off is the byte offset of the pad-index field inside AVFilterLink
 * (srcpad/dstpad side), used to renumber the links that were shifted.
 * Returns 0 on success or AVERROR(ENOMEM). */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104                   AVFilterPad **pads, AVFilterLink ***links,
107     AVFilterLink **newlinks;
108     AVFilterPad *newpads;
/* Clamp idx so insertion past the end appends. */
111     idx = FFMIN(idx, *count);
113     newpads  = av_realloc_array(*pads,  *count + 1, sizeof(AVFilterPad));
114     newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119     if (!newpads || !newlinks)
120         return AVERROR(ENOMEM);
/* Shift the tail up by one and drop the new pad into the gap. */
122     memmove(*pads  + idx + 1, *pads  + idx, sizeof(AVFilterPad)   * (*count - idx));
123     memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124     memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125     (*links)[idx] = NULL;
/* Renumber the pad index stored inside each shifted link. */
128     for (i = idx + 1; i < *count; i++)
130             (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/* Create a link connecting output pad srcpad of src to input pad dstpad
 * of dst. Both filters must belong to the same graph, the pads must be
 * free, and their media types must match. Returns 0 or an AVERROR code. */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136                   AVFilterContext *dst, unsigned dstpad)
140     av_assert0(src->graph);
141     av_assert0(dst->graph);
142     av_assert0(src->graph == dst->graph);
/* Reject out-of-range pad indices and pads that are already connected. */
144     if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145         src->outputs[srcpad]      || dst->inputs[dstpad])
146         return AVERROR(EINVAL);
148     if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149         av_log(src, AV_LOG_ERROR,
150                "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151                src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152                dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153         return AVERROR(EINVAL);
156     link = av_mallocz(sizeof(*link));
158         return AVERROR(ENOMEM);
/* Register the link on both endpoints and fill in its pad/type fields. */
160     src->outputs[srcpad] = dst->inputs[dstpad] = link;
164     link->srcpad  = &src->output_pads[srcpad];
165     link->dstpad  = &dst->input_pads[dstpad];
166     link->type    = src->output_pads[srcpad].type;
/* The format fields are shared for video/audio, so NONE must be -1 in both enums. */
167     av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169     ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/* Free a link and everything it owns (partial buffer, frame FIFO, frame
 * pool), then clear *link. */
174 void avfilter_link_free(AVFilterLink **link)
179     av_frame_free(&(*link)->partial_buf);
180     ff_framequeue_free(&(*link)->fifo);
181     ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
/* Mark a filter as ready for activation with at least the given priority;
 * an already-higher ready value is kept. */
186 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
188     filter->ready = FFMAX(filter->ready, priority);
192  * Clear frame_blocked_in on all outputs.
193  * This is necessary whenever something changes on input.
195 static void filter_unblock(AVFilterContext *filter)
199     for (i = 0; i < filter->nb_outputs; i++)
200         filter->outputs[i]->frame_blocked_in = 0;
/* Set the input-side status (e.g. EOF) of a link, as seen by the source.
 * Idempotent for the same status; asserts the status is set only once.
 * Clears pending frame requests/blocks and wakes up the destination. */
204 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
206     if (link->status_in == status)
208     av_assert0(!link->status_in);
209     link->status_in = status;
210     link->status_in_pts = pts;
211     link->frame_wanted_out = 0;
212     link->frame_blocked_in = 0;
213     filter_unblock(link->dst);
/* 200: same readiness priority as a status change elsewhere in this file. */
214     ff_filter_set_ready(link->dst, 200);
/* Set the output-side status of a link, as seen by the destination.
 * Must not be called while a frame is still wanted, and only once.
 * Updates the link's current pts (when known) and wakes the source. */
217 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
219     av_assert0(!link->frame_wanted_out);
220     av_assert0(!link->status_out);
221     link->status_out = status;
222     if (pts != AV_NOPTS_VALUE)
223         ff_update_link_current_pts(link, pts);
224     filter_unblock(link->dst);
225     ff_filter_set_ready(link->src, 200);
/* Splice filter 'filt' into an existing link: the old link is re-pointed
 * at filt's input pad filt_srcpad_idx, and a new link is created from
 * filt's output pad filt_dstpad_idx to the old destination. Any format
 * negotiation state already attached to the link is moved to the new
 * downstream link so it is not lost. */
228 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
229                            unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
232     unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
234     av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
235            "between the filter '%s' and the filter '%s'\n",
236            filt->name, link->src->name, link->dst->name);
/* Temporarily detach the old destination so avfilter_link() sees a free pad. */
238     link->dst->inputs[dstpad_idx] = NULL;
239     if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
240         /* failed to link output filter to new filter */
241         link->dst->inputs[dstpad_idx] = link;
245     /* re-hookup the link to the new destination filter we inserted */
247     link->dstpad = &filt->input_pads[filt_srcpad_idx];
248     filt->inputs[filt_srcpad_idx] = link;
250     /* if any information on supported media formats already exists on the
251      * link, we need to preserve that */
252     if (link->outcfg.formats)
253         ff_formats_changeref(&link->outcfg.formats,
254                              &filt->outputs[filt_dstpad_idx]->outcfg.formats);
255     if (link->outcfg.samplerates)
256         ff_formats_changeref(&link->outcfg.samplerates,
257                              &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
258     if (link->outcfg.channel_layouts)
259         ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
260                                      &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
/* Recursively configure all links feeding this filter: walk each input
 * link, configure its source filter first (detecting cycles through the
 * AVLINK_STARTINIT marker), run the pads' config_props() callbacks, and
 * fill in defaults (time base, aspect ratio, frame rate, hw frames ctx)
 * inherited from the upstream link. Returns 0 or an AVERROR code. */
265 int avfilter_config_links(AVFilterContext *filter)
267     int (*config_link)(AVFilterLink *);
271     for (i = 0; i < filter->nb_inputs; i ++) {
272         AVFilterLink *link = filter->inputs[i];
273         AVFilterLink *inlink;
276         if (!link->src || !link->dst) {
277             av_log(filter, AV_LOG_ERROR,
278                    "Not all input and output are properly linked (%d).\n", i);
279             return AVERROR(EINVAL);
/* First input of the upstream filter, used as the source of defaults below. */
282         inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
284         link->current_pts_us = AV_NOPTS_VALUE;
286         switch (link->init_state) {
/* Seeing STARTINIT again while descending means a cycle in the graph. */
289         case AVLINK_STARTINIT:
290             av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
293             link->init_state = AVLINK_STARTINIT;
/* Configure upstream before this link. */
295             if ((ret = avfilter_config_links(link->src)) < 0)
298             if (!(config_link = link->srcpad->config_props)) {
299                 if (link->src->nb_inputs != 1) {
300                     av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
301                                                     "with more than one input "
302                                                     "must set config_props() "
303                                                     "callbacks on all outputs\n");
304                     return AVERROR(EINVAL);
306             } else if ((ret = config_link(link)) < 0) {
307                 av_log(link->src, AV_LOG_ERROR,
308                        "Failed to configure output pad on %s\n",
/* Fill in media-type-specific defaults not set by config_props(). */
313             switch (link->type) {
314             case AVMEDIA_TYPE_VIDEO:
315                 if (!link->time_base.num && !link->time_base.den)
316                     link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
318                 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
319                     link->sample_aspect_ratio = inlink ?
320                         inlink->sample_aspect_ratio : (AVRational){1,1};
323                     if (!link->frame_rate.num && !link->frame_rate.den)
324                         link->frame_rate = inlink->frame_rate;
329                 } else if (!link->w || !link->h) {
330                     av_log(link->src, AV_LOG_ERROR,
331                            "Video source filters must set their output link's "
332                            "width and height\n");
333                     return AVERROR(EINVAL);
337             case AVMEDIA_TYPE_AUDIO:
339                     if (!link->time_base.num && !link->time_base.den)
340                         link->time_base = inlink->time_base;
/* Last resort: derive the time base from the sample rate. */
343                 if (!link->time_base.num && !link->time_base.den)
344                     link->time_base = (AVRational) {1, link->sample_rate};
/* Propagate the hw frames context for filters that are not hwframe-aware. */
347             if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
348                 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
349                 av_assert0(!link->hw_frames_ctx &&
350                            "should not be set by non-hwframe-aware filter");
351                 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
352                 if (!link->hw_frames_ctx)
353                     return AVERROR(ENOMEM);
/* Finally run the destination pad's config callback, if any. */
356             if ((config_link = link->dstpad->config_props))
357                 if ((ret = config_link(link)) < 0) {
358                     av_log(link->dst, AV_LOG_ERROR,
359                            "Failed to configure input pad on %s\n",
364             link->init_state = AVLINK_INIT;
/* Trace-log a link: endpoints, format, and video size or audio rate and
 * channel layout. 'end' selects newline termination. */
371 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
373     if (link->type == AVMEDIA_TYPE_VIDEO) {
375                 "link[%p s:%dx%d fmt:%s %s->%s]%s",
376                 link, link->w, link->h,
377                 av_get_pix_fmt_name(link->format),
378                 link->src ? link->src->filter->name : "",
379                 link->dst ? link->dst->filter->name : "",
/* Audio branch: render the channel layout into a local buffer first. */
383         av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
386                 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
387                 link, (int)link->sample_rate, buf,
388                 av_get_sample_fmt_name(link->format),
389                 link->src ? link->src->filter->name : "",
390                 link->dst ? link->dst->filter->name : "",
/* Request a frame on a link, for use by legacy (non-activate) filters
 * only. If the input side already reached a status (EOF/error) and no
 * frames are queued, the status is acknowledged on the output side;
 * otherwise the request is recorded and the source is marked ready. */
395 int ff_request_frame(AVFilterLink *link)
397     FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
/* Filters implementing activate() must not use this path. */
399     av_assert1(!link->dst->filter->activate);
400     if (link->status_out)
401         return link->status_out;
402     if (link->status_in) {
403         if (ff_framequeue_queued_frames(&link->fifo)) {
404             av_assert1(!link->frame_wanted_out);
405             av_assert1(link->dst->ready >= 300);
408         /* Acknowledge status change. Filters using ff_request_frame() will
409            handle the change automatically. Filters can also check the
410            status directly but none do yet. */
411         ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
412         return link->status_out;
415     link->frame_wanted_out = 1;
416     ff_filter_set_ready(link->src, 100);
/* Estimate the timestamp of a status (EOF) change on a filter's output,
 * expressed in link_time_base: take the minimum over inputs that already
 * acknowledged the same status; fall back to the raw status_in_pts of
 * all inputs (with a warning), and AV_NOPTS_VALUE if nothing is known. */
420 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
423     int64_t r = INT64_MAX;
425     for (i = 0; i < ctx->nb_inputs; i++)
426         if (ctx->inputs[i]->status_out == status)
427             r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
430     av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
431     for (i = 0; i < ctx->nb_inputs; i++)
432         r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
435     return AV_NOPTS_VALUE;
/* Forward a frame request into the source filter of a link, either via
 * its request_frame() callback or recursively through its first input.
 * A returned error other than EAGAIN is converted into a link status. */
438 static int ff_request_frame_to_filter(AVFilterLink *link)
442     FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
443     /* Assume the filter is blocked, let the method clear it if not */
444     link->frame_blocked_in = 1;
445     if (link->srcpad->request_frame)
446         ret = link->srcpad->request_frame(link);
447     else if (link->src->inputs[0])
448         ret = ff_request_frame(link->src->inputs[0]);
450     if (ret != AVERROR(EAGAIN) && ret != link->status_in)
451         ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
452     if (ret == AVERROR_EOF)
/* Variable names available inside the timeline 'enable' expression
 * (parsed by set_enable_expr() below). */
458 static const char *const var_names[] = {
/* Parse and install a new timeline 'enable' expression on the filter.
 * Fails with PATCHWELCOME if the filter does not declare timeline
 * support. On success the old expression string is replaced; the
 * var_values array is lazily allocated on first use. */
476 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
480     AVExpr *old = ctx->enable;
482     if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
483         av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
484                "with filter '%s'\n", ctx->filter->name);
485         return AVERROR_PATCHWELCOME;
488     expr_dup = av_strdup(expr);
490         return AVERROR(ENOMEM);
492     if (!ctx->var_values) {
493         ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
494         if (!ctx->var_values) {
496             return AVERROR(ENOMEM);
500     ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
501                         NULL, NULL, NULL, NULL, 0, ctx->priv);
503         av_log(ctx->priv, AV_LOG_ERROR,
504                "Error when evaluating the expression '%s' for enable\n",
/* Commit: drop the old expression string and keep the duplicate. */
511     av_free(ctx->enable_str);
512     ctx->enable_str = expr_dup;
/* Record the most recent timestamp seen on a link, both in link time base
 * and rescaled to microseconds, and re-sort the graph's age heap. */
516 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
518     if (pts == AV_NOPTS_VALUE)
520     link->current_pts = pts;
521     link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
522     /* TODO use duration */
523     if (link->graph && link->age_index >= 0)
524         ff_avfilter_graph_update_heap(link->graph, link);
/* Dispatch a runtime command to a filter: "ping" is answered directly,
 * "enable" re-parses the timeline expression, anything else is passed to
 * the filter's process_command() callback if it has one. */
527 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
529     if(!strcmp(cmd, "ping")){
530         char local_res[256] = {0};
/* Use the stack buffer when the caller provided no result buffer. */
534             res_len = sizeof(local_res);
536         av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
537         if (res == local_res)
538             av_log(filter, AV_LOG_INFO, "%s", res);
540     }else if(!strcmp(cmd, "enable")) {
541         return set_enable_expr(filter, arg);
542     }else if(filter->filter->process_command) {
543         return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
545     return AVERROR(ENOSYS);
/* Count the entries of a NULL-name-terminated pad array. */
548 int avfilter_pad_count(const AVFilterPad *pads)
555     for (count = 0; pads->name; count++)
/* AVClass item_name callback: instance name if set, else the filter's
 * registered name. */
560 static const char *default_filter_name(void *filter_ctx)
562     AVFilterContext *ctx = filter_ctx;
563     return ctx->name ? ctx->name : ctx->filter->name;
/* AVClass child_next callback: expose the filter's private context as
 * its single AVOptions child (when it has a priv class). */
566 static void *filter_child_next(void *obj, void *prev)
568     AVFilterContext *ctx = obj;
569     if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
574 #if FF_API_CHILD_CLASS_NEXT
/* Deprecated AVClass child_class_next callback: iterate over all
 * registered filters and return the priv_class following 'prev'. */
575 static const AVClass *filter_child_class_next(const AVClass *prev)
578     const AVFilter *f = NULL;
580     /* find the filter that corresponds to prev */
581     while (prev && (f = av_filter_iterate(&opaque)))
582         if (f->priv_class == prev)
585     /* could not find filter corresponding to prev */
589     /* find next filter with specific options */
590     while ((f = av_filter_iterate(&opaque)))
592             return f->priv_class;
/* AVClass child_class_iterate callback: return the next filter priv_class,
 * keeping the iteration state in *iter. */
598 static const AVClass *filter_child_class_iterate(void **iter)
602     while ((f = av_filter_iterate(iter)))
604             return f->priv_class;
609 #define OFFSET(x) offsetof(AVFilterContext, x)
610 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
/* TFLAGS: additionally settable at runtime (via process_command). */
611 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
/* Generic AVOptions shared by every filter instance. */
612 static const AVOption avfilter_options[] = {
613     { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
614         { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
615     { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
616     { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
617     { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
618         { .i64 = 0 }, 0, INT_MAX, FLAGS },
619     { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
620         OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
/* AVClass describing every AVFilterContext for the AVOptions system
 * and logging. */
624 static const AVClass avfilter_class = {
625     .class_name = "AVFilter",
626     .item_name  = default_filter_name,
627     .version    = LIBAVUTIL_VERSION_INT,
628     .category   = AV_CLASS_CATEGORY_FILTER,
629     .child_next = filter_child_next,
630 #if FF_API_CHILD_CLASS_NEXT
631     .child_class_next = filter_child_class_next,
633     .child_class_iterate = filter_child_class_iterate,
634     .option           = avfilter_options,
/* Default (single-threaded) execute callback: run all slice jobs
 * sequentially in the calling thread. */
637 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
638                            int *ret, int nb_jobs)
642     for (i = 0; i < nb_jobs; i++) {
643         int r = func(ctx, arg, i, nb_jobs);
/* Allocate and minimally initialize an AVFilterContext for the given
 * filter: priv data (with option defaults), internal state, and copies
 * of the filter's input/output pad arrays plus the matching link arrays.
 * Returns NULL on allocation or preinit failure (cleanup path below). */
650 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
652     AVFilterContext *ret;
658     ret = av_mallocz(sizeof(AVFilterContext));
662     ret->av_class = &avfilter_class;
663     ret->filter   = filter;
664     ret->name     = inst_name ? av_strdup(inst_name) : NULL;
665     if (filter->priv_size) {
666         ret->priv     = av_mallocz(filter->priv_size);
670     if (filter->preinit) {
671         if (filter->preinit(ret) < 0)
/* Apply AVOption defaults to the context and, if present, to priv. */
676     av_opt_set_defaults(ret);
677     if (filter->priv_class) {
678         *(const AVClass**)ret->priv = filter->priv_class;
679         av_opt_set_defaults(ret->priv);
682     ret->internal = av_mallocz(sizeof(*ret->internal));
685     ret->internal->execute = default_execute;
/* Duplicate the static pad descriptions so they can be modified per instance. */
687     ret->nb_inputs = avfilter_pad_count(filter->inputs);
688     if (ret->nb_inputs ) {
689         ret->input_pads   = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
690         if (!ret->input_pads)
692         memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
693         ret->inputs       = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
698     ret->nb_outputs = avfilter_pad_count(filter->outputs);
699     if (ret->nb_outputs) {
700         ret->output_pads  = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
701         if (!ret->output_pads)
703         memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
704         ret->outputs      = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* Error path: undo all allocations made so far. */
714     av_freep(&ret->inputs);
715     av_freep(&ret->input_pads);
717     av_freep(&ret->outputs);
718     av_freep(&ret->output_pads);
720     av_freep(&ret->priv);
721     av_freep(&ret->internal);
/* Detach a link from both endpoint filters, release its negotiated format
 * lists and hw frames reference, then free the link itself. */
726 static void free_link(AVFilterLink *link)
732         link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
734         link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
736     av_buffer_unref(&link->hw_frames_ctx);
738     ff_formats_unref(&link->incfg.formats);
739     ff_formats_unref(&link->outcfg.formats);
740     ff_formats_unref(&link->incfg.samplerates);
741     ff_formats_unref(&link->outcfg.samplerates);
742     ff_channel_layouts_unref(&link->incfg.channel_layouts);
743     ff_channel_layouts_unref(&link->outcfg.channel_layouts);
744     avfilter_link_free(&link);
/* Fully destroy a filter instance: remove it from its graph, run its
 * uninit() callback, free all links, option state, pads, pending
 * commands and the enable expression, then the context itself. */
747 void avfilter_free(AVFilterContext *filter)
755         ff_filter_graph_remove_filter(filter->graph, filter);
757     if (filter->filter->uninit)
758         filter->filter->uninit(filter);
760     for (i = 0; i < filter->nb_inputs; i++) {
761         free_link(filter->inputs[i]);
763     for (i = 0; i < filter->nb_outputs; i++) {
764         free_link(filter->outputs[i]);
/* av_opt_free releases option-allocated strings/dicts in priv. */
767     if (filter->filter->priv_class)
768         av_opt_free(filter->priv);
770     av_buffer_unref(&filter->hw_device_ctx);
772     av_freep(&filter->name);
773     av_freep(&filter->input_pads);
774     av_freep(&filter->output_pads);
775     av_freep(&filter->inputs);
776     av_freep(&filter->outputs);
777     av_freep(&filter->priv);
778     while(filter->command_queue){
779         ff_command_queue_pop(filter);
782     av_expr_free(filter->enable);
783     filter->enable = NULL;
784     av_freep(&filter->var_values);
785     av_freep(&filter->internal);
/* Number of threads this filter may use: its own limit if set, capped by
 * (or defaulting to) the graph-wide thread count. */
789 int ff_filter_get_nb_threads(AVFilterContext *ctx)
791     if (ctx->nb_threads > 0)
792         return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
793     return ctx->graph->nb_threads;
/* Parse a filter argument string of key=value pairs (':'-separated),
 * supporting positional shorthand for the filter's leading options.
 * Generic AVFilterContext options are applied directly; everything else
 * is set on priv and also collected into *options for later reporting. */
796 static int process_options(AVFilterContext *ctx, AVDictionary **options,
799     const AVOption *o = NULL;
801     char *av_uninit(parsed_key), *av_uninit(value);
809         const char *shorthand = NULL;
/* Advance to the next priv option usable as a positional shorthand. */
811             o = av_opt_next(ctx->priv, o);
813             if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
819         ret = av_opt_get_key_value(&args, "=", ":",
820                                    shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
821                                    &parsed_key, &value);
823             if (ret == AVERROR(EINVAL))
824                 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
826                 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* An explicit key ends shorthand mode for the remaining arguments. */
834             while ((o = av_opt_next(ctx->priv, o)));         /* discard all remaining shorthand */
839         av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
841         if (av_opt_find(ctx, key, NULL, 0, 0)) {
842             ret = av_opt_set(ctx, key, value, 0);
849             av_dict_set(options, key, value, 0);
850             if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
851                 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
852                     if (ret == AVERROR_OPTION_NOT_FOUND)
853                         av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
/* Generic process_command helper: apply 'cmd=arg' to the filter's priv
 * options, but only for options flagged as runtime-settable filtering
 * parameters; ENOSYS otherwise. */
869 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
870                               const char *arg, char *res, int res_len, int flags)
874     if (!ctx->filter->priv_class)
876     o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
878         return AVERROR(ENOSYS);
879     return av_opt_set(ctx->priv, cmd, arg, 0);
/* Initialize a filter from an options dictionary: apply generic context
 * options, decide the effective threading mode, apply priv-class options,
 * run the filter's init callback (init_opaque / init / init_dict), and
 * finally compile the 'enable' timeline expression if one was set. */
882 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
886     ret = av_opt_set_dict(ctx, options);
888         av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
/* Slice threading only if filter, graph and user settings all allow it. */
892     if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
893         ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
894         ctx->graph->internal->thread_execute) {
895         ctx->thread_type       = AVFILTER_THREAD_SLICE;
896         ctx->internal->execute = ctx->graph->internal->thread_execute;
898         ctx->thread_type = 0;
901     if (ctx->filter->priv_class) {
902         ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
904             av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
909     if (ctx->filter->init_opaque)
910         ret = ctx->filter->init_opaque(ctx, NULL);
911     else if (ctx->filter->init)
912         ret = ctx->filter->init(ctx);
913     else if (ctx->filter->init_dict)
914         ret = ctx->filter->init_dict(ctx, options);
916     if (ctx->enable_str) {
917         ret = set_enable_expr(ctx, ctx->enable_str);
/* Initialize a filter from an argument string: parse it into a dict via
 * process_options(), initialize through avfilter_init_dict(), and report
 * an error for any option left unconsumed. */
925 int avfilter_init_str(AVFilterContext *filter, const char *args)
927     AVDictionary *options = NULL;
928     AVDictionaryEntry *e;
/* Arguments were provided but this filter declares no options at all. */
932         if (!filter->filter->priv_class) {
933             av_log(filter, AV_LOG_ERROR, "This filter does not take any "
934                    "options, but options were provided: %s.\n", args);
935             return AVERROR(EINVAL);
938         ret = process_options(filter, &options, args);
943     ret = avfilter_init_dict(filter, &options);
947     if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
948         av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
949         ret = AVERROR_OPTION_NOT_FOUND;
954     av_dict_free(&options);
/* Accessor: name of the pad at pad_idx (AVFilterPad is opaque to users). */
959 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
961     return pads[pad_idx].name;
/* Accessor: media type of the pad at pad_idx. */
964 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
966     return pads[pad_idx].type;
/* Default filter_frame callback: pass the frame through unchanged to the
 * destination filter's first output. */
969 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
971     return ff_filter_frame(link->dst->outputs[0], frame);
/* Deliver one frame to the destination pad's filter_frame() callback,
 * handling the needs_writable flag, queued commands, and timeline
 * enable/disable evaluation. Takes ownership of the frame. */
974 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
976     int (*filter_frame)(AVFilterLink *, AVFrame *);
977     AVFilterContext *dstctx = link->dst;
978     AVFilterPad *dst = link->dstpad;
981     if (!(filter_frame = dst->filter_frame))
982         filter_frame = default_filter_frame;
984     if (dst->needs_writable) {
985         ret = ff_inlink_make_frame_writable(link, &frame);
990     ff_inlink_process_commands(link, frame);
991     dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
/* A timeline-disabled generic filter degrades to pass-through. */
993     if (dstctx->is_disabled &&
994         (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
995         filter_frame = default_filter_frame;
996     ret = filter_frame(link, frame);
997     link->frame_count_out++;
/* Error path: the frame was not consumed, free it here. */
1001     av_frame_free(&frame);
/* Submit a frame on a link: sanity-check that its properties match the
 * negotiated link properties, clear blocked/wanted flags, queue it in the
 * link FIFO and mark the destination ready. Takes ownership of the frame
 * (freed on error). */
1005 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1008     FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1010     /* Consistency checks */
1011     if (link->type == AVMEDIA_TYPE_VIDEO) {
/* These filters are known to legitimately change frame properties,
 * so the asserts are skipped for them. */
1012         if (strcmp(link->dst->filter->name, "buffersink") &&
1013             strcmp(link->dst->filter->name, "format") &&
1014             strcmp(link->dst->filter->name, "idet") &&
1015             strcmp(link->dst->filter->name, "null") &&
1016             strcmp(link->dst->filter->name, "scale")) {
1017             av_assert1(frame->format                 == link->format);
1018             av_assert1(frame->width               == link->w);
1019             av_assert1(frame->height               == link->h);
/* Audio consistency checks are hard errors, not asserts. */
1022         if (frame->format != link->format) {
1023             av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1026         if (frame->channels != link->channels) {
1027             av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1030         if (frame->channel_layout != link->channel_layout) {
1031             av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1034         if (frame->sample_rate != link->sample_rate) {
1035             av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1040     link->frame_blocked_in = link->frame_wanted_out = 0;
1041     link->frame_count_in++;
1042     filter_unblock(link->dst);
1043     ret = ff_framequeue_add(&link->fifo, frame);
1045         av_frame_free(&frame);
/* 300: frames-queued priority, higher than status/request changes. */
1048     ff_filter_set_ready(link->dst, 300);
1052     av_frame_free(&frame);
1053     return AVERROR_PATCHWELCOME;
/* True when the link FIFO holds frames and at least 'min' samples are
 * queued (or another visible-elsewhere condition holds). */
1056 static int samples_ready(AVFilterLink *link, unsigned min)
1058     return ff_framequeue_queued_frames(&link->fifo) &&
1059            (ff_framequeue_queued_samples(&link->fifo) >= min ||
/* Extract between min and max audio samples from the link FIFO into
 * *rframe. Fast path: hand out the first queued frame unchanged when it
 * already fits the [min, max] range. Otherwise allocate a new buffer,
 * concatenate whole queued frames into it, and partially consume the
 * next frame (via skip_samples) for the remainder. */
1063 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1066     AVFrame *frame0, *frame, *buf;
1067     unsigned nb_samples, nb_frames, i, p;
1070     /* Note: this function relies on no format changes and must only be
1071        called with enough samples. */
1072     av_assert1(samples_ready(link, link->min_samples));
1073     frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1074     if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1075         *rframe = ff_framequeue_take(&link->fifo);
/* Count how many whole frames fit below the max-samples budget. */
1081         if (nb_samples + frame->nb_samples > max) {
1082             if (nb_samples < min)
1086         nb_samples += frame->nb_samples;
1088         if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1090         frame = ff_framequeue_peek(&link->fifo, nb_frames);
1093     buf = ff_get_audio_buffer(link, nb_samples);
1095         return AVERROR(ENOMEM);
/* Metadata (pts etc.) is taken from the first source frame. */
1096     ret = av_frame_copy_props(buf, frame0);
1098         av_frame_free(&buf);
1101     buf->pts = frame0->pts;
/* Copy the whole frames, freeing each as it is consumed. */
1104     for (i = 0; i < nb_frames; i++) {
1105         frame = ff_framequeue_take(&link->fifo);
1106         av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1107                         frame->nb_samples, link->channels, link->format);
1108         p += frame->nb_samples;
1109         av_frame_free(&frame);
/* Copy the partial remainder from the next frame without dequeuing it. */
1111     if (p < nb_samples) {
1112         unsigned n = nb_samples - p;
1113         frame = ff_framequeue_peek(&link->fifo, 0);
1114         av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1115                         link->channels, link->format);
1116         ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/* Dequeue one frame (or a min/max-sized batch of samples) from a link's
 * FIFO and feed it to the destination filter via ff_filter_frame_framed(),
 * converting errors into an output-side link status. */
1123 static int ff_filter_frame_to_filter(AVFilterLink *link)
1125     AVFrame *frame = NULL;
1126     AVFilterContext *dst = link->dst;
1129     av_assert1(ff_framequeue_queued_frames(&link->fifo));
1130     ret = link->min_samples ?
1131           ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1132           ff_inlink_consume_frame(link, &frame);
1138     /* The filter will soon have received a new frame, that may allow it to
1139        produce one or more: unblock its outputs. */
1140     filter_unblock(dst);
1141     /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1142        before the frame; ff_filter_frame_framed() will re-increment it. */
1143     link->frame_count_out--;
1144     ret = ff_filter_frame_framed(link, frame);
1145     if (ret < 0 && ret != link->status_out) {
1146         ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1148     /* Run once again, to see if several frames were available, or if
1149        the input status has also changed, or any other reason. */
1150     ff_filter_set_ready(dst, 300);
/* Propagate an input status change (EOF/error) through a legacy filter:
 * repeatedly request frames on its still-open outputs until the input
 * status is acknowledged, or declare the input uninteresting once every
 * output has closed. */
1155 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1157     unsigned out = 0, progress = 0;
1160     av_assert0(!in->status_out);
1161     if (!filter->nb_outputs) {
1162         /* not necessary with the current API and sinks */
1165     while (!in->status_out) {
1166         if (!filter->outputs[out]->status_in) {
1168             ret = ff_request_frame_to_filter(filter->outputs[out]);
/* Cycle round-robin over the outputs. */
1172         if (++out == filter->nb_outputs) {
1174                 /* Every output already closed: input no longer interesting
1175                    (example: overlay in shortest mode, other input closed). */
1176                 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1183     ff_filter_set_ready(filter, 200);
/* Default activate() for legacy filters, in priority order:
 * 1. an input has enough queued samples -> filter a frame;
 * 2. an input changed status -> forward the status change;
 * 3. an output wants a frame and is not blocked -> forward the request.
 * Otherwise report FFERROR_NOT_READY. */
1187 static int ff_filter_activate_default(AVFilterContext *filter)
1191     for (i = 0; i < filter->nb_inputs; i++) {
1192         if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1193             return ff_filter_frame_to_filter(filter->inputs[i]);
1196     for (i = 0; i < filter->nb_inputs; i++) {
1197         if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1198             av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1199             return forward_status_change(filter, filter->inputs[i]);
1202     for (i = 0; i < filter->nb_outputs; i++) {
1203         if (filter->outputs[i]->frame_wanted_out &&
1204             !filter->outputs[i]->frame_blocked_in) {
1205             return ff_request_frame_to_filter(filter->outputs[i]);
1208     return FFERROR_NOT_READY;
1212 Filter scheduling and activation
1214 When a filter is activated, it must:
1215 - if possible, output a frame;
1216 - else, if relevant, forward the input status change;
1217 - else, check outputs for wanted frames and forward the requests.
1219 The following AVFilterLink fields are used for activation:
1223 This field indicates if a frame is needed on this input of the
1224 destination filter. A positive value indicates that a frame is needed
1225 to process queued frames or internal data or to satisfy the
1226 application; a zero value indicates that a frame is not especially
1227 needed but could be processed anyway; a negative value indicates that a
1228 frame would just be queued.
1230 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1231 when requested by the application through a specific API or when it is
1232 set on one of the outputs.
1234 It is cleared when a frame is sent from the source using
1237 It is also cleared when a status change is sent from the source using
1238 ff_avfilter_link_set_in_status().
1242       This field means that the source filter cannot generate a frame as is.
1243 Its goal is to avoid repeatedly calling the request_frame() method on
1246 It is set by the framework on all outputs of a filter before activating it.
1248 It is automatically cleared by ff_filter_frame().
1250 It is also automatically cleared by ff_avfilter_link_set_in_status().
1252 It is also cleared on all outputs (using filter_unblock()) when
1253 something happens on an input: processing a frame or changing the
1258 Contains the frames queued on a filter input. If it contains frames and
1259 frame_wanted_out is not set, then the filter can be activated. If that
1260       results in the filter not being able to use these frames, the filter must set
1261 frame_wanted_out to ask for more frames.
1263 - status_in and status_in_pts:
1265 Status (EOF or error code) of the link and timestamp of the status
1266 change (in link time base, same as frames) as seen from the input of
1267 the link. The status change is considered happening after the frames
1270 It is set by the source filter using ff_avfilter_link_set_in_status().
1274 Status of the link as seen from the output of the link. The status
1275 change is considered having already happened.
1277 It is set by the destination filter using
1278 ff_avfilter_link_set_out_status().
1280 Filters are activated according to the ready field, set using the
1281 ff_filter_set_ready(). Eventually, a priority queue will be used.
1282 ff_filter_set_ready() is called whenever anything could cause progress to
1283 be possible. Marking a filter ready when it is not is not a problem,
1284 except for the small overhead it causes.
1286 Conditions that cause a filter to be marked ready are:
1288 - frames added on an input link;
1290 - changes in the input or output status of an input link;
1292 - requests for a frame on an output link;
1294 - after any actual processing using the legacy methods (filter_frame(),
1295 and request_frame() to acknowledge status changes), to run once more
1296 and check if enough input was present for several frames.
1298 Examples of scenarios to consider:
1300 - buffersrc: activate if frame_wanted_out to notify the application;
1301 activate when the application adds a frame to push it immediately.
1303 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1305 - concat (not at stitch points): can process a frame on any output.
1306 Activate if frame_wanted_out on output to forward on the corresponding
1307 input. Activate when a frame is present on input to process it
1310 - framesync: needs at least one frame on each input; extra frames on the
1311 wrong input will accumulate. When a frame is first added on one input,
1312 set frame_wanted_out<0 on it to avoid getting more (would trigger
1313 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1315 Activation of old filters:
1317 In order to activate a filter implementing the legacy filter_frame() and
1318 request_frame() methods, perform the first possible of the following
1321 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1322 frame and call filter_frame().
1324 Rationale: filter frames as soon as possible instead of leaving them
1325 queued; frame_wanted_out < 0 is not possible since the old API does not
1326 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1327 when min_samples > 0 and there are not enough samples queued.
1329 - If an input has status_in set but not status_out, try to call
1330 request_frame() on one of the outputs in the hope that it will trigger
1331 request_frame() on the input with status_in and acknowledge it. This is
1332 awkward and fragile, filters with several inputs or outputs should be
1333 updated to direct activation as soon as possible.
1335 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1338 Rationale: checking frame_blocked_in is necessary to avoid requesting
1339 repeatedly on a blocked input if another is not blocked (example:
1340 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
/* Run one activation step on a filter: dispatch to the filter's own
 * activate() callback when it provides one, otherwise fall back to the
 * framework's default activation (legacy filter_frame()/request_frame()
 * handling, see the scheduling description above).
 * NOTE(review): the handling after the FFERROR_NOT_READY check and the
 * final return lie outside this excerpt. */
1343 int ff_filter_activate(AVFilterContext *filter)
1347     /* Generic timeline support is not yet implemented but should be easy */
      /* A filter may not both declare generic timeline support and supply its
       * own activate() callback: the framework cannot apply the enable
       * expression around a custom activation. */
1348 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1349 filter->filter->activate));
1351 ret = filter->filter->activate ? filter->filter->activate(filter) :
1352 ff_filter_activate_default(filter);
      /* FFERROR_NOT_READY means "nothing to do right now", not an error. */
1353 if (ret == FFERROR_NOT_READY)
/* Test and acknowledge a status (EOF or error) change on an input link.
 * On return, *rstatus is 0 if no status change must be acted on yet,
 * otherwise the status code; *rpts is the link's current timestamp.
 * Acknowledging copies status_in to status_out, marking the change as
 * having been processed by the destination filter (see the status_in /
 * status_out description above). */
1358 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1360 *rpts = link->current_pts;
      /* Frames still queued must be consumed before the status change
       * becomes visible: the change happens after the queued frames. */
1361 if (ff_framequeue_queued_frames(&link->fifo))
1362 return *rstatus = 0;
      /* Already acknowledged earlier: just report the same status again. */
1363 if (link->status_out)
1364 return *rstatus = link->status_out;
      /* No status change pending at all. */
1365 if (!link->status_in)
1366 return *rstatus = 0;
      /* Acknowledge: propagate the input-side status to the output side and
       * advance the link timestamp to the status change's timestamp. */
1367 *rstatus = link->status_out = link->status_in;
1368 ff_update_link_current_pts(link, link->status_in_pts);
1369 *rpts = link->current_pts;
/* Return the number of frames currently queued on this input link's FIFO. */
1373 size_t ff_inlink_queued_frames(AVFilterLink *link)
1375 return ff_framequeue_queued_frames(&link->fifo);
/* Return nonzero if at least one frame is queued and ready to be consumed
 * on this input link. */
1378 int ff_inlink_check_available_frame(AVFilterLink *link)
1380 return ff_framequeue_queued_frames(&link->fifo) > 0;
/* Return the total number of audio samples queued on this input link's
 * FIFO, summed over all queued frames. */
1383 int ff_inlink_queued_samples(AVFilterLink *link)
1385 return ff_framequeue_queued_samples(&link->fifo);
/* Return nonzero if at least min samples are queued, or — when the input
 * side has already signalled a status change (EOF/error) — if any samples
 * at all remain, so the trailing partial batch can still be consumed. */
1388 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1390 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1392 return samples >= min || (link->status_in && samples);
/* Common bookkeeping applied whenever a frame is consumed from an input
 * link: advance the link timestamp, run queued process commands due at the
 * frame's time, re-evaluate the timeline enable expression to set the
 * destination's is_disabled flag, and count the frame. */
1395 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1397 ff_update_link_current_pts(link, frame->pts);
1398 ff_inlink_process_commands(link, frame);
1399 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1400 link->frame_count_out++;
/* Take the next whole frame from the input link's FIFO into *rframe.
 * Returns 0 (leaving *rframe untouched) when no frame is available.
 * Ownership of the frame is transferred to the caller. */
1403 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1408 if (!ff_inlink_check_available_frame(link))
      /* If a previous partial-samples read left the head frame partially
       * consumed, route through the samples path so the remaining samples
       * of that frame are handled consistently. */
1411 if (link->fifo.samples_skipped) {
1412 frame = ff_framequeue_peek(&link->fifo, 0);
1413 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1416 frame = ff_framequeue_take(&link->fifo);
1417 consume_update(link, frame);
/* Take between min and max queued audio samples from the input link into a
 * single frame at *rframe. Returns 0 when fewer than min samples are
 * available (unless EOF allows a short final read, see below).
 * Ownership of the produced frame is transferred to the caller. */
1422 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1430 if (!ff_inlink_check_available_samples(link, min))
      /* After an input-side status change (EOF) no more samples will arrive:
       * lower min so the last, possibly short, batch can still be taken. */
1432 if (link->status_in)
1433 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1434 ret = take_samples(link, min, max, &frame);
1437 consume_update(link, frame);
/* Return a pointer to the idx-th queued frame on the input link without
 * removing it from the FIFO; the link retains ownership. */
1442 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1444 return ff_framequeue_peek(&link->fifo, idx);
/* Ensure *rframe is writable, replacing it with a freshly allocated copy
 * when it is not. On success *rframe points to a writable frame (possibly
 * the original); on failure a negative AVERROR is returned and the
 * original frame in *rframe is left untouched. */
1447 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1449 AVFrame *frame = *rframe;
      /* Already sole owner of the buffers: nothing to copy. */
1453 if (av_frame_is_writable(frame))
1455 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
      /* Allocate a new buffer matching the link's media type. */
1457 switch (link->type) {
1458 case AVMEDIA_TYPE_VIDEO:
1459 out = ff_get_video_buffer(link, link->w, link->h);
1461 case AVMEDIA_TYPE_AUDIO:
1462 out = ff_get_audio_buffer(link, frame->nb_samples);
      /* Neither video nor audio: cannot copy this frame type. */
1465 return AVERROR(EINVAL);
1468 return AVERROR(ENOMEM);
      /* Carry over metadata (pts, side data, flags, ...) before the pixel /
       * sample data itself. */
1470 ret = av_frame_copy_props(out, frame);
1472 av_frame_free(&out);
      /* Copy the actual payload into the new writable buffers. */
1476 switch (link->type) {
1477 case AVMEDIA_TYPE_VIDEO:
1478 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1479 frame->format, frame->width, frame->height);
1481 case AVMEDIA_TYPE_AUDIO:
1482 av_samples_copy(out->extended_data, frame->extended_data,
1483 0, 0, frame->nb_samples,
      /* link->type was already validated above, so this is unreachable. */
1488 av_assert0(!"reached");
      /* Release the original; the caller now owns the writable copy. */
1491 av_frame_free(&frame);
/* Execute and pop every queued filter command whose scheduled time (in
 * seconds) is at or before the given frame's timestamp. */
1496 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1498 AVFilterCommand *cmd = link->dst->command_queue;
      /* cmd->time is in seconds; convert the frame pts via the link time
       * base for comparison. */
1500 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1501 av_log(link->dst, AV_LOG_DEBUG,
1502 "Processing command time:%f command:%s arg:%s\n",
1503 cmd->time, cmd->command, cmd->arg);
1504 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
      /* Pop the executed command and advance to the new queue head. */
1505 ff_command_queue_pop(link->dst);
1506 cmd= link->dst->command_queue;
/* Evaluate the destination filter's timeline "enable" expression at the
 * given frame. Returns nonzero when the filter should be enabled for this
 * frame. Filters without an enable expression are always enabled. */
1511 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1513 AVFilterContext *dstctx = link->dst;
1514 int64_t pts = frame->pts;
1515 int64_t pos = frame->pkt_pos;
      /* No enable expression configured: filter is unconditionally enabled. */
1517 if (!dstctx->enable_str)
      /* Expose per-frame variables to the expression: frame count, time in
       * seconds, dimensions and byte position (NAN when unknown). */
1520 dstctx->var_values[VAR_N] = link->frame_count_out;
1521 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1522 dstctx->var_values[VAR_W] = link->w;
1523 dstctx->var_values[VAR_H] = link->h;
1524 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
      /* Treat any result with magnitude >= 0.5 as "enabled". */
1526 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
/* Ask the source of this link for a frame: set frame_wanted_out (see the
 * scheduling description above) and schedule the source filter for
 * activation. Must not be called once a status change is pending. */
1529 void ff_inlink_request_frame(AVFilterLink *link)
1531 av_assert1(!link->status_in);
1532 av_assert1(!link->status_out);
1533 link->frame_wanted_out = 1;
      /* 100 is the readiness priority; higher values activate sooner. */
1534 ff_filter_set_ready(link->src, 100);
/* Set a status (EOF or error) on a link from the destination side: the
 * destination will not consume anything more, so pending requests and
 * blocks are cleared and all queued frames are discarded. No-op if an
 * output-side status is already set. */
1537 void ff_inlink_set_status(AVFilterLink *link, int status)
1539 if (link->status_out)
      /* The destination no longer wants frames and the source need not stay
       * blocked: clear both scheduling flags. */
1541 link->frame_wanted_out = 0;
1542 link->frame_blocked_in = 0;
1543 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
      /* Drop frames that will never be consumed. */
1544 while (ff_framequeue_queued_frames(&link->fifo)) {
1545 AVFrame *frame = ff_framequeue_take(&link->fifo);
1546 av_frame_free(&frame);
      /* Mirror the status on the input side too, unless the source already
       * set one of its own. */
1548 if (!link->status_in)
1549 link->status_in = status;
/* Return the status set on the link's input side (status_in), i.e. the
 * status as seen from the output pad of the filter that feeds this link;
 * 0 when no status change has been signalled. */
1552 int ff_outlink_get_status(AVFilterLink *link)
1554 return link->status_in;
/* Public accessor for the AVClass of AVFilterContext, used for AVOption
 * handling and logging. */
1557 const AVClass *avfilter_get_class(void)
1559 return &avfilter_class;
1562 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1563 int default_pool_size)
1565 AVHWFramesContext *frames;
1567 // Must already be set by caller.
1568 av_assert0(link->hw_frames_ctx);
1570 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1572 if (frames->initial_pool_size == 0) {
1573 // Dynamic allocation is necessarily supported.
1574 } else if (avctx->extra_hw_frames >= 0) {
1575 frames->initial_pool_size += avctx->extra_hw_frames;
1577 frames->initial_pool_size = default_pool_size;