3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Human-readable FFmpeg version string embedded in the library binary. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
/**
 * Trace-log an AVFrame reference: buffer/data pointers, linesizes and
 * timestamps, followed by video- or audio-specific fields.
 * NOTE(review): several interior lines (braces, the head of the first
 * ff_tlog() call, the audio argument list) are elided in this capture.
 */
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
/* video branch: aspect ratio, dimensions, interlacing flag, picture type */
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
/* audio branch: emitted only when the frame carries samples */
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
/**
 * Pop the head of a filter's pending command queue and free it.
 * NOTE(review): lines freeing c->arg and c itself appear elided in this
 * capture — confirm against the full source before assuming a leak.
 */
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
/**
 * Insert a new pad (and its NULL link slot) at position idx in a filter's
 * pad/link arrays, growing both arrays by one and shifting later entries up.
 * padidx_off is the byte offset of the srcpad/dstpad index field inside
 * AVFilterLink, fixed up for every shifted link.
 * NOTE(review): the signature's final parameter line (the newpad argument)
 * and the assignment of the realloc'd arrays back into *pads / *links are
 * elided in this capture.
 */
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
/* clamp idx so insertion past the end becomes an append */
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
/* open a hole at idx, copy the new pad in, clear the new link slot */
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
/* every link after the insertion point now sits one pad index higher */
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
/**
 * Link output pad srcpad of src to input pad dstpad of dst.
 * Fails with EINVAL on out-of-range pads, already-connected pads, or a
 * media-type mismatch; both filters must belong to the same graph.
 * NOTE(review): the local declaration of `link`, the NULL check after
 * av_mallocz(), the src/dst assignments and the return are elided in this
 * capture.
 */
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
/* reject invalid pad indices and pads that already carry a link */
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
/* the same link object is referenced from both endpoints */
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
/* format fields rely on -1 meaning "none" for both pix and sample fmts */
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
/**
 * Free a link and the frame fifo / partial buffer / frame pool it owns,
 * then clear *link.
 * NOTE(review): the NULL guard and the final av_freep(link) are elided in
 * this capture.
 */
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
186 #if FF_API_FILTER_GET_SET
/* Deprecated accessor kept under FF_API_FILTER_GET_SET: returns the
 * negotiated channel count of the link.
 * NOTE(review): braces and the matching #endif are elided in this capture. */
187 int avfilter_link_get_channels(AVFilterLink *link)
189 return link->channels;
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
195 filter->ready = FFMAX(filter->ready, priority);
199 * Clear frame_blocked_in on all outputs.
200 * This is necessary whenever something changes on input.
202 static void filter_unblock(AVFilterContext *filter)
206 for (i = 0; i < filter->nb_outputs; i++)
207 filter->outputs[i]->frame_blocked_in = 0;
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
213 if (link->status_in == status)
215 av_assert0(!link->status_in);
216 link->status_in = status;
217 link->status_in_pts = pts;
218 link->frame_wanted_out = 0;
219 link->frame_blocked_in = 0;
220 filter_unblock(link->dst);
221 ff_filter_set_ready(link->dst, 200);
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
226 av_assert0(!link->frame_wanted_out);
227 av_assert0(!link->status_out);
228 link->status_out = status;
229 if (pts != AV_NOPTS_VALUE)
230 ff_update_link_current_pts(link, pts);
231 filter_unblock(link->dst);
232 ff_filter_set_ready(link->src, 200);
235 void avfilter_link_set_closed(AVFilterLink *link, int closed)
237 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
/**
 * Splice filter `filt` into an existing link: the old link is re-pointed at
 * filt's input pad filt_srcpad_idx, and a new link is created from filt's
 * output pad filt_dstpad_idx to the original destination.  Pre-negotiated
 * format lists on the old link are migrated to the new downstream link.
 * NOTE(review): the declaration of `ret`, the line re-pointing link->dst to
 * filt, and the final return are elided in this capture.
 */
240 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
241 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
244 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
246 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
247 "between the filter '%s' and the filter '%s'\n",
248 filt->name, link->src->name, link->dst->name);
/* detach the destination side, then link filt's output in its place */
250 link->dst->inputs[dstpad_idx] = NULL;
251 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
252 /* failed to link output filter to new filter */
253 link->dst->inputs[dstpad_idx] = link;
257 /* re-hookup the link to the new destination filter we inserted */
259 link->dstpad = &filt->input_pads[filt_srcpad_idx];
260 filt->inputs[filt_srcpad_idx] = link;
262 /* if any information on supported media formats already exists on the
263 * link, we need to preserve that */
264 if (link->outcfg.formats)
265 ff_formats_changeref(&link->outcfg.formats,
266 &filt->outputs[filt_dstpad_idx]->outcfg.formats);
267 if (link->outcfg.samplerates)
268 ff_formats_changeref(&link->outcfg.samplerates,
269 &filt->outputs[filt_dstpad_idx]->outcfg.samplerates);
270 if (link->outcfg.channel_layouts)
271 ff_channel_layouts_changeref(&link->outcfg.channel_layouts,
272 &filt->outputs[filt_dstpad_idx]->outcfg.channel_layouts);
/**
 * Recursively configure all links feeding `filter`: walk each input link,
 * configure the upstream filter first, run the source pad's config_props,
 * fill in defaults (time base, SAR, frame rate, audio time base) inherited
 * from the upstream link, propagate hw_frames_ctx through non-hwframe-aware
 * filters, then run the destination pad's config_props.
 * AVLINK_STARTINIT marks a link currently being initialized so a circular
 * chain is detected instead of recursing forever.
 * NOTE(review): many interior lines (declarations of i/ret, case labels for
 * AVLINK_INIT/AVLINK_UNINIT, several closing braces and returns) are elided
 * in this capture.
 */
277 int avfilter_config_links(AVFilterContext *filter)
279 int (*config_link)(AVFilterLink *);
283 for (i = 0; i < filter->nb_inputs; i ++) {
284 AVFilterLink *link = filter->inputs[i];
285 AVFilterLink *inlink;
288 if (!link->src || !link->dst) {
289 av_log(filter, AV_LOG_ERROR,
290 "Not all input and output are properly linked (%d).\n", i);
291 return AVERROR(EINVAL);
/* first input of the upstream filter, used as the source of defaults */
294 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
296 link->current_pts_us = AV_NOPTS_VALUE;
298 switch (link->init_state) {
301 case AVLINK_STARTINIT:
302 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
/* mark in-progress before recursing so cycles are caught above */
305 link->init_state = AVLINK_STARTINIT;
307 if ((ret = avfilter_config_links(link->src)) < 0)
310 if (!(config_link = link->srcpad->config_props)) {
311 if (link->src->nb_inputs != 1) {
312 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
313 "with more than one input "
314 "must set config_props() "
315 "callbacks on all outputs\n");
316 return AVERROR(EINVAL);
318 } else if ((ret = config_link(link)) < 0) {
319 av_log(link->src, AV_LOG_ERROR,
320 "Failed to configure output pad on %s\n",
325 switch (link->type) {
326 case AVMEDIA_TYPE_VIDEO:
327 if (!link->time_base.num && !link->time_base.den)
328 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
330 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
331 link->sample_aspect_ratio = inlink ?
332 inlink->sample_aspect_ratio : (AVRational){1,1};
335 if (!link->frame_rate.num && !link->frame_rate.den)
336 link->frame_rate = inlink->frame_rate;
341 } else if (!link->w || !link->h) {
342 av_log(link->src, AV_LOG_ERROR,
343 "Video source filters must set their output link's "
344 "width and height\n");
345 return AVERROR(EINVAL);
349 case AVMEDIA_TYPE_AUDIO:
351 if (!link->time_base.num && !link->time_base.den)
352 link->time_base = inlink->time_base;
/* fall back to 1/sample_rate when no upstream time base exists */
355 if (!link->time_base.num && !link->time_base.den)
356 link->time_base = (AVRational) {1, link->sample_rate};
/* pass hw frames context through filters that are not hwframe-aware */
359 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
360 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
361 av_assert0(!link->hw_frames_ctx &&
362 "should not be set by non-hwframe-aware filter");
363 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
364 if (!link->hw_frames_ctx)
365 return AVERROR(ENOMEM);
368 if ((config_link = link->dstpad->config_props))
369 if ((ret = config_link(link)) < 0) {
370 av_log(link->dst, AV_LOG_ERROR,
371 "Failed to configure input pad on %s\n",
376 link->init_state = AVLINK_INIT;
/**
 * Trace-log a link: size/format and endpoint filter names for video,
 * rate/channel-layout/format for audio.
 * NOTE(review): the ff_tlog() call heads, the audio-branch `else` with the
 * buf declaration, and closing braces are elided in this capture.
 */
383 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
385 if (link->type == AVMEDIA_TYPE_VIDEO) {
387 "link[%p s:%dx%d fmt:%s %s->%s]%s",
388 link, link->w, link->h,
389 av_get_pix_fmt_name(link->format),
390 link->src ? link->src->filter->name : "",
391 link->dst ? link->dst->filter->name : "",
/* audio branch: render the channel layout into a local string first */
395 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
398 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399 link, (int)link->sample_rate, buf,
400 av_get_sample_fmt_name(link->format),
401 link->src ? link->src->filter->name : "",
402 link->dst ? link->dst->filter->name : "",
/**
 * Request a frame on a link from the destination side: if the input status
 * is already set and no frames remain queued, acknowledge it as the output
 * status; otherwise flag frame_wanted_out and schedule the source filter.
 * Only valid for destination filters using the legacy (non-activate) API.
 * NOTE(review): several `return` lines and closing braces are elided in
 * this capture.
 */
407 int ff_request_frame(AVFilterLink *link)
409 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
411 av_assert1(!link->dst->filter->activate);
412 if (link->status_out)
413 return link->status_out;
414 if (link->status_in) {
415 if (ff_framequeue_queued_frames(&link->fifo)) {
416 av_assert1(!link->frame_wanted_out);
417 av_assert1(link->dst->ready >= 300);
420 /* Acknowledge status change. Filters using ff_request_frame() will
421 handle the change automatically. Filters can also check the
422 status directly but none do yet. */
423 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
424 return link->status_out;
427 link->frame_wanted_out = 1;
428 ff_filter_set_ready(link->src, 100);
/**
 * Estimate the timestamp (in link_time_base) at which `status` took effect,
 * as the minimum over matching inputs' current pts; falls back to the
 * inputs' status_in_pts with a warning, and to AV_NOPTS_VALUE if nothing
 * usable is found.
 * NOTE(review): the `unsigned i;` declaration, the `r != INT64_MAX` checks
 * and the intermediate `return r;` lines are elided in this capture.
 */
432 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
435 int64_t r = INT64_MAX;
437 for (i = 0; i < ctx->nb_inputs; i++)
438 if (ctx->inputs[i]->status_out == status)
439 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
442 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
443 for (i = 0; i < ctx->nb_inputs; i++)
444 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
447 return AV_NOPTS_VALUE;
/**
 * Forward a frame request to the source filter of `link`, via its
 * request_frame callback or, failing that, by requesting from the source's
 * first input.  A failure other than EAGAIN becomes the link's input
 * status.
 * NOTE(review): the declaration of `ret`, the EOF-to-0 translation after
 * the last visible line, and the final return are elided in this capture.
 */
450 static int ff_request_frame_to_filter(AVFilterLink *link)
454 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
455 /* Assume the filter is blocked, let the method clear it if not */
456 link->frame_blocked_in = 1;
457 if (link->srcpad->request_frame)
458 ret = link->srcpad->request_frame(link);
459 else if (link->src->inputs[0])
460 ret = ff_request_frame(link->src->inputs[0]);
462 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
463 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
464 if (ret == AVERROR_EOF)
/* Variable names available to the timeline "enable" expression.
 * NOTE(review): the array elements and terminator are elided in this
 * capture. */
470 static const char *const var_names[] = {
/**
 * Parse `expr` as the filter's timeline "enable" expression, allocating the
 * per-filter variable array on first use, and store both the compiled
 * expression and a duplicate of the string on success.
 * Fails with AVERROR_PATCHWELCOME when the filter does not declare timeline
 * support.
 * NOTE(review): the declarations of ret/expr_dup, the NULL checks after
 * av_strdup, the error-path frees, the replacement of the old expression
 * and the final return are elided in this capture.
 */
488 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
492 AVExpr *old = ctx->enable;
494 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
495 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
496 "with filter '%s'\n", ctx->filter->name);
497 return AVERROR_PATCHWELCOME;
500 expr_dup = av_strdup(expr);
502 return AVERROR(ENOMEM);
504 if (!ctx->var_values) {
505 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
506 if (!ctx->var_values) {
508 return AVERROR(ENOMEM);
512 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
513 NULL, NULL, NULL, NULL, 0, ctx->priv);
515 av_log(ctx->priv, AV_LOG_ERROR,
516 "Error when evaluating the expression '%s' for enable\n",
/* keep the string form so it can be reported/reapplied later */
523 av_free(ctx->enable_str);
524 ctx->enable_str = expr_dup;
528 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
530 if (pts == AV_NOPTS_VALUE)
532 link->current_pts = pts;
533 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
534 /* TODO use duration */
535 if (link->graph && link->age_index >= 0)
536 ff_avfilter_graph_update_heap(link->graph, link);
/**
 * Dispatch a runtime command to a filter: "ping" answers with a pong string,
 * "enable" re-parses the timeline expression, anything else is forwarded to
 * the filter's process_command callback; ENOSYS if it has none.
 * NOTE(review): the lines wiring `res` to `local_res` when the caller passed
 * no buffer, and the `return 0;` of the ping branch, are elided in this
 * capture.
 */
539 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
541 if(!strcmp(cmd, "ping")){
542 char local_res[256] = {0};
546 res_len = sizeof(local_res);
548 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
549 if (res == local_res)
550 av_log(filter, AV_LOG_INFO, "%s", res);
552 }else if(!strcmp(cmd, "enable")) {
553 return set_enable_expr(filter, arg);
554 }else if(filter->filter->process_command) {
555 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
557 return AVERROR(ENOSYS);
/**
 * Count the pads in a NULL-name-terminated AVFilterPad array.
 * NOTE(review): the NULL-array guard, the `count` declaration, the loop's
 * pad increment and the return are elided in this capture.
 */
560 int avfilter_pad_count(const AVFilterPad *pads)
567 for (count = 0; pads->name; count++)
572 static const char *default_filter_name(void *filter_ctx)
574 AVFilterContext *ctx = filter_ctx;
575 return ctx->name ? ctx->name : ctx->filter->name;
/**
 * AVClass child_next callback: expose the filter's private context (which
 * carries the priv_class) as the single child object.
 * NOTE(review): the return statements and closing brace are elided in this
 * capture.
 */
578 static void *filter_child_next(void *obj, void *prev)
580 AVFilterContext *ctx = obj;
581 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
586 #if FF_API_CHILD_CLASS_NEXT
/* Deprecated AVClass child_class_next callback: iterate registered filters
 * to find the priv_class following `prev`.
 * NOTE(review): the `void *opaque` declarations, loop bodies, returns and
 * the matching #endif are elided in this capture. */
587 static const AVClass *filter_child_class_next(const AVClass *prev)
590 const AVFilter *f = NULL;
592 /* find the filter that corresponds to prev */
593 while (prev && (f = av_filter_iterate(&opaque)))
594 if (f->priv_class == prev)
597 /* could not find filter corresponding to prev */
601 /* find next filter with specific options */
602 while ((f = av_filter_iterate(&opaque)))
604 return f->priv_class;
/* AVClass child_class_iterate callback: walk registered filters and return
 * the next non-NULL priv_class; iteration state lives in *iter.
 * NOTE(review): the `const AVFilter *f` declaration, the priv_class test
 * inside the loop and the final return are elided in this capture. */
610 static const AVClass *filter_child_class_iterate(void **iter)
614 while ((f = av_filter_iterate(iter)))
616 return f->priv_class;
621 #define OFFSET(x) offsetof(AVFilterContext, x)
622 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
623 #define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
/* Generic options shared by every filter instance (threading, timeline
 * enable expression, extra HW frames).  "enable" is runtime-settable.
 * NOTE(review): the terminating { NULL } sentinel is elided in this
 * capture. */
624 static const AVOption avfilter_options[] = {
625 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
626 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
627 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .flags = FLAGS, .unit = "thread_type" },
628 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = TFLAGS },
629 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
630 { .i64 = 0 }, 0, INT_MAX, FLAGS },
631 { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
632 OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
/* AVClass describing AVFilterContext for the AVOptions API and logging.
 * NOTE(review): the #endif closing the FF_API_CHILD_CLASS_NEXT section and
 * the struct's closing brace are elided in this capture. */
636 static const AVClass avfilter_class = {
637 .class_name = "AVFilter",
638 .item_name = default_filter_name,
639 .version = LIBAVUTIL_VERSION_INT,
640 .category = AV_CLASS_CATEGORY_FILTER,
641 .child_next = filter_child_next,
642 #if FF_API_CHILD_CLASS_NEXT
643 .child_class_next = filter_child_class_next,
645 .child_class_iterate = filter_child_class_iterate,
646 .option = avfilter_options,
/**
 * Default (serial) execute callback: run `func` for every job in order,
 * recording per-job results in `ret` when provided.
 * NOTE(review): the `int i;` declaration, the `if (ret) ret[i] = r;` line
 * and the final `return 0;` are elided in this capture.
 */
649 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
650 int *ret, int nb_jobs)
654 for (i = 0; i < nb_jobs; i++) {
655 int r = func(ctx, arg, i, nb_jobs);
/**
 * Allocate and minimally initialize an AVFilterContext for `filter`:
 * private data, option defaults, internal state with the default execute
 * callback, and deep copies of the filter's input/output pad arrays plus
 * zeroed link arrays.  On any allocation failure everything is unwound and
 * NULL is returned (error path visible at the bottom).
 * NOTE(review): several NULL checks / goto-error lines, the preinit error
 * path, the `return ret;` and the error-label lines freeing name are
 * elided in this capture.
 */
662 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
664 AVFilterContext *ret;
670 ret = av_mallocz(sizeof(AVFilterContext));
674 ret->av_class = &avfilter_class;
675 ret->filter = filter;
676 ret->name = inst_name ? av_strdup(inst_name) : NULL;
677 if (filter->priv_size) {
678 ret->priv = av_mallocz(filter->priv_size);
682 if (filter->preinit) {
683 if (filter->preinit(ret) < 0)
/* generic options first, then the filter's private options */
688 av_opt_set_defaults(ret);
689 if (filter->priv_class) {
690 *(const AVClass**)ret->priv = filter->priv_class;
691 av_opt_set_defaults(ret->priv);
694 ret->internal = av_mallocz(sizeof(*ret->internal));
697 ret->internal->execute = default_execute;
699 ret->nb_inputs = avfilter_pad_count(filter->inputs);
700 if (ret->nb_inputs ) {
701 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
702 if (!ret->input_pads)
704 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
705 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
710 ret->nb_outputs = avfilter_pad_count(filter->outputs);
711 if (ret->nb_outputs) {
712 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
713 if (!ret->output_pads)
715 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
716 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
/* error path: undo every allocation made above */
726 av_freep(&ret->inputs);
727 av_freep(&ret->input_pads);
729 av_freep(&ret->outputs);
730 av_freep(&ret->output_pads);
732 av_freep(&ret->priv);
733 av_freep(&ret->internal);
/**
 * Detach a link from both endpoint filters, release its hw frames context
 * and all negotiated format/samplerate/channel-layout lists, then free the
 * link itself.
 * NOTE(review): the NULL guard and the `if (link->src)` / `if (link->dst)`
 * tests around the detach assignments are elided in this capture.
 */
738 static void free_link(AVFilterLink *link)
744 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
746 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
748 av_buffer_unref(&link->hw_frames_ctx);
750 ff_formats_unref(&link->incfg.formats);
751 ff_formats_unref(&link->outcfg.formats);
752 ff_formats_unref(&link->incfg.samplerates);
753 ff_formats_unref(&link->outcfg.samplerates);
754 ff_channel_layouts_unref(&link->incfg.channel_layouts);
755 ff_channel_layouts_unref(&link->outcfg.channel_layouts);
756 avfilter_link_free(&link);
/**
 * Destroy a filter instance: remove it from its graph, run uninit, free
 * every link, option/private data, pads, pending commands, the enable
 * expression and internal state.
 * NOTE(review): the NULL guard, the `int i;` declaration, the
 * `if (filter->graph)` test, hw_device_ctx-related lines around the visible
 * unref, and the final av_free(filter) are elided in this capture.
 */
759 void avfilter_free(AVFilterContext *filter)
767 ff_filter_graph_remove_filter(filter->graph, filter);
769 if (filter->filter->uninit)
770 filter->filter->uninit(filter);
772 for (i = 0; i < filter->nb_inputs; i++) {
773 free_link(filter->inputs[i]);
775 for (i = 0; i < filter->nb_outputs; i++) {
776 free_link(filter->outputs[i]);
779 if (filter->filter->priv_class)
780 av_opt_free(filter->priv);
782 av_buffer_unref(&filter->hw_device_ctx);
784 av_freep(&filter->name);
785 av_freep(&filter->input_pads);
786 av_freep(&filter->output_pads);
787 av_freep(&filter->inputs);
788 av_freep(&filter->outputs);
789 av_freep(&filter->priv);
790 while(filter->command_queue){
791 ff_command_queue_pop(filter);
794 av_expr_free(filter->enable);
795 filter->enable = NULL;
796 av_freep(&filter->var_values);
797 av_freep(&filter->internal);
801 int ff_filter_get_nb_threads(AVFilterContext *ctx)
803 if (ctx->nb_threads > 0)
804 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
805 return ctx->graph->nb_threads;
/**
 * Parse a filter argument string of "key=value:key=value" pairs (with
 * shorthand positional options taken from the priv class's option order),
 * applying generic AVFilterContext options directly and collecting/
 * applying private options; finally re-parse the "enable" expression if it
 * was set.
 * NOTE(review): this capture is heavily elided — the `args` parameter, the
 * parse loop structure, the `key` selection between parsed_key and
 * shorthand, frees of parsed_key/value, count tracking and returns are all
 * missing.
 */
808 static int process_options(AVFilterContext *ctx, AVDictionary **options,
811 const AVOption *o = NULL;
813 char *av_uninit(parsed_key), *av_uninit(value);
821 const char *shorthand = NULL;
/* advance through the priv class's options to find the next shorthand */
823 o = av_opt_next(ctx->priv, o);
825 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
831 ret = av_opt_get_key_value(&args, "=", ":",
832 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
833 &parsed_key, &value);
835 if (ret == AVERROR(EINVAL))
836 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
838 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* an explicit key switches parsing out of shorthand mode */
846 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
851 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
853 if (av_opt_find(ctx, key, NULL, 0, 0)) {
854 ret = av_opt_set(ctx, key, value, 0);
861 av_dict_set(options, key, value, 0);
862 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
863 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
864 if (ret == AVERROR_OPTION_NOT_FOUND)
865 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
878 if (ctx->enable_str) {
879 ret = set_enable_expr(ctx, ctx->enable_str);
/**
 * Generic process_command helper: apply `cmd=arg` to the filter's private
 * options, but only for options flagged both runtime- and filtering-param;
 * ENOSYS otherwise.
 * NOTE(review): the `const AVOption *o;` declaration, the first return
 * (no priv_class case) and the `if (!o)` test are elided in this capture.
 */
886 int ff_filter_process_command(AVFilterContext *ctx, const char *cmd,
887 const char *arg, char *res, int res_len, int flags)
891 if (!ctx->filter->priv_class)
893 o = av_opt_find2(ctx->priv, cmd, NULL, AV_OPT_FLAG_RUNTIME_PARAM | AV_OPT_FLAG_FILTERING_PARAM, AV_OPT_SEARCH_CHILDREN, NULL);
895 return AVERROR(ENOSYS);
896 return av_opt_set(ctx->priv, cmd, arg, 0);
/**
 * Initialize a filter from an options dictionary: apply generic options,
 * pick slice threading when the filter, graph and executor all support it,
 * apply private options, then run the filter's init callback
 * (init_opaque / init / init_dict, in that priority order).
 * NOTE(review): the `int ret = 0;` declaration, several early returns and
 * the final `return ret;` are elided in this capture.
 */
899 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
903 ret = av_opt_set_dict(ctx, options);
905 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
/* enable slice threading only when every layer agrees */
909 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
910 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
911 ctx->graph->internal->thread_execute) {
912 ctx->thread_type = AVFILTER_THREAD_SLICE;
913 ctx->internal->execute = ctx->graph->internal->thread_execute;
915 ctx->thread_type = 0;
918 if (ctx->filter->priv_class) {
919 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
921 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
926 if (ctx->filter->init_opaque)
927 ret = ctx->filter->init_opaque(ctx, NULL);
928 else if (ctx->filter->init)
929 ret = ctx->filter->init(ctx);
930 else if (ctx->filter->init_dict)
931 ret = ctx->filter->init_dict(ctx, options);
/**
 * Initialize a filter from a string of options.  For a handful of legacy
 * filters this still accepts the old ':'-separated syntax on a deprecated
 * path (rewriting ':' to '|' would be needed; here it only detects and
 * rejects it with a hint), otherwise the string is parsed by
 * process_options() and handed to avfilter_init_dict().
 * NOTE(review): this capture is heavily elided — the `int ret = 0;`
 * declaration, the `if (args && *args)` guard, parts of the aevalsrc
 * colon-scanning loop, the old/new syntax log arguments, gotos/labels and
 * the final return are missing.
 */
936 int avfilter_init_str(AVFilterContext *filter, const char *args)
938 AVDictionary *options = NULL;
939 AVDictionaryEntry *e;
943 if (!filter->filter->priv_class) {
944 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
945 "options, but options were provided: %s.\n", args);
946 return AVERROR(EINVAL);
949 #if FF_API_OLD_FILTER_OPTS_ERROR
950 if ( !strcmp(filter->filter->name, "format") ||
951 !strcmp(filter->filter->name, "noformat") ||
952 !strcmp(filter->filter->name, "frei0r") ||
953 !strcmp(filter->filter->name, "frei0r_src") ||
954 !strcmp(filter->filter->name, "ocv") ||
955 !strcmp(filter->filter->name, "pan") ||
956 !strcmp(filter->filter->name, "pp") ||
957 !strcmp(filter->filter->name, "aevalsrc")) {
958 /* a hack for compatibility with the old syntax
959 * replace colons with |s */
960 char *copy = av_strdup(args);
962 int nb_leading = 0; // number of leading colons to skip
966 ret = AVERROR(ENOMEM);
/* some filters take positional arguments before the option list */
970 if (!strcmp(filter->filter->name, "frei0r") ||
971 !strcmp(filter->filter->name, "ocv"))
973 else if (!strcmp(filter->filter->name, "frei0r_src"))
976 while (nb_leading--) {
979 p = copy + strlen(copy);
985 deprecated = strchr(p, ':') != NULL;
987 if (!strcmp(filter->filter->name, "aevalsrc")) {
/* aevalsrc: channel expressions may legitimately contain ':' — only
 * colons separating options count as deprecated syntax */
989 while ((p = strchr(p, ':')) && p[1] != ':') {
990 const char *epos = strchr(p + 1, '=');
991 const char *spos = strchr(p + 1, ':');
992 const int next_token_is_opt = epos && (!spos || epos < spos);
993 if (next_token_is_opt) {
997 /* next token does not contain a '=', assume a channel expression */
1001 if (p && *p == ':') { // double sep '::' found
1003 memmove(p, p + 1, strlen(p));
1006 while ((p = strchr(p, ':')))
1010 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1011 "'|' to separate the list items ('%s' instead of '%s')\n",
1013 ret = AVERROR(EINVAL);
1015 ret = process_options(filter, &options, copy);
1024 ret = process_options(filter, &options, args);
1030 ret = avfilter_init_dict(filter, &options);
/* any leftover dictionary entry is an unknown option */
1034 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1035 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1036 ret = AVERROR_OPTION_NOT_FOUND;
1041 av_dict_free(&options);
1046 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1048 return pads[pad_idx].name;
1051 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1053 return pads[pad_idx].type;
1056 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1058 return ff_filter_frame(link->dst->outputs[0], frame);
/**
 * Deliver one frame to the destination pad's filter_frame callback (or the
 * passthrough default): make the frame writable if the pad requires it,
 * run queued commands and the timeline expression, and force passthrough
 * when the filter is disabled and timeline-generic.  On error the frame is
 * freed here.
 * NOTE(review): the `int ret;` declaration, the error check after
 * ff_inlink_make_frame_writable, the `return ret;` after the callback and
 * the error-path return are elided in this capture.
 */
1061 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1063 int (*filter_frame)(AVFilterLink *, AVFrame *);
1064 AVFilterContext *dstctx = link->dst;
1065 AVFilterPad *dst = link->dstpad;
1068 if (!(filter_frame = dst->filter_frame))
1069 filter_frame = default_filter_frame;
1071 if (dst->needs_writable) {
1072 ret = ff_inlink_make_frame_writable(link, &frame);
1077 ff_inlink_process_commands(link, frame);
1078 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
/* disabled + timeline-generic: bypass the filter, pass the frame through */
1080 if (dstctx->is_disabled &&
1081 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1082 filter_frame = default_filter_frame;
1083 ret = filter_frame(link, frame);
1084 link->frame_count_out++;
1088 av_frame_free(&frame);
/**
 * Send a frame from a source pad into a link: after consistency checks
 * against the negotiated link parameters, queue it on the link's fifo,
 * clear the blocked/wanted flags, unblock the destination's outputs and
 * schedule the destination at high priority.  Parameter mismatches fall
 * through to the error path that frees the frame.
 * NOTE(review): the `int ret;` declaration, the audio `else` branch head,
 * `goto error;` lines after each mismatch log, the fifo-error return and
 * the success `return 0;` / `error:` label are elided in this capture.
 */
1092 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1095 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1097 /* Consistency checks */
1098 if (link->type == AVMEDIA_TYPE_VIDEO) {
/* a few filters legitimately change video parameters mid-stream */
1099 if (strcmp(link->dst->filter->name, "buffersink") &&
1100 strcmp(link->dst->filter->name, "format") &&
1101 strcmp(link->dst->filter->name, "idet") &&
1102 strcmp(link->dst->filter->name, "null") &&
1103 strcmp(link->dst->filter->name, "scale")) {
1104 av_assert1(frame->format == link->format);
1105 av_assert1(frame->width == link->w);
1106 av_assert1(frame->height == link->h);
1109 if (frame->format != link->format) {
1110 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1113 if (frame->channels != link->channels) {
1114 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1117 if (frame->channel_layout != link->channel_layout) {
1118 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1121 if (frame->sample_rate != link->sample_rate) {
1122 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1127 link->frame_blocked_in = link->frame_wanted_out = 0;
1128 link->frame_count_in++;
1129 filter_unblock(link->dst);
1130 ret = ff_framequeue_add(&link->fifo, frame);
1132 av_frame_free(&frame);
1135 ff_filter_set_ready(link->dst, 300);
1139 av_frame_free(&frame);
1140 return AVERROR_PATCHWELCOME;
/**
 * True when the link's fifo holds frames and either at least `min` samples
 * are queued or a further condition (elided in this capture — presumably
 * the input status being set; confirm against the full source) holds.
 */
1143 static int samples_ready(AVFilterLink *link, unsigned min)
1145 return ff_framequeue_queued_frames(&link->fifo) &&
1146 (ff_framequeue_queued_samples(&link->fifo) >= min ||
/**
 * Extract between min and max audio samples from the link's fifo into
 * *rframe.  Fast path: the first queued frame fits the window and no
 * samples were previously skipped — hand it over as-is.  Otherwise gather
 * whole frames up to the limit, allocate a fresh buffer, copy the samples
 * across, and partially consume the last frame via skip_samples.
 * NOTE(review): the `int ret;` declaration, the nb_samples/nb_frames reset
 * before the accumulation loop, loop/brace structure, `buf->nb_samples`
 * assignment and the final `*rframe = buf; return 0;` are elided in this
 * capture.
 */
1150 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1153 AVFrame *frame0, *frame, *buf;
1154 unsigned nb_samples, nb_frames, i, p;
1157 /* Note: this function relies on no format changes and must only be
1158 called with enough samples. */
1159 av_assert1(samples_ready(link, link->min_samples));
1160 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
/* fast path: first frame already fits and nothing was skipped */
1161 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1162 *rframe = ff_framequeue_take(&link->fifo);
1168 if (nb_samples + frame->nb_samples > max) {
1169 if (nb_samples < min)
1173 nb_samples += frame->nb_samples;
1175 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1177 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1180 buf = ff_get_audio_buffer(link, nb_samples);
1182 return AVERROR(ENOMEM);
1183 ret = av_frame_copy_props(buf, frame0);
1185 av_frame_free(&buf);
1188 buf->pts = frame0->pts;
/* drain whole frames, then take the remainder from the next frame */
1191 for (i = 0; i < nb_frames; i++) {
1192 frame = ff_framequeue_take(&link->fifo);
1193 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1194 frame->nb_samples, link->channels, link->format);
1195 p += frame->nb_samples;
1196 av_frame_free(&frame);
1198 if (p < nb_samples) {
1199 unsigned n = nb_samples - p;
1200 frame = ff_framequeue_peek(&link->fifo, 0);
1201 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1202 link->channels, link->format);
1203 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
/**
 * Consume one frame (or a min/max sample window) from a link's fifo and
 * feed it to the destination filter via ff_filter_frame_framed(); on a new
 * error, record it as the link's output status.  The destination is
 * re-scheduled afterwards in case more work is possible.
 * NOTE(review): the `int ret;` declaration, the error/no-frame check after
 * the consume calls, and the final `return ret;` are elided in this
 * capture.
 */
1210 static int ff_filter_frame_to_filter(AVFilterLink *link)
1212 AVFrame *frame = NULL;
1213 AVFilterContext *dst = link->dst;
1216 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1217 ret = link->min_samples ?
1218 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1219 ff_inlink_consume_frame(link, &frame);
1225 /* The filter will soon have received a new frame, that may allow it to
1226 produce one or more: unblock its outputs. */
1227 filter_unblock(dst);
1228 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1229 before the frame; ff_filter_frame_framed() will re-increment it. */
1230 link->frame_count_out--;
1231 ret = ff_filter_frame_framed(link, frame);
1232 if (ret < 0 && ret != link->status_out) {
1233 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1235 /* Run once again, to see if several frames were available, or if
1236 the input status has also changed, or any other reason. */
1237 ff_filter_set_ready(dst, 300);
/**
 * Propagate an input status change (EOF/error on `in`) to the filter's
 * outputs by requesting frames from each still-open output in turn, so the
 * filter can flush; when every output is already closed, acknowledge the
 * input status directly.
 * NOTE(review): the `int ret;` declaration, the return in the sink case,
 * error/progress handling after the request call and the final return are
 * elided in this capture.
 */
1242 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1244 unsigned out = 0, progress = 0;
1247 av_assert0(!in->status_out);
1248 if (!filter->nb_outputs) {
1249 /* not necessary with the current API and sinks */
1252 while (!in->status_out) {
1253 if (!filter->outputs[out]->status_in) {
1255 ret = ff_request_frame_to_filter(filter->outputs[out]);
/* wrap around after the last output; give up only if no progress */
1259 if (++out == filter->nb_outputs) {
1261 /* Every output already closed: input no longer interesting
1262 (example: overlay in shortest mode, other input closed). */
1263 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1270 ff_filter_set_ready(filter, 200);
/**
 * Default activate callback implementing the scheduling policy described
 * in the design comment below: (1) deliver a queued frame if any input has
 * enough samples; (2) else forward a pending input status change; (3) else
 * forward output frame requests upstream; otherwise report not-ready.
 * NOTE(review): the `unsigned i;` declaration and several closing braces
 * are elided in this capture.
 */
1274 static int ff_filter_activate_default(AVFilterContext *filter)
1278 for (i = 0; i < filter->nb_inputs; i++) {
1279 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1280 return ff_filter_frame_to_filter(filter->inputs[i]);
1283 for (i = 0; i < filter->nb_inputs; i++) {
1284 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1285 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1286 return forward_status_change(filter, filter->inputs[i]);
1289 for (i = 0; i < filter->nb_outputs; i++) {
1290 if (filter->outputs[i]->frame_wanted_out &&
1291 !filter->outputs[i]->frame_blocked_in) {
1292 return ff_request_frame_to_filter(filter->outputs[i]);
1295 return FFERROR_NOT_READY;
1299 Filter scheduling and activation
1301 When a filter is activated, it must:
1302 - if possible, output a frame;
1303 - else, if relevant, forward the input status change;
1304 - else, check outputs for wanted frames and forward the requests.
1306 The following AVFilterLink fields are used for activation:
1310 This field indicates if a frame is needed on this input of the
1311 destination filter. A positive value indicates that a frame is needed
1312 to process queued frames or internal data or to satisfy the
1313 application; a zero value indicates that a frame is not especially
1314 needed but could be processed anyway; a negative value indicates that a
1315 frame would just be queued.
1317 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1318 when requested by the application through a specific API or when it is
1319 set on one of the outputs.
1321 It is cleared when a frame is sent from the source using
1324 It is also cleared when a status change is sent from the source using
1325 ff_avfilter_link_set_in_status().
1329 This field means that the source filter cannot generate a frame as is.
1330 Its goal is to avoid repeatedly calling the request_frame() method on
1333 It is set by the framework on all outputs of a filter before activating it.
1335 It is automatically cleared by ff_filter_frame().
1337 It is also automatically cleared by ff_avfilter_link_set_in_status().
1339 It is also cleared on all outputs (using filter_unblock()) when
1340 something happens on an input: processing a frame or changing the
1345 Contains the frames queued on a filter input. If it contains frames and
1346 frame_wanted_out is not set, then the filter can be activated. If that
1347 results in the filter not being able to use these frames, the filter must set
1348 frame_wanted_out to ask for more frames.
1350 - status_in and status_in_pts:
1352 Status (EOF or error code) of the link and timestamp of the status
1353 change (in link time base, same as frames) as seen from the input of
1354 the link. The status change is considered happening after the frames
1357 It is set by the source filter using ff_avfilter_link_set_in_status().
1361 Status of the link as seen from the output of the link. The status
1362 change is considered to have already happened.
1364 It is set by the destination filter using
1365 ff_avfilter_link_set_out_status().
1367 Filters are activated according to the ready field, set using the
1368 ff_filter_set_ready(). Eventually, a priority queue will be used.
1369 ff_filter_set_ready() is called whenever anything could cause progress to
1370 be possible. Marking a filter ready when it is not is not a problem,
1371 except for the small overhead it causes.
1373 Conditions that cause a filter to be marked ready are:
1375 - frames added on an input link;
1377 - changes in the input or output status of an input link;
1379 - requests for a frame on an output link;
1381 - after any actual processing using the legacy methods (filter_frame(),
1382 and request_frame() to acknowledge status changes), to run once more
1383 and check if enough input was present for several frames.
1385 Examples of scenarios to consider:
1387 - buffersrc: activate if frame_wanted_out to notify the application;
1388 activate when the application adds a frame to push it immediately.
1390 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1392 - concat (not at stitch points): can process a frame on any output.
1393 Activate if frame_wanted_out on output to forward on the corresponding
1394 input. Activate when a frame is present on input to process it
1397 - framesync: needs at least one frame on each input; extra frames on the
1398 wrong input will accumulate. When a frame is first added on one input,
1399 set frame_wanted_out<0 on it to avoid getting more (would trigger
1400 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1402 Activation of old filters:
1404 In order to activate a filter implementing the legacy filter_frame() and
1405 request_frame() methods, perform the first possible of the following
1408 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1409 frame and call filter_frame().
1411 Rationale: filter frames as soon as possible instead of leaving them
1412 queued; frame_wanted_out < 0 is not possible since the old API does not
1413 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1414 when min_samples > 0 and there are not enough samples queued.
1416 - If an input has status_in set but not status_out, try to call
1417 request_frame() on one of the outputs in the hope that it will trigger
1418 request_frame() on the input with status_in and acknowledge it. This is
1419 awkward and fragile, filters with several inputs or outputs should be
1420 updated to direct activation as soon as possible.
1422 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1425 Rationale: checking frame_blocked_in is necessary to avoid requesting
1426 repeatedly on a blocked input if another is not blocked (example:
1427 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1430 int ff_filter_activate(AVFilterContext *filter)
1434 /* Generic timeline support is not yet implemented but should be easy */
1435 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1436 filter->filter->activate));
1438 ret = filter->filter->activate ? filter->filter->activate(filter) :
1439 ff_filter_activate_default(filter);
1440 if (ret == FFERROR_NOT_READY)
1445 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1447 *rpts = link->current_pts;
1448 if (ff_framequeue_queued_frames(&link->fifo))
1449 return *rstatus = 0;
1450 if (link->status_out)
1451 return *rstatus = link->status_out;
1452 if (!link->status_in)
1453 return *rstatus = 0;
1454 *rstatus = link->status_out = link->status_in;
1455 ff_update_link_current_pts(link, link->status_in_pts);
1456 *rpts = link->current_pts;
1460 size_t ff_inlink_queued_frames(AVFilterLink *link)
1462 return ff_framequeue_queued_frames(&link->fifo);
1465 int ff_inlink_check_available_frame(AVFilterLink *link)
1467 return ff_framequeue_queued_frames(&link->fifo) > 0;
1470 int ff_inlink_queued_samples(AVFilterLink *link)
1472 return ff_framequeue_queued_samples(&link->fifo);
1475 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1477 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1479 return samples >= min || (link->status_in && samples);
1482 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1484 ff_update_link_current_pts(link, frame->pts);
1485 ff_inlink_process_commands(link, frame);
1486 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1487 link->frame_count_out++;
1490 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1495 if (!ff_inlink_check_available_frame(link))
1498 if (link->fifo.samples_skipped) {
1499 frame = ff_framequeue_peek(&link->fifo, 0);
1500 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
1503 frame = ff_framequeue_take(&link->fifo);
1504 consume_update(link, frame);
1509 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1517 if (!ff_inlink_check_available_samples(link, min))
1519 if (link->status_in)
1520 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1521 ret = take_samples(link, min, max, &frame);
1524 consume_update(link, frame);
1529 AVFrame *ff_inlink_peek_frame(AVFilterLink *link, size_t idx)
1531 return ff_framequeue_peek(&link->fifo, idx);
1534 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1536 AVFrame *frame = *rframe;
1540 if (av_frame_is_writable(frame))
1542 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
1544 switch (link->type) {
1545 case AVMEDIA_TYPE_VIDEO:
1546 out = ff_get_video_buffer(link, link->w, link->h);
1548 case AVMEDIA_TYPE_AUDIO:
1549 out = ff_get_audio_buffer(link, frame->nb_samples);
1552 return AVERROR(EINVAL);
1555 return AVERROR(ENOMEM);
1557 ret = av_frame_copy_props(out, frame);
1559 av_frame_free(&out);
1563 switch (link->type) {
1564 case AVMEDIA_TYPE_VIDEO:
1565 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1566 frame->format, frame->width, frame->height);
1568 case AVMEDIA_TYPE_AUDIO:
1569 av_samples_copy(out->extended_data, frame->extended_data,
1570 0, 0, frame->nb_samples,
1575 av_assert0(!"reached");
1578 av_frame_free(&frame);
1583 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1585 AVFilterCommand *cmd = link->dst->command_queue;
1587 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1588 av_log(link->dst, AV_LOG_DEBUG,
1589 "Processing command time:%f command:%s arg:%s\n",
1590 cmd->time, cmd->command, cmd->arg);
1591 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1592 ff_command_queue_pop(link->dst);
1593 cmd= link->dst->command_queue;
1598 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1600 AVFilterContext *dstctx = link->dst;
1601 int64_t pts = frame->pts;
1602 int64_t pos = frame->pkt_pos;
1604 if (!dstctx->enable_str)
1607 dstctx->var_values[VAR_N] = link->frame_count_out;
1608 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1609 dstctx->var_values[VAR_W] = link->w;
1610 dstctx->var_values[VAR_H] = link->h;
1611 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1613 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
1616 void ff_inlink_request_frame(AVFilterLink *link)
1618 av_assert1(!link->status_in);
1619 av_assert1(!link->status_out);
1620 link->frame_wanted_out = 1;
1621 ff_filter_set_ready(link->src, 100);
1624 void ff_inlink_set_status(AVFilterLink *link, int status)
1626 if (link->status_out)
1628 link->frame_wanted_out = 0;
1629 link->frame_blocked_in = 0;
1630 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
1631 while (ff_framequeue_queued_frames(&link->fifo)) {
1632 AVFrame *frame = ff_framequeue_take(&link->fifo);
1633 av_frame_free(&frame);
1635 if (!link->status_in)
1636 link->status_in = status;
1639 int ff_outlink_get_status(AVFilterLink *link)
1641 return link->status_in;
1644 const AVClass *avfilter_get_class(void)
1646 return &avfilter_class;
1649 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1650 int default_pool_size)
1652 AVHWFramesContext *frames;
1654 // Must already be set by caller.
1655 av_assert0(link->hw_frames_ctx);
1657 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1659 if (frames->initial_pool_size == 0) {
1660 // Dynamic allocation is necessarily supported.
1661 } else if (avctx->extra_hw_frames >= 0) {
1662 frames->initial_pool_size += avctx->extra_hw_frames;
1664 frames->initial_pool_size = default_pool_size;