3 * Copyright (c) 2007 Bobby Bingham
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavutil/avassert.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/buffer.h"
25 #include "libavutil/channel_layout.h"
26 #include "libavutil/common.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/hwcontext.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/rational.h"
34 #include "libavutil/samplefmt.h"
35 #include "libavutil/thread.h"
37 #define FF_INTERNAL_FIELDS 1
38 #include "framequeue.h"
46 #include "libavutil/ffversion.h"
/* Version string embedded in the built library so a binary can be
 * identified as matching a specific FFmpeg build. */
47 const char av_filter_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
49 void ff_tlog_ref(void *ctx, AVFrame *ref, int end)
51 av_unused char buf[16];
53 "ref[%p buf:%p data:%p linesize[%d, %d, %d, %d] pts:%"PRId64" pos:%"PRId64,
54 ref, ref->buf, ref->data[0],
55 ref->linesize[0], ref->linesize[1], ref->linesize[2], ref->linesize[3],
56 ref->pts, ref->pkt_pos);
59 ff_tlog(ctx, " a:%d/%d s:%dx%d i:%c iskey:%d type:%c",
60 ref->sample_aspect_ratio.num, ref->sample_aspect_ratio.den,
61 ref->width, ref->height,
62 !ref->interlaced_frame ? 'P' : /* Progressive */
63 ref->top_field_first ? 'T' : 'B', /* Top / Bottom */
65 av_get_picture_type_char(ref->pict_type));
67 if (ref->nb_samples) {
68 ff_tlog(ctx, " cl:%"PRId64"d n:%d r:%d",
74 ff_tlog(ctx, "]%s", end ? "\n" : "");
77 unsigned avfilter_version(void)
79 av_assert0(LIBAVFILTER_VERSION_MICRO >= 100);
80 return LIBAVFILTER_VERSION_INT;
83 const char *avfilter_configuration(void)
85 return FFMPEG_CONFIGURATION;
88 const char *avfilter_license(void)
90 #define LICENSE_PREFIX "libavfilter license: "
91 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
94 void ff_command_queue_pop(AVFilterContext *filter)
96 AVFilterCommand *c= filter->command_queue;
98 av_freep(&c->command);
99 filter->command_queue= c->next;
103 int ff_insert_pad(unsigned idx, unsigned *count, size_t padidx_off,
104 AVFilterPad **pads, AVFilterLink ***links,
107 AVFilterLink **newlinks;
108 AVFilterPad *newpads;
111 idx = FFMIN(idx, *count);
113 newpads = av_realloc_array(*pads, *count + 1, sizeof(AVFilterPad));
114 newlinks = av_realloc_array(*links, *count + 1, sizeof(AVFilterLink*));
119 if (!newpads || !newlinks)
120 return AVERROR(ENOMEM);
122 memmove(*pads + idx + 1, *pads + idx, sizeof(AVFilterPad) * (*count - idx));
123 memmove(*links + idx + 1, *links + idx, sizeof(AVFilterLink*) * (*count - idx));
124 memcpy(*pads + idx, newpad, sizeof(AVFilterPad));
125 (*links)[idx] = NULL;
128 for (i = idx + 1; i < *count; i++)
130 (*(unsigned *)((uint8_t *) (*links)[i] + padidx_off))++;
135 int avfilter_link(AVFilterContext *src, unsigned srcpad,
136 AVFilterContext *dst, unsigned dstpad)
140 av_assert0(src->graph);
141 av_assert0(dst->graph);
142 av_assert0(src->graph == dst->graph);
144 if (src->nb_outputs <= srcpad || dst->nb_inputs <= dstpad ||
145 src->outputs[srcpad] || dst->inputs[dstpad])
146 return AVERROR(EINVAL);
148 if (src->output_pads[srcpad].type != dst->input_pads[dstpad].type) {
149 av_log(src, AV_LOG_ERROR,
150 "Media type mismatch between the '%s' filter output pad %d (%s) and the '%s' filter input pad %d (%s)\n",
151 src->name, srcpad, (char *)av_x_if_null(av_get_media_type_string(src->output_pads[srcpad].type), "?"),
152 dst->name, dstpad, (char *)av_x_if_null(av_get_media_type_string(dst-> input_pads[dstpad].type), "?"));
153 return AVERROR(EINVAL);
156 link = av_mallocz(sizeof(*link));
158 return AVERROR(ENOMEM);
160 src->outputs[srcpad] = dst->inputs[dstpad] = link;
164 link->srcpad = &src->output_pads[srcpad];
165 link->dstpad = &dst->input_pads[dstpad];
166 link->type = src->output_pads[srcpad].type;
167 av_assert0(AV_PIX_FMT_NONE == -1 && AV_SAMPLE_FMT_NONE == -1);
169 ff_framequeue_init(&link->fifo, &src->graph->internal->frame_queues);
174 void avfilter_link_free(AVFilterLink **link)
179 av_frame_free(&(*link)->partial_buf);
180 ff_framequeue_free(&(*link)->fifo);
181 ff_frame_pool_uninit((FFFramePool**)&(*link)->frame_pool);
#if FF_API_FILTER_GET_SET
/* Deprecated accessor kept for ABI compatibility: number of channels on
 * the link (read link->channels directly in new code). */
int avfilter_link_get_channels(AVFilterLink *link)
{
    return link->channels;
}
#endif
193 void ff_filter_set_ready(AVFilterContext *filter, unsigned priority)
195 filter->ready = FFMAX(filter->ready, priority);
199 * Clear frame_blocked_in on all outputs.
200 * This is necessary whenever something changes on input.
202 static void filter_unblock(AVFilterContext *filter)
206 for (i = 0; i < filter->nb_outputs; i++)
207 filter->outputs[i]->frame_blocked_in = 0;
211 void ff_avfilter_link_set_in_status(AVFilterLink *link, int status, int64_t pts)
213 if (link->status_in == status)
215 av_assert0(!link->status_in);
216 link->status_in = status;
217 link->status_in_pts = pts;
218 link->frame_wanted_out = 0;
219 link->frame_blocked_in = 0;
220 filter_unblock(link->dst);
221 ff_filter_set_ready(link->dst, 200);
224 void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts)
226 av_assert0(!link->frame_wanted_out);
227 av_assert0(!link->status_out);
228 link->status_out = status;
229 if (pts != AV_NOPTS_VALUE)
230 ff_update_link_current_pts(link, pts);
231 filter_unblock(link->dst);
232 ff_filter_set_ready(link->src, 200);
235 void avfilter_link_set_closed(AVFilterLink *link, int closed)
237 ff_avfilter_link_set_out_status(link, closed ? AVERROR_EOF : 0, AV_NOPTS_VALUE);
240 int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
241 unsigned filt_srcpad_idx, unsigned filt_dstpad_idx)
244 unsigned dstpad_idx = link->dstpad - link->dst->input_pads;
246 av_log(link->dst, AV_LOG_VERBOSE, "auto-inserting filter '%s' "
247 "between the filter '%s' and the filter '%s'\n",
248 filt->name, link->src->name, link->dst->name);
250 link->dst->inputs[dstpad_idx] = NULL;
251 if ((ret = avfilter_link(filt, filt_dstpad_idx, link->dst, dstpad_idx)) < 0) {
252 /* failed to link output filter to new filter */
253 link->dst->inputs[dstpad_idx] = link;
257 /* re-hookup the link to the new destination filter we inserted */
259 link->dstpad = &filt->input_pads[filt_srcpad_idx];
260 filt->inputs[filt_srcpad_idx] = link;
262 /* if any information on supported media formats already exists on the
263 * link, we need to preserve that */
264 if (link->out_formats)
265 ff_formats_changeref(&link->out_formats,
266 &filt->outputs[filt_dstpad_idx]->out_formats);
267 if (link->out_samplerates)
268 ff_formats_changeref(&link->out_samplerates,
269 &filt->outputs[filt_dstpad_idx]->out_samplerates);
270 if (link->out_channel_layouts)
271 ff_channel_layouts_changeref(&link->out_channel_layouts,
272 &filt->outputs[filt_dstpad_idx]->out_channel_layouts);
277 int avfilter_config_links(AVFilterContext *filter)
279 int (*config_link)(AVFilterLink *);
283 for (i = 0; i < filter->nb_inputs; i ++) {
284 AVFilterLink *link = filter->inputs[i];
285 AVFilterLink *inlink;
288 if (!link->src || !link->dst) {
289 av_log(filter, AV_LOG_ERROR,
290 "Not all input and output are properly linked (%d).\n", i);
291 return AVERROR(EINVAL);
294 inlink = link->src->nb_inputs ? link->src->inputs[0] : NULL;
296 link->current_pts_us = AV_NOPTS_VALUE;
298 switch (link->init_state) {
301 case AVLINK_STARTINIT:
302 av_log(filter, AV_LOG_INFO, "circular filter chain detected\n");
305 link->init_state = AVLINK_STARTINIT;
307 if ((ret = avfilter_config_links(link->src)) < 0)
310 if (!(config_link = link->srcpad->config_props)) {
311 if (link->src->nb_inputs != 1) {
312 av_log(link->src, AV_LOG_ERROR, "Source filters and filters "
313 "with more than one input "
314 "must set config_props() "
315 "callbacks on all outputs\n");
316 return AVERROR(EINVAL);
318 } else if ((ret = config_link(link)) < 0) {
319 av_log(link->src, AV_LOG_ERROR,
320 "Failed to configure output pad on %s\n",
325 switch (link->type) {
326 case AVMEDIA_TYPE_VIDEO:
327 if (!link->time_base.num && !link->time_base.den)
328 link->time_base = inlink ? inlink->time_base : AV_TIME_BASE_Q;
330 if (!link->sample_aspect_ratio.num && !link->sample_aspect_ratio.den)
331 link->sample_aspect_ratio = inlink ?
332 inlink->sample_aspect_ratio : (AVRational){1,1};
335 if (!link->frame_rate.num && !link->frame_rate.den)
336 link->frame_rate = inlink->frame_rate;
341 } else if (!link->w || !link->h) {
342 av_log(link->src, AV_LOG_ERROR,
343 "Video source filters must set their output link's "
344 "width and height\n");
345 return AVERROR(EINVAL);
349 case AVMEDIA_TYPE_AUDIO:
351 if (!link->time_base.num && !link->time_base.den)
352 link->time_base = inlink->time_base;
355 if (!link->time_base.num && !link->time_base.den)
356 link->time_base = (AVRational) {1, link->sample_rate};
359 if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
360 !(link->src->filter->flags_internal & FF_FILTER_FLAG_HWFRAME_AWARE)) {
361 av_assert0(!link->hw_frames_ctx &&
362 "should not be set by non-hwframe-aware filter");
363 link->hw_frames_ctx = av_buffer_ref(link->src->inputs[0]->hw_frames_ctx);
364 if (!link->hw_frames_ctx)
365 return AVERROR(ENOMEM);
368 if ((config_link = link->dstpad->config_props))
369 if ((ret = config_link(link)) < 0) {
370 av_log(link->dst, AV_LOG_ERROR,
371 "Failed to configure input pad on %s\n",
376 link->init_state = AVLINK_INIT;
383 void ff_tlog_link(void *ctx, AVFilterLink *link, int end)
385 if (link->type == AVMEDIA_TYPE_VIDEO) {
387 "link[%p s:%dx%d fmt:%s %s->%s]%s",
388 link, link->w, link->h,
389 av_get_pix_fmt_name(link->format),
390 link->src ? link->src->filter->name : "",
391 link->dst ? link->dst->filter->name : "",
395 av_get_channel_layout_string(buf, sizeof(buf), -1, link->channel_layout);
398 "link[%p r:%d cl:%s fmt:%s %s->%s]%s",
399 link, (int)link->sample_rate, buf,
400 av_get_sample_fmt_name(link->format),
401 link->src ? link->src->filter->name : "",
402 link->dst ? link->dst->filter->name : "",
407 int ff_request_frame(AVFilterLink *link)
409 FF_TPRINTF_START(NULL, request_frame); ff_tlog_link(NULL, link, 1);
411 av_assert1(!link->dst->filter->activate);
412 if (link->status_out)
413 return link->status_out;
414 if (link->status_in) {
415 if (ff_framequeue_queued_frames(&link->fifo)) {
416 av_assert1(!link->frame_wanted_out);
417 av_assert1(link->dst->ready >= 300);
420 /* Acknowledge status change. Filters using ff_request_frame() will
421 handle the change automatically. Filters can also check the
422 status directly but none do yet. */
423 ff_avfilter_link_set_out_status(link, link->status_in, link->status_in_pts);
424 return link->status_out;
427 link->frame_wanted_out = 1;
428 ff_filter_set_ready(link->src, 100);
432 static int64_t guess_status_pts(AVFilterContext *ctx, int status, AVRational link_time_base)
435 int64_t r = INT64_MAX;
437 for (i = 0; i < ctx->nb_inputs; i++)
438 if (ctx->inputs[i]->status_out == status)
439 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->current_pts, ctx->inputs[i]->time_base, link_time_base));
442 av_log(ctx, AV_LOG_WARNING, "EOF timestamp not reliable\n");
443 for (i = 0; i < ctx->nb_inputs; i++)
444 r = FFMIN(r, av_rescale_q(ctx->inputs[i]->status_in_pts, ctx->inputs[i]->time_base, link_time_base));
447 return AV_NOPTS_VALUE;
450 static int ff_request_frame_to_filter(AVFilterLink *link)
454 FF_TPRINTF_START(NULL, request_frame_to_filter); ff_tlog_link(NULL, link, 1);
455 /* Assume the filter is blocked, let the method clear it if not */
456 link->frame_blocked_in = 1;
457 if (link->srcpad->request_frame)
458 ret = link->srcpad->request_frame(link);
459 else if (link->src->inputs[0])
460 ret = ff_request_frame(link->src->inputs[0]);
462 if (ret != AVERROR(EAGAIN) && ret != link->status_in)
463 ff_avfilter_link_set_in_status(link, ret, guess_status_pts(link->src, ret, link->time_base));
464 if (ret == AVERROR_EOF)
470 int ff_poll_frame(AVFilterLink *link)
472 int i, min = INT_MAX;
474 if (link->srcpad->poll_frame)
475 return link->srcpad->poll_frame(link);
477 for (i = 0; i < link->src->nb_inputs; i++) {
479 if (!link->src->inputs[i])
480 return AVERROR(EINVAL);
481 val = ff_poll_frame(link->src->inputs[i]);
482 min = FFMIN(min, val);
/* Names of the variables available in the 'enable' timeline expression;
 * indices must line up with the VAR_* enum used for ctx->var_values.
 * NOTE(review): the array entries are missing from this extract — confirm
 * against the full source. */
488 static const char *const var_names[] = {
506 static int set_enable_expr(AVFilterContext *ctx, const char *expr)
510 AVExpr *old = ctx->enable;
512 if (!(ctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE)) {
513 av_log(ctx, AV_LOG_ERROR, "Timeline ('enable' option) not supported "
514 "with filter '%s'\n", ctx->filter->name);
515 return AVERROR_PATCHWELCOME;
518 expr_dup = av_strdup(expr);
520 return AVERROR(ENOMEM);
522 if (!ctx->var_values) {
523 ctx->var_values = av_calloc(VAR_VARS_NB, sizeof(*ctx->var_values));
524 if (!ctx->var_values) {
526 return AVERROR(ENOMEM);
530 ret = av_expr_parse((AVExpr**)&ctx->enable, expr_dup, var_names,
531 NULL, NULL, NULL, NULL, 0, ctx->priv);
533 av_log(ctx->priv, AV_LOG_ERROR,
534 "Error when evaluating the expression '%s' for enable\n",
541 av_free(ctx->enable_str);
542 ctx->enable_str = expr_dup;
546 void ff_update_link_current_pts(AVFilterLink *link, int64_t pts)
548 if (pts == AV_NOPTS_VALUE)
550 link->current_pts = pts;
551 link->current_pts_us = av_rescale_q(pts, link->time_base, AV_TIME_BASE_Q);
552 /* TODO use duration */
553 if (link->graph && link->age_index >= 0)
554 ff_avfilter_graph_update_heap(link->graph, link);
557 int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags)
559 if(!strcmp(cmd, "ping")){
560 char local_res[256] = {0};
564 res_len = sizeof(local_res);
566 av_strlcatf(res, res_len, "pong from:%s %s\n", filter->filter->name, filter->name);
567 if (res == local_res)
568 av_log(filter, AV_LOG_INFO, "%s", res);
570 }else if(!strcmp(cmd, "enable")) {
571 return set_enable_expr(filter, arg);
572 }else if(filter->filter->process_command) {
573 return filter->filter->process_command(filter, cmd, arg, res, res_len, flags);
575 return AVERROR(ENOSYS);
/* Head of the registered-filter linked list, and tail pointer so
 * avfilter_register() can append in O(1). */
578 static AVFilter *first_filter;
579 static AVFilter **last_filter = &first_filter;
581 const AVFilter *avfilter_get_by_name(const char *name)
583 const AVFilter *f = NULL;
588 while ((f = avfilter_next(f)))
589 if (!strcmp(f->name, name))
590 return (AVFilter *)f;
595 static AVMutex filter_register_mutex = AV_MUTEX_INITIALIZER;
597 int avfilter_register(AVFilter *filter)
601 /* the filter must select generic or internal exclusively */
602 av_assert0((filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE) != AVFILTER_FLAG_SUPPORT_TIMELINE);
604 ff_mutex_lock(&filter_register_mutex);
611 last_filter = &filter->next;
613 ff_mutex_unlock(&filter_register_mutex);
618 const AVFilter *avfilter_next(const AVFilter *prev)
620 return prev ? prev->next : first_filter;
623 int avfilter_pad_count(const AVFilterPad *pads)
630 for (count = 0; pads->name; count++)
635 static const char *default_filter_name(void *filter_ctx)
637 AVFilterContext *ctx = filter_ctx;
638 return ctx->name ? ctx->name : ctx->filter->name;
641 static void *filter_child_next(void *obj, void *prev)
643 AVFilterContext *ctx = obj;
644 if (!prev && ctx->filter && ctx->filter->priv_class && ctx->priv)
649 static const AVClass *filter_child_class_next(const AVClass *prev)
651 const AVFilter *f = NULL;
653 /* find the filter that corresponds to prev */
654 while (prev && (f = avfilter_next(f)))
655 if (f->priv_class == prev)
658 /* could not find filter corresponding to prev */
662 /* find next filter with specific options */
663 while ((f = avfilter_next(f)))
665 return f->priv_class;
670 #define OFFSET(x) offsetof(AVFilterContext, x)
671 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM
672 static const AVOption avfilter_options[] = {
673 { "thread_type", "Allowed thread types", OFFSET(thread_type), AV_OPT_TYPE_FLAGS,
674 { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
675 { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
676 { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
677 { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
678 { .i64 = 0 }, 0, INT_MAX, FLAGS },
679 { "extra_hw_frames", "Number of extra hardware frames to allocate for the user",
680 OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
684 static const AVClass avfilter_class = {
685 .class_name = "AVFilter",
686 .item_name = default_filter_name,
687 .version = LIBAVUTIL_VERSION_INT,
688 .category = AV_CLASS_CATEGORY_FILTER,
689 .child_next = filter_child_next,
690 .child_class_next = filter_child_class_next,
691 .option = avfilter_options,
694 static int default_execute(AVFilterContext *ctx, avfilter_action_func *func, void *arg,
695 int *ret, int nb_jobs)
699 for (i = 0; i < nb_jobs; i++) {
700 int r = func(ctx, arg, i, nb_jobs);
707 AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
709 AVFilterContext *ret;
715 ret = av_mallocz(sizeof(AVFilterContext));
719 ret->av_class = &avfilter_class;
720 ret->filter = filter;
721 ret->name = inst_name ? av_strdup(inst_name) : NULL;
722 if (filter->priv_size) {
723 ret->priv = av_mallocz(filter->priv_size);
727 if (filter->preinit) {
728 if (filter->preinit(ret) < 0)
733 av_opt_set_defaults(ret);
734 if (filter->priv_class) {
735 *(const AVClass**)ret->priv = filter->priv_class;
736 av_opt_set_defaults(ret->priv);
739 ret->internal = av_mallocz(sizeof(*ret->internal));
742 ret->internal->execute = default_execute;
744 ret->nb_inputs = avfilter_pad_count(filter->inputs);
745 if (ret->nb_inputs ) {
746 ret->input_pads = av_malloc_array(ret->nb_inputs, sizeof(AVFilterPad));
747 if (!ret->input_pads)
749 memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
750 ret->inputs = av_mallocz_array(ret->nb_inputs, sizeof(AVFilterLink*));
755 ret->nb_outputs = avfilter_pad_count(filter->outputs);
756 if (ret->nb_outputs) {
757 ret->output_pads = av_malloc_array(ret->nb_outputs, sizeof(AVFilterPad));
758 if (!ret->output_pads)
760 memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
761 ret->outputs = av_mallocz_array(ret->nb_outputs, sizeof(AVFilterLink*));
771 av_freep(&ret->inputs);
772 av_freep(&ret->input_pads);
774 av_freep(&ret->outputs);
775 av_freep(&ret->output_pads);
777 av_freep(&ret->priv);
778 av_freep(&ret->internal);
783 static void free_link(AVFilterLink *link)
789 link->src->outputs[link->srcpad - link->src->output_pads] = NULL;
791 link->dst->inputs[link->dstpad - link->dst->input_pads] = NULL;
793 av_buffer_unref(&link->hw_frames_ctx);
795 ff_formats_unref(&link->in_formats);
796 ff_formats_unref(&link->out_formats);
797 ff_formats_unref(&link->in_samplerates);
798 ff_formats_unref(&link->out_samplerates);
799 ff_channel_layouts_unref(&link->in_channel_layouts);
800 ff_channel_layouts_unref(&link->out_channel_layouts);
801 avfilter_link_free(&link);
804 void avfilter_free(AVFilterContext *filter)
812 ff_filter_graph_remove_filter(filter->graph, filter);
814 if (filter->filter->uninit)
815 filter->filter->uninit(filter);
817 for (i = 0; i < filter->nb_inputs; i++) {
818 free_link(filter->inputs[i]);
820 for (i = 0; i < filter->nb_outputs; i++) {
821 free_link(filter->outputs[i]);
824 if (filter->filter->priv_class)
825 av_opt_free(filter->priv);
827 av_buffer_unref(&filter->hw_device_ctx);
829 av_freep(&filter->name);
830 av_freep(&filter->input_pads);
831 av_freep(&filter->output_pads);
832 av_freep(&filter->inputs);
833 av_freep(&filter->outputs);
834 av_freep(&filter->priv);
835 while(filter->command_queue){
836 ff_command_queue_pop(filter);
839 av_expr_free(filter->enable);
840 filter->enable = NULL;
841 av_freep(&filter->var_values);
842 av_freep(&filter->internal);
846 int ff_filter_get_nb_threads(AVFilterContext *ctx)
848 if (ctx->nb_threads > 0)
849 return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
850 return ctx->graph->nb_threads;
/* Parse a ':'-separated option string for a filter instance, supporting
 * positional ("shorthand") options in declaration order and explicit
 * key=value pairs. Generic AVFilterContext options are applied directly;
 * everything else is stored in *options and tried on the private context.
 * NOTE(review): several lines of this function (loop scaffolding, key
 * selection, frees, returns) are missing from this extract; comments only
 * describe what the visible code shows. */
853 static int process_options(AVFilterContext *ctx, AVDictionary **options,
856 const AVOption *o = NULL;
858 char *av_uninit(parsed_key), *av_uninit(value);
/* Shorthand mode: pick the next assignable private option in order. */
866 const char *shorthand = NULL;
868 o = av_opt_next(ctx->priv, o);
870 if (o->type == AV_OPT_TYPE_CONST || o->offset == offset)
/* Split the next "key=value" (or bare value while shorthand is active). */
876 ret = av_opt_get_key_value(&args, "=", ":",
877 shorthand ? AV_OPT_FLAG_IMPLICIT_KEY : 0,
878 &parsed_key, &value);
880 if (ret == AVERROR(EINVAL))
881 av_log(ctx, AV_LOG_ERROR, "No option name near '%s'\n", args);
883 av_log(ctx, AV_LOG_ERROR, "Unable to parse '%s': %s\n", args,
/* An explicit key ends positional matching for the rest of the string. */
891 while ((o = av_opt_next(ctx->priv, o))); /* discard all remaining shorthand */
896 av_log(ctx, AV_LOG_DEBUG, "Setting '%s' to value '%s'\n", key, value);
/* Generic AVFilterContext options are set directly on ctx... */
898 if (av_opt_find(ctx, key, NULL, 0, 0)) {
899 ret = av_opt_set(ctx, key, value, 0);
/* ...all other keys go into the dict and the private context. */
906 av_dict_set(options, key, value, 0);
907 if ((ret = av_opt_set(ctx->priv, key, value, AV_OPT_SEARCH_CHILDREN)) < 0) {
908 if (!av_opt_find(ctx->priv, key, NULL, 0, AV_OPT_SEARCH_CHILDREN | AV_OPT_SEARCH_FAKE_OBJ)) {
909 if (ret == AVERROR_OPTION_NOT_FOUND)
910 av_log(ctx, AV_LOG_ERROR, "Option '%s' not found\n", key);
/* A pending 'enable' expression is compiled once parsing is done. */
923 if (ctx->enable_str) {
924 ret = set_enable_expr(ctx, ctx->enable_str);
931 int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options)
935 ret = av_opt_set_dict(ctx, options);
937 av_log(ctx, AV_LOG_ERROR, "Error applying generic filter options.\n");
941 if (ctx->filter->flags & AVFILTER_FLAG_SLICE_THREADS &&
942 ctx->thread_type & ctx->graph->thread_type & AVFILTER_THREAD_SLICE &&
943 ctx->graph->internal->thread_execute) {
944 ctx->thread_type = AVFILTER_THREAD_SLICE;
945 ctx->internal->execute = ctx->graph->internal->thread_execute;
947 ctx->thread_type = 0;
950 if (ctx->filter->priv_class) {
951 ret = av_opt_set_dict2(ctx->priv, options, AV_OPT_SEARCH_CHILDREN);
953 av_log(ctx, AV_LOG_ERROR, "Error applying options to the filter.\n");
958 if (ctx->filter->init_opaque)
959 ret = ctx->filter->init_opaque(ctx, NULL);
960 else if (ctx->filter->init)
961 ret = ctx->filter->init(ctx);
962 else if (ctx->filter->init_dict)
963 ret = ctx->filter->init_dict(ctx, options);
/* Initialize a filter from an option string. For a handful of old filters
 * a compatibility shim rewrites the legacy ':'-separated list syntax into
 * the current '|'-separated form before parsing.
 * NOTE(review): large parts of this function (loops, frees, gotos, the
 * 'end' label) are missing from this extract; comments only describe the
 * visible code. */
968 int avfilter_init_str(AVFilterContext *filter, const char *args)
970 AVDictionary *options = NULL;
971 AVDictionaryEntry *e;
/* Options given to a filter without a priv_class is a caller error. */
975 if (!filter->filter->priv_class) {
976 av_log(filter, AV_LOG_ERROR, "This filter does not take any "
977 "options, but options were provided: %s.\n", args);
978 return AVERROR(EINVAL);
981 #if FF_API_OLD_FILTER_OPTS_ERROR
/* Filters whose old syntax used ':' as a list separator. */
982 if ( !strcmp(filter->filter->name, "format") ||
983 !strcmp(filter->filter->name, "noformat") ||
984 !strcmp(filter->filter->name, "frei0r") ||
985 !strcmp(filter->filter->name, "frei0r_src") ||
986 !strcmp(filter->filter->name, "ocv") ||
987 !strcmp(filter->filter->name, "pan") ||
988 !strcmp(filter->filter->name, "pp") ||
989 !strcmp(filter->filter->name, "aevalsrc")) {
990 /* a hack for compatibility with the old syntax
991 * replace colons with |s */
992 char *copy = av_strdup(args);
994 int nb_leading = 0; // number of leading colons to skip
998 ret = AVERROR(ENOMEM);
/* Some filters take a filter-name argument first: skip its separators. */
1002 if (!strcmp(filter->filter->name, "frei0r") ||
1003 !strcmp(filter->filter->name, "ocv"))
1005 else if (!strcmp(filter->filter->name, "frei0r_src"))
1008 while (nb_leading--) {
1011 p = copy + strlen(copy);
/* Any remaining ':' means the deprecated syntax was actually used. */
1017 deprecated = strchr(p, ':') != NULL;
1019 if (!strcmp(filter->filter->name, "aevalsrc")) {
/* aevalsrc: only the channel-expression part used ':' separators;
 * stop rewriting once the first key=value option is reached. */
1021 while ((p = strchr(p, ':')) && p[1] != ':') {
1022 const char *epos = strchr(p + 1, '=');
1023 const char *spos = strchr(p + 1, ':');
1024 const int next_token_is_opt = epos && (!spos || epos < spos);
1025 if (next_token_is_opt) {
1029 /* next token does not contain a '=', assume a channel expression */
1033 if (p && *p == ':') { // double sep '::' found
1035 memmove(p, p + 1, strlen(p));
1038 while ((p = strchr(p, ':')))
1042 av_log(filter, AV_LOG_ERROR, "This syntax is deprecated. Use "
1043 "'|' to separate the list items ('%s' instead of '%s')\n",
1045 ret = AVERROR(EINVAL);
1047 ret = process_options(filter, &options, copy);
/* Normal path: parse args directly. */
1056 ret = process_options(filter, &options, args);
1062 ret = avfilter_init_dict(filter, &options);
/* Any leftover dict entry is an option nobody consumed. */
1066 if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
1067 av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
1068 ret = AVERROR_OPTION_NOT_FOUND;
1073 av_dict_free(&options);
1078 const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx)
1080 return pads[pad_idx].name;
1083 enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx)
1085 return pads[pad_idx].type;
1088 static int default_filter_frame(AVFilterLink *link, AVFrame *frame)
1090 return ff_filter_frame(link->dst->outputs[0], frame);
1093 static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
1095 int (*filter_frame)(AVFilterLink *, AVFrame *);
1096 AVFilterContext *dstctx = link->dst;
1097 AVFilterPad *dst = link->dstpad;
1100 if (!(filter_frame = dst->filter_frame))
1101 filter_frame = default_filter_frame;
1103 if (dst->needs_writable) {
1104 ret = ff_inlink_make_frame_writable(link, &frame);
1109 ff_inlink_process_commands(link, frame);
1110 dstctx->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1112 if (dstctx->is_disabled &&
1113 (dstctx->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC))
1114 filter_frame = default_filter_frame;
1115 ret = filter_frame(link, frame);
1116 link->frame_count_out++;
1120 av_frame_free(&frame);
1124 int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
1127 FF_TPRINTF_START(NULL, filter_frame); ff_tlog_link(NULL, link, 1); ff_tlog(NULL, " "); ff_tlog_ref(NULL, frame, 1);
1129 /* Consistency checks */
1130 if (link->type == AVMEDIA_TYPE_VIDEO) {
1131 if (strcmp(link->dst->filter->name, "buffersink") &&
1132 strcmp(link->dst->filter->name, "format") &&
1133 strcmp(link->dst->filter->name, "idet") &&
1134 strcmp(link->dst->filter->name, "null") &&
1135 strcmp(link->dst->filter->name, "scale")) {
1136 av_assert1(frame->format == link->format);
1137 av_assert1(frame->width == link->w);
1138 av_assert1(frame->height == link->h);
1141 if (frame->format != link->format) {
1142 av_log(link->dst, AV_LOG_ERROR, "Format change is not supported\n");
1145 if (frame->channels != link->channels) {
1146 av_log(link->dst, AV_LOG_ERROR, "Channel count change is not supported\n");
1149 if (frame->channel_layout != link->channel_layout) {
1150 av_log(link->dst, AV_LOG_ERROR, "Channel layout change is not supported\n");
1153 if (frame->sample_rate != link->sample_rate) {
1154 av_log(link->dst, AV_LOG_ERROR, "Sample rate change is not supported\n");
1159 link->frame_blocked_in = link->frame_wanted_out = 0;
1160 link->frame_count_in++;
1161 filter_unblock(link->dst);
1162 ret = ff_framequeue_add(&link->fifo, frame);
1164 av_frame_free(&frame);
1167 ff_filter_set_ready(link->dst, 300);
1171 av_frame_free(&frame);
1172 return AVERROR_PATCHWELCOME;
1175 static int samples_ready(AVFilterLink *link, unsigned min)
1177 return ff_framequeue_queued_frames(&link->fifo) &&
1178 (ff_framequeue_queued_samples(&link->fifo) >= min ||
1182 static int take_samples(AVFilterLink *link, unsigned min, unsigned max,
1185 AVFrame *frame0, *frame, *buf;
1186 unsigned nb_samples, nb_frames, i, p;
1189 /* Note: this function relies on no format changes and must only be
1190 called with enough samples. */
1191 av_assert1(samples_ready(link, link->min_samples));
1192 frame0 = frame = ff_framequeue_peek(&link->fifo, 0);
1193 if (!link->fifo.samples_skipped && frame->nb_samples >= min && frame->nb_samples <= max) {
1194 *rframe = ff_framequeue_take(&link->fifo);
1200 if (nb_samples + frame->nb_samples > max) {
1201 if (nb_samples < min)
1205 nb_samples += frame->nb_samples;
1207 if (nb_frames == ff_framequeue_queued_frames(&link->fifo))
1209 frame = ff_framequeue_peek(&link->fifo, nb_frames);
1212 buf = ff_get_audio_buffer(link, nb_samples);
1214 return AVERROR(ENOMEM);
1215 ret = av_frame_copy_props(buf, frame0);
1217 av_frame_free(&buf);
1220 buf->pts = frame0->pts;
1223 for (i = 0; i < nb_frames; i++) {
1224 frame = ff_framequeue_take(&link->fifo);
1225 av_samples_copy(buf->extended_data, frame->extended_data, p, 0,
1226 frame->nb_samples, link->channels, link->format);
1227 p += frame->nb_samples;
1228 av_frame_free(&frame);
1230 if (p < nb_samples) {
1231 unsigned n = nb_samples - p;
1232 frame = ff_framequeue_peek(&link->fifo, 0);
1233 av_samples_copy(buf->extended_data, frame->extended_data, p, 0, n,
1234 link->channels, link->format);
1235 ff_framequeue_skip_samples(&link->fifo, n, link->time_base);
1242 static int ff_filter_frame_to_filter(AVFilterLink *link)
1244 AVFrame *frame = NULL;
1245 AVFilterContext *dst = link->dst;
1248 av_assert1(ff_framequeue_queued_frames(&link->fifo));
1249 ret = link->min_samples ?
1250 ff_inlink_consume_samples(link, link->min_samples, link->max_samples, &frame) :
1251 ff_inlink_consume_frame(link, &frame);
1257 /* The filter will soon have received a new frame, that may allow it to
1258 produce one or more: unblock its outputs. */
1259 filter_unblock(dst);
1260 /* AVFilterPad.filter_frame() expect frame_count_out to have the value
1261 before the frame; ff_filter_frame_framed() will re-increment it. */
1262 link->frame_count_out--;
1263 ret = ff_filter_frame_framed(link, frame);
1264 if (ret < 0 && ret != link->status_out) {
1265 ff_avfilter_link_set_out_status(link, ret, AV_NOPTS_VALUE);
1267 /* Run once again, to see if several frames were available, or if
1268 the input status has also changed, or any other reason. */
1269 ff_filter_set_ready(dst, 300);
1274 static int forward_status_change(AVFilterContext *filter, AVFilterLink *in)
1276 unsigned out = 0, progress = 0;
1279 av_assert0(!in->status_out);
1280 if (!filter->nb_outputs) {
1281 /* not necessary with the current API and sinks */
1284 while (!in->status_out) {
1285 if (!filter->outputs[out]->status_in) {
1287 ret = ff_request_frame_to_filter(filter->outputs[out]);
1291 if (++out == filter->nb_outputs) {
1293 /* Every output already closed: input no longer interesting
1294 (example: overlay in shortest mode, other input closed). */
1295 ff_avfilter_link_set_out_status(in, in->status_in, in->status_in_pts);
1302 ff_filter_set_ready(filter, 200);
1306 static int ff_filter_activate_default(AVFilterContext *filter)
1310 for (i = 0; i < filter->nb_inputs; i++) {
1311 if (samples_ready(filter->inputs[i], filter->inputs[i]->min_samples)) {
1312 return ff_filter_frame_to_filter(filter->inputs[i]);
1315 for (i = 0; i < filter->nb_inputs; i++) {
1316 if (filter->inputs[i]->status_in && !filter->inputs[i]->status_out) {
1317 av_assert1(!ff_framequeue_queued_frames(&filter->inputs[i]->fifo));
1318 return forward_status_change(filter, filter->inputs[i]);
1321 for (i = 0; i < filter->nb_outputs; i++) {
1322 if (filter->outputs[i]->frame_wanted_out &&
1323 !filter->outputs[i]->frame_blocked_in) {
1324 return ff_request_frame_to_filter(filter->outputs[i]);
1327 return FFERROR_NOT_READY;
1331 Filter scheduling and activation
1333 When a filter is activated, it must:
1334 - if possible, output a frame;
1335 - else, if relevant, forward the input status change;
1336 - else, check outputs for wanted frames and forward the requests.
1338 The following AVFilterLink fields are used for activation:
1342 This field indicates if a frame is needed on this input of the
1343 destination filter. A positive value indicates that a frame is needed
1344 to process queued frames or internal data or to satisfy the
1345 application; a zero value indicates that a frame is not especially
1346 needed but could be processed anyway; a negative value indicates that a
1347 frame would just be queued.
1349 It is set by filters using ff_request_frame() or ff_request_no_frame(),
1350 when requested by the application through a specific API or when it is
1351 set on one of the outputs.
1353 It is cleared when a frame is sent from the source using
1356 It is also cleared when a status change is sent from the source using
1357 ff_avfilter_link_set_in_status().
1361 This field means that the source filter cannot generate a frame as is.
1362 Its goal is to avoid repeatedly calling the request_frame() method on
1365 It is set by the framework on all outputs of a filter before activating it.
1367 It is automatically cleared by ff_filter_frame().
1369 It is also automatically cleared by ff_avfilter_link_set_in_status().
1371 It is also cleared on all outputs (using filter_unblock()) when
1372 something happens on an input: processing a frame or changing the
1377 Contains the frames queued on a filter input. If it contains frames and
1378 frame_wanted_out is not set, then the filter can be activated. If that
1379 results in the filter not being able to use these frames, the filter must set
1380 frame_wanted_out to ask for more frames.
1382 - status_in and status_in_pts:
1384 Status (EOF or error code) of the link and timestamp of the status
1385 change (in link time base, same as frames) as seen from the input of
1386 the link. The status change is considered happening after the frames
1389 It is set by the source filter using ff_avfilter_link_set_in_status().
1393 Status of the link as seen from the output of the link. The status
1394 change is considered having already happened.
1396 It is set by the destination filter using
1397 ff_avfilter_link_set_out_status().
1399 Filters are activated according to the ready field, set using the
1400 ff_filter_set_ready(). Eventually, a priority queue will be used.
1401 ff_filter_set_ready() is called whenever anything could cause progress to
1402 be possible. Marking a filter ready when it is not is not a problem,
1403 except for the small overhead it causes.
1405 Conditions that cause a filter to be marked ready are:
1407 - frames added on an input link;
1409 - changes in the input or output status of an input link;
1411 - requests for a frame on an output link;
1413 - after any actual processing using the legacy methods (filter_frame(),
1414 and request_frame() to acknowledge status changes), to run once more
1415 and check if enough input was present for several frames.
1417 Examples of scenarios to consider:
1419 - buffersrc: activate if frame_wanted_out to notify the application;
1420 activate when the application adds a frame to push it immediately.
1422 - testsrc: activate only if frame_wanted_out to produce and push a frame.
1424 - concat (not at stitch points): can process a frame on any output.
1425 Activate if frame_wanted_out on output to forward on the corresponding
1426 input. Activate when a frame is present on input to process it
1429 - framesync: needs at least one frame on each input; extra frames on the
1430 wrong input will accumulate. When a frame is first added on one input,
1431 set frame_wanted_out<0 on it to avoid getting more (would trigger
1432 testsrc) and frame_wanted_out>0 on the other to allow processing it.
1434 Activation of old filters:
1436 In order to activate a filter implementing the legacy filter_frame() and
1437 request_frame() methods, perform the first possible of the following
1440 - If an input has frames in fifo and frame_wanted_out == 0, dequeue a
1441 frame and call filter_frame().
1443 Rationale: filter frames as soon as possible instead of leaving them
1444 queued; frame_wanted_out < 0 is not possible since the old API does not
1445 set it nor provides any similar feedback; frame_wanted_out > 0 happens
1446 when min_samples > 0 and there are not enough samples queued.
1448 - If an input has status_in set but not status_out, try to call
1449 request_frame() on one of the outputs in the hope that it will trigger
1450 request_frame() on the input with status_in and acknowledge it. This is
1451 awkward and fragile, filters with several inputs or outputs should be
1452 updated to direct activation as soon as possible.
1454 - If an output has frame_wanted_out > 0 and not frame_blocked_in, call
1457 Rationale: checking frame_blocked_in is necessary to avoid requesting
1458 repeatedly on a blocked input if another is not blocked (example:
1459 [buffersrc1][testsrc1][buffersrc2][testsrc2]concat=v=2).
1461 TODO: respect needs_fifo and remove auto-inserted fifos.
/* Activate a filter: run its activate() callback if it has one, or fall
 * back to the legacy default activation. FFERROR_NOT_READY from the
 * callback is treated as "nothing to do", not as an error. */
1465 int ff_filter_activate(AVFilterContext *filter)
1469 /* Generic timeline support is not yet implemented but should be easy */
/* A filter cannot both claim generic timeline handling (framework-driven
   enable/disable) and implement its own activate(). */
1470 av_assert1(!(filter->filter->flags & AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC &&
1471 filter->filter->activate));
1473 ret = filter->filter->activate ? filter->filter->activate(filter) :
1474 ff_filter_activate_default(filter);
1475 if (ret == FFERROR_NOT_READY)
/* Test and acknowledge a status change (EOF/error) on an input link.
 * On return, *rstatus is 0 if no status change applies yet (frames are
 * still queued, or no status was set), the already-acknowledged status,
 * or the newly acknowledged status; *rpts is the link's current pts
 * (updated to the status change pts when newly acknowledged). */
1480 int ff_inlink_acknowledge_status(AVFilterLink *link, int *rstatus, int64_t *rpts)
1482 *rpts = link->current_pts;
/* Queued frames must be consumed before the status change applies. */
1483 if (ff_framequeue_queued_frames(&link->fifo))
1484 return *rstatus = 0;
/* Already acknowledged earlier: report the same status again. */
1485 if (link->status_out)
1486 return *rstatus = link->status_out;
/* No status change pending at all. */
1487 if (!link->status_in)
1488 return *rstatus = 0;
/* Acknowledge now: copy input-side status to the output side. */
1489 *rstatus = link->status_out = link->status_in;
1490 ff_update_link_current_pts(link, link->status_in_pts);
1491 *rpts = link->current_pts;
/* Return nonzero if at least one frame is queued on the input link. */
1495 int ff_inlink_check_available_frame(AVFilterLink *link)
1497 return ff_framequeue_queued_frames(&link->fifo) > 0;
/* Return nonzero if at least 'min' audio samples are queued on the link,
 * or if any samples at all are queued and the input side has reached a
 * status change (EOF): the tail must then be consumable even if short. */
1500 int ff_inlink_check_available_samples(AVFilterLink *link, unsigned min)
1502 uint64_t samples = ff_framequeue_queued_samples(&link->fifo);
1504 return samples >= min || (link->status_in && samples);
/* Per-frame bookkeeping when a frame is consumed from an input link:
 * advance the link's current pts, process pending filter commands due at
 * this frame, re-evaluate the timeline 'enable' expression (updating the
 * destination's is_disabled flag), and count the frame as output. */
1507 static void consume_update(AVFilterLink *link, const AVFrame *frame)
1509 ff_update_link_current_pts(link, frame->pts);
1510 ff_inlink_process_commands(link, frame);
1511 link->dst->is_disabled = !ff_inlink_evaluate_timeline_at_frame(link, frame);
1512 link->frame_count_out++;
/* Take one frame from the input link's queue into *rframe.
 * If partial-frame consumption previously happened on this queue
 * (samples_skipped), delegate to ff_inlink_consume_samples() with the
 * head frame's own sample count so the partially consumed frame is
 * handled consistently. */
1515 int ff_inlink_consume_frame(AVFilterLink *link, AVFrame **rframe)
1520 if (!ff_inlink_check_available_frame(link))
/* A previous consume_samples() left a partially consumed head frame. */
1523 if (link->fifo.samples_skipped) {
1524 frame = ff_framequeue_peek(&link->fifo, 0);
1525 return ff_inlink_consume_samples(link, frame->nb_samples, frame->nb_samples, rframe);
/* Normal path: pop the head frame and update link bookkeeping. */
1528 frame = ff_framequeue_take(&link->fifo);
1529 consume_update(link, frame);
/* Take between 'min' and 'max' audio samples from the input link's queue
 * into *rframe. Fails (no frame returned) when fewer than 'min' samples
 * are available, unless the input has reached EOF, in which case the
 * remaining short tail is allowed out. */
1534 int ff_inlink_consume_samples(AVFilterLink *link, unsigned min, unsigned max,
1542 if (!ff_inlink_check_available_samples(link, min))
/* At EOF, relax 'min' so the final short tail can be consumed. */
1544 if (link->status_in)
1545 min = FFMIN(min, ff_framequeue_queued_samples(&link->fifo));
1546 ret = take_samples(link, min, max, &frame);
1549 consume_update(link, frame);
/* Ensure *rframe is writable: if it already is, return success unchanged;
 * otherwise allocate a fresh buffer from the link's pool (video or audio),
 * copy properties and data into it, free the original frame, and replace
 * *rframe with the copy. Returns 0 or a negative AVERROR code. */
1554 int ff_inlink_make_frame_writable(AVFilterLink *link, AVFrame **rframe)
1556 AVFrame *frame = *rframe;
/* Fast path: nothing to do when the frame is already writable. */
1560 if (av_frame_is_writable(frame))
1562 av_log(link->dst, AV_LOG_DEBUG, "Copying data in avfilter.\n");
/* Allocate a replacement buffer of the right kind for this link. */
1564 switch (link->type) {
1565 case AVMEDIA_TYPE_VIDEO:
1566 out = ff_get_video_buffer(link, link->w, link->h);
1568 case AVMEDIA_TYPE_AUDIO:
1569 out = ff_get_audio_buffer(link, frame->nb_samples);
/* Links are only ever video or audio here. */
1572 return AVERROR(EINVAL);
1575 return AVERROR(ENOMEM);
/* Copy metadata/side data (pts, flags, etc.) before the payload. */
1577 ret = av_frame_copy_props(out, frame);
1579 av_frame_free(&out);
/* Copy the actual pixel/sample data into the new writable buffer. */
1583 switch (link->type) {
1584 case AVMEDIA_TYPE_VIDEO:
1585 av_image_copy(out->data, out->linesize, (const uint8_t **)frame->data, frame->linesize,
1586 frame->format, frame->width, frame->height);
1588 case AVMEDIA_TYPE_AUDIO:
1589 av_samples_copy(out->extended_data, frame->extended_data,
1590 0, 0, frame->nb_samples,
/* Unreachable: type was validated by the first switch. */
1595 av_assert0(!"reached");
/* Drop the original (shared) frame; caller now owns the copy. */
1598 av_frame_free(&frame);
/* Execute and pop all queued filter commands whose scheduled time (in
 * seconds) is at or before this frame's pts converted to seconds via the
 * link time base. */
1603 int ff_inlink_process_commands(AVFilterLink *link, const AVFrame *frame)
1605 AVFilterCommand *cmd = link->dst->command_queue;
/* Commands are queued in time order; process the due prefix. */
1607 while(cmd && cmd->time <= frame->pts * av_q2d(link->time_base)){
1608 av_log(link->dst, AV_LOG_DEBUG,
1609 "Processing command time:%f command:%s arg:%s\n",
1610 cmd->time, cmd->command, cmd->arg);
1611 avfilter_process_command(link->dst, cmd->command, cmd->arg, 0, 0, cmd->flags);
1612 ff_command_queue_pop(link->dst);
1613 cmd= link->dst->command_queue;
/* Evaluate the destination filter's timeline 'enable' expression for this
 * frame. Returns nonzero (enabled) when no expression is set, or when the
 * expression's absolute value is >= 0.5. Exposes N (frame count), T (time
 * in seconds, NAN if pts unknown), W/H (link size) and POS (byte position,
 * NAN if unknown) as expression variables. */
1618 int ff_inlink_evaluate_timeline_at_frame(AVFilterLink *link, const AVFrame *frame)
1620 AVFilterContext *dstctx = link->dst;
1621 int64_t pts = frame->pts;
1622 int64_t pos = frame->pkt_pos;
/* No timeline expression: the filter is always enabled. */
1624 if (!dstctx->enable_str)
1627 dstctx->var_values[VAR_N] = link->frame_count_out;
1628 dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
1629 dstctx->var_values[VAR_W] = link->w;
1630 dstctx->var_values[VAR_H] = link->h;
/* -1 is the "unknown position" sentinel for pkt_pos. */
1631 dstctx->var_values[VAR_POS] = pos == -1 ? NAN : pos;
1633 return fabs(av_expr_eval(dstctx->enable, dstctx->var_values, NULL)) >= 0.5;
/* Mark that a frame is wanted on this input link and schedule the source
 * filter (priority 100) so it can produce one. Must not be called once a
 * status change (EOF/error) is pending or acknowledged on the link. */
1636 void ff_inlink_request_frame(AVFilterLink *link)
1638 av_assert1(!link->status_in);
1639 av_assert1(!link->status_out);
1640 link->frame_wanted_out = 1;
1641 ff_filter_set_ready(link->src, 100);
/* Set a status (EOF or error) on an input link from the destination side:
 * clear the frame-wanted and blocked flags, record the status on the
 * output side, drop all frames still queued on the link, and mirror the
 * status to the input side if the source had not set one already.
 * No-op if a status was already set on the output side. */
1644 void ff_inlink_set_status(AVFilterLink *link, int status)
1646 if (link->status_out)
1648 link->frame_wanted_out = 0;
1649 link->frame_blocked_in = 0;
/* AV_NOPTS_VALUE: the status takes effect immediately, no timestamp. */
1650 ff_avfilter_link_set_out_status(link, status, AV_NOPTS_VALUE);
/* Discard frames that will never be consumed now. */
1651 while (ff_framequeue_queued_frames(&link->fifo)) {
1652 AVFrame *frame = ff_framequeue_take(&link->fifo);
1653 av_frame_free(&frame);
/* Keep both sides of the link consistent. */
1655 if (!link->status_in)
1656 link->status_in = status;
/* Return the status set on the input side of the link (what the source
 * filter reported via ff_avfilter_link_set_in_status()), or 0 if none. */
1659 int ff_outlink_get_status(AVFilterLink *link)
1661 return link->status_in;
/* Public accessor for the AVClass of AVFilterContext (used for AVOptions
 * and logging). */
1664 const AVClass *avfilter_get_class(void)
1666 return &avfilter_class;
1669 int ff_filter_init_hw_frames(AVFilterContext *avctx, AVFilterLink *link,
1670 int default_pool_size)
1672 AVHWFramesContext *frames;
1674 // Must already be set by caller.
1675 av_assert0(link->hw_frames_ctx);
1677 frames = (AVHWFramesContext*)link->hw_frames_ctx->data;
1679 if (frames->initial_pool_size == 0) {
1680 // Dynamic allocation is necessarily supported.
1681 } else if (avctx->extra_hw_frames >= 0) {
1682 frames->initial_pool_size += avctx->extra_hw_frames;
1684 frames->initial_pool_size = default_pool_size;