Merge commit 'e48746deec48e9ff195841bc3266b4e153a878cd'
diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
index 21f8d9e00df4848d933f06bcdac7de0703815f79..662f93304135b0906839e78f306efc62530c732f 100644
--- a/libavfilter/avfilter.c
+++ b/libavfilter/avfilter.c
@@ -316,14 +316,6 @@ int avfilter_config_links(AVFilterContext *filter)
                     link->time_base = (AVRational) {1, link->sample_rate};
             }
 
-            if ((config_link = link->dstpad->config_props))
-                if ((ret = config_link(link)) < 0) {
-                    av_log(link->dst, AV_LOG_ERROR,
-                           "Failed to configure input pad on %s\n",
-                           link->dst->name);
-                    return ret;
-                }
-
             if (link->src->nb_inputs && link->src->inputs[0]->hw_frames_ctx &&
                 !link->hw_frames_ctx) {
                 AVHWFramesContext *input_ctx = (AVHWFramesContext*)link->src->inputs[0]->hw_frames_ctx->data;
@@ -335,6 +327,14 @@ int avfilter_config_links(AVFilterContext *filter)
                 }
             }
 
+            if ((config_link = link->dstpad->config_props))
+                if ((ret = config_link(link)) < 0) {
+                    av_log(link->dst, AV_LOG_ERROR,
+                           "Failed to configure input pad on %s\n",
+                           link->dst->name);
+                    return ret;
+                }
+
             link->init_state = AVLINK_INIT;
         }
     }
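
Note on the two hunks above: the call to the destination pad's config_props is moved below the hw_frames_ctx propagation, so an input pad's config callback already sees link->hw_frames_ctx when it runs. A minimal sketch of such a callback, assuming the usual libavfilter/libavutil headers; example_config_input and its logging are illustrative only, not part of this change:

static int example_config_input(AVFilterLink *link)
{
    /* With the reordering above, a hw_frames_ctx inherited from the source
     * filter is already set by the time this callback runs. */
    if (link->hw_frames_ctx) {
        AVHWFramesContext *frames = (AVHWFramesContext *)link->hw_frames_ctx->data;
        av_log(link->dst, AV_LOG_VERBOSE, "input uses hw frames, sw format %s\n",
               av_get_pix_fmt_name(frames->sw_format));
    }
    return 0;
}
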
@@ -616,6 +616,8 @@ static const AVOption avfilter_options[] = {
         { .i64 = AVFILTER_THREAD_SLICE }, 0, INT_MAX, FLAGS, "thread_type" },
         { "slice", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AVFILTER_THREAD_SLICE }, .unit = "thread_type" },
     { "enable", "set enable expression", OFFSET(enable_str), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
+    { "threads", "Allowed number of threads", OFFSET(nb_threads), AV_OPT_TYPE_INT,
+        { .i64 = 0 }, 0, INT_MAX, FLAGS },
     { NULL },
 };
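
The new "threads" option maps to AVFilterContext.nb_threads, so callers can cap threading per filter instance instead of only graph-wide. A hedged usage sketch, assuming an already allocated AVFilterGraph *graph and setting the option before the filter is initialized; filter and instance names are placeholders:

const AVFilter *scale = avfilter_get_by_name("scale");
AVFilterContext *ctx  = avfilter_graph_alloc_filter(graph, scale, "scale0");

/* Cap this instance at 2 threads; 0 (the default) keeps the graph-wide limit. */
av_opt_set_int(ctx, "threads", 2, 0);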
 
@@ -781,6 +783,13 @@ void avfilter_free(AVFilterContext *filter)
     av_free(filter);
 }
 
+int ff_filter_get_nb_threads(AVFilterContext *ctx)
+{
+     if (ctx->nb_threads > 0)
+         return FFMIN(ctx->nb_threads, ctx->graph->nb_threads);
+     return ctx->graph->nb_threads;
+}
+
 static int process_options(AVFilterContext *ctx, AVDictionary **options,
                            const char *args)
 {
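
ff_filter_get_nb_threads() gives filters one place to resolve the per-filter limit against the graph-wide one. A sketch of how a slice-threaded filter might consume it, assuming libavfilter's internal.h; do_slice and hypothetical_filter_frame are illustrative names, not part of this patch:

static int do_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AVFrame *frame = arg;
    /* Process rows [jobnr * height / nb_jobs, (jobnr + 1) * height / nb_jobs). */
    return 0;
}

static int hypothetical_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    /* Never schedule more jobs than rows, nor than the resolved thread limit. */
    int nb_jobs = FFMIN(frame->height, ff_filter_get_nb_threads(ctx));
    int ret = ctx->internal->execute(ctx, do_slice, frame, NULL, nb_jobs);
    if (ret < 0)
        return ret;
    return ff_filter_frame(ctx->outputs[0], frame);
}
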
@@ -1111,7 +1120,7 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
     pts = out->pts;
     if (dstctx->enable_str) {
         int64_t pos = av_frame_get_pkt_pos(out);
-        dstctx->var_values[VAR_N] = link->frame_count;
+        dstctx->var_values[VAR_N] = link->frame_count_out;
         dstctx->var_values[VAR_T] = pts == AV_NOPTS_VALUE ? NAN : pts * av_q2d(link->time_base);
         dstctx->var_values[VAR_W] = link->w;
         dstctx->var_values[VAR_H] = link->h;
@@ -1123,7 +1132,7 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
             filter_frame = default_filter_frame;
     }
     ret = filter_frame(link, out);
-    link->frame_count++;
+    link->frame_count_out++;
     ff_update_link_current_pts(link, pts);
     return ret;
 
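The per-link frame counter is split in two: frame_count_out, used above for the enable expression's n variable, counts frames delivered downstream, while frame_count_in (incremented in the next hunk) counts frames handed to ff_filter_frame(). Illustration only, assuming a pass-through filter and the usual headers; hypothetical_filter_frame is not part of this patch:

static int hypothetical_filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    /* frame_count_out is the index of the frame currently being filtered;
     * frame_count_in - frame_count_out is whatever the link still holds. */
    av_log(inlink->dst, AV_LOG_DEBUG, "frame #%"PRId64", %"PRId64" pending\n",
           inlink->frame_count_out,
           inlink->frame_count_in - inlink->frame_count_out);
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}
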
@@ -1212,6 +1221,7 @@ int ff_filter_frame(AVFilterLink *link, AVFrame *frame)
     }
 
     link->frame_wanted_out = 0;
+    link->frame_count_in++;
     /* Go directly to actual filtering if possible */
     if (link->type == AVMEDIA_TYPE_AUDIO &&
         link->min_samples &&