2 * Copyright (c) 2011 Stefano Sabatini
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 * filter for selecting which frame passes in the filterchain
26 #include "libavutil/avstring.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/fifo.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
38 #include "scene_sad.h"
/* Names of the variables available to the user-supplied select expression;
 * each entry maps by index to a VAR_* enumerator and a slot in
 * SelectContext.var_values, filled per frame in select_frame().
 * (NOTE(review): partial view — some entries and the array terminator are
 * not visible here.) */
40 static const char *const var_names[] = {
43 "pts", ///< original pts in the file of the frame
44 "start_pts", ///< first PTS in the stream, expressed in TB units
45 "prev_pts", ///< previous frame PTS
46 "prev_selected_pts", ///< previous selected frame PTS
48 "t", ///< timestamp expressed in seconds
49 "start_t", ///< first PTS in the stream, expressed in seconds
50 "prev_t", ///< previous frame time
51 "prev_selected_t", ///< previously selected time
53 "pict_type", ///< the type of picture in the movie
69 "interlace_type", ///< the frame interlace type
74 "consumed_samples_n",///< number of samples consumed by the filter (only audio)
75 "samples_n", ///< number of samples in the current frame (only audio)
76 "sample_rate", ///< sample rate (only audio)
78 "n", ///< frame number (starting from zero)
79 "selected_n", ///< selected frame number (starting from zero)
80 "prev_selected_n", ///< number of the last selected frame
82 "key", ///< tell if the frame is a key frame
83 "pos", ///< original position in the file of the frame
87 "concatdec_select", ///< frame is within the interval set by the concat demuxer
/* Indices into SelectContext.var_values[], mirroring var_names[] above.
 * (NOTE(review): partial view — the enum keyword and most enumerators are
 * not visible here.) */
98 VAR_PREV_SELECTED_PTS,
122 VAR_INTERLACE_TYPE_P,
123 VAR_INTERLACE_TYPE_T,
124 VAR_INTERLACE_TYPE_B,
126 VAR_CONSUMED_SAMPLES_N,
139 VAR_CONCATDEC_SELECT,
/* Private context shared by the "select" (video) and "aselect" (audio)
 * filters.  (NOTE(review): partial view — fields referenced elsewhere in
 * this file, e.g. expr/expr_str, bitdepth, nb_planes, width/height and
 * nb_outputs, are not visible here.) */
144 typedef struct SelectContext {
145 const AVClass *class;
/* One slot per VAR_* enumerator; refreshed for every frame. */
148 double var_values[VAR_VARS_NB];
153 int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
154 ff_scene_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
155 double prev_mafd; ///< previous MAFD (scene detect only)
156 AVFrame *prev_picref; ///< previous frame (scene detect only)
158 int select_out; ///< mark the selected output pad index
/* Byte offset of a SelectContext field, for the AVOption tables below. */
162 #define OFFSET(x) offsetof(SelectContext, x)
/* Expands to the AVOption table for one filter variant ("select" or
 * "aselect"); each option has a long name and a short alias.
 * (NOTE(review): partial view — the table terminator lines of the macro
 * are not visible here.) */
163 #define DEFINE_OPTIONS(filt_name, FLAGS) \
164 static const AVOption filt_name##_options[] = { \
165 { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
166 { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
167 { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
168 { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \

/* Forward declaration: assigned to every output pad created in init(). */
172 static int request_frame(AVFilterLink *outlink);
174 static av_cold int init(AVFilterContext *ctx)
/* Shared init for both filter variants: parse the user expression, detect
 * whether it references scene detection, and create one output pad per
 * requested output.
 * (NOTE(review): partial view — local declarations, the opening/closing
 * braces and some error-path statements are not visible here.) */
176 SelectContext *select = ctx->priv;
179 if ((ret = av_expr_parse(&select->expr, select->expr_str,
180 var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
181 av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
/* A literal "scene" substring in the expression enables scene detection. */
185 select->do_scene_detect = !!strstr(select->expr_str, "scene");
187 for (i = 0; i < select->nb_outputs; i++) {
188 AVFilterPad pad = { 0 };
190 pad.name = av_asprintf("output%d", i);
192 return AVERROR(ENOMEM);
/* Output pads inherit the media type of the filter's first input. */
193 pad.type = ctx->filter->inputs[0].type;
194 pad.request_frame = request_frame;
195 if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
/* Values exposed through the "interlace_type" expression variable:
 * progressive, top-field-first, bottom-field-first (see select_frame()). */
204 #define INTERLACE_TYPE_P 0
205 #define INTERLACE_TYPE_T 1
206 #define INTERLACE_TYPE_B 2
208 static int config_input(AVFilterLink *inlink)
/* Per-link setup: cache plane geometry used by scene detection and seed
 * the expression variables with their initial/constant values.
 * (NOTE(review): partial view — braces and some statements are not
 * visible here.) */
210 SelectContext *select = inlink->dst->priv;
211 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
/* Planar YUV with at least 3 components: compare only the luma plane
 * (nb_planes is forced to 1 below); otherwise compare all planes. */
212 int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
213 (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
214 desc->nb_components >= 3;
216 select->bitdepth = desc->comp[0].depth;
217 select->nb_planes = is_yuv ? 1 : av_pix_fmt_count_planes(inlink->format);
219 for (int plane = 0; plane < select->nb_planes; plane++) {
220 ptrdiff_t line_size = av_image_get_linesize(inlink->format, inlink->w, plane);
221 int vsub = desc->log2_chroma_h;
/* Convert the line size in bytes to a width in samples for >8-bit depths. */
223 select->width[plane] = line_size >> (select->bitdepth > 8);
/* Chroma planes (1 and 2) are vertically subsampled by log2_chroma_h. */
224 select->height[plane] = plane == 1 || plane == 2 ? AV_CEIL_RSHIFT(inlink->h, vsub) : inlink->h;
/* Counters start at zero; prev_*/start_* values stay NaN until frames
 * actually arrive (see select_frame()). */
227 select->var_values[VAR_N] = 0.0;
228 select->var_values[VAR_SELECTED_N] = 0.0;
230 select->var_values[VAR_TB] = av_q2d(inlink->time_base);
232 select->var_values[VAR_PREV_PTS] = NAN;
233 select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
234 select->var_values[VAR_PREV_SELECTED_T] = NAN;
235 select->var_values[VAR_PREV_T] = NAN;
236 select->var_values[VAR_START_PTS] = NAN;
237 select->var_values[VAR_START_T] = NAN;
/* Constants so expressions can compare pict_type against symbolic names. */
239 select->var_values[VAR_I] = AV_PICTURE_TYPE_I;
240 select->var_values[VAR_P] = AV_PICTURE_TYPE_P;
241 select->var_values[VAR_B] = AV_PICTURE_TYPE_B;
242 select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
243 select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
244 select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
245 select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
246 select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
247 select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
248 select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
249 select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
250 select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;
252 select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
253 select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
254 select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;
256 select->var_values[VAR_PICT_TYPE] = NAN;
257 select->var_values[VAR_INTERLACE_TYPE] = NAN;
258 select->var_values[VAR_SCENE] = NAN;
259 select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
260 select->var_values[VAR_SAMPLES_N] = NAN;
262 select->var_values[VAR_SAMPLE_RATE] =
263 inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
/* Scene detection needs a SAD implementation matching the bit depth;
 * only the 8-bit and 16-bit variants are requested here.
 * (NOTE(review): the condition guarding the AVERROR(EINVAL) return is
 * on a line not visible in this view — presumably a NULL check on sad.) */
265 if (CONFIG_SELECT_FILTER && select->do_scene_detect) {
266 select->sad = ff_scene_sad_get_fn(select->bitdepth == 8 ? 8 : 16);
268 return AVERROR(EINVAL);
273 static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
/* Compute a scene-change score by summing per-plane SAD between this frame
 * and the previously seen one, normalizing to a mean absolute frame
 * difference (MAFD) scaled by bit depth, and clipping
 * min(mafd, |mafd - prev_mafd|) / 100 into [0,1].
 * (NOTE(review): partial view — local declarations, part of the dimension
 * check and the return statement are not visible here.) */
276 SelectContext *select = ctx->priv;
277 AVFrame *prev_picref = select->prev_picref;
/* Only comparable when the previous frame has identical dimensions. */
280 frame->height == prev_picref->height &&
281 frame->width == prev_picref->width) {
286 for (int plane = 0; plane < select->nb_planes; plane++) {
288 select->sad(prev_picref->data[plane], prev_picref->linesize[plane],
289 frame->data[plane], frame->linesize[plane],
290 select->width[plane], select->height[plane], &plane_sad);
292 count += select->width[plane] * select->height[plane];
/* Normalize by sample count and by the extra bits beyond 8-bit depth. */
296 mafd = (double)sad / count / (1ULL << (select->bitdepth - 8));
297 diff = fabs(mafd - select->prev_mafd);
298 ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
299 select->prev_mafd = mafd;
300 av_frame_free(&prev_picref);
/* Keep a reference to the current frame for the next comparison. */
302 select->prev_picref = av_frame_clone(frame);
306 static double get_concatdec_select(AVFrame *frame, int64_t pts)
/* Decide whether the frame lies inside the interval set by the concat
 * demuxer, based on the "lavf.concatdec.start_time" / "lavf.concatdec.duration"
 * frame metadata.  pts is supplied by the caller rescaled to AV_TIME_BASE
 * units (see the av_rescale_q() at the call site in select_frame()).
 * (NOTE(review): partial view — the return statements, including the
 * behavior when metadata is absent, are not visible here.) */
308 AVDictionary *metadata = frame->metadata;
309 AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
310 AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
311 if (start_time_entry) {
312 int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
313 if (pts >= start_time) {
314 if (duration_entry) {
315 int64_t duration = strtoll(duration_entry->value, NULL, 10);
316 if (pts < start_time + duration)
328 static void select_frame(AVFilterContext *ctx, AVFrame *frame)
/* Evaluate the select expression against one frame: fill var_values from
 * the frame, evaluate select->expr, map the result to an output pad index
 * in select->select_out, and update the prev_* / prev_selected_*
 * bookkeeping variables.
 * (NOTE(review): partial view — declarations, some braces, `break`
 * statements and parts of the branch logic are not visible here.) */
330 SelectContext *select = ctx->priv;
331 AVFilterLink *inlink = ctx->inputs[0];
/* Latch the first frame's timestamp as start_pts / start_t. */
334 if (isnan(select->var_values[VAR_START_PTS]))
335 select->var_values[VAR_START_PTS] = TS2D(frame->pts);
336 if (isnan(select->var_values[VAR_START_T]))
337 select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
339 select->var_values[VAR_N ] = inlink->frame_count_out;
340 select->var_values[VAR_PTS] = TS2D(frame->pts);
341 select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
/* An unknown packet byte position (-1) is exposed as NAN. */
342 select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
343 select->var_values[VAR_KEY] = frame->key_frame;
/* get_concatdec_select() expects the pts in AV_TIME_BASE units. */
344 select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));
346 switch (inlink->type) {
347 case AVMEDIA_TYPE_AUDIO:
348 select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
351 case AVMEDIA_TYPE_VIDEO:
352 select->var_values[VAR_INTERLACE_TYPE] =
353 !frame->interlaced_frame ? INTERLACE_TYPE_P :
354 frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
355 select->var_values[VAR_PICT_TYPE] = frame->pict_type;
356 if (select->do_scene_detect) {
358 select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
359 // TODO: document metadata
/* Export the scene score as per-frame metadata for downstream filters. */
360 snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
361 av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
/* Evaluate the expression; the raw result is kept in select->select. */
366 select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
367 av_log(inlink->dst, AV_LOG_DEBUG,
368 "n:%f pts:%f t:%f key:%d",
369 select->var_values[VAR_N],
370 select->var_values[VAR_PTS],
371 select->var_values[VAR_T],
374 switch (inlink->type) {
375 case AVMEDIA_TYPE_VIDEO:
376 av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
377 (!frame->interlaced_frame) ? 'P' :
378 frame->top_field_first ? 'T' : 'B',
379 av_get_picture_type_char(frame->pict_type),
380 select->var_values[VAR_SCENE]);
382 case AVMEDIA_TYPE_AUDIO:
383 av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
385 select->var_values[VAR_CONSUMED_SAMPLES_N]);
/* Map the expression result to an output pad (the branch condition
 * immediately preceding this line is not visible in this view). */
390 select->select_out = -1; /* drop */
391 } else if (isnan(res) || res < 0) {
392 select->select_out = 0; /* first output */
394 select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
397 av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
/* Bookkeeping for a selected frame (guarding condition not visible). */
400 select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
401 select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
402 select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
403 select->var_values[VAR_SELECTED_N] += 1.0;
404 if (inlink->type == AVMEDIA_TYPE_AUDIO)
405 select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
/* Always updated, selected or not. */
408 select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
409 select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
412 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
/* Input pad callback: evaluate the expression for this frame, then either
 * forward it to the chosen output pad or free it.
 * (NOTE(review): partial view — the branch separating the forward path
 * from the drop path, and the final return, are not visible here.) */
414 AVFilterContext *ctx = inlink->dst;
415 SelectContext *select = ctx->priv;
417 select_frame(ctx, frame);
419 return ff_filter_frame(ctx->outputs[select->select_out], frame);
/* Dropped frames are released here. */
421 av_frame_free(&frame);
425 static int request_frame(AVFilterLink *outlink)
/* Output pad callback: forward the frame request to the single input.
 * (NOTE(review): partial view — the return statement is not visible.) */
427 AVFilterLink *inlink = outlink->src->inputs[0];
428 int ret = ff_request_frame(inlink);
432 static av_cold void uninit(AVFilterContext *ctx)
/* Release resources acquired in init() and at runtime: the parsed
 * expression, the dynamically allocated output pad names, and the
 * previous-frame reference kept for scene detection. */
434 SelectContext *select = ctx->priv;
437 av_expr_free(select->expr);
/* Pad names were allocated with av_asprintf() in init(). */
440 for (i = 0; i < ctx->nb_outputs; i++)
441 av_freep(&ctx->output_pads[i].name);
443 if (select->do_scene_detect) {
444 av_frame_free(&select->prev_picref);
448 #if CONFIG_ASELECT_FILTER
/* Option table and AVClass for the audio variant ("aselect"). */
450 DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
451 AVFILTER_DEFINE_CLASS(aselect);
453 static av_cold int aselect_init(AVFilterContext *ctx)
/* Audio-specific init: run the shared init(), then reject expressions that
 * reference scene detection, which is not applicable to audio.
 * (NOTE(review): partial view — braces and the success return are not
 * visible here.) */
455 SelectContext *select = ctx->priv;
458 if ((ret = init(ctx)) < 0)
461 if (select->do_scene_detect) {
462 av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
463 return AVERROR(EINVAL);
/* Single audio input; link configuration and frame handling are shared
 * with the video variant. */
469 static const AVFilterPad avfilter_af_aselect_inputs[] = {
472 .type = AVMEDIA_TYPE_AUDIO,
473 .config_props = config_input,
474 .filter_frame = filter_frame,
/* Filter definition for "aselect".  Output pads are created dynamically in
 * init(), hence AVFILTER_FLAG_DYNAMIC_OUTPUTS and no static outputs. */
479 const AVFilter ff_af_aselect = {
481 .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
482 .init = aselect_init,
484 .priv_size = sizeof(SelectContext),
485 .inputs = avfilter_af_aselect_inputs,
486 .priv_class = &aselect_class,
487 .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
489 #endif /* CONFIG_ASELECT_FILTER */
491 #if CONFIG_SELECT_FILTER
493 static int query_formats(AVFilterContext *ctx)
/* Format negotiation for the video variant: without scene detection any
 * format is acceptable (defaults); with it, restrict the input to the
 * pixel formats the SAD-based scoring supports.
 * (NOTE(review): partial view — braces, the NULL check on fmts_list and
 * the final return are not visible here.) */
495 SelectContext *select = ctx->priv;
497 if (!select->do_scene_detect) {
498 return ff_default_query_formats(ctx);
501 static const enum AVPixelFormat pix_fmts[] = {
502 AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24, AV_PIX_FMT_RGBA,
503 AV_PIX_FMT_ABGR, AV_PIX_FMT_BGRA, AV_PIX_FMT_GRAY8,
504 AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVJ420P,
505 AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P,
506 AV_PIX_FMT_YUV420P10,
509 AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
512 return AVERROR(ENOMEM);
513 ret = ff_set_common_formats(ctx, fmts_list);
/* Option table and AVClass for the video variant ("select"). */
520 DEFINE_OPTIONS(select, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
521 AVFILTER_DEFINE_CLASS(select);
523 static av_cold int select_init(AVFilterContext *ctx)
/* Video-specific init: delegates to the shared init().
 * (NOTE(review): partial view — most of the body is not visible here.) */
527 if ((ret = init(ctx)) < 0)
/* Single video input; format negotiation may be restricted by
 * query_formats() when scene detection is enabled. */
533 static const AVFilterPad avfilter_vf_select_inputs[] = {
536 .type = AVMEDIA_TYPE_VIDEO,
537 .config_props = config_input,
538 .filter_frame = filter_frame,
/* Filter definition for "select".  Output pads are created dynamically in
 * init(), hence AVFILTER_FLAG_DYNAMIC_OUTPUTS and no static outputs. */
543 const AVFilter ff_vf_select = {
545 .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
548 .query_formats = query_formats,
549 .priv_size = sizeof(SelectContext),
550 .priv_class = &select_class,
551 .inputs = avfilter_vf_select_inputs,
552 .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
554 #endif /* CONFIG_SELECT_FILTER */