2 * This file is part of FFmpeg.
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "libavutil/avassert.h"
24 #include "libavutil/channel_layout.h"
25 #include "libavutil/common.h"
26 #include "libavutil/log.h"
27 #include "libavutil/mathematics.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/samplefmt.h"
/*
 * Private context shared by the trim (video) and atrim (audio) filters.
 * NOTE(review): this extract is missing several lines of the original
 * struct (some field declarations, comment delimiters and the closing
 * brace are not visible here).
 */
35 typedef struct TrimContext {
/* user-supplied bounds as wall-clock times (rescaled from AV_TIME_BASE_Q in config_input) */
42     int64_t start_time, end_time;
/* user-supplied bounds as frame indices (video trim only) */
43     int64_t start_frame, end_frame;
45      * in the link timebase for video,
46      * in 1/samplerate for audio
48     int64_t start_pts, end_pts;
/* user-supplied bounds as sample indices (audio atrim only) */
49     int64_t start_sample, end_sample;
52      * number of video frames that arrived on this filter so far
56      * number of audio samples that arrived on this filter so far
60      * timestamp of the first frame in the output, in the timebase units
64      * duration in the timebase units
/**
 * Filter init callback: mark first_pts as unset until the first frame
 * actually passed to the output is seen.
 * NOTE(review): the opening/closing braces and return statement are
 * missing from this extract.
 */
73 static av_cold int init(AVFilterContext *ctx)
75     TrimContext *s = ctx->priv;
/* sentinel: no frame has been passed to the output yet */
77     s->first_pts = AV_NOPTS_VALUE;
/**
 * Input-link configuration: rescale the user-facing start/end/duration
 * options (AV_TIME_BASE_Q units) into the stream's working time base --
 * the link time base for video, 1/sample_rate for audio.
 * NOTE(review): several lines are missing from this extract (closing
 * braces, the `s->end_pts = end_pts;` assignment under the end_time
 * branch, and the return statement).
 */
82 static int config_input(AVFilterLink *inlink)
84     AVFilterContext *ctx = inlink->dst;
85     TrimContext *s = ctx->priv;
/* audio positions are measured in samples, so use a 1/sample_rate time base */
86     AVRational tb = (inlink->type == AVMEDIA_TYPE_VIDEO) ?
87                     inlink->time_base : (AVRational){ 1, inlink->sample_rate };
/* INT64_MAX is the "option not set" default for the duration-type options */
89     if (s->start_time != INT64_MAX) {
90         int64_t start_pts = av_rescale_q(s->start_time, AV_TIME_BASE_Q, tb);
/* if both start and start_pts were given, keep the earlier bound */
91         if (s->start_pts == AV_NOPTS_VALUE || start_pts < s->start_pts)
92             s->start_pts = start_pts;
94     if (s->end_time != INT64_MAX) {
95         int64_t end_pts = av_rescale_q(s->end_time, AV_TIME_BASE_Q, tb);
/* symmetrically, keep the later of the two end bounds */
96         if (s->end_pts == AV_NOPTS_VALUE || end_pts > s->end_pts)
/* duration is also converted once here so the per-frame checks stay cheap */
100     s->duration_tb = av_rescale_q(s->duration, AV_TIME_BASE_Q, tb);
/* Shorthand for AVOption offsets into TrimContext. */
105 #define OFFSET(x) offsetof(TrimContext, x)
/*
 * Option entries shared by trim and atrim.  "starti"/"endi"/"durationi"
 * are aliases of "start"/"end"/"duration" (same offset, same type).
 * The duration-typed options default to INT64_MAX / 0 meaning "not set";
 * the *_pts options take raw timestamps and default to AV_NOPTS_VALUE.
 * No comments are placed between the entries below: inserting a plain
 * comment line would break the backslash line-continuation of the macro.
 */
106 #define COMMON_OPTS \
107 { "start", "Timestamp of the first frame that " \
108 "should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
109 { "starti", "Timestamp of the first frame that " \
110 "should be passed", OFFSET(start_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
111 { "end", "Timestamp of the first frame that " \
112 "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
113 { "endi", "Timestamp of the first frame that " \
114 "should be dropped again", OFFSET(end_time), AV_OPT_TYPE_DURATION, { .i64 = INT64_MAX }, INT64_MIN, INT64_MAX, FLAGS }, \
115 { "start_pts", "Timestamp of the first frame that should be " \
116 " passed", OFFSET(start_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
117 { "end_pts", "Timestamp of the first frame that should be " \
118 "dropped again", OFFSET(end_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, FLAGS }, \
119 { "duration", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS }, \
120 { "durationi", "Maximum duration of the output", OFFSET(duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT64_MAX, FLAGS },
123 #if CONFIG_TRIM_FILTER
/**
 * Video path: pass whole frames that fall between the configured start
 * and end bounds, drop everything else, and signal EOF on the input
 * link once the end bound is reached.
 * NOTE(review): this extract is missing lines (braces, the eof flag
 * test, drop/pass branch bodies, frame counting and some returns); the
 * comments below describe only the visible logic.
 */
124 static int trim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
126     AVFilterContext *ctx = inlink->dst;
127     TrimContext *s = ctx->priv;
130     /* drop everything if EOF has already been returned */
132         av_frame_free(&frame);
/* start check: either a frame index or a pts threshold can enable output */
136     if (s->start_frame >= 0 || s->start_pts != AV_NOPTS_VALUE) {
138         if (s->start_frame >= 0 && s->nb_frames >= s->start_frame)
140         if (s->start_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
141             frame->pts >= s->start_pts)
/* remember the first passed timestamp: it anchors the "duration" option */
147     if (s->first_pts == AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE)
148         s->first_pts = frame->pts;
/* end check: frame index, pts threshold, or elapsed duration since first_pts */
150     if (s->end_frame != INT64_MAX || s->end_pts != AV_NOPTS_VALUE || s->duration_tb) {
153         if (s->end_frame != INT64_MAX && s->nb_frames < s->end_frame)
155         if (s->end_pts != AV_NOPTS_VALUE && frame->pts != AV_NOPTS_VALUE &&
156             frame->pts < s->end_pts)
158         if (s->duration_tb && frame->pts != AV_NOPTS_VALUE &&
159             frame->pts - s->first_pts < s->duration_tb)
/* past the end: tell the input link no more frames will be accepted */
164     ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
/* frame is inside the kept section: forward it unchanged */
171     return ff_filter_frame(ctx->outputs[0], frame);
/* drop path: the frame is outside the kept section */
175     av_frame_free(&frame);
/* trim is a video filter, so its options apply to video streams */
179 #define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
/*
 * Video-only options; start_frame defaults to -1 (unset), end_frame to
 * INT64_MAX (unset).  NOTE(review): the COMMON_OPTS expansion and the
 * terminating { NULL } entry are missing from this extract.
 */
180 static const AVOption trim_options[] = {
182     { "start_frame", "Number of the first frame that should be passed "
183 "to the output", OFFSET(start_frame), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
184 { "end_frame", "Number of the first frame that should be dropped "
185 "again", OFFSET(end_frame), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
/* defines trim_class, referenced by ff_vf_trim.priv_class below */
190 AVFILTER_DEFINE_CLASS(trim);
/*
 * Single video input; frames go through trim_filter_frame and the link
 * is configured by config_input.  NOTE(review): the pad .name fields and
 * closing braces are missing from this extract.
 */
192 static const AVFilterPad trim_inputs[] = {
195 .type = AVMEDIA_TYPE_VIDEO,
196 .filter_frame = trim_filter_frame,
197 .config_props = config_input,
/* single pass-through video output */
202 static const AVFilterPad trim_outputs[] = {
205 .type = AVMEDIA_TYPE_VIDEO,
/*
 * Filter registration for the video trim filter.
 * NOTE(review): the .name and .init fields visible in the full file are
 * missing from this extract.
 */
210 AVFilter ff_vf_trim = {
212 .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
214 .priv_size = sizeof(TrimContext),
215 .priv_class = &trim_class,
216 .inputs = trim_inputs,
217 .outputs = trim_outputs,
219 #endif // CONFIG_TRIM_FILTER
221 #if CONFIG_ATRIM_FILTER
/**
 * Audio path: unlike the video filter, a frame may straddle a trim
 * boundary, so the kept region is computed at sample granularity
 * ([start_sample, end_sample) within this frame) and a partial frame is
 * emitted when needed.
 * NOTE(review): this extract is missing lines (braces, the eof test,
 * the pts declaration/else branch, drop branches and some returns); the
 * comments below describe only the visible logic.
 */
222 static int atrim_filter_frame(AVFilterLink *inlink, AVFrame *frame)
224     AVFilterContext *ctx = inlink->dst;
225     TrimContext *s = ctx->priv;
/* boundaries of the region of this frame that is kept, in samples */
226     int64_t start_sample, end_sample;
230     /* drop everything if EOF has already been returned */
232         av_frame_free(&frame);
/* work in 1/sample_rate units so pts and sample counts are directly comparable */
236     if (frame->pts != AV_NOPTS_VALUE)
237         pts = av_rescale_q(frame->pts, inlink->time_base,
238                            (AVRational){ 1, inlink->sample_rate });
241     s->next_pts = pts + frame->nb_samples;
243     /* check if at least a part of the frame is after the start time */
244     if (s->start_sample < 0 && s->start_pts == AV_NOPTS_VALUE) {
/* "drop all" sentinel; narrowed below if a start bound falls inside this frame */
248         start_sample = frame->nb_samples;
250         if (s->start_sample >= 0 &&
251             s->nb_samples + frame->nb_samples > s->start_sample) {
/* start bound given as an absolute sample index */
253             start_sample = FFMIN(start_sample, s->start_sample - s->nb_samples);
256         if (s->start_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
257             pts + frame->nb_samples > s->start_pts) {
/* start bound given as a timestamp */
259             start_sample = FFMIN(start_sample, s->start_pts - pts);
/* anchor "duration" at the first kept sample's timestamp */
266     if (s->first_pts == AV_NOPTS_VALUE)
267         s->first_pts = pts + start_sample;
269     /* check if at least a part of the frame is before the end time */
270     if (s->end_sample == INT64_MAX && s->end_pts == AV_NOPTS_VALUE && !s->duration_tb) {
/* no end bound configured: keep the whole frame */
271         end_sample = frame->nb_samples;
276         if (s->end_sample != INT64_MAX &&
277             s->nb_samples < s->end_sample) {
/* end bound given as an absolute sample index */
279             end_sample = FFMAX(end_sample, s->end_sample - s->nb_samples);
282         if (s->end_pts != AV_NOPTS_VALUE && pts != AV_NOPTS_VALUE &&
/* end bound given as a timestamp */
285             end_sample = FFMAX(end_sample, s->end_pts - pts);
288         if (s->duration_tb && pts - s->first_pts < s->duration_tb) {
/* end bound derived from the duration option */
290             end_sample = FFMAX(end_sample, s->first_pts + s->duration_tb - pts);
/* fully past the end: signal EOF on the input link */
295     ff_avfilter_link_set_out_status(inlink, AVERROR_EOF, AV_NOPTS_VALUE);
300     s->nb_samples += frame->nb_samples;
/* clamp the kept region to [0, nb_samples] of this frame */
301     start_sample = FFMAX(0, start_sample);
302     end_sample   = FFMIN(frame->nb_samples, end_sample);
/* empty region after clamping: nothing of this frame is kept */
303     if (start_sample >= end_sample || !frame->nb_samples)
/* partial frame: copy only the kept slice into a new buffer */
307         AVFrame *out = ff_get_audio_buffer(ctx->outputs[0], end_sample - start_sample);
309             av_frame_free(&frame);
310             return AVERROR(ENOMEM);
313         av_frame_copy_props(out, frame);
314         av_samples_copy(out->extended_data, frame->extended_data, 0, start_sample,
315                         out->nb_samples, inlink->channels,
/* shift the pts forward by the number of samples trimmed off the front */
317         if (out->pts != AV_NOPTS_VALUE)
318             out->pts += av_rescale_q(start_sample, (AVRational){ 1, out->sample_rate },
321         av_frame_free(&frame);
/* whole frame kept up to end_sample: truncate in place, no copy needed */
324         frame->nb_samples = end_sample;
326     return ff_filter_frame(ctx->outputs[0], frame);
/* drop path: count the samples so absolute sample bounds stay correct */
329     s->nb_samples += frame->nb_samples;
330     av_frame_free(&frame);
/* atrim is an audio filter, so its options apply to audio streams */
334 #define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
/*
 * Audio-only options; start_sample defaults to -1 (unset), end_sample
 * to INT64_MAX (unset).  NOTE(review): the COMMON_OPTS expansion and the
 * terminating { NULL } entry are missing from this extract.
 */
335 static const AVOption atrim_options[] = {
337     { "start_sample", "Number of the first audio sample that should be "
338 "passed to the output", OFFSET(start_sample), AV_OPT_TYPE_INT64, { .i64 = -1 }, -1, INT64_MAX, FLAGS },
339 { "end_sample", "Number of the first audio sample that should be "
340 "dropped again", OFFSET(end_sample), AV_OPT_TYPE_INT64, { .i64 = INT64_MAX }, 0, INT64_MAX, FLAGS },
/* defines atrim_class, referenced by ff_af_atrim.priv_class below */
345 AVFILTER_DEFINE_CLASS(atrim);
/*
 * Single audio input; frames go through atrim_filter_frame and the link
 * is configured by the shared config_input.  NOTE(review): the pad
 * .name fields and closing braces are missing from this extract.
 */
347 static const AVFilterPad atrim_inputs[] = {
350 .type = AVMEDIA_TYPE_AUDIO,
351 .filter_frame = atrim_filter_frame,
352 .config_props = config_input,
/* single pass-through audio output */
357 static const AVFilterPad atrim_outputs[] = {
360 .type = AVMEDIA_TYPE_AUDIO,
/*
 * Filter registration for the audio atrim filter.
 * NOTE(review): the .name and .init fields visible in the full file are
 * missing from this extract.
 */
365 AVFilter ff_af_atrim = {
367 .description = NULL_IF_CONFIG_SMALL("Pick one continuous section from the input, drop the rest."),
369 .priv_size = sizeof(TrimContext),
370 .priv_class = &atrim_class,
371 .inputs = atrim_inputs,
372 .outputs = atrim_outputs,
374 #endif // CONFIG_ATRIM_FILTER