/*
 * Copyright (c) 2012 Pavel Koshevoy <pkoshevoy at gmail dot com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * tempo scaling audio filter -- an implementation of WSOLA algorithm
 *
 * Based on MIT licensed yaeAudioTempoFilter.h and yaeAudioFragment.h
 * from Apprentice Video player by Pavel Koshevoy.
 * https://sourceforge.net/projects/apprenticevideo/
 *
 * An explanation of SOLA algorithm is available at
 * http://www.surina.net/article/time-and-pitch-scaling.html
 *
 * WSOLA is very similar to SOLA; only one major difference exists between
 * these algorithms.  SOLA shifts audio fragments along the output stream,
 * whereas WSOLA shifts audio fragments along the input stream.
 *
 * The advantage of the WSOLA algorithm is that the overlap region size is
 * always the same, therefore the blending function is constant and
 * can be precomputed.
 */
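
// Illustrative note (added, not part of the original header): with a
// fragment window of W samples, consecutive output fragments always start
// W/2 samples apart, while the corresponding input fragments start
// tempo * W/2 samples apart (see yae_advance_to_next_frag() below).
// For example, at tempo == 2.0 the filter consumes input twice as fast
// as it produces output, halving the duration of the stream.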
#include <float.h>
#include "libavcodec/avfft.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
/**
 * A fragment of audio waveform
 */
typedef struct AudioFragment {
    // index of the first sample of this fragment in the overall waveform;
    // 0: input sample position
    // 1: output sample position
    int64_t position[2];

    // original packed multi-channel samples:
    uint8_t *data;

    // number of samples in this fragment:
    int nsamples;

    // rDFT transform of the down-mixed mono fragment, used for
    // fast waveform alignment via correlation in frequency domain:
    FFTSample *xdat;
} AudioFragment;
/**
 * Filter state machine states
 */
typedef enum {
    YAE_LOAD_FRAGMENT,
    YAE_ADJUST_POSITION,
    YAE_RELOAD_FRAGMENT,
    YAE_OUTPUT_OVERLAP_ADD,
    YAE_FLUSH_OUTPUT,
} FilterState;
85 * Filter state machine
87 typedef struct ATempoContext {
90 // ring-buffer of input samples, necessary because some times
91 // input fragment position may be adjusted backwards:
94 // ring-buffer maximum capacity, expressed in sample rate time base:
97 // ring-buffer house keeping:
102 // 0: input sample position corresponding to the ring buffer tail
103 // 1: output sample position
106 // first input timestamp, all other timestamps are offset by this one
110 enum AVSampleFormat format;
112 // number of channels:
115 // row of bytes to skip from one sample to next, across multple channels;
116 // stride = (number-of-channels * bits-per-sample-per-channel) / 8
119 // fragment window size, power-of-two integer:
122 // Hann window coefficients, for feathering
123 // (blending) the overlapping fragment region:
126 // tempo scaling factor:
129 // a snapshot of previous fragment input and output position values
130 // captured when the tempo scale factor was set most recently:
133 // current/previous fragment ring-buffer:
134 AudioFragment frag[2];
136 // current fragment index:
142 // for fast correlation calculation in frequency domain:
143 RDFTContext *real_to_complex;
144 RDFTContext *complex_to_real;
145 FFTSample *correlation;
147 // for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
151 uint64_t nsamples_in;
152 uint64_t nsamples_out;
#define YAE_ATEMPO_MIN 0.5
#define YAE_ATEMPO_MAX 100.0

#define OFFSET(x) offsetof(ATempoContext, x)

static const AVOption atempo_options[] = {
    { "tempo", "set tempo scale factor",
      OFFSET(tempo), AV_OPT_TYPE_DOUBLE, { .dbl = 1.0 },
      YAE_ATEMPO_MIN, YAE_ATEMPO_MAX,
      AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
    { NULL }
};

AVFILTER_DEFINE_CLASS(atempo);
inline static AudioFragment *yae_curr_frag(ATempoContext *atempo)
{
    return &atempo->frag[atempo->nfrag % 2];
}

inline static AudioFragment *yae_prev_frag(ATempoContext *atempo)
{
    return &atempo->frag[(atempo->nfrag + 1) % 2];
}
/**
 * Reset filter to initial state, do not deallocate existing local buffers.
 */
static void yae_clear(ATempoContext *atempo)
{
    atempo->size = 0;
    atempo->head = 0;
    atempo->tail = 0;

    atempo->nfrag = 0;
    atempo->state = YAE_LOAD_FRAGMENT;
    atempo->start_pts = AV_NOPTS_VALUE;

    atempo->position[0] = 0;
    atempo->position[1] = 0;

    atempo->origin[0] = 0;
    atempo->origin[1] = 0;

    atempo->frag[0].position[0] = 0;
    atempo->frag[0].position[1] = 0;
    atempo->frag[0].nsamples    = 0;

    atempo->frag[1].position[0] = 0;
    atempo->frag[1].position[1] = 0;
    atempo->frag[1].nsamples    = 0;

    // shift left position of 1st fragment by half a window
    // so that no re-normalization would be required for
    // the left half of the 1st fragment:
    atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2);
    atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2);

    av_frame_free(&atempo->dst_buffer);
    atempo->dst     = NULL;
    atempo->dst_end = NULL;

    atempo->nsamples_in  = 0;
    atempo->nsamples_out = 0;
}
/**
 * Reset filter to initial state and deallocate all buffers.
 */
static void yae_release_buffers(ATempoContext *atempo)
{
    yae_clear(atempo);

    av_freep(&atempo->frag[0].data);
    av_freep(&atempo->frag[1].data);
    av_freep(&atempo->frag[0].xdat);
    av_freep(&atempo->frag[1].xdat);

    av_freep(&atempo->buffer);
    av_freep(&atempo->hann);
    av_freep(&atempo->correlation);

    av_rdft_end(atempo->real_to_complex);
    atempo->real_to_complex = NULL;

    av_rdft_end(atempo->complex_to_real);
    atempo->complex_to_real = NULL;
}
/* av_realloc is not aligned enough; fortunately, the data does not need to
 * be preserved */
#define RE_MALLOC_OR_FAIL(field, field_size)            \
    do {                                                \
        av_freep(&field);                               \
        field = av_malloc(field_size);                  \
        if (!field) {                                   \
            yae_release_buffers(atempo);                \
            return AVERROR(ENOMEM);                     \
        }                                               \
    } while (0)
/**
 * Prepare filter for processing audio data of given format,
 * sample rate and number of channels.
 */
static int yae_reset(ATempoContext *atempo,
                     enum AVSampleFormat format,
                     int sample_rate,
                     int channels)
{
    const int sample_size = av_get_bytes_per_sample(format);
    uint32_t nlevels = 0;
    uint32_t pot;
    int i;

    atempo->format   = format;
    atempo->channels = channels;
    atempo->stride   = sample_size * channels;

    // pick a segment window size:
    atempo->window = sample_rate / 24;

    // adjust window size to be a power-of-two integer:
    nlevels = av_log2(atempo->window);
    pot = 1 << nlevels;
    av_assert0(pot <= atempo->window);

    if (pot < atempo->window) {
        atempo->window = pot * 2;
        nlevels++;
    }

    // initialize audio fragment buffers:
    RE_MALLOC_OR_FAIL(atempo->frag[0].data, atempo->window * atempo->stride);
    RE_MALLOC_OR_FAIL(atempo->frag[1].data, atempo->window * atempo->stride);
    RE_MALLOC_OR_FAIL(atempo->frag[0].xdat, atempo->window * sizeof(FFTComplex));
    RE_MALLOC_OR_FAIL(atempo->frag[1].xdat, atempo->window * sizeof(FFTComplex));

    // initialize rDFT contexts:
    av_rdft_end(atempo->real_to_complex);
    atempo->real_to_complex = NULL;

    av_rdft_end(atempo->complex_to_real);
    atempo->complex_to_real = NULL;

    atempo->real_to_complex = av_rdft_init(nlevels + 1, DFT_R2C);
    if (!atempo->real_to_complex) {
        yae_release_buffers(atempo);
        return AVERROR(ENOMEM);
    }

    atempo->complex_to_real = av_rdft_init(nlevels + 1, IDFT_C2R);
    if (!atempo->complex_to_real) {
        yae_release_buffers(atempo);
        return AVERROR(ENOMEM);
    }

    RE_MALLOC_OR_FAIL(atempo->correlation, atempo->window * sizeof(FFTComplex));

    atempo->ring = atempo->window * 3;
    RE_MALLOC_OR_FAIL(atempo->buffer, atempo->ring * atempo->stride);

    // initialize the Hann window function:
    RE_MALLOC_OR_FAIL(atempo->hann, atempo->window * sizeof(float));

    for (i = 0; i < atempo->window; i++) {
        double t = (double)i / (double)(atempo->window - 1);
        double h = 0.5 * (1.0 - cos(2.0 * M_PI * t));
        atempo->hann[i] = (float)h;
    }

    yae_clear(atempo);
    return 0;
}
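
// Worked example (added note, not in the original source): at a sample
// rate of 44100 Hz the initial window estimate is 44100 / 24 == 1837
// samples; av_log2(1837) == 10, so pot == 1024 < 1837 and the window is
// rounded up to the next power of two, 2048 samples (~46 ms of audio).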
static int yae_set_tempo(AVFilterContext *ctx, const char *arg_tempo)
{
    const AudioFragment *prev;
    ATempoContext *atempo = ctx->priv;
    char   *tail = NULL;
    double tempo = av_strtod(arg_tempo, &tail);

    if (tail && *tail) {
        av_log(ctx, AV_LOG_ERROR, "Invalid tempo value '%s'\n", arg_tempo);
        return AVERROR(EINVAL);
    }

    if (tempo < YAE_ATEMPO_MIN || tempo > YAE_ATEMPO_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Tempo value %f exceeds [%f, %f] range\n",
               tempo, YAE_ATEMPO_MIN, YAE_ATEMPO_MAX);
        return AVERROR(EINVAL);
    }

    prev = yae_prev_frag(atempo);
    atempo->origin[0] = prev->position[0] + atempo->window / 2;
    atempo->origin[1] = prev->position[1] + atempo->window / 2;
    atempo->tempo = tempo;
    return 0;
}
/**
 * A helper macro for initializing complex data buffer with scalar data
 * of a given type.
 */
#define yae_init_xdat(scalar_type, scalar_max)                          \
    do {                                                                \
        const uint8_t *src_end = src +                                  \
            frag->nsamples * atempo->channels * sizeof(scalar_type);    \
                                                                        \
        FFTSample *xdat = frag->xdat;                                   \
        scalar_type tmp;                                                \
                                                                        \
        if (atempo->channels == 1) {                                    \
            for (; src < src_end; xdat++) {                             \
                tmp = *(const scalar_type *)src;                        \
                src += sizeof(scalar_type);                             \
                                                                        \
                *xdat = (FFTSample)tmp;                                 \
            }                                                           \
        } else {                                                        \
            FFTSample s, max, ti, si;                                   \
            int i;                                                      \
                                                                        \
            for (; src < src_end; xdat++) {                             \
                tmp = *(const scalar_type *)src;                        \
                src += sizeof(scalar_type);                             \
                                                                        \
                max = (FFTSample)tmp;                                   \
                s = FFMIN((FFTSample)scalar_max,                        \
                          (FFTSample)fabsf(max));                       \
                                                                        \
                for (i = 1; i < atempo->channels; i++) {                \
                    tmp = *(const scalar_type *)src;                    \
                    src += sizeof(scalar_type);                         \
                                                                        \
                    ti = (FFTSample)tmp;                                \
                    si = FFMIN((FFTSample)scalar_max,                   \
                               (FFTSample)fabsf(ti));                   \
                                                                        \
                    if (s < si) {                                       \
                        s   = si;                                       \
                        max = ti;                                       \
                    }                                                   \
                }                                                       \
                                                                        \
                *xdat = max;                                            \
            }                                                           \
        }                                                               \
    } while (0)
/**
 * Initialize complex data buffer of a given audio fragment
 * with down-mixed mono data of appropriate scalar type.
 */
static void yae_downmix(ATempoContext *atempo, AudioFragment *frag)
{
    // shortcut:
    const uint8_t *src = frag->data;

    // init complex data buffer used for FFT and correlation:
    memset(frag->xdat, 0, sizeof(FFTComplex) * atempo->window);

    if (atempo->format == AV_SAMPLE_FMT_U8) {
        yae_init_xdat(uint8_t, 127);
    } else if (atempo->format == AV_SAMPLE_FMT_S16) {
        yae_init_xdat(int16_t, 32767);
    } else if (atempo->format == AV_SAMPLE_FMT_S32) {
        yae_init_xdat(int, 2147483647);
    } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
        yae_init_xdat(float, 1);
    } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
        yae_init_xdat(double, 1);
    }
}
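
// Note (added): the "down-mix" above is not an average across channels;
// for multi-channel input yae_init_xdat keeps, per sample period, the
// sample with the largest magnitude (clamped to scalar_max).  This keeps
// transient peaks intact, which the correlation-based alignment keys on.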
/**
 * Populate the internal data buffer on as-needed basis.
 *
 * @return
 *   0 if requested data was already available or was successfully loaded,
 *   AVERROR(EAGAIN) if more input data is required.
 */
static int yae_load_data(ATempoContext *atempo,
                         const uint8_t **src_ref,
                         const uint8_t *src_end,
                         int64_t stop_here)
{
    // shortcut:
    const uint8_t *src = *src_ref;
    const int read_size = stop_here - atempo->position[0];

    if (stop_here <= atempo->position[0]) {
        return 0;
    }

    // samples are not expected to be skipped, unless tempo is greater than 2:
    av_assert0(read_size <= atempo->ring || atempo->tempo > 2.0);

    while (atempo->position[0] < stop_here && src < src_end) {
        int src_samples = (src_end - src) / atempo->stride;

        // load data piece-wise, in order to avoid complicating the logic:
        int nsamples = FFMIN(read_size, src_samples);
        int na;
        int nb;

        nsamples = FFMIN(nsamples, atempo->ring);
        na = FFMIN(nsamples, atempo->ring - atempo->tail);
        nb = FFMIN(nsamples - na, atempo->ring);

        if (na) {
            uint8_t *a = atempo->buffer + atempo->tail * atempo->stride;
            memcpy(a, src, na * atempo->stride);

            src += na * atempo->stride;
            atempo->position[0] += na;

            atempo->size = FFMIN(atempo->size + na, atempo->ring);
            atempo->tail = (atempo->tail + na) % atempo->ring;
            atempo->head =
                atempo->size < atempo->ring ?
                atempo->tail - atempo->size :
                atempo->tail;
        }

        if (nb) {
            uint8_t *b = atempo->buffer;
            memcpy(b, src, nb * atempo->stride);

            src += nb * atempo->stride;
            atempo->position[0] += nb;

            atempo->size = FFMIN(atempo->size + nb, atempo->ring);
            atempo->tail = (atempo->tail + nb) % atempo->ring;
            atempo->head =
                atempo->size < atempo->ring ?
                atempo->tail - atempo->size :
                atempo->tail;
        }
    }

    // pass back the updated source buffer pointer:
    *src_ref = src;

    // sanity check:
    av_assert0(atempo->position[0] <= stop_here);

    return atempo->position[0] == stop_here ? 0 : AVERROR(EAGAIN);
}
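
// Worked example (added note): with ring == 8 samples, tail == 6 and
// 5 incoming samples, the first memcpy stores na == FFMIN(5, 8 - 6) == 2
// samples at the end of the buffer and the second stores the remaining
// nb == 3 samples at the start, wrapping tail around to 3.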
/**
 * Populate current audio fragment data buffer.
 *
 * @return
 *   0 when the fragment is ready,
 *   AVERROR(EAGAIN) if more input data is required.
 */
static int yae_load_frag(ATempoContext *atempo,
                         const uint8_t **src_ref,
                         const uint8_t *src_end)
{
    // shortcuts:
    AudioFragment *frag = yae_curr_frag(atempo);
    uint8_t *dst;
    int64_t missing, start, zeros;
    uint32_t nsamples;
    const uint8_t *a, *b;
    int i0, i1, n0, n1, na, nb;

    int64_t stop_here = frag->position[0] + atempo->window;
    if (src_ref && yae_load_data(atempo, src_ref, src_end, stop_here) != 0) {
        return AVERROR(EAGAIN);
    }

    // calculate the number of samples we don't have:
    missing =
        stop_here > atempo->position[0] ?
        stop_here - atempo->position[0] : 0;

    nsamples =
        missing < (int64_t)atempo->window ?
        (uint32_t)(atempo->window - missing) : 0;

    // setup the output buffer:
    frag->nsamples = nsamples;
    dst = frag->data;

    start = atempo->position[0] - atempo->size;
    zeros = 0;

    if (frag->position[0] < start) {
        // what we don't have we substitute with zeros:
        zeros = FFMIN(start - frag->position[0], (int64_t)nsamples);
        av_assert0(zeros != nsamples);

        memset(dst, 0, zeros * atempo->stride);
        dst += zeros * atempo->stride;
    }

    if (zeros == nsamples) {
        return 0;
    }

    // get the remaining data from the ring buffer:
    na = (atempo->head < atempo->tail ?
          atempo->tail - atempo->head :
          atempo->ring - atempo->head);

    nb = atempo->head < atempo->tail ? 0 : atempo->tail;

    // sanity check:
    av_assert0(nsamples <= zeros + na + nb);

    a = atempo->buffer + atempo->head * atempo->stride;
    b = atempo->buffer;

    i0 = frag->position[0] + zeros - start;
    i1 = i0 < na ? 0 : i0 - na;

    n0 = i0 < na ? FFMIN(na - i0, (int)(nsamples - zeros)) : 0;
    n1 = nsamples - zeros - n0;

    if (n0) {
        memcpy(dst, a + i0 * atempo->stride, n0 * atempo->stride);
        dst += n0 * atempo->stride;
    }

    if (n1) {
        memcpy(dst, b + i1 * atempo->stride, n1 * atempo->stride);
    }

    return 0;
}
/**
 * Prepare for loading next audio fragment.
 */
static void yae_advance_to_next_frag(ATempoContext *atempo)
{
    const double fragment_step = atempo->tempo * (double)(atempo->window / 2);

    const AudioFragment *prev;
    AudioFragment       *frag;

    atempo->nfrag++;
    prev = yae_prev_frag(atempo);
    frag = yae_curr_frag(atempo);

    frag->position[0] = prev->position[0] + (int64_t)fragment_step;
    frag->position[1] = prev->position[1] + atempo->window / 2;
}
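
// Worked example (added note): with window == 1024 and tempo == 1.25 the
// fragment_step is 1.25 * 512 == 640, so each new fragment starts 640
// samples later in the input but only 512 samples later in the output;
// the overlap region in the output is therefore always window / 2.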
/**
 * Calculate cross-correlation via rDFT.
 *
 * Multiply two vectors of complex numbers (result of real_to_complex rDFT)
 * and transform back via complex_to_real rDFT.
 */
static void yae_xcorr_via_rdft(FFTSample *xcorr,
                               RDFTContext *complex_to_real,
                               const FFTComplex *xa,
                               const FFTComplex *xb,
                               const int window)
{
    FFTComplex *xc = (FFTComplex *)xcorr;
    int i;

    // NOTE: first element requires special care -- Given Y = rDFT(X),
    // Im(Y[0]) and Im(Y[N/2]) are always zero, therefore av_rdft_calc
    // stores Re(Y[N/2]) in place of Im(Y[0]).

    xc->re = xa->re * xb->re;
    xc->im = xa->im * xb->im;
    xa++;
    xb++;
    xc++;

    for (i = 1; i < window; i++, xa++, xb++, xc++) {
        xc->re = (xa->re * xb->re + xa->im * xb->im);
        xc->im = (xa->im * xb->re - xa->re * xb->im);
    }

    // apply inverse rDFT:
    av_rdft_calc(complex_to_real, xcorr);
}
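
// Note (added): the loop above computes conj(A[i]) * B[i] for each rDFT
// bin -- re = a.re*b.re + a.im*b.im, im = a.im*b.re - a.re*b.im -- so by
// the correlation theorem the inverse rDFT of the product yields the
// circular cross-correlation of the two fragments in the time domain.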
/**
 * Calculate alignment offset for given fragment
 * relative to the previous fragment.
 *
 * @return alignment offset of current fragment relative to previous.
 */
static int yae_align(AudioFragment *frag,
                     const AudioFragment *prev,
                     const int window,
                     const int delta_max,
                     const int drift,
                     FFTSample *correlation,
                     RDFTContext *complex_to_real)
{
    int best_offset = -drift;
    FFTSample best_metric = -FLT_MAX;
    FFTSample *xcorr;

    int i0;
    int i1;
    int i;

    yae_xcorr_via_rdft(correlation,
                       complex_to_real,
                       (const FFTComplex *)prev->xdat,
                       (const FFTComplex *)frag->xdat,
                       window);

    // identify search window boundaries:
    i0 = FFMAX(window / 2 - delta_max - drift, 0);
    i0 = FFMIN(i0, window);

    i1 = FFMIN(window / 2 + delta_max - drift, window - window / 16);
    i1 = FFMAX(i1, 0);

    // identify cross-correlation peaks within search window:
    xcorr = correlation + i0;

    for (i = i0; i < i1; i++, xcorr++) {
        FFTSample metric = *xcorr;

        // normalize the metric, penalizing peaks near the search
        // window edges:
        FFTSample drifti = (FFTSample)(drift + i);
        metric *= drifti * (FFTSample)(i - i0) * (FFTSample)(i1 - i);

        if (metric > best_metric) {
            best_metric = metric;
            best_offset = i - window / 2;
        }
    }

    return best_offset;
}
/**
 * Adjust current fragment position for better alignment
 * with previous fragment.
 *
 * @return alignment correction.
 */
static int yae_adjust_position(ATempoContext *atempo)
{
    const AudioFragment *prev = yae_prev_frag(atempo);
    AudioFragment       *frag = yae_curr_frag(atempo);

    const double prev_output_position =
        (double)(prev->position[1] - atempo->origin[1] + atempo->window / 2) *
        atempo->tempo;

    const double ideal_output_position =
        (double)(prev->position[0] - atempo->origin[0] + atempo->window / 2);

    const int drift = (int)(prev_output_position - ideal_output_position);

    const int delta_max  = atempo->window / 2;
    const int correction = yae_align(frag,
                                     prev,
                                     atempo->window,
                                     delta_max,
                                     drift,
                                     atempo->correlation,
                                     atempo->complex_to_real);

    if (correction) {
        // adjust fragment position:
        frag->position[0] -= correction;

        // clear so that the fragment can be reloaded:
        frag->nsamples = 0;
    }

    return correction;
}
/**
 * A helper macro for blending the overlap region of previous
 * and current audio fragment.
 */
#define yae_blend(scalar_type)                                          \
    do {                                                                \
        const scalar_type *aaa = (const scalar_type *)a;                \
        const scalar_type *bbb = (const scalar_type *)b;                \
                                                                        \
        scalar_type *out     = (scalar_type *)dst;                      \
        scalar_type *out_end = (scalar_type *)dst_end;                  \
        int64_t i;                                                      \
                                                                        \
        for (i = 0; i < overlap && out < out_end;                       \
             i++, atempo->position[1]++, wa++, wb++) {                  \
            float w0 = *wa;                                             \
            float w1 = *wb;                                             \
            int j;                                                      \
                                                                        \
            for (j = 0; j < atempo->channels;                           \
                 j++, aaa++, bbb++, out++) {                            \
                float t0 = (float)*aaa;                                 \
                float t1 = (float)*bbb;                                 \
                                                                        \
                *out =                                                  \
                    frag->position[0] + i < 0 ?                         \
                    *aaa :                                              \
                    (scalar_type)(t0 * w0 + t1 * w1);                   \
            }                                                           \
        }                                                               \
        dst = (uint8_t *)out;                                           \
    } while (0)
/**
 * Blend the overlap region of previous and current audio fragment
 * and output the results to the given destination buffer.
 *
 * @return
 *   0 if the overlap region was completely stored in the dst buffer,
 *   AVERROR(EAGAIN) if more destination buffer space is required.
 */
static int yae_overlap_add(ATempoContext *atempo,
                           uint8_t **dst_ref,
                           uint8_t *dst_end)
{
    // shortcuts:
    const AudioFragment *prev = yae_prev_frag(atempo);
    const AudioFragment *frag = yae_curr_frag(atempo);

    const int64_t start_here = FFMAX(atempo->position[1],
                                     frag->position[1]);

    const int64_t stop_here = FFMIN(prev->position[1] + prev->nsamples,
                                    frag->position[1] + frag->nsamples);

    const int64_t overlap = stop_here - start_here;

    const int64_t ia = start_here - prev->position[1];
    const int64_t ib = start_here - frag->position[1];

    const float *wa = atempo->hann + ia;
    const float *wb = atempo->hann + ib;

    const uint8_t *a = prev->data + ia * atempo->stride;
    const uint8_t *b = frag->data + ib * atempo->stride;

    uint8_t *dst = *dst_ref;

    av_assert0(start_here <= stop_here &&
               frag->position[1] <= start_here &&
               overlap <= frag->nsamples);

    if (atempo->format == AV_SAMPLE_FMT_U8) {
        yae_blend(uint8_t);
    } else if (atempo->format == AV_SAMPLE_FMT_S16) {
        yae_blend(int16_t);
    } else if (atempo->format == AV_SAMPLE_FMT_S32) {
        yae_blend(int);
    } else if (atempo->format == AV_SAMPLE_FMT_FLT) {
        yae_blend(float);
    } else if (atempo->format == AV_SAMPLE_FMT_DBL) {
        yae_blend(double);
    }

    // pass-back the updated destination buffer pointer:
    *dst_ref = dst;

    return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}
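
// Math note (added): the Hann coefficients satisfy
//   h(t) + h(t + 1/2) = 0.5*(1 - cos(2*pi*t)) + 0.5*(1 - cos(2*pi*t + pi)) = 1
// because cos(x + pi) == -cos(x).  Consecutive fragments are offset by
// exactly window / 2 output samples, i.e. ia == ib + window / 2, so
// wa[i] + wb[i] == 1 (up to the discrete 1/(window - 1) sampling of t)
// and the overlap-add above needs no re-normalization.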
/**
 * Feed as much data to the filter as it is able to consume
 * and receive as much processed data in the destination buffer
 * as it is able to produce or store.
 */
static void
yae_apply(ATempoContext *atempo,
          const uint8_t **src_ref,
          const uint8_t *src_end,
          uint8_t **dst_ref,
          uint8_t *dst_end)
{
    while (1) {
        if (atempo->state == YAE_LOAD_FRAGMENT) {
            // load additional data for the current fragment:
            if (yae_load_frag(atempo, src_ref, src_end) != 0) {
                break;
            }

            // down-mix to mono:
            yae_downmix(atempo, yae_curr_frag(atempo));

            // apply rDFT:
            av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);

            // must load the second fragment before alignment can start:
            if (!atempo->nfrag) {
                yae_advance_to_next_frag(atempo);
                continue;
            }

            atempo->state = YAE_ADJUST_POSITION;
        }

        if (atempo->state == YAE_ADJUST_POSITION) {
            // adjust position for better alignment:
            if (yae_adjust_position(atempo)) {
                // reload the fragment at the corrected position, so that the
                // Hann window blending would not require normalization:
                atempo->state = YAE_RELOAD_FRAGMENT;
            } else {
                atempo->state = YAE_OUTPUT_OVERLAP_ADD;
            }
        }

        if (atempo->state == YAE_RELOAD_FRAGMENT) {
            // load additional data if necessary due to position adjustment:
            if (yae_load_frag(atempo, src_ref, src_end) != 0) {
                break;
            }

            // down-mix to mono:
            yae_downmix(atempo, yae_curr_frag(atempo));

            // apply rDFT:
            av_rdft_calc(atempo->real_to_complex, yae_curr_frag(atempo)->xdat);

            atempo->state = YAE_OUTPUT_OVERLAP_ADD;
        }

        if (atempo->state == YAE_OUTPUT_OVERLAP_ADD) {
            // overlap-add and output the result:
            if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
                break;
            }

            // advance to the next fragment, repeat:
            yae_advance_to_next_frag(atempo);
            atempo->state = YAE_LOAD_FRAGMENT;
        }
    }
}
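
// Note (added): one pass through yae_apply() advances the state machine
// LOAD_FRAGMENT -> ADJUST_POSITION -> (RELOAD_FRAGMENT, if the position
// was corrected) -> OUTPUT_OVERLAP_ADD, then loops back to LOAD_FRAGMENT
// for the next fragment; it only returns when either the input is
// exhausted or the output buffer is full.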
/**
 * Flush any buffered data from the filter.
 *
 * @return
 *   0 if all data was completely stored in the dst buffer,
 *   AVERROR(EAGAIN) if more destination buffer space is required.
 */
static int yae_flush(ATempoContext *atempo,
                     uint8_t **dst_ref,
                     uint8_t *dst_end)
{
    AudioFragment *frag = yae_curr_frag(atempo);
    int64_t overlap_end;
    int64_t start_here;
    int64_t stop_here;
    int64_t offset;

    const uint8_t *src;
    uint8_t *dst;

    int src_size;
    int dst_size;
    int nbytes;

    atempo->state = YAE_FLUSH_OUTPUT;

    if (!atempo->nfrag) {
        // there is nothing to flush:
        return 0;
    }

    if (atempo->position[0] == frag->position[0] + frag->nsamples &&
        atempo->position[1] == frag->position[1] + frag->nsamples) {
        // the current fragment is already flushed:
        return 0;
    }

    if (frag->position[0] + frag->nsamples < atempo->position[0]) {
        // finish loading the current (possibly partial) fragment:
        yae_load_frag(atempo, NULL, NULL);

        if (atempo->nfrag) {
            // down-mix to mono:
            yae_downmix(atempo, frag);

            // apply rDFT:
            av_rdft_calc(atempo->real_to_complex, frag->xdat);

            // align current fragment to previous fragment:
            if (yae_adjust_position(atempo)) {
                // reload the current fragment due to adjusted position:
                yae_load_frag(atempo, NULL, NULL);
            }
        }
    }

    // flush the overlap region:
    overlap_end = frag->position[1] + FFMIN(atempo->window / 2,
                                            frag->nsamples);

    while (atempo->position[1] < overlap_end) {
        if (yae_overlap_add(atempo, dst_ref, dst_end) != 0) {
            return AVERROR(EAGAIN);
        }
    }

    // check whether all of the input samples have been consumed:
    if (frag->position[0] + frag->nsamples < atempo->position[0]) {
        yae_advance_to_next_frag(atempo);
        return AVERROR(EAGAIN);
    }

    // flush the remainder of the current fragment:
    start_here = FFMAX(atempo->position[1], overlap_end);
    stop_here  = frag->position[1] + frag->nsamples;
    offset     = start_here - frag->position[1];
    av_assert0(start_here <= stop_here && frag->position[1] <= start_here);

    src = frag->data + offset * atempo->stride;
    dst = (uint8_t *)*dst_ref;

    src_size = (int)(stop_here - start_here) * atempo->stride;
    dst_size = dst_end - dst;
    nbytes = FFMIN(src_size, dst_size);

    memcpy(dst, src, nbytes);
    dst += nbytes;

    atempo->position[1] += (nbytes / atempo->stride);

    // pass-back the updated destination buffer pointer:
    *dst_ref = (uint8_t *)dst;

    return atempo->position[1] == stop_here ? 0 : AVERROR(EAGAIN);
}
static av_cold int init(AVFilterContext *ctx)
{
    ATempoContext *atempo = ctx->priv;
    atempo->format = AV_SAMPLE_FMT_NONE;
    atempo->state  = YAE_LOAD_FRAGMENT;
    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ATempoContext *atempo = ctx->priv;
    yae_release_buffers(atempo);
}
static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterFormats        *formats = NULL;

    // WSOLA necessitates an internal sliding window ring buffer
    // for incoming audio stream.
    //
    // Planar sample formats are too cumbersome to store in a ring buffer,
    // therefore planar sample formats are not supported.
    //
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_U8,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts) {
        return AVERROR(ENOMEM);
    }
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats) {
        return AVERROR(ENOMEM);
    }
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats) {
        return AVERROR(ENOMEM);
    }
    return ff_set_common_samplerates(ctx, formats);
}
static int config_props(AVFilterLink *inlink)
{
    AVFilterContext  *ctx = inlink->dst;
    ATempoContext *atempo = ctx->priv;

    enum AVSampleFormat format = inlink->format;
    int sample_rate = (int)inlink->sample_rate;

    return yae_reset(atempo, format, sample_rate, inlink->channels);
}
static int push_samples(ATempoContext *atempo,
                        AVFilterLink *outlink,
                        int n_out)
{
    int ret;

    atempo->dst_buffer->sample_rate = outlink->sample_rate;
    atempo->dst_buffer->nb_samples  = n_out;

    // adjust the PTS:
    atempo->dst_buffer->pts = atempo->start_pts +
        av_rescale_q(atempo->nsamples_out,
                     (AVRational){ 1, outlink->sample_rate },
                     outlink->time_base);

    ret = ff_filter_frame(outlink, atempo->dst_buffer);
    atempo->dst_buffer = NULL;
    atempo->dst        = NULL;
    atempo->dst_end    = NULL;
    if (ret < 0)
        return ret;

    atempo->nsamples_out += n_out;
    return 0;
}
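
// Note (added): output timestamps are not copied from input frames; they
// are recomputed as start_pts plus the running count of output samples
// (nsamples_out) rescaled to the output time base, so the output stream
// stays continuous even when the tempo changes mid-stream.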
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
{
    AVFilterContext  *ctx = inlink->dst;
    ATempoContext *atempo = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];

    int ret = 0;
    int n_in = src_buffer->nb_samples;
    int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);

    const uint8_t *src = src_buffer->data[0];
    const uint8_t *src_end = src + n_in * atempo->stride;

    if (atempo->start_pts == AV_NOPTS_VALUE)
        atempo->start_pts = av_rescale_q(src_buffer->pts,
                                         inlink->time_base,
                                         outlink->time_base);

    while (src < src_end) {
        if (!atempo->dst_buffer) {
            atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
            if (!atempo->dst_buffer) {
                av_frame_free(&src_buffer);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(atempo->dst_buffer, src_buffer);

            atempo->dst = atempo->dst_buffer->data[0];
            atempo->dst_end = atempo->dst + n_out * atempo->stride;
        }

        yae_apply(atempo, &src, src_end, &atempo->dst, atempo->dst_end);

        if (atempo->dst == atempo->dst_end) {
            int n_samples = ((atempo->dst - atempo->dst_buffer->data[0]) /
                             atempo->stride);
            ret = push_samples(atempo, outlink, n_samples);
            if (ret < 0)
                goto end;
        }
    }

    atempo->nsamples_in += n_in;
end:
    av_frame_free(&src_buffer);
    return ret;
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext  *ctx = outlink->src;
    ATempoContext *atempo = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF) {
        // flush the filter:
        int n_max = atempo->ring;
        int n_out;
        int err = AVERROR(EAGAIN);

        while (err == AVERROR(EAGAIN)) {
            if (!atempo->dst_buffer) {
                atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
                if (!atempo->dst_buffer)
                    return AVERROR(ENOMEM);

                atempo->dst = atempo->dst_buffer->data[0];
                atempo->dst_end = atempo->dst + n_max * atempo->stride;
            }

            err = yae_flush(atempo, &atempo->dst, atempo->dst_end);

            n_out = ((atempo->dst - atempo->dst_buffer->data[0]) /
                     atempo->stride);

            if (n_out) {
                ret = push_samples(atempo, outlink, n_out);
                if (ret < 0)
                    return ret;
            }
        }

        av_frame_free(&atempo->dst_buffer);
        atempo->dst     = NULL;
        atempo->dst_end = NULL;

        return AVERROR_EOF;
    }

    return ret;
}
static int process_command(AVFilterContext *ctx,
                           const char *cmd,
                           const char *arg,
                           char *res,
                           int res_len,
                           int flags)
{
    return !strcmp(cmd, "tempo") ? yae_set_tempo(ctx, arg) : AVERROR(ENOSYS);
}

static const AVFilterPad atempo_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad atempo_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_atempo = {
    .name            = "atempo",
    .description     = NULL_IF_CONFIG_SMALL("Adjust audio tempo."),
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .process_command = process_command,
    .priv_size       = sizeof(ATempoContext),
    .priv_class      = &atempo_class,
    .inputs          = atempo_inputs,
    .outputs         = atempo_outputs,
};
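
// Usage example (added note, not part of the source): a typical invocation is
//
//   ffmpeg -i input.wav -filter:a "atempo=1.5" output.wav
//
// which plays the audio 1.5x faster without changing pitch.  Factors below
// YAE_ATEMPO_MIN can be approximated by chaining filter instances, e.g.
// "atempo=0.5,atempo=0.5" for an overall factor of 0.25.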