2 * Copyright (c) 2014-2015 Muhammad Faiz <mfcc64@gmail.com>
4 * This file is part of FFmpeg.
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include "libavcodec/avfft.h"
23 #include "libavutil/avassert.h"
24 #include "libavutil/opt.h"
25 #include "libavutil/xga_font_data.h"
26 #include "libavutil/eval.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavutil/time.h"
31 #include "lavfutils.h"
32 #include "lswsutils.h"
34 #if CONFIG_LIBFREETYPE
36 #include FT_FREETYPE_H
39 #include "avf_showcqt.h"
/* Default frequency range of the spectrum; chosen so the default axis covers
 * roughly ten octaves (about 20 Hz .. 20.5 kHz). */
#define BASEFREQ 20.01523126408007475
#define ENDFREQ 20495.59681441799654
/* Default transform-length expression, evaluated per frequency with
 * 'tc' = timeclamp and 'f' = bin frequency; results are clamped to
 * [TLENGTH_MIN, timeclamp] seconds in init_cqt(). */
#define TLENGTH "384*tc/(384+tc*f)"
#define TLENGTH_MIN 0.001
/* Upper clamp applied to the sono_v / bar_v volume expressions. */
#define VOLUME_MAX 100.0
/* Default fontcolor expression: blends between the r() and b() channel
 * helpers registered in init_axis_color(), keyed on the midi() note number. */
#define FONTCOLOR "st(0, (midi(f)-59.5)/12);" \
    "st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));" \
    "r(1-ld(1)) + b(ld(1))"
#define PTS_TOLERANCE 1
/* AVOption plumbing: field offset inside ShowCQTContext plus option flags. */
#define OFFSET(x) offsetof(ShowCQTContext, x)
#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
/* Filter options.  Many entries are aliases that write the same field:
 * size/s, fps/rate/r, sono_v/volume, bar_v/volume2, sono_g/gamma,
 * bar_g/gamma2, timeclamp/tc, axis/text.  String-valued options hold
 * expressions parsed later (init_volume, init_cqt, init_axis_color). */
static const AVOption showcqt_options[] = {
    { "size", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
    { "s", "set video size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = "1920x1080" }, 0, 0, FLAGS },
    { "fps", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, 0, FLAGS },
    { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, 0, FLAGS },
    { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, { .str = "25" }, 0, 0, FLAGS },
    /* -1 means "derive from the other heights" in init() */
    { "bar_h", "set bargraph height", OFFSET(bar_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
    { "axis_h", "set axis height", OFFSET(axis_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
    { "sono_h", "set sonogram height", OFFSET(sono_h), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, FLAGS },
    /* deprecated; kept for compatibility (see init()) */
    { "fullhd", "set fullhd size", OFFSET(fullhd), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    { "sono_v", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "volume", "set sonogram volume", OFFSET(sono_v), AV_OPT_TYPE_STRING, { .str = "16" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "bar_v", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "volume2", "set bargraph volume", OFFSET(bar_v), AV_OPT_TYPE_STRING, { .str = "sono_v" }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "sono_g", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
    { "gamma", "set sonogram gamma", OFFSET(sono_g), AV_OPT_TYPE_FLOAT, { .dbl = 3.0 }, 1.0, 7.0, FLAGS },
    { "bar_g", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
    { "gamma2", "set bargraph gamma", OFFSET(bar_g), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 1.0, 7.0, FLAGS },
    { "timeclamp", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.1, 1.0, FLAGS },
    { "tc", "set timeclamp", OFFSET(timeclamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.17 }, 0.1, 1.0, FLAGS },
    { "basefreq", "set base frequency", OFFSET(basefreq), AV_OPT_TYPE_DOUBLE, { .dbl = BASEFREQ }, 10.0, 100000.0, FLAGS },
    { "endfreq", "set end frequency", OFFSET(endfreq), AV_OPT_TYPE_DOUBLE, { .dbl = ENDFREQ }, 10.0, 100000.0, FLAGS },
    { "coeffclamp", "set coeffclamp", OFFSET(coeffclamp), AV_OPT_TYPE_FLOAT, { .dbl = 1.0 }, 0.1, 10.0, FLAGS },
    { "tlength", "set tlength", OFFSET(tlength), AV_OPT_TYPE_STRING, { .str = TLENGTH }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "count", "set transform count", OFFSET(count), AV_OPT_TYPE_INT, { .i64 = 6 }, 1, 30, FLAGS },
    { "fcount", "set frequency count", OFFSET(fcount), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 10, FLAGS },
    { "fontfile", "set axis font", OFFSET(fontfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "fontcolor", "set font color", OFFSET(fontcolor), AV_OPT_TYPE_STRING, { .str = FONTCOLOR }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "axisfile", "set axis image", OFFSET(axisfile), AV_OPT_TYPE_STRING, { .str = NULL }, CHAR_MIN, CHAR_MAX, FLAGS },
    { "axis", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    { "text", "draw axis", OFFSET(axis), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
    /* NOTE(review): the { NULL } terminator falls in lines elided from this
     * listing. */

AVFILTER_DEFINE_CLASS(showcqt);
/* Print the accumulated per-stage profiling counters (stored in
 * microseconds, logged as seconds), reset them, and free every resource
 * owned by the context: frames, FFT context, per-bin CQT coefficient
 * tables and work buffers.  Declarations and some non-zero guards are
 * elided from this listing. */
static void common_uninit(ShowCQTContext *s)
    int level = AV_LOG_DEBUG;
    av_log(s->ctx, level, "fft_time = %16.3f s.\n", s->fft_time * 1e-6);
    av_log(s->ctx, level, "cqt_time = %16.3f s.\n", s->cqt_time * 1e-6);
    if (s->process_cqt_time)
        av_log(s->ctx, level, "process_cqt_time = %16.3f s.\n", s->process_cqt_time * 1e-6);
    if (s->update_sono_time)
        av_log(s->ctx, level, "update_sono_time = %16.3f s.\n", s->update_sono_time * 1e-6);
    av_log(s->ctx, level, "alloc_time = %16.3f s.\n", s->alloc_time * 1e-6);
    av_log(s->ctx, level, "bar_time = %16.3f s.\n", s->bar_time * 1e-6);
    av_log(s->ctx, level, "axis_time = %16.3f s.\n", s->axis_time * 1e-6);
    av_log(s->ctx, level, "sono_time = %16.3f s.\n", s->sono_time * 1e-6);
    /* total wall time spent producing output frames */
    plot_time = s->fft_time + s->cqt_time + s->process_cqt_time + s->update_sono_time
                + s->alloc_time + s->bar_time + s->axis_time + s->sono_time;
    av_log(s->ctx, level, "plot_time = %16.3f s.\n", plot_time * 1e-6);
    /* reset so a reconfiguration starts profiling from zero */
    s->fft_time = s->cqt_time = s->process_cqt_time = s->update_sono_time
                = s->alloc_time = s->bar_time = s->axis_time = s->sono_time = 0;
    /* axis_frame may be non reference counted frame: when its planes were
     * filled manually (ff_scale_image path) there is no AVBufferRef, so
     * free data[0] directly and clear the plane pointers before
     * av_frame_free() to avoid a double free. */
    if (s->axis_frame && !s->axis_frame->buf[0]) {
        av_freep(s->axis_frame->data);
        for (k = 0; k < 4; k++)
            s->axis_frame->data[k] = NULL;
    av_frame_free(&s->axis_frame);
    av_frame_free(&s->sono_frame);
    av_fft_end(s->fft_ctx);
    /* each bin owns its own coefficient array */
    for (k = 0; k < s->cqt_len; k++)
        av_freep(&s->coeffs[k].val);
    av_freep(&s->coeffs);
    av_freep(&s->fft_data);
    av_freep(&s->fft_result);
    av_freep(&s->cqt_result);
    av_freep(&s->rcp_h_buf);
    av_freep(&s->sono_v_buf);
    av_freep(&s->bar_v_buf);
/* Build a heap-allocated table of n frequencies spaced logarithmically
 * between base and end; each entry samples the geometric center of its
 * bin (the x + 0.5 term below).  Caller owns the returned array.
 * Allocation-failure handling and the return are elided in this listing. */
static double *create_freq_table(double base, double end, int n)
    double log_base, log_end;
    double rcp_n = 1.0 / n;
    freq = av_malloc_array(n, sizeof(*freq));
    log_base = log(base);
    for (x = 0; x < n; x++) {
        /* midpoint of bin x in log-frequency space */
        double log_freq = log_base + (x + 0.5) * (log_end - log_base) * rcp_n;
        freq[x] = exp(log_freq);
167 static double clip_with_log(void *log_ctx, const char *name,
168 double val, double min, double max,
169 double nan_replace, int idx)
171 int level = AV_LOG_WARNING;
173 av_log(log_ctx, level, "[%d] %s is nan, setting it to %g.\n",
174 idx, name, nan_replace);
176 } else if (val < min) {
177 av_log(log_ctx, level, "[%d] %s is too low (%g), setting it to %g.\n",
178 idx, name, val, min);
180 } else if (val > max) {
181 av_log(log_ctx, level, "[%d] %s it too high (%g), setting it to %g.\n",
182 idx, name, val, max);
188 static double a_weighting(void *p, double f)
190 double ret = 12200.0*12200.0 * (f*f*f*f);
191 ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) *
192 sqrt((f*f + 107.7*107.7) * (f*f + 737.9*737.9));
196 static double b_weighting(void *p, double f)
198 double ret = 12200.0*12200.0 * (f*f*f);
199 ret /= (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0) * sqrt(f*f + 158.5*158.5);
/* IEC C-weighting amplitude response (linear gain) at frequency f in Hz;
 * p is an unused context pointer for the expression-evaluator callback ABI. */
static double c_weighting(void *p, double f)
{
    double num = 12200.0*12200.0 * (f*f);
    double den = (f*f + 20.6*20.6) * (f*f + 12200.0*12200.0);
    return num / den;
}
/* Parse the sono_v and bar_v volume expressions and evaluate them once per
 * CQT bin into sono_v_buf / bar_v_buf (stored squared, since the CQT
 * results they scale are power values).  Each expression may reference the
 * other through the trailing variable slot (vars[5]), so they are
 * evaluated in both orders.  Returns 0 or a negative AVERROR; some error
 * paths and cleanup labels are elided in this listing. */
static int init_volume(ShowCQTContext *s)
    const char *func_names[] = { "a_weighting", "b_weighting", "c_weighting", NULL };
    const char *sono_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "bar_v", NULL };
    const char *bar_names[] = { "timeclamp", "tc", "frequency", "freq", "f", "sono_v", NULL };
    double (*funcs[])(void *, double) = { a_weighting, b_weighting, c_weighting };
    AVExpr *sono = NULL, *bar = NULL;
    int x, ret = AVERROR(ENOMEM);

    s->sono_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->sono_v_buf));
    s->bar_v_buf = av_malloc_array(s->cqt_len, sizeof(*s->bar_v_buf));
    if (!s->sono_v_buf || !s->bar_v_buf)
    if ((ret = av_expr_parse(&sono, s->sono_v, sono_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
    if ((ret = av_expr_parse(&bar, s->bar_v, bar_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0)
    for (x = 0; x < s->cqt_len; x++) {
        /* vars[5] (bar_v resp. sono_v) starts at 0.0 and is presumably
         * updated between evaluations in lines elided here — verify */
        double vars[] = { s->timeclamp, s->timeclamp, s->freq[x], s->freq[x], s->freq[x], 0.0 };
        double vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
        vol = clip_with_log(s->ctx, "bar_v", av_expr_eval(bar, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
        s->bar_v_buf[x] = vol * vol;
        vol = clip_with_log(s->ctx, "sono_v", av_expr_eval(sono, vars, NULL), 0.0, VOLUME_MAX, 0.0, x);
        s->sono_v_buf[x] = vol * vol;
    /* error path: release both buffers */
    av_freep(&s->sono_v_buf);
    av_freep(&s->bar_v_buf);
/* Reference (C) implementation of the constant-Q transform: for each of
 * the len output bins, accumulate the windowed FFT samples of the stereo
 * signal.  a sums the bins at index i, b the mirrored bins at index j
 * (presumably j = fft_len - i; the assignment is elided in this listing).
 * The combination of a and b separates the left/right channels (packed
 * into re/im of the input FFT); the output stores per-channel power:
 * dst[k].re = |left|^2, dst[k].im = |right|^2. */
static void cqt_calc(FFTComplex *dst, const FFTComplex *src, const Coeffs *coeffs,
                     int len, int fft_len)
    for (k = 0; k < len; k++) {
        FFTComplex l, r, a = {0,0}, b = {0,0};

        for (x = 0; x < coeffs[k].len; x++) {
            FFTSample u = coeffs[k].val[x];
            i = coeffs[k].start + x;
            a.re += u * src[i].re;
            a.im += u * src[i].im;
            b.re += u * src[j].re;
            b.im += u * src[j].im;
        /* separate left and right, (and multiply by 2.0) */
        dst[k].re = l.re * l.re + l.im * l.im;
        dst[k].im = r.re * r.re + r.im * r.im;
/* Build per-bin CQT coefficient tables.  For each bin: evaluate the
 * tlength expression (clamped to [TLENGTH_MIN, timeclamp]), derive the
 * spectral window width flen centered on the bin frequency, and fill the
 * coefficient array with a Nuttall window (the 0.355768/0.487396/
 * 0.144232/0.012604 cosine terms), alternating sign per FFT index and
 * scaled by 1/fft_len.  start/len are aligned to cqt_align for the SIMD
 * cqt_calc implementations.  Returns 0 or a negative AVERROR; several
 * guards, gotos and the success return are elided in this listing. */
static int init_cqt(ShowCQTContext *s)
    const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
    int rate = s->ctx->inputs[0]->sample_rate;
    int nb_cqt_coeffs = 0;

    if ((ret = av_expr_parse(&expr, s->tlength, var_names, NULL, NULL, NULL, NULL, 0, s->ctx)) < 0)
    ret = AVERROR(ENOMEM);
    if (!(s->coeffs = av_calloc(s->cqt_len, sizeof(*s->coeffs))))
    for (k = 0; k < s->cqt_len; k++) {
        double vars[] = { s->timeclamp, s->timeclamp, s->freq[k], s->freq[k], s->freq[k] };
        double flen, center, tlength;
        int start, end, m = k;

        /* bins above Nyquist are skipped */
        if (s->freq[k] > 0.5 * rate)
        tlength = clip_with_log(s->ctx, "tlength", av_expr_eval(expr, vars, NULL),
                                TLENGTH_MIN, s->timeclamp, s->timeclamp, k);
        /* spectral support of the window, in FFT bins */
        flen = 8.0 * s->fft_len / (tlength * rate);
        center = s->freq[k] * s->fft_len / rate;
        start = FFMAX(0, ceil(center - 0.5 * flen));
        end = FFMIN(s->fft_len, floor(center + 0.5 * flen));
        /* round start down and len up to the SIMD alignment */
        s->coeffs[m].start = start & ~(s->cqt_align - 1);
        s->coeffs[m].len = (end | (s->cqt_align - 1)) + 1 - s->coeffs[m].start;
        nb_cqt_coeffs += s->coeffs[m].len;
        if (!(s->coeffs[m].val = av_calloc(s->coeffs[m].len, sizeof(*s->coeffs[m].val))))
        for (x = start; x <= end; x++) {
            /* (-1)^x compensates for the fftshift of the time-domain window */
            int sign = (x & 1) ? (-1) : 1;
            double y = 2.0 * M_PI * (x - center) * (1.0 / flen);
            /* Nuttall window evaluated in the frequency domain */
            double w = 0.355768 + 0.487396 * cos(y) + 0.144232 * cos(2*y) + 0.012604 * cos(3*y);
            w *= sign * (1.0 / s->fft_len);
            s->coeffs[m].val[x - s->coeffs[m].start] = w;
    av_log(s->ctx, AV_LOG_INFO, "nb_cqt_coeffs = %d.\n", nb_cqt_coeffs);
    /* error path: free everything allocated so far */
    for (k = 0; k < s->cqt_len; k++)
        av_freep(&s->coeffs[k].val);
    av_freep(&s->coeffs);
/* Allocate a w x h frame of the given pixel format and clear it to
 * "black": zeros for RGB formats, Y=16 / U=V=128 for YUV (with halved
 * chroma height for 4:2:0 formats) and alpha=0 when a 4th plane exists.
 * Returns NULL on allocation failure (error path partly elided here). */
static AVFrame *alloc_frame_empty(enum AVPixelFormat format, int w, int h)
    out = av_frame_alloc();
    out->format = format;
    if (av_frame_get_buffer(out, 32) < 0) {
    if (format == AV_PIX_FMT_RGB24 || format == AV_PIX_FMT_RGBA) {
        memset(out->data[0], 0, out->linesize[0] * h);
        /* planar YUV: chroma planes are half height only for 4:2:0 */
        int hh = (format == AV_PIX_FMT_YUV420P || format == AV_PIX_FMT_YUVA420P) ? h / 2 : h;
        memset(out->data[0], 16, out->linesize[0] * h);
        memset(out->data[1], 128, out->linesize[1] * hh);
        memset(out->data[2], 128, out->linesize[2] * hh);
        memset(out->data[3], 0, out->linesize[3] * h);
/* Map an output pixel format to the matching format with an alpha plane;
 * the axis overlay needs alpha so it can be blended over the sonogram. */
static enum AVPixelFormat convert_axis_pixel_format(enum AVPixelFormat format)
    case AV_PIX_FMT_RGB24: format = AV_PIX_FMT_RGBA; break;
    case AV_PIX_FMT_YUV444P: format = AV_PIX_FMT_YUVA444P; break;
    case AV_PIX_FMT_YUV422P: format = AV_PIX_FMT_YUVA422P; break;
    case AV_PIX_FMT_YUV420P: format = AV_PIX_FMT_YUVA420P; break;
/* Fallback axis initialization: allocate a fully transparent axis frame
 * (nothing will be drawn over the spectrum). */
static int init_axis_empty(ShowCQTContext *s)
    if (!(s->axis_frame = alloc_frame_empty(convert_axis_pixel_format(s->format), s->width, s->axis_h)))
        return AVERROR(ENOMEM);
/* Load the user-supplied axis image (axisfile option) and rescale/convert
 * it to width x axis_h in the alpha-capable variant of the output format.
 * Note: the resulting axis_frame planes are filled by ff_scale_image and
 * carry no AVBufferRef — common_uninit() handles that case specially.
 * Temporary-image cleanup and error labels are elided in this listing. */
static int init_axis_from_file(ShowCQTContext *s)
    uint8_t *tmp_data[4] = { NULL };
    enum AVPixelFormat tmp_format;
    int tmp_w, tmp_h, ret;

    if ((ret = ff_load_image(tmp_data, tmp_linesize, &tmp_w, &tmp_h, &tmp_format,
                             s->axisfile, s->ctx)) < 0)
    ret = AVERROR(ENOMEM);
    if (!(s->axis_frame = av_frame_alloc()))
    if ((ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
                              convert_axis_pixel_format(s->format), tmp_data, tmp_linesize, tmp_w, tmp_h,
                              tmp_format, s->ctx)) < 0)
    s->axis_frame->width = s->width;
    s->axis_frame->height = s->axis_h;
    s->axis_frame->format = convert_axis_pixel_format(s->format);
    /* error path */
    av_frame_free(&s->axis_frame);
414 static double midi(void *p, double f)
416 return log2(f/440.0) * 12.0 + 69.0;
/* Expression helper r(x): clip x to [0,1], scale to 0..255 and place the
 * value in the red byte (bits 16-23) of a packed 0xRRGGBB color. */
static double r_func(void *p, double x)
{
    return lrint(av_clipd(x, 0.0, 1.0) * 255.0) << 16;
}
/* Expression helper g(x): clip x to [0,1], scale to 0..255 and place the
 * value in the green byte (bits 8-15) of a packed 0xRRGGBB color. */
static double g_func(void *p, double x)
{
    return lrint(av_clipd(x, 0.0, 1.0) * 255.0) << 8;
}
/* Expression helper b(x): clip x to [0,1] and scale to the blue byte
 * (bits 0-7) of a packed 0xRRGGBB color. */
static double b_func(void *p, double x)
{
    return lrint(av_clipd(x, 0.0, 1.0) * 255.0);
}
/* Paint the background color of the 1920x32 RGBA font-axis template by
 * evaluating the fontcolor expression (with midi/r/g/b helpers) once per
 * of the 1920 columns; alpha is left 0 so only glyphs rendered later are
 * opaque.  Only the default frequency range is supported because the
 * note-name layout is fixed.  Some cleanup/return lines are elided. */
static int init_axis_color(ShowCQTContext *s, AVFrame *tmp)
    const char *var_names[] = { "timeclamp", "tc", "frequency", "freq", "f", NULL };
    const char *func_names[] = { "midi", "r", "g", "b", NULL };
    double (*funcs[])(void *, double) = { midi, r_func, g_func, b_func };

    if (s->basefreq != (double) BASEFREQ || s->endfreq != (double) ENDFREQ) {
        av_log(s->ctx, AV_LOG_WARNING, "font axis rendering is not implemented in non-default frequency range,"
               " please use axisfile option instead.\n");
        return AVERROR(EINVAL);
    /* reuse s->freq when the CQT already has exactly 1920 bins, otherwise
     * build a dedicated 1920-entry table */
    if (s->cqt_len == 1920)
    else if (!(freq = create_freq_table(s->basefreq, s->endfreq, 1920)))
        return AVERROR(ENOMEM);
    if ((ret = av_expr_parse(&expr, s->fontcolor, var_names, func_names, funcs, NULL, NULL, 0, s->ctx)) < 0) {
    for (x = 0; x < 1920; x++) {
        double vars[] = { s->timeclamp, s->timeclamp, freq[x], freq[x], freq[x] };
        /* expression yields a packed 0xRRGGBB value */
        int color = (int) av_expr_eval(expr, vars, NULL);
        uint8_t r = (color >> 16) & 0xFF, g = (color >> 8) & 0xFF, b = color & 0xFF;
        uint8_t *data = tmp->data[0];
        int linesize = tmp->linesize[0];
        for (y = 0; y < 32; y++) {
            data[linesize * y + 4 * x] = r;
            data[linesize * y + 4 * x + 1] = g;
            data[linesize * y + 4 * x + 2] = b;
            data[linesize * y + 4 * x + 3] = 0;
/* Render the note letters "EF G A BC D " into the alpha channel of the
 * 1920x32 axis template using the user's FreeType font (fontfile option).
 * The 12-character pattern is stamped 10 times (one per octave), each
 * glyph cell being font_width (16) pixels wide.  Returns 0 on success or
 * AVERROR(EINVAL) so the caller can fall back to the built-in font.
 * Several guards, loop bodies and cleanup lines are elided here. */
static int render_freetype(ShowCQTContext *s, AVFrame *tmp)
#if CONFIG_LIBFREETYPE
    const char *str = "EF G A BC D ";
    uint8_t *data = tmp->data[0];
    int linesize = tmp->linesize[0];
    FT_Library lib = NULL;
    int font_width = 16, font_height = 32;
    int font_repeat = font_width * 12;
    /* target advance in 16.16 fixed point, used to normalize glyph size */
    int linear_hori_advance = font_width * 65536;
    int non_monospace_warning = 0;

        return AVERROR(EINVAL);
    if (FT_Init_FreeType(&lib))
    if (FT_New_Face(lib, s->fontfile, 0, &face))
    if (FT_Set_Char_Size(face, 16*64, 0, 0, 0))
    /* measure 'A', then rescale so one glyph advance equals font_width px */
    if (FT_Load_Char(face, 'A', FT_LOAD_RENDER))
    if (FT_Set_Char_Size(face, 16*64 * linear_hori_advance / face->glyph->linearHoriAdvance, 0, 0, 0))
    for (x = 0; x < 12; x++) {
        int sx, sy, rx, bx, by, dx, dy;

        if (FT_Load_Char(face, str[x], FT_LOAD_RENDER))
        if (face->glyph->advance.x != font_width*64 && !non_monospace_warning) {
            av_log(s->ctx, AV_LOG_WARNING, "font is not monospace.\n");
            non_monospace_warning = 1;
        /* baseline 8 px above the bottom of the 32 px strip */
        sy = font_height - 8 - face->glyph->bitmap_top;
        for (rx = 0; rx < 10; rx++) {
            sx = rx * font_repeat + x * font_width + face->glyph->bitmap_left;
            for (by = 0; by < face->glyph->bitmap.rows; by++) {
                if (dy >= font_height)
                for (bx = 0; bx < face->glyph->bitmap.width; bx++) {
                    /* glyph coverage goes into the alpha byte only */
                    data[dy*linesize+4*dx+3] = face->glyph->bitmap.buffer[by*face->glyph->bitmap.width+bx];
    FT_Done_FreeType(lib);
    /* failure path: release FreeType and let the caller fall back */
    av_log(s->ctx, AV_LOG_WARNING, "error while loading freetype font, using default font instead.\n");
    FT_Done_FreeType(lib);
    return AVERROR(EINVAL);
    /* built without libfreetype */
    av_log(s->ctx, AV_LOG_WARNING, "freetype is not available, ignoring fontfile option.\n");
    return AVERROR(EINVAL);
/* Render the note letters with the built-in 8x16 VGA bitmap font
 * (avpriv_vga16_font), doubling each glyph to 16x32: every font row is
 * painted on two output lines and every set bit covers two pixels.
 * One octave occupies 192 pixels (12 glyphs x 16 px). */
static int render_default_font(AVFrame *tmp)
    const char *str = "EF G A BC D ";
    uint8_t *data = tmp->data[0];
    int linesize = tmp->linesize[0];

    for (x = 0; x < 1920; x += 192) {
        uint8_t *startptr = data + 4 * x;
        for (u = 0; u < 12; u++) {
            for (v = 0; v < 16; v++) {
                uint8_t *p = startptr + 2 * v * linesize + 16 * 4 * u;
                /* walk the 8 bits of this font row, 2 output px per bit */
                for (mask = 0x80; mask; mask >>= 1, p += 8) {
                    if (mask & avpriv_vga16_font[str[u] * 16 + v]) {
/* Build the axis by rendering text into a 1920x32 RGBA template (colored
 * by init_axis_color, glyphs from FreeType or the built-in font) and then
 * scaling it to width x axis_h in the alpha-capable output format.
 * Cleanup of tmp and the error labels are elided in this listing. */
static int init_axis_from_font(ShowCQTContext *s)
    int ret = AVERROR(ENOMEM);

    if (!(tmp = alloc_frame_empty(AV_PIX_FMT_RGBA, 1920, 32)))
    if (!(s->axis_frame = av_frame_alloc()))
    if ((ret = init_axis_color(s, tmp)) < 0)
    /* try the user's font first; fall back to the built-in VGA font */
    if (render_freetype(s, tmp) < 0 && (ret = render_default_font(tmp)) < 0)
    if ((ret = ff_scale_image(s->axis_frame->data, s->axis_frame->linesize, s->width, s->axis_h,
                              convert_axis_pixel_format(s->format), tmp->data, tmp->linesize,
                              1920, 32, AV_PIX_FMT_RGBA, s->ctx)) < 0)
    s->axis_frame->width = s->width;
    s->axis_frame->height = s->axis_h;
    s->axis_frame->format = convert_axis_pixel_format(s->format);
    /* error path */
    av_frame_free(&s->axis_frame);
/* Apply gamma: return v^(1/g).  Fast paths for small integer g values
 * (the earlier branches are elided in this listing — the visible
 * sqrtf(sqrtf(v)) is presumably the g == 4.0f case); any other gamma
 * falls through to the generic expf/logf form. */
static float calculate_gamma(float v, float g)
    return sqrtf(sqrtf(v));
    return expf(logf(v) / g);
/* Convert len CQT power pairs (re = left, im = right) to RGB colors:
 * left drives red, right drives blue, and their average drives green,
 * each clipped to 1.0, gamma-corrected with g and scaled to 0..255
 * (stored as float; quantization happens at draw time). */
static void rgb_from_cqt(ColorFloat *c, const FFTComplex *v, float g, int len)
    for (x = 0; x < len; x++) {
        c[x].rgb.r = 255.0f * calculate_gamma(FFMIN(1.0f, v[x].re), g);
        c[x].rgb.g = 255.0f * calculate_gamma(FFMIN(1.0f, 0.5f * (v[x].re + v[x].im)), g);
        c[x].rgb.b = 255.0f * calculate_gamma(FFMIN(1.0f, v[x].im), g);
/* Same channel mapping as rgb_from_cqt, but producing YUV directly using
 * the standard BT.601 limited-range matrix; the Y +16 / U,V +128 offsets
 * are added later at draw time.
 * NOTE(review): 18.214 lacks the 'f' suffix used by every other
 * coefficient, so that one product is computed in double — harmless but
 * inconsistent. */
static void yuv_from_cqt(ColorFloat *c, const FFTComplex *v, float gamma, int len)
    for (x = 0; x < len; x++) {
        r = calculate_gamma(FFMIN(1.0f, v[x].re), gamma);
        g = calculate_gamma(FFMIN(1.0f, 0.5f * (v[x].re + v[x].im)), gamma);
        b = calculate_gamma(FFMIN(1.0f, v[x].im), gamma);
        c[x].yuv.y = 65.481f * r + 128.553f * g + 24.966f * b;
        c[x].yuv.u = -37.797f * r - 74.203f * g + 112.0f * b;
        c[x].yuv.v = 112.0f * r - 93.786f * g - 18.214 * b;
/* Draw the packed-RGB bargraph into the top bar_h rows of out.  Row y
 * corresponds to the normalized height threshold ht; columns whose bar
 * height h[x] reaches ht are lit, their color faded by how far above the
 * threshold the bar extends ((h[x] - ht) * rcp_h[x]).  The "else paint
 * black" branch of the inner loop is elided in this listing. */
static void draw_bar_rgb(AVFrame *out, const float *h, const float *rcp_h,
                         const ColorFloat *c, int bar_h)
    int x, y, w = out->width;
    float mul, ht, rcp_bar_h = 1.0f / bar_h;
    uint8_t *v = out->data[0], *lp;
    int ls = out->linesize[0];

    for (y = 0; y < bar_h; y++) {
        /* threshold decreases from 1.0 (top row) towards 0 (bottom) */
        ht = (bar_h - y) * rcp_bar_h;
        for (x = 0; x < w; x++) {
            mul = (h[x] - ht) * rcp_h[x];
            *lp++ = lrintf(mul * c[x].rgb.r);
            *lp++ = lrintf(mul * c[x].rgb.g);
            *lp++ = lrintf(mul * c[x].rgb.b);
/* Emit one bargraph pixel at column x writing all of Y, U and V: fade the
 * column color by its height coverage and add the +16 / +128 offsets
 * omitted by yuv_from_cqt.  The leading lines of the macro body (the
 * height test / black-pixel branch) are elided in this listing. */
#define DRAW_BAR_WITH_CHROMA(x) \
            mul = (h[x] - ht) * rcp_h[x]; \
            *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
            *lpu++ = lrintf(mul * c[x].yuv.u + 128.0f); \
            *lpv++ = lrintf(mul * c[x].yuv.v + 128.0f); \
/* Same as DRAW_BAR_WITH_CHROMA but writes only the luma sample — used for
 * the pixels whose chroma is covered by a neighboring sample in 4:2:2 /
 * 4:2:0 output.  Leading macro lines are elided in this listing. */
#define DRAW_BAR_WITHOUT_CHROMA(x) \
            mul = (h[x] - ht) * rcp_h[x]; \
            *lpy++ = lrintf(mul * c[x].yuv.y + 16.0f); \
/* Planar-YUV version of the bargraph renderer.  Rows are processed in
 * pairs and columns in pairs so chroma can be subsampled: 4:4:4 writes
 * chroma for every pixel, 4:2:2 for every other column, and 4:2:0
 * additionally skips chroma on every odd row (yh indexes the half-height
 * chroma planes).  Some pointer setup lines are elided in this listing. */
static void draw_bar_yuv(AVFrame *out, const float *h, const float *rcp_h,
                         const ColorFloat *c, int bar_h)
    int x, y, yh, w = out->width;
    float mul, ht, rcp_bar_h = 1.0f / bar_h;
    uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
    uint8_t *lpy, *lpu, *lpv;
    int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
    int fmt = out->format;

    for (y = 0; y < bar_h; y += 2) {
        yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
        ht = (bar_h - y) * rcp_bar_h;
        /* even row: chroma is written for 4:4:4 (every pixel) and
         * 4:2:2/4:2:0 (every other pixel) */
        if (fmt == AV_PIX_FMT_YUV444P) {
            for (x = 0; x < w; x += 2) {
                DRAW_BAR_WITH_CHROMA(x);
                DRAW_BAR_WITH_CHROMA(x+1);
            for (x = 0; x < w; x += 2) {
                DRAW_BAR_WITH_CHROMA(x);
                DRAW_BAR_WITHOUT_CHROMA(x+1);
        /* odd row: 4:2:0 writes no chroma here at all */
        ht = (bar_h - (y+1)) * rcp_bar_h;
        lpy = vy + (y+1) * lsy;
        lpu = vu + (y+1) * lsu;
        lpv = vv + (y+1) * lsv;
        if (fmt == AV_PIX_FMT_YUV444P) {
            for (x = 0; x < w; x += 2) {
                DRAW_BAR_WITH_CHROMA(x);
                DRAW_BAR_WITH_CHROMA(x+1);
        } else if (fmt == AV_PIX_FMT_YUV422P) {
            for (x = 0; x < w; x += 2) {
                DRAW_BAR_WITH_CHROMA(x);
                DRAW_BAR_WITHOUT_CHROMA(x+1);
            for (x = 0; x < w; x += 2) {
                DRAW_BAR_WITHOUT_CHROMA(x);
                DRAW_BAR_WITHOUT_CHROMA(x+1);
/* Composite the RGBA axis frame over the spectrum colors at row offset
 * off of the packed-RGB output.  Three cases per pixel: fully transparent
 * axis (alpha 0) shows the spectrum color, fully opaque (alpha 255)
 * copies the axis pixel, and anything between alpha-blends.  The branch
 * conditions for the first two cases are elided in this listing. */
static void draw_axis_rgb(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
    int x, y, w = axis->width, h = axis->height;
    float a, rcp_255 = 1.0f / 255.0f;

    for (y = 0; y < h; y++) {
        lp = out->data[0] + (off + y) * out->linesize[0];
        lpa = axis->data[0] + y * axis->linesize[0];
        for (x = 0; x < w; x++) {
            /* transparent: keep the spectrum color */
            *lp++ = lrintf(c[x].rgb.r);
            *lp++ = lrintf(c[x].rgb.g);
            *lp++ = lrintf(c[x].rgb.b);
            } else if (lpa[3] == 255) {
            /* partial alpha: blend axis over spectrum */
            a = rcp_255 * lpa[3];
            *lp++ = lrintf(a * lpa[0] + (1.0f - a) * c[x].rgb.r);
            *lp++ = lrintf(a * lpa[1] + (1.0f - a) * c[x].rgb.g);
            *lp++ = lrintf(a * lpa[2] + (1.0f - a) * c[x].rgb.b);
/* Blend one axis pixel over the spectrum color c for planar YUV, writing
 * Y, U and V: transparent keeps the (offset-corrected) spectrum color,
 * opaque copies the axis sample, otherwise alpha-blend; all read/write
 * pointers are then advanced.  The leading alpha-test lines of the macro
 * are elided in this listing. */
#define BLEND_WITH_CHROMA(c) \
            *lpy = lrintf(c.yuv.y + 16.0f); \
            *lpu = lrintf(c.yuv.u + 128.0f); \
            *lpv = lrintf(c.yuv.v + 128.0f); \
        } else if (255 == *lpaa) { \
            float a = (1.0f/255.0f) * (*lpaa); \
            *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
            *lpu = lrintf(a * (*lpau) + (1.0f - a) * (c.yuv.u + 128.0f)); \
            *lpv = lrintf(a * (*lpav) + (1.0f - a) * (c.yuv.v + 128.0f)); \
        lpy++; lpu++; lpv++; \
        lpay++; lpau++; lpav++; lpaa++; \
/* Luma-only variant of BLEND_WITH_CHROMA, for samples whose chroma is
 * covered by a neighbor in subsampled formats.  Leading alpha-test lines
 * and the pointer-advance tail are elided in this listing. */
#define BLEND_WITHOUT_CHROMA(c) \
            *lpy = lrintf(c.yuv.y + 16.0f); \
        } else if (255 == *lpaa) { \
            float a = (1.0f/255.0f) * (*lpaa); \
            *lpy = lrintf(a * (*lpay) + (1.0f - a) * (c.yuv.y + 16.0f)); \
/* Composite the YUVA axis frame over the spectrum colors at row offset
 * off for planar-YUV output.  Mirrors draw_bar_yuv's subsampling scheme:
 * rows and columns are walked in pairs, 4:2:0 halves the chroma row index
 * (offh/yh) and writes no chroma on odd rows, 4:2:2 writes chroma on
 * every other column only. */
static void draw_axis_yuv(AVFrame *out, AVFrame *axis, const ColorFloat *c, int off)
    int fmt = out->format, x, y, yh, w = axis->width, h = axis->height;
    int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
    uint8_t *vy = out->data[0], *vu = out->data[1], *vv = out->data[2];
    uint8_t *vay = axis->data[0], *vau = axis->data[1], *vav = axis->data[2], *vaa = axis->data[3];
    int lsy = out->linesize[0], lsu = out->linesize[1], lsv = out->linesize[2];
    int lsay = axis->linesize[0], lsau = axis->linesize[1], lsav = axis->linesize[2], lsaa = axis->linesize[3];
    uint8_t *lpy, *lpu, *lpv, *lpay, *lpau, *lpav, *lpaa;

    for (y = 0; y < h; y += 2) {
        yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
        lpy = vy + (off + y) * lsy;
        lpu = vu + (offh + yh) * lsu;
        lpv = vv + (offh + yh) * lsv;
        lpay = vay + y * lsay;
        lpau = vau + yh * lsau;
        lpav = vav + yh * lsav;
        lpaa = vaa + y * lsaa;
        if (fmt == AV_PIX_FMT_YUV444P) {
            for (x = 0; x < w; x += 2) {
                BLEND_WITH_CHROMA(c[x]);
                BLEND_WITH_CHROMA(c[x+1]);
            for (x = 0; x < w; x += 2) {
                BLEND_WITH_CHROMA(c[x]);
                BLEND_WITHOUT_CHROMA(c[x+1]);
        /* odd row: 4:2:0 takes the all-BLEND_WITHOUT_CHROMA branch, so
         * the unhalved chroma offsets below are never dereferenced there */
        lpy = vy + (off + y + 1) * lsy;
        lpu = vu + (off + y + 1) * lsu;
        lpv = vv + (off + y + 1) * lsv;
        lpay = vay + (y + 1) * lsay;
        lpau = vau + (y + 1) * lsau;
        lpav = vav + (y + 1) * lsav;
        lpaa = vaa + (y + 1) * lsaa;
        if (fmt == AV_PIX_FMT_YUV444P) {
            for (x = 0; x < w; x += 2) {
                BLEND_WITH_CHROMA(c[x]);
                BLEND_WITH_CHROMA(c[x+1]);
        } else if (fmt == AV_PIX_FMT_YUV422P) {
            for (x = 0; x < w; x += 2) {
                BLEND_WITH_CHROMA(c[x]);
                BLEND_WITHOUT_CHROMA(c[x+1]);
            for (x = 0; x < w; x += 2) {
                BLEND_WITHOUT_CHROMA(c[x]);
                BLEND_WITHOUT_CHROMA(c[x+1]);
/* Copy the circular sonogram buffer into the output at row offset off.
 * idx is the ring-buffer head, so source rows are read modulo h to unroll
 * the ring into chronological order.  RGB has a single plane; YUV copies
 * all three, halving the destination row index (and skipping every other
 * source row) for 4:2:0 chroma. */
static void draw_sono(AVFrame *out, AVFrame *sono, int off, int idx)
    int fmt = out->format, h = sono->height;
    int nb_planes = (fmt == AV_PIX_FMT_RGB24) ? 1 : 3;
    int offh = (fmt == AV_PIX_FMT_YUV420P) ? off / 2 : off;
    int inc = (fmt == AV_PIX_FMT_YUV420P) ? 2 : 1;

    /* luma / packed plane */
    ls = FFMIN(out->linesize[0], sono->linesize[0]);
    for (y = 0; y < h; y++) {
        memcpy(out->data[0] + (off + y) * out->linesize[0],
               sono->data[0] + (idx + y) % h * sono->linesize[0], ls);
    /* chroma planes (YUV only) */
    for (i = 1; i < nb_planes; i++) {
        ls = FFMIN(out->linesize[i], sono->linesize[i]);
        for (y = 0; y < h; y += inc) {
            yh = (fmt == AV_PIX_FMT_YUV420P) ? y / 2 : y;
            memcpy(out->data[i] + (offh + yh) * out->linesize[i],
                   sono->data[i] + (idx + y) % h * sono->linesize[i], ls);
/* Write one new sonogram line (the current spectrum colors) into ring
 * buffer row idx of the packed-RGB sono frame. */
static void update_sono_rgb(AVFrame *sono, const ColorFloat *c, int idx)
    int x, w = sono->width;
    uint8_t *lp = sono->data[0] + idx * sono->linesize[0];

    for (x = 0; x < w; x++) {
        *lp++ = lrintf(c[x].rgb.r);
        *lp++ = lrintf(c[x].rgb.g);
        *lp++ = lrintf(c[x].rgb.b);
/* Write one new sonogram line into ring buffer row idx of the planar-YUV
 * sono frame.  Pixels are processed in pairs: only 4:4:4 stores chroma
 * for the odd pixel; subsampled formats keep the even pixel's chroma
 * (the sono frame itself is at least 4:2:2, see config_output). */
static void update_sono_yuv(AVFrame *sono, const ColorFloat *c, int idx)
    int x, fmt = sono->format, w = sono->width;
    uint8_t *lpy = sono->data[0] + idx * sono->linesize[0];
    uint8_t *lpu = sono->data[1] + idx * sono->linesize[1];
    uint8_t *lpv = sono->data[2] + idx * sono->linesize[2];

    for (x = 0; x < w; x += 2) {
        *lpy++ = lrintf(c[x].yuv.y + 16.0f);
        *lpu++ = lrintf(c[x].yuv.u + 128.0f);
        *lpv++ = lrintf(c[x].yuv.v + 128.0f);
        *lpy++ = lrintf(c[x+1].yuv.y + 16.0f);
        if (fmt == AV_PIX_FMT_YUV444P) {
            *lpu++ = lrintf(c[x+1].yuv.u + 128.0f);
            *lpv++ = lrintf(c[x+1].yuv.v + 128.0f);
/* Post-process the raw CQT power values into drawable data.  On the first
 * transform of each output frame (sono_count == 0): build the bargraph
 * height buffer h_buf (channel-averaged power scaled by bar_v), reduce
 * fcount adjacent bins to one column, apply bar gamma and precompute
 * reciprocal heights.  Then, for every transform: scale by the sonogram
 * volume, reduce to one column per pixel, and convert to RGB or YUV
 * colors in c_buf.  Some braces/guards are elided in this listing. */
static void process_cqt(ShowCQTContext *s)
    if (!s->sono_count) {
        for (x = 0; x < s->cqt_len; x++) {
            /* 0.5 * (re + im): average of left and right channel power */
            s->h_buf[x] = s->bar_v_buf[x] * 0.5f * (s->cqt_result[x].re + s->cqt_result[x].im);
        /* fold fcount consecutive bins into one bargraph column */
        float rcp_fcount = 1.0f / s->fcount;
        for (x = 0; x < s->width; x++) {
            for (i = 0; i < s->fcount; i++)
                h += s->h_buf[s->fcount * x + i];
            s->h_buf[x] = rcp_fcount * h;
        for (x = 0; x < s->width; x++) {
            s->h_buf[x] = calculate_gamma(s->h_buf[x], s->bar_g);
            /* +0.0001f avoids division by zero for silent columns */
            s->rcp_h_buf[x] = 1.0f / (s->h_buf[x] + 0.0001f);
    /* apply the per-bin sonogram volume */
    for (x = 0; x < s->cqt_len; x++) {
        s->cqt_result[x].re *= s->sono_v_buf[x];
        s->cqt_result[x].im *= s->sono_v_buf[x];
    /* fold fcount bins into one sonogram pixel */
    float rcp_fcount = 1.0f / s->fcount;
    for (x = 0; x < s->width; x++) {
        FFTComplex result = {0.0f, 0.0f};
        for (i = 0; i < s->fcount; i++) {
            result.re += s->cqt_result[s->fcount * x + i].re;
            result.im += s->cqt_result[s->fcount * x + i].im;
        s->cqt_result[x].re = rcp_fcount * result.re;
        s->cqt_result[x].im = rcp_fcount * result.im;
    if (s->format == AV_PIX_FMT_RGB24)
        rgb_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width);
        yuv_from_cqt(s->c_buf, s->cqt_result, s->sono_g, s->width);
/* Run one full pipeline step: FFT the buffered audio, apply the CQT,
 * post-process, append a sonogram line, and — once every s->count
 * transforms (sono_count wraps to 0) — allocate and compose a complete
 * output frame in *frameout (bargraph on top, axis, then sonogram).
 * Every stage's wall time is accumulated via UPDATE_TIME for the debug
 * report in common_uninit().  Some guards are elided in this listing. */
static int plot_cqt(AVFilterContext *ctx, AVFrame **frameout)
    AVFilterLink *outlink = ctx->outputs[0];
    ShowCQTContext *s = ctx->priv;
    int64_t last_time, cur_time;

/* accumulate elapsed microseconds into timer t and restart the clock */
#define UPDATE_TIME(t) \
    cur_time = av_gettime(); \
    t += cur_time - last_time; \

    last_time = av_gettime();
    /* copy so fft_data (the sliding input window) is preserved */
    memcpy(s->fft_result, s->fft_data, s->fft_len * sizeof(*s->fft_data));
    av_fft_permute(s->fft_ctx, s->fft_result);
    av_fft_calc(s->fft_ctx, s->fft_result);
    /* duplicate bin 0 at index fft_len so mirrored reads need no wrap */
    s->fft_result[s->fft_len] = s->fft_result[0];
    UPDATE_TIME(s->fft_time);

    s->cqt_calc(s->cqt_result, s->fft_result, s->coeffs, s->cqt_len, s->fft_len);
    UPDATE_TIME(s->cqt_time);

    UPDATE_TIME(s->process_cqt_time);

    s->update_sono(s->sono_frame, s->c_buf, s->sono_idx);
    UPDATE_TIME(s->update_sono_time);

    /* emit a frame only on the first transform of each count-group */
    if (!s->sono_count) {
        AVFrame *out = *frameout = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            return AVERROR(ENOMEM);
        UPDATE_TIME(s->alloc_time);

        s->draw_bar(out, s->h_buf, s->rcp_h_buf, s->c_buf, s->bar_h);
        UPDATE_TIME(s->bar_time);

        s->draw_axis(out, s->axis_frame, s->c_buf, s->bar_h);
        UPDATE_TIME(s->axis_time);

        s->draw_sono(out, s->sono_frame, s->bar_h + s->axis_h, s->sono_idx);
        UPDATE_TIME(s->sono_time);
        out->pts = s->next_pts;
        s->next_pts += PTS_STEP;
    s->sono_count = (s->sono_count + 1) % s->count;
    /* move the ring-buffer head one row up (wrapping) */
    s->sono_idx = (s->sono_idx + s->sono_h - 1) % s->sono_h;
1046 /* main filter control */
/* Validate options and derive the three section heights.  axis_h defaults
 * to width/60 (clamped so the explicitly-set sections still fit); bar_h
 * and sono_h then split the remaining height.  Finally everything is
 * checked for evenness (required by the 2-row/2-column subsampled
 * drawing) and consistency, and fcount is auto-chosen so that
 * fcount * width covers at least 1920 bins.  Several branches of the
 * height derivation are elided in this listing. */
static av_cold int init(AVFilterContext *ctx)
    ShowCQTContext *s = ctx->priv;

    /* legacy fullhd option: only valid together with the default size */
    av_log(ctx, AV_LOG_WARNING, "fullhd option is deprecated, use size/s option instead.\n");
    if (s->width != 1920 || s->height != 1080) {
        av_log(ctx, AV_LOG_ERROR, "fullhd set to 0 but with custom dimension.\n");
        return AVERROR(EINVAL);
    if (s->axis_h < 0) {
        s->axis_h = s->width / 60;
        if (s->bar_h >= 0 && s->sono_h >= 0)
            s->axis_h = s->height - s->bar_h - s->sono_h;
        if (s->bar_h >= 0 && s->sono_h < 0)
            s->axis_h = FFMIN(s->axis_h, s->height - s->bar_h);
        if (s->bar_h < 0 && s->sono_h >= 0)
            s->axis_h = FFMIN(s->axis_h, s->height - s->sono_h);
    /* unset bar/sono heights take (equal shares of) the remainder */
    s->bar_h = (s->height - s->axis_h) / 2;
    s->bar_h = s->height - s->sono_h - s->axis_h;
    s->sono_h = s->height - s->axis_h - s->bar_h;
    /* all sections must be even, non-negative, and sum to the height */
    if ((s->width & 1) || (s->height & 1) || (s->bar_h & 1) || (s->axis_h & 1) || (s->sono_h & 1) ||
        (s->bar_h < 0) || (s->axis_h < 0) || (s->sono_h < 0) || (s->bar_h > s->height) ||
        (s->axis_h > s->height) || (s->sono_h > s->height) || (s->bar_h + s->axis_h + s->sono_h != s->height)) {
        av_log(ctx, AV_LOG_ERROR, "invalid dimension.\n");
        return AVERROR(EINVAL);
    /* auto fcount: smallest count giving >= 1920 bins, capped at 10 */
    } while(s->fcount * s->width < 1920 && s->fcount < 10);
/* Filter teardown: all cleanup lives in common_uninit() so it can also be
 * shared with reconfiguration paths. */
static av_cold void uninit(AVFilterContext *ctx)
    common_uninit(ctx->priv);
/* Negotiate formats: input must be float stereo audio (any sample rate,
 * native stereo or downmix layout); output is one of the four supported
 * video pixel formats.  Error-handling "goto/return" continuations are
 * elided in this listing. */
static int query_formats(AVFilterContext *ctx)
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_NONE };
    enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_RGB24, AV_PIX_FMT_NONE
    int64_t channel_layouts[] = { AV_CH_LAYOUT_STEREO, AV_CH_LAYOUT_STEREO_DOWNMIX, -1 };

    /* set input audio formats */
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
    layouts = avfilter_make_format64_list(channel_layouts);
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
    /* set output video format */
    formats = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
1142 static int config_output(AVFilterLink *outlink)
/* Configure the output link and allocate all per-stream state: video
 * geometry and time base, the CQT frequency table, volume tables, FFT and
 * CQT buffers, per-pixel-format draw callbacks, the axis image (with
 * graceful fallbacks), the sonogram frame, scratch buffers, and the audio
 * step size advanced per output video frame. */
1144 AVFilterContext *ctx = outlink->src;
1145 AVFilterLink *inlink = ctx->inputs[0];
1146 ShowCQTContext *s = ctx->priv;
1151 outlink->w = s->width;
1152 outlink->h = s->height;
1153 s->format = outlink->format;
1154 outlink->sample_aspect_ratio = av_make_q(1, 1);
1155 outlink->frame_rate = s->rate;
/* time base is 1/(rate*PTS_STEP): each output frame spans PTS_STEP ticks */
1156 outlink->time_base = av_mul_q(av_inv_q(s->rate), av_make_q(1, PTS_STEP));
1157 av_log(ctx, AV_LOG_INFO, "video: %dx%d %s %d/%d fps, bar_h = %d, axis_h = %d, sono_h = %d.\n",
1158 s->width, s->height, av_get_pix_fmt_name(s->format), s->rate.num, s->rate.den,
1159 s->bar_h, s->axis_h, s->sono_h);
/* fcount transform bins are computed per horizontal output pixel */
1161 s->cqt_len = s->width * s->fcount;
1162 if (!(s->freq = create_freq_table(s->basefreq, s->endfreq, s->cqt_len)))
1163 return AVERROR(ENOMEM);
1165 if ((ret = init_volume(s)) < 0)
/* smallest power-of-two FFT that covers timeclamp seconds of input audio */
1168 s->fft_bits = ceil(log2(inlink->sample_rate * s->timeclamp));
1169 s->fft_len = 1 << s->fft_bits;
1170 av_log(ctx, AV_LOG_INFO, "fft_len = %d, cqt_len = %d.\n", s->fft_len, s->cqt_len);
1172 s->fft_ctx = av_fft_init(s->fft_bits, 0);
1173 s->fft_data = av_calloc(s->fft_len, sizeof(*s->fft_data));
/* NOTE(review): the extra 64 elements look like over-read headroom for
 * vectorized cqt_calc implementations — confirm against the SIMD code */
1174 s->fft_result = av_calloc(s->fft_len + 64, sizeof(*s->fft_result));
1175 s->cqt_result = av_malloc_array(s->cqt_len, sizeof(*s->cqt_result));
1176 if (!s->fft_ctx || !s->fft_data || !s->fft_result || !s->cqt_result)
1177 return AVERROR(ENOMEM);
/* install default C callbacks, then pick RGB or YUV drawing variants
 * depending on the negotiated output pixel format */
1180 s->cqt_calc = cqt_calc;
1181 s->draw_sono = draw_sono;
1182 if (s->format == AV_PIX_FMT_RGB24) {
1183 s->draw_bar = draw_bar_rgb;
1184 s->draw_axis = draw_axis_rgb;
1185 s->update_sono = update_sono_rgb;
1187 s->draw_bar = draw_bar_yuv;
1188 s->draw_axis = draw_axis_yuv;
1189 s->update_sono = update_sono_yuv;
1192 if ((ret = init_cqt(s)) < 0)
/* axis image: user-supplied file first, then font rendering, then an empty
 * axis as last resort — load failures only degrade output, they never abort */
1197 if ((ret = init_axis_empty(s)) < 0)
1199 } else if (s->axisfile) {
1200 if (init_axis_from_file(s) < 0) {
1201 av_log(ctx, AV_LOG_WARNING, "loading axis image failed, fallback to font rendering.\n");
1202 if (init_axis_from_font(s) < 0) {
1203 av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disable text drawing.\n");
1204 if ((ret = init_axis_empty(s)) < 0)
1209 if (init_axis_from_font(s) < 0) {
1210 av_log(ctx, AV_LOG_WARNING, "loading axis font failed, disable text drawing.\n");
1211 if ((ret = init_axis_empty(s)) < 0)
/* NOTE(review): YUV420P is promoted to YUV422P for the sonogram buffer,
 * presumably because its rows need full vertical chroma resolution —
 * confirm against update_sono_yuv */
1218 s->sono_frame = alloc_frame_empty((outlink->format == AV_PIX_FMT_YUV420P) ?
1219 AV_PIX_FMT_YUV422P : outlink->format, s->width, s->sono_h);
1221 return AVERROR(ENOMEM);
/* scratch buffers used while drawing each frame */
1224 s->h_buf = av_malloc_array(s->cqt_len, sizeof (*s->h_buf));
1225 s->rcp_h_buf = av_malloc_array(s->width, sizeof(*s->rcp_h_buf));
1226 s->c_buf = av_malloc_array(s->width, sizeof(*s->c_buf));
1227 if (!s->h_buf || !s->rcp_h_buf || !s->c_buf)
1228 return AVERROR(ENOMEM);
/* split the rational samples-per-frame (sample_rate / (count * rate)) into
 * an integer step plus a fractional remainder carried across frames */
1233 s->remaining_fill = s->fft_len / 2;
1234 s->remaining_frac = 0;
1235 s->step_frac = av_div_q(av_make_q(inlink->sample_rate, s->count) , s->rate);
1236 s->step = (int)(s->step_frac.num / s->step_frac.den);
1237 s->step_frac.num %= s->step_frac.den;
1238 if (s->step_frac.num) {
1239 av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d + %d/%d.\n",
1240 inlink->sample_rate, s->step, s->step_frac.num, s->step_frac.den);
1241 av_log(ctx, AV_LOG_WARNING, "fractional step.\n");
1243 av_log(ctx, AV_LOG_INFO, "audio: %d Hz, step = %d.\n",
1244 inlink->sample_rate, s->step);
1251 static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
/* Consume interleaved-float stereo audio, sliding a window of fft_len
 * samples through s->fft_data and emitting one video frame per `step`
 * samples via plot_cqt().  Called with insamples == NULL at EOF (see
 * request_frame) to flush: the tail of the window is zero-padded so
 * already-buffered audio still produces final output frames. */
1253 AVFilterContext *ctx = inlink->dst;
1254 AVFilterLink *outlink = ctx->outputs[0];
1255 ShowCQTContext *s = ctx->priv;
1256 int remaining, step, ret, x, i, j, m;
1258 AVFrame *out = NULL;
/* flush path: zero-fill whatever is still missing from the window and
 * keep emitting frames until the buffered audio drains */
1261 while (s->remaining_fill < s->fft_len / 2) {
1262 memset(&s->fft_data[s->fft_len - s->remaining_fill], 0, sizeof(*s->fft_data) * s->remaining_fill);
1263 ret = plot_cqt(ctx, &out);
/* advance by the integer step plus any accumulated fractional samples */
1267 step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
1268 s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
1269 for (x = 0; x < (s->fft_len-step); x++)
1270 s->fft_data[x] = s->fft_data[x+step];
1271 s->remaining_fill += step;
1274 return ff_filter_frame(outlink, out);
1279 remaining = insamples->nb_samples;
1280 audio_data = (float*) insamples->data[0];
1283 i = insamples->nb_samples - remaining;
1284 j = s->fft_len - s->remaining_fill;
/* enough input to complete the window: pack each interleaved stereo pair
 * as one complex sample (left -> re, right -> im) and render a frame */
1285 if (remaining >= s->remaining_fill) {
1286 for (m = 0; m < s->remaining_fill; m++) {
1287 s->fft_data[j+m].re = audio_data[2*(i+m)];
1288 s->fft_data[j+m].im = audio_data[2*(i+m)+1];
1290 ret = plot_cqt(ctx, &out);
1292 av_frame_free(&insamples);
1295 remaining -= s->remaining_fill;
/* recompute the ideal output pts from the input pts (the window center
 * lags the last consumed sample by fft_len/2 samples) and resync when the
 * running pts drifted more than PTS_TOLERANCE ticks */
1297 int64_t pts = av_rescale_q(insamples->pts, inlink->time_base, av_make_q(1, inlink->sample_rate));
1298 pts += insamples->nb_samples - remaining - s->fft_len/2;
1299 pts = av_rescale_q(pts, av_make_q(1, inlink->sample_rate), outlink->time_base);
1300 if (FFABS(pts - out->pts) > PTS_TOLERANCE) {
1301 av_log(ctx, AV_LOG_DEBUG, "changing pts from %"PRId64" (%.3f) to %"PRId64" (%.3f).\n",
1302 out->pts, out->pts * av_q2d(outlink->time_base),
1303 pts, pts * av_q2d(outlink->time_base));
1305 s->next_pts = pts + PTS_STEP;
1307 ret = ff_filter_frame(outlink, out);
1309 av_frame_free(&insamples);
/* slide the window forward by step samples before refilling */
1314 step = s->step + (s->step_frac.num + s->remaining_frac) / s->step_frac.den;
1315 s->remaining_frac = (s->step_frac.num + s->remaining_frac) % s->step_frac.den;
1316 for (m = 0; m < s->fft_len-step; m++)
1317 s->fft_data[m] = s->fft_data[m+step];
1318 s->remaining_fill = step;
/* not enough input to complete the window: buffer what we have and return */
1320 for (m = 0; m < remaining; m++) {
1321 s->fft_data[j+m].re = audio_data[2*(i+m)];
1322 s->fft_data[j+m].im = audio_data[2*(i+m)+1];
1324 s->remaining_fill -= remaining;
1328 av_frame_free(&insamples);
1332 static int request_frame(AVFilterLink *outlink)
/* Forward the frame request upstream; on EOF, call filter_frame() with a
 * NULL frame so buffered audio is flushed into final video frames. */
1334 AVFilterLink *inlink = outlink->src->inputs[0];
1337 ret = ff_request_frame(inlink);
1338 if (ret == AVERROR_EOF)
1339 ret = filter_frame(inlink, NULL);
static const AVFilterPad showcqt_inputs[] = {
/* single audio input; samples are processed per-frame by filter_frame() */
1346 .type = AVMEDIA_TYPE_AUDIO,
1347 .filter_frame = filter_frame,
static const AVFilterPad showcqt_outputs[] = {
/* single video output: configured by config_output(), driven (and flushed
 * at EOF) through request_frame() */
1355 .type = AVMEDIA_TYPE_VIDEO,
1356 .config_props = config_output,
1357 .request_frame = request_frame,
1362 AVFilter ff_avf_showcqt = {
1364 .description = NULL_IF_CONFIG_SMALL("Convert input audio to a CQT (Constant/Clamped Q Transform) spectrum video output."),
1367 .query_formats = query_formats,
1368 .priv_size = sizeof(ShowCQTContext),
1369 .inputs = showcqt_inputs,
1370 .outputs = showcqt_outputs,
1371 .priv_class = &showcqt_class,