From: Simon A. Eugster
Date: Sat, 27 Nov 2010 07:03:18 +0000 (+0000)
Subject: Some comments added
X-Git-Url: https://git.sesse.net/?a=commitdiff_plain;h=f26c07fd5d901fead8ea74490220fe5ea11f97c4;p=kdenlive

Some comments added

svn path=/trunk/kdenlive/; revision=5121
---

diff --git a/src/audioscopes/audiospectrum.cpp b/src/audioscopes/audiospectrum.cpp
index d4bb715b..f0b8cf4d 100644
--- a/src/audioscopes/audiospectrum.cpp
+++ b/src/audioscopes/audiospectrum.cpp
@@ -16,8 +16,6 @@ AudioSpectrum::AudioSpectrum(Monitor *projMonitor, Monitor *clipMonitor, QWidget
 
     m_cfg = kiss_fftr_alloc(512, 0,0,0);
 
-    m_aAutoRefresh->setChecked(true); // TODO remove
-
     m_aLin = new QAction(i18n("Linear scale"), this);
     m_aLin->setCheckable(true);
     m_aLog = new QAction(i18n("Logarithmic scale"), this);
@@ -27,7 +25,7 @@
     m_agScale->addAction(m_aLin);
     m_agScale->addAction(m_aLog);
 
-    m_menu->addSeparator()->setText(i18n("Scale"));;
+    m_menu->addSeparator()->setText(i18n("Scale"));
     m_menu->addAction(m_aLin);
     m_menu->addAction(m_aLog);
 
@@ -76,47 +74,56 @@ bool AudioSpectrum::isScopeDependingOnInput() const { return true; }
 bool AudioSpectrum::isHUDDependingOnInput() const { return false; }
 
 QImage AudioSpectrum::renderBackground(uint) { return QImage(); }
 
-QImage AudioSpectrum::renderScope(uint accelerationFactor, const QVector audioFrame, const int freq, const int num_channels, const int num_samples)
+QImage AudioSpectrum::renderScope(uint, const QVector audioFrame, const int freq, const int num_channels, const int num_samples)
 {
+    QTime start = QTime::currentTime();
+
     float data[512];
     // The resulting FFT vector is only half as long
     kiss_fft_cpx freqData[256];
 
     // Copy the first channel's audio into a vector for the FFT display
+    // (only one channel handled at the moment)
     for (int i = 0; i < 512; i++) {
         data[i] = (float) audioFrame.data()[i*num_channels];
     }
 
+    // Calculate the Fast Fourier Transform for the input data
     kiss_fftr(m_cfg, data, freqData);
 
+    // qDebug() << num_samples << " samples at " << freq << " Hz";
     // qDebug() << "FFT Freq: " << freqData[0].r << " " << freqData[1].r << ", " << freqData[2].r;
     // qDebug() << "FFT imag: " << freqData[0].i << " " << freqData[1].i << ", " << freqData[2].i;
-    qDebug() << QMetaObject::normalizedSignature("void audioSamplesSignal(const QVector&, int freq, int num_channels, int num_samples)");
 
     float max = 0;
+    float maxSignal = 0;
     float min = 1000;
     float val;
+
+    // Get the minimum and the maximum value of the Fourier transformed (for scaling)
     for (int i = 0; i < 256; i++) {
-        if (m_aLin->isChecked()) {
-            val = pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5);
-        } else {
-            val = log(pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5));
+        // sqrt(r² + i²)
+        val = pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5);
+        if (maxSignal < val) { maxSignal = val; }
+
+        if (!m_aLin->isChecked()) {
+            // Logarithmic scale
+            val = log(pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5)/512.0f);
         }
         max = (max > val) ? max : val;
         min = (min < val) ? min : val;
     }
-    qDebug() << "MAX: " << max << ", MIN: " << min;
+    qDebug() << "MAX: " << max << " (" << maxSignal << "), MIN: " << min;
 
+    // Scaling factor
     float factor = 100./(max-min);
 
+    // Draw the spectrum
     QImage spectrum(512, 100, QImage::Format_ARGB32);
     spectrum.fill(qRgba(0,0,0,0));
     for (int i = 0; i < 256; i++) {
         if (m_aLin->isChecked()) {
             val = pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5);
         } else {
-            val = log(pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5));
+            val = log(pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5)/512.0f);
         }
         //val = val >> 16;
         val = factor * (val-min);
@@ -127,7 +134,7 @@ QImage AudioSpectrum::renderScope(uint accelerationFactor, const QVector
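
For reference, the following is a minimal standalone sketch of the magnitude and log-scale computation that the commented loop in this patch performs. It assumes kissfft's real-FFT API (kiss_fftr_alloc / kiss_fftr / kiss_fftr_free) with the default float scalar type; the function name computeSpectrum and the fixed 512-sample window are illustrative only and not part of the patch.

// Standalone sketch (not part of the patch): spectrum magnitudes from a
// real FFT, with optional logarithmic scaling as in the code above.
#include <cmath>
#include <vector>
#include "kiss_fftr.h"

std::vector<float> computeSpectrum(const float *samples, bool logScale)
{
    const int windowSize = 512;

    // kiss_fftr yields nfft/2+1 complex bins for an nfft-point real FFT
    kiss_fftr_cfg cfg = kiss_fftr_alloc(windowSize, 0, 0, 0);
    std::vector<kiss_fft_cpx> freqData(windowSize / 2 + 1);
    kiss_fftr(cfg, samples, freqData.data());

    std::vector<float> out(freqData.size());
    for (int i = 0; i < (int) freqData.size(); i++) {
        // sqrt(r² + i²): magnitude of the complex bin
        float val = std::sqrt(freqData[i].r * freqData[i].r
                              + freqData[i].i * freqData[i].i);
        if (logScale) {
            // Normalise by the window size before taking the logarithm,
            // mirroring the /512.0f factor introduced by the patch
            val = std::log(val / windowSize);
        }
        out[i] = val;
    }
    kiss_fftr_free(cfg);
    return out;
}

As in the patched code, silent input drives the logarithm towards negative infinity, so a caller would typically clamp the values before mapping them onto the 0-100 pixel range of the spectrum image.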