signals:
/** mseconds is the time taken for the calculation,
- accelerationFactor is the acceleration factor that has been used. */
+ accelerationFactor is the acceleration factor that has been used for this calculation. */
void signalHUDRenderingFinished(uint mseconds, uint accelerationFactor);
void signalScopeRenderingFinished(uint mseconds, uint accelerationFactor);
void signalBackgroundRenderingFinished(uint mseconds, uint accelerationFactor);
b &= connect(this, SIGNAL(customContextMenuRequested(QPoint)), this, SLOT(customContextMenuRequested(QPoint)));
//b &= connect(m_activeRender, SIGNAL(rendererPosition(int)), this, SLOT(slotRenderZoneUpdated()));
- b &= connect(m_activeRender, SIGNAL(frameUpdated(QImage)), this, SLOT(slotRenderZoneUpdated(QImage)));
+// b &= connect(m_activeRender, SIGNAL(frameUpdated(QImage)), this, SLOT(slotRenderZoneUpdated(QImage)));
b &= connect(this, SIGNAL(signalHUDRenderingFinished(uint, uint)), this, SLOT(slotHUDRenderingFinished(uint, uint)));
b &= connect(this, SIGNAL(signalScopeRenderingFinished(uint, uint)), this, SLOT(slotScopeRenderingFinished(uint, uint)));
}
}
-void AbstractAudioScopeWidget::slotActiveMonitorChanged(bool isClipMonitor)
-{
-// qDebug() << "Active monitor has changed in " << m_widgetName << ". Is the clip monitor active now? " << isClipMonitor;
+//void AbstractAudioScopeWidget::slotActiveMonitorChanged(bool isClipMonitor)
+//{
+//// qDebug() << "Active monitor has changed in " << m_widgetName << ". Is the clip monitor active now? " << isClipMonitor;
- bool b = m_activeRender->disconnect(this);
- Q_ASSERT(b);
+// bool b = m_activeRender->disconnect(this);
+// Q_ASSERT(b);
- m_activeRender = (isClipMonitor) ? m_clipMonitor->render : m_projMonitor->render;
+// m_activeRender = (isClipMonitor) ? m_clipMonitor->render : m_projMonitor->render;
- //b &= connect(m_activeRender, SIGNAL(rendererPosition(int)), this, SLOT(slotRenderZoneUpdated()));
- b &= connect(m_activeRender, SIGNAL(frameUpdated(QImage)), this, SLOT(slotRenderZoneUpdated(QImage)));
- Q_ASSERT(b);
+// //b &= connect(m_activeRender, SIGNAL(rendererPosition(int)), this, SLOT(slotRenderZoneUpdated()));
+// b &= connect(m_activeRender, SIGNAL(frameUpdated(QImage)), this, SLOT(slotRenderZoneUpdated(QImage)));
+// Q_ASSERT(b);
- // Update the scope for the new monitor.
- prodHUDThread();
- prodScopeThread();
- prodBackgroundThread();
-}
+// // Update the scope for the new monitor.
+// prodHUDThread();
+// prodScopeThread();
+// prodBackgroundThread();
+//}
void AbstractAudioScopeWidget::slotRenderZoneUpdated()
{
m_newScopeFrames.fetchAndAddRelaxed(1);
m_newBackgroundFrames.fetchAndAddRelaxed(1);
-// qDebug() << "Monitor incoming. New frames total HUD/Scope/Background: " << m_newHUDFrames
+// qDebug() << "Audio incoming. New frames total HUD/Scope/Background: " << m_newHUDFrames
// << "/" << m_newScopeFrames << "/" << m_newBackgroundFrames;
if (this->visibleRegion().isEmpty()) {
}
}
-void AbstractAudioScopeWidget::slotRenderZoneUpdated(QImage frame)
-{
- m_scopeImage = frame;
- slotRenderZoneUpdated();
-}
-
void AbstractAudioScopeWidget::slotReceiveAudio(const QVector<int16_t>& sampleData, int freq, int num_channels, int num_samples)
{
- qDebug() << "Received audio. Size is " << (int) sampleData.size() << ".";
+ //qDebug() << "Received audio. Size is " << (int) sampleData.size() << ".";
if (sampleData.size() > 0) {
- qDebug() << sampleData.data()[0];
+ //qDebug() << "Received: " << sampleData.data()[0] << ", " << sampleData.data()[1] << ", " << sampleData.data()[2];
}
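+ // Keep the most recent audio frame and its parameters so the scope
+ // can be re-rendered from them.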
+ m_audioFrame = sampleData;
+ m_freq = freq;
+ m_nChannels = num_channels;
+ m_nSamples = num_samples;
+ slotRenderZoneUpdated();
//TODO
}
-void AbstractAudioScopeWidget::slotReceiveAudioTemp(const QByteArray arr)
-{
- qDebug() << "Audio signal received";
-}
-
void AbstractAudioScopeWidget::slotResetRealtimeFactor(bool realtimeChecked)
{
if (!realtimeChecked) {
bool initialDimensionUpdateDone;
bool m_requestForcedUpdate;
- QImage m_scopeImage;
+// QImage m_scopeImage;
QVector<int16_t> m_audioFrame; //NEW
int m_freq;
int m_nChannels;
/** @brief Must be called when the active monitor has shown a new frame.
This slot must be connected in the implementing class, it is *not*
done in this abstract class. */
- void slotActiveMonitorChanged(bool isClipMonitor);
+// void slotActiveMonitorChanged(bool isClipMonitor);
private slots:
void customContextMenuRequested(const QPoint &pos);
The scope then decides whether and when it wants to recalculate the scope, depending
on whether it is currently visible and whether a calculation thread is already running. */
void slotRenderZoneUpdated();
- void slotRenderZoneUpdated(QImage);//OLD
+// void slotRenderZoneUpdated(QImage);//OLD
void slotReceiveAudio(const QVector<int16_t>& sampleData, int freq, int num_channels, int num_samples); // NEW, TODO comment
- void slotReceiveAudioTemp(const QByteArray arr);
/** The following slots are called when rendering of a component has finished. They e.g. update
the widget and decide whether to immediately restart the calculation thread. */
void slotHUDRenderingFinished(uint mseconds, uint accelerationFactor);
#include "audiospectrum.h"
#include "tools/kiss_fftr.h"
+#include <QMenu>
+
+//#include <iostream>
+//#include <fstream>
+
+bool fileWritten = false;
+
AudioSpectrum::AudioSpectrum(Monitor *projMonitor, Monitor *clipMonitor, QWidget *parent) :
AbstractAudioScopeWidget(projMonitor, clipMonitor, true, parent)
{
- init();
+ ui = new Ui::AudioSpectrum_UI;
+ ui->setupUi(this);
+
m_cfg = kiss_fftr_alloc(512, 0,0,0);
+
+ m_aAutoRefresh->setChecked(true); // TODO remove
+
+ m_aLin = new QAction(i18n("Linear scale"), this);
+ m_aLin->setCheckable(true);
+ m_aLog = new QAction(i18n("Logarithmic scale"), this);
+ m_aLog->setCheckable(true);
+
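+ // Put both actions in one group so that only one scale is active at a time.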
+ m_agScale = new QActionGroup(this);
+ m_agScale->addAction(m_aLin);
+ m_agScale->addAction(m_aLog);
+
+ m_menu->addSeparator()->setText(i18n("Scale"));
+ m_menu->addAction(m_aLin);
+ m_menu->addAction(m_aLog);
+
+ init();
}
AudioSpectrum::~AudioSpectrum()
{
free(m_cfg);
+ delete m_agScale;
+ delete m_aLin;
+ delete m_aLog;
+}
+
+void AudioSpectrum::readConfig()
+{
+ AbstractAudioScopeWidget::readConfig();
+
+ KSharedConfigPtr config = KGlobal::config();
+ KConfigGroup scopeConfig(config, configName());
+ QString scale = scopeConfig.readEntry("scale");
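+ // Falls back to the logarithmic scale if no entry has been written yet.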
+ if (scale == "lin") {
+ m_aLin->setChecked(true);
+ } else {
+ m_aLog->setChecked(true);
+ }
+
+}
+void AudioSpectrum::writeConfig()
+{
+ KSharedConfigPtr config = KGlobal::config();
+ KConfigGroup scopeConfig(config, configName());
+ QString scale;
+ if (m_aLin->isChecked()) {
+ scale = "lin";
+ } else {
+ scale = "log";
+ }
+ scopeConfig.writeEntry("scale", scale);
+ scopeConfig.sync();
}
QString AudioSpectrum::widgetName() const { return QString("audiospectrum"); }
QImage AudioSpectrum::renderScope(uint accelerationFactor, const QVector<int16_t> audioFrame, const int freq, const int num_channels, const int num_samples)
{
float data[512];
- kiss_fft_cpx freqData[512];
+
+ // The resulting FFT vector is only about half as long: kiss_fftr
+ // fills nfft/2+1 = 257 bins for a 512 point real FFT
+ kiss_fft_cpx freqData[257];
+
+ // Copy the first channel's audio into a vector for the FFT display
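+ // (Channels are interleaved in audioFrame, so channel 0 occupies
+ // every num_channels-th sample.)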
for (int i = 0; i < 512; i++) {
- data[i] = (float) audioFrame.data()[i];
+ data[i] = (float) audioFrame.data()[i*num_channels];
}
kiss_fftr(m_cfg, data, freqData);
- qDebug() << freqData[0].r << " " << freqData[1].r << " " << freqData[2].r;
- return QImage();
+// qDebug() << num_samples << " samples at " << freq << " Hz";
+// qDebug() << "FFT Freq: " << freqData[0].r << " " << freqData[1].r << ", " << freqData[2].r;
+// qDebug() << "FFT imag: " << freqData[0].i << " " << freqData[1].i << ", " << freqData[2].i;
+
+// qDebug() << QMetaObject::normalizedSignature("void audioSamplesSignal(const QVector<int16_t>&, int freq, int num_channels, int num_samples)");
+
+ float max = 0;
+ float min = 1000;
+ float val;
+ for (int i = 0; i < 256; i++) {
+ if (m_aLin->isChecked()) {
+ val = pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5);
+ } else {
+ val = log(pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5));
+ }
+ max = (max > val) ? max : val;
+ min = (min < val) ? min : val;
+ }
+ qDebug() << "MAX: " << max << ", MIN: " << min;
+
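+ // Map the [min,max] range of the magnitudes onto the 100 px height
+ // of the image (assumes max > min).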
+ float factor = 100./(max-min);
+
+ QImage spectrum(512, 100, QImage::Format_ARGB32);
+ spectrum.fill(qRgba(0,0,0,0));
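+ // Draw each of the 256 frequency bins as a 2 px wide bar.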
+ for (int i = 0; i < 256; i++) {
+ if (m_aLin->isChecked()) {
+ val = pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5);
+ } else {
+ val = log(pow(pow(fabs(freqData[i].r),2) + pow(fabs(freqData[i].i),2), .5));
+ }
+ //val = val >> 16;
+ val = factor * (val-min);
+// qDebug() << val;
+ for (int y = 0; y < val && y < 100; y++) {
+ spectrum.setPixel(2*i, 99-y, qRgba(225, 182, 255, 255));
+ spectrum.setPixel(2*i+1, 99-y, qRgba(225, 182, 255, 255));
+ }
+ }
+
+ emit signalScopeRenderingFinished(0, 1);
+
+ /*
+ if (!fileWritten || true) {
+ std::ofstream mFile;
+ mFile.open("/tmp/freq.m");
+ if (!mFile) {
+ qDebug() << "Opening file failed.";
+ } else {
+ mFile << "val = [ ";
+
+ for (int sample = 0; sample < 256; sample++) {
+ mFile << data[sample] << " ";
+ }
+ mFile << " ];\n";
+
+ mFile << "freq = [ ";
+ for (int sample = 0; sample < 256; sample++) {
+ mFile << freqData[sample].r << "+" << freqData[sample].i << "*i ";
+ }
+ mFile << " ];\n";
+
+ mFile.close();
+ fileWritten = true;
+ qDebug() << "File written.";
+ }
+ } else {
+ qDebug() << "File already written.";
+ }
+ */
+
+ return spectrum;
}
QImage AudioSpectrum::renderHUD(uint) { return QImage(); }
QRect AudioSpectrum::scopeRect() {
return QRect(0,0,40,40);
}
-
-void AudioSpectrum::readConfig()
-{
-
-}
bool isScopeDependingOnInput() const;
bool isBackgroundDependingOnInput() const;
virtual void readConfig();
+ void writeConfig();
private:
Ui::AudioSpectrum_UI *ui;
kiss_fftr_cfg m_cfg;
+
+ QAction *m_aLin;
+ QAction *m_aLog;
+ QActionGroup *m_agScale;
+
};
#endif // AUDIOSPECTRUM_H
void AudioSignal::slotReceiveAudio(const QVector<int16_t>& data, int freq, int num_channels, int samples) {
- int num_samples = samples > 200 ? 200 : samples;
-
+ int num_samples = samples > 200 ? 200 : samples;
QByteArray channels;
for (int i = 0; i < num_channels; i++) {
}
channels.append(val / num_samples);
}
- showAudio(channels);
+ showAudio(channels);
}
void AudioSignal::showAudio(const QByteArray arr)
{
m_audioSpectrumDock->setObjectName(m_audioSpectrum->widgetName());
m_audioSpectrumDock->setWidget(m_audioSpectrum);
addDockWidget(Qt::TopDockWidgetArea, m_audioSpectrumDock);
+
+ // Connect the audio signal to the audio scope slots
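+ // Note: SIGNAL/SLOT signatures are compared in Qt's normalized form,
+ // which strips const and references from value types.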
bool b = true;
if (m_projectMonitor) {
qDebug() << "project monitor connected";
+ b &= connect(m_projectMonitor->render, SIGNAL(audioSamplesSignal(QVector<int16_t>,int,int,int)),
+ m_audioSpectrum, SLOT(slotReceiveAudio(QVector<int16_t>,int,int,int)));
b &= connect(m_projectMonitor->render, SIGNAL(audioSamplesSignal(const QVector<int16_t>&,const int&,const int&, const int&)),
- m_audioSpectrum, SLOT(slotReceiveAudio(const QVector<int16_t>&,const int&,const int&,const int&)));
- b &= connect(m_projectMonitor->render, SIGNAL(showAudioSignal(const QByteArray&)),
- m_audioSpectrum, SLOT(slotReceiveAudioTemp(const QByteArray&)));
- connect(m_projectMonitor->render, SIGNAL(audioSamplesSignal(const QVector<int16_t>&,const int&,const int&, const int&)),
m_audiosignal, SLOT(slotReceiveAudio(const QVector<int16_t>&,const int&,const int&,const int&)));
}
if (m_clipMonitor) {
qDebug() << "clip monitor connected";
- b &= connect(m_clipMonitor->render, SIGNAL(audioSamplesSignal(const QVector<int16_t>&,int,int,int)),
- m_audioSpectrum, SLOT(slotReceiveAudio(const QVector<int16_t>&,int,int,int)));
+ b &= connect(m_clipMonitor->render, SIGNAL(audioSamplesSignal(QVector<int16_t>,int,int,int)),
+ m_audioSpectrum, SLOT(slotReceiveAudio(QVector<int16_t>,int,int,int)));
b &= connect(m_clipMonitor->render, SIGNAL(audioSamplesSignal(const QVector<int16_t>&,int,int,int)),
m_audiosignal, SLOT(slotReceiveAudio(const QVector<int16_t>&,int,int,int)));
}
+ // Ensure connection was set up correctly
Q_ASSERT(b);
m_undoViewDock = new QDockWidget(i18n("Undo History"), this);
int samples = 0;
int16_t* data = (int16_t*)frame.get_audio(audio_format, freq, num_channels, samples);
- QVector<int16_t> sampleVector(samples);
- memcpy(sampleVector.data(), data, samples*sizeof(int16_t));
- //qDebug() << samples << " samples. Freq=" << freq << ", channels=" << num_channels;
-
- if (!data)
+ if (!data) {
return;
- /*int num_samples = samples > 200 ? 200 : samples;
-
+ }
- QByteArray channels;
- for (int i = 0; i < num_channels; i++) {
- long val = 0;
- for (int s = 0; s < num_samples; s ++) {
- val += abs(data[i+s*num_channels] / 128);
- }
- channels.append(val / num_samples);
- }*/
+ // Data format: [ c00 c10 c01 c11 c02 c12 c03 c13 ... c0{samples-1} c1{samples-1} ] for 2 channels.
+ // So the vector is of size samples*channels.
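+ // For example, sample s of channel c is located at data[s*num_channels + c].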
+ QVector<int16_t> sampleVector(samples*num_channels);
+ memcpy(sampleVector.data(), data, samples*num_channels*sizeof(int16_t));
if (samples > 0) {
- //emit showAudioSignal(channels);
- //qDebug() << "Emitting audioSamplesSignal with " << samples << " samples.";
emit audioSamplesSignal(sampleVector, freq, num_channels, samples);
- } else {
- //emit showAudioSignal(QByteArray());
- //qDebug() << "Not emitting audioSamplesSignal.";
}
}
void showAudioSignal(const QByteArray);
/** @brief The renderer refreshed the current frame, but no seeking was done. */
void frameUpdated(QImage);
+ /** @brief This signal contains the audio of the current frame.
+ The samples of all channels are interleaved. */
void audioSamplesSignal(const QVector<int16_t>&, int freq, int num_channels, int num_samples);
public slots: