}
}
-void RotoWidget::setupTrackingListen(ItemInfo info)
+void RotoWidget::setupTrackingListen(const ItemInfo &info)
{
if (info.startPos < GenTime()) {
// TODO: track effects
QList <BPoint> getPoints(int keyframe);
/** @brief Adds tracking_finished as listener for "tracking-finished" event in MLT rotoscoping filter. */
- void setupTrackingListen(ItemInfo info);
+ void setupTrackingListen(const ItemInfo &info);
/** @brief Passes list of keyframe positions to keyframe timeline widget. */
void keyframeTimelineFullUpdate();
{
}
-void AbstractAudioScopeWidget::slotReceiveAudio(QVector<int16_t> sampleData, int freq, int num_channels, int num_samples)
+void AbstractAudioScopeWidget::slotReceiveAudio(const QVector<int16_t>& sampleData, int freq, int num_channels, int num_samples)
{
#ifdef DEBUG_AASW
qDebug() << "Received audio for " << widgetName() << ".";
AbstractScopeWidget::slotRenderZoneUpdated();
}
-AbstractAudioScopeWidget::~AbstractAudioScopeWidget() {}
+// Empty destructor, expanded from the one-line form to the project's
+// brace-on-its-own-line style. Purely a formatting change — no behavior change.
+AbstractAudioScopeWidget::~AbstractAudioScopeWidget()
+{
+}
QImage AbstractAudioScopeWidget::renderScope(uint accelerationFactor)
{
-    int newData = m_newData.fetchAndStoreAcquire(0);
+    // Atomically read and reset m_newData (acquire ordering), so the amount of
+    // audio received since the last render is reported to renderAudioScope()
+    // exactly once. Presumably m_newData is incremented by slotReceiveAudio()
+    // — TODO(review) confirm against that slot's implementation.
+    // `const` added: newData is never modified after this point.
+    const int newData = m_newData.fetchAndStoreAcquire(0);
    return renderAudioScope(accelerationFactor, m_audioFrame, m_freq, m_nChannels, m_nSamples, newData);
}
virtual ~AbstractAudioScopeWidget();
public slots:
- void slotReceiveAudio(QVector<int16_t> sampleData, int freq, int num_channels, int num_samples);
+ void slotReceiveAudio(const QVector<int16_t> &sampleData, int freq, int num_channels, int num_samples);
protected:
/** @brief This is just a wrapper function, subclasses can use renderAudioScope. */
when calculation has finished, to allow multi-threading.
accelerationFactor hints how much faster than usual the calculation should be accomplished, if possible. */
virtual QImage renderAudioScope(uint accelerationFactor,
- const QVector<int16_t> audioFrame, const int freq, const int num_channels, const int num_samples,
+ const QVector<int16_t> &audioFrame, const int freq, const int num_channels, const int num_samples,
const int newData) = 0;
int m_freq;
{
}
-QImage AudioSignal::renderAudioScope(uint, const QVector<int16_t> audioFrame,
+QImage AudioSignal::renderAudioScope(uint, const QVector<int16_t> &audioFrame,
const int, const int num_channels, const int samples, const int)
{
QTime start = QTime::currentTime();
QRect scopeRect();
QImage renderHUD(uint accelerationFactor);
QImage renderBackground(uint accelerationFactor);
- QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> audioFrame, const int, const int num_channels, const int samples, const int);
+ QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> &audioFrame, const int, const int num_channels, const int samples, const int);
QString widgetName() const { return "audioSignal"; }
bool isHUDDependingOnInput() const { return false; }
scopeConfig.sync();
}
-QString AudioSpectrum::widgetName() const { return QString("AudioSpectrum"); }
-bool AudioSpectrum::isBackgroundDependingOnInput() const { return false; }
-bool AudioSpectrum::isScopeDependingOnInput() const { return true; }
-bool AudioSpectrum::isHUDDependingOnInput() const { return false; }
+// Name identifying this scope widget. QLatin1String avoids the UTF-16
+// conversion that constructing via QString("literal") performs at runtime.
+QString AudioSpectrum::widgetName() const
+{
+    return QLatin1String("AudioSpectrum");
+}
-QImage AudioSpectrum::renderBackground(uint) { return QImage(); }
+// The background layer never depends on the incoming audio data.
+bool AudioSpectrum::isBackgroundDependingOnInput() const
+{
+    return false;
+}
+
+// The scope layer must be re-rendered whenever new audio data arrives.
+bool AudioSpectrum::isScopeDependingOnInput() const
+{
+    return true;
+}
+
+// The HUD overlay is independent of the audio input.
+bool AudioSpectrum::isHUDDependingOnInput() const
+{
+    return false;
+}
+
+// No background is drawn; a null QImage leaves the layer empty.
+QImage AudioSpectrum::renderBackground(uint)
+{
+    return QImage();
+}
-QImage AudioSpectrum::renderAudioScope(uint, const QVector<int16_t> audioFrame, const int freq, const int num_channels,
+QImage AudioSpectrum::renderAudioScope(uint, const QVector<int16_t> &audioFrame, const int freq, const int num_channels,
const int num_samples, const int)
{
if (
///// Implemented methods /////
QRect scopeRect();
QImage renderHUD(uint accelerationFactor);
- QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> audioFrame, const int freq, const int num_channels, const int num_samples, const int newData);
+ QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> &audioFrame, const int freq, const int num_channels, const int num_samples, const int newData);
QImage renderBackground(uint accelerationFactor);
bool isHUDDependingOnInput() const;
bool isScopeDependingOnInput() const;
scopeConfig.sync();
}
-QString Spectrogram::widgetName() const { return QString("Spectrogram"); }
+// Name identifying this scope widget. QLatin1String avoids the UTF-16
+// conversion that constructing via QString("literal") performs at runtime.
+QString Spectrogram::widgetName() const
+{
+    return QLatin1String("Spectrogram");
+}
QRect Spectrogram::scopeRect()
{
return QImage();
}
}
-QImage Spectrogram::renderAudioScope(uint, const QVector<int16_t> audioFrame, const int freq,
+QImage Spectrogram::renderAudioScope(uint, const QVector<int16_t> &audioFrame, const int freq,
const int num_channels, const int num_samples, const int newData) {
if (
audioFrame.size() > 63
///// Implemented methods /////
QRect scopeRect();
QImage renderHUD(uint accelerationFactor);
- QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> audioFrame, const int freq, const int num_channels, const int num_samples, const int newData);
+ QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> &audioFrame, const int freq, const int num_channels, const int num_samples, const int newData);
QImage renderBackground(uint accelerationFactor);
bool isHUDDependingOnInput() const;
bool isScopeDependingOnInput() const;