bug 1221830 use WEBAUDIO_BLOCK_SIZE constant in Reverb methods r=padenot

This commit is contained in:
Karl Tomlinson 2015-11-03 16:35:32 +13:00
parent 9952f12975
commit 3be150faee
7 changed files with 64 additions and 51 deletions

View File

@ -96,7 +96,6 @@ public:
}
mReverb = new WebCore::Reverb(mBuffer, mBufferLength,
WEBAUDIO_BLOCK_SIZE,
MaxFFTSize, 2, mUseBackgroundThreads,
mNormalize, mSampleRate);
}
@ -153,7 +152,7 @@ public:
}
aOutput->AllocateChannels(2);
mReverb->process(&input, aOutput, WEBAUDIO_BLOCK_SIZE);
mReverb->process(&input, aOutput);
}
virtual bool IsActive() const override

View File

@ -77,7 +77,7 @@ static float calculateNormalizationScale(ThreadSharedFloatArrayBufferList* respo
return scale;
}
Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize, float sampleRate)
Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize, float sampleRate)
{
float scale = 1;
@ -101,7 +101,7 @@ Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulse
}
}
initialize(irChannels, impulseResponseBufferLength, renderSliceSize,
initialize(irChannels, impulseResponseBufferLength,
maxFFTSize, numberOfChannels, useBackgroundThreads);
}
@ -121,7 +121,7 @@ size_t Reverb::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
void Reverb::initialize(const nsTArray<const float*>& impulseResponseBuffer,
size_t impulseResponseBufferLength, size_t renderSliceSize,
size_t impulseResponseBufferLength,
size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads)
{
m_impulseResponseLength = impulseResponseBufferLength;
@ -135,10 +135,10 @@ void Reverb::initialize(const nsTArray<const float*>& impulseResponseBuffer,
const float* channel = impulseResponseBuffer[i];
size_t length = impulseResponseBufferLength;
nsAutoPtr<ReverbConvolver> convolver(new ReverbConvolver(channel, length, renderSliceSize, maxFFTSize, convolverRenderPhase, useBackgroundThreads));
nsAutoPtr<ReverbConvolver> convolver(new ReverbConvolver(channel, length, maxFFTSize, convolverRenderPhase, useBackgroundThreads));
m_convolvers.AppendElement(convolver.forget());
convolverRenderPhase += renderSliceSize;
convolverRenderPhase += WEBAUDIO_BLOCK_SIZE;
}
// For "True" stereo processing we allocate a temporary buffer to avoid repeatedly allocating it in the process() method.
@ -149,12 +149,12 @@ void Reverb::initialize(const nsTArray<const float*>& impulseResponseBuffer,
}
}
void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus, size_t framesToProcess)
void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus)
{
// Do a fairly comprehensive sanity check.
// If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases.
bool isSafeToProcess = sourceBus && destinationBus && sourceBus->ChannelCount() > 0 && destinationBus->mChannelData.Length() > 0
&& framesToProcess <= MaxFrameSize && framesToProcess <= size_t(sourceBus->GetDuration()) && framesToProcess <= size_t(destinationBus->GetDuration());
&& WEBAUDIO_BLOCK_SIZE <= MaxFrameSize && WEBAUDIO_BLOCK_SIZE <= size_t(sourceBus->GetDuration()) && WEBAUDIO_BLOCK_SIZE <= size_t(destinationBus->GetDuration());
MOZ_ASSERT(isSafeToProcess);
if (!isSafeToProcess)
@ -175,28 +175,28 @@ void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus, si
// 2 -> 2 -> 2
const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]);
float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
m_convolvers[0]->process(sourceBusL, sourceBus->GetDuration(), destinationChannelL, destinationBus->GetDuration(), framesToProcess);
m_convolvers[1]->process(sourceBusR, sourceBus->GetDuration(), destinationChannelR, destinationBus->GetDuration(), framesToProcess);
m_convolvers[0]->process(sourceBusL, destinationChannelL);
m_convolvers[1]->process(sourceBusR, destinationChannelR);
} else if (numInputChannels == 1 && numOutputChannels == 2 && numReverbChannels == 2) {
// 1 -> 2 -> 2
for (int i = 0; i < 2; ++i) {
float* destinationChannel = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[i]));
m_convolvers[i]->process(sourceBusL, sourceBus->GetDuration(), destinationChannel, destinationBus->GetDuration(), framesToProcess);
m_convolvers[i]->process(sourceBusL, destinationChannel);
}
} else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 2) {
// 1 -> 1 -> 2
m_convolvers[0]->process(sourceBusL, sourceBus->GetDuration(), destinationChannelL, destinationBus->GetDuration(), framesToProcess);
m_convolvers[0]->process(sourceBusL, destinationChannelL);
// simply copy L -> R
float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1]));
bool isCopySafe = destinationChannelL && destinationChannelR && size_t(destinationBus->GetDuration()) >= framesToProcess;
bool isCopySafe = destinationChannelL && destinationChannelR && size_t(destinationBus->GetDuration()) >= WEBAUDIO_BLOCK_SIZE;
MOZ_ASSERT(isCopySafe);
if (!isCopySafe)
return;
PodCopy(destinationChannelR, destinationChannelL, framesToProcess);
PodCopy(destinationChannelR, destinationChannelL, WEBAUDIO_BLOCK_SIZE);
} else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 1) {
// 1 -> 1 -> 1
m_convolvers[0]->process(sourceBusL, sourceBus->GetDuration(), destinationChannelL, destinationBus->GetDuration(), framesToProcess);
m_convolvers[0]->process(sourceBusL, destinationChannelL);
} else if (numInputChannels == 2 && numReverbChannels == 4 && numOutputChannels == 2) {
// 2 -> 4 -> 2 ("True" stereo)
const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]);
@ -206,12 +206,12 @@ void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus, si
float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1]));
// Process left virtual source
m_convolvers[0]->process(sourceBusL, sourceBus->GetDuration(), destinationChannelL, destinationBus->GetDuration(), framesToProcess);
m_convolvers[1]->process(sourceBusL, sourceBus->GetDuration(), destinationChannelR, destinationBus->GetDuration(), framesToProcess);
m_convolvers[0]->process(sourceBusL, destinationChannelL);
m_convolvers[1]->process(sourceBusL, destinationChannelR);
// Process right virtual source
m_convolvers[2]->process(sourceBusR, sourceBus->GetDuration(), tempChannelL, m_tempBuffer.GetDuration(), framesToProcess);
m_convolvers[3]->process(sourceBusR, sourceBus->GetDuration(), tempChannelR, m_tempBuffer.GetDuration(), framesToProcess);
m_convolvers[2]->process(sourceBusR, tempChannelL);
m_convolvers[3]->process(sourceBusR, tempChannelR);
AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->GetDuration());
AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->GetDuration());
@ -224,12 +224,12 @@ void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus, si
float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1]));
// Process left virtual source
m_convolvers[0]->process(sourceBusL, sourceBus->GetDuration(), destinationChannelL, destinationBus->GetDuration(), framesToProcess);
m_convolvers[1]->process(sourceBusL, sourceBus->GetDuration(), destinationChannelR, destinationBus->GetDuration(), framesToProcess);
m_convolvers[0]->process(sourceBusL, destinationChannelL);
m_convolvers[1]->process(sourceBusL, destinationChannelR);
// Process right virtual source
m_convolvers[2]->process(sourceBusL, sourceBus->GetDuration(), tempChannelL, m_tempBuffer.GetDuration(), framesToProcess);
m_convolvers[3]->process(sourceBusL, sourceBus->GetDuration(), tempChannelR, m_tempBuffer.GetDuration(), framesToProcess);
m_convolvers[2]->process(sourceBusL, tempChannelL);
m_convolvers[3]->process(sourceBusL, tempChannelR);
AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->GetDuration());
AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->GetDuration());

View File

@ -48,16 +48,22 @@ public:
enum { MaxFrameSize = 256 };
// The FFT work of the convolver stages is staggered (in WEBAUDIO_BLOCK_SIZE steps) so the FFTs don't all occur at the same time (very bad when rendering on a real-time thread).
Reverb(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize, float sampleRate);
Reverb(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer,
size_t impulseResponseBufferLength, size_t maxFFTSize,
size_t numberOfChannels, bool useBackgroundThreads, bool normalize,
float sampleRate);
void process(const mozilla::AudioBlock* sourceBus, mozilla::AudioBlock* destinationBus, size_t framesToProcess);
void process(const mozilla::AudioBlock* sourceBus,
mozilla::AudioBlock* destinationBus);
size_t impulseResponseLength() const { return m_impulseResponseLength; }
size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
private:
void initialize(const nsTArray<const float*>& impulseResponseBuffer, size_t impulseResponseBufferLength, size_t renderSliceSize, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads);
void initialize(const nsTArray<const float*>& impulseResponseBuffer,
size_t impulseResponseBufferLength, size_t maxFFTSize,
size_t numberOfChannels, bool useBackgroundThreads);
size_t m_impulseResponseLength;

View File

@ -54,9 +54,13 @@ const size_t RealtimeFrameLimit = 8192 + 4096; // ~278msec @ 44.1KHz
const size_t MinFFTSize = 128;
const size_t MaxRealtimeFFTSize = 2048;
ReverbConvolver::ReverbConvolver(const float* impulseResponseData, size_t impulseResponseLength, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads)
ReverbConvolver::ReverbConvolver(const float* impulseResponseData,
size_t impulseResponseLength,
size_t maxFFTSize,
size_t convolverRenderPhase,
bool useBackgroundThreads)
: m_impulseResponseLength(impulseResponseLength)
, m_accumulationBuffer(impulseResponseLength + renderSliceSize)
, m_accumulationBuffer(impulseResponseLength + WEBAUDIO_BLOCK_SIZE)
, m_inputBuffer(InputBufferSize)
, m_minFFTSize(MinFFTSize) // First stage will have this size - successive stages will double in size each time
, m_maxFFTSize(maxFFTSize) // until we hit m_maxFFTSize
@ -94,11 +98,15 @@ ReverbConvolver::ReverbConvolver(const float* impulseResponseData, size_t impuls
stageSize = totalResponseLength - stageOffset;
// This "staggers" the time when each FFT happens so they don't all happen at the same time
int renderPhase = convolverRenderPhase + i * renderSliceSize;
int renderPhase = convolverRenderPhase + i * WEBAUDIO_BLOCK_SIZE;
bool useDirectConvolver = !stageOffset;
nsAutoPtr<ReverbConvolverStage> stage(new ReverbConvolverStage(response, totalResponseLength, reverbTotalLatency, stageOffset, stageSize, fftSize, renderPhase, renderSliceSize, &m_accumulationBuffer, useDirectConvolver));
nsAutoPtr<ReverbConvolverStage> stage
(new ReverbConvolverStage(response, totalResponseLength,
reverbTotalLatency, stageOffset, stageSize,
fftSize, renderPhase,
&m_accumulationBuffer, useDirectConvolver));
bool isBackgroundStage = false;
@ -209,15 +217,9 @@ void ReverbConvolver::backgroundThreadEntry()
}
}
void ReverbConvolver::process(const float* sourceChannelData, size_t sourceChannelLength,
float* destinationChannelData, size_t destinationChannelLength,
size_t framesToProcess)
void ReverbConvolver::process(const float* sourceChannelData,
float* destinationChannelData)
{
bool isSafe = sourceChannelData && destinationChannelData && sourceChannelLength >= framesToProcess && destinationChannelLength >= framesToProcess;
MOZ_ASSERT(isSafe);
if (!isSafe)
return;
const float* source = sourceChannelData;
float* destination = destinationChannelData;
bool isDataSafe = source && destination;
@ -226,14 +228,14 @@ void ReverbConvolver::process(const float* sourceChannelData, size_t sourceChann
return;
// Feed input buffer (read by all threads)
m_inputBuffer.write(source, framesToProcess);
m_inputBuffer.write(source, WEBAUDIO_BLOCK_SIZE);
// Accumulate contributions from each stage
for (size_t i = 0; i < m_stages.Length(); ++i)
m_stages[i]->process(source, framesToProcess);
m_stages[i]->process(source, WEBAUDIO_BLOCK_SIZE);
// Finally read from accumulation buffer
m_accumulationBuffer.readAndClear(destination, framesToProcess);
m_accumulationBuffer.readAndClear(destination, WEBAUDIO_BLOCK_SIZE);
// Now that we've buffered more input, wake up our background thread.

View File

@ -50,12 +50,13 @@ public:
// For certain tweaky de-convolving applications the phase errors add up quickly and lead to non-sensical results with
// larger FFT sizes and single-precision floats. In these cases 2048 is a good size.
// If not doing multi-threaded convolution, then should not go > 8192.
ReverbConvolver(const float* impulseResponseData, size_t impulseResponseLength, size_t renderSliceSize, size_t maxFFTSize, size_t convolverRenderPhase, bool useBackgroundThreads);
ReverbConvolver(const float* impulseResponseData,
size_t impulseResponseLength, size_t maxFFTSize,
size_t convolverRenderPhase, bool useBackgroundThreads);
~ReverbConvolver();
void process(const float* sourceChannelData, size_t sourceChannelLength,
float* destinationChannelData, size_t destinationChannelLength,
size_t framesToProcess);
void process(const float* sourceChannelData,
float* destinationChannelData);
size_t impulseResponseLength() const { return m_impulseResponseLength; }

View File

@ -37,8 +37,13 @@ using namespace mozilla;
namespace WebCore {
ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength,
size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer* accumulationBuffer, bool directMode)
ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t,
size_t reverbTotalLatency,
size_t stageOffset,
size_t stageLength,
size_t fftSize, size_t renderPhase,
ReverbAccumulationBuffer* accumulationBuffer,
bool directMode)
: m_accumulationBuffer(accumulationBuffer)
, m_accumulationReadIndex(0)
, m_inputReadIndex(0)
@ -54,9 +59,9 @@ ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t,
} else {
m_directKernel.SetLength(fftSize / 2);
PodCopy(m_directKernel.Elements(), impulseResponse + stageOffset, fftSize / 2);
m_directConvolver = new DirectConvolver(renderSliceSize);
m_directConvolver = new DirectConvolver(WEBAUDIO_BLOCK_SIZE);
}
m_temporaryBuffer.SetLength(renderSliceSize);
m_temporaryBuffer.SetLength(WEBAUDIO_BLOCK_SIZE);
PodZero(m_temporaryBuffer.Elements(), m_temporaryBuffer.Length());
// The convolution stage at offset stageOffset needs to have a corresponding delay to cancel out the offset.

View File

@ -49,7 +49,7 @@ class ReverbConvolverStage {
public:
// renderPhase is useful to know so that we can manipulate the pre versus post delay so that stages will perform
// their heavy work (FFT processing) on different slices to balance the load in a real-time thread.
ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength, size_t fftSize, size_t renderPhase, size_t renderSliceSize, ReverbAccumulationBuffer*, bool directMode = false);
ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength, size_t fftSize, size_t renderPhase, ReverbAccumulationBuffer*, bool directMode = false);
// WARNING: framesToProcess must be such that it evenly divides the delay buffer size (stage_offset).
void process(const float* source, size_t framesToProcess);