2012-09-18 16:07:33 -07:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
|
|
|
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#include "AudioBufferSourceNode.h"
|
|
|
|
#include "mozilla/dom/AudioBufferSourceNodeBinding.h"
|
2013-02-04 15:07:25 -08:00
|
|
|
#include "nsMathUtils.h"
|
|
|
|
#include "AudioNodeEngine.h"
|
|
|
|
#include "AudioNodeStream.h"
|
2013-04-09 05:47:42 -07:00
|
|
|
#include "AudioDestinationNode.h"
|
2013-03-18 17:54:32 -07:00
|
|
|
#include "speex/speex_resampler.h"
|
2012-09-18 16:07:33 -07:00
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
namespace dom {
|
|
|
|
|
2013-04-09 05:47:42 -07:00
|
|
|
// XPCOM cycle-collection and refcounting boilerplate.
// Traverse/unlink mBuffer and mPlaybackRate in addition to the fields
// handled by the AudioNode base class, and inherit QueryInterface,
// AddRef and Release from AudioNode.
NS_IMPL_CYCLE_COLLECTION_INHERITED_2(AudioBufferSourceNode, AudioNode,
                                     mBuffer, mPlaybackRate)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioNode)
|
2012-09-18 16:07:33 -07:00
|
|
|
|
2013-02-04 15:07:25 -08:00
|
|
|
class AudioBufferSourceNodeEngine : public AudioNodeEngine
|
|
|
|
{
|
|
|
|
public:
|
2013-04-09 05:47:42 -07:00
|
|
|
explicit AudioBufferSourceNodeEngine(AudioDestinationNode* aDestination) :
|
2013-03-10 10:59:41 -07:00
|
|
|
mStart(0), mStop(TRACK_TICKS_MAX),
|
2013-03-18 17:54:32 -07:00
|
|
|
mResampler(nullptr),
|
2013-03-10 10:59:41 -07:00
|
|
|
mOffset(0), mDuration(0),
|
2013-03-18 17:54:32 -07:00
|
|
|
mLoopStart(0), mLoopEnd(0),
|
2013-04-09 05:47:42 -07:00
|
|
|
mSampleRate(0), mPosition(0), mChannels(0), mPlaybackRate(1.0f),
|
|
|
|
mDestination(static_cast<AudioNodeStream*>(aDestination->Stream())),
|
|
|
|
mPlaybackRateTimeline(1.0f), mLoop(false)
|
2013-03-10 10:59:41 -07:00
|
|
|
{}
|
2013-02-04 15:07:25 -08:00
|
|
|
|
2013-03-18 17:54:32 -07:00
|
|
|
~AudioBufferSourceNodeEngine()
|
|
|
|
{
|
|
|
|
if (mResampler) {
|
|
|
|
speex_resampler_destroy(mResampler);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-02-04 15:07:25 -08:00
|
|
|
// START, OFFSET and DURATION are always set by start() (along with setting
|
|
|
|
// mBuffer to something non-null).
|
|
|
|
// STOP is set by stop().
|
|
|
|
enum Parameters {
|
2013-03-18 17:54:32 -07:00
|
|
|
SAMPLE_RATE,
|
2013-02-04 15:07:25 -08:00
|
|
|
START,
|
|
|
|
STOP,
|
|
|
|
OFFSET,
|
2013-03-10 10:59:41 -07:00
|
|
|
DURATION,
|
|
|
|
LOOP,
|
|
|
|
LOOPSTART,
|
2013-04-09 05:47:42 -07:00
|
|
|
LOOPEND,
|
|
|
|
PLAYBACKRATE
|
2013-02-04 15:07:25 -08:00
|
|
|
};
|
2013-04-09 05:47:42 -07:00
|
|
|
virtual void SetTimelineParameter(uint32_t aIndex, const dom::AudioParamTimeline& aValue)
|
|
|
|
{
|
|
|
|
switch (aIndex) {
|
|
|
|
case PLAYBACKRATE:
|
|
|
|
mPlaybackRateTimeline = aValue;
|
|
|
|
// If we have a simple value that is 1.0 (i.e. intrinsic speed), and our
|
|
|
|
// input buffer is already at the ideal audio rate, and we have a
|
|
|
|
// resampler, we can release it.
|
|
|
|
if (mResampler && mPlaybackRateTimeline.HasSimpleValue() &&
|
|
|
|
mPlaybackRateTimeline.GetValue() == 1.0 &&
|
|
|
|
mSampleRate == IdealAudioRate()) {
|
|
|
|
speex_resampler_destroy(mResampler);
|
|
|
|
mResampler = nullptr;
|
|
|
|
}
|
|
|
|
WebAudioUtils::ConvertAudioParamToTicks(mPlaybackRateTimeline, nullptr, mDestination);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
NS_ERROR("Bad GainNodeEngine TimelineParameter");
|
|
|
|
}
|
|
|
|
}
|
2013-02-04 15:07:25 -08:00
|
|
|
virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
|
|
|
|
{
|
|
|
|
switch (aIndex) {
|
|
|
|
case START: mStart = aParam; break;
|
|
|
|
case STOP: mStop = aParam; break;
|
|
|
|
default:
|
|
|
|
NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam)
|
|
|
|
{
|
|
|
|
switch (aIndex) {
|
2013-03-18 17:54:32 -07:00
|
|
|
case SAMPLE_RATE: mSampleRate = aParam; break;
|
2013-02-04 15:07:25 -08:00
|
|
|
case OFFSET: mOffset = aParam; break;
|
|
|
|
case DURATION: mDuration = aParam; break;
|
2013-03-10 10:59:41 -07:00
|
|
|
case LOOP: mLoop = !!aParam; break;
|
|
|
|
case LOOPSTART: mLoopStart = aParam; break;
|
|
|
|
case LOOPEND: mLoopEnd = aParam; break;
|
2013-02-04 15:07:25 -08:00
|
|
|
default:
|
|
|
|
NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
|
|
|
|
{
|
|
|
|
mBuffer = aBuffer;
|
|
|
|
}
|
|
|
|
|
2013-03-18 17:54:32 -07:00
|
|
|
SpeexResamplerState* Resampler(uint32_t aChannels)
|
|
|
|
{
|
|
|
|
if (aChannels != mChannels && mResampler) {
|
|
|
|
speex_resampler_destroy(mResampler);
|
|
|
|
mResampler = nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!mResampler) {
|
|
|
|
mChannels = aChannels;
|
|
|
|
mResampler = speex_resampler_init(mChannels, mSampleRate,
|
|
|
|
IdealAudioRate(),
|
|
|
|
SPEEX_RESAMPLER_QUALITY_DEFAULT,
|
|
|
|
nullptr);
|
|
|
|
}
|
|
|
|
return mResampler;
|
|
|
|
}
|
|
|
|
|
2013-03-10 18:02:22 -07:00
|
|
|
// Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer
|
|
|
|
// at offset aSourceOffset. This avoids copying memory.
|
2013-03-10 15:38:57 -07:00
|
|
|
void BorrowFromInputBuffer(AudioChunk* aOutput,
|
|
|
|
uint32_t aChannels,
|
2013-03-10 18:02:22 -07:00
|
|
|
uintptr_t aSourceOffset)
|
2013-03-10 15:38:57 -07:00
|
|
|
{
|
|
|
|
aOutput->mDuration = WEBAUDIO_BLOCK_SIZE;
|
|
|
|
aOutput->mBuffer = mBuffer;
|
|
|
|
aOutput->mChannelData.SetLength(aChannels);
|
|
|
|
for (uint32_t i = 0; i < aChannels; ++i) {
|
2013-03-10 18:02:22 -07:00
|
|
|
aOutput->mChannelData[i] = mBuffer->GetData(i) + aSourceOffset;
|
2013-03-10 15:38:57 -07:00
|
|
|
}
|
|
|
|
aOutput->mVolume = 1.0f;
|
|
|
|
aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32;
|
|
|
|
}
|
|
|
|
|
2013-03-10 18:02:22 -07:00
|
|
|
// Copy aNumberOfFrames frames from the source buffer at offset aSourceOffset
|
|
|
|
// and put it at offset aBufferOffset in the destination buffer.
|
|
|
|
void CopyFromInputBuffer(AudioChunk* aOutput,
|
|
|
|
uint32_t aChannels,
|
|
|
|
uintptr_t aSourceOffset,
|
|
|
|
uintptr_t aBufferOffset,
|
|
|
|
uint32_t aNumberOfFrames) {
|
|
|
|
for (uint32_t i = 0; i < aChannels; ++i) {
|
|
|
|
float* baseChannelData = static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i]));
|
|
|
|
memcpy(baseChannelData + aBufferOffset,
|
|
|
|
mBuffer->GetData(i) + aSourceOffset,
|
|
|
|
aNumberOfFrames * sizeof(float));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-09 05:47:42 -07:00
|
|
|
// Resamples input data to an output buffer, according to |mSampleRate| and
|
|
|
|
// the playbackRate.
|
2013-03-18 17:54:32 -07:00
|
|
|
// The number of frames consumed/produced depends on the amount of space
|
|
|
|
// remaining in both the input and output buffer, and the playback rate (that
|
|
|
|
// is, the ratio between the output samplerate and the input samplerate).
|
|
|
|
void CopyFromInputBufferWithResampling(AudioChunk* aOutput,
|
|
|
|
uint32_t aChannels,
|
|
|
|
uintptr_t aSourceOffset,
|
|
|
|
uintptr_t aBufferOffset,
|
|
|
|
uint32_t aAvailableInInputBuffer,
|
|
|
|
uint32_t& aFramesRead,
|
|
|
|
uint32_t& aFramesWritten) {
|
|
|
|
// Compute the sample rate we want to resample to.
|
2013-04-09 05:47:42 -07:00
|
|
|
double finalSampleRate = mSampleRate / mPlaybackRate;
|
2013-03-18 17:54:32 -07:00
|
|
|
double finalPlaybackRate = finalSampleRate / IdealAudioRate();
|
|
|
|
uint32_t availableInOuputBuffer = WEBAUDIO_BLOCK_SIZE - aBufferOffset;
|
|
|
|
uint32_t inputSamples, outputSamples;
|
|
|
|
|
|
|
|
// Check if we are short on input or output buffer.
|
|
|
|
if (aAvailableInInputBuffer < availableInOuputBuffer * finalPlaybackRate) {
|
|
|
|
outputSamples = ceil(aAvailableInInputBuffer / finalPlaybackRate);
|
|
|
|
inputSamples = aAvailableInInputBuffer;
|
|
|
|
} else {
|
|
|
|
inputSamples = ceil(availableInOuputBuffer * finalPlaybackRate);
|
|
|
|
outputSamples = availableInOuputBuffer;
|
|
|
|
}
|
|
|
|
|
|
|
|
SpeexResamplerState* resampler = Resampler(aChannels);
|
|
|
|
|
|
|
|
for (uint32_t i = 0; i < aChannels; ++i) {
|
|
|
|
uint32_t inSamples = inputSamples;
|
|
|
|
uint32_t outSamples = outputSamples;
|
|
|
|
|
|
|
|
const float* inputData = mBuffer->GetData(i) + aSourceOffset;
|
|
|
|
float* outputData =
|
|
|
|
static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])) +
|
|
|
|
aBufferOffset;
|
|
|
|
|
|
|
|
speex_resampler_process_float(resampler, i,
|
|
|
|
inputData, &inSamples,
|
|
|
|
outputData, &outSamples);
|
|
|
|
|
|
|
|
aFramesRead = inSamples;
|
|
|
|
aFramesWritten = outSamples;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-03-10 18:02:22 -07:00
|
|
|
/**
|
|
|
|
* Fill aOutput with as many zero frames as we can, and advance
|
|
|
|
* aOffsetWithinBlock and aCurrentPosition based on how many frames we write.
|
|
|
|
* This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or
|
|
|
|
* aCurrentPosition past aMaxPos. This function knows when it needs to
|
|
|
|
* allocate the output buffer, and also optimizes the case where it can avoid
|
|
|
|
* memory allocations.
|
|
|
|
*/
|
|
|
|
void FillWithZeroes(AudioChunk* aOutput,
|
|
|
|
uint32_t aChannels,
|
|
|
|
uint32_t* aOffsetWithinBlock,
|
|
|
|
TrackTicks* aCurrentPosition,
|
|
|
|
TrackTicks aMaxPos)
|
|
|
|
{
|
|
|
|
uint32_t numFrames = std::min(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
|
|
|
|
uint32_t(aMaxPos - *aCurrentPosition));
|
|
|
|
if (numFrames == WEBAUDIO_BLOCK_SIZE) {
|
|
|
|
aOutput->SetNull(numFrames);
|
|
|
|
} else {
|
|
|
|
if (aOutput->IsNull()) {
|
|
|
|
AllocateAudioBlock(aChannels, aOutput);
|
|
|
|
}
|
|
|
|
WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames);
|
|
|
|
}
|
|
|
|
*aOffsetWithinBlock += numFrames;
|
|
|
|
*aCurrentPosition += numFrames;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Copy as many frames as possible from the source buffer to aOutput, and
|
|
|
|
* advance aOffsetWithinBlock and aCurrentPosition based on how many frames
|
|
|
|
* we copy. This will never advance aOffsetWithinBlock past
|
|
|
|
* WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop. It takes data from
|
|
|
|
* the buffer at aBufferOffset, and never takes more data than aBufferMax.
|
|
|
|
* This function knows when it needs to allocate the output buffer, and also
|
|
|
|
* optimizes the case where it can avoid memory allocations.
|
|
|
|
*/
|
|
|
|
void CopyFromBuffer(AudioChunk* aOutput,
|
|
|
|
uint32_t aChannels,
|
|
|
|
uint32_t* aOffsetWithinBlock,
|
|
|
|
TrackTicks* aCurrentPosition,
|
|
|
|
uint32_t aBufferOffset,
|
|
|
|
uint32_t aBufferMax)
|
|
|
|
{
|
|
|
|
uint32_t numFrames = std::min(std::min(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock,
|
|
|
|
aBufferMax - aBufferOffset),
|
|
|
|
uint32_t(mStop - *aCurrentPosition));
|
2013-03-18 17:54:32 -07:00
|
|
|
if (numFrames == WEBAUDIO_BLOCK_SIZE &&
|
2013-04-09 05:47:42 -07:00
|
|
|
mSampleRate == IdealAudioRate() &&
|
|
|
|
mPlaybackRate == 1.0f) {
|
2013-03-10 18:02:22 -07:00
|
|
|
BorrowFromInputBuffer(aOutput, aChannels, aBufferOffset);
|
2013-03-18 17:54:32 -07:00
|
|
|
*aOffsetWithinBlock += numFrames;
|
|
|
|
*aCurrentPosition += numFrames;
|
|
|
|
mPosition += numFrames;
|
2013-03-10 18:02:22 -07:00
|
|
|
} else {
|
|
|
|
if (aOutput->IsNull()) {
|
2013-03-18 17:54:32 -07:00
|
|
|
MOZ_ASSERT(*aOffsetWithinBlock == 0);
|
2013-03-10 18:02:22 -07:00
|
|
|
AllocateAudioBlock(aChannels, aOutput);
|
|
|
|
}
|
2013-04-09 05:47:42 -07:00
|
|
|
if (mSampleRate == IdealAudioRate() && mPlaybackRate == 1.0f) {
|
2013-03-18 17:54:32 -07:00
|
|
|
CopyFromInputBuffer(aOutput, aChannels, aBufferOffset, *aOffsetWithinBlock, numFrames);
|
|
|
|
*aOffsetWithinBlock += numFrames;
|
|
|
|
*aCurrentPosition += numFrames;
|
|
|
|
mPosition += numFrames;
|
|
|
|
} else {
|
|
|
|
uint32_t framesRead, framesWritten, availableInInputBuffer;
|
|
|
|
|
|
|
|
availableInInputBuffer = aBufferMax - aBufferOffset;
|
|
|
|
|
|
|
|
CopyFromInputBufferWithResampling(aOutput, aChannels, aBufferOffset, *aOffsetWithinBlock, availableInInputBuffer, framesRead, framesWritten);
|
|
|
|
*aOffsetWithinBlock += framesWritten;
|
|
|
|
*aCurrentPosition += framesRead;
|
|
|
|
mPosition += framesRead;
|
|
|
|
}
|
2013-03-10 18:02:22 -07:00
|
|
|
}
|
2013-03-18 17:54:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
TrackTicks GetPosition(AudioNodeStream* aStream)
|
|
|
|
{
|
|
|
|
if (aStream->GetCurrentPosition() < mStart) {
|
|
|
|
return aStream->GetCurrentPosition();
|
|
|
|
}
|
|
|
|
return mStart + mPosition;
|
2013-03-10 18:02:22 -07:00
|
|
|
}
|
|
|
|
|
2013-02-04 15:07:25 -08:00
|
|
|
virtual void ProduceAudioBlock(AudioNodeStream* aStream,
|
|
|
|
const AudioChunk& aInput,
|
|
|
|
AudioChunk* aOutput,
|
|
|
|
bool* aFinished)
|
|
|
|
{
|
|
|
|
if (!mBuffer)
|
|
|
|
return;
|
|
|
|
|
|
|
|
uint32_t channels = mBuffer->GetChannels();
|
|
|
|
if (!channels) {
|
|
|
|
aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-04-09 05:47:42 -07:00
|
|
|
// WebKit treats the playbackRate as a k-rate parameter in their code,
|
|
|
|
// despite the spec saying that it should be an a-rate parameter. We treat
|
|
|
|
// it as k-rate. Spec bug: https://www.w3.org/Bugs/Public/show_bug.cgi?id=21592
|
|
|
|
float newPlaybackRate;
|
|
|
|
if (mPlaybackRateTimeline.HasSimpleValue()) {
|
|
|
|
newPlaybackRate = mPlaybackRateTimeline.GetValue();
|
|
|
|
} else {
|
|
|
|
newPlaybackRate = mPlaybackRateTimeline.GetValueAtTime<TrackTicks>(aStream->GetCurrentPosition());
|
|
|
|
}
|
|
|
|
if (newPlaybackRate != mPlaybackRate) {
|
|
|
|
mPlaybackRate = newPlaybackRate;
|
|
|
|
speex_resampler_set_rate(Resampler(mChannels), mSampleRate, mSampleRate / mPlaybackRate);
|
|
|
|
}
|
|
|
|
|
2013-03-10 18:02:22 -07:00
|
|
|
uint32_t written = 0;
|
2013-03-18 17:54:32 -07:00
|
|
|
TrackTicks currentPosition = GetPosition(aStream);
|
2013-03-10 18:02:22 -07:00
|
|
|
while (written < WEBAUDIO_BLOCK_SIZE) {
|
|
|
|
if (mStop != TRACK_TICKS_MAX &&
|
|
|
|
currentPosition >= mStop) {
|
|
|
|
FillWithZeroes(aOutput, channels, &written, ¤tPosition, TRACK_TICKS_MAX);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (currentPosition < mStart) {
|
|
|
|
FillWithZeroes(aOutput, channels, &written, ¤tPosition, mStart);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
TrackTicks t = currentPosition - mStart;
|
|
|
|
if (mLoop) {
|
|
|
|
if (mOffset + t < mLoopEnd) {
|
|
|
|
CopyFromBuffer(aOutput, channels, &written, ¤tPosition, mOffset + t, mLoopEnd);
|
|
|
|
} else {
|
|
|
|
uint32_t offsetInLoop = (mOffset + t - mLoopEnd) % (mLoopEnd - mLoopStart);
|
|
|
|
CopyFromBuffer(aOutput, channels, &written, ¤tPosition, mLoopStart + offsetInLoop, mLoopEnd);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (mOffset + t < mDuration) {
|
|
|
|
CopyFromBuffer(aOutput, channels, &written, ¤tPosition, mOffset + t, mDuration);
|
|
|
|
} else {
|
|
|
|
FillWithZeroes(aOutput, channels, &written, ¤tPosition, TRACK_TICKS_MAX);
|
|
|
|
}
|
|
|
|
}
|
2013-02-04 15:07:25 -08:00
|
|
|
}
|
|
|
|
|
2013-03-10 18:02:22 -07:00
|
|
|
// We've finished if we've gone past mStop, or if we're past mDuration when
|
|
|
|
// looping is disabled.
|
|
|
|
if (currentPosition >= mStop ||
|
|
|
|
(!mLoop && currentPosition - mStart + mOffset > mDuration)) {
|
|
|
|
*aFinished = true;
|
2013-02-04 15:07:25 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
TrackTicks mStart;
|
|
|
|
TrackTicks mStop;
|
|
|
|
nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
|
2013-03-18 17:54:32 -07:00
|
|
|
SpeexResamplerState* mResampler;
|
2013-02-04 15:07:25 -08:00
|
|
|
int32_t mOffset;
|
|
|
|
int32_t mDuration;
|
2013-03-10 10:59:41 -07:00
|
|
|
int32_t mLoopStart;
|
|
|
|
int32_t mLoopEnd;
|
2013-03-18 17:54:32 -07:00
|
|
|
int32_t mSampleRate;
|
|
|
|
uint32_t mPosition;
|
|
|
|
uint32_t mChannels;
|
2013-04-09 05:47:42 -07:00
|
|
|
float mPlaybackRate;
|
|
|
|
AudioNodeStream* mDestination;
|
|
|
|
AudioParamTimeline mPlaybackRateTimeline;
|
2013-03-18 17:54:32 -07:00
|
|
|
bool mLoop;
|
2013-02-04 15:07:25 -08:00
|
|
|
};
|
|
|
|
|
2012-09-18 16:07:33 -07:00
|
|
|
// Constructs the node, creates its engine and internal stream on the
// context's graph, and registers for main-thread state notifications so we
// can stop producing output when the stream finishes.
AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext)
  : AudioNode(aContext)
  , mLoopStart(0.0)
  , mLoopEnd(0.0)
  , mLoop(false)
  , mStartCalled(false)
  , mPlaybackRate(new AudioParam(this, SendPlaybackRateToStream, 1.0f))
{
  SetProduceOwnOutput(true);
  mStream = aContext->Graph()->CreateAudioNodeStream(
      new AudioBufferSourceNodeEngine(aContext->Destination()),
      MediaStreamGraph::INTERNAL_STREAM);
  mStream->AddMainThreadListener(this);
}
|
|
|
|
|
|
|
|
// Tears down the node's media stream (removes the main-thread listener and
// destroys the stream) before the object goes away.
AudioBufferSourceNode::~AudioBufferSourceNode()
{
  DestroyMediaStream();
}
|
|
|
|
|
|
|
|
// Creates the JS reflector for this node via the WebIDL binding layer.
JSObject*
AudioBufferSourceNode::WrapObject(JSContext* aCx, JSObject* aScope)
{
  return AudioBufferSourceNodeBinding::Wrap(aCx, aScope, this);
}
|
|
|
|
|
2012-09-24 20:31:58 -07:00
|
|
|
/**
 * Implements AudioBufferSourceNode.start(when, offset, duration).
 *
 * @param aCx       JS context, used to get the shared channel data.
 * @param aWhen     Time (in seconds, context timeline) at which to start.
 * @param aOffset   Offset (in seconds) into the buffer to start playing from.
 * @param aDuration Optional duration (in seconds) to play.
 * @param aRv       Throws NS_ERROR_DOM_INVALID_STATE_ERR if start() was
 *                  already called on this node.
 *
 * Sends the buffer and the start/offset/duration (and, when looping is
 * enabled, the loop range) to the engine on the graph thread, converting
 * seconds into ticks at the buffer's sample rate.
 */
void
AudioBufferSourceNode::Start(JSContext* aCx, double aWhen, double aOffset,
                             const Optional<double>& aDuration, ErrorResult& aRv)
{
  if (mStartCalled) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }
  mStartCalled = true;

  AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
  if (!mBuffer || !ns) {
    // Nothing to play, or we're already dead for some reason
    return;
  }

  uint32_t rate;
  uint32_t lengthSamples;
  nsRefPtr<ThreadSharedFloatArrayBufferList> data =
    mBuffer->GetThreadSharedChannelsForRate(aCx, &rate, &lengthSamples);
  // Buffer length in seconds, at the buffer's own sample rate.
  double length = double(lengthSamples) / rate;
  double offset = std::max(0.0, aOffset);
  double endOffset = aDuration.WasPassed() ?
                     std::min(aOffset + aDuration.Value(), length) : length;

  // An empty (or inverted) play window: nothing to do.
  if (offset >= endOffset) {
    return;
  }

  // Don't compute and set the loop parameters unnecessarily
  if (mLoop) {
    double actualLoopStart, actualLoopEnd;
    // Use the user-supplied loop range only if it is sane (non-negative
    // start, positive end, start < end); otherwise loop the whole buffer.
    if (((mLoopStart != 0.0) || (mLoopEnd != 0.0)) &&
        mLoopStart >= 0.0 && mLoopEnd > 0.0 &&
        mLoopStart < mLoopEnd) {
      actualLoopStart = (mLoopStart > length) ? 0.0 : mLoopStart;
      actualLoopEnd = std::min(mLoopEnd, length);
    } else {
      actualLoopStart = 0.0;
      actualLoopEnd = length;
    }
    int32_t loopStartTicks = NS_lround(actualLoopStart * rate);
    int32_t loopEndTicks = NS_lround(actualLoopEnd * rate);
    ns->SetInt32Parameter(AudioBufferSourceNodeEngine::LOOP, 1);
    ns->SetInt32Parameter(AudioBufferSourceNodeEngine::LOOPSTART, loopStartTicks);
    ns->SetInt32Parameter(AudioBufferSourceNodeEngine::LOOPEND, loopEndTicks);
  }

  ns->SetBuffer(data.forget());
  // Don't set parameter unnecessarily
  if (aWhen > 0.0) {
    ns->SetStreamTimeParameter(AudioBufferSourceNodeEngine::START,
                               Context()->DestinationStream(),
                               aWhen);
  }
  int32_t offsetTicks = NS_lround(offset*rate);
  // Don't set parameter unnecessarily
  if (offsetTicks > 0) {
    ns->SetInt32Parameter(AudioBufferSourceNodeEngine::OFFSET, offsetTicks);
  }
  ns->SetInt32Parameter(AudioBufferSourceNodeEngine::DURATION,
                        NS_lround(endOffset*rate) - offsetTicks);
  ns->SetInt32Parameter(AudioBufferSourceNodeEngine::SAMPLE_RATE, rate);
}
|
|
|
|
|
2013-02-04 15:07:25 -08:00
|
|
|
void
|
|
|
|
AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv)
|
|
|
|
{
|
|
|
|
if (!mStartCalled) {
|
|
|
|
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
AudioNodeStream* ns = static_cast<AudioNodeStream*>(mStream.get());
|
|
|
|
if (!ns) {
|
|
|
|
// We've already stopped and had our stream shut down
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ns->SetStreamTimeParameter(AudioBufferSourceNodeEngine::STOP,
|
|
|
|
Context()->DestinationStream(),
|
|
|
|
std::max(0.0, aWhen));
|
2012-09-18 16:07:33 -07:00
|
|
|
}
|
2013-02-04 15:07:25 -08:00
|
|
|
|
|
|
|
void
|
|
|
|
AudioBufferSourceNode::NotifyMainThreadStateChanged()
|
|
|
|
{
|
|
|
|
if (mStream->IsFinished()) {
|
|
|
|
SetProduceOwnOutput(false);
|
|
|
|
}
|
2012-09-18 16:07:33 -07:00
|
|
|
}
|
|
|
|
|
2013-04-09 05:47:42 -07:00
|
|
|
void
|
|
|
|
AudioBufferSourceNode::SendPlaybackRateToStream(AudioNode* aNode)
|
|
|
|
{
|
|
|
|
AudioBufferSourceNode* This = static_cast<AudioBufferSourceNode*>(aNode);
|
|
|
|
SendTimelineParameterToStream(This, AudioBufferSourceNodeEngine::PLAYBACKRATE, *This->mPlaybackRate);
|
|
|
|
}
|
|
|
|
|
2013-02-04 15:07:25 -08:00
|
|
|
}
|
|
|
|
}
|