Bug 856361. Part 3: Refactor AudioNodeStream to create ComputeFinalOuputChannelCount, AccumulateInputChunk and AdvanceOutputSegment, and make AudioNodeStreams be treated as always consumed by the MediaStreamGraph. r=ehsan

--HG--
extra : rebase_source : 3c1fe8769dc616d0ea70f00e41d6952346ead491
Robert O'Callahan 2013-07-24 22:11:35 +12:00
parent 4eea200b92
commit 410fc1c903
5 changed files with 118 additions and 65 deletions

content/media/AudioNodeStream.cpp

@@ -248,6 +248,24 @@ AudioNodeStream::AllInputsFinished() const
   return !!inputCount;
 }
 
+uint32_t
+AudioNodeStream::ComputeFinalOuputChannelCount(uint32_t aInputChannelCount)
+{
+  switch (mChannelCountMode) {
+  case ChannelCountMode::Explicit:
+    // Disregard the channel count we've calculated from inputs, and just use
+    // mNumberOfInputChannels.
+    return mNumberOfInputChannels;
+  case ChannelCountMode::Clamped_max:
+    // Clamp the computed output channel count to mNumberOfInputChannels.
+    return std::min(aInputChannelCount, mNumberOfInputChannels);
+  default:
+  case ChannelCountMode::Max:
+    // Nothing to do here, just shut up the compiler warning.
+    return aInputChannelCount;
+  }
+}
+
 void
 AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
 {
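The extracted helper makes the channelCountMode policy testable in isolation. Below is a minimal standalone sketch (a hypothetical re-implementation, not the Gecko code; the enum and parameter names just mirror the diff above) showing how the three modes resolve an output channel count. Folding `default:` into the `Max` case keeps the switch total, which is what the "shut up the compiler warning" comment is about.

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the Gecko enum.
enum class ChannelCountMode { Max, Clamped_max, Explicit };

// Illustration-only re-implementation of the extracted helper.
uint32_t ComputeFinalOuputChannelCount(ChannelCountMode aMode,
                                       uint32_t aNodeChannelCount,   // mNumberOfInputChannels
                                       uint32_t aInputChannelCount)  // superset of the input chunks
{
  switch (aMode) {
  case ChannelCountMode::Explicit:
    return aNodeChannelCount;                               // ignore the inputs entirely
  case ChannelCountMode::Clamped_max:
    return std::min(aInputChannelCount, aNodeChannelCount); // cap at the node's count
  case ChannelCountMode::Max:
  default:
    return aInputChannelCount;                              // follow the inputs
  }
}

int main()
{
  // A stereo node (channel count 2) fed by a 6-channel (5.1) input:
  std::printf("%u\n", (unsigned)ComputeFinalOuputChannelCount(ChannelCountMode::Explicit, 2, 6));    // 2
  std::printf("%u\n", (unsigned)ComputeFinalOuputChannelCount(ChannelCountMode::Clamped_max, 2, 6)); // 2
  std::printf("%u\n", (unsigned)ComputeFinalOuputChannelCount(ChannelCountMode::Max, 2, 6));         // 6
}
```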
@@ -277,20 +295,7 @@ AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
       GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
   }
 
-  switch (mChannelCountMode) {
-  case ChannelCountMode::Explicit:
-    // Disregard the output channel count that we've calculated, and just use
-    // mNumberOfInputChannels.
-    outputChannelCount = mNumberOfInputChannels;
-    break;
-  case ChannelCountMode::Clamped_max:
-    // Clamp the computed output channel count to mNumberOfInputChannels.
-    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
-    break;
-  case ChannelCountMode::Max:
-    // Nothing to do here, just shut up the compiler warning.
-    break;
-  }
+  outputChannelCount = ComputeFinalOuputChannelCount(outputChannelCount);
 
   uint32_t inputChunkCount = inputChunks.Length();
   if (inputChunkCount == 0 ||
@@ -311,63 +316,80 @@ AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
   }
 
   AllocateAudioBlock(outputChannelCount, &aTmpChunk);
-  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
   // The static storage here should be 1KB, so it's fine
   nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
   for (uint32_t i = 0; i < inputChunkCount; ++i) {
-    AudioChunk* chunk = inputChunks[i];
-    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
-    channels.AppendElements(chunk->mChannelData);
-    if (channels.Length() < outputChannelCount) {
-      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
-        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
-        NS_ASSERTION(outputChannelCount == channels.Length(),
-                     "We called GetAudioChannelsSuperset to avoid this");
-      } else {
-        // Fill up the remaining channels by zeros
-        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
-          channels.AppendElement(silenceChannel);
-        }
-      }
-    } else if (channels.Length() > outputChannelCount) {
-      if (mChannelInterpretation == ChannelInterpretation::Speakers) {
-        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
-        outputChannels.SetLength(outputChannelCount);
-        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
-        for (uint32_t j = 0; j < outputChannelCount; ++j) {
-          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
-        }
-        AudioChannelsDownMix(channels, outputChannels.Elements(),
-                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);
-        channels.SetLength(outputChannelCount);
-        for (uint32_t j = 0; j < channels.Length(); ++j) {
-          channels[j] = outputChannels[j];
-        }
-      } else {
-        // Drop the remaining channels
-        channels.RemoveElementsAt(outputChannelCount,
-                                  channels.Length() - outputChannelCount);
-      }
-    }
-    for (uint32_t c = 0; c < channels.Length(); ++c) {
-      const float* inputData = static_cast<const float*>(channels[c]);
-      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
-      if (inputData) {
-        if (i == 0) {
-          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
-        } else {
-          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
-        }
-      } else {
-        if (i == 0) {
-          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
-        }
-      }
-    }
+    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
   }
 }
+
+void
+AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
+                                      AudioChunk* aBlock,
+                                      nsTArray<float>* aDownmixBuffer)
+{
+  nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
+  UpMixDownMixChunk(&aChunk, aBlock->mChannelData.Length(), channels, *aDownmixBuffer);
+
+  for (uint32_t c = 0; c < channels.Length(); ++c) {
+    const float* inputData = static_cast<const float*>(channels[c]);
+    float* outputData = static_cast<float*>(const_cast<void*>(aBlock->mChannelData[c]));
+    if (inputData) {
+      if (aInputIndex == 0) {
+        AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData);
+      } else {
+        AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData);
+      }
+    } else {
+      if (aInputIndex == 0) {
+        PodZero(outputData, WEBAUDIO_BLOCK_SIZE);
+      }
+    }
+  }
+}
+
+void
+AudioNodeStream::UpMixDownMixChunk(const AudioChunk* aChunk,
+                                   uint32_t aOutputChannelCount,
+                                   nsTArray<const void*>& aOutputChannels,
+                                   nsTArray<float>& aDownmixBuffer)
+{
+  static const float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
+
+  aOutputChannels.AppendElements(aChunk->mChannelData);
+  if (aOutputChannels.Length() < aOutputChannelCount) {
+    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
+      AudioChannelsUpMix(&aOutputChannels, aOutputChannelCount, nullptr);
+      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
+                   "We called GetAudioChannelsSuperset to avoid this");
+    } else {
+      // Fill up the remaining aOutputChannels by zeros
+      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
+        aOutputChannels.AppendElement(silenceChannel);
+      }
+    }
+  } else if (aOutputChannels.Length() > aOutputChannelCount) {
+    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
+      nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
+      outputChannels.SetLength(aOutputChannelCount);
+      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
+      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
+        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
+      }
+      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
+                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);
+      aOutputChannels.SetLength(aOutputChannelCount);
+      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
+        aOutputChannels[j] = outputChannels[j];
+      }
+    } else {
+      // Drop the remaining aOutputChannels
+      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
+                                       aOutputChannels.Length() - aOutputChannelCount);
+    }
+  }
+}
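Note the accumulation pattern above: the first input chunk initializes the output block with a scaled copy, and every later chunk adds into it, so the block never needs a separate zero-fill pass (except for null channels, via PodZero). A small self-contained sketch of that copy-then-add idea, with hypothetical scalar helpers standing in for the real SIMD routines and the block size shrunk from WEBAUDIO_BLOCK_SIZE's 128 frames for brevity:

```cpp
#include <cstddef>
#include <cstdio>

const size_t BLOCK_SIZE = 4; // stand-in for WEBAUDIO_BLOCK_SIZE (128 in Gecko)

// Hypothetical scalar versions of AudioBlockCopyChannelWithScale /
// AudioBlockAddChannelWithScale, for illustration only.
void CopyWithScale(const float* aIn, float aScale, float* aOut)
{
  for (size_t i = 0; i < BLOCK_SIZE; ++i) aOut[i] = aIn[i] * aScale;
}
void AddWithScale(const float* aIn, float aScale, float* aOut)
{
  for (size_t i = 0; i < BLOCK_SIZE; ++i) aOut[i] += aIn[i] * aScale;
}

int main()
{
  float input0[BLOCK_SIZE] = {1, 1, 1, 1};
  float input1[BLOCK_SIZE] = {2, 2, 2, 2};
  float mix[BLOCK_SIZE]; // deliberately left uninitialized

  // Input 0 initializes the block (no prior zero-fill needed)...
  CopyWithScale(input0, 0.5f, mix);
  // ...and every later input accumulates into it.
  AddWithScale(input1, 1.0f, mix);

  std::printf("%g\n", mix[0]); // 2.5 = 1*0.5 + 2*1.0
}
```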
@@ -383,9 +405,7 @@ AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
     FinishOutput();
   }
 
-  StreamBuffer::Track* track = EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
-  AudioSegment* segment = track->Get<AudioSegment>();
+  EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
 
   uint16_t outputCount = std::max(uint16_t(1), mEngine->OutputCount());
   mLastChunks.SetLength(outputCount);
@@ -424,6 +444,15 @@ AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
     }
   }
 
+  AdvanceOutputSegment();
+}
+
+void
+AudioNodeStream::AdvanceOutputSegment()
+{
+  StreamBuffer::Track* track = EnsureTrack(AUDIO_NODE_STREAM_TRACK_ID, mSampleRate);
+  AudioSegment* segment = track->Get<AudioSegment>();
+
   if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
     segment->AppendAndConsumeChunk(&mLastChunks[0]);
   } else {
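For an EXTERNAL_STREAM, the new AdvanceOutputSegment helper hands the computed chunk to the segment via AppendAndConsumeChunk, which takes over the chunk's buffers rather than copying them. A rough sketch of that consume-versus-copy distinction, using hypothetical simplified types (the real AudioChunk refcounts shared channel buffers):

```cpp
#include <memory>
#include <utility>
#include <vector>

// Hypothetical, simplified chunk: one shared buffer of samples.
struct Chunk {
  std::shared_ptr<std::vector<float>> mBuffer;
};

struct Segment {
  std::vector<Chunk> mChunks;

  // Plain append copies the (shared) buffer handle; the caller keeps its chunk.
  void AppendChunk(const Chunk& aChunk) { mChunks.push_back(aChunk); }

  // "Append and consume" steals the buffer, leaving the caller's chunk empty
  // and making the ownership handoff explicit in the API.
  void AppendAndConsumeChunk(Chunk* aChunk)
  {
    mChunks.push_back(Chunk{std::move(aChunk->mBuffer)});
  }
};

int main()
{
  Segment segment;
  Chunk last{std::make_shared<std::vector<float>>(128, 0.0f)};
  segment.AppendAndConsumeChunk(&last);
  return last.mBuffer == nullptr ? 0 : 1; // buffer was moved into the segment
}
```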

content/media/AudioNodeStream.h

@@ -114,14 +114,26 @@ public:
     return (mKind == MediaStreamGraph::SOURCE_STREAM && mFinished) ||
            mKind == MediaStreamGraph::EXTERNAL_STREAM;
   }
+  virtual bool IsIntrinsicallyConsumed() const MOZ_OVERRIDE
+  {
+    return true;
+  }
 
   // Any thread
   AudioNodeEngine* Engine() { return mEngine; }
   TrackRate SampleRate() const { return mSampleRate; }
 
 protected:
+  void AdvanceOutputSegment();
   void FinishOutput();
+  void AccumulateInputChunk(uint32_t aInputIndex, const AudioChunk& aChunk,
+                            AudioChunk* aBlock,
+                            nsTArray<float>* aDownmixBuffer);
+  void UpMixDownMixChunk(const AudioChunk* aChunk, uint32_t aOutputChannelCount,
+                         nsTArray<const void*>& aOutputChannels,
+                         nsTArray<float>& aDownmixBuffer);
+  uint32_t ComputeFinalOuputChannelCount(uint32_t aInputChannelCount);
   void ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex);
 
   // The engine that will generate output for this node.

content/media/DOMMediaStream.h

@@ -68,10 +68,12 @@ public:
   // WebIDL
   double CurrentTime();
 
   void GetAudioTracks(nsTArray<nsRefPtr<AudioStreamTrack> >& aTracks);
   void GetVideoTracks(nsTArray<nsRefPtr<VideoStreamTrack> >& aTracks);
-  MediaStream* GetStream() { return mStream; }
+  MediaStream* GetStream() const { return mStream; }
   bool IsFinished();
   /**
    * Returns a principal indicating who may access this stream. The stream contents

content/media/MediaStreamGraph.cpp

@@ -520,7 +520,7 @@ MediaStreamGraphImpl::UpdateStreamOrder()
   mozilla::LinkedList<MediaStream> stack;
   for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
     nsRefPtr<MediaStream>& s = mOldStreams[i];
-    if (!s->mAudioOutputs.IsEmpty() || !s->mVideoOutputs.IsEmpty()) {
+    if (s->IsIntrinsicallyConsumed()) {
       MarkConsumed(s);
     }
     if (!s->mHasBeenOrdered) {

content/media/MediaStreamGraph.h

@@ -400,6 +400,16 @@ public:
   void RemoveListenerImpl(MediaStreamListener* aListener);
   void RemoveAllListenersImpl();
   void SetTrackEnabledImpl(TrackID aTrackID, bool aEnabled);
+  /**
+   * Returns true when this stream requires the contents of its inputs even if
+   * its own outputs are not being consumed. This is used to signal inputs to
+   * this stream that they are being consumed; when they're not being consumed,
+   * we make some optimizations.
+   */
+  virtual bool IsIntrinsicallyConsumed() const
+  {
+    return !mAudioOutputs.IsEmpty() || !mVideoOutputs.IsEmpty();
+  }
   void AddConsumer(MediaInputPort* aPort)
   {
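Putting the pieces together: the base MediaStream reports itself consumed only while an audio or video output is attached, AudioNodeStream overrides the new virtual to always return true, and UpdateStreamOrder's MarkConsumed decision now goes through that virtual instead of inspecting the output lists directly. A skeleton of just that dispatch, with hypothetical stripped-down classes (the real ones carry far more state):

```cpp
#include <cstdio>
#include <vector>

struct MediaStream {
  std::vector<int> mAudioOutputs, mVideoOutputs; // stand-ins for the real port lists
  virtual ~MediaStream() = default;

  // Base behavior, as in MediaStreamGraph.h above: consumed only while
  // something is attached to an output.
  virtual bool IsIntrinsicallyConsumed() const
  {
    return !mAudioOutputs.empty() || !mVideoOutputs.empty();
  }
};

struct AudioNodeStream : MediaStream {
  // As in AudioNodeStream.h above: a Web Audio processing node is always
  // treated as consumed by the graph.
  bool IsIntrinsicallyConsumed() const override { return true; }
};

int main()
{
  MediaStream plain;
  AudioNodeStream node;
  std::printf("%d %d\n",
              plain.IsIntrinsicallyConsumed(), // 0: no outputs attached yet
              node.IsIntrinsicallyConsumed()); // 1: always consumed
}
```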