Bug 804387. Part 8: Create AudioNodeEngine and AudioNodeStream. r=jesup

Modifies MediaStreamGraph to always advance its time by a multiple of
WEBAUDIO_BLOCK_SIZE.

--HG--
extra : rebase_source : 99524b09edd4ac0e1bc6607f2ba14925bc2f11c2
Robert O'Callahan 2013-01-14 11:46:57 +13:00
parent 82088f3e65
commit 5d60f92b6f
10 changed files with 675 additions and 9 deletions

@@ -0,0 +1,71 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioNodeEngine.h"
namespace mozilla {
void
AllocateAudioBlock(uint32_t aChannelCount, AudioChunk* aChunk)
{
// XXX for SIMD purposes we should do something here to make sure the
// channel buffers are 16-byte aligned.
nsRefPtr<SharedBuffer> buffer =
SharedBuffer::Create(WEBAUDIO_BLOCK_SIZE*aChannelCount*sizeof(float));
aChunk->mDuration = WEBAUDIO_BLOCK_SIZE;
aChunk->mChannelData.SetLength(aChannelCount);
float* data = static_cast<float*>(buffer->Data());
for (uint32_t i = 0; i < aChannelCount; ++i) {
aChunk->mChannelData[i] = data + i*WEBAUDIO_BLOCK_SIZE;
}
aChunk->mBuffer = buffer.forget();
aChunk->mVolume = 1.0f;
aChunk->mBufferFormat = AUDIO_FORMAT_FLOAT32;
}
void
WriteZeroesToAudioBlock(AudioChunk* aChunk, uint32_t aStart, uint32_t aLength)
{
MOZ_ASSERT(aStart + aLength <= WEBAUDIO_BLOCK_SIZE);
if (aLength == 0)
return;
for (uint32_t i = 0; i < aChunk->mChannelData.Length(); ++i) {
memset(static_cast<float*>(const_cast<void*>(aChunk->mChannelData[i])) + aStart,
0, aLength*sizeof(float));
}
}
void
AudioBlockAddChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
float aScale,
float aOutput[WEBAUDIO_BLOCK_SIZE])
{
if (aScale == 1.0f) {
for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
aOutput[i] += aInput[i];
}
} else {
for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
aOutput[i] += aInput[i]*aScale;
}
}
}
void
AudioBlockCopyChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
float aScale,
float aOutput[WEBAUDIO_BLOCK_SIZE])
{
if (aScale == 1.0f) {
memcpy(aOutput, aInput, WEBAUDIO_BLOCK_SIZE*sizeof(float));
} else {
for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) {
aOutput[i] = aInput[i]*aScale;
}
}
}
}
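
For orientation, here is a minimal sketch (illustrative only, not part of this patch) of how the two allocation helpers above compose to produce a block of silence. The helper name AllocateSilentBlock is hypothetical, and it assumes SharedBuffer::Create does not zero the memory it returns.

static void
AllocateSilentBlock(uint32_t aChannelCount, AudioChunk* aChunk)
{
  // Freshly allocated channel buffers are assumed not to be zero-initialized,
  // so clear the entire block explicitly.
  AllocateAudioBlock(aChannelCount, aChunk);
  WriteZeroesToAudioBlock(aChunk, 0, WEBAUDIO_BLOCK_SIZE);
}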

@@ -0,0 +1,149 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_AUDIONODEENGINE_H_
#define MOZILLA_AUDIONODEENGINE_H_
#include "AudioSegment.h"
namespace mozilla {
class AudioNodeStream;
// We ensure that the graph advances in steps that are multiples of the Web
// Audio block size
const uint32_t WEBAUDIO_BLOCK_SIZE_BITS = 7;
const uint32_t WEBAUDIO_BLOCK_SIZE = 1 << WEBAUDIO_BLOCK_SIZE_BITS;
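// With WEBAUDIO_BLOCK_SIZE_BITS == 7 a block is 128 sample frames, i.e.
// 128/48000 s (about 2.67 ms) at IdealAudioRate().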
/**
* This class holds onto a set of immutable channel buffers. The storage
* for the buffers must be malloced, but the buffer pointers and the malloc
* pointers can be different (e.g. if the buffers are contained inside
* some malloced object).
*/
class ThreadSharedFloatArrayBufferList : public ThreadSharedObject {
public:
/**
* Construct with null data.
*/
ThreadSharedFloatArrayBufferList(uint32_t aCount)
{
mContents.SetLength(aCount);
}
struct Storage {
Storage()
{
mDataToFree = nullptr;
mSampleData = nullptr;
}
~Storage() { free(mDataToFree); }
void* mDataToFree;
const float* mSampleData;
};
/**
* This can be called on any thread.
*/
uint32_t GetChannels() const { return mContents.Length(); }
/**
* This can be called on any thread.
*/
const float* GetData(uint32_t aIndex) const { return mContents[aIndex].mSampleData; }
/**
* Call this only during initialization, before the object is handed to
* any other thread.
*/
void SetData(uint32_t aIndex, void* aDataToFree, const float* aData)
{
Storage* s = &mContents[aIndex];
free(s->mDataToFree);
s->mDataToFree = aDataToFree;
s->mSampleData = aData;
}
/**
* Put this object into an error state where there are no channels.
*/
void Clear() { mContents.Clear(); }
private:
AutoFallibleTArray<Storage,2> mContents;
};
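// Illustrative use of ThreadSharedFloatArrayBufferList (not part of this
// patch): one malloced allocation can back several channels; only the channel
// that owns the allocation passes a non-null aDataToFree, so the memory is
// freed exactly once.
//
//   float* samples =
//     static_cast<float*>(malloc(2*WEBAUDIO_BLOCK_SIZE*sizeof(float)));
//   nsRefPtr<ThreadSharedFloatArrayBufferList> buffers =
//     new ThreadSharedFloatArrayBufferList(2);
//   buffers->SetData(0, samples, samples);
//   buffers->SetData(1, nullptr, samples + WEBAUDIO_BLOCK_SIZE);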
/**
* Allocates an AudioChunk with fresh buffers of WEBAUDIO_BLOCK_SIZE float samples.
* AudioChunk::mChannelData's entries can be cast to float* for writing.
*/
void AllocateAudioBlock(uint32_t aChannelCount, AudioChunk* aChunk);
/**
* aChunk must have been allocated by AllocateAudioBlock.
*/
void WriteZeroesToAudioBlock(AudioChunk* aChunk, uint32_t aStart, uint32_t aLength);
/**
* Pointwise multiply-add operation. aScale == 1.0f should be optimized.
*/
void AudioBlockAddChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
float aScale,
float aOutput[WEBAUDIO_BLOCK_SIZE]);
/**
* Pointwise copy-scaled operation. aScale == 1.0f should be optimized.
*/
void AudioBlockCopyChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE],
float aScale,
float aOutput[WEBAUDIO_BLOCK_SIZE]);
/**
* All methods of this class and its subclasses are called on the
* MediaStreamGraph thread.
*/
class AudioNodeEngine {
public:
AudioNodeEngine() {}
virtual ~AudioNodeEngine() {}
virtual void SetStreamTimeParameter(uint32_t aIndex, TrackTicks aParam)
{
NS_ERROR("Invalid SetStreamTimeParameter index");
}
virtual void SetDoubleParameter(uint32_t aIndex, double aParam)
{
NS_ERROR("Invalid SetDoubleParameter index");
}
virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam)
{
NS_ERROR("Invalid SetInt32Parameter index");
}
virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
{
NS_ERROR("SetBuffer called on engine that doesn't support it");
}
/**
* Produce the next block of audio samples, given input samples aInput
* (the mixed data for input 0).
* By default, simply returns the mixed input.
* aInput is guaranteed to have float sample format (if it has samples at all),
* to have been resampled to IdealAudioRate(), and to contain exactly
* WEBAUDIO_BLOCK_SIZE samples.
* *aFinished is set to false by the caller. If the callee sets it to true,
* we'll finish the stream and not call this again.
*/
virtual void ProduceAudioBlock(AudioNodeStream* aStream,
const AudioChunk& aInput,
AudioChunk* aOutput,
bool* aFinished)
{
*aOutput = aInput;
}
};
}
#endif /* MOZILLA_AUDIONODEENGINE_H_ */
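
As a concrete illustration of the engine contract above, here is a hedged sketch (not part of this patch) of a trivial gain engine: it treats parameter index 0 as a linear gain and scales the mixed input into a freshly allocated block. The class name GainEngine and the choice of parameter index are hypothetical.

class GainEngine : public AudioNodeEngine {
public:
  GainEngine() : mGain(1.0f) {}
  virtual void SetDoubleParameter(uint32_t aIndex, double aParam)
  {
    if (aIndex == 0) {
      mGain = float(aParam);
    } else {
      NS_ERROR("Invalid SetDoubleParameter index");
    }
  }
  virtual void ProduceAudioBlock(AudioNodeStream* aStream,
                                 const AudioChunk& aInput,
                                 AudioChunk* aOutput,
                                 bool* aFinished)
  {
    if (aInput.IsNull()) {
      // Nothing to scale; propagate a null (silent) block.
      aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
      return;
    }
    AllocateAudioBlock(aInput.mChannelData.Length(), aOutput);
    for (uint32_t i = 0; i < aInput.mChannelData.Length(); ++i) {
      AudioBlockCopyChannelWithScale(
        static_cast<const float*>(aInput.mChannelData[i]),
        mGain * aInput.mVolume,
        static_cast<float*>(const_cast<void*>(aOutput->mChannelData[i])));
    }
  }
private:
  float mGain;
};

The point here is only the shape of the overrides: parameters arrive through the Set*Parameter calls (dispatched on the graph thread), and each ProduceAudioBlock call handles exactly one WEBAUDIO_BLOCK_SIZE block.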

@@ -0,0 +1,270 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioNodeStream.h"
#include "MediaStreamGraphImpl.h"
#include "AudioNodeEngine.h"
using namespace mozilla::dom;
namespace mozilla {
/**
* An AudioNodeStream produces a single audio track with ID
* AUDIO_NODE_STREAM_TRACK_ID. This track has rate IdealAudioRate().
* Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
*/
static const int AUDIO_NODE_STREAM_TRACK_ID = 1;
AudioNodeStream::~AudioNodeStream()
{
}
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
double aStreamTime)
{
class Message : public ControlMessage {
public:
Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
double aStreamTime)
: ControlMessage(aStream), mStreamTime(aStreamTime),
mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
virtual void Run()
{
static_cast<AudioNodeStream*>(mStream)->
SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
}
double mStreamTime;
MediaStream* mRelativeToStream;
uint32_t mIndex;
};
MOZ_ASSERT(this);
GraphImpl()->AppendMessage(new Message(this, aIndex, aRelativeToStream, aStreamTime));
}
void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
double aStreamTime)
{
StreamTime streamTime = std::max<MediaTime>(0, SecondsToMediaTime(aStreamTime));
GraphTime graphTime = aRelativeToStream->StreamTimeToGraphTime(streamTime);
StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
TrackTicks ticks = TimeToTicksRoundDown(IdealAudioRate(), thisStreamTime);
mEngine->SetStreamTimeParameter(aIndex, ticks);
}
void
AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
{
class Message : public ControlMessage {
public:
Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
: ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
virtual void Run()
{
static_cast<AudioNodeStream*>(mStream)->Engine()->
SetDoubleParameter(mIndex, mValue);
}
double mValue;
uint32_t mIndex;
};
MOZ_ASSERT(this);
GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
class Message : public ControlMessage {
public:
Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
: ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
virtual void Run()
{
static_cast<AudioNodeStream*>(mStream)->Engine()->
SetInt32Parameter(mIndex, mValue);
}
int32_t mValue;
uint32_t mIndex;
};
MOZ_ASSERT(this);
GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
{
class Message : public ControlMessage {
public:
Message(AudioNodeStream* aStream,
already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
: ControlMessage(aStream), mBuffer(aBuffer) {}
virtual void Run()
{
static_cast<AudioNodeStream*>(mStream)->Engine()->
SetBuffer(mBuffer.forget());
}
nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
};
MOZ_ASSERT(this);
GraphImpl()->AppendMessage(new Message(this, aBuffer));
}
StreamBuffer::Track*
AudioNodeStream::EnsureTrack()
{
StreamBuffer::Track* track = mBuffer.FindTrack(AUDIO_NODE_STREAM_TRACK_ID);
if (!track) {
nsAutoPtr<MediaSegment> segment(new AudioSegment());
for (uint32_t j = 0; j < mListeners.Length(); ++j) {
MediaStreamListener* l = mListeners[j];
l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0,
MediaStreamListener::TRACK_EVENT_CREATED,
*segment);
}
track = &mBuffer.AddTrack(AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0, segment.forget());
}
return track;
}
AudioChunk*
AudioNodeStream::ObtainInputBlock(AudioChunk* aTmpChunk)
{
uint32_t inputCount = mInputs.Length();
uint32_t outputChannelCount = 0;
nsAutoTArray<AudioChunk*,250> inputChunks;
for (uint32_t i = 0; i < inputCount; ++i) {
MediaStream* s = mInputs[i]->GetSource();
AudioNodeStream* a = s->AsAudioNodeStream();
MOZ_ASSERT(a);
if (a->IsFinishedOnGraphThread()) {
continue;
}
AudioChunk* chunk = a->mLastChunk;
// XXX when we implement DelayNode, this will no longer be true and we'll
// need to treat a null chunk (when the DelayNode hasn't had a chance
// to produce data yet) as silence here.
MOZ_ASSERT(chunk);
if (chunk->IsNull()) {
continue;
}
inputChunks.AppendElement(chunk);
outputChannelCount =
GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
}
uint32_t inputChunkCount = inputChunks.Length();
if (inputChunkCount == 0) {
aTmpChunk->SetNull(WEBAUDIO_BLOCK_SIZE);
return aTmpChunk;
}
if (inputChunkCount == 1) {
return inputChunks[0];
}
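// General case: allocate a fresh output block, up-mix each input to the
// superset channel count, and accumulate with each chunk's volume applied
// (copy with scale for the first input, add with scale for the rest).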
AllocateAudioBlock(outputChannelCount, aTmpChunk);
for (uint32_t i = 0; i < inputChunkCount; ++i) {
AudioChunk* chunk = inputChunks[i];
nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
channels.AppendElements(chunk->mChannelData);
if (channels.Length() < outputChannelCount) {
AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
NS_ASSERTION(outputChannelCount == channels.Length(),
"We called GetAudioChannelsSuperset to avoid this");
}
for (uint32_t c = 0; c < channels.Length(); ++c) {
const float* inputData = static_cast<const float*>(channels[c]);
float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk->mChannelData[c]));
if (inputData) {
if (i == 0) {
AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
} else {
AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
}
} else {
if (i == 0) {
memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
}
}
}
}
return aTmpChunk;
}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
StreamBuffer::Track* track = EnsureTrack();
AudioChunk outputChunk;
AudioSegment* segment = track->Get<AudioSegment>();
if (mInCycle) {
// XXX DelayNode not supported yet so just produce silence
outputChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
} else {
AudioChunk tmpChunk;
AudioChunk* inputChunk = ObtainInputBlock(&tmpChunk);
bool finished = false;
mEngine->ProduceAudioBlock(this, *inputChunk, &outputChunk, &finished);
if (finished) {
FinishOutput();
}
}
mLastChunk = segment->AppendAndConsumeChunk(&outputChunk);
for (uint32_t j = 0; j < mListeners.Length(); ++j) {
MediaStreamListener* l = mListeners[j];
AudioChunk copyChunk = *mLastChunk;
AudioSegment tmpSegment;
tmpSegment.AppendAndConsumeChunk(&copyChunk);
l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
IdealAudioRate(), segment->GetDuration(), 0,
tmpSegment);
}
}
TrackTicks
AudioNodeStream::GetCurrentPosition()
{
return EnsureTrack()->Get<AudioSegment>()->GetDuration();
}
void
AudioNodeStream::FinishOutput()
{
if (IsFinishedOnGraphThread()) {
return;
}
StreamBuffer::Track* track = EnsureTrack();
track->SetEnded();
FinishOnGraphThread();
for (uint32_t j = 0; j < mListeners.Length(); ++j) {
MediaStreamListener* l = mListeners[j];
AudioSegment emptySegment;
l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
IdealAudioRate(),
track->GetSegment()->GetDuration(),
MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
}
}
}

@@ -0,0 +1,82 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_AUDIONODESTREAM_H_
#define MOZILLA_AUDIONODESTREAM_H_
#include "MediaStreamGraph.h"
#include "AudioChannelFormat.h"
#include "AudioNodeEngine.h"
#ifdef PR_LOGGING
#define LOG(type, msg) PR_LOG(gMediaStreamGraphLog, type, msg)
#else
#define LOG(type, msg)
#endif
namespace mozilla {
class ThreadSharedFloatArrayBufferList;
/**
* An AudioNodeStream produces one audio track with ID AUDIO_TRACK.
* The start time of the AudioTrack is aligned to the start time of the
* AudioContext's destination node stream, plus some multiple of WEBAUDIO_BLOCK_SIZE
* samples.
*
* An AudioNodeStream has an AudioNodeEngine plugged into it that does the
* actual audio processing. AudioNodeStream contains the glue code that
* integrates audio processing with the MediaStreamGraph.
*/
class AudioNodeStream : public ProcessedMediaStream {
public:
enum { AUDIO_TRACK = 1 };
/**
* Transfers ownership of aEngine to the new AudioNodeStream.
*/
explicit AudioNodeStream(AudioNodeEngine* aEngine)
: ProcessedMediaStream(nullptr), mEngine(aEngine), mLastChunk(nullptr)
{
}
~AudioNodeStream();
// Control API
/**
* Sets a parameter that's a time relative to some stream's played time.
* This time is converted to a time relative to this stream when it's set.
*/
void SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
double aStreamTime);
void SetDoubleParameter(uint32_t aIndex, double aValue);
void SetInt32Parameter(uint32_t aIndex, int32_t aValue);
void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer);
virtual AudioNodeStream* AsAudioNodeStream() { return this; }
// Graph thread only
void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
double aStreamTime);
virtual void ProduceOutput(GraphTime aFrom, GraphTime aTo);
TrackTicks GetCurrentPosition();
// Any thread
AudioNodeEngine* Engine() { return mEngine; }
protected:
void FinishOutput();
StreamBuffer::Track* EnsureTrack();
AudioChunk* ObtainInputBlock(AudioChunk* aTmpChunk);
// The engine that will generate output for this node.
nsAutoPtr<AudioNodeEngine> mEngine;
// The last block produced by this node.
AudioChunk* mLastChunk;
};
}
#endif /* MOZILLA_AUDIONODESTREAM_H_ */
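
To show how the control API above is meant to be driven, here is a hedged sketch (not part of this patch) of main-thread usage. It reuses the hypothetical GainEngine from the earlier sketch and the MediaStreamGraph::CreateAudioNodeStream factory added later in this patch; the parameter index and gain value are illustrative.

static already_AddRefed<AudioNodeStream>
CreateGainStream()
{
  // Main thread: the graph takes ownership of the engine; parameter changes
  // are queued as ControlMessages and applied on the graph thread.
  MediaStreamGraph* graph = MediaStreamGraph::GetInstance();
  nsRefPtr<AudioNodeStream> stream =
    graph->CreateAudioNodeStream(new GainEngine());
  stream->SetDoubleParameter(0, 0.5);  // hypothetical index 0 = linear gain
  return stream.forget();
}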

@@ -89,7 +89,6 @@ AudioSegment::ApplyVolume(float aVolume)
}
static const int AUDIO_PROCESSING_FRAMES = 640; /* > 10ms of 48KHz audio */
static const int GUESS_AUDIO_CHANNELS = 2;
static const uint8_t gZeroChannel[MAX_AUDIO_SAMPLE_SIZE*AUDIO_PROCESSING_FRAMES] = {0};
void

@@ -15,6 +15,11 @@ namespace mozilla {
class AudioStream;
/**
* For auto-arrays etc, guess this as the common number of channels.
*/
const int GUESS_AUDIO_CHANNELS = 2;
/**
* An AudioChunk represents a multi-channel buffer of audio samples.
* It references an underlying ThreadSharedObject which manages the lifetime

@@ -19,6 +19,8 @@ endif # !_MSC_VER
EXPORTS = \
AbstractMediaDecoder.h \
AudioChannelFormat.h \
AudioNodeEngine.h \
AudioNodeStream.h \
AudioSampleFormat.h \
AudioSegment.h \
BufferMediaResource.h \
@@ -46,6 +48,8 @@ EXPORTS = \
CPPSRCS = \
AudioChannelFormat.cpp \
AudioNodeEngine.cpp \
AudioNodeStream.cpp \
AudioSegment.cpp \
DecoderTraits.cpp \
FileBlockCache.cpp \

@@ -19,6 +19,8 @@
#include "TrackUnionStream.h"
#include "ImageContainer.h"
#include "AudioChannelCommon.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include <algorithm>
using namespace mozilla::layers;
@@ -874,6 +876,40 @@ MediaStreamGraphImpl::EnsureNextIterationLocked(MonitorAutoLock& aLock)
}
}
static GraphTime
RoundUpToAudioBlock(GraphTime aTime)
{
TrackRate rate = IdealAudioRate();
int64_t ticksAtIdealRate = (aTime*rate) >> MEDIA_TIME_FRAC_BITS;
// Round up to nearest block boundary
int64_t blocksAtIdealRate =
(ticksAtIdealRate + (WEBAUDIO_BLOCK_SIZE - 1)) >>
WEBAUDIO_BLOCK_SIZE_BITS;
// Round up to nearest MediaTime unit
return
((((blocksAtIdealRate + 1)*WEBAUDIO_BLOCK_SIZE) << MEDIA_TIME_FRAC_BITS)
+ rate - 1)/rate;
}
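// Worked example for RoundUpToAudioBlock above: with IdealAudioRate() == 48000,
// a GraphTime equivalent to 200 ticks gives ceil(200/128) = 2 blocks, and the
// function returns the MediaTime for (2 + 1)*128 = 384 ticks, rounded up to
// the next MediaTime unit. The extra block and the final round-up keep the
// result strictly after aTime, and on a block boundary, despite truncation in
// the fixed-point conversions.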
void
MediaStreamGraphImpl::ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
GraphTime aFrom,
GraphTime aTo)
{
GraphTime t = aFrom;
while (t < aTo) {
GraphTime next = RoundUpToAudioBlock(t + 1);
for (uint32_t i = aStreamIndex; i < mStreams.Length(); ++i) {
ProcessedMediaStream* ps = mStreams[i]->AsProcessedStream();
if (ps) {
ps->ProduceOutput(t, next);
}
}
t = next;
}
NS_ASSERTION(t == aTo, "Something went wrong with rounding to block boundaries");
}
void
MediaStreamGraphImpl::RunThread()
{
@@ -905,9 +941,8 @@ MediaStreamGraphImpl::RunThread()
UpdateStreamOrder();
int32_t writeAudioUpTo = AUDIO_TARGET_MS;
GraphTime endBlockingDecisions =
mCurrentTime + MillisecondsToMediaTime(writeAudioUpTo);
RoundUpToAudioBlock(mCurrentTime + MillisecondsToMediaTime(AUDIO_TARGET_MS));
bool ensureNextIteration = false;
// Grab pending stream input.
@@ -926,15 +961,27 @@ MediaStreamGraphImpl::RunThread()
// Play stream contents.
uint32_t audioStreamsActive = 0;
bool allBlockedForever = true;
// True when we've done ProduceOutput for all processed streams.
bool doneAllProducing = false;
// Figure out what each stream wants to do
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
ProcessedMediaStream* ps = stream->AsProcessedStream();
if (ps && !ps->mFinished) {
ps->ProduceOutput(prevComputedTime, mStateComputedTime);
NS_ASSERTION(stream->mBuffer.GetEnd() >=
GraphTimeToStreamTime(stream, mStateComputedTime),
"Stream did not produce enough data");
if (!doneAllProducing && !stream->IsFinishedOnGraphThread()) {
ProcessedMediaStream* ps = stream->AsProcessedStream();
if (ps) {
AudioNodeStream* n = stream->AsAudioNodeStream();
if (n) {
// Since an AudioNodeStream is present, go ahead and
// produce audio block by block for all the rest of the streams.
ProduceDataForStreamsBlockByBlock(i, prevComputedTime, mStateComputedTime);
doneAllProducing = true;
} else {
ps->ProduceOutput(prevComputedTime, mStateComputedTime);
NS_ASSERTION(stream->mBuffer.GetEnd() >=
GraphTimeToStreamTime(stream, mStateComputedTime),
"Stream did not produce enough data");
}
}
}
NotifyHasCurrentData(stream);
CreateOrDestroyAudioStreams(prevComputedTime, stream);
@@ -1896,4 +1943,13 @@ MediaStreamGraph::CreateTrackUnionStream(nsDOMMediaStream* aWrapper)
return stream;
}
AudioNodeStream*
MediaStreamGraph::CreateAudioNodeStream(AudioNodeEngine* aEngine)
{
AudioNodeStream* stream = new AudioNodeStream(aEngine);
NS_ADDREF(stream);
static_cast<MediaStreamGraphImpl*>(this)->AppendMessage(new CreateMessage(stream));
return stream;
}
}

@@ -189,6 +189,9 @@ class MediaStreamGraphImpl;
class SourceMediaStream;
class ProcessedMediaStream;
class MediaInputPort;
class AudioNodeStream;
class AudioNodeEngine;
struct AudioChunk;
/**
* A stream of synchronized audio and video data. All (not blocked) streams
@@ -345,6 +348,7 @@ public:
virtual SourceMediaStream* AsSourceStream() { return nullptr; }
virtual ProcessedMediaStream* AsProcessedStream() { return nullptr; }
virtual AudioNodeStream* AsAudioNodeStream() { return nullptr; }
// media graph thread only
void Init();
@@ -819,12 +823,20 @@ protected:
bool mInCycle;
};
// Returns ideal audio rate for processing
inline TrackRate IdealAudioRate() { return 48000; }
/**
* Initially, at least, we will have a singleton MediaStreamGraph per
* process.
*/
class MediaStreamGraph {
public:
// We ensure that the graph current time advances in multiples of
// WEBAUDIO_BLOCK_SIZE/IdealAudioRate(). A stream that never blocks
// and has a track with the ideal audio rate will produce audio in
// multiples of the block size.
// Main thread only
static MediaStreamGraph* GetInstance();
// Control API.
@@ -848,6 +860,11 @@ public:
* particular tracks of each input stream.
*/
ProcessedMediaStream* CreateTrackUnionStream(nsDOMMediaStream* aWrapper);
/**
* Create a stream that will process audio for an AudioNode.
* Takes ownership of aEngine.
*/
AudioNodeStream* CreateAudioNodeStream(AudioNodeEngine* aEngine);
/**
* Returns the number of graph updates sent. This can be used to track
* whether a given update has been processed by the graph thread and reflected

@@ -249,6 +249,19 @@ public:
void RecomputeBlockingAt(const nsTArray<MediaStream*>& aStreams,
GraphTime aTime, GraphTime aEndBlockingDecisions,
GraphTime* aEnd);
/**
* Produce data for all streams >= aStreamIndex for the given time interval.
* Advances block by block, each iteration producing data for all streams
* for a single block.
* This is needed if there are WebAudio delay nodes, whose output for a block
* may depend on the output of any other node (including itself) for the
* previous block. This is probably also more performant due to better memory
* locality.
* This is called whenever we have an AudioNodeStream in the graph.
*/
void ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
GraphTime aFrom,
GraphTime aTo);
/**
* Returns true if aStream will underrun at aTime for its own playback.
* aEndBlockingDecisions is when we plan to stop making blocking decisions.