Bug 989921 - Allow the MediaStreamGraph mixer to send data back to multiple consumers. r=jesup

Paul Adenot 2014-08-25 14:13:08 +02:00
parent e50e0d25cd
commit 3a76d049e6
6 changed files with 110 additions and 53 deletions
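In short, this patch turns AudioMixer's single MixerFunc, passed at construction time, into a list of MixerCallbackReceiver objects managed with AddCallback()/RemoveCallback(); every registered receiver is handed the mixed buffer when FinishMixing() runs. A minimal consumer-side sketch, assuming only the AudioMixer.h interface shown in the first file below (PrintSink and main() are illustrative, not part of the patch):

#include "AudioMixer.h"
#include <cstdio>

// Hypothetical consumer: anything interested in the mixed output implements
// MixerCallbackReceiver and registers itself on the mixer.
struct PrintSink : public mozilla::MixerCallbackReceiver {
  void MixerCallback(mozilla::AudioDataValue* aMixedBuffer,
                     mozilla::AudioSampleFormat aFormat,
                     uint32_t aChannels,
                     uint32_t aFrames,
                     uint32_t aSampleRate)
  {
    printf("mixed block: %u frames, %u channels, %u Hz\n",
           aFrames, aChannels, aSampleRate);
  }
};

int main()
{
  PrintSink sink1, sink2;
  mozilla::AudioMixer mixer;
  // Several receivers can observe the same mixer; each one is called with the
  // same mixed buffer when FinishMixing() runs.
  mixer.AddCallback(&sink1);
  mixer.AddCallback(&sink2);
  // ... StartMixing() / Mix() / FinishMixing() cycles happen here ...
  mixer.RemoveCallback(&sink1); // returns false if sink1 was never registered
  return 0;
}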

View File

@@ -9,13 +9,17 @@
#include "AudioSampleFormat.h"
#include "nsTArray.h"
#include "mozilla/PodOperations.h"
#include "mozilla/LinkedList.h"
namespace mozilla {
typedef void(*MixerFunc)(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
uint32_t aFrames,
uint32_t aSampleRate);
struct MixerCallbackReceiver {
virtual void MixerCallback(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
uint32_t aFrames,
uint32_t aSampleRate) = 0;
};
/**
* This class mixes multiple streams of audio together to output a single audio
@@ -32,21 +36,34 @@ typedef void(*MixerFunc)(AudioDataValue* aMixedBuffer,
class AudioMixer
{
public:
AudioMixer(MixerFunc aCallback)
: mCallback(aCallback),
mFrames(0),
AudioMixer()
: mFrames(0),
mChannels(0),
mSampleRate(0)
{ }
~AudioMixer()
{
mCallbacks.clear();
}
void StartMixing()
{
mSampleRate = mChannels = mFrames = 0;
}
/* Get the data from the mixer. This is supposed to be called when all the
* tracks have been mixed in. The caller should not hold onto the data. */
void FinishMixing() {
mCallback(mMixedAudio.Elements(),
AudioSampleTypeToFormat<AudioDataValue>::Format,
mChannels,
mFrames,
mSampleRate);
MOZ_ASSERT(mChannels && mFrames && mSampleRate, "Mix not called for this cycle?");
for (MixerCallback* cb = mCallbacks.getFirst();
cb != nullptr; cb = cb->getNext()) {
cb->mReceiver->MixerCallback(mMixedAudio.Elements(),
AudioSampleTypeToFormat<AudioDataValue>::Format,
mChannels,
mFrames,
mSampleRate);
}
PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
mSampleRate = mChannels = mFrames = 0;
}
@@ -71,6 +88,21 @@ public:
mMixedAudio[i] += aSamples[i];
}
}
void AddCallback(MixerCallbackReceiver* aReceiver) {
mCallbacks.insertBack(new MixerCallback(aReceiver));
}
bool RemoveCallback(MixerCallbackReceiver* aReceiver) {
for (MixerCallback* cb = mCallbacks.getFirst();
cb != nullptr; cb = cb->getNext()) {
if (cb->mReceiver == aReceiver) {
cb->remove();
return true;
}
}
return false;
}
private:
void EnsureCapacityAndSilence() {
if (mFrames * mChannels > mMixedAudio.Length()) {
@@ -79,8 +111,17 @@ private:
PodZero(mMixedAudio.Elements(), mMixedAudio.Length());
}
class MixerCallback : public LinkedListElement<MixerCallback>
{
public:
MixerCallback(MixerCallbackReceiver* aReceiver)
: mReceiver(aReceiver)
{ }
MixerCallbackReceiver* mReceiver;
};
/* Function that is called when the mixing is done. */
MixerFunc mCallback;
LinkedList<MixerCallback> mCallbacks;
/* Number of frames for this mixing block. */
uint32_t mFrames;
/* Number of channels for this mixing block. */
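For reference, one mixing cycle with the reworked class goes StartMixing(), then Mix() once per input stream, then FinishMixing(), which fans the summed buffer out to every registered receiver and clears it for the next cycle. A rough sketch, assuming two mono input streams (MixOneCycle and its arguments are illustrative; the updated test file below exercises the same pattern):

// Sketch of one cycle: reset the mixer, sum each stream's audio into it,
// then let every callback added via AddCallback() see the result.
void MixOneCycle(mozilla::AudioMixer& aMixer,
                 mozilla::AudioDataValue* aStreamA,
                 mozilla::AudioDataValue* aStreamB,
                 uint32_t aFrames, uint32_t aSampleRate)
{
  aMixer.StartMixing();                          // forget last cycle's frame/channel/rate
  aMixer.Mix(aStreamA, 1, aFrames, aSampleRate); // add first (mono) stream
  aMixer.Mix(aStreamB, 1, aFrames, aSampleRate); // add second stream on top
  aMixer.FinishMixing();                         // invoke every MixerCallback with the
                                                 // mixed buffer, then zero it
}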

View File

@@ -529,24 +529,6 @@ MediaStreamGraphImpl::MarkConsumed(MediaStream* aStream)
}
}
static void AudioMixerCallback(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
uint32_t aFrames,
uint32_t aSampleRate)
{
// Need an api to register mixer callbacks, bug 989921
#ifdef MOZ_WEBRTC
if (aFrames > 0 && aChannels > 0) {
// XXX need Observer base class and registration API
if (gFarendObserver) {
gFarendObserver->InsertFarEnd(aMixedBuffer, aFrames, false,
aSampleRate, aChannels, aFormat);
}
}
#endif
}
void
MediaStreamGraphImpl::UpdateStreamOrder()
{
@@ -573,7 +555,11 @@ MediaStreamGraphImpl::UpdateStreamOrder()
mStreams[i]->mAudioOutputStreams[j].mStream->SetMicrophoneActive(true);
}
}
if (gFarendObserver) {
mMixer->AddCallback(gFarendObserver);
}
} else if (mMixer && !shouldMix) {
mMixer->RemoveCallback(gFarendObserver);
mMixer = nullptr;
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
for (uint32_t j = 0; j < mStreams[i]->mAudioOutputStreams.Length(); ++j) {
@@ -1425,6 +1411,11 @@ MediaStreamGraphImpl::RunThread()
// This is the number of frames that are written to the AudioStreams for
// this cycle.
TrackTicks ticksPlayed = 0;
if (mMixer) {
mMixer->StartMixing();
}
// Figure out what each stream wants to do
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
@@ -1480,7 +1471,7 @@ MediaStreamGraphImpl::RunThread()
}
}
if (mMixer) {
if (mMixer && ticksPlayed) {
mMixer->FinishMixing();
}

View File

@@ -18,7 +18,6 @@
#include "MainThreadUtils.h"
#include "nsAutoRef.h"
#include <speex/speex_resampler.h>
#include "AudioMixer.h"
#include "mozilla/dom/AudioChannelBinding.h"
class nsIRunnable;

View File

@@ -9,25 +9,28 @@
using mozilla::AudioDataValue;
using mozilla::AudioSampleFormat;
struct MixerConsumer : public mozilla::MixerCallbackReceiver
{
/* In this test, the different audio streams and channels are always created to
* cancel each other. */
void MixingDone(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames, uint32_t aSampleRate)
{
bool silent = true;
for (uint32_t i = 0; i < aChannels * aFrames; i++) {
if (aData[i] != 0.0) {
if (aFormat == mozilla::AUDIO_FORMAT_S16) {
fprintf(stderr, "Sample at %d is not silent: %d\n", i, (short)aData[i]);
} else {
fprintf(stderr, "Sample at %d is not silent: %f\n", i, (float)aData[i]);
void MixerCallback(AudioDataValue* aData, AudioSampleFormat aFormat, uint32_t aChannels, uint32_t aFrames, uint32_t aSampleRate)
{
bool silent = true;
for (uint32_t i = 0; i < aChannels * aFrames; i++) {
if (aData[i] != 0.0) {
if (aFormat == mozilla::AUDIO_FORMAT_S16) {
fprintf(stderr, "Sample at %d is not silent: %d\n", i, (short)aData[i]);
} else {
fprintf(stderr, "Sample at %d is not silent: %f\n", i, (float)aData[i]);
}
silent = false;
}
silent = false;
}
if (!silent) {
MOZ_CRASH();
}
}
if (!silent) {
MOZ_CRASH();
}
}
};
/* Helper function to give us the maximum and minimum values that don't clip,
* for a given sample format (integer or floating-point). */
@@ -68,6 +71,7 @@ void FillBuffer(AudioDataValue* aBuffer, uint32_t aLength, AudioDataValue aValue
int main(int argc, char* argv[]) {
const uint32_t CHANNEL_LENGTH = 256;
const uint32_t AUDIO_RATE = 44100;
MixerConsumer consumer;
AudioDataValue a[CHANNEL_LENGTH * 2];
AudioDataValue b[CHANNEL_LENGTH * 2];
FillBuffer(a, CHANNEL_LENGTH, GetLowValue<AudioDataValue>());
@@ -77,7 +81,8 @@ int main(int argc, char* argv[]) {
{
int iterations = 2;
mozilla::AudioMixer mixer(MixingDone);
mozilla::AudioMixer mixer;
mixer.AddCallback(&consumer);
fprintf(stderr, "Test AudioMixer constant buffer length.\n");
@@ -89,7 +94,8 @@ int main(int argc, char* argv[]) {
}
{
mozilla::AudioMixer mixer(MixingDone);
mozilla::AudioMixer mixer;
mixer.AddCallback(&consumer);
fprintf(stderr, "Test AudioMixer variable buffer length.\n");
@@ -120,7 +126,9 @@ int main(int argc, char* argv[]) {
FillBuffer(b, CHANNEL_LENGTH, GetHighValue<AudioDataValue>());
{
mozilla::AudioMixer mixer(MixingDone);
mozilla::AudioMixer mixer;
mixer.AddCallback(&consumer);
fprintf(stderr, "Test AudioMixer variable channel count.\n");
mixer.Mix(a, 1, CHANNEL_LENGTH, AUDIO_RATE);
@@ -135,7 +143,8 @@ int main(int argc, char* argv[]) {
}
{
mozilla::AudioMixer mixer(MixingDone);
mozilla::AudioMixer mixer;
mixer.AddCallback(&consumer);
fprintf(stderr, "Test AudioMixer variable stream count.\n");
mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE);

View File

@@ -20,12 +20,18 @@ typedef struct FarEndAudioChunk_ {
} FarEndAudioChunk;
// XXX Really a singleton currently
class AudioOutputObserver // : public MSGOutputObserver
class AudioOutputObserver : public MixerCallbackReceiver
{
public:
AudioOutputObserver();
virtual ~AudioOutputObserver();
void MixerCallback(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
uint32_t aFrames,
uint32_t aSampleRate) MOZ_OVERRIDE;
void Clear();
void InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrames, bool aOverran,
int aFreq, int aChannels, AudioSampleFormat aFormat);

View File

@@ -90,6 +90,17 @@ AudioOutputObserver::Size()
return mPlayoutFifo->size();
}
void
AudioOutputObserver::MixerCallback(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
uint32_t aFrames,
uint32_t aSampleRate)
{
gFarendObserver->InsertFarEnd(aMixedBuffer, aFrames, false,
aSampleRate, aChannels, aFormat);
}
// static
void
AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrames, bool aOverran,