gecko/content/media/webaudio/AudioDestinationNode.cpp
Karl Tomlinson 2bb6ffd9ed b=932400 rename ProduceAudioBlock to ProcessBlock r=padenot
--HG--
rename : content/media/webaudio/DelayProcessor.cpp => content/media/webaudio/DelayBuffer.cpp
extra : rebase_source : ebdc7404c8d27e3a24098f21a7752df529bb44c9
2014-03-05 10:09:49 +13:00

/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "AudioDestinationNode.h"
#include "mozilla/dom/AudioDestinationNodeBinding.h"
#include "mozilla/Preferences.h"
#include "AudioChannelAgent.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "MediaStreamGraph.h"
#include "OfflineAudioCompletionEvent.h"
#include "nsIInterfaceRequestorUtils.h"
#include "nsIDocShell.h"
#include "nsIPermissionManager.h"
#include "nsIScriptObjectPrincipal.h"
#include "nsServiceManagerUtils.h"
#include "nsIAppShell.h"
#include "nsWidgetsCID.h"
namespace mozilla {
namespace dom {
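
// The engine of an OfflineAudioContext's destination node.  Rather than
// playing its input, it records each processed block into mInputChannels
// until mLength frames have been captured, and then hands the result to the
// main thread as an AudioBuffer via OfflineAudioCompletionEvent.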
class OfflineDestinationNodeEngine : public AudioNodeEngine
{
public:
  typedef AutoFallibleTArray<nsAutoArrayPtr<float>, 2> InputChannels;

  OfflineDestinationNodeEngine(AudioDestinationNode* aNode,
                               uint32_t aNumberOfChannels,
                               uint32_t aLength,
                               float aSampleRate)
    : AudioNodeEngine(aNode)
    , mWriteIndex(0)
    , mLength(aLength)
    , mSampleRate(aSampleRate)
  {
    // These allocations might fail if content provides a huge number of
    // channels or size, but it's OK since we'll deal with the failure
    // gracefully.
    if (mInputChannels.SetLength(aNumberOfChannels)) {
      static const fallible_t fallible = fallible_t();
      for (uint32_t i = 0; i < aNumberOfChannels; ++i) {
        mInputChannels[i] = new(fallible) float[aLength];
        if (!mInputChannels[i]) {
          mInputChannels.Clear();
          break;
        }
      }
    }
  }
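
  // Runs on the MediaStreamGraph thread once for each rendering quantum of
  // WEBAUDIO_BLOCK_SIZE frames, appending the mixed input to the recording
  // buffer until mLength frames have been captured.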
  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    // Assign the input to the output purely for form's sake; this output
    // will not go anywhere.
    *aOutput = aInput;

    // Handle the case of allocation failure in the input buffer
    if (mInputChannels.IsEmpty()) {
      return;
    }

    if (mWriteIndex >= mLength) {
      NS_ASSERTION(mWriteIndex == mLength, "Overshot length");
      // Don't record any more.
      return;
    }

    // Record our input buffer
    MOZ_ASSERT(mWriteIndex < mLength, "How did this happen?");
    const uint32_t duration = std::min(WEBAUDIO_BLOCK_SIZE, mLength - mWriteIndex);
    const uint32_t commonChannelCount = std::min(mInputChannels.Length(),
                                                 aInput.mChannelData.Length());
    // First, copy as many channels in the input as we have
    for (uint32_t i = 0; i < commonChannelCount; ++i) {
      if (aInput.IsNull()) {
        PodZero(mInputChannels[i] + mWriteIndex, duration);
      } else {
        const float* inputBuffer = static_cast<const float*>(aInput.mChannelData[i]);
        if (duration == WEBAUDIO_BLOCK_SIZE) {
          // Use the optimized version of the copy with scale operation
          AudioBlockCopyChannelWithScale(inputBuffer, aInput.mVolume,
                                         mInputChannels[i] + mWriteIndex);
        } else {
          if (aInput.mVolume == 1.0f) {
            PodCopy(mInputChannels[i] + mWriteIndex, inputBuffer, duration);
          } else {
            for (uint32_t j = 0; j < duration; ++j) {
              mInputChannels[i][mWriteIndex + j] = aInput.mVolume * inputBuffer[j];
            }
          }
        }
      }
    }
    // Then, silence all of the remaining channels
    for (uint32_t i = commonChannelCount; i < mInputChannels.Length(); ++i) {
      PodZero(mInputChannels[i] + mWriteIndex, duration);
    }
    mWriteIndex += duration;

    if (mWriteIndex >= mLength) {
      NS_ASSERTION(mWriteIndex == mLength, "Overshot length");
      // Go to finished state. When the graph's current time eventually reaches
      // the end of the stream, then the main thread will be notified and we'll
      // shut down the AudioContext.
      *aFinished = true;
    }
  }
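
  // Called on the main thread once the stream has finished: wraps the
  // recorded channels in an AudioBuffer and dispatches the
  // OfflineAudioCompletionEvent on the AudioContext.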
  void FireOfflineCompletionEvent(AudioDestinationNode* aNode)
  {
    AudioContext* context = aNode->Context();
    context->Shutdown();
    // Shutdown drops self reference, but the context is still referenced by
    // aNode, which is strongly referenced by the runnable that called
    // AudioDestinationNode::FireOfflineCompletionEvent.

    AutoPushJSContext cx(context->GetJSContext());
    if (!cx) {
      return;
    }
    JSAutoRequest ar(cx);

    // Create the input buffer
    nsRefPtr<AudioBuffer> renderedBuffer = new AudioBuffer(context,
                                                           mLength,
                                                           mSampleRate);
    if (!renderedBuffer->InitializeBuffers(mInputChannels.Length(), cx)) {
      return;
    }
    for (uint32_t i = 0; i < mInputChannels.Length(); ++i) {
      renderedBuffer->SetRawChannelContents(cx, i, mInputChannels[i]);
    }

    nsRefPtr<OfflineAudioCompletionEvent> event =
        new OfflineAudioCompletionEvent(context, nullptr, nullptr);
    event->InitEvent(renderedBuffer);
    context->DispatchTrustedEvent(event);
  }

private:
  // The input to the destination node is recorded in the mInputChannels
  // buffer. When this buffer fills up with mLength frames, the buffered input
  // is sent to the main thread in order to dispatch
  // OfflineAudioCompletionEvent.
  InputChannels mInputChannels;
  // An index representing the next offset in mInputChannels to be written to.
  uint32_t mWriteIndex;
  // How many frames the OfflineAudioContext intends to produce.
  uint32_t mLength;
  float mSampleRate;
};
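
// The engine of a realtime AudioContext's destination node: passes its input
// through to the stream's output, scaled by a volume parameter that
// AudioDestinationNode::Mute()/Unmute() drive between 0 and 1.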
class DestinationNodeEngine : public AudioNodeEngine
{
public:
  explicit DestinationNodeEngine(AudioDestinationNode* aNode)
    : AudioNodeEngine(aNode)
    , mVolume(1.0f)
  {
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    *aOutput = aInput;
    aOutput->mVolume *= mVolume;
  }

  virtual void SetDoubleParameter(uint32_t aIndex, double aParam) MOZ_OVERRIDE
  {
    if (aIndex == VOLUME) {
      mVolume = aParam;
    }
  }

  enum Parameters {
    VOLUME,
  };

private:
  float mVolume;
};
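
// Whether the audio channel service (which arbitrates audible playback
// between apps, e.g. on Firefox OS) should manage this node's output.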
static bool UseAudioChannelService()
{
  return Preferences::GetBool("media.useAudioChannelService");
}

NS_IMPL_CYCLE_COLLECTION_INHERITED_1(AudioDestinationNode, AudioNode,
                                     mAudioChannelAgent)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioDestinationNode)
  NS_INTERFACE_MAP_ENTRY(nsIDOMEventListener)
  NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)
  NS_INTERFACE_MAP_ENTRY(nsISupportsWeakReference)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)
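
// Realtime destinations are created with two output channels by default,
// while offline destinations use the channel count, length, and sample rate
// requested by the OfflineAudioContext.  Offline rendering also runs in its
// own non-realtime MediaStreamGraph.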
AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                           bool aIsOffline,
                                           uint32_t aNumberOfChannels,
                                           uint32_t aLength,
                                           float aSampleRate)
  : AudioNode(aContext,
              aIsOffline ? aNumberOfChannels : 2,
              ChannelCountMode::Explicit,
              ChannelInterpretation::Speakers)
  , mFramesToProduce(aLength)
  , mAudioChannel(AudioChannel::Normal)
  , mIsOffline(aIsOffline)
  , mHasFinished(false)
  , mExtraCurrentTime(0)
  , mExtraCurrentTimeSinceLastStartedBlocking(0)
  , mExtraCurrentTimeUpdatedSinceLastStableState(false)
{
  MediaStreamGraph* graph = aIsOffline ?
                            MediaStreamGraph::CreateNonRealtimeInstance() :
                            MediaStreamGraph::GetInstance();
  AudioNodeEngine* engine = aIsOffline ?
                            new OfflineDestinationNodeEngine(this, aNumberOfChannels,
                                                             aLength, aSampleRate) :
                            static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));

  mStream = graph->CreateAudioNodeStream(engine, MediaStreamGraph::EXTERNAL_STREAM);
  mStream->AddMainThreadListener(this);

  if (!aIsOffline && UseAudioChannelService()) {
    nsCOMPtr<nsIDOMEventTarget> target = do_QueryInterface(GetOwner());
    if (target) {
      target->AddSystemEventListener(NS_LITERAL_STRING("visibilitychange"), this,
                                     /* useCapture = */ true,
                                     /* wantsUntrusted = */ false);
    }

    CreateAudioChannelAgent();
  }
}
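
// Disconnects from the audio channel service and from the graph; for offline
// contexts this also destroys the context's private non-realtime graph.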
void
AudioDestinationNode::DestroyMediaStream()
{
  if (mAudioChannelAgent && !Context()->IsOffline()) {
    mAudioChannelAgent->StopPlaying();
    mAudioChannelAgent = nullptr;

    nsCOMPtr<nsIDOMEventTarget> target = do_QueryInterface(GetOwner());
    NS_ENSURE_TRUE_VOID(target);

    target->RemoveSystemEventListener(NS_LITERAL_STRING("visibilitychange"), this,
                                      /* useCapture = */ true);
  }

  if (!mStream)
    return;

  mStream->RemoveMainThreadListener(this);
  MediaStreamGraph* graph = mStream->Graph();
  if (graph->IsNonRealtime()) {
    MediaStreamGraph::DestroyNonRealtimeInstance(graph);
  }
  AudioNode::DestroyMediaStream();
}
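
// Main-thread listener callback: fires the offline completion event exactly
// once, when the rendering stream reaches its finished state.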
void
AudioDestinationNode::NotifyMainThreadStateChanged()
{
  if (mStream->IsFinished() && !mHasFinished) {
    mHasFinished = true;
    if (mIsOffline) {
      nsCOMPtr<nsIRunnable> runnable =
        NS_NewRunnableMethod(this, &AudioDestinationNode::FireOfflineCompletionEvent);
      NS_DispatchToCurrentThread(runnable);
    }
  }
}

void
AudioDestinationNode::FireOfflineCompletionEvent()
{
  AudioNodeStream* stream = static_cast<AudioNodeStream*>(Stream());
  OfflineDestinationNodeEngine* engine =
    static_cast<OfflineDestinationNodeEngine*>(stream->Engine());
  engine->FireOfflineCompletionEvent(this);
}
uint32_t
AudioDestinationNode::MaxChannelCount() const
{
  return Context()->MaxChannelCount();
}

void
AudioDestinationNode::SetChannelCount(uint32_t aChannelCount, ErrorResult& aRv)
{
  if (aChannelCount > MaxChannelCount()) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }
  AudioNode::SetChannelCount(aChannelCount, aRv);
}

void
AudioDestinationNode::Mute()
{
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToStream(DestinationNodeEngine::VOLUME, 0.0f);
}

void
AudioDestinationNode::Unmute()
{
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToStream(DestinationNodeEngine::VOLUME, 1.0f);
}

void
AudioDestinationNode::OfflineShutdown()
{
  MOZ_ASSERT(Context() && Context()->IsOffline(),
             "Should only be called on a valid OfflineAudioContext");

  MediaStreamGraph::DestroyNonRealtimeInstance(mStream->Graph());
  mOfflineRenderingRef.Drop(this);
}
JSObject*
AudioDestinationNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aScope)
{
  return AudioDestinationNodeBinding::Wrap(aCx, aScope, this);
}

void
AudioDestinationNode::StartRendering()
{
  mOfflineRenderingRef.Take(this);
  mStream->Graph()->StartNonRealtimeProcessing(TrackRate(Context()->SampleRate()),
                                               mFramesToProduce);
}

void
AudioDestinationNode::SetCanPlay(bool aCanPlay)
{
  mStream->SetTrackEnabled(AudioNodeStream::AUDIO_TRACK, aCanPlay);
}
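
// nsIDOMEventListener: on "visibilitychange", tell the audio channel agent
// whether the owning window's docshell is in the foreground.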
NS_IMETHODIMP
AudioDestinationNode::HandleEvent(nsIDOMEvent* aEvent)
{
  nsAutoString type;
  aEvent->GetType(type);

  if (!type.EqualsLiteral("visibilitychange")) {
    return NS_ERROR_FAILURE;
  }

  nsCOMPtr<nsIDocShell> docshell = do_GetInterface(GetOwner());
  NS_ENSURE_TRUE(docshell, NS_ERROR_FAILURE);

  bool isActive = false;
  docshell->GetIsActive(&isActive);

  mAudioChannelAgent->SetVisibilityState(isActive);
  return NS_OK;
}

NS_IMETHODIMP
AudioDestinationNode::CanPlayChanged(int32_t aCanPlay)
{
  SetCanPlay(aCanPlay == AudioChannelState::AUDIO_CHANNEL_STATE_NORMAL);
  return NS_OK;
}
AudioChannel
AudioDestinationNode::MozAudioChannelType() const
{
  return mAudioChannel;
}

void
AudioDestinationNode::SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv)
{
  if (Context()->IsOffline()) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }

  if (aValue != mAudioChannel &&
      CheckAudioChannelPermissions(aValue)) {
    mAudioChannel = aValue;

    if (mAudioChannelAgent) {
      CreateAudioChannelAgent();
    }
  }
}
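
// Channels other than "normal" require an "audio-channel-<name>" permission
// from the permission manager.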
bool
AudioDestinationNode::CheckAudioChannelPermissions(AudioChannel aValue)
{
  if (!UseAudioChannelService()) {
    return true;
  }

  // Only the normal channel doesn't need permission.
  if (aValue == AudioChannel::Normal) {
    return true;
  }

  nsCOMPtr<nsIPermissionManager> permissionManager =
    do_GetService(NS_PERMISSIONMANAGER_CONTRACTID);
  if (!permissionManager) {
    return false;
  }

  nsCOMPtr<nsIScriptObjectPrincipal> sop = do_QueryInterface(GetOwner());
  NS_ASSERTION(sop, "Window didn't QI to nsIScriptObjectPrincipal!");
  nsCOMPtr<nsIPrincipal> principal = sop->GetPrincipal();

  uint32_t perm = nsIPermissionManager::UNKNOWN_ACTION;

  nsCString channel;
  channel.AssignASCII(AudioChannelValues::strings[uint32_t(aValue)].value,
                      AudioChannelValues::strings[uint32_t(aValue)].length);
  permissionManager->TestExactPermissionFromPrincipal(principal,
    nsCString(NS_LITERAL_CSTRING("audio-channel-") + channel).get(),
    &perm);

  return perm == nsIPermissionManager::ALLOW_ACTION;
}
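
// (Re)creates the audio channel agent for the current mAudioChannel type and
// registers this node as its weak callback.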
void
AudioDestinationNode::CreateAudioChannelAgent()
{
  if (mAudioChannelAgent) {
    mAudioChannelAgent->StopPlaying();
  }

  AudioChannelType type = AUDIO_CHANNEL_NORMAL;
  switch (mAudioChannel) {
    case AudioChannel::Normal:
      type = AUDIO_CHANNEL_NORMAL;
      break;
    case AudioChannel::Content:
      type = AUDIO_CHANNEL_CONTENT;
      break;
    case AudioChannel::Notification:
      type = AUDIO_CHANNEL_NOTIFICATION;
      break;
    case AudioChannel::Alarm:
      type = AUDIO_CHANNEL_ALARM;
      break;
    case AudioChannel::Telephony:
      type = AUDIO_CHANNEL_TELEPHONY;
      break;
    case AudioChannel::Ringer:
      type = AUDIO_CHANNEL_RINGER;
      break;
    case AudioChannel::Publicnotification:
      type = AUDIO_CHANNEL_PUBLICNOTIFICATION;
      break;
  }

  mAudioChannelAgent = new AudioChannelAgent();
  mAudioChannelAgent->InitWithWeakCallback(type, this);

  nsCOMPtr<nsIDocShell> docshell = do_GetInterface(GetOwner());
  if (docshell) {
    bool isActive = false;
    docshell->GetIsActive(&isActive);
    mAudioChannelAgent->SetVisibilityState(isActive);
  }

  int32_t state = 0;
  mAudioChannelAgent->StartPlaying(&state);
  SetCanPlay(state == AudioChannelState::AUDIO_CHANNEL_STATE_NORMAL);
}
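
// While this is the only node in the graph, the stream is blocked and the
// graph's current time does not advance.  The "extra current time" below is
// the wall-clock time spent blocked, which AudioContext adds to the stream
// time so that currentTime keeps progressing.  It is sampled at most once
// per stable state, so its value stays constant within a single task.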
void
AudioDestinationNode::NotifyStableState()
{
  mExtraCurrentTimeUpdatedSinceLastStableState = false;
}

static NS_DEFINE_CID(kAppShellCID, NS_APPSHELL_CID);

void
AudioDestinationNode::ScheduleStableStateNotification()
{
  nsCOMPtr<nsIAppShell> appShell = do_GetService(kAppShellCID);
  if (appShell) {
    nsCOMPtr<nsIRunnable> event =
      NS_NewRunnableMethod(this, &AudioDestinationNode::NotifyStableState);
    appShell->RunInStableState(event);
  }
}

double
AudioDestinationNode::ExtraCurrentTime()
{
  if (!mStartedBlockingDueToBeingOnlyNode.IsNull() &&
      !mExtraCurrentTimeUpdatedSinceLastStableState) {
    mExtraCurrentTimeUpdatedSinceLastStableState = true;
    mExtraCurrentTimeSinceLastStartedBlocking =
      (TimeStamp::Now() - mStartedBlockingDueToBeingOnlyNode).ToSeconds();
    ScheduleStableStateNotification();
  }
  return mExtraCurrentTime + mExtraCurrentTimeSinceLastStartedBlocking;
}
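
// Called when this becomes, or stops being, the only node in the context.
// Blocks the stream while it is alone (to save processing) and starts or
// stops accounting for the blocked interval.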
void
AudioDestinationNode::SetIsOnlyNodeForContext(bool aIsOnlyNode)
{
  if ((!mStartedBlockingDueToBeingOnlyNode.IsNull()) == aIsOnlyNode) {
    // Nothing changed.
    return;
  }

  if (!mStream) {
    // DestroyMediaStream has been called, presumably during CC Unlink().
    return;
  }

  if (mIsOffline) {
    // Don't block the destination stream for offline AudioContexts, since
    // we expect the zero data produced when there are no other nodes to
    // show up in its result buffer. Also, we would get confused by adding
    // ExtraCurrentTime before StartRendering has even been called.
    return;
  }

  if (aIsOnlyNode) {
    mStream->ChangeExplicitBlockerCount(1);
    mStartedBlockingDueToBeingOnlyNode = TimeStamp::Now();
    mExtraCurrentTimeSinceLastStartedBlocking = 0;
    // Don't do an update of mExtraCurrentTimeSinceLastStartedBlocking until
    // the next stable state.
    mExtraCurrentTimeUpdatedSinceLastStableState = true;
    ScheduleStableStateNotification();
  } else {
    // Force update of mExtraCurrentTimeSinceLastStartedBlocking if necessary
    ExtraCurrentTime();
    mExtraCurrentTime += mExtraCurrentTimeSinceLastStartedBlocking;
    mStream->ChangeExplicitBlockerCount(-1);
    mStartedBlockingDueToBeingOnlyNode = TimeStamp();
  }
}

} // namespace dom
} // namespace mozilla