mirror of
https://gitlab.winehq.org/wine/wine-gecko.git
synced 2024-09-13 09:24:08 -07:00
670b091738
These MediaStreams are used as a way to down-mix the input AudioChunks, and also as a way to get proper stream processing ordering. The MediaStream for the source AudioNode is an input to these streams, and these streams in turn are inputs to the MediaStream that the AudioNode that owns the AudioParam owns. This way, the Media Streams Graph processing code will order the streams so that by the time that the MediaStream for a given node is processed, all of the MediaStreams belonging to the AudioNode(s) feeding into the AudioParam have been processed. This has a tricky side-effect that those streams also being considered when determining the input block for the AudioNodeStream belonging to the AudioParam's owner AudioNode. In order to fix that, we simply special case those streams and make AudioNodeStream::ObtainInputBlock ignore them.
153 lines
4.2 KiB
C++
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef AudioParam_h_
#define AudioParam_h_

#include "AudioParamTimeline.h"
#include "nsWrapperCache.h"
#include "nsCycleCollectionParticipant.h"
#include "nsCOMPtr.h"
#include "EnableWebAudioCheck.h"
#include "nsAutoPtr.h"
#include "AudioNode.h"
#include "mozilla/dom/TypedArray.h"
#include "mozilla/Util.h"
#include "WebAudioUtils.h"

struct JSContext;
class nsIDOMWindow;

namespace mozilla {

namespace dom {

class AudioParam MOZ_FINAL : public nsWrapperCache,
|
|
public EnableWebAudioCheck,
|
|
public AudioParamTimeline
|
|
{
|
|
public:
|
|
typedef void (*CallbackType)(AudioNode*);
|
|
|
|
AudioParam(AudioNode* aNode,
|
|
CallbackType aCallback,
|
|
float aDefaultValue);
|
|
virtual ~AudioParam();
|
|
|
|
NS_IMETHOD_(nsrefcnt) AddRef(void);
|
|
NS_IMETHOD_(nsrefcnt) Release(void);
|
|
NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioParam)
|
|
|
|
AudioContext* GetParentObject() const
|
|
{
|
|
return mNode->Context();
|
|
}
|
|
|
|
virtual JSObject* WrapObject(JSContext* aCx,
|
|
JS::Handle<JSObject*> aScope) MOZ_OVERRIDE;
|
|
|
|
// We override SetValueCurveAtTime to convert the Float32Array to the wrapper
|
|
// object.
|
|
void SetValueCurveAtTime(JSContext* cx, const Float32Array& aValues, double aStartTime, double aDuration, ErrorResult& aRv)
|
|
{
|
|
AudioParamTimeline::SetValueCurveAtTime(aValues.Data(), aValues.Length(),
|
|
aStartTime, aDuration, aRv);
|
|
mCallback(mNode);
|
|
}
|
|
|
|
// We override the rest of the mutating AudioParamTimeline methods in order to make
|
|
// sure that the callback is called every time that this object gets mutated.
|
|
void SetValue(float aValue)
|
|
{
|
|
// Optimize away setting the same value on an AudioParam
|
|
if (HasSimpleValue() &&
|
|
WebAudioUtils::FuzzyEqual(GetValue(), aValue)) {
|
|
return;
|
|
}
|
|
AudioParamTimeline::SetValue(aValue);
|
|
mCallback(mNode);
|
|
}
|
|
void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
|
|
{
|
|
AudioParamTimeline::SetValueAtTime(aValue, aStartTime, aRv);
|
|
mCallback(mNode);
|
|
}
|
|
void LinearRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
|
|
{
|
|
AudioParamTimeline::LinearRampToValueAtTime(aValue, aEndTime, aRv);
|
|
mCallback(mNode);
|
|
}
|
|
void ExponentialRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
|
|
{
|
|
AudioParamTimeline::ExponentialRampToValueAtTime(aValue, aEndTime, aRv);
|
|
mCallback(mNode);
|
|
}
|
|
void SetTargetAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
|
|
{
|
|
AudioParamTimeline::SetTargetAtTime(aTarget, aStartTime, aTimeConstant, aRv);
|
|
mCallback(mNode);
|
|
}
|
|
void SetTargetValueAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
|
|
{
|
|
SetTargetAtTime(aTarget, aStartTime, aTimeConstant, aRv);
|
|
}
|
|
void CancelScheduledValues(double aStartTime)
|
|
{
|
|
AudioParamTimeline::CancelScheduledValues(aStartTime);
|
|
mCallback(mNode);
|
|
}
|
|
|
|
float DefaultValue() const
|
|
{
|
|
return mDefaultValue;
|
|
}
|
|
|
|
AudioNode* Node() const
|
|
{
|
|
return mNode;
|
|
}
|
|
|
|
const nsTArray<AudioNode::InputNode>& InputNodes() const
|
|
{
|
|
return mInputNodes;
|
|
}
|
|
|
|
void RemoveInputNode(uint32_t aIndex)
|
|
{
|
|
mInputNodes.RemoveElementAt(aIndex);
|
|
}
|
|
|
|
AudioNode::InputNode* AppendInputNode()
|
|
{
|
|
return mInputNodes.AppendElement();
|
|
}
|
|
|
|
void DisconnectFromGraphAndDestroyStream();
|
|
|
|
// May create the stream if it doesn't exist
|
|
MediaStream* Stream();
|
|
|
|
protected:
|
|
nsCycleCollectingAutoRefCnt mRefCnt;
|
|
NS_DECL_OWNINGTHREAD
|
|
|
|
private:
|
|
nsRefPtr<AudioNode> mNode;
|
|
// For every InputNode, there is a corresponding entry in mOutputParams of the
|
|
// InputNode's mInputNode.
|
|
nsTArray<AudioNode::InputNode> mInputNodes;
|
|
CallbackType mCallback;
|
|
const float mDefaultValue;
|
|
// The input port used to connect the AudioParam's stream to its node's stream
|
|
nsRefPtr<MediaInputPort> mNodeStreamPort;
|
|
};

} // namespace dom
} // namespace mozilla

#endif