gecko/dom/webidl/AudioContext.webidl
Paul Adenot 67071397ed Bug 1094764 - Implement AudioContext.suspend and friends. r=roc,ehsan
- Relevant spec text:
    - http://webaudio.github.io/web-audio-api/#widl-AudioContext-suspend-Promise
    - http://webaudio.github.io/web-audio-api/#widl-AudioContext-resume-Promise
    - http://webaudio.github.io/web-audio-api/#widl-AudioContext-close-Promise
    - http://webaudio.github.io/web-audio-api/#widl-AudioContext-state
    - http://webaudio.github.io/web-audio-api/#widl-AudioContext-onstatechange

- In a couple of words, the behavior we want:
    - A closed context cannot have new nodes created on it, but it can still
    do decodeAudioData, create buffers, and such.
    - OfflineAudioContexts don't support those methods; their transitions
    happen at startRendering and at the end of processing. onstatechange is
    used to make this observable.
    - (regular) AudioContexts support those methods. The promises should be
    resolved and onstatechange called when the operation has actually
    completed on the rendering thread. Once a context has been closed, it
    cannot transition back to "running". An AudioContext switches to "running"
    when the audio callback starts running, which allows authors to know how
    long the audio stack takes to start running.
    - MediaStreams that feed into or come out of a suspended graph should,
    respectively, not buffer at the graph input and output silence.
    - A suspended context should not be doing much on the CPU, and we should
    try to pause audio streams if we can (this behaviour is the main reason we
    need this feature in the first place: saving battery on mobile, and CPU on
    all platforms).

- Now, the implementation:
    - AudioNodeStreams are now tagged with a context id, to be able to operate
    on all the streams of a given AudioContext on the Graph thread without
    having to take a lock every time we touch the AudioContext. This happens
    in the AudioNodeStream ctor. IDs are of course constant for the lifetime
    of the node; a minimal sketch follows.
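
      A minimal sketch of the tagging, with invented names (the real type
      lives in AudioNodeStream.h and carries much more state):

        #include <cstdint>

        using AudioContextId = uint64_t;

        class AudioNodeStream {
        public:
          explicit AudioNodeStream(AudioContextId aContextId)
            : mContextId(aContextId) {}          // tagged once, in the ctor
          AudioContextId ContextId() const { return mContextId; }
        private:
          const AudioContextId mContextId;       // constant for the node's lifetime
        };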
    - When an AudioContext goes into suspended mode, the streams for this
    AudioContext are moved out of the mStreams array into a second array,
    mSuspendedStreams. Streams in mSuspendedStreams are not ordered, and are
    not processed; a sketch of the move follows.
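
      A sketch of the suspend path, reusing the AudioNodeStream stand-in
      above (the real code lives in MediaStreamGraph.cpp; this helper is
      invented):

        #include <algorithm>
        #include <vector>

        void MoveStreamsToSuspended(std::vector<AudioNodeStream*>& aStreams,
                                    std::vector<AudioNodeStream*>& aSuspended,
                                    AudioContextId aContextId) {
          // Partition: streams of the suspended context move to the back...
          auto it = std::stable_partition(
              aStreams.begin(), aStreams.end(),
              [aContextId](AudioNodeStream* s) {
                return s->ContextId() != aContextId;
              });
          // ...then get parked in mSuspendedStreams, where order is
          // irrelevant because they are never processed.
          aSuspended.insert(aSuspended.end(), it, aStreams.end());
          aStreams.erase(it, aStreams.end());
        }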
    - The MSG will automatically switch to a SystemClockDriver when it finds
    that there are no more AudioNodeStreams/Streams with an audio track. This
    is how pausing the audio subsystem and saving battery works. Subsequently,
    when the MSG finds that there are only streams in mSuspendedStreams, it
    goes to sleep (blocks on a monitor), so we save CPU, but it does not shut
    itself down. This is mostly not new behaviour (it is what the MSG has done
    since the refactoring), but it is important to note; the decision is
    sketched below.
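
      A hedged sketch of that decision (the real logic is spread across
      MediaStreamGraph and GraphDriver; all names here are invented):

        #include <cstddef>

        enum class GraphStep {
          AudioCallbackDriver,  // an audio track exists: real-time callback
          SystemClockDriver,    // no audio track: cheaper system clock
          SleepOnMonitor        // only suspended streams: block, don't shut down
        };

        GraphStep PickNextStep(bool aHasAudioTrack,
                               std::size_t aRunnableStreams,
                               std::size_t aSuspendedStreams) {
          if (aRunnableStreams == 0 && aSuspendedStreams > 0) {
            return GraphStep::SleepOnMonitor;
          }
          return aHasAudioTrack ? GraphStep::AudioCallbackDriver
                                : GraphStep::SystemClockDriver;
        }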
    - Promises are gripped (AddRef-ed) on the main thread, and then shepherded
    down the other threads and to the GraphDriver, if needed (sometimes we can
    resolve them right away). They move between threads as void* to prevent
    calling methods on them, as they are not thread safe. Then, the driver
    executes the operation, and when it's done (initializing and closing audio
    streams can take some time), we send the promise back to the main thread
    and resolve it, casting back to Promise* after asserting we're back on the
    main thread. This way, we can resolve a promise on the main thread once an
    operation has completed (suspending an audio stream, starting it again on
    resume(), etc.), without having to do bookkeeping between suspend calls
    and their results. Because Promises are not thread safe, we never AddRef
    or Release them off the main thread; only the opaque pointer crosses
    threads. A sketch follows.
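
      A runnable sketch of that discipline, with tiny stand-ins for
      mozilla::dom::Promise and NS_IsMainThread() (everything here is
      illustrative, not the Gecko API):

        #include <cassert>

        struct Promise {              // stand-in: main-thread only, refcounted
          int mRefCnt = 1;
          void AddRef() { ++mRefCnt; }
          void Release() { if (--mRefCnt == 0) { delete this; } }
          void MaybeResolve() { /* resolve with undefined */ }
        };

        bool OnMainThread() { return true; }  // stand-in for NS_IsMainThread()

        void* GripPromise(Promise* aPromise) {
          assert(OnMainThread());
          aPromise->AddRef();                   // gripped on the main thread...
          return static_cast<void*>(aPromise);  // ...then carried as void*
        }

        void ResolveGrippedPromise(void* aOpaque) {
          assert(OnMainThread());               // cast back only on the main thread
          Promise* p = static_cast<Promise*>(aOpaque);
          p->MaybeResolve();
          p->Release();                         // drop the grip
        }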
    - The stream destruction logic now takes into account that a stream can be
    destroyed while not being in mStreams; see the sketch below.
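
      A sketch of that lookup, with the same stand-ins as above (the helper
      name is invented; the real logic is part of the graph's stream
      teardown):

        #include <algorithm>
        #include <vector>

        // Returns true if the stream was found in either array and removed.
        bool RemoveStream(std::vector<AudioNodeStream*>& aStreams,
                          std::vector<AudioNodeStream*>& aSuspended,
                          AudioNodeStream* aStream) {
          for (auto* array : { &aStreams, &aSuspended }) {
            auto it = std::find(array->begin(), array->end(), aStream);
            if (it != array->end()) {
              array->erase(it);  // a stream may die while suspended
              return true;
            }
          }
          return false;
        }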
    - A graph can now switch GraphDriver twice or more per iteration, for
    example if an author calls suspend()/resume()/suspend() in the same
    script.
    - Some operations have to be done on suspended streams as well, so we now
    use a double for-loop over mSuspendedStreams and mStreams in some places
    in MediaStreamGraph.cpp, as sketched below.
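
      The shape of that double loop (the visitor wrapper is an invented
      illustration):

        #include <functional>
        #include <vector>

        void ForAllStreams(std::vector<AudioNodeStream*>& aStreams,
                           std::vector<AudioNodeStream*>& aSuspended,
                           const std::function<void(AudioNodeStream*)>& aOp) {
          // mStreams first, then mSuspendedStreams, so the operation reaches
          // running and suspended streams alike.
          for (auto* array : { &aStreams, &aSuspended }) {
            for (AudioNodeStream* stream : *array) {
              aOp(stream);
            }
          }
        }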
    - A tricky part was making sure everything worked at AudioContext
    boundaries. TrackUnionStreams that have one of their input streams
    suspended append null ticks instead; see the sketch below.
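
      A hypothetical shape of that boundary rule (AppendNullData exists on
      Gecko's MediaSegment; the other names here are invented):

        template <typename OutputSegment, typename InputSegment>
        void CopyTicksAcrossBoundary(OutputSegment& aOutput,
                                     InputSegment& aInput,
                                     bool aInputSuspended, int aTicks) {
          if (aInputSuspended) {
            aOutput.AppendNullData(aTicks);       // silence keeps time advancing
          } else {
            aOutput.AppendSlice(aInput, aTicks);  // normal copy (invented name)
          }
        }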
    - The graph ordering algorithm had to be altered to not include suspended
    streams.
    - There are some edge cases that had to be handled (adding a stream to a
    suspended graph, calling suspend()/resume() when a graph has just been
    close()d).
2015-02-27 18:22:05 +01:00

/* -*- Mode: IDL; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * The origin of this IDL file is
 * https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html
 *
 * Copyright © 2012 W3C® (MIT, ERCIM, Keio), All Rights Reserved. W3C
 * liability, trademark and document use rules apply.
 */

callback DecodeSuccessCallback = void (AudioBuffer decodedData);
callback DecodeErrorCallback = void ();

enum AudioContextState {
  "suspended",
  "running",
  "closed"
};

[Constructor,
 Constructor(AudioChannel audioChannelType)]
interface AudioContext : EventTarget {

  readonly attribute AudioDestinationNode destination;
  readonly attribute float sampleRate;
  readonly attribute double currentTime;
  readonly attribute AudioListener listener;
  readonly attribute AudioContextState state;

  [Throws]
  Promise<void> suspend();
  [Throws]
  Promise<void> resume();
  [Throws]
  Promise<void> close();

  attribute EventHandler onstatechange;

  [NewObject, Throws]
  AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);

  [Throws]
  Promise<AudioBuffer> decodeAudioData(ArrayBuffer audioData,
                                       optional DecodeSuccessCallback successCallback,
                                       optional DecodeErrorCallback errorCallback);

  // AudioNode creation
  [NewObject, Throws]
  AudioBufferSourceNode createBufferSource();

  [NewObject, Throws]
  MediaStreamAudioDestinationNode createMediaStreamDestination();

  [NewObject, Throws]
  ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
                                            optional unsigned long numberOfInputChannels = 2,
                                            optional unsigned long numberOfOutputChannels = 2);

  [NewObject, Throws]
  StereoPannerNode createStereoPanner();

  [NewObject, Throws]
  AnalyserNode createAnalyser();

  [NewObject, Throws, UnsafeInPrerendering]
  MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);

  [NewObject, Throws, UnsafeInPrerendering]
  MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);

  [NewObject, Throws]
  GainNode createGain();

  [NewObject, Throws]
  DelayNode createDelay(optional double maxDelayTime = 1);

  [NewObject, Throws]
  BiquadFilterNode createBiquadFilter();

  [NewObject, Throws]
  WaveShaperNode createWaveShaper();

  [NewObject, Throws]
  PannerNode createPanner();

  [NewObject, Throws]
  ConvolverNode createConvolver();

  [NewObject, Throws]
  ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);

  [NewObject, Throws]
  ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);

  [NewObject, Throws]
  DynamicsCompressorNode createDynamicsCompressor();

  [NewObject, Throws]
  OscillatorNode createOscillator();

  [NewObject, Throws]
  PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
};
// Mozilla extensions
partial interface AudioContext {
  // Read AudioChannel.webidl for more information about this attribute.
  [Pref="media.useAudioChannelService"]
  readonly attribute AudioChannel mozAudioChannelType;

  // These 2 events are dispatched when the AudioContext object is muted by
  // the AudioChannelService. It's called 'interrupt' because when this event
  // is dispatched on a HTMLMediaElement, the audio stream is paused.
  [Pref="media.useAudioChannelService"]
  attribute EventHandler onmozinterruptbegin;
  [Pref="media.useAudioChannelService"]
  attribute EventHandler onmozinterruptend;

  // This method is for test only.
  [ChromeOnly] AudioChannel testAudioChannelInAudioNodeStream();
};