Mirror of https://gitlab.winehq.org/wine/wine-gecko.git, synced 2024-09-13 09:24:08 -07:00
Backed out 3 changesets (bug 1081819) for frequent mochitest-e10s failures
Backed out changeset b78fd38002f5 (bug 1081819)
Backed out changeset ff063b9a1ea2 (bug 1081819)
Backed out changeset 322d60fc413e (bug 1081819)
parent e1306cda70
commit 6cdbfe4378
@@ -50,7 +50,8 @@ PRLogModuleInfo* gTrackUnionStreamLog;
#endif

TrackUnionStream::TrackUnionStream(DOMMediaStream* aWrapper) :
  ProcessedMediaStream(aWrapper)
  ProcessedMediaStream(aWrapper),
  mFilterCallback(nullptr)
{
#ifdef PR_LOGGING
  if (!gTrackUnionStreamLog) {

@@ -113,7 +114,7 @@ TrackUnionStream::TrackUnionStream(DOMMediaStream* aWrapper) :
          break;
        }
      }
      if (!found) {
      if (!found && (!mFilterCallback || mFilterCallback(tracks.get()))) {
        bool trackFinished = false;
        trackAdded = true;
        uint32_t mapIndex = AddTrack(mInputs[i], tracks.get(), aFrom);

@@ -152,6 +153,14 @@ TrackUnionStream::TrackUnionStream(DOMMediaStream* aWrapper) :
  }
}

// Consumers may specify a filtering callback to apply to every input track.
// Returns true to allow the track to act as an input; false to reject it entirely.

void TrackUnionStream::SetTrackIDFilter(TrackIDFilterCallback aCallback)
{
  mFilterCallback = aCallback;
}

// Forward SetTrackEnabled(output_track_id, enabled) to the Source MediaStream,
// translating the output track ID into the correct ID in the source.
void TrackUnionStream::ForwardTrackEnabled(TrackID aOutputID, bool aEnabled)

@@ -272,8 +281,6 @@ TrackUnionStream::TrackUnionStream(DOMMediaStream* aWrapper) :
        segment->AppendNullData(ticks);
        STREAM_LOG(PR_LOG_DEBUG+1, ("TrackUnionStream %p appending %lld ticks of null data to track %d",
                   this, (long long)ticks, outputTrack->GetID()));
      } else if (InMutedCycle()) {
        segment->AppendNullData(ticks);
      } else {
        MOZ_ASSERT(outputTrack->GetEnd() == GraphTimeToStreamTime(interval.mStart),
                   "Samples missing");
@@ -21,11 +21,19 @@ public:
  virtual void RemoveInput(MediaInputPort* aPort) MOZ_OVERRIDE;
  virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) MOZ_OVERRIDE;

  // Consumers may specify a filtering callback to apply to every input track.
  // Returns true to allow the track to act as an input; false to reject it entirely.
  typedef bool (*TrackIDFilterCallback)(StreamBuffer::Track*);

  void SetTrackIDFilter(TrackIDFilterCallback aCallback);

  // Forward SetTrackEnabled(output_track_id, enabled) to the Source MediaStream,
  // translating the output track ID into the correct ID in the source.
  virtual void ForwardTrackEnabled(TrackID aOutputID, bool aEnabled) MOZ_OVERRIDE;

protected:
  TrackIDFilterCallback mFilterCallback;

  // Only non-ended tracks are allowed to persist in this map.
  struct TrackMapEntry {
    // mEndOfConsumedInputTicks is the end of the input ticks that we've consumed.
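For illustration only, here is a minimal, self-contained sketch of the filter-callback pattern that the two TrackUnionStream hunks above restore. Track, UnionStream, AddInputTrack and kWantedTrackID are simplified stand-ins invented for this sketch, not the real MediaStreamGraph types; the accept/reject check mirrors the restored "!mFilterCallback || mFilterCallback(tracks.get())" test, and the example filter plays the role that FilterAudioNodeStreamTrack plays in the MediaStreamAudioDestinationNode hunk further down in this commit.

#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-ins for StreamBuffer::Track and TrackUnionStream.
struct Track {
  int32_t mID;
  int32_t GetID() const { return mID; }
};

class UnionStream {
public:
  // Mirrors the restored API: a plain function pointer that approves or rejects tracks.
  typedef bool (*TrackIDFilterCallback)(Track*);

  void SetTrackIDFilter(TrackIDFilterCallback aCallback) { mFilterCallback = aCallback; }

  // A track becomes an input only if no filter is set or the filter returns true,
  // matching the "!mFilterCallback || mFilterCallback(track)" gate in ProcessInput.
  void AddInputTrack(Track* aTrack) {
    if (!mFilterCallback || mFilterCallback(aTrack)) {
      mInputs.push_back(aTrack);
    }
  }

  size_t InputCount() const { return mInputs.size(); }

private:
  TrackIDFilterCallback mFilterCallback = nullptr;
  std::vector<Track*> mInputs;
};

// Example filter in the spirit of FilterAudioNodeStreamTrack:
// only the track with the expected ID is allowed through.
static const int32_t kWantedTrackID = 2;
static bool OnlyWantedTrack(Track* aTrack) { return aTrack->GetID() == kWantedTrackID; }

int main() {
  Track wanted{kWantedTrackID};
  Track other{1};

  UnionStream stream;
  stream.SetTrackIDFilter(OnlyWantedTrack);
  stream.AddInputTrack(&wanted);  // accepted by the filter
  stream.AddInputTrack(&other);   // rejected by the filter

  std::cout << "accepted inputs: " << stream.InputCount() << "\n";  // prints 1
  return 0;
}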
@@ -178,8 +178,6 @@ skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
[test_peerConnection_addDataChannelNoBundle.html]
skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
[test_peerConnection_webAudio.html]
skip-if = toolkit == 'gonk' # b2g (Bug 1059867)

# Bug 950317: Hack for making a cleanup hook after finishing all WebRTC cases
[test_zmedia_cleanup.html]
@@ -1,97 +0,0 @@
<!DOCTYPE HTML>
<html>
<head>
  <script type="application/javascript" src="pc.js"></script>
</head>
<body>
<pre id="test">
<script type="application/javascript;version=1.8">
  createHTML({
    bug: "1081819",
    title: "WebAudio on both input and output side of peerconnection"
  });

  // This tests WebAudio as input to a PeerConnection and a PeerConnection as
  // input to WebAudio. This is done by piping a 700Hz oscillator through an
  // analyser on the input side, the PeerConnection, and an analyser on the
  // output side. We then sanity check the audio by comparing the frequency domain
  // data from both analysers.

  runNetworkTest(function() {
    var test = new PeerConnectionTest();

    var audioContext = new AudioContext();
    var inputAnalyser;
    var outputAnalyser;

    test.setMediaConstraints([{audio: true}], []);
    test.chain.replace("PC_LOCAL_GUM", [
      function PC_LOCAL_WEBAUDIO_SOURCE(test) {
        var oscillator = audioContext.createOscillator();
        oscillator.type = 'sine';
        oscillator.frequency.value = 700;
        oscillator.start();
        inputAnalyser = audioContext.createAnalyser();
        var dest = audioContext.createMediaStreamDestination();

        oscillator.connect(inputAnalyser);
        inputAnalyser.connect(dest);
        test.pcLocal.attachMedia(dest.stream, 'audio', 'local');

        return Promise.resolve();
      }
    ]);
    test.chain.insertBefore("PC_REMOTE_SETUP_ADDSTREAM_HANDLER", [
      function PC_REMOTE_REPLACE_ATTACHMEDIA(test) {
        var realAttachMedia = test.pcRemote.attachMedia.bind(test.pcRemote);
        test.pcRemote.attachMedia = function(stream, type, side) {
          var source = audioContext.createMediaStreamSource(stream);
          outputAnalyser = audioContext.createAnalyser();
          var dest = audioContext.createMediaStreamDestination();

          source.connect(outputAnalyser);
          outputAnalyser.connect(dest);
          realAttachMedia(dest.stream, type, side);
        };
        return Promise.resolve();
      }]);
    test.chain.append([
      function CHECK_AUDIO_FLOW(test) {
        var inputData = new Uint8Array(inputAnalyser.frequencyBinCount);
        inputAnalyser.getByteFrequencyData(inputData);

        var outputData = new Uint8Array(outputAnalyser.frequencyBinCount);
        outputAnalyser.getByteFrequencyData(outputData);

        is(inputData.length, outputData.length, "Equally sized datasets");
        var numChecks = 0;
        var sanityCheckFrequencyValue = function(i, input, output) {
          // This is for sanity check only. The audio encoding applied on the
          // output will cause some fairly large deviations from the input around
          // the oscillator's frequency. However, the input analyser will reach
          // its max value of 255 for multiple indices, so allowing a deviation
          // of 50 for these is fine.
          if (input < 200 && output < 200) {
            // Save us some log output by skipping when both input and output
            // are sufficiently low. 200 is a bit higher than preferred, but on
            // Android we've seen the output being quite high (100-150) when
            // input is fairly low (0-50), i.e., on frequencies neighboring 700Hz.
            return 0;
          }
          ok(Math.abs(input - output) < 50,
             "Sane audio frequency values at index " + i + "/" + inputData.length +
             ", input=" + input + ", output=" + output);
          return 1;
        }
        for (i = 0; i < inputData.length; ++i) {
          numChecks += sanityCheckFrequencyValue(i, inputData[i], outputData[i]);
        }
        isnot(numChecks, 0, "Should have had some non-zero values analyzed");
        return Promise.resolve();
      }]);
    test.run();
  });
</script>
</pre>
</body>
</html>
@@ -23,21 +23,60 @@ NS_INTERFACE_MAP_END_INHERITING(AudioNode)
NS_IMPL_ADDREF_INHERITED(MediaStreamAudioDestinationNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(MediaStreamAudioDestinationNode, AudioNode)

static const int MEDIA_STREAM_DEST_TRACK_ID = 2;
static_assert(MEDIA_STREAM_DEST_TRACK_ID != AudioNodeStream::AUDIO_TRACK,
              "MediaStreamAudioDestinationNode::MEDIA_STREAM_DEST_TRACK_ID must be a different value than AudioNodeStream::AUDIO_TRACK");

class MediaStreamDestinationEngine : public AudioNodeEngine {
public:
  MediaStreamDestinationEngine(AudioNode* aNode, ProcessedMediaStream* aOutputStream)
    : AudioNodeEngine(aNode)
    , mOutputStream(aOutputStream)
  {
    MOZ_ASSERT(mOutputStream);
  }

  virtual void ProcessBlock(AudioNodeStream* aStream,
                            const AudioChunk& aInput,
                            AudioChunk* aOutput,
                            bool* aFinished) MOZ_OVERRIDE
  {
    *aOutput = aInput;
    StreamBuffer::Track* track = mOutputStream->EnsureTrack(MEDIA_STREAM_DEST_TRACK_ID);
    AudioSegment* segment = track->Get<AudioSegment>();
    segment->AppendAndConsumeChunk(aOutput);
  }

  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
  ProcessedMediaStream* mOutputStream;
};

// This callback is used to ensure that only the audio data for this track is audible
static bool FilterAudioNodeStreamTrack(StreamBuffer::Track* aTrack)
{
  return aTrack->GetID() == MEDIA_STREAM_DEST_TRACK_ID;
}

MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* aContext)
  : AudioNode(aContext,
              2,
              ChannelCountMode::Explicit,
              ChannelInterpretation::Speakers)
  , mDOMStream(DOMAudioNodeMediaStream::CreateTrackUnionStream(GetOwner(), this))
  , mDOMStream(DOMAudioNodeMediaStream::CreateTrackUnionStream(GetOwner(),
                                                               this))
{
  // Ensure an audio track with the correct ID is exposed to JS
  mDOMStream->CreateDOMTrack(AudioNodeStream::AUDIO_TRACK, MediaSegment::AUDIO);
  TrackUnionStream* tus = static_cast<TrackUnionStream*>(mDOMStream->GetStream());
  MOZ_ASSERT(tus == mDOMStream->GetStream()->AsProcessedStream());
  tus->SetTrackIDFilter(FilterAudioNodeStreamTrack);

  ProcessedMediaStream* outputStream = mDOMStream->GetStream()->AsProcessedStream();
  MOZ_ASSERT(!!outputStream);
  AudioNodeEngine* engine = new AudioNodeEngine(this);
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::EXTERNAL_STREAM);
  mPort = outputStream->AllocateInputPort(mStream);
  MediaStreamDestinationEngine* engine = new MediaStreamDestinationEngine(this, tus);
  mStream = aContext->Graph()->CreateAudioNodeStream(engine, MediaStreamGraph::INTERNAL_STREAM);
  mPort = tus->AllocateInputPort(mStream, 0);

  nsIDocument* doc = aContext->GetParentObject()->GetExtantDoc();
  if (doc) {
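For illustration only, a simplified sketch of what the restored MediaStreamDestinationEngine::ProcessBlock above does on every processing block: it forwards the input unchanged and tees a copy onto the destination track, the one track that FilterAudioNodeStreamTrack keeps audible. Block, Segment and DestinationEngine are stand-ins invented for this sketch; the real engine works on AudioChunk/AudioSegment objects owned by the media graph.

#include <iostream>
#include <vector>

// Stand-ins for AudioChunk and AudioSegment: a block of samples and a growing buffer.
using Block = std::vector<float>;
struct Segment {
  std::vector<float> mSamples;
  void AppendBlock(const Block& aBlock) {
    mSamples.insert(mSamples.end(), aBlock.begin(), aBlock.end());
  }
};

// Simplified engine: pass the input through and tee it into an output segment,
// analogous to ProcessBlock() copying aInput to aOutput and appending it to the
// destination stream's track.
class DestinationEngine {
public:
  explicit DestinationEngine(Segment* aOutputSegment) : mOutputSegment(aOutputSegment) {}

  void ProcessBlock(const Block& aInput, Block* aOutput) {
    *aOutput = aInput;                     // forward the audio unchanged
    mOutputSegment->AppendBlock(*aOutput); // and record it on the output track
  }

private:
  Segment* mOutputSegment;
};

int main() {
  Segment destinationTrack;
  DestinationEngine engine(&destinationTrack);

  Block input(128, 0.25f);  // one 128-sample block of constant audio
  Block output;
  engine.ProcessBlock(input, &output);

  std::cout << "forwarded " << output.size() << " samples, recorded "
            << destinationTrack.mSamples.size() << " samples\n";  // 128 and 128
  return 0;
}

In the commit itself, the appended segment is what lands on MEDIA_STREAM_DEST_TRACK_ID, which is the only track the registered filter admits into the wrapping TrackUnionStream.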