Merge m-c to inbound.

This commit is contained in:
Ryan VanderMeulen 2013-05-07 10:10:46 -04:00
commit c8bea520cd
21 changed files with 672 additions and 543 deletions

View File

@ -689,3 +689,6 @@ pref("consoleservice.buffered", false);
// Performance testing suggests 2k is a better page size for SQLite.
pref("toolkit.storage.pageSize", 2048);
#endif
// The url of the manifest we use for ADU pings.
pref("ping.manifestURL", "https://marketplace.firefox.com/packaged.webapp");

View File

@ -190,12 +190,6 @@ public:
}
}
float ComputedValue() const
{
  // TODO: implement
  // Stub: timeline evaluation is not implemented yet, so this always
  // reports 0 regardless of any events that have been inserted.
  return 0;
}
void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
{
InsertEvent(AudioTimelineEvent(AudioTimelineEvent::SetValue, aStartTime, aValue), aRv);

View File

@ -1,442 +1,442 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioNodeStream.h"
#include "MediaStreamGraphImpl.h"
#include "AudioNodeEngine.h"
#include "ThreeDPoint.h"
using namespace mozilla::dom;
namespace mozilla {
/**
* An AudioNodeStream produces a single audio track with ID
* AUDIO_NODE_STREAM_TRACK_ID. This track has rate IdealAudioRate().
* Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
*/
static const int AUDIO_NODE_STREAM_TRACK_ID = 1;
AudioNodeStream::~AudioNodeStream()
{
  // Leak-checking bookkeeping only; balances the matching MOZ_COUNT_CTOR
  // in the constructor (not visible in this chunk).
  MOZ_COUNT_DTOR(AudioNodeStream);
}
// Forwards a stream-time parameter change to the graph by posting a
// ControlMessage; the actual update happens in SetStreamTimeParameterImpl
// when Run() executes (presumably on the graph thread -- confirm against
// MediaStreamGraphImpl::AppendMessage).
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
                                        double aStreamTime)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* mRelativeToStream;
    uint32_t mIndex;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological -- a member call through a
  // null `this` is already undefined behavior, so this can never fire
  // meaningfully and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aRelativeToStream, aStreamTime));
}
// Applies a stream-time parameter change; invoked from the ControlMessage
// posted by SetStreamTimeParameter. Converts the caller-supplied seconds
// value into track ticks for the engine:
//   seconds -> media time (negative values clamped to 0)
//   -> graph time relative to aRelativeToStream
//   -> this stream's time -> ticks at IdealAudioRate(), rounded up.
void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                            double aStreamTime)
{
  StreamTime streamTime = std::max<MediaTime>(0, SecondsToMediaTime(aStreamTime));
  GraphTime graphTime = aRelativeToStream->StreamTimeToGraphTime(streamTime);
  StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
  TrackTicks ticks = TimeToTicksRoundUp(IdealAudioRate(), thisStreamTime);
  mEngine->SetStreamTimeParameter(aIndex, ticks);
}
/**
 * Asynchronously sets the double-valued engine parameter at aIndex.
 *
 * The change is not applied immediately: a ControlMessage is appended to the
 * graph and the engine's SetDoubleParameter() runs when the graph processes
 * the message (presumably on the graph thread -- confirm against
 * MediaStreamGraphImpl::AppendMessage).
 *
 * @param aIndex  Engine-defined parameter index.
 * @param aValue  New value for the parameter.
 */
void
AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetDoubleParameter(mIndex, mValue);
    }
    double mValue;
    uint32_t mIndex;
  };
  // The former MOZ_ASSERT(this) was removed: a member call through a null
  // `this` is already undefined behavior, so the assertion could never fire
  // meaningfully. This also matches SetTimelineParameter, which has no such
  // assert.
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
/**
 * Asynchronously sets the int32-valued engine parameter at aIndex.
 *
 * Mirrors SetDoubleParameter: the update is posted to the graph as a
 * ControlMessage and applied when the message runs (presumably on the
 * graph thread -- confirm against MediaStreamGraphImpl::AppendMessage).
 *
 * @param aIndex  Engine-defined parameter index.
 * @param aValue  New value for the parameter.
 */
void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetInt32Parameter(mIndex, mValue);
    }
    int32_t mValue;
    uint32_t mIndex;
  };
  // The former MOZ_ASSERT(this) was removed: a member call through a null
  // `this` is already undefined behavior, so the assertion was dead code.
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
// Posts a copy of an AudioParamTimeline to the graph; the engine picks it up
// when the ControlMessage runs (presumably on the graph thread -- confirm).
void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioParamTimeline& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioParamTimeline& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue);
    }
    // Copy of the timeline; keeps the data alive until the message runs.
    AudioParamTimeline mValue;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
// Asynchronously sets a 3D-point (e.g. position/orientation) engine
// parameter by posting a ControlMessage to the graph.
void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological (null `this` is already
  // UB) and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
// Transfers ownership of a shared float-array buffer to the engine. The
// already_AddRefed reference is captured by the message's nsRefPtr and
// handed to the engine via forget() when the message runs, so no extra
// refcount churn happens on the graph thread.
void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
      : ControlMessage(aStream), mBuffer(aBuffer) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetBuffer(mBuffer.forget());
    }
    nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological (null `this` is already
  // UB) and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aBuffer));
}
// Asynchronously updates the channel-mixing configuration (channel count,
// count mode, interpretation) by posting a ControlMessage; the values are
// applied in SetChannelMixingParametersImpl when the message runs.
void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                         mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological (null `this` is already
  // UB) and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
                                         aChannelCountMode,
                                         aChannelInterpretation));
}
// Applies the mixing configuration; invoked from the ControlMessage posted
// by SetChannelMixingParameters. The values are consumed by
// ObtainInputBlock when deciding the output channel count and up/down-mix
// strategy.
void
AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                                ChannelCountMode aChannelCountMode,
                                                ChannelInterpretation aChannelInterpretation)
{
  // Make sure that we're not clobbering any significant bits by fitting these
  // values in 16 bits.
  MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
  MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);
  mNumberOfInputChannels = aNumberOfChannels;
  mMixingMode.mChannelCountMode = aChannelCountMode;
  mMixingMode.mChannelInterpretation = aChannelInterpretation;
}
// Returns this stream's single audio track (AUDIO_NODE_STREAM_TRACK_ID),
// creating it on first use. Creation notifies every listener with
// TRACK_EVENT_CREATED before the track is added to the buffer.
StreamBuffer::Track*
AudioNodeStream::EnsureTrack()
{
  StreamBuffer::Track* track = mBuffer.FindTrack(AUDIO_NODE_STREAM_TRACK_ID);
  if (!track) {
    nsAutoPtr<MediaSegment> segment(new AudioSegment());
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      // Listeners see an empty segment here; AddTrack below takes ownership.
      l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0,
                                  MediaStreamListener::TRACK_EVENT_CREATED,
                                  *segment);
    }
    track = &mBuffer.AddTrack(AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0, segment.forget());
  }
  return track;
}
bool
AudioNodeStream::AllInputsFinished() const
{
uint32_t inputCount = mInputs.Length();
for (uint32_t i = 0; i < inputCount; ++i) {
if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) {
return false;
}
}
return !!inputCount;
}
// Computes one mixed input block for input port aPortIndex into aTmpChunk.
// Gathers the last chunk of every contributing input connected to that port
// (skipping finished streams, AudioParam streams, and null chunks), derives
// the output channel count from the ChannelCountMode, then up-/down-mixes
// each chunk and sums them with each chunk's volume applied.
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }
    inputChunks.AppendElement(chunk);
    // Track the widest channel layout seen so far.
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }
  switch (mMixingMode.mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    break;
  }
  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    // No contributing inputs: emit a null (silent) block.
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    // Single input already in the right layout: share it without mixing.
    aTmpChunk = *inputChunks[0];
    return;
  }
  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // Shared all-zero channel used to pad chunks that have too few channels
  // when the interpretation is not "speakers".
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mMixingMode.mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels by zeros
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mMixingMode.mChannelInterpretation == ChannelInterpretation::Speakers) {
        // Speaker interpretation: down-mix into scratch storage, then point
        // the channel list at the down-mixed data.
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }
        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);
        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the remaining channels
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }
    // Accumulate this chunk into the output: the first chunk initializes
    // each output channel (copy, or zero-fill for a null channel), later
    // chunks are added on top, each scaled by its chunk's mVolume.
    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
// Produces one WEBAUDIO_BLOCK_SIZE block of output: obtains mixed input for
// every input port, runs the engine, appends the result (or null data) to
// the track, and notifies listeners. aFrom/aTo are unused because the block
// size is fixed for AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  if (mMarkAsFinishedAfterThisBlock) {
    // This stream was finished the last time that we looked at it, and all
    // of the depending streams have finished their output as well, so now
    // it's time to mark this stream as finished.
    FinishOutput();
  }
  StreamBuffer::Track* track = EnsureTrack();
  AudioSegment* segment = track->Get<AudioSegment>();
  // NOTE(review): only mLastChunks[0] is (re)initialized here even though
  // ProduceAudioBlocksOnPorts below may fill more entries -- presumably the
  // engine sizes/fills the rest; confirm.
  mLastChunks.SetLength(1);
  mLastChunks[0].SetNull(0);
  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      // Single-input/single-output fast path.
      mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    if (finished) {
      // Defer finishing one block so dependents can consume this output.
      mMarkAsFinishedAfterThisBlock = true;
    }
  }
  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    // Externally visible stream: keep the actual audio in the track.
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    // Internal-only stream: advance the track with null data of equal length.
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }
  // Notify listeners of the newly produced block via a temporary segment.
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
// Current duration of this stream's audio track, in track ticks.
TrackTicks
AudioNodeStream::GetCurrentPosition()
{
  AudioSegment* audio = EnsureTrack()->Get<AudioSegment>();
  return audio->GetDuration();
}
// Marks this stream's track as ended, finishes the stream on the graph
// thread, and notifies every listener with TRACK_EVENT_ENDED. Idempotent:
// returns early if the stream has already finished.
void
AudioNodeStream::FinishOutput()
{
  if (IsFinishedOnGraphThread()) {
    return;
  }
  StreamBuffer::Track* track = EnsureTrack();
  track->SetEnded();
  FinishOnGraphThread();
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(),
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
}
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioNodeStream.h"
#include "MediaStreamGraphImpl.h"
#include "AudioNodeEngine.h"
#include "ThreeDPoint.h"
using namespace mozilla::dom;
namespace mozilla {
/**
* An AudioNodeStream produces a single audio track with ID
* AUDIO_NODE_STREAM_TRACK_ID. This track has rate IdealAudioRate().
* Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples.
*/
static const int AUDIO_NODE_STREAM_TRACK_ID = 1;
AudioNodeStream::~AudioNodeStream()
{
  // Leak-checking bookkeeping only; balances the matching MOZ_COUNT_CTOR
  // in the constructor (not visible in this chunk).
  MOZ_COUNT_DTOR(AudioNodeStream);
}
// Forwards a stream-time parameter change to the graph by posting a
// ControlMessage; the actual update happens in SetStreamTimeParameterImpl
// when Run() executes (presumably on the graph thread -- confirm against
// MediaStreamGraphImpl::AppendMessage).
void
AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, MediaStream* aRelativeToStream,
                                        double aStreamTime)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream,
            double aStreamTime)
      : ControlMessage(aStream), mStreamTime(aStreamTime),
        mRelativeToStream(aRelativeToStream), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime);
    }
    double mStreamTime;
    MediaStream* mRelativeToStream;
    uint32_t mIndex;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological -- a member call through a
  // null `this` is already undefined behavior, so this can never fire
  // meaningfully and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aRelativeToStream, aStreamTime));
}
// Applies a stream-time parameter change; invoked from the ControlMessage
// posted by SetStreamTimeParameter. Converts the caller-supplied seconds
// value into track ticks for the engine:
//   seconds -> media time (negative values clamped to 0)
//   -> graph time relative to aRelativeToStream
//   -> this stream's time -> ticks at IdealAudioRate(), rounded up.
void
AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream,
                                            double aStreamTime)
{
  StreamTime streamTime = std::max<MediaTime>(0, SecondsToMediaTime(aStreamTime));
  GraphTime graphTime = aRelativeToStream->StreamTimeToGraphTime(streamTime);
  StreamTime thisStreamTime = GraphTimeToStreamTimeOptimistic(graphTime);
  TrackTicks ticks = TimeToTicksRoundUp(IdealAudioRate(), thisStreamTime);
  mEngine->SetStreamTimeParameter(aIndex, ticks);
}
/**
 * Asynchronously sets the double-valued engine parameter at aIndex.
 *
 * The change is not applied immediately: a ControlMessage is appended to the
 * graph and the engine's SetDoubleParameter() runs when the graph processes
 * the message (presumably on the graph thread -- confirm against
 * MediaStreamGraphImpl::AppendMessage).
 *
 * @param aIndex  Engine-defined parameter index.
 * @param aValue  New value for the parameter.
 */
void
AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetDoubleParameter(mIndex, mValue);
    }
    double mValue;
    uint32_t mIndex;
  };
  // The former MOZ_ASSERT(this) was removed: a member call through a null
  // `this` is already undefined behavior, so the assertion could never fire
  // meaningfully. This also matches SetTimelineParameter, which has no such
  // assert.
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
/**
 * Asynchronously sets the int32-valued engine parameter at aIndex.
 *
 * Mirrors SetDoubleParameter: the update is posted to the graph as a
 * ControlMessage and applied when the message runs (presumably on the
 * graph thread -- confirm against MediaStreamGraphImpl::AppendMessage).
 *
 * @param aIndex  Engine-defined parameter index.
 * @param aValue  New value for the parameter.
 */
void
AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetInt32Parameter(mIndex, mValue);
    }
    int32_t mValue;
    uint32_t mIndex;
  };
  // The former MOZ_ASSERT(this) was removed: a member call through a null
  // `this` is already undefined behavior, so the assertion was dead code.
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
// Posts a copy of an AudioParamTimeline to the graph; the engine picks it up
// when the ControlMessage runs (presumably on the graph thread -- confirm).
void
AudioNodeStream::SetTimelineParameter(uint32_t aIndex,
                                      const AudioParamTimeline& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex,
            const AudioParamTimeline& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetTimelineParameter(mIndex, mValue);
    }
    // Copy of the timeline; keeps the data alive until the message runs.
    AudioParamTimeline mValue;
    uint32_t mIndex;
  };
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
// Asynchronously sets a 3D-point (e.g. position/orientation) engine
// parameter by posting a ControlMessage to the graph.
void
AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue)
      : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetThreeDPointParameter(mIndex, mValue);
    }
    ThreeDPoint mValue;
    uint32_t mIndex;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological (null `this` is already
  // UB) and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aIndex, aValue));
}
// Transfers ownership of a shared float-array buffer to the engine. The
// already_AddRefed reference is captured by the message's nsRefPtr and
// handed to the engine via forget() when the message runs, so no extra
// refcount churn happens on the graph thread.
void
AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer)
      : ControlMessage(aStream), mBuffer(aBuffer) {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->Engine()->
          SetBuffer(mBuffer.forget());
    }
    nsRefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological (null `this` is already
  // UB) and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aBuffer));
}
// Asynchronously updates the channel-mixing configuration (channel count,
// count mode, interpretation) by posting a ControlMessage; the values are
// applied in SetChannelMixingParametersImpl when the message runs.
void
AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels,
                                            ChannelCountMode aChannelCountMode,
                                            ChannelInterpretation aChannelInterpretation)
{
  class Message : public ControlMessage {
  public:
    Message(AudioNodeStream* aStream,
            uint32_t aNumberOfChannels,
            ChannelCountMode aChannelCountMode,
            ChannelInterpretation aChannelInterpretation)
      : ControlMessage(aStream),
        mNumberOfChannels(aNumberOfChannels),
        mChannelCountMode(aChannelCountMode),
        mChannelInterpretation(aChannelInterpretation)
    {}
    virtual void Run()
    {
      static_cast<AudioNodeStream*>(mStream)->
          SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode,
                                         mChannelInterpretation);
    }
    uint32_t mNumberOfChannels;
    ChannelCountMode mChannelCountMode;
    ChannelInterpretation mChannelInterpretation;
  };
  // NOTE(review): MOZ_ASSERT(this) is tautological (null `this` is already
  // UB) and could be removed.
  MOZ_ASSERT(this);
  GraphImpl()->AppendMessage(new Message(this, aNumberOfChannels,
                                         aChannelCountMode,
                                         aChannelInterpretation));
}
// Applies the mixing configuration; invoked from the ControlMessage posted
// by SetChannelMixingParameters. The values are consumed by
// ObtainInputBlock when deciding the output channel count and up/down-mix
// strategy.
void
AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels,
                                                ChannelCountMode aChannelCountMode,
                                                ChannelInterpretation aChannelInterpretation)
{
  // Make sure that we're not clobbering any significant bits by fitting these
  // values in 16 bits.
  MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX);
  MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX);
  mNumberOfInputChannels = aNumberOfChannels;
  mMixingMode.mChannelCountMode = aChannelCountMode;
  mMixingMode.mChannelInterpretation = aChannelInterpretation;
}
// Returns this stream's single audio track (AUDIO_NODE_STREAM_TRACK_ID),
// creating it on first use. Creation notifies every listener with
// TRACK_EVENT_CREATED before the track is added to the buffer.
StreamBuffer::Track*
AudioNodeStream::EnsureTrack()
{
  StreamBuffer::Track* track = mBuffer.FindTrack(AUDIO_NODE_STREAM_TRACK_ID);
  if (!track) {
    nsAutoPtr<MediaSegment> segment(new AudioSegment());
    for (uint32_t j = 0; j < mListeners.Length(); ++j) {
      MediaStreamListener* l = mListeners[j];
      // Listeners see an empty segment here; AddTrack below takes ownership.
      l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0,
                                  MediaStreamListener::TRACK_EVENT_CREATED,
                                  *segment);
    }
    track = &mBuffer.AddTrack(AUDIO_NODE_STREAM_TRACK_ID, IdealAudioRate(), 0, segment.forget());
  }
  return track;
}
bool
AudioNodeStream::AllInputsFinished() const
{
uint32_t inputCount = mInputs.Length();
for (uint32_t i = 0; i < inputCount; ++i) {
if (!mInputs[i]->GetSource()->IsFinishedOnGraphThread()) {
return false;
}
}
return !!inputCount;
}
// Computes one mixed input block for input port aPortIndex into aTmpChunk.
// Gathers the last chunk of every contributing input connected to that port
// (skipping finished streams, AudioParam streams, and null chunks), derives
// the output channel count from the ChannelCountMode, then up-/down-mixes
// each chunk and sums them with each chunk's volume applied.
void
AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  nsAutoTArray<AudioChunk*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsFinishedOnGraphThread() ||
        a->IsAudioParamStream()) {
      continue;
    }
    AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull()) {
      continue;
    }
    inputChunks.AppendElement(chunk);
    // Track the widest channel layout seen so far.
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->mChannelData.Length());
  }
  switch (mMixingMode.mChannelCountMode) {
  case ChannelCountMode::Explicit:
    // Disregard the output channel count that we've calculated, and just use
    // mNumberOfInputChannels.
    outputChannelCount = mNumberOfInputChannels;
    break;
  case ChannelCountMode::Clamped_max:
    // Clamp the computed output channel count to mNumberOfInputChannels.
    outputChannelCount = std::min(outputChannelCount, mNumberOfInputChannels);
    break;
  case ChannelCountMode::Max:
    // Nothing to do here, just shut up the compiler warning.
    break;
  }
  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0) {
    // No contributing inputs: emit a null (silent) block.
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }
  if (inputChunkCount == 1 &&
      inputChunks[0]->mChannelData.Length() == outputChannelCount) {
    // Single input already in the right layout: share it without mixing.
    aTmpChunk = *inputChunks[0];
    return;
  }
  AllocateAudioBlock(outputChannelCount, &aTmpChunk);
  // Shared all-zero channel used to pad chunks that have too few channels
  // when the interpretation is not "speakers".
  float silenceChannel[WEBAUDIO_BLOCK_SIZE] = {0.f};
  // The static storage here should be 1KB, so it's fine
  nsAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE> downmixBuffer;
  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AudioChunk* chunk = inputChunks[i];
    nsAutoTArray<const void*,GUESS_AUDIO_CHANNELS> channels;
    channels.AppendElements(chunk->mChannelData);
    if (channels.Length() < outputChannelCount) {
      if (mMixingMode.mChannelInterpretation == ChannelInterpretation::Speakers) {
        AudioChannelsUpMix(&channels, outputChannelCount, nullptr);
        NS_ASSERTION(outputChannelCount == channels.Length(),
                     "We called GetAudioChannelsSuperset to avoid this");
      } else {
        // Fill up the remaining channels by zeros
        for (uint32_t j = channels.Length(); j < outputChannelCount; ++j) {
          channels.AppendElement(silenceChannel);
        }
      }
    } else if (channels.Length() > outputChannelCount) {
      if (mMixingMode.mChannelInterpretation == ChannelInterpretation::Speakers) {
        // Speaker interpretation: down-mix into scratch storage, then point
        // the channel list at the down-mixed data.
        nsAutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
        outputChannels.SetLength(outputChannelCount);
        downmixBuffer.SetLength(outputChannelCount * WEBAUDIO_BLOCK_SIZE);
        for (uint32_t j = 0; j < outputChannelCount; ++j) {
          outputChannels[j] = &downmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
        }
        AudioChannelsDownMix(channels, outputChannels.Elements(),
                             outputChannelCount, WEBAUDIO_BLOCK_SIZE);
        channels.SetLength(outputChannelCount);
        for (uint32_t j = 0; j < channels.Length(); ++j) {
          channels[j] = outputChannels[j];
        }
      } else {
        // Drop the remaining channels
        channels.RemoveElementsAt(outputChannelCount,
                                  channels.Length() - outputChannelCount);
      }
    }
    // Accumulate this chunk into the output: the first chunk initializes
    // each output channel (copy, or zero-fill for a null channel), later
    // chunks are added on top, each scaled by its chunk's mVolume.
    for (uint32_t c = 0; c < channels.Length(); ++c) {
      const float* inputData = static_cast<const float*>(channels[c]);
      float* outputData = static_cast<float*>(const_cast<void*>(aTmpChunk.mChannelData[c]));
      if (inputData) {
        if (i == 0) {
          AudioBlockCopyChannelWithScale(inputData, chunk->mVolume, outputData);
        } else {
          AudioBlockAddChannelWithScale(inputData, chunk->mVolume, outputData);
        }
      } else {
        if (i == 0) {
          memset(outputData, 0, WEBAUDIO_BLOCK_SIZE*sizeof(float));
        }
      }
    }
  }
}
// The MediaStreamGraph guarantees that this is actually one block, for
// AudioNodeStreams.
// Produces one WEBAUDIO_BLOCK_SIZE block of output: obtains mixed input for
// every input port, runs the engine, appends the result (or null data) to
// the track, and notifies listeners. aFrom/aTo are unused because the block
// size is fixed for AudioNodeStreams.
void
AudioNodeStream::ProduceOutput(GraphTime aFrom, GraphTime aTo)
{
  if (mMarkAsFinishedAfterThisBlock) {
    // This stream was finished the last time that we looked at it, and all
    // of the depending streams have finished their output as well, so now
    // it's time to mark this stream as finished.
    FinishOutput();
  }
  StreamBuffer::Track* track = EnsureTrack();
  AudioSegment* segment = track->Get<AudioSegment>();
  // NOTE(review): only mLastChunks[0] is (re)initialized here even though
  // ProduceAudioBlocksOnPorts below may fill more entries -- presumably the
  // engine sizes/fills the rest; confirm.
  mLastChunks.SetLength(1);
  mLastChunks[0].SetNull(0);
  if (mInCycle) {
    // XXX DelayNode not supported yet so just produce silence
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
  } else {
    // We need to generate at least one input
    uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount());
    OutputChunks inputChunks;
    inputChunks.SetLength(maxInputs);
    for (uint16_t i = 0; i < maxInputs; ++i) {
      ObtainInputBlock(inputChunks[i], i);
    }
    bool finished = false;
    if (maxInputs <= 1 && mEngine->OutputCount() <= 1) {
      // Single-input/single-output fast path.
      mEngine->ProduceAudioBlock(this, inputChunks[0], &mLastChunks[0], &finished);
    } else {
      mEngine->ProduceAudioBlocksOnPorts(this, inputChunks, mLastChunks, &finished);
    }
    if (finished) {
      // Defer finishing one block so dependents can consume this output.
      mMarkAsFinishedAfterThisBlock = true;
    }
  }
  if (mKind == MediaStreamGraph::EXTERNAL_STREAM) {
    // Externally visible stream: keep the actual audio in the track.
    segment->AppendAndConsumeChunk(&mLastChunks[0]);
  } else {
    // Internal-only stream: advance the track with null data of equal length.
    segment->AppendNullData(mLastChunks[0].GetDuration());
  }
  // Notify listeners of the newly produced block via a temporary segment.
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioChunk copyChunk = mLastChunks[0];
    AudioSegment tmpSegment;
    tmpSegment.AppendAndConsumeChunk(&copyChunk);
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(), segment->GetDuration(), 0,
                                tmpSegment);
  }
}
// Current duration of this stream's audio track, in track ticks.
TrackTicks
AudioNodeStream::GetCurrentPosition()
{
  AudioSegment* audio = EnsureTrack()->Get<AudioSegment>();
  return audio->GetDuration();
}
// Marks this stream's track as ended, finishes the stream on the graph
// thread, and notifies every listener with TRACK_EVENT_ENDED. Idempotent:
// returns early if the stream has already finished.
void
AudioNodeStream::FinishOutput()
{
  if (IsFinishedOnGraphThread()) {
    return;
  }
  StreamBuffer::Track* track = EnsureTrack();
  track->SetEnded();
  FinishOnGraphThread();
  for (uint32_t j = 0; j < mListeners.Length(); ++j) {
    MediaStreamListener* l = mListeners[j];
    AudioSegment emptySegment;
    l->NotifyQueuedTrackChanges(Graph(), AUDIO_NODE_STREAM_TRACK_ID,
                                IdealAudioRate(),
                                track->GetSegment()->GetDuration(),
                                MediaStreamListener::TRACK_EVENT_ENDED, emptySegment);
  }
}
}

View File

@ -41,7 +41,7 @@ namespace mozilla {
void
MediaEngineWebRTC::EnumerateVideoDevices(nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources)
{
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
MutexAutoLock lock(mMutex);
if (!mCameraManager) {
return;

View File

@ -45,7 +45,7 @@
#include "video_engine/include/vie_render.h"
#include "video_engine/include/vie_capture.h"
#include "video_engine/include/vie_file.h"
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
#include "CameraPreviewMediaStream.h"
#include "DOMCameraManager.h"
#include "GonkCameraControl.h"
@ -58,7 +58,7 @@
namespace mozilla {
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
class CameraAllocateRunnable;
class GetCameraNameRunnable;
#endif
@ -82,7 +82,7 @@ class GetCameraNameRunnable;
*/
class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
, public nsRunnable
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
, public nsICameraGetCameraCallback
, public nsICameraPreviewStreamCallback
, public nsICameraTakePictureCallback
@ -94,7 +94,7 @@ class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
#endif
{
public:
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
MediaEngineWebRTCVideoSource(nsDOMCameraManager* aCameraManager,
int aIndex, uint64_t aWindowId)
: mCameraManager(aCameraManager)
@ -155,7 +155,7 @@ public:
TrackTicks &aLastEndTime);
NS_DECL_ISUPPORTS
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
NS_DECL_NSICAMERAGETCAMERACALLBACK
NS_DECL_NSICAMERAPREVIEWSTREAMCALLBACK
NS_DECL_NSICAMERATAKEPICTURECALLBACK
@ -200,7 +200,7 @@ private:
void Shutdown();
// Engine variables.
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
// MediaEngine hold this DOM object, and the MediaEngine is hold by Navigator
// Their life time is always much longer than this object. Use a raw-pointer
// here should be safe.
@ -337,7 +337,7 @@ private:
class MediaEngineWebRTC : public MediaEngine
{
public:
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
MediaEngineWebRTC(nsDOMCameraManager* aCameraManager, uint64_t aWindowId)
: mMutex("mozilla::MediaEngineWebRTC")
, mVideoEngine(nullptr)
@ -387,7 +387,7 @@ private:
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource > mAudioSources;
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
// MediaEngine hold this DOM object, and the MediaEngine is hold by Navigator
// Their life time is always much longer than this object. Use a raw-pointer
// here should be safe.

View File

@ -25,7 +25,7 @@ extern PRLogModuleInfo* GetMediaManagerLog();
NS_IMPL_THREADSAFE_ISUPPORTS1(MediaEngineWebRTCVideoSource, nsIRunnable)
// ViEExternalRenderer Callback.
#ifndef MOZ_WIDGET_GONK
#ifndef MOZ_B2G_CAMERA
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
unsigned int w, unsigned int h, unsigned int streams)
@ -156,7 +156,7 @@ MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
void
MediaEngineWebRTCVideoSource::ChooseCapability(const MediaEnginePrefs &aPrefs)
{
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
mCapability.width = aPrefs.mWidth;
mCapability.height = aPrefs.mHeight;
#else
@ -224,7 +224,7 @@ nsresult
MediaEngineWebRTCVideoSource::Allocate(const MediaEnginePrefs &aPrefs)
{
LOG((__FUNCTION__));
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kReleased && mInitDone) {
ChooseCapability(aPrefs);
@ -262,13 +262,13 @@ MediaEngineWebRTCVideoSource::Deallocate()
{
LOG((__FUNCTION__));
if (mSources.IsEmpty()) {
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
if (mState != kStopped && mState != kAllocated) {
return NS_ERROR_FAILURE;
}
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
// We do not register success callback here
NS_DispatchToMainThread(WrapRunnable(this,
@ -319,7 +319,7 @@ MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
@ -328,7 +328,7 @@ MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
}
mImageContainer = layers::LayerManager::CreateImageContainer();
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
NS_DispatchToMainThread(WrapRunnable(this,
&MediaEngineWebRTCVideoSource::StartImpl,
mCapability));
@ -367,7 +367,7 @@ MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
if (!mSources.IsEmpty()) {
return NS_OK;
}
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
if (mState != kStarted) {
@ -382,7 +382,7 @@ MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
// usage
mImage = nullptr;
}
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
NS_DispatchToMainThread(WrapRunnable(this,
&MediaEngineWebRTCVideoSource::StopImpl));
#else
@ -413,14 +413,14 @@ MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
* return from this function after cleaning up the temporary stream object
* and caling Stop() on the media source.
*/
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
*aFile = nullptr;
if (!mInitDone || mState != kAllocated) {
return NS_ERROR_FAILURE;
}
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
mLastCapture = nullptr;
NS_DispatchToMainThread(WrapRunnable(this,
@ -529,7 +529,7 @@ MediaEngineWebRTCVideoSource::Init()
{
mDeviceName[0] = '\0'; // paranoia
mUniqueId[0] = '\0';
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
nsCString deviceName;
mCameraManager->GetCameraName(mCaptureIndex, deviceName);
@ -579,7 +579,7 @@ MediaEngineWebRTCVideoSource::Shutdown()
if (!mInitDone) {
return;
}
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
if (mState == kStarted) {
@ -592,7 +592,7 @@ MediaEngineWebRTCVideoSource::Shutdown()
if (mState == kAllocated || mState == kStopped) {
Deallocate();
}
#ifndef MOZ_WIDGET_GONK
#ifndef MOZ_B2G_CAMERA
mViECapture->Release();
mViERender->Release();
mViEBase->Release();
@ -601,7 +601,7 @@ MediaEngineWebRTCVideoSource::Shutdown()
mInitDone = false;
}
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
// All these functions must be run on MainThread!
void

View File

@ -21,6 +21,13 @@ Cu.import("resource://gre/modules/OfflineCacheInstaller.jsm");
Cu.import("resource://gre/modules/SystemMessagePermissionsChecker.jsm");
Cu.import("resource://gre/modules/AppDownloadManager.jsm");
#ifdef MOZ_WIDGET_GONK
XPCOMUtils.defineLazyGetter(this, "libcutils", function() {
Cu.import("resource://gre/modules/systemlibs.js");
return libcutils;
});
#endif
function debug(aMsg) {
//dump("-*-*- Webapps.jsm : " + aMsg + "\n");
}
@ -1612,11 +1619,16 @@ this.DOMApplicationRegistry = {
}
// Try to download a new manifest.
function doRequest(oldManifest) {
function doRequest(oldManifest, headers) {
headers = headers || [];
let xhr = Cc["@mozilla.org/xmlextras/xmlhttprequest;1"]
.createInstance(Ci.nsIXMLHttpRequest);
xhr.open("GET", aData.manifestURL, true);
xhr.channel.loadFlags |= Ci.nsIRequest.INHIBIT_CACHING;
headers.forEach(function(aHeader) {
debug("Adding header: " + aHeader.name + ": " + aHeader.value);
xhr.setRequestHeader(aHeader.name, aHeader.value);
});
xhr.responseType = "json";
if (app.etag) {
debug("adding manifest etag:" + app.etag);
@ -1636,7 +1648,21 @@ this.DOMApplicationRegistry = {
// Read the current app manifest file
this._readManifests([{ id: id }], (function(aResult) {
doRequest.call(this, aResult[0].manifest);
let extraHeaders = [];
#ifdef MOZ_WIDGET_GONK
let pingManifestURL;
try {
pingManifestURL = Services.prefs.getCharPref("ping.manifestURL");
} catch(e) { }
if (pingManifestURL && pingManifestURL == aData.manifestURL) {
// Get the device info.
let device = libcutils.property_get("ro.product.model");
extraHeaders.push({ name: "X-MOZ-B2G-DEVICE",
value: device || "unknown" });
}
#endif
doRequest.call(this, aResult[0].manifest, extraHeaders);
}).bind(this));
},

View File

@ -23,6 +23,10 @@ Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/identity/IdentityUtils.jsm");
XPCOMUtils.defineLazyServiceGetter(this, "uuidgen",
"@mozilla.org/uuid-generator;1",
"nsIUUIDGenerator");
// This is the child process corresponding to nsIDOMIdentity
XPCOMUtils.defineLazyServiceGetter(this, "cpmm",
"@mozilla.org/childprocessmessagemanager;1",
@ -372,7 +376,10 @@ nsDOMIdentity.prototype = {
// Setup identifiers for current window.
let util = aWindow.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
this._id = util.outerWindowID;
// We need to inherit the id from the internalIdentity service.
// See comments below in that service's init.
this._id = this._identityInternal._id;
},
/**
@ -590,15 +597,22 @@ nsDOMIdentityInternal.prototype = {
Services.prefs.getPrefType(PREF_DEBUG) == Ci.nsIPrefBranch.PREF_BOOL
&& Services.prefs.getBoolPref(PREF_DEBUG);
this._identity = new nsDOMIdentity(this);
this._identity._init(aWindow);
let util = aWindow.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils);
this._id = util.outerWindowID;
// To avoid cross-process windowId collisions, use a uuid as an
// almost certainly unique identifier.
//
// XXX Bug 869182 - use a combination of child process id and
// innerwindow id to construct the unique id.
this._id = uuidgen.generateUUID().toString();
this._innerWindowID = util.currentInnerWindowID;
// nsDOMIdentity needs to know our _id, so this goes after
// its creation.
this._identity = new nsDOMIdentity(this);
this._identity._init(aWindow);
this._log("init was called from " + aWindow.document.location);
this._mm = cpmm;

View File

@ -1050,7 +1050,7 @@ MediaManager::GetUserMedia(bool aPrivileged, nsPIDOMWindow* aWindow,
);
}
#ifdef MOZ_WIDGET_GONK
#ifdef MOZ_B2G_CAMERA
if (mCameraManager == nullptr) {
mCameraManager = nsDOMCameraManager::CheckPermissionAndCreateInstance(aWindow);
if (!mCameraManager) {
@ -1146,7 +1146,7 @@ MediaManager::GetBackend(uint64_t aWindowId)
MutexAutoLock lock(mMutex);
if (!mBackend) {
#if defined(MOZ_WEBRTC)
#ifndef MOZ_WIDGET_GONK
#ifndef MOZ_B2G_CAMERA
mBackend = new MediaEngineWebRTC();
#else
mBackend = new MediaEngineWebRTC(mCameraManager, aWindowId);

View File

@ -16,7 +16,13 @@ Cu.import("resource://gre/modules/NetUtil.jsm");
const RIL_MMSSERVICE_CONTRACTID = "@mozilla.org/mms/rilmmsservice;1";
const RIL_MMSSERVICE_CID = Components.ID("{217ddd76-75db-4210-955d-8806cd8d87f9}");
const DEBUG = false;
let DEBUG = false;
// Read debug setting from pref.
try {
let debugPref = Services.prefs.getBoolPref("mms.debugging.enabled");
DEBUG = DEBUG || debugPref;
} catch (e) {}
const kSmsSendingObserverTopic = "sms-sending";
const kSmsSentObserverTopic = "sms-sent";
@ -142,8 +148,8 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
* Callback when |connectTimer| is timeout or cancelled by shutdown.
*/
onConnectTimerTimeout: function onConnectTimerTimeout() {
debug("onConnectTimerTimeout: " + this.pendingCallbacks.length
+ " pending callbacks");
if (DEBUG) debug("onConnectTimerTimeout: " + this.pendingCallbacks.length
+ " pending callbacks");
while (this.pendingCallbacks.length) {
let callback = this.pendingCallbacks.shift();
callback(false);
@ -154,7 +160,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
* Callback when |disconnectTimer| is timeout or cancelled by shutdown.
*/
onDisconnectTimerTimeout: function onDisconnectTimerTimeout() {
debug("onDisconnectTimerTimeout: deactivate the MMS data call.");
if (DEBUG) debug("onDisconnectTimerTimeout: deactivate the MMS data call.");
if (this.connected) {
gRIL.deactivateDataCallByType("mms");
}
@ -174,9 +180,9 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
this.port = Services.prefs.getIntPref("ril.mms.mmsport");
this.updateProxyInfo();
} catch (e) {
debug("Unable to initialize the MMS proxy settings from the" +
"preference. This could happen at the first-run. Should be" +
"available later.");
if (DEBUG) debug("Unable to initialize the MMS proxy settings from the" +
"preference. This could happen at the first-run. Should be" +
"available later.");
this.clearMmsProxySettings();
}
this.connected = gRIL.getDataCallStateByType("mms") ==
@ -190,7 +196,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
*/
isVoiceRoaming: function isVoiceRoaming() {
let isRoaming = gRIL.rilContext.voice.roaming;
debug("isVoiceRoaming = " + isRoaming);
if (DEBUG) debug("isVoiceRoaming = " + isRoaming);
return isRoaming;
},
@ -211,7 +217,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
// If the MMS network is not yet connected, buffer the
// MMS request and try to setup the MMS network first.
if (!this.connected) {
debug("acquire: buffer the MMS request and setup the MMS data call.");
if (DEBUG) debug("acquire: buffer the MMS request and setup the MMS data call.");
this.pendingCallbacks.push(callback);
gRIL.setupDataCallByType("mms");
@ -252,7 +258,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
*/
updateProxyInfo: function updateProxyInfo() {
if (this.proxy === null || this.port === null) {
debug("updateProxyInfo: proxy or port is not yet decided." );
if (DEBUG) debug("updateProxyInfo: proxy or port is not yet decided." );
return;
}
@ -260,7 +266,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
gpps.newProxyInfo("http", this.proxy, this.port,
Ci.nsIProxyInfo.TRANSPARENT_PROXY_RESOLVES_HOST,
-1, null);
debug("updateProxyInfo: " + JSON.stringify(this.proxyInfo));
if (DEBUG) debug("updateProxyInfo: " + JSON.stringify(this.proxyInfo));
},
/**
@ -297,8 +303,8 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
return;
}
debug("Got the MMS network connected! Resend the buffered " +
"MMS requests: number: " + this.pendingCallbacks.length);
if (DEBUG) debug("Got the MMS network connected! Resend the buffered " +
"MMS requests: number: " + this.pendingCallbacks.length);
this.connectTimer.cancel();
while (this.pendingCallbacks.length) {
let callback = this.pendingCallbacks.shift();
@ -324,8 +330,8 @@ XPCOMUtils.defineLazyGetter(this, "gMmsConnection", function () {
break;
}
} catch (e) {
debug("Failed to update the MMS proxy settings from the" +
"preference.");
if (DEBUG) debug("Failed to update the MMS proxy settings from the" +
"preference.");
this.clearMmsProxySettings();
}
break;
@ -358,14 +364,14 @@ MmsProxyFilter.prototype = {
}
if (this.url != url) {
debug("applyFilter: content uri = " + this.url +
" is not matched url = " + url + " .");
if (DEBUG) debug("applyFilter: content uri = " + this.url +
" is not matched url = " + url + " .");
return proxyInfo;
}
// Fall-through, reutrn the MMS proxy info.
debug("applyFilter: MMSC is matched: " +
JSON.stringify({ url: this.url,
proxyInfo: gMmsConnection.proxyInfo }));
if (DEBUG) debug("applyFilter: MMSC is matched: " +
JSON.stringify({ url: this.url,
roxyInfo: gMmsConnection.proxyInfo }));
return gMmsConnection.proxyInfo ? gMmsConnection.proxyInfo : proxyInfo;
}
};
@ -399,7 +405,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsTransactionHelper", function () {
return;
}
debug("sendRequest: register proxy filter to " + url);
if (DEBUG) debug("sendRequest: register proxy filter to " + url);
let proxyFilter = new MmsProxyFilter(url);
gpps.registerFilter(proxyFilter, 0);
@ -440,8 +446,8 @@ XPCOMUtils.defineLazyGetter(this, "gMmsTransactionHelper", function () {
// Setup event listeners
xhr.onerror = function () {
debug("xhr error, response headers: " +
xhr.getAllResponseHeaders());
if (DEBUG) debug("xhr error, response headers: " +
xhr.getAllResponseHeaders());
releaseMmsConnectionAndCallback(xhr.status, null);
};
xhr.onreadystatechange = function () {
@ -452,14 +458,14 @@ XPCOMUtils.defineLazyGetter(this, "gMmsTransactionHelper", function () {
let data = null;
switch (xhr.status) {
case HTTP_STATUS_OK: {
debug("xhr success, response headers: "
+ xhr.getAllResponseHeaders());
if (DEBUG) debug("xhr success, response headers: "
+ xhr.getAllResponseHeaders());
let array = new Uint8Array(xhr.response);
if (false) {
for (let begin = 0; begin < array.length; begin += 20) {
let partial = array.subarray(begin, begin + 20);
debug("res: " + JSON.stringify(partial));
if (DEBUG) debug("res: " + JSON.stringify(partial));
}
}
@ -467,7 +473,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsTransactionHelper", function () {
break;
}
default: {
debug("xhr done, but status = " + xhr.status);
if (DEBUG) debug("xhr done, but status = " + xhr.status);
break;
}
}
@ -478,7 +484,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsTransactionHelper", function () {
// Send request
xhr.send(istream);
} catch (e) {
debug("xhr error, can't send: " + e.message);
if (DEBUG) debug("xhr error, can't send: " + e.message);
releaseMmsConnectionAndCallback(0, null);
}
}).bind(this, method, url, istream, callback));
@ -537,7 +543,7 @@ XPCOMUtils.defineLazyGetter(this, "gMmsTransactionHelper", function () {
totalRecipients += this.countRecipients(msg.headers["cc"]);
totalRecipients += this.countRecipients(msg.headers["bcc"]);
} catch (ex) {
debug("Exception caught : " + ex);
if (DEBUG) debug("Exception caught : " + ex);
return false;
}
@ -707,7 +713,7 @@ function SendTransaction(msg) {
if (!gMmsTransactionHelper.checkMaxValuesParameters(msg)) {
//We should notify end user that the header format is wrong.
debug("Check max values parameters fail.");
if (DEBUG) debug("Check max values parameters fail.");
throw new Error("Check max values parameters fail.");
}
let messageSize = 0;
@ -753,7 +759,7 @@ function SendTransaction(msg) {
msg.headers["x-mms-message-size"] = messageSize;
// TODO: bug 809832 - support customizable max incoming/outgoing message size
debug("msg: " + JSON.stringify(msg));
if (DEBUG) debug("msg: " + JSON.stringify(msg));
this.msg = msg;
}
@ -768,7 +774,7 @@ SendTransaction.prototype = {
*/
loadBlobs: function loadBlobs(parts, callback) {
let callbackIfValid = function callbackIfValid() {
debug("All parts loaded: " + JSON.stringify(parts));
if (DEBUG) debug("All parts loaded: " + JSON.stringify(parts));
if (callback) {
callback();
}
@ -1050,7 +1056,7 @@ MmsService.prototype = {
* The nsIDOMMozMmsMessage object.
*/
broadcastMmsSystemMessage: function broadcastMmsSystemMessage(aName, aDomMessage) {
debug("Broadcasting the MMS system message: " + aName);
if (DEBUG) debug("Broadcasting the MMS system message: " + aName);
// Sadly we cannot directly broadcast the aDomMessage object
// because the system message mechamism will rewrap the object
@ -1105,7 +1111,7 @@ MmsService.prototype = {
savableMessage,
mmsStatus,
retrievedMessage) {
debug("retrievedMessage = " + JSON.stringify(retrievedMessage));
if (DEBUG) debug("retrievedMessage = " + JSON.stringify(retrievedMessage));
// The absence of the field does not indicate any default
// value. So we go check the same field in the retrieved
@ -1147,7 +1153,7 @@ MmsService.prototype = {
// At this point we could send a message to content to notify the user
// that storing an incoming MMS failed, most likely due to a full disk.
// The end user has to retrieve the MMS again.
debug("Could not store MMS " + domMessage.id +
if (DEBUG) debug("Could not store MMS " + domMessage.id +
", error code " + rv);
return;
}
@ -1167,7 +1173,7 @@ MmsService.prototype = {
// At this point we could send a message to content to notify the
// user that storing an incoming MMS notification indication failed,
// ost likely due to a full disk.
debug("Could not store MMS " + JSON.stringify(savableMessage) +
if (DEBUG) debug("Could not store MMS " + JSON.stringify(savableMessage) +
", error code " + rv);
// Because MMSC will resend the notification indication once we don't
// response the notification. Hope the end user will clean some space
@ -1228,8 +1234,8 @@ MmsService.prototype = {
(function (aRv, aMessageRecord) {
if (Ci.nsIMobileMessageCallback.SUCCESS_NO_ERROR === aRv
&& aMessageRecord) {
debug("We already got the NotificationIndication with transactionId = "
+ transactionId + " before.");
if (DEBUG) debug("We already got the NotificationIndication with transactionId = "
+ transactionId + " before.");
return;
}
@ -1256,7 +1262,7 @@ MmsService.prototype = {
// 2. Fire "mms-delivery-success" or "mms-delivery-error" observer
// topics to MobileMessageManager.
let messageId = msg.headers["message-id"];
debug("handleDeliveryIndication: got delivery report for " + messageId);
if (DEBUG) debug("handleDeliveryIndication: got delivery report for " + messageId);
},
/**
@ -1278,7 +1284,7 @@ MmsService.prototype = {
* header is available, Content-Location header SHALL be used if available.
*/
createSavableFromParams: function createSavableFromParams(aParams) {
debug("createSavableFromParams: aParams: " + JSON.stringify(aParams));
if (DEBUG) debug("createSavableFromParams: aParams: " + JSON.stringify(aParams));
let message = {};
let smil = aParams.smil;
@ -1348,14 +1354,14 @@ MmsService.prototype = {
message["timestamp"] = Date.now();
message["receivers"] = receivers;
debug("createSavableFromParams: message: " + JSON.stringify(message));
if (DEBUG) debug("createSavableFromParams: message: " + JSON.stringify(message));
return message;
},
// nsIMmsService
send: function send(aParams, aRequest) {
debug("send: aParams: " + JSON.stringify(aParams));
if (DEBUG) debug("send: aParams: " + JSON.stringify(aParams));
if (aParams.receivers.length == 0) {
aRequest.notifySendMmsMessageFailed(Ci.nsIMobileMessageCallback.INTERNAL_ERROR);
return;
@ -1364,21 +1370,25 @@ MmsService.prototype = {
let self = this;
let sendTransactionCb = function sendTransactionCb(aRecordId, aIsSentSuccess) {
debug("The success status of sending transaction: " + aIsSentSuccess);
if (DEBUG) debug("The success status of sending transaction: " + aIsSentSuccess);
gMobileMessageDatabaseService
.setMessageDelivery(aRecordId,
null,
aIsSentSuccess ? DELIVERY_SENT : DELIVERY_ERROR,
aIsSentSuccess ? null : DELIVERY_STATUS_ERROR,
function notifySetDeliveryResult(aRv, aDomMessage) {
debug("Marking the delivery state/staus is done. Notify sent or failed.");
if (DEBUG) debug("Marking the delivery state/staus is done. Notify sent or failed.");
// TODO bug 832140 handle !Components.isSuccessCode(aRv)
if (!aIsSentSuccess) {
if (DEBUG) debug("Send MMS fail. aParams.receivers = " +
JSON.stringify(aParams.receivers));
aRequest.notifySendMessageFailed(Ci.nsIMobileMessageCallback.INTERNAL_ERROR);
Services.obs.notifyObservers(aDomMessage, kSmsFailedObserverTopic, null);
return;
}
if (DEBUG) debug("Send MMS successful. aParams.receivers = " +
JSON.stringify(aParams.receivers));
self.broadcastSentMessageEvent(domMessage);
aRequest.notifyMessageSent(aDomMessage);
});
@ -1388,20 +1398,20 @@ MmsService.prototype = {
gMobileMessageDatabaseService
.saveSendingMessage(savableMessage,
function notifySendingResult(aRv, aDomMessage) {
debug("Saving sending message is done. Start to send.");
if (DEBUG) debug("Saving sending message is done. Start to send.");
// TODO bug 832140 handle !Components.isSuccessCode(aRv)
Services.obs.notifyObservers(aDomMessage, kSmsSendingObserverTopic, null);
let sendTransaction;
try {
sendTransaction = new SendTransaction(savableMessage);
} catch (e) {
debug("Exception: fail to create a SendTransaction instance.");
if (DEBUG) debug("Exception: fail to create a SendTransaction instance.");
sendTransactionCb(aDomMessage.id, false);
return;
}
sendTransaction.run(function callback(aMmsStatus, aMsg) {
let isSentSuccess = (aMmsStatus == MMS.MMS_PDU_ERROR_OK);
debug("The sending status of sendTransaction.run(): " + aMmsStatus);
if (DEBUG) debug("The sending status of sendTransaction.run(): " + aMmsStatus);
sendTransactionCb(aDomMessage.id, isSentSuccess);
});
});
@ -1411,18 +1421,18 @@ MmsService.prototype = {
gMobileMessageDatabaseService.getMessageRecordById(id,
(function notifyResult(aRv, aMessageRecord) {
if (Ci.nsIMobileMessageCallback.SUCCESS_NO_ERROR != aRv) {
debug("Function getMessageRecordById() return error.");
if (DEBUG) debug("Function getMessageRecordById() return error.");
aRequest.notifyGetMessageFailed(aRv);
return;
}
if ("mms" != aMessageRecord.type) {
debug("Type of message record is not mms");
if (DEBUG) debug("Type of message record is not mms");
aRequest.notifyGetMessageFailed(Ci.nsIMobileMessageCallback.INTERNAL_ERROR);
return;
}
if (!aMessageRecord.headers ||
!aMessageRecord.headers["x-mms-content-location"]) {
debug("Can't find mms content url in database.");
if (DEBUG) debug("Can't find mms content url in database.");
aRequest.notifyGetMessageFailed(Ci.nsIMobileMessageCallback.INTERNAL_ERROR);
return;
}
@ -1434,7 +1444,7 @@ MmsService.prototype = {
aMessageRecord.headers["x-mms-expiry"] * 1000;
if (expiriedDate < Date.now()) {
aRequest.notifyGetMessageFailed(Ci.nsIMobileMessageCallback.NOT_FOUND_ERROR);
debug("This notification indication is expired.");
if (DEBUG) debug("This notification indication is expired.");
return;
}
@ -1445,7 +1455,7 @@ MmsService.prototype = {
// If the mmsStatus is still MMS_PDU_STATUS_DEFERRED after retry,
// we should not store it into database.
if (MMS.MMS_PDU_STATUS_RETRIEVED !== mmsStatus) {
debug("RetrieveMessage fail after retry.");
if (DEBUG) debug("RetrieveMessage fail after retry.");
aRequest.notifyGetMessageFailed(Ci.nsIMobileMessageCallback.INTERNAL_ERROR);
return;
}
@ -1466,7 +1476,7 @@ MmsService.prototype = {
let reportAllowed = this.getReportAllowed(this.confSendDeliveryReport,
wish);
debug("retrievedMsg = " + JSON.stringify(retrievedMsg));
if (DEBUG) debug("retrievedMsg = " + JSON.stringify(retrievedMsg));
aMessageRecord = this.mergeRetrievalConfirmation(retrievedMsg, aMessageRecord);
gMobileMessageDatabaseService.saveReceivedMessage(aMessageRecord,
(function (rv, domMessage) {
@ -1475,7 +1485,7 @@ MmsService.prototype = {
// At this point we could send a message to content to
// notify the user that storing an incoming MMS failed, most
// likely due to a full disk.
debug("Could not store MMS " + domMessage.id +
if (DEBUG) debug("Could not store MMS " + domMessage.id +
", error code " + rv);
aRequest.notifyGetMessageFailed(Ci.nsIMobileMessageCallback.INTERNAL_ERROR);
return;
@ -1498,7 +1508,7 @@ MmsService.prototype = {
if (!msg) {
return false;
}
debug("receiveWapPush: msg = " + JSON.stringify(msg));
if (DEBUG) debug("receiveWapPush: msg = " + JSON.stringify(msg));
switch (msg.type) {
case MMS.MMS_PDU_TYPE_NOTIFICATION_IND:
@ -1508,7 +1518,7 @@ MmsService.prototype = {
this.handleDeliveryIndication(msg);
break;
default:
debug("Unsupported X-MMS-Message-Type: " + msg.type);
if (DEBUG) debug("Unsupported X-MMS-Message-Type: " + msg.type);
break;
}
},

View File

@ -13,7 +13,7 @@ interface nsIDOMMozMobileCellInfo;
interface nsIDOMMozIccManager;
interface nsIDOMMozMobileCFInfo;
[scriptable, builtinclass, uuid(780de142-562c-4141-bd5c-5413fb1952d2)]
[scriptable, builtinclass, uuid(0106c3fe-0064-40f8-b2e1-b8ad37b4c81e)]
interface nsIDOMMozMobileConnection : nsIDOMEventTarget
{
const long ICC_SERVICE_CLASS_VOICE = (1 << 0);
@ -41,6 +41,14 @@ interface nsIDOMMozMobileConnection : nsIDOMEventTarget
*/
readonly attribute DOMString cardState;
/**
* Indicates the number of retries remaining when cardState equals 'pinRequired'
* or 'pukRequired'. 0 denotes the retry count is unavailable.
*
* Value is undefined for other cardState values.
*/
readonly attribute long retryCount;
/**
* Information stored in the device's ICC card.
*/

View File

@ -11,7 +11,7 @@ interface nsIDOMMozMobileCFInfo;
interface nsIDOMDOMRequest;
interface nsIDOMWindow;
[scriptable, uuid(2cb8e811-7eaf-4cb9-8aa8-581e7a245edc)]
[scriptable, uuid(d919e279-b0e4-4bc0-8464-f5b37aa41484)]
interface nsIMobileConnectionListener : nsISupports
{
void notifyVoiceChanged();
@ -47,6 +47,7 @@ interface nsIMobileConnectionProvider : nsISupports
void unregisterMobileConnectionMsg(in nsIMobileConnectionListener listener);
readonly attribute DOMString cardState;
readonly attribute long retryCount;
readonly attribute nsIDOMMozMobileICCInfo iccInfo;
readonly attribute nsIDOMMozMobileConnectionInfo voiceConnectionInfo;
readonly attribute nsIDOMMozMobileConnectionInfo dataConnectionInfo;

View File

@ -185,6 +185,17 @@ MobileConnection::GetCardState(nsAString& cardState)
return mProvider->GetCardState(cardState);
}
NS_IMETHODIMP
MobileConnection::GetRetryCount(int32_t* retryCount)
{
*retryCount = 0;
if (!mProvider || !CheckPermission("mobileconnection")) {
return NS_OK;
}
return mProvider->GetRetryCount(retryCount);
}
NS_IMETHODIMP
MobileConnection::GetIccInfo(nsIDOMMozMobileICCInfo** aIccInfo)
{

View File

@ -288,6 +288,8 @@ this.PhoneNumber = (function (dataBase) {
for (var n = 0; n < entry.length; ++n) {
if (typeof entry[n] == "string")
entry[n] = ParseMetaData(countryCode, entry[n]);
if (n > 0)
entry[n].formats = entry[0].formats;
ret = ParseNationalNumber(number, entry[n])
if (ret)
return ret;

View File

@ -101,6 +101,28 @@ function Format(dial, currentRegion, nationalNumber, region, nationalFormat, int
}
}
function TestProperties(dial, currentRegion) {
var result = PhoneNumber.Parse(dial, currentRegion);
if (result) {
ok(true, "found it");
ok(true, "InternationalFormat: " + result.internationalFormat);
ok(true, "InternationalNumber: " + result.internationalNumber);
ok(true, "NationalNumber: " + result.nationalNumber);
ok(true, "NationalFormat: " + result.nationalFormat);
} else {
ok(true, "not found");
}
}
TestProperties("+0988782456");
TestProperties("+33442020", "ES");
TestProperties("+43987614", "ES");
TestProperties("+0988782456");
TestProperties("+34556657");
TestProperties("+66554433");
TestProperties("+43442075");
TestProperties("+13442074");
// Test whether could a string be a phone number.
IsPlain(null, false);
IsPlain("", false);

View File

@ -8,6 +8,7 @@ const {classes: Cc, interfaces: Ci, utils: Cu, results: Cr} = Components;
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/FileUtils.jsm");
const NETWORKMANAGER_CONTRACTID = "@mozilla.org/network/manager;1";
const NETWORKMANAGER_CID =
@ -31,11 +32,14 @@ const TOPIC_PREF_CHANGED = "nsPref:changed";
const TOPIC_XPCOM_SHUTDOWN = "xpcom-shutdown";
const PREF_MANAGE_OFFLINE_STATUS = "network.gonk.manage-offline-status";
// TODO, get USB RNDIS interface name automatically.(see Bug 776212)
const POSSIBLE_USB_INTERFACE_NAME = "rndis0,usb0";
const DEFAULT_USB_INTERFACE_NAME = "rndis0";
const DEFAULT_3G_INTERFACE_NAME = "rmnet0";
const DEFAULT_WIFI_INTERFACE_NAME = "wlan0";
// The kernel's proc entry for network lists.
const KERNEL_NETWORK_ENTRY = "/sys/class/net";
const TETHERING_TYPE_WIFI = "WiFi";
const TETHERING_TYPE_USB = "USB";
@ -149,6 +153,9 @@ function NetworkManager() {
}
Services.prefs.addObserver(PREF_MANAGE_OFFLINE_STATUS, this, false);
// Possible usb tethering interfaces for different gonk platform.
this.possibleInterface = POSSIBLE_USB_INTERFACE_NAME.split(",");
// Default values for internal and external interfaces.
this._tetheringInterface = Object.create(null);
this._tetheringInterface[TETHERING_TYPE_USB] = {externalInterface: DEFAULT_3G_INTERFACE_NAME,
@ -838,10 +845,28 @@ NetworkManager.prototype = {
this.controlMessage(params, this.usbTetheringResultReport);
},
getUsbInterface: function getUsbInterface() {
// Find the rndis interface.
for (let i = 0; i < this.possibleInterface.length; i++) {
try {
let file = new FileUtils.File(KERNEL_NETWORK_ENTRY + "/" +
this.possibleInterface[i]);
if (file.IsDirectory()) {
return this.possibleInterface[i];
}
} catch (e) {
debug("Not " + this.possibleInterface[i] + " interface.");
}
}
debug("Can't find rndis interface in possible lists.");
return DEFAULT_USB_INTERFACE_NAME;
},
enableUsbRndisResult: function enableUsbRndisResult(data) {
let result = data.result;
let enable = data.enable;
if (result) {
this._tetheringInterface[TETHERING_TYPE_USB].internalInterface = this.getUsbInterface();
this.setUSBTethering(enable, this._tetheringInterface[TETHERING_TYPE_USB]);
} else {
let params = {

View File

@ -314,6 +314,7 @@ CellBroadcastEtwsInfo.prototype = {
function RILContentHelper() {
this.rilContext = {
cardState: RIL.GECKO_CARDSTATE_UNKNOWN,
retryCount: 0,
networkSelectionMode: RIL.GECKO_NETWORK_SELECTION_UNKNOWN,
iccInfo: new MobileICCInfo(),
voiceConnectionInfo: new MobileConnectionInfo(),
@ -406,6 +407,7 @@ RILContentHelper.prototype = {
return;
}
this.rilContext.cardState = rilContext.cardState;
this.rilContext.retryCount = rilContext.retryCount;
this.rilContext.networkSelectionMode = rilContext.networkSelectionMode;
this.updateInfo(rilContext.iccInfo, this.rilContext.iccInfo);
this.updateConnectionInfo(rilContext.voice, this.rilContext.voiceConnectionInfo);
@ -430,6 +432,10 @@ RILContentHelper.prototype = {
return this.getRilContext().cardState;
},
get retryCount() {
return this.getRilContext().retryCount;
},
get networkSelectionMode() {
return this.getRilContext().networkSelectionMode;
},
@ -1037,6 +1043,7 @@ RILContentHelper.prototype = {
debug("Received message '" + msg.name + "': " + JSON.stringify(msg.json));
switch (msg.name) {
case "RIL:CardStateChanged":
this.rilContext.retryCount = msg.json.retryCount;
if (this.rilContext.cardState != msg.json.cardState) {
this.rilContext.cardState = msg.json.cardState;
this._deliverEvent("_mobileConnectionListeners",

View File

@ -229,6 +229,7 @@ function RadioInterfaceLayer() {
this.rilContext = {
radioState: RIL.GECKO_RADIOSTATE_UNAVAILABLE,
cardState: RIL.GECKO_CARDSTATE_UNKNOWN,
retryCount: 0, // TODO: Please see bug 868896
networkSelectionMode: RIL.GECKO_NETWORK_SELECTION_UNKNOWN,
iccInfo: null,
imsi: null,

View File

@ -59,13 +59,15 @@ interface nsIVoicemailInfo : nsISupports
readonly attribute DOMString displayName;
};
[scriptable, uuid(f7ff9856-5c55-4ba2-9b64-bfc0ace169e7)]
[scriptable, uuid(2f1c8055-322e-490a-b1e1-4ccd5d546b3c)]
interface nsIRilContext : nsISupports
{
readonly attribute DOMString radioState;
readonly attribute DOMString cardState;
readonly attribute long retryCount;
readonly attribute DOMString imsi;
readonly attribute DOMString networkSelectionMode;

View File

@ -8,7 +8,7 @@
* be found in the AUTHORS file in the root of the source tree.
*/
#ifdef ANDROID
#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
#include "modules/audio_device/android/audio_device_jni_android.h"
#endif

View File

@ -4187,6 +4187,9 @@ pref("dom.mms.sendRetryInterval", 300000);
pref("dom.mms.retrievalRetryCount", 4);
pref("dom.mms.retrievalRetryIntervals", "60000,300000,600000,1800000");
// Debug enabler for MMS.
pref("mms.debugging.enabled", false);
// If the user puts a finger down on an element and we think the user
// might be executing a pan gesture, how long do we wait before
// tentatively deciding the gesture is actually a tap and activating