Mirror of https://gitlab.winehq.org/wine/wine-gecko.git, synced 2024-09-13 09:24:08 -07:00
Bug 1156472 - Part 2 - Rename MediaEngineWebRTCAudioSource to MediaEngineWebRTCMicrophoneSource. r=jesup
There are now two different possible audio sources, so this was getting confusing.
This commit is contained in:
parent a8d30803eb
commit 6d3f3d4413
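For context on the rename: MediaEngineAudioSource is now a base for more than one concrete audio source, so "AudioSource" alone no longer says which implementation is meant. Below is a minimal plain-C++ sketch of the resulting shape; the base interface is heavily simplified and the second subclass name is hypothetical (the actual sibling source is added elsewhere in bug 1156472, not in this patch).

#include <string>

// Simplified stand-in for MediaEngineAudioSource; the real interface in
// MediaEngineWebRTC.h is much larger (Allocate/Start/Stop/NotifyPull/...).
class MediaEngineAudioSource {
public:
  virtual ~MediaEngineAudioSource() = default;
  virtual void GetName(std::string& aName) = 0;
};

// After this patch the microphone-backed implementation names its role explicitly...
class MediaEngineWebRTCMicrophoneSource final : public MediaEngineAudioSource {
public:
  void GetName(std::string& aName) override { aName = "microphone"; }
};

// ...leaving room for other audio sources under the same base class
// (hypothetical name, for illustration only).
class HypotheticalAudioCaptureSource final : public MediaEngineAudioSource {
public:
  void GetName(std::string& aName) override { aName = "audio capture"; }
};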
@@ -358,15 +358,14 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
     strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
   }
 
-  nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
+  nsRefPtr<MediaEngineWebRTCMicrophoneSource> aSource;
   NS_ConvertUTF8toUTF16 uuid(uniqueId);
   if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
     // We've already seen this device, just append.
     aASources->AppendElement(aSource.get());
   } else {
-    aSource = new MediaEngineWebRTCAudioSource(
-      mThread, mVoiceEngine, i, deviceName, uniqueId
-    );
+    aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
+                                                    deviceName, uniqueId);
     mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
     aASources->AppendElement(aSource);
   }
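The hunk above keeps one source object per physical device: sources are cached in a hashtable keyed by the device's unique ID, and enumeration either returns the cached object or creates and stores a new one. A rough sketch of that get-or-create pattern in plain C++ follows; std::unordered_map and std::shared_ptr stand in for nsRefPtrHashtable and nsRefPtr, and all names here are illustrative, not Gecko API.

#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

// Illustrative stand-in for MediaEngineWebRTCMicrophoneSource.
struct MicrophoneSource {
  explicit MicrophoneSource(std::string uuid) : mUuid(std::move(uuid)) {}
  std::string mUuid;
};

// One entry per device UUID, so repeated enumerations reuse the same object.
static std::unordered_map<std::string, std::shared_ptr<MicrophoneSource>> sAudioSources;

std::shared_ptr<MicrophoneSource>
GetOrCreateSource(const std::string& uuid,
                  std::vector<std::shared_ptr<MicrophoneSource>>& aOutSources)
{
  std::shared_ptr<MicrophoneSource> source;
  auto it = sAudioSources.find(uuid);
  if (it != sAudioSources.end()) {
    // We've already seen this device, just append the cached instance.
    source = it->second;
  } else {
    source = std::make_shared<MicrophoneSource>(uuid);
    sAudioSources.emplace(uuid, source); // the map keeps the source alive
  }
  aOutSources.push_back(source);
  return source;
}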
@@ -133,13 +133,16 @@ private:
   void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) override;
 };
 
-class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
-                                     public webrtc::VoEMediaProcess,
-                                     private MediaConstraintsHelper
+class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
+                                          public webrtc::VoEMediaProcess,
+                                          private MediaConstraintsHelper
 {
 public:
-  MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
-                               int aIndex, const char* name, const char* uuid)
+  MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
+                                    webrtc::VoiceEngine* aVoiceEnginePtr,
+                                    int aIndex,
+                                    const char* name,
+                                    const char* uuid)
     : MediaEngineAudioSource(kReleased)
     , mVoiceEngine(aVoiceEnginePtr)
     , mMonitor("WebRTCMic.Monitor")
@@ -207,7 +210,7 @@ public:
   virtual void Shutdown() override;
 
 protected:
-  ~MediaEngineWebRTCAudioSource() { Shutdown(); }
+  ~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
 
 private:
   void Init();
@@ -294,7 +297,8 @@ private:
   // Store devices we've already seen in a hashtable for quick return.
   // Maps UUID to MediaEngineSource (one set for audio, one for video).
   nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
-  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
+  nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCMicrophoneSource>
+    mAudioSources;
 };
 
 }
@@ -41,9 +41,9 @@ extern PRLogModuleInfo* GetMediaManagerLog();
 #define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
 
 /**
- * Webrtc audio source.
+ * Webrtc microphone source source.
  */
-NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
+NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
 
 // XXX temp until MSG supports registration
 StaticRefPtr<AudioOutputObserver> gFarendObserver;
@@ -177,7 +177,7 @@ AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrame
 }
 
 void
-MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
+MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
 {
   if (mInitDone) {
     aName.Assign(mDeviceName);
@@ -187,7 +187,7 @@ MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
 }
 
 void
-MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
+MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
 {
   if (mInitDone) {
     aUUID.Assign(mDeviceUUID);
@@ -197,10 +197,10 @@ MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
-                                     bool aAgcOn, uint32_t aAGC,
-                                     bool aNoiseOn, uint32_t aNoise,
-                                     int32_t aPlayoutDelay)
+MediaEngineWebRTCMicrophoneSource::Config(bool aEchoOn, uint32_t aEcho,
+                                          bool aAgcOn, uint32_t aAGC,
+                                          bool aNoiseOn, uint32_t aNoise,
+                                          int32_t aPlayoutDelay)
 {
   LOG(("Audio config: aec: %d, agc: %d, noise: %d",
        aEchoOn ? aEcho : -1,
@@ -281,9 +281,9 @@ uint32_t MediaEngineWebRTCAudioSource::GetBestFitnessDistance(
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
-                                       const MediaEnginePrefs &aPrefs,
-                                       const nsString& aDeviceId)
+MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
+                                            const MediaEnginePrefs &aPrefs,
+                                            const nsString& aDeviceId)
 {
   if (mState == kReleased) {
     if (mInitDone) {
@@ -309,7 +309,7 @@ MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstr
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Deallocate()
+MediaEngineWebRTCMicrophoneSource::Deallocate()
 {
   bool empty;
   {
@@ -331,7 +331,8 @@ MediaEngineWebRTCAudioSource::Deallocate()
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
+MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
+                                         TrackID aID)
 {
   if (!mInitDone || !aStream) {
     return NS_ERROR_FAILURE;
@@ -384,7 +385,7 @@ MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
 }
 
 nsresult
-MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
+MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
 {
   {
     MonitorAutoLock lock(mMonitor);
@@ -421,17 +422,17 @@ MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
 }
 
 void
-MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
-                                         SourceMediaStream *aSource,
-                                         TrackID aID,
-                                         StreamTime aDesiredTime)
+MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
+                                              SourceMediaStream *aSource,
+                                              TrackID aID,
+                                              StreamTime aDesiredTime)
 {
   // Ignore - we push audio data
   LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
 }
 
 void
-MediaEngineWebRTCAudioSource::Init()
+MediaEngineWebRTCMicrophoneSource::Init()
 {
   mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
 
@@ -496,7 +497,7 @@ MediaEngineWebRTCAudioSource::Init()
 }
 
 void
-MediaEngineWebRTCAudioSource::Shutdown()
+MediaEngineWebRTCMicrophoneSource::Shutdown()
 {
   if (!mInitDone) {
     // duplicate these here in case we failed during Init()
@@ -551,9 +552,10 @@ MediaEngineWebRTCAudioSource::Shutdown()
 typedef int16_t sample;
 
 void
-MediaEngineWebRTCAudioSource::Process(int channel,
-  webrtc::ProcessingTypes type, sample* audio10ms,
-  int length, int samplingFreq, bool isStereo)
+MediaEngineWebRTCMicrophoneSource::Process(int channel,
+                                           webrtc::ProcessingTypes type,
+                                           sample *audio10ms, int length,
+                                           int samplingFreq, bool isStereo)
 {
   // On initial capture, throw away all far-end data except the most recent sample
   // since it's already irrelevant and we want to keep avoid confusing the AEC far-end