2012-07-12 04:53:08 -07:00
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
|
|
|
* You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#include "MediaEngineWebRTC.h"
|
|
|
|
|
|
|
|
#define CHANNELS 1
|
|
|
|
#define ENCODING "L16"
|
|
|
|
#define DEFAULT_PORT 5555
|
|
|
|
|
|
|
|
#define SAMPLE_RATE 256000
|
|
|
|
#define SAMPLE_FREQUENCY 16000
|
|
|
|
#define SAMPLE_LENGTH ((SAMPLE_FREQUENCY*10)/1000)
|
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
|
2012-10-15 13:41:46 -07:00
|
|
|
#ifdef PR_LOGGING
|
2012-10-29 16:32:10 -07:00
|
|
|
extern PRLogModuleInfo* GetMediaManagerLog();
|
|
|
|
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
|
2012-10-15 13:41:46 -07:00
|
|
|
#else
|
|
|
|
#define LOG(msg)
|
|
|
|
#endif
|
|
|
|
|
2012-07-12 04:53:08 -07:00
|
|
|
/**
|
|
|
|
* Webrtc audio source.
|
|
|
|
*/
|
|
|
|
NS_IMPL_THREADSAFE_ISUPPORTS0(MediaEngineWebRTCAudioSource)
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
|
|
|
|
{
|
|
|
|
if (mInitDone) {
|
|
|
|
aName.Assign(mDeviceName);
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaEngineWebRTCAudioSource::GetUUID(nsAString& aUUID)
|
|
|
|
{
|
|
|
|
if (mInitDone) {
|
|
|
|
aUUID.Assign(mDeviceUUID);
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Claims the audio device for this source and wires up a dummy local
 * loopback destination so captured audio is routed back to us.
 * Legal only from the kReleased state; moves the source to kAllocated.
 *
 * @return NS_OK on success, NS_ERROR_FAILURE on wrong state or if the
 *         voice engine rejects the receiver/destination setup.
 */
nsresult
MediaEngineWebRTCAudioSource::Allocate()
{
  if (mState != kReleased) {
    return NS_ERROR_FAILURE;
  }

  // Audio doesn't play through unless we set a receiver and destination, so
  // we setup a dummy local destination, and do a loopback.
  // Check the return codes: ignoring a failure here would leave the channel
  // half-configured and Start() would later fail with no diagnostics.
  if (mVoEBase->SetLocalReceiver(mChannel, DEFAULT_PORT)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->SetSendDestination(mChannel, DEFAULT_PORT, "127.0.0.1")) {
    return NS_ERROR_FAILURE;
  }

  mState = kAllocated;
  return NS_OK;
}
|
|
|
|
|
|
|
|
/**
 * Releases the device claimed by Allocate(). Legal only from kStopped or
 * kAllocated; moves the source back to kReleased.
 */
nsresult
MediaEngineWebRTCAudioSource::Deallocate()
{
  bool releasable = (mState == kStopped || mState == kAllocated);
  if (!releasable) {
    return NS_ERROR_FAILURE;
  }
  mState = kReleased;
  return NS_OK;
}
|
|
|
|
|
|
|
|
/**
 * Starts capture into aStream: creates the audio track, starts the
 * loopback channel, and attaches this object as the external media
 * processor so Process() receives the captured frames.
 *
 * @param aStream destination stream; must be non-null.
 * @param aID     track id to create on aStream.
 * @return NS_OK on success; NS_ERROR_FAILURE on bad state/arguments or if
 *         the voice engine fails to start.
 */
nsresult
MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
{
  if (!mInitDone || mState != kAllocated) {
    return NS_ERROR_FAILURE;
  }
  if (!aStream) {
    return NS_ERROR_FAILURE;
  }

  mSource = aStream;

  // The stream takes ownership of the segment added to the track.
  AudioSegment* segment = new AudioSegment();
  segment->Init(CHANNELS);
  mSource->AddTrack(aID, SAMPLE_FREQUENCY, 0, segment);
  mSource->AdvanceKnownTracksTime(STREAM_TIME_MAX);
  LOG(("Initial audio"));
  mTrackID = aID;

  if (mVoEBase->StartReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }
  if (mVoEBase->StartSend(mChannel)) {
    // Don't leave the receive side running when the send side failed;
    // the original code leaked a started receiver on this path.
    mVoEBase->StopReceive(mChannel);
    return NS_ERROR_FAILURE;
  }

  // Attach external media processor, so this::Process will be called.
  mVoERender->RegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel, *this);

  mState = kStarted;
  return NS_OK;
}
|
|
|
|
|
|
|
|
/**
 * Stops capture: detaches the external media processor, halts the loopback
 * channel, and ends the MediaStream track. Legal only from kStarted.
 */
nsresult
MediaEngineWebRTCAudioSource::Stop()
{
  if (mState != kStarted || !mVoEBase) {
    return NS_ERROR_FAILURE;
  }

  // No further Process() callbacks arrive after deregistration.
  mVoERender->DeRegisterExternalMediaProcessing(mChannel, webrtc::kRecordingPerChannel);

  // Short-circuit preserves the original order: StopReceive is not
  // attempted if StopSend already failed.
  if (mVoEBase->StopSend(mChannel) || mVoEBase->StopReceive(mChannel)) {
    return NS_ERROR_FAILURE;
  }

  {
    // Flip state and end the track under the monitor so a concurrent
    // Process() callback can't append to a track that has been ended.
    ReentrantMonitorAutoEnter enter(mMonitor);
    mState = kStopped;
    mSource->EndTrack(mTrackID);
  }

  return NS_OK;
}
|
|
|
|
|
2012-10-17 02:46:40 -07:00
|
|
|
void
|
|
|
|
MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
|
|
|
|
StreamTime aDesiredTime)
|
|
|
|
{
|
|
|
|
// Ignore - we push audio data
|
2012-10-24 16:21:15 -07:00
|
|
|
#ifdef DEBUG
|
|
|
|
static TrackTicks mLastEndTime = 0;
|
|
|
|
TrackTicks target = TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime);
|
|
|
|
TrackTicks delta = target - mLastEndTime;
|
|
|
|
LOG(("Audio:NotifyPull: target %lu, delta %lu",(uint32_t) target, (uint32_t) delta));
|
|
|
|
mLastEndTime = target;
|
|
|
|
#endif
|
2012-10-17 02:46:40 -07:00
|
|
|
}
|
|
|
|
|
2012-07-12 04:53:08 -07:00
|
|
|
/**
 * Snapshot capture is not supported for audio sources; callers always
 * receive NS_ERROR_NOT_IMPLEMENTED regardless of aDuration, and *aFile is
 * never written.
 */
nsresult
MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}
|
|
|
|
|
2012-10-16 17:53:55 -07:00
|
|
|
void
|
|
|
|
MediaEngineWebRTCAudioSource::Init()
|
|
|
|
{
|
|
|
|
mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
|
|
|
|
|
|
|
|
mVoEBase->Init();
|
|
|
|
|
|
|
|
mVoERender = webrtc::VoEExternalMedia::GetInterface(mVoiceEngine);
|
|
|
|
if (!mVoERender) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
mChannel = mVoEBase->CreateChannel();
|
|
|
|
if (mChannel < 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check for availability.
|
|
|
|
webrtc::VoEHardware* ptrVoEHw = webrtc::VoEHardware::GetInterface(mVoiceEngine);
|
|
|
|
if (ptrVoEHw->SetRecordingDevice(mCapIndex)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool avail = false;
|
|
|
|
ptrVoEHw->GetRecordingDeviceStatus(avail);
|
|
|
|
if (!avail) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set "codec" to PCM, 32kHz on 1 channel
|
|
|
|
webrtc::VoECodec* ptrVoECodec;
|
|
|
|
webrtc::CodecInst codec;
|
|
|
|
ptrVoECodec = webrtc::VoECodec::GetInterface(mVoiceEngine);
|
|
|
|
if (!ptrVoECodec) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
strcpy(codec.plname, ENCODING);
|
|
|
|
codec.channels = CHANNELS;
|
|
|
|
codec.rate = SAMPLE_RATE;
|
|
|
|
codec.plfreq = SAMPLE_FREQUENCY;
|
|
|
|
codec.pacsize = SAMPLE_LENGTH;
|
|
|
|
codec.pltype = 0; // Default payload type
|
|
|
|
|
|
|
|
if (ptrVoECodec->SetSendCodec(mChannel, codec)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
mInitDone = true;
|
|
|
|
}
|
2012-07-12 04:53:08 -07:00
|
|
|
|
|
|
|
/**
 * Tears down everything Init() set up: stops capture if it is running,
 * releases the allocation, then terminates and releases the voice-engine
 * sub-interfaces. Safe to call when Init() never completed (no-op).
 */
void
MediaEngineWebRTCAudioSource::Shutdown()
{
  if (!mInitDone) {
    // Init() never finished, so there is nothing to unwind.
    return;
  }

  // Walk back through the state machine: kStarted -> kAllocated -> kReleased.
  if (mState == kStarted) {
    Stop();
  }

  if (mState == kAllocated) {
    Deallocate();
  }

  // NOTE(review): mChannel is never passed to DeleteChannel() before
  // Terminate(); confirm Terminate() reclaims channels, otherwise this
  // leaks one per source.
  mVoEBase->Terminate();
  mVoERender->Release();
  mVoEBase->Release();

  mState = kReleased;
  mInitDone = false;
}
|
|
|
|
|
|
|
|
typedef WebRtc_Word16 sample;
|
|
|
|
|
|
|
|
void
|
|
|
|
MediaEngineWebRTCAudioSource::Process(const int channel,
|
|
|
|
const webrtc::ProcessingTypes type, sample* audio10ms,
|
|
|
|
const int length, const int samplingFreq, const bool isStereo)
|
|
|
|
{
|
|
|
|
ReentrantMonitorAutoEnter enter(mMonitor);
|
2012-10-17 14:40:14 -07:00
|
|
|
if (mState != kStarted)
|
|
|
|
return;
|
2012-07-12 04:53:08 -07:00
|
|
|
|
|
|
|
nsRefPtr<SharedBuffer> buffer = SharedBuffer::Create(length * sizeof(sample));
|
|
|
|
|
|
|
|
sample* dest = static_cast<sample*>(buffer->Data());
|
2012-10-15 13:41:46 -07:00
|
|
|
memcpy(dest, audio10ms, length * sizeof(sample));
|
2012-07-12 04:53:08 -07:00
|
|
|
|
|
|
|
AudioSegment segment;
|
|
|
|
segment.Init(CHANNELS);
|
|
|
|
segment.AppendFrames(
|
2012-10-25 03:09:40 -07:00
|
|
|
buffer.forget(), length, 0, length, AUDIO_FORMAT_S16
|
2012-07-12 04:53:08 -07:00
|
|
|
);
|
|
|
|
mSource->AppendToTrack(mTrackID, &segment);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|