Bug 1053130: Refactor MediaEngine video sources; also remove Snapshot function. r=jesup,alfredo

--HG--
rename : content/media/webrtc/MediaEngineWebRTCVideo.cpp => content/media/webrtc/MediaEngineCameraVideoSource.cpp
rename : content/media/webrtc/MediaEngineWebRTC.h => content/media/webrtc/MediaEngineCameraVideoSource.h
rename : content/media/webrtc/MediaEngineWebRTCVideo.cpp => content/media/webrtc/MediaEngineGonkVideoSource.cpp
rename : content/media/webrtc/MediaEngineWebRTC.h => content/media/webrtc/MediaEngineGonkVideoSource.h
Chai-hung Tai 2014-10-12 23:37:37 -04:00
parent 9db62ed8f4
commit 7161e34d5d
15 changed files with 1095 additions and 1015 deletions

View File

@ -112,12 +112,6 @@ public:
/* tell the source if there are any direct listeners attached */
virtual void SetDirectListeners(bool) = 0;
/* Take a snapshot from this source. In the case of video this is a single
* image, and for audio, it is a snippet lasting aDuration milliseconds. The
* duration argument is ignored for a MediaEngineVideoSource.
*/
virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile) = 0;
/* Called when the stream wants more data */
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
@ -178,6 +172,8 @@ public:
* a Start(). Only Allocate() may be called after a Deallocate(). */
protected:
// Only class' own members can be initialized in constructor initializer list.
explicit MediaEngineSource(MediaEngineState aState) : mState(aState) {}
MediaEngineState mState;
};
@ -231,12 +227,14 @@ class MediaEngineVideoSource : public MediaEngineSource
public:
virtual ~MediaEngineVideoSource() {}
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Camera;
}
/* This call reserves but does not start the device. */
virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs) = 0;
protected:
explicit MediaEngineVideoSource(MediaEngineState aState)
: MediaEngineSource(aState) {}
MediaEngineVideoSource()
: MediaEngineSource(kReleased) {}
};
/**
@ -250,6 +248,11 @@ public:
/* This call reserves but does not start the device. */
virtual nsresult Allocate(const AudioTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs) = 0;
protected:
explicit MediaEngineAudioSource(MediaEngineState aState)
: MediaEngineSource(aState) {}
MediaEngineAudioSource()
: MediaEngineSource(kReleased) {}
};
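The protected constructors added above exist because of the rule noted in the new comment: a C++ initializer list may only name the class's own members, so a subclass cannot initialize the inherited mState directly and must delegate to a base constructor instead. A minimal sketch of the rule, with stand-in names rather than the real classes:

enum MediaEngineState { kReleased, kAllocated };

class Source {
protected:
  explicit Source(MediaEngineState aState) : mState(aState) {}  // own member: OK
  MediaEngineState mState;
};

class VideoSource : public Source {
public:
  // VideoSource() : mState(kReleased) {}  // ill-formed: mState belongs to Source
  VideoSource() : Source(kReleased) {}     // delegate to the base constructor
};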

View File

@ -0,0 +1,149 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaEngineCameraVideoSource.h"
namespace mozilla {
using dom::ConstrainLongRange;
using dom::ConstrainDoubleRange;
using dom::MediaTrackConstraintSet;
#ifdef PR_LOGGING
extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
#else
#define LOG(msg)
#define LOGFRAME(msg)
#endif
/* static */ bool
MediaEngineCameraVideoSource::IsWithin(int32_t n, const ConstrainLongRange& aRange) {
return aRange.mMin <= n && n <= aRange.mMax;
}
/* static */ bool
MediaEngineCameraVideoSource::IsWithin(double n, const ConstrainDoubleRange& aRange) {
return aRange.mMin <= n && n <= aRange.mMax;
}
/* static */ int32_t
MediaEngineCameraVideoSource::Clamp(int32_t n, const ConstrainLongRange& aRange) {
return std::max(aRange.mMin, std::min(n, aRange.mMax));
}
/* static */ bool
MediaEngineCameraVideoSource::AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
}
/* static */ bool
MediaEngineCameraVideoSource::Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
MOZ_ASSERT(AreIntersecting(aA, aB));
aA.mMin = std::max(aA.mMin, aB.mMin);
aA.mMax = std::min(aA.mMax, aB.mMax);
return true;
}
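To make the range compounding concrete, here is a small self-contained sketch (hypothetical ranges, with a stand-in struct for dom::ConstrainLongRange) of how a required range and the advanced sets combine in the loop used by GuessCapability() below:

#include <algorithm>
#include <cstdint>
#include <cstdio>

struct Range { int32_t mMin, mMax; };  // stand-in for dom::ConstrainLongRange

static bool AreIntersecting(const Range& a, const Range& b) {
  return a.mMax >= b.mMin && a.mMin <= b.mMax;
}
static void Intersect(Range& a, const Range& b) {
  a.mMin = std::max(a.mMin, b.mMin);
  a.mMax = std::min(a.mMax, b.mMax);
}

int main() {
  Range cWidth = { 0, 1920 };   // required constraint on width
  Range adv1 = { 1280, 4096 };  // overlaps, so it narrows cWidth to 1280..1920
  Range adv2 = { 100, 200 };    // now disjoint, so it is skipped entirely
  if (AreIntersecting(cWidth, adv1)) Intersect(cWidth, adv1);
  if (AreIntersecting(cWidth, adv2)) Intersect(cWidth, adv2);
  printf("compounded width: %d..%d\n", cWidth.mMin, cWidth.mMax);  // 1280..1920
  return 0;
}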
// A special version of the algorithm for cameras that don't list capabilities.
void
MediaEngineCameraVideoSource::GuessCapability(
const VideoTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs)
{
LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
// In short: compound constraint-ranges and use pref as ideal.
ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
if (aConstraints.mAdvanced.WasPassed()) {
const auto& advanced = aConstraints.mAdvanced.Value();
for (uint32_t i = 0; i < advanced.Length(); i++) {
if (AreIntersecting(cWidth, advanced[i].mWidth) &&
AreIntersecting(cHeight, advanced[i].mHeight)) {
Intersect(cWidth, advanced[i].mWidth);
Intersect(cHeight, advanced[i].mHeight);
}
}
}
// Detect Mac HD cams and give them some love in the form of a dynamic default
// since that hardware switches between 4:3 at low res and 16:9 at higher res.
//
// Logic is: if we're relying on defaults in aPrefs, then
// only use HD pref when non-HD pref is too small and HD pref isn't too big.
bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
(aPrefs.GetWidth() < cWidth.mMin ||
aPrefs.GetHeight() < cHeight.mMin) &&
!(aPrefs.GetWidth(true) > cWidth.mMax ||
aPrefs.GetHeight(true) > cHeight.mMax));
int prefWidth = aPrefs.GetWidth(macHD);
int prefHeight = aPrefs.GetHeight(macHD);
// Clamp width and height without distorting inherent aspect too much.
if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
// If both are within, we get the default (pref) aspect.
// If neither are within, we get the aspect of the enclosing constraint.
// Either are presumably reasonable (presuming constraints are sane).
mCapability.width = Clamp(prefWidth, cWidth);
mCapability.height = Clamp(prefHeight, cHeight);
} else {
// But if only one clips (e.g. width), the resulting skew is undesirable:
// .------------.
// | constraint |
// .----+------------+----.
// | | | |
// |pref| result | | prefAspect != resultAspect
// | | | |
// '----+------------+----'
// '------------'
// So in this case, preserve prefAspect instead:
// .------------.
// | constraint |
// .------------.
// |pref | prefAspect is unchanged
// '------------'
// | |
// '------------'
if (IsWithin(prefWidth, cWidth)) {
mCapability.height = Clamp(prefHeight, cHeight);
mCapability.width = Clamp((mCapability.height * prefWidth) /
prefHeight, cWidth);
} else {
mCapability.width = Clamp(prefWidth, cWidth);
mCapability.height = Clamp((mCapability.width * prefHeight) /
prefWidth, cHeight);
}
}
mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
LOG(("chose cap %dx%d @%dfps",
mCapability.width, mCapability.height, mCapability.maxFPS));
}
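A worked example of the clamping logic above, with hypothetical numbers: prefs ask for 1280x720 (16:9) while the compounded constraints allow width up to 1920 but cap height at 480. Only the height clips, so the aspect-preserving branch scales the width rather than producing a skewed 1280x480:

#include <algorithm>
#include <cstdint>
#include <cstdio>

static int32_t Clamp(int32_t n, int32_t aMin, int32_t aMax) {
  return std::max(aMin, std::min(n, aMax));
}

int main() {
  int32_t prefW = 1280, prefH = 720;  // 16:9 preference
  int32_t maxW = 1920, maxH = 480;    // constraint maxima (minima are 0)
  bool wFits = prefW <= maxW;         // true
  bool hFits = prefH <= maxH;         // false: only one dimension clips
  int32_t w, h;
  if (wFits == hFits) {               // both within (or both outside): clamp directly
    w = Clamp(prefW, 0, maxW);
    h = Clamp(prefH, 0, maxH);
  } else {                            // exactly one side clips: keep the pref aspect
    h = Clamp(prefH, 0, maxH);              // 480
    w = Clamp(h * prefW / prefH, 0, maxW);  // 853, still ~16:9
  }
  printf("chose %dx%d\n", w, h);      // chose 853x480
  return 0;
}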
void
MediaEngineCameraVideoSource::GetName(nsAString& aName)
{
aName = mDeviceName;
}
void
MediaEngineCameraVideoSource::GetUUID(nsAString& aUUID)
{
aUUID = mUniqueId;
}
void
MediaEngineCameraVideoSource::SetDirectListeners(bool aHasDirectListeners)
{
LOG((__FUNCTION__));
mHasDirectListeners = aHasDirectListeners;
}
} // namespace mozilla

View File

@ -0,0 +1,100 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MediaEngineCameraVideoSource_h
#define MediaEngineCameraVideoSource_h
#include "MediaEngine.h"
#include "MediaTrackConstraints.h"
#include "nsDirectoryServiceDefs.h"
// conflicts with #include of scoped_ptr.h
#undef FF
#include "webrtc/video_engine/include/vie_capture.h"
namespace mozilla {
class MediaEngineCameraVideoSource : public MediaEngineVideoSource
{
public:
MediaEngineCameraVideoSource(int aIndex,
const char* aMonitorName = "Camera.Monitor")
: MediaEngineVideoSource(kReleased)
, mMonitor(aMonitorName)
, mWidth(0)
, mHeight(0)
, mInitDone(false)
, mHasDirectListeners(false)
, mCaptureIndex(aIndex)
, mFps(-1)
{}
virtual void GetName(nsAString& aName) MOZ_OVERRIDE;
virtual void GetUUID(nsAString& aUUID) MOZ_OVERRIDE;
virtual void SetDirectListeners(bool aHasListeners) MOZ_OVERRIDE;
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) MOZ_OVERRIDE
{
return NS_OK;
};
virtual bool IsFake() MOZ_OVERRIDE
{
return false;
}
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Camera;
}
virtual nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE
{
return NS_ERROR_NOT_IMPLEMENTED;
}
protected:
~MediaEngineCameraVideoSource() {}
static bool IsWithin(int32_t n, const dom::ConstrainLongRange& aRange);
static bool IsWithin(double n, const dom::ConstrainDoubleRange& aRange);
static int32_t Clamp(int32_t n, const dom::ConstrainLongRange& aRange);
static bool AreIntersecting(const dom::ConstrainLongRange& aA,
const dom::ConstrainLongRange& aB);
static bool Intersect(dom::ConstrainLongRange& aA, const dom::ConstrainLongRange& aB);
void GuessCapability(const VideoTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs);
// Engine variables.
// mMonitor protects mImage access/changes, and transitions of mState
// from kStarted to kStopped (which are combined with EndTrack() and
// image changes). Note that mSources is not accessed from other threads
// for video and is not protected.
// All the mMonitor accesses are from the child classes.
Monitor mMonitor; // Monitor for processing Camera frames.
nsRefPtr<layers::Image> mImage;
nsRefPtr<layers::ImageContainer> mImageContainer;
int mWidth, mHeight; // protected with mMonitor on Gonk due to different threading
// end of data protected by mMonitor
nsTArray<SourceMediaStream*> mSources; // When this goes empty, we shut down HW
bool mInitDone;
bool mHasDirectListeners;
int mCaptureIndex;
int mFps; // Track rate (30 fps by default)
webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
nsString mDeviceName;
nsString mUniqueId;
};
} // namespace mozilla
#endif // MediaEngineCameraVideoSource_h

View File

@ -39,10 +39,12 @@ NS_IMPL_ISUPPORTS(MediaEngineDefaultVideoSource, nsITimerCallback)
*/
MediaEngineDefaultVideoSource::MediaEngineDefaultVideoSource()
: mTimer(nullptr), mMonitor("Fake video"), mCb(16), mCr(16)
: MediaEngineVideoSource(kReleased)
, mTimer(nullptr)
, mMonitor("Fake video")
, mCb(16), mCr(16)
{
mImageContainer = layers::LayerManager::CreateImageContainer();
mState = kReleased;
}
MediaEngineDefaultVideoSource::~MediaEngineDefaultVideoSource()
@ -170,50 +172,6 @@ MediaEngineDefaultVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
return NS_OK;
}
nsresult
MediaEngineDefaultVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
*aFile = nullptr;
#ifndef MOZ_WIDGET_ANDROID
return NS_ERROR_NOT_IMPLEMENTED;
#else
nsAutoString filePath;
nsCOMPtr<nsIFilePicker> filePicker = do_CreateInstance("@mozilla.org/filepicker;1");
if (!filePicker)
return NS_ERROR_FAILURE;
nsXPIDLString title;
nsContentUtils::GetLocalizedString(nsContentUtils::eFORMS_PROPERTIES, "Browse", title);
int16_t mode = static_cast<int16_t>(nsIFilePicker::modeOpen);
nsresult rv = filePicker->Init(nullptr, title, mode);
NS_ENSURE_SUCCESS(rv, rv);
filePicker->AppendFilters(nsIFilePicker::filterImages);
// XXX - This API should be made async
int16_t dialogReturn;
rv = filePicker->Show(&dialogReturn);
NS_ENSURE_SUCCESS(rv, rv);
if (dialogReturn == nsIFilePicker::returnCancel) {
*aFile = nullptr;
return NS_OK;
}
nsCOMPtr<nsIFile> localFile;
filePicker->GetFile(getter_AddRefs(localFile));
if (!localFile) {
*aFile = nullptr;
return NS_OK;
}
nsCOMPtr<nsIDOMFile> domFile = dom::File::CreateFromFile(nullptr, localFile);
domFile.forget(aFile);
return NS_OK;
#endif
}
NS_IMETHODIMP
MediaEngineDefaultVideoSource::Notify(nsITimer* aTimer)
{
@ -351,9 +309,9 @@ private:
NS_IMPL_ISUPPORTS(MediaEngineDefaultAudioSource, nsITimerCallback)
MediaEngineDefaultAudioSource::MediaEngineDefaultAudioSource()
: mTimer(nullptr)
: MediaEngineAudioSource(kReleased)
, mTimer(nullptr)
{
mState = kReleased;
}
MediaEngineDefaultAudioSource::~MediaEngineDefaultAudioSource()
@ -455,12 +413,6 @@ MediaEngineDefaultAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
return NS_OK;
}
nsresult
MediaEngineDefaultAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
return NS_ERROR_NOT_IMPLEMENTED;
}
NS_IMETHODIMP
MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
{

View File

@ -46,7 +46,6 @@ public:
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop(SourceMediaStream*, TrackID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
@ -111,7 +110,6 @@ public:
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop(SourceMediaStream*, TrackID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,

View File

@ -0,0 +1,649 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaEngineGonkVideoSource.h"
#define LOG_TAG "MediaEngineGonkVideoSource"
#include <utils/Log.h>
#include "GrallocImages.h"
#include "VideoUtils.h"
#include "ScreenOrientation.h"
#include "libyuv.h"
#include "mtransport/runnable_utils.h"
namespace mozilla {
using namespace mozilla::dom;
using namespace mozilla::gfx;
#ifdef PR_LOGGING
extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg)
#define LOGFRAME(msg) PR_LOG(GetMediaManagerLog(), 6, msg)
#else
#define LOG(msg)
#define LOGFRAME(msg)
#endif
// We are subclassed from CameraControlListener, which implements a
// threadsafe reference-count for us.
NS_IMPL_QUERY_INTERFACE(MediaEngineGonkVideoSource, nsISupports)
NS_IMPL_ADDREF_INHERITED(MediaEngineGonkVideoSource, CameraControlListener)
NS_IMPL_RELEASE_INHERITED(MediaEngineGonkVideoSource, CameraControlListener)
// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it thinks it needs. Note that
// this means that no *real* frame can be inserted during this period.
void
MediaEngineGonkVideoSource::NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream* aSource,
TrackID aID,
StreamTime aDesiredTime,
TrackTicks& aLastEndTime)
{
VideoSegment segment;
MonitorAutoLock lock(mMonitor);
// B2G does AddTrack, but holds kStarted until the hardware changes state.
// So mState could be kReleased here. We really don't care about the state,
// though.
// Note: we're not giving up mImage here
nsRefPtr<layers::Image> image = mImage;
TrackTicks target = aSource->TimeToTicksRoundUp(USECS_PER_S, aDesiredTime);
TrackTicks delta = target - aLastEndTime;
LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime,
(int64_t) target, (int64_t) delta, image ? "" : "<null>"));
// Bug 846188 We may want to limit incoming frames to the requested frame rate
// mFps - if you want 30FPS, and the camera gives you 60FPS, this could
// cause issues.
// We may want to signal if the actual frame rate is below mMinFPS -
// cameras often don't return the requested frame rate especially in low
// light; we should consider surfacing this so that we can switch to a
// lower resolution (which may up the frame rate)
// Don't append if we've already provided a frame that supposedly goes past the current aDesiredTime
// Doing so means a negative delta and thus messes up handling of the graph
if (delta > 0) {
// nullptr images are allowed
IntSize size(image ? mWidth : 0, image ? mHeight : 0);
segment.AppendFrame(image.forget(), delta, size);
// This can fail if either a) we haven't added the track yet, or b)
// we've removed or finished the track.
if (aSource->AppendToTrack(aID, &(segment))) {
aLastEndTime = target;
}
}
}
void
MediaEngineGonkVideoSource::ChooseCapability(const VideoTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs)
{
return GuessCapability(aConstraints, aPrefs);
}
nsresult
MediaEngineGonkVideoSource::Allocate(const VideoTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs)
{
LOG((__FUNCTION__));
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kReleased && mInitDone) {
ChooseCapability(aConstraints, aPrefs);
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
&MediaEngineGonkVideoSource::AllocImpl));
mCallbackMonitor.Wait();
if (mState != kAllocated) {
return NS_ERROR_FAILURE;
}
}
return NS_OK;
}
nsresult
MediaEngineGonkVideoSource::Deallocate()
{
LOG((__FUNCTION__));
if (mSources.IsEmpty()) {
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState != kStopped && mState != kAllocated) {
return NS_ERROR_FAILURE;
}
// We do not register success callback here
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
&MediaEngineGonkVideoSource::DeallocImpl));
mCallbackMonitor.Wait();
if (mState != kReleased) {
return NS_ERROR_FAILURE;
}
mState = kReleased;
LOG(("Video device %d deallocated", mCaptureIndex));
} else {
LOG(("Video device %d deallocated but still in use", mCaptureIndex));
}
return NS_OK;
}
nsresult
MediaEngineGonkVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
LOG((__FUNCTION__));
if (!mInitDone || !aStream) {
return NS_ERROR_FAILURE;
}
mSources.AppendElement(aStream);
aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kStarted) {
return NS_OK;
}
mImageContainer = layers::LayerManager::CreateImageContainer();
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
&MediaEngineGonkVideoSource::StartImpl,
mCapability));
mCallbackMonitor.Wait();
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
return NS_OK;
}
nsresult
MediaEngineGonkVideoSource::Stop(SourceMediaStream* aSource, TrackID aID)
{
LOG((__FUNCTION__));
if (!mSources.RemoveElement(aSource)) {
// Already stopped - this is allowed
return NS_OK;
}
if (!mSources.IsEmpty()) {
return NS_OK;
}
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
{
MonitorAutoLock lock(mMonitor);
mState = kStopped;
aSource->EndTrack(aID);
// Drop any cached image so we don't start with a stale image on next
// usage
mImage = nullptr;
}
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
&MediaEngineGonkVideoSource::StopImpl));
return NS_OK;
}
/**
* Initialization and Shutdown functions for the video source, called by the
* constructor and destructor respectively.
*/
void
MediaEngineGonkVideoSource::Init()
{
nsAutoCString deviceName;
ICameraControl::GetCameraName(mCaptureIndex, deviceName);
CopyUTF8toUTF16(deviceName, mDeviceName);
CopyUTF8toUTF16(deviceName, mUniqueId);
mInitDone = true;
}
void
MediaEngineGonkVideoSource::Shutdown()
{
LOG((__FUNCTION__));
if (!mInitDone) {
return;
}
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kStarted) {
while (!mSources.IsEmpty()) {
Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
}
MOZ_ASSERT(mState == kStopped);
}
if (mState == kAllocated || mState == kStopped) {
Deallocate();
}
mState = kReleased;
mInitDone = false;
}
// All these functions must be run on MainThread!
void
MediaEngineGonkVideoSource::AllocImpl() {
MOZ_ASSERT(NS_IsMainThread());
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
mCameraControl = ICameraControl::Create(mCaptureIndex);
if (mCameraControl) {
mState = kAllocated;
// Add this as a listener for CameraControl events. We don't need
// to explicitly remove this--destroying the CameraControl object
// in DeallocImpl() will do that for us.
mCameraControl->AddListener(this);
}
mCallbackMonitor.Notify();
}
void
MediaEngineGonkVideoSource::DeallocImpl() {
MOZ_ASSERT(NS_IsMainThread());
mCameraControl = nullptr;
}
// The same algorithm from bug 840244
static int
GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
int screenAngle = 0;
switch (aScreen) {
case eScreenOrientation_PortraitPrimary:
screenAngle = 0;
break;
case eScreenOrientation_PortraitSecondary:
screenAngle = 180;
break;
case eScreenOrientation_LandscapePrimary:
screenAngle = 90;
break;
case eScreenOrientation_LandscapeSecondary:
screenAngle = 270;
break;
default:
MOZ_ASSERT(false);
break;
}
int result;
if (aBackCamera) {
// back camera
result = (aCameraMountAngle - screenAngle + 360) % 360;
} else {
// front camera
result = (aCameraMountAngle + screenAngle) % 360;
}
return result;
}
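A quick check of the two formulas with hypothetical mount angles: a back camera mounted at 90 degrees needs no rotation when the screen is already at 90 degrees (landscape-primary) and a quarter turn in portrait, while a front camera adds the angles because its sensor path is mirrored:

#include <cstdio>

static int Rotate(int aMountAngle, int aScreenAngle, bool aBackCamera) {
  return aBackCamera ? (aMountAngle - aScreenAngle + 360) % 360
                     : (aMountAngle + aScreenAngle) % 360;
}

int main() {
  printf("%d\n", Rotate(90, 90, true));   // 0: sensor and screen already agree
  printf("%d\n", Rotate(90, 0, true));    // 90: portrait needs a quarter turn
  printf("%d\n", Rotate(270, 0, false));  // 270: front camera composes angles
  return 0;
}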
// undefine to remove on-the-fly rotation support
#define DYNAMIC_GUM_ROTATION
void
MediaEngineGonkVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
#ifdef DYNAMIC_GUM_ROTATION
if (mHasDirectListeners) {
// aka hooked to PeerConnection
MonitorAutoLock enter(mMonitor);
mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
}
#endif
mOrientationChanged = true;
}
void
MediaEngineGonkVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
MOZ_ASSERT(NS_IsMainThread());
ICameraControl::Configuration config;
config.mMode = ICameraControl::kPictureMode;
config.mPreviewSize.width = aCapability.width;
config.mPreviewSize.height = aCapability.height;
mCameraControl->Start(&config);
mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
hal::RegisterScreenConfigurationObserver(this);
}
void
MediaEngineGonkVideoSource::StopImpl() {
MOZ_ASSERT(NS_IsMainThread());
hal::UnregisterScreenConfigurationObserver(this);
mCameraControl->Stop();
}
void
MediaEngineGonkVideoSource::OnHardwareStateChange(HardwareState aState)
{
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (aState == CameraControlListener::kHardwareClosed) {
// When the first CameraControl listener is added, it gets pushed
// the current state of the camera--normally 'closed'. We only
// pay attention to that state if we've progressed out of the
// allocated state.
if (mState != kAllocated) {
mState = kReleased;
mCallbackMonitor.Notify();
}
} else {
// Can't read this except on MainThread (ugh)
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this),
&MediaEngineGonkVideoSource::GetRotation));
mState = kStarted;
mCallbackMonitor.Notify();
}
}
void
MediaEngineGonkVideoSource::GetRotation()
{
MOZ_ASSERT(NS_IsMainThread());
MonitorAutoLock enter(mMonitor);
mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
mCameraAngle == 270);
hal::ScreenConfiguration config;
hal::GetCurrentScreenConfiguration(&config);
nsCString deviceName;
ICameraControl::GetCameraName(mCaptureIndex, deviceName);
if (deviceName.EqualsASCII("back")) {
mBackCamera = true;
}
mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
}
void
MediaEngineGonkVideoSource::OnUserError(UserContext aContext, nsresult aError)
{
{
// Scope the monitor, since there is another monitor below and we don't want
// unexpected deadlock.
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
mCallbackMonitor.Notify();
}
// A main thread runnable to send error code to all queued PhotoCallbacks.
class TakePhotoError : public nsRunnable {
public:
TakePhotoError(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
nsresult aRv)
: mRv(aRv)
{
mCallbacks.SwapElements(aCallbacks);
}
NS_IMETHOD Run()
{
uint32_t callbackNumbers = mCallbacks.Length();
for (uint8_t i = 0; i < callbackNumbers; i++) {
mCallbacks[i]->PhotoError(mRv);
}
// PhotoCallback needs to dereference on main thread.
mCallbacks.Clear();
return NS_OK;
}
protected:
nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
nsresult mRv;
};
if (aContext == UserContext::kInTakePicture) {
MonitorAutoLock lock(mMonitor);
if (mPhotoCallbacks.Length()) {
NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
}
}
}
void
MediaEngineGonkVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
{
// It needs to start preview because Gonk camera will stop preview while
// taking picture.
mCameraControl->StartPreview();
// Create a main thread runnable to generate a blob and call all current queued
// PhotoCallbacks.
class GenerateBlobRunnable : public nsRunnable {
public:
GenerateBlobRunnable(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
uint8_t* aData,
uint32_t aLength,
const nsAString& aMimeType)
{
mCallbacks.SwapElements(aCallbacks);
mPhoto.AppendElements(aData, aLength);
mMimeType = aMimeType;
}
NS_IMETHOD Run()
{
nsRefPtr<dom::File> blob =
dom::File::CreateMemoryFile(nullptr, mPhoto.Elements(), mPhoto.Length(), mMimeType);
uint32_t callbackCounts = mCallbacks.Length();
for (uint8_t i = 0; i < callbackCounts; i++) {
nsRefPtr<dom::File> tempBlob = blob;
mCallbacks[i]->PhotoComplete(tempBlob.forget());
}
// PhotoCallback needs to dereference on main thread.
mCallbacks.Clear();
return NS_OK;
}
nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
nsTArray<uint8_t> mPhoto;
nsString mMimeType;
};
// All elements in mPhotoCallbacks will be swapped in GenerateBlobRunnable
// constructor. This captured image will be sent to all the queued
// PhotoCallbacks in this runnable.
MonitorAutoLock lock(mMonitor);
if (mPhotoCallbacks.Length()) {
NS_DispatchToMainThread(
new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
}
}
nsresult
MediaEngineGonkVideoSource::TakePhoto(PhotoCallback* aCallback)
{
MOZ_ASSERT(NS_IsMainThread());
MonitorAutoLock lock(mMonitor);
// If other callback exists, that means there is a captured picture on the way,
// it doesn't need to TakePicture() again.
if (!mPhotoCallbacks.Length()) {
nsresult rv;
if (mOrientationChanged) {
UpdatePhotoOrientation();
}
rv = mCameraControl->TakePicture();
if (NS_FAILED(rv)) {
return rv;
}
}
mPhotoCallbacks.AppendElement(aCallback);
return NS_OK;
}
nsresult
MediaEngineGonkVideoSource::UpdatePhotoOrientation()
{
MOZ_ASSERT(NS_IsMainThread());
hal::ScreenConfiguration config;
hal::GetCurrentScreenConfiguration(&config);
// The rotation angle is clockwise.
int orientation = 0;
switch (config.orientation()) {
case eScreenOrientation_PortraitPrimary:
orientation = 0;
break;
case eScreenOrientation_PortraitSecondary:
orientation = 180;
break;
case eScreenOrientation_LandscapePrimary:
orientation = 270;
break;
case eScreenOrientation_LandscapeSecondary:
orientation = 90;
break;
}
// Front camera is inverse angle comparing to back camera.
orientation = (mBackCamera ? orientation : (-orientation));
ICameraControlParameterSetAutoEnter batch(mCameraControl);
// It changes the orientation value in EXIF information only.
mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
mOrientationChanged = false;
return NS_OK;
}
uint32_t
MediaEngineGonkVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
{
switch (aFormat) {
case HAL_PIXEL_FORMAT_RGBA_8888:
return libyuv::FOURCC_BGRA;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
return libyuv::FOURCC_NV21;
case HAL_PIXEL_FORMAT_YV12:
return libyuv::FOURCC_YV12;
default: {
LOG((" xxxxx Unknown pixel format %d", aFormat));
MOZ_ASSERT(false, "Unknown pixel format.");
return libyuv::FOURCC_ANY;
}
}
}
void
MediaEngineGonkVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
void *pMem = nullptr;
uint32_t size = aWidth * aHeight * 3 / 2;
graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
// Create a video frame and append it to the track.
nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
uint32_t dstWidth;
uint32_t dstHeight;
if (mRotation == 90 || mRotation == 270) {
dstWidth = aHeight;
dstHeight = aWidth;
} else {
dstWidth = aWidth;
dstHeight = aHeight;
}
uint32_t half_width = dstWidth / 2;
uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
libyuv::ConvertToI420(srcPtr, size,
dstPtr, dstWidth,
dstPtr + (dstWidth * dstHeight), half_width,
dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
0, 0,
aWidth, aHeight,
aWidth, aHeight,
static_cast<libyuv::RotationMode>(mRotation),
ConvertPixelFormatToFOURCC(graphicBuffer->getPixelFormat()));
graphicBuffer->unlock();
const uint8_t lumaBpp = 8;
const uint8_t chromaBpp = 4;
layers::PlanarYCbCrData data;
data.mYChannel = dstPtr;
data.mYSize = IntSize(dstWidth, dstHeight);
data.mYStride = dstWidth * lumaBpp / 8;
data.mCbCrStride = dstWidth * chromaBpp / 8;
data.mCbChannel = dstPtr + dstHeight * data.mYStride;
data.mCrChannel = data.mCbChannel + (dstHeight * data.mCbCrStride / 2);
data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
data.mPicX = 0;
data.mPicY = 0;
data.mPicSize = IntSize(dstWidth, dstHeight);
data.mStereoMode = StereoMode::MONO;
videoImage->SetDataNoCopy(data);
// implicitly releases last image
mImage = image.forget();
}
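The buffer arithmetic in RotateImage() is the standard I420 layout: a full-resolution Y plane followed by quarter-resolution Cb and Cr planes, width * height * 3 / 2 bytes in total. A small sketch of the plane offsets passed to libyuv::ConvertToI420, for a hypothetical 640x480 frame:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t w = 640, h = 480;
  uint32_t ySize = w * h;             // 307200-byte luma plane, stride w
  uint32_t cbOffset = ySize;          // Cb plane starts right after Y
  uint32_t crOffset = ySize * 5 / 4;  // Cr follows Y + Cb (each chroma is w*h/4)
  uint32_t total = w * h * 3 / 2;     // 460800 bytes for the whole frame
  // These match dstPtr + (dstWidth * dstHeight) and
  // dstPtr + (dstWidth * dstHeight * 5 / 4) above, with chroma stride w / 2.
  printf("Cb@%u Cr@%u total=%u\n", cbOffset, crOffset, total);
  return 0;
}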
bool
MediaEngineGonkVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
{
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kStopped) {
return false;
}
}
MonitorAutoLock enter(mMonitor);
// Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
RotateImage(aImage, aWidth, aHeight);
if (mRotation != 0 && mRotation != 180) {
uint32_t temp = aWidth;
aWidth = aHeight;
aHeight = temp;
}
if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
mWidth = aWidth;
mHeight = aHeight;
LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
}
return true; // return true because we're accepting the frame
}
} // namespace mozilla

View File

@ -0,0 +1,113 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MediaEngineGonkVideoSource_h_
#define MediaEngineGonkVideoSource_h_
#ifndef MOZ_B2G_CAMERA
#error MediaEngineGonkVideoSource is only available when MOZ_B2G_CAMERA is defined.
#endif
#include "CameraControlListener.h"
#include "MediaEngineCameraVideoSource.h"
#include "mozilla/Hal.h"
#include "mozilla/ReentrantMonitor.h"
namespace mozilla {
/**
* The B2G implementation of the MediaEngine interface.
*
* On the B2G platform, member data may be accessed from different threads after construction:
*
* MediaThread:
* mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
* mSources, mImageContainer, mLastCapture.
*
* CameraThread:
* mDOMCameraControl, mCaptureIndex, mCameraThread, mWindowId, mCameraManager,
* mNativeCameraControl, mPreviewStream, mState, mLastCapture, mWidth, mHeight
*
* Where mWidth, mHeight, mImage, mPhotoCallbacks, mRotation, mCameraAngle and
* mBackCamera are protected by mMonitor (in parent MediaEngineCameraVideoSource)
* mState and mLastCapture are protected by mCallbackMonitor.
* Other variables are accessed only from a single thread.
*/
class MediaEngineGonkVideoSource : public MediaEngineCameraVideoSource
, public mozilla::hal::ScreenConfigurationObserver
, public CameraControlListener
{
public:
NS_DECL_ISUPPORTS_INHERITED
MediaEngineGonkVideoSource(int aIndex)
: MediaEngineCameraVideoSource(aIndex, "GonkCamera.Monitor")
, mCameraControl(nullptr)
, mCallbackMonitor("GonkCamera.CallbackMonitor")
, mRotation(0)
, mBackCamera(false)
, mOrientationChanged(true) // Correct the orientation the first time TakePhoto() is called.
{
Init();
}
virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs) MOZ_OVERRIDE;
virtual nsresult Deallocate() MOZ_OVERRIDE;
virtual nsresult Start(SourceMediaStream* aStream, TrackID aID) MOZ_OVERRIDE;
virtual nsresult Stop(SourceMediaStream* aSource, TrackID aID) MOZ_OVERRIDE;
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream* aSource,
TrackID aId,
StreamTime aDesiredTime,
TrackTicks& aLastEndTime) MOZ_OVERRIDE;
void OnHardwareStateChange(HardwareState aState);
void GetRotation();
bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
void OnUserError(UserContext aContext, nsresult aError);
void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
void AllocImpl();
void DeallocImpl();
void StartImpl(webrtc::CaptureCapability aCapability);
void StopImpl();
uint32_t ConvertPixelFormatToFOURCC(int aFormat);
void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
// It sets the correct photo orientation via camera parameter according to
// current screen orientation.
nsresult UpdatePhotoOrientation();
protected:
~MediaEngineGonkVideoSource()
{
Shutdown();
}
// Initialize the needed Video engine interfaces.
void Init();
void Shutdown();
void ChooseCapability(const VideoTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs);
mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
// This is only modified on MainThread (AllocImpl and DeallocImpl)
nsRefPtr<ICameraControl> mCameraControl;
nsCOMPtr<nsIDOMFile> mLastCapture;
// These are protected by mMonitor in parent class
nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
int mRotation;
int mCameraAngle; // See dom/base/ScreenOrientation.h
bool mBackCamera;
bool mOrientationChanged; // True when screen rotates.
};
} // namespace mozilla
#endif // MediaEngineGonkVideoSource_h_

View File

@ -189,12 +189,6 @@ MediaEngineTabVideoSource::Start(mozilla::SourceMediaStream* aStream, mozilla::T
return NS_OK;
}
nsresult
MediaEngineTabVideoSource::Snapshot(uint32_t, nsIDOMFile**)
{
return NS_OK;
}
void
MediaEngineTabVideoSource::
NotifyPull(MediaStreamGraph*, SourceMediaStream* aSource, mozilla::TrackID aID, mozilla::StreamTime aDesiredTime, mozilla::TrackTicks& aLastEndTime)

View File

@ -25,7 +25,6 @@ class MediaEngineTabVideoSource : public MediaEngineVideoSource, nsIDOMEventList
virtual nsresult Deallocate();
virtual nsresult Start(mozilla::SourceMediaStream*, mozilla::TrackID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
virtual nsresult Snapshot(uint32_t, nsIDOMFile**);
virtual void NotifyPull(mozilla::MediaStreamGraph*, mozilla::SourceMediaStream*, mozilla::TrackID, mozilla::StreamTime, mozilla::TrackTicks&);
virtual nsresult Stop(mozilla::SourceMediaStream*, mozilla::TrackID);
virtual nsresult Config(bool, uint32_t, bool, uint32_t, bool, uint32_t, int32_t);

View File

@ -31,6 +31,11 @@ GetUserMediaLog()
#include "AndroidBridge.h" #include "AndroidBridge.h"
#endif #endif
#ifdef MOZ_B2G_CAMERA
#include "ICameraControl.h"
#include "MediaEngineGonkVideoSource.h"
#endif
#undef LOG
#define LOG(args) PR_LOG(GetUserMediaLog(), PR_LOG_DEBUG, args)
@ -73,7 +78,7 @@ MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource,
// We spawn threads to handle gUM runnables, so we must protect the member vars
MutexAutoLock lock(mMutex);
#ifdef MOZ_B2G_CAMERA
if (aMediaSource != MediaSourceType::Camera) {
// only supports camera sources
return;
@ -101,13 +106,13 @@ MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource,
continue;
}
nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
nsRefPtr<MediaEngineVideoSource> vSource;
NS_ConvertUTF8toUTF16 uuid(cameraName);
if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
// We've already seen this device, just append.
aVSources->AppendElement(vSource.get());
} else {
vSource = new MediaEngineWebRTCVideoSource(i, aMediaSource);
vSource = new MediaEngineGonkVideoSource(i);
mVideoSources.Put(uuid, vSource); // Hashtable takes ownership.
aVSources->AppendElement(vSource);
}
@ -256,11 +261,11 @@ MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource,
uniqueId[sizeof(uniqueId)-1] = '\0'; // strncpy isn't safe
}
nsRefPtr<MediaEngineWebRTCVideoSource> vSource;
nsRefPtr<MediaEngineVideoSource> vSource;
NS_ConvertUTF8toUTF16 uuid(uniqueId);
if (mVideoSources.Get(uuid, getter_AddRefs(vSource))) {
// We've already seen this device, just refresh and append.
vSource->Refresh(i);
static_cast<MediaEngineWebRTCVideoSource*>(vSource.get())->Refresh(i);
aVSources->AppendElement(vSource.get());
} else {
vSource = new MediaEngineWebRTCVideoSource(videoEngine, i, aMediaSource);

View File

@ -21,7 +21,7 @@
#include "nsRefPtrHashtable.h" #include "nsRefPtrHashtable.h"
#include "VideoUtils.h" #include "VideoUtils.h"
#include "MediaEngine.h" #include "MediaEngineCameraVideoSource.h"
#include "VideoSegment.h" #include "VideoSegment.h"
#include "AudioSegment.h" #include "AudioSegment.h"
#include "StreamBuffer.h" #include "StreamBuffer.h"
@ -49,76 +49,27 @@
#include "webrtc/video_engine/include/vie_codec.h" #include "webrtc/video_engine/include/vie_codec.h"
#include "webrtc/video_engine/include/vie_render.h" #include "webrtc/video_engine/include/vie_render.h"
#include "webrtc/video_engine/include/vie_capture.h" #include "webrtc/video_engine/include/vie_capture.h"
#ifdef MOZ_B2G_CAMERA
#include "CameraControlListener.h"
#include "ICameraControl.h"
#include "ImageContainer.h"
#include "nsGlobalWindow.h"
#include "prprf.h"
#include "mozilla/Hal.h"
#endif
#include "NullTransport.h" #include "NullTransport.h"
#include "AudioOutputObserver.h" #include "AudioOutputObserver.h"
namespace mozilla { namespace mozilla {
#ifdef MOZ_B2G_CAMERA
class CameraAllocateRunnable;
class GetCameraNameRunnable;
#endif
/**
* The WebRTC implementation of the MediaEngine interface.
*
* On B2G platform, member data may accessed from different thread after construction:
*
* MediaThread:
* mState, mImage, mWidth, mHeight, mCapability, mPrefs, mDeviceName, mUniqueId, mInitDone,
* mImageContainer, mSources, mState, mImage
*
* MainThread:
* mCaptureIndex, mLastCapture, mState, mWidth, mHeight,
*
* Where mWidth, mHeight, mImage, mPhotoCallbacks are protected by mMonitor
* mState is protected by mCallbackMonitor
* Other variable is accessed only from single thread
*/
class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
class MediaEngineWebRTCVideoSource : public MediaEngineCameraVideoSource
, public nsRunnable
#ifdef MOZ_B2G_CAMERA
, public CameraControlListener
, public mozilla::hal::ScreenConfigurationObserver
#else
, public webrtc::ExternalRenderer
#endif
{
public:
#ifdef MOZ_B2G_CAMERA
NS_DECL_THREADSAFE_ISUPPORTS
MediaEngineWebRTCVideoSource(int aIndex,
MediaSourceType aMediaSource = MediaSourceType::Camera)
: mCameraControl(nullptr)
, mCallbackMonitor("WebRTCCamera.CallbackMonitor")
, mRotation(0)
, mBackCamera(false)
, mOrientationChanged(true) // Correct the orientation at first time takePhoto.
, mCaptureIndex(aIndex)
, mMediaSource(aMediaSource)
, mMonitor("WebRTCCamera.Monitor")
, mWidth(0)
, mHeight(0)
, mHasDirectListeners(false)
, mInitDone(false)
, mInSnapshotMode(false)
, mSnapshotPath(nullptr)
{
mState = kReleased;
Init();
}
#else
// ViEExternalRenderer.
virtual int FrameSizeChange(unsigned int w, unsigned int h, unsigned int streams);
virtual int DeliverFrame(unsigned char* buffer,
int size,
uint32_t time_stamp,
int64_t render_time,
void *handle);
/**
* Does DeliverFrame() support a null buffer and non-null handle
@ -129,104 +80,33 @@ public:
MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
MediaSourceType aMediaSource = MediaSourceType::Camera)
: mVideoEngine(aVideoEnginePtr)
, mCaptureIndex(aIndex)
: MediaEngineCameraVideoSource(aIndex, "WebRTCCamera.Monitor")
, mVideoEngine(aVideoEnginePtr)
, mFps(-1)
, mMinFps(-1)
, mMediaSource(aMediaSource)
, mMonitor("WebRTCCamera.Monitor")
{
, mWidth(0)
, mHeight(0)
, mHasDirectListeners(false)
, mInitDone(false)
, mInSnapshotMode(false)
, mSnapshotPath(nullptr) {
MOZ_ASSERT(aVideoEnginePtr);
mState = kReleased;
Init();
}
#endif
virtual void GetName(nsAString&);
virtual void GetUUID(nsAString&);
virtual nsresult Allocate(const VideoTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs);
virtual nsresult Deallocate();
virtual nsresult Start(SourceMediaStream*, TrackID);
virtual nsresult Stop(SourceMediaStream*, TrackID);
virtual void SetDirectListeners(bool aHasListeners);
virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) { return NS_OK; };
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream* aSource,
TrackID aId,
StreamTime aDesiredTime,
TrackTicks& aLastEndTime);
virtual bool IsFake() {
return false;
}
virtual const MediaSourceType GetMediaSource() {
return mMediaSource;
}
virtual nsresult TakePhoto(PhotoCallback* aCallback)
#ifndef MOZ_B2G_CAMERA
NS_DECL_THREADSAFE_ISUPPORTS
nsresult TakePhoto(PhotoCallback* aCallback)
{
return NS_ERROR_NOT_IMPLEMENTED;
}
#else
// We are subclassed from CameraControlListener, which implements a
// threadsafe reference-count for us.
NS_DECL_ISUPPORTS_INHERITED
void OnHardwareStateChange(HardwareState aState);
void GetRotation();
bool OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
void OnUserError(UserContext aContext, nsresult aError);
void OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType);
void AllocImpl();
void DeallocImpl();
void StartImpl(webrtc::CaptureCapability aCapability);
void StopImpl();
void SnapshotImpl();
void RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight);
uint32_t ConvertPixelFormatToFOURCC(int aFormat);
void Notify(const mozilla::hal::ScreenConfiguration& aConfiguration);
nsresult TakePhoto(PhotoCallback* aCallback) MOZ_OVERRIDE;
// It sets the correct photo orientation via camera parameter according to
// current screen orientation.
nsresult UpdatePhotoOrientation();
#endif
// This runnable is for creating a temporary file on the main thread.
NS_IMETHODIMP
Run()
{
nsCOMPtr<nsIFile> tmp;
nsresult rv = NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(tmp));
NS_ENSURE_SUCCESS(rv, rv);
tmp->Append(NS_LITERAL_STRING("webrtc_snapshot.jpeg"));
rv = tmp->CreateUnique(nsIFile::NORMAL_FILE_TYPE, 0600);
NS_ENSURE_SUCCESS(rv, rv);
mSnapshotPath = new nsString();
rv = tmp->GetPath(*mSnapshotPath);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
void Refresh(int aIndex);
@ -239,64 +119,29 @@ private:
void Shutdown();
// Engine variables.
#ifdef MOZ_B2G_CAMERA
mozilla::ReentrantMonitor mCallbackMonitor; // Monitor for camera callback handling
// This is only modified on MainThread (AllocImpl and DeallocImpl)
nsRefPtr<ICameraControl> mCameraControl;
nsCOMPtr<nsIDOMFile> mLastCapture;
nsTArray<nsRefPtr<PhotoCallback>> mPhotoCallbacks;
// These are protected by mMonitor below
int mRotation;
int mCameraAngle; // See dom/base/ScreenOrientation.h
bool mBackCamera;
bool mOrientationChanged; // True when screen rotates.
#else
webrtc::VideoEngine* mVideoEngine; // Weak reference, don't free.
webrtc::ViEBase* mViEBase;
webrtc::ViECapture* mViECapture;
webrtc::ViERender* mViERender;
#endif
webrtc::CaptureCapability mCapability; // Doesn't work on OS X.
int mCaptureIndex;
int mFps; // Track rate (30 fps by default)
int mMinFps; // Min rate we want to accept
MediaSourceType mMediaSource; // source of media (camera | application | screen)
// mMonitor protects mImage access/changes, and transitions of mState
// from kStarted to kStopped (which are combined with EndTrack() and
// image changes). Note that mSources is not accessed from other threads
// for video and is not protected.
static bool SatisfyConstraintSet(const dom::MediaTrackConstraintSet& aConstraints,
const webrtc::CaptureCapability& aCandidate);
void ChooseCapability(const VideoTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs);
Monitor mMonitor; // Monitor for processing WebRTC frames.
int mWidth, mHeight;
nsRefPtr<layers::Image> mImage;
nsRefPtr<layers::ImageContainer> mImageContainer;
bool mHasDirectListeners;
nsTArray<SourceMediaStream *> mSources; // When this goes empty, we shut down HW
bool mInitDone;
bool mInSnapshotMode;
nsString* mSnapshotPath;
nsString mDeviceName;
nsString mUniqueId;
void ChooseCapability(const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs);
void GuessCapability(const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs);
};
class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
public webrtc::VoEMediaProcess
{
public:
MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
int aIndex, const char* name, const char* uuid)
: mSamples(0)
: MediaEngineAudioSource(kReleased)
, mSamples(0)
, mVoiceEngine(aVoiceEnginePtr)
, mMonitor("WebRTCMic.Monitor")
, mThread(aThread)
@ -311,32 +156,30 @@ public:
, mPlayoutDelay(0)
, mNullTransport(nullptr) {
MOZ_ASSERT(aVoiceEnginePtr);
mState = kReleased;
mDeviceName.Assign(NS_ConvertUTF8toUTF16(name));
mDeviceUUID.Assign(NS_ConvertUTF8toUTF16(uuid));
Init();
}
virtual void GetName(nsAString& aName);
virtual void GetUUID(nsAString& aUUID);
virtual nsresult Allocate(const AudioTrackConstraintsN& aConstraints,
const MediaEnginePrefs& aPrefs);
virtual nsresult Deallocate();
virtual nsresult Start(SourceMediaStream* aStream, TrackID aID);
virtual nsresult Stop(SourceMediaStream* aSource, TrackID aID);
virtual void SetDirectListeners(bool aHasDirectListeners) {};
virtual nsresult Snapshot(uint32_t aDuration, nsIDOMFile** aFile);
virtual nsresult Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay);
virtual void NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream* aSource,
TrackID aId,
StreamTime aDesiredTime,
TrackTicks& aLastEndTime);
virtual bool IsFake() {
return false;
@ -382,7 +225,7 @@ private:
// from kStarted to kStopped (which are combined with EndTrack()).
// mSources[] is accessed from webrtc threads.
Monitor mMonitor;
nsTArray<SourceMediaStream*> mSources; // When this goes empty, we shut down HW
nsCOMPtr<nsIThread> mThread;
int mCapIndex;
int mChannel;
@ -405,7 +248,7 @@ private:
class MediaEngineWebRTC : public MediaEngine
{
public:
explicit MediaEngineWebRTC(MediaEnginePrefs& aPrefs);
// Clients should ensure that video/audio sources are cleaned up
// before invoking Shutdown on this class.
@ -453,8 +296,8 @@ private:
// Store devices we've already seen in a hashtable for quick return.
// Maps UUID to MediaEngineSource (one set for audio, one for video).
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCVideoSource > mVideoSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
};
}

View File

@ -406,12 +406,6 @@ MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
#endif
}
nsresult
MediaEngineWebRTCAudioSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
return NS_ERROR_NOT_IMPLEMENTED;
}
void
MediaEngineWebRTCAudioSource::Init()
{

View File

@ -11,13 +11,6 @@
#include "mtransport/runnable_utils.h" #include "mtransport/runnable_utils.h"
#include "MediaTrackConstraints.h" #include "MediaTrackConstraints.h"
#ifdef MOZ_B2G_CAMERA
#include "GrallocImages.h"
#include "libyuv.h"
#include "mozilla/Hal.h"
#include "ScreenOrientation.h"
using namespace mozilla::dom;
#endif
namespace mozilla {
using namespace mozilla::gfx;
@ -37,16 +30,9 @@ extern PRLogModuleInfo* GetMediaManagerLog();
/**
* Webrtc video source.
*/
#ifndef MOZ_B2G_CAMERA
NS_IMPL_ISUPPORTS(MediaEngineWebRTCVideoSource, nsIRunnable)
#else
NS_IMPL_QUERY_INTERFACE(MediaEngineWebRTCVideoSource, nsIRunnable)
NS_IMPL_ADDREF_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
NS_IMPL_RELEASE_INHERITED(MediaEngineWebRTCVideoSource, CameraControlListener)
#endif
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCVideoSource)
// ViEExternalRenderer Callback.
#ifndef MOZ_B2G_CAMERA
int
MediaEngineWebRTCVideoSource::FrameSizeChange(
unsigned int w, unsigned int h, unsigned int streams)
@ -63,16 +49,6 @@ MediaEngineWebRTCVideoSource::DeliverFrame(
unsigned char* buffer, int size, uint32_t time_stamp, int64_t render_time,
void *handle)
{
// mInSnapshotMode can only be set before the camera is turned on and
// the renderer is started, so this amounts to a 1-shot
if (mInSnapshotMode) {
// Set the condition variable to false and notify Snapshot().
MonitorAutoLock lock(mMonitor);
mInSnapshotMode = false;
lock.Notify();
return 0;
}
// Check for proper state.
if (mState != kStarted) {
LOG(("DeliverFrame: video not started"));
@ -124,7 +100,6 @@ MediaEngineWebRTCVideoSource::DeliverFrame(
return 0;
}
#endif
// Called if the graph thinks it's running out of buffered video; repeat
// the last frame for whatever minimum period it thinks it needs. Note that
@ -172,38 +147,14 @@ MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph,
}
}
static bool IsWithin(int32_t n, const ConstrainLongRange& aRange) {
return aRange.mMin <= n && n <= aRange.mMax;
}

static bool IsWithin(double n, const ConstrainDoubleRange& aRange) {
return aRange.mMin <= n && n <= aRange.mMax;
}

static int32_t Clamp(int32_t n, const ConstrainLongRange& aRange) {
return std::max(aRange.mMin, std::min(n, aRange.mMax));
}

static bool
AreIntersecting(const ConstrainLongRange& aA, const ConstrainLongRange& aB) {
return aA.mMax >= aB.mMin && aA.mMin <= aB.mMax;
}

static bool
Intersect(ConstrainLongRange& aA, const ConstrainLongRange& aB) {
MOZ_ASSERT(AreIntersecting(aA, aB));
aA.mMin = std::max(aA.mMin, aB.mMin);
aA.mMax = std::min(aA.mMax, aB.mMax);
return true;
}

static bool SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
const webrtc::CaptureCapability& aCandidate) {
if (!IsWithin(aCandidate.width, aConstraints.mWidth) ||
!IsWithin(aCandidate.height, aConstraints.mHeight)) {
return false;
}
if (!IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
return false;
}
return true;

/*static*/
bool MediaEngineWebRTCVideoSource::SatisfyConstraintSet(const MediaTrackConstraintSet &aConstraints,
const webrtc::CaptureCapability& aCandidate) {
if (!MediaEngineCameraVideoSource::IsWithin(aCandidate.width, aConstraints.mWidth) ||
!MediaEngineCameraVideoSource::IsWithin(aCandidate.height, aConstraints.mHeight)) {
return false;
}
if (!MediaEngineCameraVideoSource::IsWithin(aCandidate.maxFPS, aConstraints.mFrameRate)) {
return false;
}
return true;
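The helpers deleted here move into MediaEngineCameraVideoSource; their semantics are plain interval arithmetic over constraint ranges. A self-contained sketch of that behavior (the Range struct is a stand-in for dom::ConstrainLongRange, not the real WebIDL binding):

#include <algorithm>
#include <cassert>
#include <cstdint>

struct Range { int32_t mMin, mMax; }; // stand-in for dom::ConstrainLongRange

static bool IsWithin(int32_t n, const Range& r) { return r.mMin <= n && n <= r.mMax; }
static int32_t Clamp(int32_t n, const Range& r) { return std::max(r.mMin, std::min(n, r.mMax)); }
static bool AreIntersecting(const Range& a, const Range& b) { return a.mMax >= b.mMin && a.mMin <= b.mMax; }
static void Intersect(Range& a, const Range& b) {
  assert(AreIntersecting(a, b));
  a.mMin = std::max(a.mMin, b.mMin);
  a.mMax = std::min(a.mMax, b.mMax);
}

int main() {
  Range width{640, 1920};
  Intersect(width, Range{0, 1280});    // compound an "advanced" constraint
  assert(width.mMin == 640 && width.mMax == 1280);
  assert(IsWithin(720, width));        // candidate capability accepted
  assert(!IsWithin(1440, width));      // candidate rejected
  assert(Clamp(1440, width) == 1280);  // preference clipped into the range
}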
@ -214,9 +165,6 @@ MediaEngineWebRTCVideoSource::ChooseCapability(
const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs)
{
#ifdef MOZ_B2G_CAMERA
return GuessCapability(aConstraints, aPrefs);
#else
NS_ConvertUTF16toUTF8 uniqueId(mUniqueId);
int num = mViECapture->NumberOfCapabilities(uniqueId.get(), kMaxUniqueIdLength);
if (num <= 0) {
@ -331,100 +279,6 @@ MediaEngineWebRTCVideoSource::ChooseCapability(
LOG(("chose cap %dx%d @%dfps codec %d raw %d", LOG(("chose cap %dx%d @%dfps codec %d raw %d",
mCapability.width, mCapability.height, mCapability.maxFPS, mCapability.width, mCapability.height, mCapability.maxFPS,
mCapability.codecType, mCapability.rawType)); mCapability.codecType, mCapability.rawType));
#endif
}
// A special version of the algorithm for cameras that don't list capabilities.
void
MediaEngineWebRTCVideoSource::GuessCapability(
const VideoTrackConstraintsN &aConstraints,
const MediaEnginePrefs &aPrefs)
{
LOG(("GuessCapability: prefs: %dx%d @%d-%dfps",
aPrefs.mWidth, aPrefs.mHeight, aPrefs.mFPS, aPrefs.mMinFPS));
// In short: compound constraint-ranges and use pref as ideal.
ConstrainLongRange cWidth(aConstraints.mRequired.mWidth);
ConstrainLongRange cHeight(aConstraints.mRequired.mHeight);
if (aConstraints.mAdvanced.WasPassed()) {
const auto& advanced = aConstraints.mAdvanced.Value();
for (uint32_t i = 0; i < advanced.Length(); i++) {
if (AreIntersecting(cWidth, advanced[i].mWidth) &&
AreIntersecting(cHeight, advanced[i].mHeight)) {
Intersect(cWidth, advanced[i].mWidth);
Intersect(cHeight, advanced[i].mHeight);
}
}
}
// Detect Mac HD cams and give them some love in the form of a dynamic default
// since that hardware switches between 4:3 at low res and 16:9 at higher res.
//
// Logic is: if we're relying on defaults in aPrefs, then
// only use HD pref when non-HD pref is too small and HD pref isn't too big.
bool macHD = ((!aPrefs.mWidth || !aPrefs.mHeight) &&
mDeviceName.EqualsASCII("FaceTime HD Camera (Built-in)") &&
(aPrefs.GetWidth() < cWidth.mMin ||
aPrefs.GetHeight() < cHeight.mMin) &&
!(aPrefs.GetWidth(true) > cWidth.mMax ||
aPrefs.GetHeight(true) > cHeight.mMax));
int prefWidth = aPrefs.GetWidth(macHD);
int prefHeight = aPrefs.GetHeight(macHD);
// Clamp width and height without distorting inherent aspect too much.
if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
// If both are within, we get the default (pref) aspect.
// If neither are within, we get the aspect of the enclosing constraint.
// Either is presumably reasonable (presuming constraints are sane).
mCapability.width = Clamp(prefWidth, cWidth);
mCapability.height = Clamp(prefHeight, cHeight);
} else {
// But if only one clips (e.g. width), the resulting skew is undesirable:
// .------------.
// | constraint |
// .----+------------+----.
// | | | |
// |pref| result | | prefAspect != resultAspect
// | | | |
// '----+------------+----'
// '------------'
// So in this case, preserve prefAspect instead:
// .------------.
// | constraint |
// .------------.
// |pref | prefAspect is unchanged
// '------------'
// | |
// '------------'
if (IsWithin(prefWidth, cWidth)) {
mCapability.height = Clamp(prefHeight, cHeight);
mCapability.width = Clamp((mCapability.height * prefWidth) /
prefHeight, cWidth);
} else {
mCapability.width = Clamp(prefWidth, cWidth);
mCapability.height = Clamp((mCapability.width * prefHeight) /
prefWidth, cHeight);
}
}
mCapability.maxFPS = MediaEngine::DEFAULT_VIDEO_FPS;
LOG(("chose cap %dx%d @%dfps",
mCapability.width, mCapability.height, mCapability.maxFPS));
}
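The asymmetric branch of the removed GuessCapability() is the subtle part: clamp the dimension that fits, then rescale the other from the preferred aspect before clamping it too. A worked sketch with invented numbers (none taken from the tree):

#include <algorithm>
#include <cstdio>

struct Range { int mMin, mMax; };
static int Clamp(int n, const Range& r) { return std::max(r.mMin, std::min(n, r.mMax)); }
static bool IsWithin(int n, const Range& r) { return r.mMin <= n && n <= r.mMax; }

int main() {
  Range cWidth{160, 480}, cHeight{120, 720};  // hypothetical constraints
  int prefWidth = 640, prefHeight = 480;      // 4:3 preference

  int w, h;
  if (IsWithin(prefWidth, cWidth) == IsWithin(prefHeight, cHeight)) {
    w = Clamp(prefWidth, cWidth);             // both in or both out:
    h = Clamp(prefHeight, cHeight);           // clamp independently
  } else if (IsWithin(prefWidth, cWidth)) {   // only height clips
    h = Clamp(prefHeight, cHeight);
    w = Clamp(h * prefWidth / prefHeight, cWidth);
  } else {                                    // only width clips: keep 4:3
    w = Clamp(prefWidth, cWidth);             // 640 -> 480
    h = Clamp(w * prefHeight / prefWidth, cHeight); // 480*480/640 -> 360
  }
  std::printf("%dx%d\n", w, h);               // prints 480x360, still 4:3
}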
void
MediaEngineWebRTCVideoSource::GetName(nsAString& aName)
{
aName = mDeviceName;
}
void
MediaEngineWebRTCVideoSource::GetUUID(nsAString& aUUID)
{
aUUID = mUniqueId;
}
nsresult
@ -432,18 +286,6 @@ MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraint
const MediaEnginePrefs &aPrefs)
{
LOG((__FUNCTION__));
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kReleased && mInitDone) {
ChooseCapability(aConstraints, aPrefs);
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
&MediaEngineWebRTCVideoSource::AllocImpl));
mCallbackMonitor.Wait();
if (mState != kAllocated) {
return NS_ERROR_FAILURE;
}
}
#else
if (mState == kReleased && mInitDone) {
// Note: if shared, we don't allow a later opener to affect the resolution.
// (This may change depending on spec changes for Constraints/settings)
@ -461,7 +303,6 @@ MediaEngineWebRTCVideoSource::Allocate(const VideoTrackConstraintsN &aConstraint
} else {
LOG(("Video device %d allocated shared", mCaptureIndex));
}
#endif
return NS_OK;
}
@ -471,22 +312,10 @@ MediaEngineWebRTCVideoSource::Deallocate()
{
LOG((__FUNCTION__));
if (mSources.IsEmpty()) {
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
if (mState != kStopped && mState != kAllocated) {
return NS_ERROR_FAILURE;
}
#ifdef MOZ_B2G_CAMERA
// We do not register success callback here
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
&MediaEngineWebRTCVideoSource::DeallocImpl));
mCallbackMonitor.Wait();
if (mState != kReleased) {
return NS_ERROR_FAILURE;
}
#elif XP_MACOSX
#ifdef XP_MACOSX
// Bug 829907 - on mac, in shutdown, the mainthread stops processing
// 'native' events, and the QTKit code uses events to the main native CFRunLoop
// in order to provide thread safety. In order to avoid this locking us up,
@ -518,9 +347,7 @@ nsresult
MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
{
LOG((__FUNCTION__));
#ifndef MOZ_B2G_CAMERA
int error = 0;
#endif
if (!mInitDone || !aStream) {
return NS_ERROR_FAILURE;
}
@ -530,24 +357,11 @@ MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
aStream->AddTrack(aID, USECS_PER_S, 0, new VideoSegment());
aStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
if (mState == kStarted) {
return NS_OK;
}
mImageContainer = layers::LayerManager::CreateImageContainer();
#ifdef MOZ_B2G_CAMERA
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
&MediaEngineWebRTCVideoSource::StartImpl,
mCapability));
mCallbackMonitor.Wait();
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
#else
mState = kStarted;
error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this);
if (error == -1) {
@ -562,7 +376,6 @@ MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID)
if (mViECapture->StartCapture(mCaptureIndex, mCapability) < 0) {
return NS_ERROR_FAILURE;
}
#endif
return NS_OK;
}
@ -578,9 +391,6 @@ MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
if (!mSources.IsEmpty()) {
return NS_OK;
}
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
if (mState != kStarted) {
return NS_ERROR_FAILURE;
}
@ -593,45 +403,16 @@ MediaEngineWebRTCVideoSource::Stop(SourceMediaStream *aSource, TrackID aID)
// usage
mImage = nullptr;
}
#ifdef MOZ_B2G_CAMERA
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
&MediaEngineWebRTCVideoSource::StopImpl));
#else
mViERender->StopRender(mCaptureIndex);
mViERender->RemoveRenderer(mCaptureIndex);
mViECapture->StopCapture(mCaptureIndex);
#endif
return NS_OK;
}
void
MediaEngineWebRTCVideoSource::SetDirectListeners(bool aHasDirectListeners)
{
LOG((__FUNCTION__));
mHasDirectListeners = aHasDirectListeners;
}
nsresult
MediaEngineWebRTCVideoSource::Snapshot(uint32_t aDuration, nsIDOMFile** aFile)
{
return NS_ERROR_NOT_IMPLEMENTED;
}
/**
* Initialization and Shutdown functions for the video source, called by the
* constructor and destructor respectively.
*/
void
MediaEngineWebRTCVideoSource::Init()
{
#ifdef MOZ_B2G_CAMERA
nsAutoCString deviceName;
ICameraControl::GetCameraName(mCaptureIndex, deviceName);
CopyUTF8toUTF16(deviceName, mDeviceName);
CopyUTF8toUTF16(deviceName, mUniqueId);
#else
// fix compile warning for these being unused. (remove once used)
(void) mFps;
(void) mMinFps;
@ -664,7 +445,6 @@ MediaEngineWebRTCVideoSource::Init()
CopyUTF8toUTF16(deviceName, mDeviceName);
CopyUTF8toUTF16(uniqueId, mUniqueId);
#endif
mInitDone = true;
}
@ -676,9 +456,6 @@ MediaEngineWebRTCVideoSource::Shutdown()
if (!mInitDone) {
return;
}
#ifdef MOZ_B2G_CAMERA
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
#endif
if (mState == kStarted) {
while (!mSources.IsEmpty()) {
Stop(mSources[0], kVideoTrack); // XXX change to support multiple tracks
@ -689,11 +466,9 @@ MediaEngineWebRTCVideoSource::Shutdown()
if (mState == kAllocated || mState == kStopped) {
Deallocate();
}
#ifndef MOZ_B2G_CAMERA
mViECapture->Release();
mViERender->Release();
mViEBase->Release();
#endif
mState = kReleased;
mInitDone = false;
}
@ -701,9 +476,6 @@ MediaEngineWebRTCVideoSource::Shutdown()
void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
// NOTE: mCaptureIndex might have changed when allocated!
// Use aIndex to update information, but don't change mCaptureIndex!!
#ifdef MOZ_B2G_CAMERA
// Caller looked up this source by uniqueId; since deviceName == uniqueId nothing else changes
#else
// Caller looked up this source by uniqueId, so it shouldn't change
char deviceName[kMaxDeviceNameLength];
char uniqueId[kMaxUniqueIdLength];
@ -720,422 +492,6 @@ void MediaEngineWebRTCVideoSource::Refresh(int aIndex) {
CopyUTF8toUTF16(uniqueId, temp);
MOZ_ASSERT(temp.Equals(mUniqueId));
#endif
#endif
}
} // namespace mozilla

#ifdef MOZ_B2G_CAMERA
// All these functions must be run on MainThread!
void
MediaEngineWebRTCVideoSource::AllocImpl() {
MOZ_ASSERT(NS_IsMainThread());
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
mCameraControl = ICameraControl::Create(mCaptureIndex);
if (mCameraControl) {
mState = kAllocated;
// Add this as a listener for CameraControl events. We don't need
// to explicitly remove this--destroying the CameraControl object
// in DeallocImpl() will do that for us.
mCameraControl->AddListener(this);
}
mCallbackMonitor.Notify();
}
void
MediaEngineWebRTCVideoSource::DeallocImpl() {
MOZ_ASSERT(NS_IsMainThread());
mCameraControl = nullptr;
}
// The same algorithm from bug 840244
static int
GetRotateAmount(ScreenOrientation aScreen, int aCameraMountAngle, bool aBackCamera) {
int screenAngle = 0;
switch (aScreen) {
case eScreenOrientation_PortraitPrimary:
screenAngle = 0;
break;
case eScreenOrientation_PortraitSecondary:
screenAngle = 180;
break;
case eScreenOrientation_LandscapePrimary:
screenAngle = 90;
break;
case eScreenOrientation_LandscapeSecondary:
screenAngle = 270;
break;
default:
MOZ_ASSERT(false);
break;
}
int result;
if (aBackCamera) {
//back camera
result = (aCameraMountAngle - screenAngle + 360) % 360;
} else {
//front camera
result = (aCameraMountAngle + screenAngle) % 360;
}
return result;
}
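A quick sanity check of GetRotateAmount()'s modular arithmetic, with made-up mount angles (the two formulas are copied from the function above; everything else is illustrative):

#include <cassert>

// Same formulas as GetRotateAmount() above.
static int Rotate(int screenAngle, int mountAngle, bool backCamera) {
  return backCamera ? (mountAngle - screenAngle + 360) % 360
                    : (mountAngle + screenAngle) % 360;
}

int main() {
  // Back camera mounted at 90 deg, landscape-primary screen (90 deg):
  assert(Rotate(90, 90, true) == 0);    // already aligned, no rotation
  // Front camera in the same geometry rotates the opposite way:
  assert(Rotate(90, 90, false) == 180);
  // Portrait-primary (0 deg) with a 270-deg mount:
  assert(Rotate(0, 270, true) == 270);
}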
// undefine to remove on-the-fly rotation support
#define DYNAMIC_GUM_ROTATION
void
MediaEngineWebRTCVideoSource::Notify(const hal::ScreenConfiguration& aConfiguration) {
#ifdef DYNAMIC_GUM_ROTATION
if (mHasDirectListeners) {
// aka hooked to PeerConnection
MonitorAutoLock enter(mMonitor);
mRotation = GetRotateAmount(aConfiguration.orientation(), mCameraAngle, mBackCamera);
LOG(("*** New orientation: %d (Camera %d Back %d MountAngle: %d)",
mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
}
#endif
mOrientationChanged = true;
}
void
MediaEngineWebRTCVideoSource::StartImpl(webrtc::CaptureCapability aCapability) {
MOZ_ASSERT(NS_IsMainThread());
ICameraControl::Configuration config;
config.mMode = ICameraControl::kPictureMode;
config.mPreviewSize.width = aCapability.width;
config.mPreviewSize.height = aCapability.height;
mCameraControl->Start(&config);
mCameraControl->Set(CAMERA_PARAM_PICTURE_SIZE, config.mPreviewSize);
hal::RegisterScreenConfigurationObserver(this);
}
void
MediaEngineWebRTCVideoSource::StopImpl() {
MOZ_ASSERT(NS_IsMainThread());
hal::UnregisterScreenConfigurationObserver(this);
mCameraControl->Stop();
}
void
MediaEngineWebRTCVideoSource::SnapshotImpl() {
MOZ_ASSERT(NS_IsMainThread());
mCameraControl->TakePicture();
}
void
MediaEngineWebRTCVideoSource::OnHardwareStateChange(HardwareState aState)
{
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (aState == CameraControlListener::kHardwareClosed) {
// When the first CameraControl listener is added, it gets pushed
// the current state of the camera--normally 'closed'. We only
// pay attention to that state if we've progressed out of the
// allocated state.
if (mState != kAllocated) {
mState = kReleased;
mCallbackMonitor.Notify();
}
} else {
// Can't read this except on MainThread (ugh)
NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineWebRTCVideoSource>(this),
&MediaEngineWebRTCVideoSource::GetRotation));
mState = kStarted;
mCallbackMonitor.Notify();
}
}
void
MediaEngineWebRTCVideoSource::GetRotation()
{
MOZ_ASSERT(NS_IsMainThread());
MonitorAutoLock enter(mMonitor);
mCameraControl->Get(CAMERA_PARAM_SENSORANGLE, mCameraAngle);
MOZ_ASSERT(mCameraAngle == 0 || mCameraAngle == 90 || mCameraAngle == 180 ||
mCameraAngle == 270);
hal::ScreenConfiguration config;
hal::GetCurrentScreenConfiguration(&config);
nsCString deviceName;
ICameraControl::GetCameraName(mCaptureIndex, deviceName);
if (deviceName.EqualsASCII("back")) {
mBackCamera = true;
}
mRotation = GetRotateAmount(config.orientation(), mCameraAngle, mBackCamera);
LOG(("*** Initial orientation: %d (Camera %d Back %d MountAngle: %d)",
mRotation, mCaptureIndex, mBackCamera, mCameraAngle));
}
void
MediaEngineWebRTCVideoSource::OnUserError(UserContext aContext, nsresult aError)
{
{
// Scope the monitor, since there is another monitor below and we don't want
// unexpected deadlock.
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
mCallbackMonitor.Notify();
}
// A main thread runnable to send error code to all queued PhotoCallbacks.
class TakePhotoError : public nsRunnable {
public:
TakePhotoError(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
nsresult aRv)
: mRv(aRv)
{
mCallbacks.SwapElements(aCallbacks);
}
NS_IMETHOD Run()
{
uint32_t callbackNumbers = mCallbacks.Length();
for (uint8_t i = 0; i < callbackNumbers; i++) {
mCallbacks[i]->PhotoError(mRv);
}
// PhotoCallback needs to dereference on main thread.
mCallbacks.Clear();
return NS_OK;
}
protected:
nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
nsresult mRv;
};
if (aContext == UserContext::kInTakePicture) {
MonitorAutoLock lock(mMonitor);
if (mPhotoCallbacks.Length()) {
NS_DispatchToMainThread(new TakePhotoError(mPhotoCallbacks, aError));
}
}
}
void
MediaEngineWebRTCVideoSource::OnTakePictureComplete(uint8_t* aData, uint32_t aLength, const nsAString& aMimeType)
{
// We need to restart the preview here because the Gonk camera stops the
// preview while taking a picture.
mCameraControl->StartPreview();
// Create a main thread runnable to generate a blob and call all current queued
// PhotoCallbacks.
class GenerateBlobRunnable : public nsRunnable {
public:
GenerateBlobRunnable(nsTArray<nsRefPtr<PhotoCallback>>& aCallbacks,
uint8_t* aData,
uint32_t aLength,
const nsAString& aMimeType)
{
mCallbacks.SwapElements(aCallbacks);
mPhoto.AppendElements(aData, aLength);
mMimeType = aMimeType;
}
NS_IMETHOD Run()
{
nsRefPtr<dom::File> blob =
dom::File::CreateMemoryFile(nullptr, mPhoto.Elements(), mPhoto.Length(), mMimeType);
uint32_t callbackCounts = mCallbacks.Length();
for (uint8_t i = 0; i < callbackCounts; i++) {
nsRefPtr<dom::File> tempBlob = blob;
mCallbacks[i]->PhotoComplete(tempBlob.forget());
}
// PhotoCallback needs to dereference on main thread.
mCallbacks.Clear();
return NS_OK;
}
nsTArray<nsRefPtr<PhotoCallback>> mCallbacks;
nsTArray<uint8_t> mPhoto;
nsString mMimeType;
};
// All elements in mPhotoCallbacks will be swapped in GenerateBlobRunnable
// constructor. This captured image will be sent to all the queued
// PhotoCallbacks in this runnable.
MonitorAutoLock lock(mMonitor);
if (mPhotoCallbacks.Length()) {
NS_DispatchToMainThread(
new GenerateBlobRunnable(mPhotoCallbacks, aData, aLength, aMimeType));
}
}
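Both TakePhotoError and GenerateBlobRunnable take the queued callbacks via SwapElements(), so the queue is emptied while mMonitor is held and the references are released on the main thread. The same ownership-handoff pattern in portable C++, with std::vector standing in for nsTArray (a sketch, not Gecko code):

#include <cstdio>
#include <vector>

struct Callback { int id; };

struct Runnable {
  std::vector<Callback> mCallbacks;
  explicit Runnable(std::vector<Callback>& aQueued) {
    mCallbacks.swap(aQueued); // steal the queue; the caller's array is now empty
  }
  void Run() {
    for (const Callback& cb : mCallbacks)
      std::printf("notifying callback %d\n", cb.id);
    mCallbacks.clear(); // mirrors the explicit Clear() on the main thread
  }
};

int main() {
  std::vector<Callback> queued{{1}, {2}};
  Runnable r(queued); // constructed while the lock would be held
  std::printf("queued now holds %zu entries\n", queued.size()); // prints 0
  r.Run(); // in Gecko this runs after NS_DispatchToMainThread
}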
uint32_t
MediaEngineWebRTCVideoSource::ConvertPixelFormatToFOURCC(int aFormat)
{
switch (aFormat) {
case HAL_PIXEL_FORMAT_RGBA_8888:
return libyuv::FOURCC_BGRA;
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
return libyuv::FOURCC_NV21;
case HAL_PIXEL_FORMAT_YV12:
return libyuv::FOURCC_YV12;
default: {
LOG((" xxxxx Unknown pixel format %d", aFormat));
MOZ_ASSERT(false, "Unknown pixel format.");
return libyuv::FOURCC_ANY;
}
}
}
void
MediaEngineWebRTCVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
layers::GrallocImage *nativeImage = static_cast<layers::GrallocImage*>(aImage);
android::sp<android::GraphicBuffer> graphicBuffer = nativeImage->GetGraphicBuffer();
void *pMem = nullptr;
uint32_t size = aWidth * aHeight * 3 / 2;
graphicBuffer->lock(android::GraphicBuffer::USAGE_SW_READ_MASK, &pMem);
uint8_t* srcPtr = static_cast<uint8_t*>(pMem);
// Create a video frame and append it to the track.
nsRefPtr<layers::Image> image = mImageContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
layers::PlanarYCbCrImage* videoImage = static_cast<layers::PlanarYCbCrImage*>(image.get());
uint32_t dstWidth;
uint32_t dstHeight;
if (mRotation == 90 || mRotation == 270) {
dstWidth = aHeight;
dstHeight = aWidth;
} else {
dstWidth = aWidth;
dstHeight = aHeight;
}
uint32_t half_width = dstWidth / 2;
uint8_t* dstPtr = videoImage->AllocateAndGetNewBuffer(size);
libyuv::ConvertToI420(srcPtr, size,
dstPtr, dstWidth,
dstPtr + (dstWidth * dstHeight), half_width,
dstPtr + (dstWidth * dstHeight * 5 / 4), half_width,
0, 0,
aWidth, aHeight,
aWidth, aHeight,
static_cast<libyuv::RotationMode>(mRotation),
ConvertPixelFormatToFOURCC(graphicBuffer->getPixelFormat()));
graphicBuffer->unlock();
const uint8_t lumaBpp = 8;
const uint8_t chromaBpp = 4;
layers::PlanarYCbCrData data;
data.mYChannel = dstPtr;
data.mYSize = IntSize(dstWidth, dstHeight);
data.mYStride = dstWidth * lumaBpp / 8;
data.mCbCrStride = dstWidth * chromaBpp / 8;
data.mCbChannel = dstPtr + dstHeight * data.mYStride;
data.mCrChannel = data.mCbChannel + (dstHeight * data.mCbCrStride / 2);
data.mCbCrSize = IntSize(dstWidth / 2, dstHeight / 2);
data.mPicX = 0;
data.mPicY = 0;
data.mPicSize = IntSize(dstWidth, dstHeight);
data.mStereoMode = StereoMode::MONO;
videoImage->SetDataNoCopy(data);
// implicitly releases last image
mImage = image.forget();
}
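RotateImage()'s pointer math assumes a packed I420 layout: a full-resolution Y plane followed by quarter-resolution Cb and Cr planes, which is where the width * height * 3 / 2 size and the dstWidth * dstHeight and * 5 / 4 offsets come from. A small self-contained check of that arithmetic (illustrative only):

#include <cassert>
#include <cstddef>

// Offsets into a packed I420 buffer of w x h pixels.
struct I420Layout {
  size_t ySize, cbOffset, crOffset, total;
};

static I420Layout Layout(size_t w, size_t h) {
  I420Layout l;
  l.ySize    = w * h;         // 8-bit luma, full resolution
  l.cbOffset = w * h;         // Cb plane starts right after Y
  l.crOffset = w * h * 5 / 4; // Cr after Cb (Cb is w/2 * h/2 = wh/4 bytes)
  l.total    = w * h * 3 / 2; // matches "aWidth * aHeight * 3 / 2" above
  return l;
}

int main() {
  I420Layout l = Layout(640, 480);
  assert(l.ySize == 307200);
  assert(l.crOffset - l.cbOffset == 640 / 2 * 480 / 2); // Cb plane size
  assert(l.total - l.crOffset == 640 / 2 * 480 / 2);    // Cr plane size
}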
bool
MediaEngineWebRTCVideoSource::OnNewPreviewFrame(layers::Image* aImage, uint32_t aWidth, uint32_t aHeight) {
{
ReentrantMonitorAutoEnter sync(mCallbackMonitor);
if (mState == kStopped) {
return false;
}
}
MonitorAutoLock enter(mMonitor);
// Bug XXX we'd prefer to avoid converting if mRotation == 0, but that causes problems in UpdateImage()
RotateImage(aImage, aWidth, aHeight);
if (mRotation != 0 && mRotation != 180) {
uint32_t temp = aWidth;
aWidth = aHeight;
aHeight = temp;
}
if (mWidth != static_cast<int>(aWidth) || mHeight != static_cast<int>(aHeight)) {
mWidth = aWidth;
mHeight = aHeight;
LOG(("Video FrameSizeChange: %ux%u", mWidth, mHeight));
}
return true; // return true because we're accepting the frame
}
nsresult
MediaEngineWebRTCVideoSource::TakePhoto(PhotoCallback* aCallback)
{
MOZ_ASSERT(NS_IsMainThread());
MonitorAutoLock lock(mMonitor);
// If another callback already exists, a captured picture is on the way,
// so we don't need to call TakePicture() again.
if (!mPhotoCallbacks.Length()) {
nsresult rv;
if (mOrientationChanged) {
UpdatePhotoOrientation();
}
rv = mCameraControl->TakePicture();
if (NS_FAILED(rv)) {
return rv;
}
}
mPhotoCallbacks.AppendElement(aCallback);
return NS_OK;
}
nsresult
MediaEngineWebRTCVideoSource::UpdatePhotoOrientation()
{
MOZ_ASSERT(NS_IsMainThread());
hal::ScreenConfiguration config;
hal::GetCurrentScreenConfiguration(&config);
// The rotation angle is clockwise.
int orientation = 0;
switch (config.orientation()) {
case eScreenOrientation_PortraitPrimary:
orientation = 0;
break;
case eScreenOrientation_PortraitSecondary:
orientation = 180;
break;
case eScreenOrientation_LandscapePrimary:
orientation = 270;
break;
case eScreenOrientation_LandscapeSecondary:
orientation = 90;
break;
}
// The front camera angle is inverted compared to the back camera.
orientation = (mBackCamera ? orientation : (-orientation));
ICameraControlParameterSetAutoEnter batch(mCameraControl);
// It changes the orientation value in EXIF information only.
mCameraControl->Set(CAMERA_PARAM_PICTURE_ROTATION, orientation);
mOrientationChanged = false;
return NS_OK;
}
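Note that UpdatePhotoOrientation() maps LandscapePrimary to 270 where GetRotateAmount() used 90: one tags a clockwise EXIF rotation in metadata, the other physically rotates pixels, so the tables run in opposite directions. A compact restatement of the mapping (the enum is a stand-in for hal's ScreenConfiguration values, not the Gecko type):

#include <cassert>

enum ScreenOrientation { // stand-in for the hal enum
  PortraitPrimary, PortraitSecondary, LandscapePrimary, LandscapeSecondary
};

static int ExifRotation(ScreenOrientation o, bool backCamera) {
  int orientation = 0;
  switch (o) {
    case PortraitPrimary:    orientation = 0;   break;
    case PortraitSecondary:  orientation = 180; break;
    case LandscapePrimary:   orientation = 270; break;
    case LandscapeSecondary: orientation = 90;  break;
  }
  // The front camera rotates the opposite way, so negate for it.
  return backCamera ? orientation : -orientation;
}

int main() {
  assert(ExifRotation(LandscapePrimary, true)  == 270);
  assert(ExifRotation(LandscapePrimary, false) == -270);
}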
#endif
}

View File

@ -8,6 +8,7 @@ XPIDL_MODULE = 'content_webrtc'
EXPORTS += [
'MediaEngine.h',
'MediaEngineCameraVideoSource.h',
'MediaEngineDefault.h',
'MediaTrackConstraints.h',
]
@ -16,6 +17,7 @@ if CONFIG['MOZ_WEBRTC']:
EXPORTS += ['AudioOutputObserver.h',
'MediaEngineWebRTC.h']
UNIFIED_SOURCES += [
'MediaEngineCameraVideoSource.cpp',
'MediaEngineTabVideoSource.cpp',
'MediaEngineWebRTCAudio.cpp',
'MediaEngineWebRTCVideo.cpp',
@ -32,6 +34,12 @@ if CONFIG['MOZ_WEBRTC']:
'/media/webrtc/signaling/src/common/browser_logging',
'/media/webrtc/trunk',
]
# Gonk camera source.
if CONFIG['MOZ_B2G_CAMERA']:
EXPORTS += ['MediaEngineGonkVideoSource.h']
UNIFIED_SOURCES += [
'MediaEngineGonkVideoSource.cpp',
]
XPIDL_SOURCES += [
'nsITabSource.idl'

View File

@ -1041,8 +1041,7 @@ static SourceSet *
/**
* Runs on a separate thread and is responsible for enumerating devices.
* Depending on whether a picture or stream was asked for, either
* ProcessGetUserMedia or ProcessGetUserMediaSnapshot is called, and the results
* are sent back to the DOM.
* ProcessGetUserMedia is called, and the results are sent back to the DOM.
*
* Do not run this on the main thread. The success and error callbacks *MUST*
* be dispatched on the main thread!
@ -1124,18 +1123,6 @@ public:
}
}
// It is an error if audio or video are requested along with picture.
if (mConstraints.mPicture &&
(IsOn(mConstraints.mAudio) || IsOn(mConstraints.mVideo))) {
Fail(NS_LITERAL_STRING("NOT_SUPPORTED_ERR"));
return;
}
if (mConstraints.mPicture) {
ProcessGetUserMediaSnapshot(mVideoDevice->GetSource(), 0);
return;
}
// There's a bug in the permission code that can leave us with mAudio but no audio device
ProcessGetUserMedia(((IsOn(mConstraints.mAudio) && mAudioDevice) ?
mAudioDevice->GetSource() : nullptr),
@ -1204,7 +1191,7 @@ public:
{
MOZ_ASSERT(mSuccess);
MOZ_ASSERT(mError);
if (mConstraints.mPicture || IsOn(mConstraints.mVideo)) {
if (IsOn(mConstraints.mVideo)) {
VideoTrackConstraintsN constraints(GetInvariant(mConstraints.mVideo));
ScopedDeletePtr<SourceSet> sources(GetSources(backend, constraints,
&MediaEngine::EnumerateVideoDevices));
@ -1281,38 +1268,6 @@ public:
return;
}
/**
* Allocates a video device, takes a snapshot and returns a File via
* a SuccessRunnable or an error via the ErrorRunnable. Off the main thread.
*/
void
ProcessGetUserMediaSnapshot(MediaEngineVideoSource* aSource, int aDuration)
{
MOZ_ASSERT(mSuccess);
MOZ_ASSERT(mError);
nsresult rv = aSource->Allocate(GetInvariant(mConstraints.mVideo), mPrefs);
if (NS_FAILED(rv)) {
Fail(NS_LITERAL_STRING("HARDWARE_UNAVAILABLE"));
return;
}
/**
* Display picture capture UI here before calling Snapshot() - Bug 748835.
*/
nsCOMPtr<nsIDOMFile> file;
aSource->Snapshot(aDuration, getter_AddRefs(file));
aSource->Deallocate();
NS_DispatchToMainThread(new SuccessCallbackRunnable(
mSuccess, mError, file, mWindowID
));
MOZ_ASSERT(!mSuccess);
MOZ_ASSERT(!mError);
return;
}
private:
MediaStreamConstraints mConstraints;
@ -1594,35 +1549,6 @@ MediaManager::GetUserMedia(
MediaStreamConstraints c(aConstraints); // copy
/**
* If we were asked to get a picture, before getting a snapshot, we check if
* the calling page is allowed to open a popup. We do this because
* {picture:true} will open a new "window" to let the user preview or select
* an image, on Android. The desktop UI for {picture:true} is TBD, at which
* point we may decide whether to extend this test there as well.
*/
#if !defined(MOZ_WEBRTC)
if (c.mPicture && !privileged) {
if (aWindow->GetPopupControlState() > openControlled) {
nsCOMPtr<nsIPopupWindowManager> pm =
do_GetService(NS_POPUPWINDOWMANAGER_CONTRACTID);
if (!pm) {
return NS_OK;
}
uint32_t permission;
nsCOMPtr<nsIDocument> doc = aWindow->GetExtantDoc();
if (doc) {
pm->TestPermission(doc->NodePrincipal(), &permission);
if (permission == nsIPopupWindowManager::DENY_POPUP) {
aWindow->FirePopupBlockedEvent(doc, nullptr, EmptyString(),
EmptyString());
return NS_OK;
}
}
}
}
#endif
static bool created = false;
if (!created) {
// Force MediaManager to startup before we try to access it from other threads
@ -1751,15 +1677,6 @@ MediaManager::GetUserMedia(
}
#endif
#if defined(ANDROID) && !defined(MOZ_WIDGET_GONK)
if (c.mPicture) {
// ShowFilePickerForMimeType() must run on the Main Thread! (on Android)
// Note, GetUserMediaRunnableWrapper takes ownership of task.
NS_DispatchToMainThread(new GetUserMediaRunnableWrapper(task.forget()));
return NS_OK;
}
#endif
bool isLoop = false;
nsCOMPtr<nsIURI> loopURI;
nsresult rv = NS_NewURI(getter_AddRefs(loopURI), "about:loopconversation");