diff --git a/content/media/webrtc/MediaEngineCameraVideoSource.cpp b/content/media/webrtc/MediaEngineCameraVideoSource.cpp index 31a81e17a2b..65b8f51c40d 100644 --- a/content/media/webrtc/MediaEngineCameraVideoSource.cpp +++ b/content/media/webrtc/MediaEngineCameraVideoSource.cpp @@ -6,6 +6,7 @@ namespace mozilla { +using namespace mozilla::gfx; using dom::ConstrainLongRange; using dom::ConstrainDoubleRange; using dom::MediaTrackConstraintSet; @@ -47,6 +48,26 @@ MediaEngineCameraVideoSource::Intersect(ConstrainLongRange& aA, const ConstrainL return true; } +// guts for appending data to the MSG track +bool MediaEngineCameraVideoSource::AppendToTrack(SourceMediaStream* aSource, + layers::Image* aImage, + TrackID aID, + TrackTicks delta) +{ + MOZ_ASSERT(aSource); + + VideoSegment segment; + nsRefPtr<layers::Image> image = aImage; + IntSize size(image ? mWidth : 0, image ? mHeight : 0); + segment.AppendFrame(image.forget(), delta, size); + + // This is safe from any thread, and is safe if the track is Finished + // or Destroyed. + // This can fail if either a) we haven't added the track yet, or b) + // we've removed or finished the track. + return aSource->AppendToTrack(aID, &(segment)); +} + +// A special version of the algorithm for cameras that don't list capabilities.
void MediaEngineCameraVideoSource::GuessCapability( diff --git a/content/media/webrtc/MediaEngineCameraVideoSource.h b/content/media/webrtc/MediaEngineCameraVideoSource.h index 3e46cd6fab8..377a925079f 100644 --- a/content/media/webrtc/MediaEngineCameraVideoSource.h +++ b/content/media/webrtc/MediaEngineCameraVideoSource.h @@ -28,6 +28,7 @@ public: , mInitDone(false) , mHasDirectListeners(false) , mCaptureIndex(aIndex) + , mTrackID(0) , mFps(-1) {} @@ -60,6 +61,12 @@ public: protected: ~MediaEngineCameraVideoSource() {} + // guts for appending data to the MSG track + virtual bool AppendToTrack(SourceMediaStream* aSource, + layers::Image* aImage, + TrackID aID, + TrackTicks delta); + static bool IsWithin(int32_t n, const dom::ConstrainLongRange& aRange); static bool IsWithin(double n, const dom::ConstrainDoubleRange& aRange); static int32_t Clamp(int32_t n, const dom::ConstrainLongRange& aRange); @@ -87,6 +94,7 @@ protected: bool mInitDone; bool mHasDirectListeners; int mCaptureIndex; + TrackID mTrackID; int mFps; // Track rate (30 fps by default) webrtc::CaptureCapability mCapability; // Doesn't work on OS X. diff --git a/content/media/webrtc/MediaEngineGonkVideoSource.cpp b/content/media/webrtc/MediaEngineGonkVideoSource.cpp index d8e59c72b27..28dd7922e02 100644 --- a/content/media/webrtc/MediaEngineGonkVideoSource.cpp +++ b/content/media/webrtc/MediaEngineGonkVideoSource.cpp @@ -154,6 +154,7 @@ MediaEngineGonkVideoSource::Start(SourceMediaStream* aStream, TrackID aID) if (mState == kStarted) { return NS_OK; } + mTrackID = aID; mImageContainer = layers::LayerManager::CreateImageContainer(); NS_DispatchToMainThread(WrapRunnable(nsRefPtr<MediaEngineGonkVideoSource>(this), @@ -621,6 +622,21 @@ MediaEngineGonkVideoSource::RotateImage(layers::Image* aImage, uint32_t aWidth, // implicitly releases last image mImage = image.forget(); + + // Push the frame into the MSG with a minimal duration.
This will likely + mean we'll still get NotifyPull calls which will then return the same + frame again with a longer duration. However, this means we won't + fail to get the frame in and drop frames. + + // XXX The timestamp for the frame should be based on the Capture time, + not the MSG time, and MSG should never, ever block on a (realtime) + video frame (or even really for streaming - audio yes, video probably no). + uint32_t len = mSources.Length(); + for (uint32_t i = 0; i < len; i++) { + if (mSources[i]) { + AppendToTrack(mSources[i], mImage, mTrackID, 1); // shortest possible duration + } + } } bool diff --git a/content/media/webrtc/MediaEngineWebRTCAudio.cpp b/content/media/webrtc/MediaEngineWebRTCAudio.cpp index 1cde8c364b5..6c8bfa63871 100644 --- a/content/media/webrtc/MediaEngineWebRTCAudio.cpp +++ b/content/media/webrtc/MediaEngineWebRTCAudio.cpp @@ -39,8 +39,10 @@ namespace mozilla { #ifdef PR_LOGGING extern PRLogModuleInfo* GetMediaManagerLog(); #define LOG(msg) PR_LOG(GetMediaManagerLog(), PR_LOG_DEBUG, msg) +#define LOG_FRAMES(msg) PR_LOG(GetMediaManagerLog(), 6, msg) #else #define LOG(msg) +#define LOG_FRAMES(msg) #endif /** @@ -401,7 +403,7 @@ MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph, #ifdef DEBUG TrackTicks target = aSource->TimeToTicksRoundUp(SAMPLE_FREQUENCY, aDesiredTime); TrackTicks delta = target - aLastEndTime; - LOG(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta)); + LOG_FRAMES(("Audio: NotifyPull: aDesiredTime %ld, target %ld, delta %ld",(int64_t) aDesiredTime, (int64_t) target, (int64_t) delta)); aLastEndTime = target; #endif } diff --git a/content/media/webrtc/MediaEngineWebRTCVideo.cpp b/content/media/webrtc/MediaEngineWebRTCVideo.cpp index bc2c1cb2022..b25b17c7db5 100644 --- a/content/media/webrtc/MediaEngineWebRTCVideo.cpp +++ b/content/media/webrtc/MediaEngineWebRTCVideo.cpp @@ -98,6 +98,22 @@ 
MediaEngineWebRTCVideoSource::DeliverFrame( // implicitly releases last image mImage = image.forget(); + // Push the frame into the MSG with a minimal duration. This will likely + mean we'll still get NotifyPull calls which will then return the same + frame again with a longer duration. However, this means we won't + fail to get the frame in and drop frames. + + // XXX The timestamp for the frame should be based on the Capture time, + not the MSG time, and MSG should never, ever block on a (realtime) + video frame (or even really for streaming - audio yes, video probably no). + // Note that MediaPipeline currently ignores the timestamps from MSG + uint32_t len = mSources.Length(); + for (uint32_t i = 0; i < len; i++) { + if (mSources[i]) { + AppendToTrack(mSources[i], mImage, mTrackID, 1); // shortest possible duration + } + } + return 0; } @@ -106,7 +122,7 @@ MediaEngineWebRTCVideoSource::DeliverFrame( // this means that no *real* frame can be inserted during this period. void MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph, - SourceMediaStream *aSource, + SourceMediaStream* aSource, TrackID aID, StreamTime aDesiredTime, TrackTicks &aLastEndTime) @@ -118,12 +134,10 @@ MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph, // So mState could be kReleased here. We really don't care about the state, // though. - // Note: we're not giving up mImage here - nsRefPtr<layers::Image> image = mImage; TrackTicks target = aSource->TimeToTicksRoundUp(USECS_PER_S, aDesiredTime); TrackTicks delta = target - aLastEndTime; LOGFRAME(("NotifyPull, desired = %ld, target = %ld, delta = %ld %s", (int64_t) aDesiredTime, - (int64_t) target, (int64_t) delta, image ? "" : "<null>")); + (int64_t) target, (int64_t) delta, mImage ? 
"" : "<null>")); // Bug 846188 We may want to limit incoming frames to the requested frame rate // mFps - if you want 30FPS, and the camera gives you 60FPS, this could @@ -137,11 +151,7 @@ MediaEngineWebRTCVideoSource::NotifyPull(MediaStreamGraph* aGraph, // Doing so means a negative delta and thus messes up handling of the graph if (delta > 0) { // nullptr images are allowed - IntSize size(image ? mWidth : 0, image ? mHeight : 0); - segment.AppendFrame(image.forget(), delta, size); - // This can fail if either a) we haven't added the track yet, or b) - // we've removed or finished the track. - if (aSource->AppendToTrack(aID, &(segment))) { + if (AppendToTrack(aSource, mImage, aID, delta)) { aLastEndTime = target; } } @@ -397,6 +407,8 @@ MediaEngineWebRTCVideoSource::Start(SourceMediaStream* aStream, TrackID aID) mImageContainer = layers::LayerManager::CreateImageContainer(); mState = kStarted; + mTrackID = aID; + error = mViERender->AddRenderer(mCaptureIndex, webrtc::kVideoI420, (webrtc::ExternalRenderer*)this); if (error == -1) { return NS_ERROR_FAILURE;