Bug 1186367 - Make MDSM more ignorant of AudioData/VideoData via using MediaQueue<MediaData> instead. r=jwwang

Author: Kilik Kuo
Date:   2015-07-28 00:21:33 +08:00
parent fdf510d427
commit 41d43e3209
8 changed files with 75 additions and 54 deletions

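In outline, the patch makes MediaDecoderStateMachine (MDSM), AudioSink and DecodedStream traffic in queues of the MediaData base class rather than the concrete AudioData/VideoData types, and restores the concrete type at the few call sites that need it through a new checked-downcast helper, MediaData::As<T>(). The before/after queue declarations from the MediaDecoderStateMachine.h hunks below summarize the shape of the change:

// Before: the state machine is tied to the concrete sample types.
MediaQueue<AudioData> mAudioQueue;
MediaQueue<VideoData> mVideoQueue;

// After: both queues hold MediaData; consumers downcast with As<T>().
MediaQueue<MediaData> mAudioQueue;
MediaQueue<MediaData> mVideoQueue;
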
AudioSink.cpp

@@ -23,7 +23,7 @@ extern PRLogModuleInfo* gMediaDecoderLog;
 // The amount of audio frames that is used to fuzz rounding errors.
 static const int64_t AUDIO_FUZZ_FRAMES = 1;
-AudioSink::AudioSink(MediaQueue<AudioData>& aAudioQueue,
+AudioSink::AudioSink(MediaQueue<MediaData>& aAudioQueue,
                      int64_t aStartTime,
                      const AudioInfo& aInfo,
                      dom::AudioChannel aChannel)
@@ -433,7 +433,8 @@ AudioSink::PlayFromAudioQueue()
 {
   AssertOnAudioThread();
   NS_ASSERTION(!mAudioStream->IsPaused(), "Don't play when paused");
-  nsRefPtr<AudioData> audio(AudioQueue().PopFront());
+  nsRefPtr<AudioData> audio =
+    dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
   SINK_LOG_V("playing %u frames of audio at time %lld",
              audio->mFrames, audio->mTime);

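The ownership dance in PlayFromAudioQueue() is the one non-mechanical change in this file. Assuming MediaQueue<T>::PopFront() returns an owning already_AddRefed-style pointer (as the old one-liner suggests), the new expression keeps the refcount balanced while switching static types; a commented restatement:

nsRefPtr<AudioData> audio =
  dont_AddRef(                 // re-adopt the same reference, no extra AddRef
    AudioQueue().PopFront()    // owning pointer to the front MediaData
      .take()                  // release it as a raw MediaData*
      ->As<AudioData>());      // checked downcast (asserts the type tag)
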
AudioSink.h

@@ -26,7 +26,7 @@ class AudioSink {
 public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(AudioSink)
-  AudioSink(MediaQueue<AudioData>& aAudioQueue,
+  AudioSink(MediaQueue<MediaData>& aAudioQueue,
             int64_t aStartTime,
             const AudioInfo& aInfo,
             dom::AudioChannel aChannel);
@@ -119,7 +119,7 @@ private:
   void StartAudioStreamPlaybackIfNeeded();
   void WriteSilence(uint32_t aFrames);
-  MediaQueue<AudioData>& AudioQueue() const {
+  MediaQueue<MediaData>& AudioQueue() const {
     return mAudioQueue;
   }
@@ -134,7 +134,7 @@ private:
   void AssertOnAudioThread();
   void AssertNotOnAudioThread();
-  MediaQueue<AudioData>& mAudioQueue;
+  MediaQueue<MediaData>& mAudioQueue;
   mutable ReentrantMonitor mMonitor;
   // There members are accessed on the audio thread only.

DecodedStream.cpp

@@ -184,8 +184,8 @@ OutputStreamData::Init(DecodedStream* aDecodedStream, ProcessedMediaStream* aStream)
   aStream->AddListener(mListener);
 }
-DecodedStream::DecodedStream(MediaQueue<AudioData>& aAudioQueue,
-                             MediaQueue<VideoData>& aVideoQueue)
+DecodedStream::DecodedStream(MediaQueue<MediaData>& aAudioQueue,
+                             MediaQueue<MediaData>& aVideoQueue)
   : mMonitor("DecodedStream::mMonitor")
   , mPlaying(false)
   , mAudioQueue(aAudioQueue)
@@ -402,19 +402,21 @@ DecodedStream::InitTracks()
 static void
 SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
-                AudioData* aAudio, AudioSegment* aOutput,
+                MediaData* aData, AudioSegment* aOutput,
                 uint32_t aRate, double aVolume)
 {
+  MOZ_ASSERT(aData);
+  AudioData* audio = aData->As<AudioData>();
   // This logic has to mimic AudioSink closely to make sure we write
   // the exact same silences
   CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                     UsecsToFrames(aStartTime, aRate);
-  CheckedInt64 frameOffset = UsecsToFrames(aAudio->mTime, aRate);
+  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);
   if (!audioWrittenOffset.isValid() ||
       !frameOffset.isValid() ||
       // ignore packet that we've already processed
-      frameOffset.value() + aAudio->mFrames <= audioWrittenOffset.value()) {
+      frameOffset.value() + audio->mFrames <= audioWrittenOffset.value()) {
     return;
   }
@@ -431,20 +433,20 @@ SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
   MOZ_ASSERT(audioWrittenOffset.value() >= frameOffset.value());
   int64_t offset = audioWrittenOffset.value() - frameOffset.value();
-  size_t framesToWrite = aAudio->mFrames - offset;
+  size_t framesToWrite = audio->mFrames - offset;
-  aAudio->EnsureAudioBuffer();
-  nsRefPtr<SharedBuffer> buffer = aAudio->mAudioBuffer;
+  audio->EnsureAudioBuffer();
+  nsRefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
   AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
   nsAutoTArray<const AudioDataValue*, 2> channels;
-  for (uint32_t i = 0; i < aAudio->mChannels; ++i) {
-    channels.AppendElement(bufferData + i * aAudio->mFrames + offset);
+  for (uint32_t i = 0; i < audio->mChannels; ++i) {
+    channels.AppendElement(bufferData + i * audio->mFrames + offset);
   }
   aOutput->AppendFrames(buffer.forget(), channels, framesToWrite);
   aStream->mAudioFramesWritten += framesToWrite;
   aOutput->ApplyVolume(aVolume);
-  aStream->mNextAudioTime = aAudio->GetEndTime();
+  aStream->mNextAudioTime = audio->GetEndTime();
 }
 void
@@ -458,7 +460,7 @@ DecodedStream::SendAudio(double aVolume, bool aIsSameOrigin)
   AudioSegment output;
   uint32_t rate = mInfo.mAudio.mRate;
-  nsAutoTArray<nsRefPtr<AudioData>,10> audio;
+  nsAutoTArray<nsRefPtr<MediaData>,10> audio;
   TrackID audioTrackId = mInfo.mAudio.mTrackId;
   SourceMediaStream* sourceStream = mData->mStream;
@@ -523,7 +525,7 @@ DecodedStream::SendVideo(bool aIsSameOrigin)
   VideoSegment output;
   TrackID videoTrackId = mInfo.mVideo.mTrackId;
-  nsAutoTArray<nsRefPtr<VideoData>, 10> video;
+  nsAutoTArray<nsRefPtr<MediaData>, 10> video;
   SourceMediaStream* sourceStream = mData->mStream;
   // It's OK to hold references to the VideoData because VideoData
@@ -531,7 +533,7 @@ DecodedStream::SendVideo(bool aIsSameOrigin)
   mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
   for (uint32_t i = 0; i < video.Length(); ++i) {
-    VideoData* v = video[i];
+    VideoData* v = video[i]->As<VideoData>();
     if (mData->mNextVideoTime < v->mTime) {
       // Write last video frame to catch up. mLastVideoImage can be null here

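Two downcast placements appear in this file. SendStreamAudio() now takes a MediaData* and converts it exactly once on entry (guarded by MOZ_ASSERT(aData)), so the rest of the function keeps using a typed local. SendVideo() instead keeps the base-typed array filled by GetElementsAfter() and converts each element as it is consumed; a condensed sketch of that second pattern, using the names from the hunk above:

nsAutoTArray<nsRefPtr<MediaData>, 10> video;
mVideoQueue.GetElementsAfter(mData->mNextVideoTime, &video);
for (uint32_t i = 0; i < video.Length(); ++i) {
  // Only VideoData is ever pushed to the video queue, so the checked
  // downcast is safe; As<VideoData>() asserts this in debug builds.
  VideoData* v = video[i]->As<VideoData>();
  // ... consume v ...
}
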
DecodedStream.h

@@ -19,8 +19,7 @@
 namespace mozilla {
-class AudioData;
-class VideoData;
+class MediaData;
 class AudioSegment;
 class MediaStream;
 class MediaInputPort;
@@ -97,8 +96,8 @@ public:
 class DecodedStream {
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecodedStream);
 public:
-  DecodedStream(MediaQueue<AudioData>& aAudioQueue,
-                MediaQueue<VideoData>& aVideoQueue);
+  DecodedStream(MediaQueue<MediaData>& aAudioQueue,
+                MediaQueue<MediaData>& aVideoQueue);
   // Mimic MDSM::StartAudioThread.
   // Must be called before any calls to SendData().
@@ -149,8 +148,8 @@ private:
   Maybe<int64_t> mStartTime;
   MediaInfo mInfo;
-  MediaQueue<AudioData>& mAudioQueue;
-  MediaQueue<VideoData>& mVideoQueue;
+  MediaQueue<MediaData>& mAudioQueue;
+  MediaQueue<MediaData>& mVideoQueue;
 };
 } // namespace mozilla

MediaData.cpp

@@ -115,7 +115,7 @@ VideoData::VideoData(int64_t aOffset,
                      int64_t aTimecode,
                      IntSize aDisplay,
                      layers::ImageContainer::FrameID aFrameID)
-  : MediaData(VIDEO_DATA, aOffset, aTime, aDuration)
+  : MediaData(VIDEO_DATA, aOffset, aTime, aDuration, 1)
   , mDisplay(aDisplay)
   , mFrameID(aFrameID)
   , mSentToCompositor(false)
@@ -488,7 +488,7 @@ VideoData::Create(const VideoInfo& aInfo,
 #define RAW_DATA_DEFAULT_SIZE 4096
 MediaRawData::MediaRawData()
-  : MediaData(RAW_DATA)
+  : MediaData(RAW_DATA, 0)
   , mData(nullptr)
   , mSize(0)
   , mCrypto(mCryptoInternal)
@@ -499,7 +499,7 @@ MediaRawData::MediaRawData()
 }
 MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize)
-  : MediaData(RAW_DATA)
+  : MediaData(RAW_DATA, 0)
   , mData(nullptr)
   , mSize(0)
   , mCrypto(mCryptoInternal)

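The new trailing constructor argument is the frame count that previously lived only on AudioData. The values chosen here are instructive: VideoData passes 1 because one VideoData carries exactly one decoded frame, while MediaRawData passes 0, presumably because a frame count is not meaningful for still-compressed data:

: MediaData(VIDEO_DATA, aOffset, aTime, aDuration, 1)  // one frame per sample
: MediaData(RAW_DATA, 0)                               // no decoded frames yet
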
MediaData.h

@@ -40,12 +40,14 @@ public:
   MediaData(Type aType,
             int64_t aOffset,
             int64_t aTimestamp,
-            int64_t aDuration)
+            int64_t aDuration,
+            uint32_t aFrames)
     : mType(aType)
     , mOffset(aOffset)
     , mTime(aTimestamp)
     , mTimecode(aTimestamp)
     , mDuration(aDuration)
+    , mFrames(aFrames)
     , mKeyframe(false)
     , mDiscontinuity(false)
   {}
@@ -66,6 +68,9 @@ public:
   // Duration of sample, in microseconds.
   int64_t mDuration;
+  // Amount of frames for contained data.
+  const uint32_t mFrames;
   bool mKeyframe;
   // True if this is the first sample after a gap or discontinuity in
@@ -79,13 +84,29 @@ public:
     mTime = mTime - aStartTime;
     return mTime >= 0;
   }
+  template <typename ReturnType>
+  const ReturnType* As() const
+  {
+    MOZ_ASSERT(this->mType == ReturnType::sType);
+    return static_cast<const ReturnType*>(this);
+  }
+  template <typename ReturnType>
+  ReturnType* As()
+  {
+    MOZ_ASSERT(this->mType == ReturnType::sType);
+    return static_cast<ReturnType*>(this);
+  }
 protected:
-  explicit MediaData(Type aType)
+  MediaData(Type aType, uint32_t aFrames)
     : mType(aType)
     , mOffset(0)
     , mTime(0)
     , mTimecode(0)
     , mDuration(0)
+    , mFrames(aFrames)
     , mKeyframe(false)
     , mDiscontinuity(false)
   {}
@@ -105,8 +126,7 @@ public:
             AudioDataValue* aData,
             uint32_t aChannels,
             uint32_t aRate)
-    : MediaData(sType, aOffset, aTime, aDuration)
-    , mFrames(aFrames)
+    : MediaData(sType, aOffset, aTime, aDuration, aFrames)
     , mChannels(aChannels)
     , mRate(aRate)
     , mAudioData(aData) {}
@@ -128,7 +148,6 @@ public:
   // If mAudioBuffer is null, creates it from mAudioData.
   void EnsureAudioBuffer();
-  const uint32_t mFrames;
   const uint32_t mChannels;
   const uint32_t mRate;
   // At least one of mAudioBuffer/mAudioData must be non-null.

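The As<ReturnType>() helper is the linchpin of the patch: a plain static_cast in release builds, with a debug assertion that the object's runtime type tag matches the requested class. It relies on each concrete subclass exposing a static sType constant mirroring the Type it passes to the base constructor (AudioData's sType appears in the initializer list above; VideoData's is implied by the As<VideoData>() call sites). A minimal standalone sketch of the idiom, with simplified names and plain assert() standing in for MOZ_ASSERT:

#include <cassert>
#include <cstdint>

class MediaData {
public:
  enum Type { AUDIO_DATA = 0, VIDEO_DATA, RAW_DATA };

  // Checked downcast: free in release builds, asserted in debug builds.
  template <typename ReturnType>
  ReturnType* As()
  {
    assert(mType == ReturnType::sType);
    return static_cast<ReturnType*>(this);
  }

  const Type mType;       // runtime tag, set once by the subclass ctor
  const uint32_t mFrames; // frame count now lives on the base class

protected:
  MediaData(Type aType, uint32_t aFrames) : mType(aType), mFrames(aFrames) {}
};

class AudioData : public MediaData {
public:
  static constexpr Type sType = AUDIO_DATA; // compile-time mirror of mType
  explicit AudioData(uint32_t aFrames) : MediaData(sType, aFrames) {}
};

class VideoData : public MediaData {
public:
  static constexpr Type sType = VIDEO_DATA;
  VideoData() : MediaData(sType, 1) {} // a VideoData is exactly one frame
};

int main()
{
  VideoData v;
  MediaData* base = &v;                      // what the queues now hold
  VideoData* typed = base->As<VideoData>();  // OK: tags match
  // base->As<AudioData>();                  // would assert in a debug build
  return typed->mFrames == 1 ? 0 : 1;
}
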
MediaDecoderStateMachine.cpp

@@ -287,13 +287,13 @@ MediaDecoderStateMachine::MediaDecoderStateMachine(MediaDecoder* aDecoder,
   nsRefPtr<MediaDecoderStateMachine> self = this;
   AudioQueue().AddPopListener(
-    [self] (const AudioData* aSample) {
-      self->OnAudioPopped(aSample);
-    }, mTaskQueue);
+    [self] (const MediaData* aSample) {
+      self->OnAudioPopped(aSample->As<AudioData>());
+    }, mTaskQueue);
   VideoQueue().AddPopListener(
-    [self] (const VideoData* aSample) {
-      self->OnVideoPopped(aSample);
+    [self] (const MediaData* aSample) {
+      self->OnVideoPopped(aSample->As<VideoData>());
     }, mTaskQueue);
 }
@@ -384,13 +384,14 @@ void MediaDecoderStateMachine::SendStreamData()
   const auto clockTime = GetClock();
   while (true) {
-    const AudioData* a = AudioQueue().PeekFront();
+    const MediaData* a = AudioQueue().PeekFront();
     // If we discard audio samples fed to the stream immediately, we will
     // keep decoding audio samples till the end and consume a lot of memory.
     // Therefore we only discard those behind the stream clock to throttle
     // the decoding speed.
     if (a && a->mTime <= clockTime) {
-      nsRefPtr<AudioData> releaseMe = AudioQueue().PopFront();
+      nsRefPtr<MediaData> releaseMe = AudioQueue().PopFront();
       continue;
     }
     break;
@@ -2113,11 +2114,11 @@ MediaDecoderStateMachine::SeekCompleted()
   int64_t newCurrentTime = seekTime;
   // Setup timestamp state.
-  nsRefPtr<VideoData> video = VideoQueue().PeekFront();
+  nsRefPtr<MediaData> video = VideoQueue().PeekFront();
   if (seekTime == Duration().ToMicroseconds()) {
     newCurrentTime = seekTime;
   } else if (HasAudio()) {
-    AudioData* audio = AudioQueue().PeekFront();
+    MediaData* audio = AudioQueue().PeekFront();
     // Though we adjust the newCurrentTime in audio-based, and supplemented
     // by video. For better UX, should NOT bind the slide position to
     // the first audio data timestamp directly.
@@ -2516,7 +2517,7 @@ void MediaDecoderStateMachine::RenderVideoFrames(int32_t aMaxFrames,
   AssertCurrentThreadInMonitor();
   VideoFrameContainer* container = mDecoder->GetVideoFrameContainer();
-  nsAutoTArray<nsRefPtr<VideoData>,16> frames;
+  nsAutoTArray<nsRefPtr<MediaData>,16> frames;
   VideoQueue().GetFirstElements(aMaxFrames, &frames);
   if (frames.IsEmpty() || !container) {
     return;
@@ -2525,7 +2526,7 @@ void MediaDecoderStateMachine::RenderVideoFrames(int32_t aMaxFrames,
   nsAutoTArray<ImageContainer::NonOwningImage,16> images;
   TimeStamp lastFrameTime;
   for (uint32_t i = 0; i < frames.Length(); ++i) {
-    VideoData* frame = frames[i];
+    VideoData* frame = frames[i]->As<VideoData>();
     frame->mSentToCompositor = true;
     int64_t frameTime = frame->mTime;
@@ -2562,7 +2563,7 @@ void MediaDecoderStateMachine::RenderVideoFrames(int32_t aMaxFrames,
                 VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames());
   }
-  container->SetCurrentFrames(frames[0]->mDisplay, images);
+  container->SetCurrentFrames(frames[0]->As<VideoData>()->mDisplay, images);
 }
 void MediaDecoderStateMachine::ResyncAudioClock()
@@ -2666,21 +2667,21 @@ void MediaDecoderStateMachine::UpdateRenderedVideoFrames()
   NS_ASSERTION(clockTime >= 0, "Should have positive clock time.");
   int64_t remainingTime = AUDIO_DURATION_USECS;
   if (VideoQueue().GetSize() > 0) {
-    nsRefPtr<VideoData> currentFrame = VideoQueue().PopFront();
+    nsRefPtr<MediaData> currentFrame = VideoQueue().PopFront();
     int32_t framesRemoved = 0;
     while (VideoQueue().GetSize() > 0) {
-      VideoData* nextFrame = VideoQueue().PeekFront();
+      MediaData* nextFrame = VideoQueue().PeekFront();
       if (!IsRealTime() && nextFrame->mTime > clockTime) {
         remainingTime = nextFrame->mTime - clockTime;
         break;
       }
       ++framesRemoved;
-      if (!currentFrame->mSentToCompositor) {
+      if (!currentFrame->As<VideoData>()->mSentToCompositor) {
        mDecoder->NotifyDecodedFrames(0, 0, 1);
         VERBOSE_LOG("discarding video frame mTime=%lld clock_time=%lld",
                     currentFrame->mTime, clockTime);
       }
-      CheckTurningOffHardwareDecoder(currentFrame);
+      CheckTurningOffHardwareDecoder(currentFrame->As<VideoData>());
       currentFrame = VideoQueue().PopFront();
     }

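With the queues type-erased, the pop listeners registered in the MDSM constructor become the natural choke point for restoring static types: each lambda now receives a const MediaData* and immediately downcasts before forwarding, so the existing OnAudioPopped/OnVideoPopped handlers keep their typed signatures. A commented restatement of the audio side:

AudioQueue().AddPopListener(
  [self] (const MediaData* aSample) {
    // Only AudioData is ever pushed to the audio queue, so the checked
    // downcast (the const overload of As<T>()) restores the expected type.
    self->OnAudioPopped(aSample->As<AudioData>());
  }, mTaskQueue);
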
MediaDecoderStateMachine.h

@@ -406,8 +406,8 @@ protected:
   void LogicalPlaybackRateChanged();
   void PreservesPitchChanged();
-  MediaQueue<AudioData>& AudioQueue() { return mAudioQueue; }
-  MediaQueue<VideoData>& VideoQueue() { return mVideoQueue; }
+  MediaQueue<MediaData>& AudioQueue() { return mAudioQueue; }
+  MediaQueue<MediaData>& VideoQueue() { return mVideoQueue; }
   // True if our buffers of decoded audio are not full, and we should
   // decode more.
@@ -879,11 +879,10 @@ private:
   // Queue of audio frames. This queue is threadsafe, and is accessed from
   // the audio, decoder, state machine, and main threads.
-  MediaQueue<AudioData> mAudioQueue;
+  MediaQueue<MediaData> mAudioQueue;
   // Queue of video frames. This queue is threadsafe, and is accessed from
   // the decoder, state machine, and main threads.
-  MediaQueue<VideoData> mVideoQueue;
+  MediaQueue<MediaData> mVideoQueue;
   // The decoder monitor must be obtained before modifying this state.
   // NotifyAll on the monitor must be called when the state is changed so