Bug 1143575. Remove Theora-only duplicate frame optimization. r=cpearce

Robert O'Callahan 2015-03-28 10:53:37 +13:00
parent c3151774c2
commit e0a6610fd1
4 changed files with 31 additions and 91 deletions
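In short: Theora duplicate frames were previously queued as image-less VideoData markers and skipped at paint time; after this patch a duplicate packet is decoded and queued like any other frame, so the special cases can go. A rough before/after sketch, using only identifiers that appear in the diffs below (offset/time/duration/granulepos stand in for the real arguments):

    // Before (sketch): the Ogg reader pushed a marker frame with no image
    // when libtheora reported a duplicate packet...
    if (ret == TH_DUPFRAME) {
      nsRefPtr<VideoData> dup =
        VideoData::CreateDuplicate(offset, time, duration, granulepos);
      mVideoQueue.Push(dup);            // dup->mDuplicate == true, no mImage
    }
    // ...and the state machine skipped painting such frames:
    if (aData->mDuplicate) {
      return;                           // leave the previous frame on screen
    }

    // After (sketch): the TH_DUPFRAME branch and the mDuplicate flag are
    // gone; duplicates go through th_decode_ycbcr_out() and
    // VideoData::Create() like every other frame.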

View File

@@ -108,26 +108,14 @@ IsInEmulator()
 #endif
 
-VideoData::VideoData(int64_t aOffset,
-                     int64_t aTime,
-                     int64_t aDuration,
-                     int64_t aTimecode)
-  : MediaData(sType, aOffset, aTime, aDuration)
-  , mDuplicate(true)
-{
-  NS_ASSERTION(mDuration >= 0, "Frame must have non-negative duration.");
-  mTimecode = aTimecode;
-}
-
 VideoData::VideoData(int64_t aOffset,
                      int64_t aTime,
                      int64_t aDuration,
                      bool aKeyframe,
                      int64_t aTimecode,
                      IntSize aDisplay)
-  : MediaData(sType, aOffset, aTime, aDuration)
+  : MediaData(VIDEO_DATA, aOffset, aTime, aDuration)
   , mDisplay(aDisplay)
-  , mDuplicate(false)
 {
   NS_ASSERTION(mDuration >= 0, "Frame must have non-negative duration.");
   mKeyframe = aKeyframe;

View File

@@ -269,18 +269,6 @@ public:
                                            const IntRect& aPicture,
                                            bool aCopyData);
 
-  // Constructs a duplicate VideoData object. This intrinsically tells the
-  // player that it does not need to update the displayed frame when this
-  // frame is played; this frame is identical to the previous.
-  static already_AddRefed<VideoData> CreateDuplicate(int64_t aOffset,
-                                                     int64_t aTime,
-                                                     int64_t aDuration,
-                                                     int64_t aTimecode)
-  {
-    nsRefPtr<VideoData> rv = new VideoData(aOffset, aTime, aDuration, aTimecode);
-    return rv.forget();
-  }
-
   size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
 
   // Dimensions at which to display the video frame. The picture region
@@ -291,14 +279,6 @@ public:
   // This frame's image.
   nsRefPtr<Image> mImage;
 
-  // When true, denotes that this frame is identical to the frame that
-  // came before; it's a duplicate. mBuffer will be empty.
-  const bool mDuplicate;
-
-  VideoData(int64_t aOffset,
-            int64_t aTime,
-            int64_t aDuration,
-            int64_t aTimecode);
-
   VideoData(int64_t aOffset,
             int64_t aTime,

View File

@@ -2487,10 +2487,6 @@ void MediaDecoderStateMachine::RenderVideoFrame(VideoData* aData,
   MOZ_ASSERT(OnTaskQueue());
   mDecoder->GetReentrantMonitor().AssertNotCurrentThreadIn();
 
-  if (aData->mDuplicate) {
-    return;
-  }
-
   VERBOSE_LOG("playing video frame %lld (queued=%i, state-machine=%i, decoder-queued=%i)",
               aData->mTime, VideoQueue().GetSize() + mReader->SizeOfVideoQueueInFrames(),
               VideoQueue().GetSize(), mReader->SizeOfVideoQueueInFrames());
@@ -2732,27 +2728,11 @@ MediaDecoderStateMachine::DropVideoUpToSeekTarget(VideoData* aSample)
   MOZ_ASSERT(OnTaskQueue());
   nsRefPtr<VideoData> video(aSample);
   MOZ_ASSERT(video);
-  DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld] dup=%d",
-              video->mTime, video->GetEndTime(), video->mDuplicate);
+  DECODER_LOG("DropVideoUpToSeekTarget() frame [%lld, %lld]",
+              video->mTime, video->GetEndTime());
   MOZ_ASSERT(mCurrentSeek.Exists());
   const int64_t target = mCurrentSeek.mTarget.mTime;
 
-  // Duplicate handling: if we're dropping frames up the seek target, we must
-  // be wary of Theora duplicate frames. They don't have an image, so if the
-  // target frame is in a run of duplicates, we won't have an image to draw
-  // after the seek. So store the last frame encountered while dropping, and
-  // copy its Image forward onto duplicate frames, so that every frame has
-  // an Image.
-  if (video->mDuplicate &&
-      mFirstVideoFrameAfterSeek &&
-      !mFirstVideoFrameAfterSeek->mDuplicate) {
-    nsRefPtr<VideoData> temp =
-      VideoData::ShallowCopyUpdateTimestampAndDuration(mFirstVideoFrameAfterSeek,
-                                                       video->mTime,
-                                                       video->mDuration);
-    video = temp;
-  }
-
   // If the frame end time is less than the seek target, we won't want
   // to display this frame after the seek, so discard it.
   if (target >= video->GetEndTime()) {

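With image-less duplicates gone, every frame that reaches DropVideoUpToSeekTarget() carries an image, so the frame remembered in mFirstVideoFrameAfterSeek while dropping is always displayable and nothing needs to be copied forward. A simplified sketch of the invariant that remains (illustrative, not verbatim tree code):

    // Frames ending before the seek target are dropped but remembered, so
    // there is always a displayable frame once the seek completes.
    if (target >= video->GetEndTime()) {
      mFirstVideoFrameAfterSeek = video;    // every frame now has mImage
    } else {
      mFirstVideoFrameAfterSeek = nullptr;  // first frame to display found
      VideoQueue().PushFront(video);
    }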
View File

@@ -854,43 +854,35 @@ nsresult OggReader::DecodeTheora(ogg_packet* aPacket, int64_t aTimeThreshold)
     return NS_OK;
   }
 
-  if (ret == TH_DUPFRAME) {
-    nsRefPtr<VideoData> v = VideoData::CreateDuplicate(mDecoder->GetResource()->Tell(),
-                                                       time,
-                                                       endTime - time,
-                                                       aPacket->granulepos);
-    mVideoQueue.Push(v);
-  } else if (ret == 0) {
-    th_ycbcr_buffer buffer;
-    ret = th_decode_ycbcr_out(mTheoraState->mCtx, buffer);
-    NS_ASSERTION(ret == 0, "th_decode_ycbcr_out failed");
-    bool isKeyframe = th_packet_iskeyframe(aPacket) == 1;
-    VideoData::YCbCrBuffer b;
-    for (uint32_t i=0; i < 3; ++i) {
-      b.mPlanes[i].mData = buffer[i].data;
-      b.mPlanes[i].mHeight = buffer[i].height;
-      b.mPlanes[i].mWidth = buffer[i].width;
-      b.mPlanes[i].mStride = buffer[i].stride;
-      b.mPlanes[i].mOffset = b.mPlanes[i].mSkip = 0;
-    }
-
-    nsRefPtr<VideoData> v = VideoData::Create(mInfo.mVideo,
-                                              mDecoder->GetImageContainer(),
-                                              mDecoder->GetResource()->Tell(),
-                                              time,
-                                              endTime - time,
-                                              b,
-                                              isKeyframe,
-                                              aPacket->granulepos,
-                                              mPicture);
-    if (!v) {
-      // There may be other reasons for this error, but for
-      // simplicity just assume the worst case: out of memory.
-      NS_WARNING("Failed to allocate memory for video frame");
-      return NS_ERROR_OUT_OF_MEMORY;
-    }
-    mVideoQueue.Push(v);
-  }
+  th_ycbcr_buffer buffer;
+  ret = th_decode_ycbcr_out(mTheoraState->mCtx, buffer);
+  NS_ASSERTION(ret == 0, "th_decode_ycbcr_out failed");
+  bool isKeyframe = th_packet_iskeyframe(aPacket) == 1;
+  VideoData::YCbCrBuffer b;
+  for (uint32_t i=0; i < 3; ++i) {
+    b.mPlanes[i].mData = buffer[i].data;
+    b.mPlanes[i].mHeight = buffer[i].height;
+    b.mPlanes[i].mWidth = buffer[i].width;
+    b.mPlanes[i].mStride = buffer[i].stride;
+    b.mPlanes[i].mOffset = b.mPlanes[i].mSkip = 0;
+  }
+
+  nsRefPtr<VideoData> v = VideoData::Create(mInfo.mVideo,
+                                            mDecoder->GetImageContainer(),
+                                            mDecoder->GetResource()->Tell(),
+                                            time,
+                                            endTime - time,
+                                            b,
+                                            isKeyframe,
+                                            aPacket->granulepos,
+                                            mPicture);
+  if (!v) {
+    // There may be other reasons for this error, but for
+    // simplicity just assume the worst case: out of memory.
+    NS_WARNING("Failed to allocate memory for video frame");
+    return NS_ERROR_OUT_OF_MEMORY;
+  }
+  mVideoQueue.Push(v);
 
   return NS_OK;
 }
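
For reference, the libtheora contract that makes dropping the TH_DUPFRAME branch safe: th_decode_packetin() reports TH_DUPFRAME for a zero-byte duplicate packet but leaves the previously decoded picture in the decoder, so th_decode_ycbcr_out() still returns usable planes. A self-contained sketch of that decode step (plain libtheora API; DecodeOnePacket is a hypothetical helper, not Mozilla code):

    #include <theora/theoradec.h>

    // Decode one Theora packet; a TH_DUPFRAME result is handled exactly like
    // a normal frame because the decoder still holds the previous picture.
    static bool DecodeOnePacket(th_dec_ctx* aCtx, ogg_packet* aPacket,
                                th_ycbcr_buffer aBuffer)
    {
      int ret = th_decode_packetin(aCtx, aPacket, nullptr);
      if (ret != 0 && ret != TH_DUPFRAME) {
        return false;                      // genuine decode error
      }
      // For a duplicate this yields the last decoded frame's planes, so the
      // caller can build a full VideoData for it like any other frame.
      return th_decode_ycbcr_out(aCtx, aBuffer) == 0;
    }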