Bug 664918. Part 2: Create MediaSegment, AudioSegment and VideoSegment classes to manage intervals of media data. r=jesup

Also introduces a SharedBuffer class, representing a blob of binary data with threadsafe refcounting.
Robert O'Callahan 2012-04-30 15:11:19 +12:00
parent aee1489730
commit ba1e1720cd
8 changed files with 1129 additions and 0 deletions

content/media/AudioSegment.cpp

@@ -0,0 +1,193 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioSegment.h"
namespace mozilla {
static PRUint16
FlipByteOrderIfBigEndian(PRUint16 aValue)
{
PRUint16 s = aValue;
#if defined(IS_BIG_ENDIAN)
s = (s << 8) | (s >> 8);
#endif
return s;
}
/*
* Use "2^N" conversion since it's simple, fast, "bit transparent", used by
* many other libraries and apparently behaves reasonably.
* http://blog.bjornroche.com/2009/12/int-float-int-its-jungle-out-there.html
* http://blog.bjornroche.com/2009/12/linearity-and-dynamic-range-in-int.html
*/
static float
SampleToFloat(float aValue)
{
return aValue;
}
static float
SampleToFloat(PRUint8 aValue)
{
return (aValue - 128)/128.0f;
}
static float
SampleToFloat(PRInt16 aValue)
{
return PRInt16(FlipByteOrderIfBigEndian(aValue))/32768.0f;
}
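// Worked examples of the mapping above (little-endian host assumed):
// SampleToFloat(PRUint8(0)) == -1.0f, SampleToFloat(PRUint8(128)) == 0.0f,
// SampleToFloat(PRInt16(16384)) == 0.5f. Note the asymmetric positive end:
// PRUint8(255) maps to 127/128.0f, not quite 1.0f.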
static void
FloatToSample(float aValue, float* aOut)
{
*aOut = aValue;
}
static void
FloatToSample(float aValue, PRUint8* aOut)
{
float v = aValue*128 + 128;
float clamped = NS_MAX(0.0f, NS_MIN(255.0f, v));
*aOut = PRUint8(clamped);
}
static void
FloatToSample(float aValue, PRInt16* aOut)
{
float v = aValue*32768.0f;
float clamped = NS_MAX(-32768.0f, NS_MIN(32767.0f, v));
*aOut = PRInt16(FlipByteOrderIfBigEndian(PRInt16(clamped)));
}
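// aSource is planar (non-interleaved): aChannels consecutive runs of
// aSourceLength frames each, so frame i of channel c lives at
// aSource[c*aSourceLength + i]. The output buffer is interleaved
// frame-by-frame and scaled by aVolume.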
template <class SrcT, class DestT>
static void
InterleaveAndConvertBuffer(const SrcT* aSource, PRInt32 aSourceLength,
PRInt32 aLength,
float aVolume,
PRInt32 aChannels,
DestT* aOutput)
{
DestT* output = aOutput;
for (PRInt32 i = 0; i < aLength; ++i) {
for (PRInt32 channel = 0; channel < aChannels; ++channel) {
float v = SampleToFloat(aSource[channel*aSourceLength + i])*aVolume;
FloatToSample(v, output);
++output;
}
}
}
static void
InterleaveAndConvertBuffer(const PRInt16* aSource, PRInt32 aSourceLength,
PRInt32 aLength,
float aVolume,
PRInt32 aChannels,
PRInt16* aOutput)
{
PRInt16* output = aOutput;
float v = NS_MAX(NS_MIN(aVolume, 1.0f), -1.0f);
PRInt32 volume = PRInt32((1 << 16) * v);
for (PRInt32 i = 0; i < aLength; ++i) {
for (PRInt32 channel = 0; channel < aChannels; ++channel) {
PRInt16 s = FlipByteOrderIfBigEndian(aSource[channel*aSourceLength + i]);
*output = FlipByteOrderIfBigEndian(PRInt16((PRInt32(s) * volume) >> 16));
++output;
}
}
}
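// The S16->S16 specialization above avoids float conversion by using 16.16
// fixed point: e.g. aVolume == 0.5f yields volume == 32768, and
// (PRInt32(s) * 32768) >> 16 == s/2 (rounded toward negative infinity).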
template <class SrcT>
static void
InterleaveAndConvertBuffer(const SrcT* aSource, PRInt32 aSourceLength,
PRInt32 aLength,
float aVolume,
PRInt32 aChannels,
void* aOutput, nsAudioStream::SampleFormat aOutputFormat)
{
switch (aOutputFormat) {
case nsAudioStream::FORMAT_FLOAT32:
InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
aChannels, static_cast<float*>(aOutput));
break;
case nsAudioStream::FORMAT_S16_LE:
InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
aChannels, static_cast<PRInt16*>(aOutput));
break;
case nsAudioStream::FORMAT_U8:
InterleaveAndConvertBuffer(aSource, aSourceLength, aLength, aVolume,
aChannels, static_cast<PRUint8*>(aOutput));
break;
}
}
static void
InterleaveAndConvertBuffer(const void* aSource, nsAudioStream::SampleFormat aSourceFormat,
PRInt32 aSourceLength,
PRInt32 aOffset, PRInt32 aLength,
float aVolume,
PRInt32 aChannels,
void* aOutput, nsAudioStream::SampleFormat aOutputFormat)
{
switch (aSourceFormat) {
case nsAudioStream::FORMAT_FLOAT32:
InterleaveAndConvertBuffer(static_cast<const float*>(aSource) + aOffset, aSourceLength,
aLength,
aVolume,
aChannels,
aOutput, aOutputFormat);
break;
case nsAudioStream::FORMAT_S16_LE:
InterleaveAndConvertBuffer(static_cast<const PRInt16*>(aSource) + aOffset, aSourceLength,
aLength,
aVolume,
aChannels,
aOutput, aOutputFormat);
break;
case nsAudioStream::FORMAT_U8:
InterleaveAndConvertBuffer(static_cast<const PRUint8*>(aSource) + aOffset, aSourceLength,
aLength,
aVolume,
aChannels,
aOutput, aOutputFormat);
break;
}
}
void
AudioSegment::ApplyVolume(float aVolume)
{
for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
ci->mVolume *= aVolume;
}
}
static const int STATIC_AUDIO_BUFFER_BYTES = 50000;
void
AudioSegment::WriteTo(nsAudioStream* aOutput)
{
NS_ASSERTION(mChannels == aOutput->GetChannels(), "Wrong number of channels");
nsAutoTArray<PRUint8,STATIC_AUDIO_BUFFER_BYTES> buf;
PRUint32 frameSize = GetSampleSize(aOutput->GetFormat())*mChannels;
for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
AudioChunk& c = *ci;
if (frameSize*c.mDuration > PR_UINT32_MAX) {
NS_ERROR("Buffer overflow");
return;
}
buf.SetLength(PRInt32(frameSize*c.mDuration));
if (c.mBuffer) {
InterleaveAndConvertBuffer(c.mBuffer->Data(), c.mBufferFormat, c.mBufferLength,
c.mOffset, PRInt32(c.mDuration),
c.mVolume,
aOutput->GetChannels(),
buf.Elements(), aOutput->GetFormat());
} else {
// Zero bytes are silence for FLOAT32 and S16, but the zero level of
// U8 samples is 128, so pick the fill value per output format.
memset(buf.Elements(),
aOutput->GetFormat() == nsAudioStream::FORMAT_U8 ? 0x80 : 0,
buf.Length());
}
aOutput->Write(buf.Elements(), PRInt32(c.mDuration));
}
}
}

content/media/AudioSegment.h

@@ -0,0 +1,151 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_AUDIOSEGMENT_H_
#define MOZILLA_AUDIOSEGMENT_H_
#include "MediaSegment.h"
#include "nsISupportsImpl.h"
#include "nsAudioStream.h"
#include "SharedBuffer.h"
namespace mozilla {
struct AudioChunk {
typedef nsAudioStream::SampleFormat SampleFormat;
// Generic methods
void SliceTo(TrackTicks aStart, TrackTicks aEnd)
{
NS_ASSERTION(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
"Slice out of bounds");
if (mBuffer) {
mOffset += PRInt32(aStart);
}
mDuration = aEnd - aStart;
}
TrackTicks GetDuration() const { return mDuration; }
bool CanCombineWithFollowing(const AudioChunk& aOther) const
{
if (aOther.mBuffer != mBuffer) {
return false;
}
if (mBuffer) {
NS_ASSERTION(aOther.mBufferFormat == mBufferFormat && aOther.mBufferLength == mBufferLength,
"Wrong metadata about buffer");
return aOther.mOffset == mOffset + mDuration && aOther.mVolume == mVolume;
}
return true;
}
bool IsNull() const { return mBuffer == nsnull; }
void SetNull(TrackTicks aDuration)
{
mBuffer = nsnull;
mDuration = aDuration;
mOffset = 0;
mVolume = 1.0f;
}
TrackTicks mDuration; // in frames within the buffer
nsRefPtr<SharedBuffer> mBuffer; // null means data is all zeroes
PRInt32 mBufferLength; // number of frames in mBuffer (only meaningful if mBuffer is nonnull)
SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull)
PRInt32 mOffset; // in frames within the buffer (zero if mBuffer is null)
float mVolume; // volume multiplier to apply (1.0f if mBuffer is null)
};
/**
* A list of audio samples consisting of a sequence of slices of SharedBuffers.
* The audio rate is determined by the track, not stored in this class.
*/
class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
public:
typedef nsAudioStream::SampleFormat SampleFormat;
static int GetSampleSize(SampleFormat aFormat)
{
switch (aFormat) {
case nsAudioStream::FORMAT_U8: return 1;
case nsAudioStream::FORMAT_S16_LE: return 2;
case nsAudioStream::FORMAT_FLOAT32: return 4;
}
NS_ERROR("Bad format");
return 0;
}
AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO), mChannels(0) {}
bool IsInitialized()
{
return mChannels > 0;
}
void Init(PRInt32 aChannels)
{
NS_ASSERTION(aChannels > 0, "Bad number of channels");
NS_ASSERTION(!IsInitialized(), "Already initialized");
mChannels = aChannels;
}
PRInt32 GetChannels()
{
NS_ASSERTION(IsInitialized(), "Not initialized");
return mChannels;
}
/**
* Returns the format of the first audio frame that has data, or
* FORMAT_FLOAT32 if there is none.
*/
SampleFormat GetFirstFrameFormat()
{
for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
if (ci->mBuffer) {
return ci->mBufferFormat;
}
}
return nsAudioStream::FORMAT_FLOAT32;
}
void AppendFrames(already_AddRefed<SharedBuffer> aBuffer, PRInt32 aBufferLength,
PRInt32 aStart, PRInt32 aEnd, SampleFormat aFormat)
{
NS_ASSERTION(mChannels > 0, "Not initialized");
AudioChunk* chunk = AppendChunk(aEnd - aStart);
chunk->mBuffer = aBuffer;
chunk->mBufferFormat = aFormat;
chunk->mBufferLength = aBufferLength;
chunk->mOffset = aStart;
chunk->mVolume = 1.0f;
}
void ApplyVolume(float aVolume);
/**
* aOutput must have a matching number of channels, but we will automatically
* convert sample formats.
*/
void WriteTo(nsAudioStream* aOutput);
void AppendFrom(AudioSegment* aSource)
{
NS_ASSERTION(aSource->mChannels == mChannels, "Non-matching channels");
MediaSegmentBase<AudioSegment, AudioChunk>::AppendFrom(aSource);
}
// Segment-generic methods not in MediaSegmentBase
void InitFrom(const AudioSegment& aOther)
{
NS_ASSERTION(mChannels == 0, "Channels already set");
mChannels = aOther.mChannels;
}
void SliceFrom(const AudioSegment& aOther, TrackTicks aStart, TrackTicks aEnd)
{
InitFrom(aOther);
BaseSliceFrom(aOther, aStart, aEnd);
}
static Type StaticType() { return AUDIO; }
protected:
PRInt32 mChannels;
};
}
#endif /* MOZILLA_AUDIOSEGMENT_H_ */
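A minimal usage sketch of the API above (not part of the patch; aStream stands for a caller's already-initialized mono nsAudioStream*, and sourceSamples for 440 existing 16-bit samples, both hypothetical):

nsRefPtr<SharedBuffer> samples = SharedBuffer::Create(440 * sizeof(PRInt16));
memcpy(samples->Data(), sourceSamples, 440 * sizeof(PRInt16)); // hypothetical source
AudioSegment segment;
segment.Init(1);                     // one channel
segment.AppendFrames(samples.forget(), 440, 0, 440,
                     nsAudioStream::FORMAT_S16_LE);
segment.ApplyVolume(0.5f);           // scales every chunk's mVolume
segment.WriteTo(aStream);            // channel counts must match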

content/media/Makefile.in

@@ -46,20 +46,26 @@ LIBRARY_NAME = gkconmedia_s
LIBXUL_LIBRARY = 1
EXPORTS = \
+AudioSegment.h \
FileBlockCache.h \
MediaResource.h \
+MediaSegment.h \
nsAudioAvailableEventManager.h \
nsBuiltinDecoder.h \
nsBuiltinDecoderStateMachine.h \
nsBuiltinDecoderReader.h \
nsMediaCache.h \
nsMediaDecoder.h \
+SharedBuffer.h \
+StreamBuffer.h \
TimeVarying.h \
VideoFrameContainer.h \
VideoUtils.h \
+VideoSegment.h \
$(NULL)
CPPSRCS = \
+AudioSegment.cpp \
FileBlockCache.cpp \
MediaResource.cpp \
nsAudioAvailableEventManager.cpp \
@@ -68,6 +74,7 @@ CPPSRCS = \
nsBuiltinDecoderReader.cpp \
nsMediaCache.cpp \
nsMediaDecoder.cpp \
+StreamBuffer.cpp \
VideoFrameContainer.cpp \
VideoUtils.cpp \
$(NULL)

content/media/MediaSegment.h

@@ -0,0 +1,270 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_MEDIASEGMENT_H_
#define MOZILLA_MEDIASEGMENT_H_
#include "nsTArray.h"
namespace mozilla {
/**
* We represent media times in 64-bit fixed point. So 1 MediaTime is
* 1/(2^MEDIA_TIME_FRAC_BITS) seconds.
*/
typedef PRInt64 MediaTime;
const PRInt64 MEDIA_TIME_FRAC_BITS = 20;
const PRInt64 MEDIA_TIME_MAX = PR_INT64_MAX;
inline MediaTime MillisecondsToMediaTime(PRInt32 aMS)
{
return (MediaTime(aMS) << MEDIA_TIME_FRAC_BITS)/1000;
}
inline MediaTime SecondsToMediaTime(double aS)
{
NS_ASSERTION(aS <= (MEDIA_TIME_MAX >> MEDIA_TIME_FRAC_BITS),
"Out of range");
return MediaTime(aS * (1 << MEDIA_TIME_FRAC_BITS));
}
inline double MediaTimeToSeconds(MediaTime aTime)
{
return aTime*(1.0/(1 << MEDIA_TIME_FRAC_BITS));
}
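// Worked examples with MEDIA_TIME_FRAC_BITS == 20: one second is
// 1 << 20 == 1048576 MediaTime units, MillisecondsToMediaTime(500) ==
// 524288, and MediaTimeToSeconds(524288) == 0.5.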
/**
* A number of ticks at a rate determined by some underlying track (e.g.
* audio sample rate). We want to make sure that multiplying TrackTicks by
* 2^MEDIA_TIME_FRAC_BITS doesn't overflow, so we set its max accordingly.
*/
typedef PRInt64 TrackTicks;
const PRInt64 TRACK_TICKS_MAX = PR_INT64_MAX >> MEDIA_TIME_FRAC_BITS;
/**
* A MediaSegment is a chunk of media data sequential in time. Different
* types of data have different subclasses of MediaSegment, all inheriting
* from MediaSegmentBase.
* All MediaSegment data is timed using TrackTicks. The actual tick rate
* is defined on a per-track basis. For some track types, this can be
* a fixed constant for all tracks of that type (e.g. 1MHz for video).
*
* Each media segment defines a concept of "null media data" (e.g. silence
* for audio or "no video frame" for video), which can be efficiently
* represented. This is used for padding.
*/
class MediaSegment {
public:
virtual ~MediaSegment()
{
MOZ_COUNT_DTOR(MediaSegment);
}
enum Type {
AUDIO,
VIDEO,
TYPE_COUNT
};
/**
* Gets the total duration of the segment.
*/
TrackTicks GetDuration() { return mDuration; }
Type GetType() { return mType; }
/**
* Create a MediaSegment of the same type.
*/
virtual MediaSegment* CreateEmptyClone() = 0;
/**
* Moves contents of aSource to the end of this segment.
*/
virtual void AppendFrom(MediaSegment* aSource) = 0;
/**
* Replace all contents up to aDuration with null data.
*/
virtual void ForgetUpTo(TrackTicks aDuration) = 0;
/**
* Insert aDuration of null data at the start of the segment.
*/
virtual void InsertNullDataAtStart(TrackTicks aDuration) = 0;
protected:
MediaSegment(Type aType) : mDuration(0), mType(aType)
{
MOZ_COUNT_CTOR(MediaSegment);
}
TrackTicks mDuration; // total of mDurations of all chunks
Type mType;
};
/**
* C is the implementation class subclassed from MediaSegmentBase.
* C must contain a Chunk class.
*/
template <class C, class Chunk> class MediaSegmentBase : public MediaSegment {
public:
virtual MediaSegment* CreateEmptyClone()
{
C* s = new C();
s->InitFrom(*static_cast<C*>(this));
return s;
}
/**
* Appends the contents of aSource to this segment, clearing aSource.
*/
virtual void AppendFrom(MediaSegmentBase<C, Chunk>* aSource)
{
mDuration += aSource->mDuration;
aSource->mDuration = 0;
if (!mChunks.IsEmpty() && !aSource->mChunks.IsEmpty() &&
mChunks[mChunks.Length() - 1].CanCombineWithFollowing(aSource->mChunks[0])) {
mChunks[mChunks.Length() - 1].mDuration += aSource->mChunks[0].mDuration;
aSource->mChunks.RemoveElementAt(0);
}
mChunks.MoveElementsFrom(aSource->mChunks);
}
void RemoveLeading(TrackTicks aDuration)
{
RemoveLeadingInternal(aDuration, 0);
}
virtual void AppendFrom(MediaSegment* aSource)
{
NS_ASSERTION(aSource->GetType() == C::StaticType(), "Wrong type");
AppendFrom(static_cast<C*>(aSource));
}
/**
* Replace the first aDuration ticks with null media data, because the data
* will not be required again.
*/
virtual void ForgetUpTo(TrackTicks aDuration)
{
if (mChunks.IsEmpty() || aDuration <= 0) {
return;
}
if (mChunks[0].IsNull()) {
TrackTicks extraToForget = NS_MIN(aDuration, mDuration) - mChunks[0].GetDuration();
if (extraToForget > 0) {
RemoveLeadingInternal(extraToForget, 1);
mChunks[0].mDuration += extraToForget;
mDuration += extraToForget;
}
return;
}
RemoveLeading(aDuration);
mChunks.InsertElementAt(0)->SetNull(aDuration);
mDuration += aDuration;
}
virtual void InsertNullDataAtStart(TrackTicks aDuration)
{
if (aDuration <= 0) {
return;
}
if (!mChunks.IsEmpty() && mChunks[0].IsNull()) {
mChunks[0].mDuration += aDuration;
} else {
mChunks.InsertElementAt(0)->SetNull(aDuration);
}
mDuration += aDuration;
}
protected:
MediaSegmentBase(Type aType) : MediaSegment(aType) {}
void BaseSliceFrom(const MediaSegmentBase<C, Chunk>& aOther,
TrackTicks aStart, TrackTicks aEnd)
{
NS_ASSERTION(aStart >= 0 && aEnd <= aOther.mDuration,
"Slice out of range");
TrackTicks offset = 0;
for (PRUint32 i = 0; i < aOther.mChunks.Length() && offset < aEnd; ++i) {
const Chunk& c = aOther.mChunks[i];
TrackTicks start = NS_MAX(aStart, offset);
TrackTicks nextOffset = offset + c.GetDuration();
TrackTicks end = NS_MIN(aEnd, nextOffset);
if (start < end) {
mChunks.AppendElement(c)->SliceTo(start - offset, end - offset);
}
offset = nextOffset;
}
}
Chunk* AppendChunk(TrackTicks aDuration)
{
Chunk* c = mChunks.AppendElement();
c->mDuration = aDuration;
mDuration += aDuration;
return c;
}
Chunk* FindChunkContaining(TrackTicks aOffset, TrackTicks* aStart = nsnull)
{
if (aOffset < 0) {
return nsnull;
}
TrackTicks offset = 0;
for (PRUint32 i = 0; i < mChunks.Length(); ++i) {
Chunk& c = mChunks[i];
TrackTicks nextOffset = offset + c.GetDuration();
if (aOffset < nextOffset) {
if (aStart) {
*aStart = offset;
}
return &c;
}
offset = nextOffset;
}
return nsnull;
}
Chunk* GetLastChunk()
{
if (mChunks.IsEmpty()) {
return nsnull;
}
return &mChunks[mChunks.Length() - 1];
}
class ChunkIterator {
public:
ChunkIterator(MediaSegmentBase<C, Chunk>& aSegment)
: mSegment(aSegment), mIndex(0) {}
bool IsEnded() { return mIndex >= mSegment.mChunks.Length(); }
void Next() { ++mIndex; }
Chunk& operator*() { return mSegment.mChunks[mIndex]; }
Chunk* operator->() { return &mSegment.mChunks[mIndex]; }
private:
MediaSegmentBase<C, Chunk>& mSegment;
PRUint32 mIndex;
};
protected:
void RemoveLeadingInternal(TrackTicks aDuration, PRUint32 aStartIndex)
{
NS_ASSERTION(aDuration >= 0, "Can't remove negative duration");
TrackTicks t = aDuration;
PRUint32 chunksToRemove = 0;
for (PRUint32 i = aStartIndex; i < mChunks.Length() && t > 0; ++i) {
Chunk* c = &mChunks[i];
if (c->GetDuration() > t) {
c->SliceTo(t, c->GetDuration());
t = 0;
break;
}
t -= c->GetDuration();
chunksToRemove = i + 1 - aStartIndex;
}
mChunks.RemoveElementsAt(aStartIndex, chunksToRemove);
mDuration -= aDuration - t;
}
nsTArray<Chunk> mChunks;
};
}
#endif /* MOZILLA_MEDIASEGMENT_H_ */
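To make the null-data semantics concrete, a sketch using AudioSegment as the concrete subclass (tick values are arbitrary example numbers):

AudioSegment segment;
segment.Init(2);
segment.InsertNullDataAtStart(100);  // 100 ticks of silence; GetDuration() == 100
segment.ForgetUpTo(50);              // no-op here: the leading null chunk
                                     // already covers the first 50 ticks
MediaSegment* clone = segment.CreateEmptyClone(); // empty AudioSegment with
                                                  // 2 channels; caller owns it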

content/media/SharedBuffer.h

@@ -0,0 +1,45 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_SHAREDBUFFER_H_
#define MOZILLA_SHAREDBUFFER_H_
#include "mozilla/mozalloc.h"
namespace mozilla {
/**
* Heap-allocated chunk of arbitrary data with threadsafe refcounting.
* Typically you would allocate one of these, fill it in, and then treat it as
* immutable while it's shared.
* This only guarantees 4-byte alignment of the data. For alignment we
* simply assume that the refcount is at least 4-byte aligned and its size
* is divisible by 4.
*/
class SharedBuffer {
public:
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(SharedBuffer)
~SharedBuffer() {}
void* Data() { return this + 1; }
// Allocates a SharedBuffer whose Data() area holds aSize bytes, sharing
// one heap block with the refcount header.
static already_AddRefed<SharedBuffer> Create(size_t aSize)
{
void* m = moz_xmalloc(sizeof(SharedBuffer) + aSize);
nsRefPtr<SharedBuffer> p = new (m) SharedBuffer();
NS_ASSERTION((reinterpret_cast<char*>(p.get() + 1) - reinterpret_cast<char*>(p.get())) % 4 == 0,
"SharedBuffers should be at least 4-byte aligned");
return p.forget();
}
private:
SharedBuffer() {}
};
}
#endif /* MOZILLA_SHAREDBUFFER_H_ */
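A sketch of the intended usage pattern (illustrative only): allocate, fill while uniquely owned, then share.

nsRefPtr<SharedBuffer> buf = SharedBuffer::Create(1024 * sizeof(float));
float* data = static_cast<float*>(buf->Data());
for (PRUint32 i = 0; i < 1024; ++i) {
  data[i] = 0.0f;            // fill before sharing; treat as immutable after
}
// The threadsafe refcount lets other threads hold references safely,
// e.g. via an AudioChunk's mBuffer field.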

content/media/StreamBuffer.cpp

@@ -0,0 +1,60 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "StreamBuffer.h"
namespace mozilla {
StreamTime
StreamBuffer::GetEnd() const
{
StreamTime t = mTracksKnownTime;
for (PRUint32 i = 0; i < mTracks.Length(); ++i) {
Track* track = mTracks[i];
if (!track->IsEnded()) {
t = NS_MIN(t, track->GetEndTimeRoundDown());
}
}
return t;
}
StreamBuffer::Track*
StreamBuffer::FindTrack(TrackID aID)
{
if (aID == TRACK_NONE)
return nsnull;
for (PRUint32 i = 0; i < mTracks.Length(); ++i) {
Track* track = mTracks[i];
if (track->GetID() == aID) {
return track;
}
}
return nsnull;
}
void
StreamBuffer::ForgetUpTo(StreamTime aTime)
{
// Round down to a multiple of 50ms so we don't spend too much time pruning segments.
const MediaTime roundTo = MillisecondsToMediaTime(50);
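// e.g. with MEDIA_TIME_FRAC_BITS == 20, roundTo == (50 << 20)/1000 == 52428,
// so times are truncated to ~50ms boundaries before any pruning happens.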
StreamTime forget = (aTime/roundTo)*roundTo;
if (forget <= mForgottenTime) {
return;
}
mForgottenTime = forget;
for (PRUint32 i = 0; i < mTracks.Length(); ++i) {
Track* track = mTracks[i];
if (track->IsEnded() && track->GetEndTimeRoundDown() <= forget) {
mTracks.RemoveElementAt(i);
--i;
continue;
}
TrackTicks forgetTo = NS_MIN(track->GetEnd() - 1, track->TimeToTicksRoundDown(forget));
track->ForgetUpTo(forgetTo);
}
}
}

content/media/StreamBuffer.h

@@ -0,0 +1,286 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_STREAMBUFFER_H_
#define MOZILLA_STREAMBUFFER_H_
#include "mozilla/Util.h"
#include "MediaSegment.h"
#include "nsAutoPtr.h"
namespace mozilla {
/**
* Media time relative to the start of a StreamBuffer.
*/
typedef MediaTime StreamTime;
const StreamTime STREAM_TIME_MAX = MEDIA_TIME_MAX;
/**
* Track rate in Hz. Maximum 1 << MEDIA_TIME_FRAC_BITS Hz. This ensures
* calculations below don't overflow.
*/
typedef PRInt32 TrackRate;
const TrackRate TRACK_RATE_MAX = 1 << MEDIA_TIME_FRAC_BITS;
/**
* Unique ID for track within a StreamBuffer. Tracks from different
* StreamBuffers may have the same ID; this matters when appending StreamBuffers,
* since tracks with the same ID are matched. Only IDs greater than 0 are allowed.
*/
typedef PRInt32 TrackID;
const TrackID TRACK_NONE = 0;
inline TrackTicks TimeToTicksRoundUp(TrackRate aRate, StreamTime aTime)
{
NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Bad rate");
NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
return (aTime*aRate + (1 << MEDIA_TIME_FRAC_BITS) - 1) >> MEDIA_TIME_FRAC_BITS;
}
inline TrackTicks TimeToTicksRoundDown(TrackRate aRate, StreamTime aTime)
{
NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Bad rate");
NS_ASSERTION(0 <= aTime && aTime <= STREAM_TIME_MAX, "Bad time");
return (aTime*aRate) >> MEDIA_TIME_FRAC_BITS;
}
inline StreamTime TicksToTimeRoundUp(TrackRate aRate, TrackTicks aTicks)
{
NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Bad rate");
NS_ASSERTION(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad samples");
return ((aTicks << MEDIA_TIME_FRAC_BITS) + aRate - 1)/aRate;
}
inline StreamTime TicksToTimeRound(TrackRate aRate, TrackTicks aTicks)
{
NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Bad rate");
NS_ASSERTION(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad samples");
return ((aTicks << MEDIA_TIME_FRAC_BITS) + aRate/2)/aRate;
}
inline StreamTime TicksToTimeRoundDown(TrackRate aRate, TrackTicks aTicks)
{
NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Bad rate");
NS_ASSERTION(0 <= aTicks && aTicks <= TRACK_TICKS_MAX, "Bad samples");
return (aTicks << MEDIA_TIME_FRAC_BITS)/aRate;
}
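// Worked example: with aRate == 44100 and aTicks == 22050 (half a second),
// TicksToTimeRoundDown gives (22050 << 20)/44100 == 1 << 19 (0.5 seconds in
// MediaTime), and TimeToTicksRoundUp(44100, 1 << 19) recovers exactly 22050.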
/**
* This object contains the decoded data for a stream's tracks.
* A StreamBuffer can be appended to. Logically a StreamBuffer only gets longer,
* but we also have the ability to "forget" data before a certain time that
we know won't be used again. (Internally, pruning happens at a granularity
of roughly 50ms; see StreamBuffer::ForgetUpTo.)
*
* StreamBuffers should only be used from one thread at a time.
*
* A StreamBuffer has a set of tracks that can be of arbitrary types ---
* the data for each track is a MediaSegment. The set of tracks can vary
* over the timeline of the StreamBuffer.
*/
class StreamBuffer {
public:
/**
* Every track has a start time --- when it started in the StreamBuffer.
* It has an end flag; when false, no end point is known; when true,
* the track ends when the data we have for the track runs out.
* Tracks have a unique ID assigned at creation. This allows us to identify
* the same track across StreamBuffers. A StreamBuffer should never have
* two tracks with the same ID (even if they don't overlap in time).
* TODO Tracks can also be enabled and disabled over time.
* TODO Add TimeVarying<TrackTicks,bool> mEnabled.
*/
class Track {
public:
Track(TrackID aID, TrackRate aRate, TrackTicks aStart, MediaSegment* aSegment)
: mStart(aStart),
mSegment(aSegment),
mRate(aRate),
mID(aID),
mEnded(false)
{
MOZ_COUNT_CTOR(Track);
NS_ASSERTION(aID > TRACK_NONE, "Bad track ID");
NS_ASSERTION(0 < aRate && aRate <= TRACK_RATE_MAX, "Invalid rate");
NS_ASSERTION(0 <= aStart && aStart <= aSegment->GetDuration(), "Bad start position");
}
~Track()
{
MOZ_COUNT_DTOR(Track);
}
template <class T> T* Get() const
{
if (mSegment->GetType() == T::StaticType()) {
return static_cast<T*>(mSegment.get());
}
return nsnull;
}
MediaSegment* GetSegment() const { return mSegment; }
TrackRate GetRate() const { return mRate; }
TrackID GetID() const { return mID; }
bool IsEnded() const { return mEnded; }
TrackTicks GetStart() const { return mStart; }
TrackTicks GetEnd() const { return mSegment->GetDuration(); }
StreamTime GetEndTimeRoundDown() const
{
return mozilla::TicksToTimeRoundDown(mRate, mSegment->GetDuration());
}
StreamTime GetStartTimeRoundDown() const
{
return mozilla::TicksToTimeRoundDown(mRate, mStart);
}
TrackTicks TimeToTicksRoundDown(StreamTime aTime) const
{
return mozilla::TimeToTicksRoundDown(mRate, aTime);
}
StreamTime TicksToTimeRoundDown(TrackTicks aTicks) const
{
return mozilla::TicksToTimeRoundDown(mRate, aTicks);
}
MediaSegment::Type GetType() const { return mSegment->GetType(); }
void SetEnded() { mEnded = true; }
void AppendFrom(Track* aTrack)
{
NS_ASSERTION(!mEnded, "Can't append to ended track");
NS_ASSERTION(aTrack->mID == mID, "IDs must match");
NS_ASSERTION(aTrack->mStart == 0, "Source track must start at zero");
NS_ASSERTION(aTrack->mSegment->GetType() == GetType(), "Track types must match");
NS_ASSERTION(aTrack->mRate == mRate, "Track rates must match");
mSegment->AppendFrom(aTrack->mSegment);
mEnded = aTrack->mEnded;
}
MediaSegment* RemoveSegment()
{
return mSegment.forget();
}
void ForgetUpTo(TrackTicks aTime)
{
mSegment->ForgetUpTo(aTime);
}
protected:
friend class StreamBuffer;
// Start offset is in ticks at rate mRate
TrackTicks mStart;
// The segment data starts at the start of the owning StreamBuffer, i.e.,
// there's mStart silence/no video at the beginning.
nsAutoPtr<MediaSegment> mSegment;
TrackRate mRate; // rate in ticks per second
// Unique ID
TrackID mID;
// True when the track ends with the data in mSegment
bool mEnded;
};
class CompareTracksByID {
public:
bool Equals(Track* aA, Track* aB) const {
return aA->GetID() == aB->GetID();
}
bool LessThan(Track* aA, Track* aB) const {
return aA->GetID() < aB->GetID();
}
};
StreamBuffer()
: mTracksKnownTime(0), mForgottenTime(0)
{
MOZ_COUNT_CTOR(StreamBuffer);
}
~StreamBuffer()
{
MOZ_COUNT_DTOR(StreamBuffer);
}
/**
* Takes ownership of aSegment. Don't do this while iterating, or while
* holding a Track reference.
* aSegment must have aStart worth of null data.
*/
Track& AddTrack(TrackID aID, TrackRate aRate, TrackTicks aStart, MediaSegment* aSegment)
{
NS_ASSERTION(TimeToTicksRoundDown(aRate, mTracksKnownTime) <= aStart,
"Start time too early");
NS_ASSERTION(!FindTrack(aID), "Track with this ID already exists");
return **mTracks.InsertElementSorted(new Track(aID, aRate, aStart, aSegment),
CompareTracksByID());
}
void AdvanceKnownTracksTime(StreamTime aKnownTime)
{
NS_ASSERTION(aKnownTime >= mTracksKnownTime, "Can't move tracks-known time earlier");
mTracksKnownTime = aKnownTime;
}
/**
* The end time for the StreamBuffer is the latest time for which we have
* data for all tracks that haven't ended by that time.
*/
StreamTime GetEnd() const;
Track* FindTrack(TrackID aID);
class TrackIter {
public:
/**
* Iterate through the tracks of aBuffer in order of ID.
*/
TrackIter(const StreamBuffer& aBuffer) :
mBuffer(&aBuffer.mTracks), mIndex(0), mMatchType(false) {}
/**
* Iterate through the tracks of aBuffer with type aType, in order of ID.
*/
TrackIter(const StreamBuffer& aBuffer, MediaSegment::Type aType) :
mBuffer(&aBuffer.mTracks), mIndex(0), mType(aType), mMatchType(true) { FindMatch(); }
bool IsEnded() { return mIndex >= mBuffer->Length(); }
void Next()
{
++mIndex;
FindMatch();
}
Track& operator*() { return *mBuffer->ElementAt(mIndex); }
Track* operator->() { return mBuffer->ElementAt(mIndex); }
private:
void FindMatch()
{
if (!mMatchType)
return;
while (mIndex < mBuffer->Length() &&
mBuffer->ElementAt(mIndex)->GetType() != mType) {
++mIndex;
}
}
const nsTArray<nsAutoPtr<Track> >* mBuffer;
PRUint32 mIndex;
MediaSegment::Type mType;
bool mMatchType;
};
friend class TrackIter;
/**
* Forget stream data before aTime; they will no longer be needed.
* Also can forget entire tracks that have ended at or before aTime.
* Can't be used to forget beyond GetEnd().
*/
void ForgetUpTo(StreamTime aTime);
protected:
// Any new tracks added will start at or after this time. In other words, the track
// list is complete and correct for all times less than this time.
StreamTime mTracksKnownTime;
StreamTime mForgottenTime;
// All known tracks for this StreamBuffer
nsTArray<nsAutoPtr<Track> > mTracks;
};
}
#endif /* MOZILLA_STREAMBUFFER_H_ */
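A sketch of typical StreamBuffer usage (IDs, rates and durations are example values chosen for illustration):

StreamBuffer buffer;
AudioSegment* audio = new AudioSegment();
audio->Init(1);
// Track 1: mono audio at 44100 ticks per second, starting at tick 0;
// AddTrack takes ownership of the segment.
buffer.AddTrack(1, 44100, 0, audio);
// The track list is now complete and correct for the first second.
buffer.AdvanceKnownTracksTime(SecondsToMediaTime(1.0));
for (StreamBuffer::TrackIter i(buffer, MediaSegment::AUDIO); !i.IsEnded(); i.Next()) {
  i->Get<AudioSegment>()->InsertNullDataAtStart(44100); // pad 1s of silence
}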

content/media/VideoSegment.h

@@ -0,0 +1,117 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_VIDEOSEGMENT_H_
#define MOZILLA_VIDEOSEGMENT_H_
#include "MediaSegment.h"
#include "ImageLayers.h"
namespace mozilla {
class VideoFrame {
public:
typedef mozilla::layers::Image Image;
VideoFrame(already_AddRefed<Image> aImage, const gfxIntSize& aIntrinsicSize)
: mImage(aImage), mIntrinsicSize(aIntrinsicSize) {}
VideoFrame() : mIntrinsicSize(0, 0) {}
bool operator==(const VideoFrame& aFrame) const
{
return mImage == aFrame.mImage && mIntrinsicSize == aFrame.mIntrinsicSize;
}
bool operator!=(const VideoFrame& aFrame) const
{
return !operator==(aFrame);
}
Image* GetImage() const { return mImage; }
const gfxIntSize& GetIntrinsicSize() const { return mIntrinsicSize; }
void SetNull() { mImage = nsnull; mIntrinsicSize = gfxIntSize(0, 0); }
void TakeFrom(VideoFrame* aFrame)
{
mImage = aFrame->mImage.forget();
mIntrinsicSize = aFrame->mIntrinsicSize;
}
protected:
// mImage can be null to indicate "no video" (aka "empty frame"). It can
// still have an intrinsic size in this case.
nsRefPtr<Image> mImage;
// The desired size to render the video frame at.
gfxIntSize mIntrinsicSize;
};
struct VideoChunk {
void SliceTo(TrackTicks aStart, TrackTicks aEnd)
{
NS_ASSERTION(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
"Slice out of bounds");
mDuration = aEnd - aStart;
}
TrackTicks GetDuration() const { return mDuration; }
bool CanCombineWithFollowing(const VideoChunk& aOther) const
{
return aOther.mFrame == mFrame;
}
bool IsNull() const { return !mFrame.GetImage(); }
void SetNull(TrackTicks aDuration)
{
mDuration = aDuration;
mFrame.SetNull();
}
TrackTicks mDuration;
VideoFrame mFrame;
};
class VideoSegment : public MediaSegmentBase<VideoSegment, VideoChunk> {
public:
typedef mozilla::layers::Image Image;
VideoSegment() : MediaSegmentBase<VideoSegment, VideoChunk>(VIDEO) {}
void AppendFrame(already_AddRefed<Image> aImage, TrackTicks aDuration,
const gfxIntSize& aIntrinsicSize)
{
VideoChunk* chunk = AppendChunk(aDuration);
VideoFrame frame(aImage, aIntrinsicSize);
chunk->mFrame.TakeFrom(&frame);
}
const VideoFrame* GetFrameAt(TrackTicks aOffset, TrackTicks* aStart = nsnull)
{
VideoChunk* c = FindChunkContaining(aOffset, aStart);
if (!c) {
return nsnull;
}
return &c->mFrame;
}
const VideoFrame* GetLastFrame(TrackTicks* aStart = nsnull)
{
VideoChunk* c = GetLastChunk();
if (!c) {
return nsnull;
}
if (aStart) {
*aStart = mDuration - c->mDuration;
}
return &c->mFrame;
}
// Segment-generic methods not in MediaSegmentBase
void InitFrom(const VideoSegment& aOther)
{
}
void SliceFrom(const VideoSegment& aOther, TrackTicks aStart, TrackTicks aEnd) {
BaseSliceFrom(aOther, aStart, aEnd);
}
static Type StaticType() { return VIDEO; }
};
}
#endif /* MOZILLA_VIDEOSEGMENT_H_ */
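And a corresponding sketch for video (aImage stands for an already_AddRefed<Image> obtained elsewhere, e.g. from an ImageContainer; 90000 ticks is an arbitrary duration):

VideoSegment segment;
segment.AppendFrame(aImage, 90000, gfxIntSize(640, 480));
TrackTicks start;
const VideoFrame* frame = segment.GetLastFrame(&start); // start == 0 here
if (frame && frame->GetImage()) {
  // non-null image: there is actual video to composite for this interval
}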