Bug 1064128 - Implement support for timestampOffset in segments mode. r=k17e,r=cajbir
commit 4e76b3f561
parent d3b3b9d426
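
In segments mode, the SourceBuffer's timestampOffset must shift every sample parsed from an appended media segment onto the presentation timeline. This patch threads the offset, converted to microseconds, from SourceBuffer through TrackBuffer and SourceBufferDecoder down to the MP4 demuxer. A minimal standalone sketch of the underlying arithmetic, not part of the patch (names are illustrative; the conversion mirrors the mTimestampOffset * USECS_PER_S call added to SourceBuffer::AppendData below):

#include <cstdint>
#include <cstdio>

static const int64_t USECS_PER_S = 1000000;

int main() {
  // Script sets sourceBuffer.timestampOffset = 30 (seconds) before appending
  // a segment whose samples start at media time 0.
  double timestampOffset = 30.0;
  int64_t offsetUs = static_cast<int64_t>(timestampOffset * USECS_PER_S);
  int64_t sampleTimeUs = 0; // first sample's time within the segment
  // The demuxer adds the offset, so the sample presents at t = 30 s.
  std::printf("presentation time: %lld us\n",
              static_cast<long long>(sampleTimeUs + offsetUs));
  return 0;
}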
@@ -65,6 +65,9 @@ public:
   // Can be called on any thread.
   virtual void NotifyDecodedFrames(uint32_t aParsed, uint32_t aDecoded) = 0;
 
+  // For decoders with a notion of timestamp offset, returns the value in microseconds.
+  virtual int64_t GetTimestampOffset() const { return 0; }
+
   // Return the duration of the media in microseconds.
   virtual int64_t GetMediaDuration() = 0;
 
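
The default implementation above means only decoders that actually carry an offset need to override the accessor; ordinary decoders inherit the neutral 0. A minimal sketch of the pattern, with hypothetical class names:

#include <cstdint>

class DecoderBase {
public:
  virtual ~DecoderBase() {}
  // Defaults to 0: "no offset" for ordinary decoders.
  virtual int64_t GetTimestampOffset() const { return 0; }
};

class OffsetDecoder : public DecoderBase {
public:
  explicit OffsetDecoder(int64_t aOffsetUs) : mTimestampOffset(aOffsetUs) {}
  virtual int64_t GetTimestampOffset() const override { return mTimestampOffset; }
private:
  int64_t mTimestampOffset; // microseconds
};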
@@ -157,7 +157,7 @@ MP4Reader::Init(MediaDecoderReader* aCloneDonor)
 {
   MOZ_ASSERT(NS_IsMainThread(), "Must be on main thread.");
   PlatformDecoderModule::Init();
-  mDemuxer = new MP4Demuxer(new MP4Stream(mDecoder->GetResource(), &mDemuxerMonitor), &mDemuxerMonitor);
+  mDemuxer = new MP4Demuxer(new MP4Stream(mDecoder->GetResource(), &mDemuxerMonitor), GetDecoder()->GetTimestampOffset(), &mDemuxerMonitor);
 
   InitLayersBackendType();
 
@@ -24,7 +24,7 @@ public:
   explicit MP4DemuxerBinding(const char* aFileName = "dash_dashinit.mp4")
     : resource(new MockMediaResource(aFileName))
     , mMonitor("TestMP4Demuxer monitor")
-    , demuxer(new MP4Demuxer(new MP4Stream(resource, &mMonitor), &mMonitor))
+    , demuxer(new MP4Demuxer(new MP4Stream(resource, &mMonitor), 0, &mMonitor))
   {
     EXPECT_EQ(NS_OK, resource->Open(nullptr));
   }
@@ -249,7 +249,11 @@ public:
     bool initSegment = IsInitSegmentPresent(aData, aLength);
     if (initSegment) {
       mStream = new mp4_demuxer::BufferStream();
-      mParser = new mp4_demuxer::MoofParser(mStream, 0, &mMonitor);
+      // We use a timestampOffset of 0 for ContainerParser, and require
+      // consumers of ParseStartAndEndTimestamps to add their timestamp offset
+      // manually. This allows the ContainerParser to be shared across different
+      // timestampOffsets.
+      mParser = new mp4_demuxer::MoofParser(mStream, 0, 0, &mMonitor);
     } else if (!mStream || !mParser) {
       return false;
     }
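
A small sketch of the division of labour the new comment describes (the helper is hypothetical): the shared ContainerParser reports unshifted timestamps, and each consumer of ParseStartAndEndTimestamps applies its own offset, as TrackBuffer::AppendData does further down.

#include <cstdint>

struct ParsedRange { int64_t start; int64_t end; }; // microseconds, unshifted

ParsedRange ApplyTimestampOffset(ParsedRange aRange, int64_t aTimestampOffset) {
  // Mirrors the "start += aTimestampOffset; end += aTimestampOffset;" lines
  // added to TrackBuffer::AppendData in this commit.
  return ParsedRange{ aRange.start + aTimestampOffset,
                      aRange.end + aTimestampOffset };
}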
@@ -129,10 +129,10 @@ MediaSourceDecoder::DetachMediaSource()
 }
 
 already_AddRefed<SourceBufferDecoder>
-MediaSourceDecoder::CreateSubDecoder(const nsACString& aType)
+MediaSourceDecoder::CreateSubDecoder(const nsACString& aType, int64_t aTimestampOffset)
 {
   MOZ_ASSERT(mReader);
-  return mReader->CreateSubDecoder(aType);
+  return mReader->CreateSubDecoder(aType, aTimestampOffset);
 }
 
 void
@@ -46,7 +46,8 @@ public:
   void AttachMediaSource(dom::MediaSource* aMediaSource);
   void DetachMediaSource();
 
-  already_AddRefed<SourceBufferDecoder> CreateSubDecoder(const nsACString& aType);
+  already_AddRefed<SourceBufferDecoder> CreateSubDecoder(const nsACString& aType,
+                                                         int64_t aTimestampOffset /* microseconds */);
   void AddTrackBuffer(TrackBuffer* aTrackBuffer);
   void RemoveTrackBuffer(TrackBuffer* aTrackBuffer);
   void OnTrackBufferConfigured(TrackBuffer* aTrackBuffer, const MediaInfo& aInfo);
@@ -457,14 +457,14 @@ CreateReaderForType(const nsACString& aType, AbstractMediaDecoder* aDecoder)
 }
 
 already_AddRefed<SourceBufferDecoder>
-MediaSourceReader::CreateSubDecoder(const nsACString& aType)
+MediaSourceReader::CreateSubDecoder(const nsACString& aType, int64_t aTimestampOffset)
 {
   if (IsShutdown()) {
     return nullptr;
   }
   MOZ_ASSERT(GetTaskQueue());
   nsRefPtr<SourceBufferDecoder> decoder =
-    new SourceBufferDecoder(new SourceBufferResource(aType), mDecoder);
+    new SourceBufferDecoder(new SourceBufferResource(aType), mDecoder, aTimestampOffset);
   nsRefPtr<MediaDecoderReader> reader(CreateReaderForType(aType, decoder));
   if (!reader) {
     return nullptr;
@@ -99,7 +99,8 @@ public:
   // Acquires the decoder monitor, and is thus callable on any thread.
   nsresult GetBuffered(dom::TimeRanges* aBuffered) MOZ_OVERRIDE;
 
-  already_AddRefed<SourceBufferDecoder> CreateSubDecoder(const nsACString& aType);
+  already_AddRefed<SourceBufferDecoder> CreateSubDecoder(const nsACString& aType,
+                                                         int64_t aTimestampOffset /* microseconds */);
 
   void AddTrackBuffer(TrackBuffer* aTrackBuffer);
   void RemoveTrackBuffer(TrackBuffer* aTrackBuffer);
@@ -335,7 +335,9 @@ SourceBuffer::AppendData(const uint8_t* aData, uint32_t aLength, ErrorResult& aRv)
   }
   StartUpdating();
 
-  if (!mTrackBuffer->AppendData(aData, aLength)) {
+  MOZ_ASSERT(mAppendMode == SourceBufferAppendMode::Segments,
+             "We don't handle timestampOffset for sequence mode yet");
+  if (!mTrackBuffer->AppendData(aData, aLength, mTimestampOffset * USECS_PER_S)) {
     Optional<MediaSourceEndOfStreamError> decodeError(MediaSourceEndOfStreamError::Decode);
     ErrorResult dummy;
     mMediaSource->EndOfStream(decodeError, dummy);
@@ -33,10 +33,12 @@ class ImageContainer;
 NS_IMPL_ISUPPORTS0(SourceBufferDecoder)
 
 SourceBufferDecoder::SourceBufferDecoder(MediaResource* aResource,
-                                         AbstractMediaDecoder* aParentDecoder)
+                                         AbstractMediaDecoder* aParentDecoder,
+                                         int64_t aTimestampOffset)
   : mResource(aResource)
   , mParentDecoder(aParentDecoder)
   , mReader(nullptr)
+  , mTimestampOffset(aTimestampOffset)
   , mMediaDuration(-1)
 {
   MOZ_ASSERT(NS_IsMainThread());
@@ -32,7 +32,8 @@ class SourceBufferDecoder MOZ_FINAL : public AbstractMediaDecoder
 public:
   // This class holds a weak pointer to MediaResource. It's the responsibility
   // of the caller to manage the memory of the MediaResource object.
-  SourceBufferDecoder(MediaResource* aResource, AbstractMediaDecoder* aParentDecoder);
+  SourceBufferDecoder(MediaResource* aResource, AbstractMediaDecoder* aParentDecoder,
+                      int64_t aTimestampOffset /* microseconds */);
 
   NS_DECL_THREADSAFE_ISUPPORTS
 
@@ -41,6 +42,7 @@ public:
   virtual bool IsTransportSeekable() MOZ_FINAL MOZ_OVERRIDE;
   virtual bool OnDecodeThread() const MOZ_FINAL MOZ_OVERRIDE;
   virtual bool OnStateMachineThread() const MOZ_FINAL MOZ_OVERRIDE;
+  virtual int64_t GetTimestampOffset() const MOZ_FINAL MOZ_OVERRIDE { return mTimestampOffset; }
   virtual int64_t GetMediaDuration() MOZ_FINAL MOZ_OVERRIDE;
   virtual layers::ImageContainer* GetImageContainer() MOZ_FINAL MOZ_OVERRIDE;
   virtual MediaDecoderOwner* GetOwner() MOZ_FINAL MOZ_OVERRIDE;
@@ -129,6 +131,7 @@ private:
 
   AbstractMediaDecoder* mParentDecoder;
   nsRefPtr<MediaDecoderReader> mReader;
+  int64_t mTimestampOffset;
   int64_t mMediaDuration;
 
 #ifdef MOZ_EME
@@ -39,6 +39,7 @@ TrackBuffer::TrackBuffer(MediaSourceDecoder* aParentDecoder, const nsACString& aType)
   : mParentDecoder(aParentDecoder)
   , mType(aType)
   , mLastStartTimestamp(0)
+  , mLastTimestampOffset(0)
   , mShutdown(false)
 {
   MOZ_COUNT_CTOR(TrackBuffer);
@@ -85,9 +86,9 @@ public:
     }
   }
 
-  bool NewDecoder()
+  bool NewDecoder(int64_t aTimestampOffset)
   {
-    nsRefPtr<SourceBufferDecoder> decoder = mOwner->NewDecoder();
+    nsRefPtr<SourceBufferDecoder> decoder = mOwner->NewDecoder(aTimestampOffset);
     if (!decoder) {
       return false;
     }
@@ -138,14 +139,14 @@ TrackBuffer::ContinueShutdown()
 }
 
 bool
-TrackBuffer::AppendData(const uint8_t* aData, uint32_t aLength)
+TrackBuffer::AppendData(const uint8_t* aData, uint32_t aLength, int64_t aTimestampOffset)
 {
   MOZ_ASSERT(NS_IsMainThread());
   DecodersToInitialize decoders(this);
   // TODO: Run more of the buffer append algorithm asynchronously.
   if (mParser->IsInitSegmentPresent(aData, aLength)) {
     MSE_DEBUG("TrackBuffer(%p)::AppendData: New initialization segment.", this);
-    if (!decoders.NewDecoder()) {
+    if (!decoders.NewDecoder(aTimestampOffset)) {
       return false;
     }
   } else if (!mParser->HasInitData()) {
@@ -155,16 +156,19 @@ TrackBuffer::AppendData(const uint8_t* aData, uint32_t aLength)
 
   int64_t start, end;
   if (mParser->ParseStartAndEndTimestamps(aData, aLength, start, end)) {
+    start += aTimestampOffset;
+    end += aTimestampOffset;
     if (mParser->IsMediaSegmentPresent(aData, aLength) &&
         mLastEndTimestamp &&
         (!mParser->TimestampsFuzzyEqual(start, mLastEndTimestamp.value()) ||
+         mLastTimestampOffset != aTimestampOffset ||
          mDecoderPerSegment)) {
       MSE_DEBUG("TrackBuffer(%p)::AppendData: Data last=[%lld, %lld] overlaps [%lld, %lld]",
                 this, mLastStartTimestamp, mLastEndTimestamp.value(), start, end);
 
       // This data is earlier in the timeline than data we have already
       // processed, so we must create a new decoder to handle the decoding.
-      if (!decoders.NewDecoder()) {
+      if (!decoders.NewDecoder(aTimestampOffset)) {
         return false;
       }
       MSE_DEBUG("TrackBuffer(%p)::AppendData: Decoder marked as initialized.", this);
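
A simplified sketch of the new-decoder test in the hunk above (the helper and fuzz value are hypothetical stand-ins): after the offset is applied, a segment that does not continue the previous one, or that arrives under a different timestampOffset, forces a fresh decoder.

#include <cstdint>

bool NeedsNewDecoder(int64_t aStart, int64_t aLastEnd,
                     int64_t aOffset, int64_t aLastOffset,
                     bool aDecoderPerSegment) {
  // Stand-in tolerance for mParser->TimestampsFuzzyEqual().
  const int64_t kFuzzUs = 100000;
  const int64_t gap = aStart > aLastEnd ? aStart - aLastEnd : aLastEnd - aStart;
  // Any of: timeline discontinuity, changed offset, or one-decoder-per-segment
  // debugging mode triggers decoder replacement.
  return gap > kFuzzUs || aOffset != aLastOffset || aDecoderPerSegment;
}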
@@ -331,14 +335,14 @@ TrackBuffer::Buffered(dom::TimeRanges* aRanges)
 }
 
 already_AddRefed<SourceBufferDecoder>
-TrackBuffer::NewDecoder()
+TrackBuffer::NewDecoder(int64_t aTimestampOffset)
 {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(mParentDecoder);
 
   DiscardDecoder();
 
-  nsRefPtr<SourceBufferDecoder> decoder = mParentDecoder->CreateSubDecoder(mType);
+  nsRefPtr<SourceBufferDecoder> decoder = mParentDecoder->CreateSubDecoder(mType, aTimestampOffset);
   if (!decoder) {
     return nullptr;
   }
@@ -348,6 +352,7 @@ TrackBuffer::NewDecoder()
 
   mLastStartTimestamp = 0;
   mLastEndTimestamp.reset();
+  mLastTimestampOffset = aTimestampOffset;
 
   decoder->SetTaskQueue(mTaskQueue);
   return decoder.forget();
@@ -39,7 +39,7 @@ public:
   // Append data to the current decoder. Also responsible for calling
   // NotifyDataArrived on the decoder to keep buffered range computation up
   // to date. Returns false if the append failed.
-  bool AppendData(const uint8_t* aData, uint32_t aLength);
+  bool AppendData(const uint8_t* aData, uint32_t aLength, int64_t aTimestampOffset /* microseconds */);
   bool EvictData(uint32_t aThreshold);
   void EvictBefore(double aTime);
 
@@ -92,7 +92,7 @@ private:
   // for initialization.
   // The decoder is not considered initialized until it is added to
   // mInitializedDecoders.
-  already_AddRefed<SourceBufferDecoder> NewDecoder();
+  already_AddRefed<SourceBufferDecoder> NewDecoder(int64_t aTimestampOffset /* microseconds */);
 
   // Helper for AppendData, ensures NotifyDataArrived is called whenever
   // data is appended to the current decoder's SourceBufferResource.
@@ -156,6 +156,9 @@ private:
   int64_t mLastStartTimestamp;
   Maybe<int64_t> mLastEndTimestamp;
 
+  // The timestamp offset used by our current decoder, in microseconds.
+  int64_t mLastTimestampOffset;
+
   // Set when the first decoder used by this TrackBuffer is initialized.
   // Protected by mParentDecoder's monitor.
   MediaInfo mInfo;
@@ -233,11 +233,13 @@ MP4Sample::~MP4Sample()
 }
 
 void
-MP4Sample::Update(int64_t& aMediaTime)
+MP4Sample::Update(int64_t& aMediaTime, int64_t& aTimestampOffset)
 {
   sp<MetaData> m = mMediaBuffer->meta_data();
-  decode_timestamp = FindInt64(m, kKeyDecodingTime);
-  composition_timestamp = FindInt64(m, kKeyTime) - aMediaTime;
+  // XXXbholley - Why don't we adjust decode_timestamp for aMediaTime?
+  // According to k17e, this code path is no longer used - we should probably remove it.
+  decode_timestamp = FindInt64(m, kKeyDecodingTime) + aTimestampOffset;
+  composition_timestamp = FindInt64(m, kKeyTime) - aMediaTime + aTimestampOffset;
   duration = FindInt64(m, kKeyDuration);
   byte_offset = FindInt64(m, kKey64BitFileOffset);
   is_sync_point = FindInt32(m, kKeyIsSyncFrame);
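
A worked sketch of the arithmetic in MP4Sample::Update above, with made-up numbers: the offset shifts both timestamps, while aMediaTime (the edit-list media time) is still only subtracted from the composition timestamp.

#include <cstdint>
#include <cstdio>

int main() {
  int64_t keyDecodingTime = 2000000; // FindInt64(m, kKeyDecodingTime), 2 s
  int64_t keyTime = 2000000;         // FindInt64(m, kKeyTime)
  int64_t mediaTime = 0;             // aMediaTime
  int64_t offset = 30000000;         // aTimestampOffset, 30 s
  int64_t decode = keyDecodingTime + offset;          // 32000000
  int64_t composition = keyTime - mediaTime + offset; // 32000000
  std::printf("decode=%lld composition=%lld\n",
              static_cast<long long>(decode), static_cast<long long>(composition));
  return 0;
}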
@@ -187,12 +187,13 @@ void SampleIterator::Seek(Microseconds aTime)
 }
 
 Index::Index(const stagefright::Vector<MediaSource::Indice>& aIndex,
-             Stream* aSource, uint32_t aTrackId, Monitor* aMonitor)
+             Stream* aSource, uint32_t aTrackId, Microseconds aTimestampOffset,
+             Monitor* aMonitor)
   : mSource(aSource)
   , mMonitor(aMonitor)
 {
   if (aIndex.isEmpty()) {
-    mMoofParser = new MoofParser(aSource, aTrackId, aMonitor);
+    mMoofParser = new MoofParser(aSource, aTrackId, aTimestampOffset, aMonitor);
   } else {
     for (size_t i = 0; i < aIndex.size(); i++) {
       const MediaSource::Indice& indice = aIndex[i];
@@ -28,7 +28,7 @@ MoofParser::RebuildFragmentedIndex(BoxContext& aContext)
       mInitRange = MediaByteRange(0, box.Range().mEnd);
       ParseMoov(box);
     } else if (box.IsType("moof")) {
-      Moof moof(box, mTrex, mMdhd, mEdts);
+      Moof moof(box, mTrex, mMdhd, mEdts, mTimestampOffset);
 
       if (!mMoofs.IsEmpty()) {
         // Stitch time ranges together in the case of a (hopefully small) time
@@ -164,8 +164,8 @@ MoofParser::ParseMvex(Box& aBox)
   }
 }
 
-Moof::Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts) :
-    mRange(aBox.Range()), mMaxRoundingError(0)
+Moof::Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Microseconds aTimestampOffset) :
+    mRange(aBox.Range()), mTimestampOffset(aTimestampOffset), mMaxRoundingError(0)
 {
   for (Box box = aBox.FirstChild(); box.IsAvailable(); box = box.Next()) {
     if (box.IsType("traf")) {
@@ -327,10 +327,10 @@ Moof::ParseTrun(Box& aBox, Tfhd& aTfhd, Tfdt& aTfdt, Mdhd& aMdhd, Edts& aEdts)
     sample.mByteRange = MediaByteRange(offset, offset + sampleSize);
     offset += sampleSize;
 
-    sample.mDecodeTime = aMdhd.ToMicroseconds(decodeTime);
+    sample.mDecodeTime = aMdhd.ToMicroseconds(decodeTime) + mTimestampOffset;
     sample.mCompositionRange = Interval<Microseconds>(
-      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset - aEdts.mMediaStart),
-      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset + sampleDuration - aEdts.mMediaStart));
+      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset - aEdts.mMediaStart) + mTimestampOffset,
+      aMdhd.ToMicroseconds((int64_t)decodeTime + ctsOffset + sampleDuration - aEdts.mMediaStart) + mTimestampOffset);
     decodeTime += sampleDuration;
 
     sample.mSync = !(sampleFlags & 0x1010000);
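
A worked sketch of the trun arithmetic above, assuming a hypothetical 90 kHz track timescale: track time units are rescaled to microseconds first (the same idea as Mdhd::ToMicroseconds), then the buffer-wide mTimestampOffset is added.

#include <cstdint>
#include <cstdio>

int64_t ToMicroseconds(int64_t aTimeUnits, uint32_t aTimescale) {
  return aTimeUnits * 1000000 / aTimescale;
}

int main() {
  const uint32_t timescale = 90000;           // 90 kHz, common for video tracks
  const int64_t decodeTime = 180000;          // 2 s in track time units
  const int64_t timestampOffsetUs = 30000000; // 30 s in microseconds
  std::printf("mDecodeTime = %lld us\n",
              static_cast<long long>(ToMicroseconds(decodeTime, timescale) + timestampOffsetUs));
  // Prints 32000000 us: the sample now sits 30 s later on the media timeline.
  return 0;
}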
@@ -157,7 +157,7 @@ public:
   MP4Sample();
   MP4Sample(const MP4Sample& copy);
   virtual ~MP4Sample();
-  void Update(int64_t& aMediaTime);
+  void Update(int64_t& aMediaTime, int64_t& aTimestampOffset);
   void Pad(size_t aPaddingBytes);
 
   stagefright::MediaBuffer* mMediaBuffer;
@@ -37,7 +37,8 @@ public:
   NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Index)
 
   Index(const stagefright::Vector<stagefright::MediaSource::Indice>& aIndex,
-        Stream* aSource, uint32_t aTrackId, Monitor* aMonitor);
+        Stream* aSource, uint32_t aTrackId, Microseconds aTimestampOffset,
+        Monitor* aMonitor);
 
   void UpdateMoofIndex(const nsTArray<mozilla::MediaByteRange>& aByteRanges);
   Microseconds GetEndCompositionIfBuffered(
@@ -151,7 +151,7 @@ private:
 class Moof
 {
 public:
-  Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts);
+  Moof(Box& aBox, Trex& aTrex, Mdhd& aMdhd, Edts& aEdts, Microseconds aTimestampOffset);
   bool GetAuxInfo(AtomType aType, nsTArray<MediaByteRange>* aByteRanges);
   void FixRounding(const Moof& aMoof);
 
@@ -169,14 +169,17 @@ private:
   void ParseSaiz(Box& aBox);
   void ParseSaio(Box& aBox);
   bool ProcessCenc();
+  Microseconds mTimestampOffset;
   uint64_t mMaxRoundingError;
 };
 
 class MoofParser
 {
 public:
-  MoofParser(Stream* aSource, uint32_t aTrackId, Monitor* aMonitor)
-    : mSource(aSource), mOffset(0), mTrex(aTrackId), mMonitor(aMonitor)
+  MoofParser(Stream* aSource, uint32_t aTrackId,
+             Microseconds aTimestampOffset, Monitor* aMonitor)
+    : mSource(aSource), mOffset(0), mTimestampOffset(aTimestampOffset),
+      mTrex(aTrackId), mMonitor(aMonitor)
   {
     // Setting the mTrex.mTrackId to 0 is a nasty work around for calculating
     // the composition range for MSE. We need an array of tracks.
@@ -197,6 +200,7 @@ public:
   mozilla::MediaByteRange mInitRange;
   nsRefPtr<Stream> mSource;
   uint64_t mOffset;
+  Microseconds mTimestampOffset;
   nsTArray<uint64_t> mMoofOffsets;
   Mdhd mMdhd;
   Trex mTrex;
@@ -43,7 +43,7 @@ enum TrackType { kVideo = 1, kAudio };
 class MP4Demuxer
 {
 public:
-  explicit MP4Demuxer(Stream* aSource, Monitor* aMonitor);
+  explicit MP4Demuxer(Stream* aSource, Microseconds aTimestampOffset, Monitor* aMonitor);
   ~MP4Demuxer();
 
   bool Init();
@@ -82,6 +82,7 @@ private:
   nsRefPtr<Stream> mSource;
   nsTArray<mozilla::MediaByteRange> mCachedByteRanges;
   nsTArray<Interval<Microseconds>> mCachedTimeRanges;
+  Microseconds mTimestampOffset;
   Monitor* mMonitor;
 };
 
@@ -73,8 +73,9 @@ private:
   nsRefPtr<Stream> mSource;
 };
 
-MP4Demuxer::MP4Demuxer(Stream* source, Monitor* aMonitor)
-  : mPrivate(new StageFrightPrivate()), mSource(source), mMonitor(aMonitor)
+MP4Demuxer::MP4Demuxer(Stream* source, Microseconds aTimestampOffset, Monitor* aMonitor)
+  : mPrivate(new StageFrightPrivate()), mSource(source),
+    mTimestampOffset(aTimestampOffset), mMonitor(aMonitor)
 {
   mPrivate->mExtractor = new MPEG4Extractor(new DataSourceAdapter(source));
 }
@@ -110,7 +111,8 @@ MP4Demuxer::Init()
       mPrivate->mAudio = track;
       mAudioConfig.Update(metaData, mimeType);
       nsRefPtr<Index> index = new Index(mPrivate->mAudio->exportIndex(),
-                                        mSource, mAudioConfig.mTrackId, mMonitor);
+                                        mSource, mAudioConfig.mTrackId,
+                                        mTimestampOffset, mMonitor);
       mPrivate->mIndexes.AppendElement(index);
       if (index->IsFragmented() && !mAudioConfig.crypto.valid) {
         mPrivate->mAudioIterator = new SampleIterator(index);
@@ -123,7 +125,8 @@ MP4Demuxer::Init()
       mPrivate->mVideo = track;
       mVideoConfig.Update(metaData, mimeType);
       nsRefPtr<Index> index = new Index(mPrivate->mVideo->exportIndex(),
-                                        mSource, mVideoConfig.mTrackId, mMonitor);
+                                        mSource, mVideoConfig.mTrackId,
+                                        mTimestampOffset, mMonitor);
       mPrivate->mIndexes.AppendElement(index);
       if (index->IsFragmented() && !mVideoConfig.crypto.valid) {
         mPrivate->mVideoIterator = new SampleIterator(index);
@@ -213,7 +216,7 @@ MP4Demuxer::DemuxAudioSample()
     return nullptr;
   }
 
-  sample->Update(mAudioConfig.media_time);
+  sample->Update(mAudioConfig.media_time, mTimestampOffset);
 
   return sample.forget();
 }
@@ -243,7 +246,7 @@ MP4Demuxer::DemuxVideoSample()
     return nullptr;
   }
 
-  sample->Update(mVideoConfig.media_time);
+  sample->Update(mVideoConfig.media_time, mTimestampOffset);
   sample->extra_data = mVideoConfig.extra_data;
 
   return sample.forget();