Bug 908503: Remove Chrome demuxer; r=cpearce

This commit is contained in:
Anthony Jones 2014-05-12 09:46:44 +12:00
parent 1b4ffc4277
commit a3ab153578
33 changed files with 5 additions and 5391 deletions

View File

@@ -1,27 +0,0 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,21 +0,0 @@
#pragma once
#include <stdint.h>
namespace mp4_demuxer {
class Stream {
public:
// Reads up to |count| bytes starting at |offset|.
// Returns true on success (including EOS), false on failure.
// Writes the number of bytes read into |out_bytes_read|; 0 indicates EOS.
virtual bool ReadAt(int64_t offset,
uint8_t* buffer,
uint32_t count,
uint32_t* out_bytes_read) = 0;
virtual int64_t Length() const = 0;
};
} // namespace mp4_demuxer
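For context, here is a minimal sketch of how a caller might implement this interface over an in-memory buffer. It is not part of the removed sources; the BufferStream name is made up, and the header path is taken from the include used later in this patch ("mp4_demuxer/Streams.h").

#include <algorithm>
#include <cstring>
#include <utility>
#include <vector>
#include "mp4_demuxer/Streams.h"

// Serves ReadAt() requests out of a byte vector held in memory.
class BufferStream : public mp4_demuxer::Stream {
public:
  explicit BufferStream(std::vector<uint8_t> data) : data_(std::move(data)) {}

  virtual bool ReadAt(int64_t offset, uint8_t* buffer, uint32_t count,
                      uint32_t* out_bytes_read) {
    if (offset < 0 || offset > Length())
      return false;                        // invalid offset -> failure
    uint32_t available = static_cast<uint32_t>(Length() - offset);
    uint32_t to_copy = std::min(count, available);
    if (to_copy)
      memcpy(buffer, data_.data() + offset, to_copy);
    *out_bytes_read = to_copy;             // 0 signals end of stream
    return true;                           // success, including EOS
  }

  virtual int64_t Length() const { return static_cast<int64_t>(data_.size()); }

private:
  std::vector<uint8_t> data_;
};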

View File

@@ -1,273 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/aac.h"
#include <algorithm>
#include "mp4_demuxer/bit_reader.h"
namespace mp4_demuxer {
// The following conversion table is extracted from ISO 14496 Part 3 -
// Table 1.16 - Sampling Frequency Index.
static const int kFrequencyMap[] = {
96000, 88200, 64000, 48000, 44100, 32000, 24000,
22050, 16000, 12000, 11025, 8000, 7350
};
static ChannelLayout ConvertChannelConfigToLayout(uint8_t channel_config) {
switch (channel_config) {
case 1:
return CHANNEL_LAYOUT_MONO;
case 2:
return CHANNEL_LAYOUT_STEREO;
case 3:
return CHANNEL_LAYOUT_SURROUND;
case 4:
return CHANNEL_LAYOUT_4_0;
case 5:
return CHANNEL_LAYOUT_5_0;
case 6:
return CHANNEL_LAYOUT_5_1;
case 8:
return CHANNEL_LAYOUT_7_1;
default:
break;
}
return CHANNEL_LAYOUT_UNSUPPORTED;
}
AAC::AAC()
: profile_(0), frequency_index_(0), channel_config_(0), frequency_(0),
extension_frequency_(0), channel_layout_(CHANNEL_LAYOUT_UNSUPPORTED) {
}
AAC::~AAC() {
}
bool AAC::Parse(const std::vector<uint8_t>& data) {
if (data.empty())
return false;
BitReader reader(&data[0], data.size());
uint8_t extension_type = 0;
bool ps_present = false;
uint8_t extension_frequency_index = 0xff;
frequency_ = 0;
extension_frequency_ = 0;
// The following code is written according to ISO 14496 Part 3 Table 1.13 -
// Syntax of AudioSpecificConfig.
// Read base configuration
RCHECK(reader.ReadBits(5, &profile_));
RCHECK(reader.ReadBits(4, &frequency_index_));
if (frequency_index_ == 0xf)
RCHECK(reader.ReadBits(24, &frequency_));
RCHECK(reader.ReadBits(4, &channel_config_));
// Read extension configuration.
if (profile_ == 5 || profile_ == 29) {
ps_present = (profile_ == 29);
extension_type = 5;
RCHECK(reader.ReadBits(4, &extension_frequency_index));
if (extension_frequency_index == 0xf)
RCHECK(reader.ReadBits(24, &extension_frequency_));
RCHECK(reader.ReadBits(5, &profile_));
}
RCHECK(SkipDecoderGASpecificConfig(&reader));
RCHECK(SkipErrorSpecificConfig());
// Read extension configuration again
// Note: The check for 16 available bits comes from the AAC spec.
if (extension_type != 5 && reader.bits_available() >= 16) {
uint16_t sync_extension_type;
uint8_t sbr_present_flag;
uint8_t ps_present_flag;
if (reader.ReadBits(11, &sync_extension_type) &&
sync_extension_type == 0x2b7) {
if (reader.ReadBits(5, &extension_type) && extension_type == 5) {
RCHECK(reader.ReadBits(1, &sbr_present_flag));
if (sbr_present_flag) {
RCHECK(reader.ReadBits(4, &extension_frequency_index));
if (extension_frequency_index == 0xf)
RCHECK(reader.ReadBits(24, &extension_frequency_));
// Note: The check for 12 available bits comes from the AAC spec.
if (reader.bits_available() >= 12) {
RCHECK(reader.ReadBits(11, &sync_extension_type));
if (sync_extension_type == 0x548) {
RCHECK(reader.ReadBits(1, &ps_present_flag));
ps_present = ps_present_flag != 0;
}
}
}
}
}
}
if (frequency_ == 0) {
RCHECK(frequency_index_ < arraysize(kFrequencyMap));
frequency_ = kFrequencyMap[frequency_index_];
}
if (extension_frequency_ == 0 && extension_frequency_index != 0xff) {
RCHECK(extension_frequency_index < arraysize(kFrequencyMap));
extension_frequency_ = kFrequencyMap[extension_frequency_index];
}
// When Parametric Stereo is on, mono will be played as stereo.
if (ps_present && channel_config_ == 1)
channel_layout_ = CHANNEL_LAYOUT_STEREO;
else
channel_layout_ = ConvertChannelConfigToLayout(channel_config_);
audio_specific_config_.insert(audio_specific_config_.begin(), data.begin(), data.end());
return frequency_ != 0 && channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED &&
profile_ >= 1 && profile_ <= 4 && frequency_index_ != 0xf &&
channel_config_ <= 7;
}
const std::vector<uint8_t>& AAC::AudioSpecificConfig() const
{
return audio_specific_config_;
}
int AAC::GetOutputSamplesPerSecond(bool sbr_in_mimetype) const {
if (extension_frequency_ > 0)
return extension_frequency_;
if (!sbr_in_mimetype)
return frequency_;
// The following code is written according to ISO 14496 Part 3 Table 1.11 and
// Table 1.22. (Table 1.11 refers to the capping to 48000, Table 1.22 refers
// to SBR doubling the AAC sample rate.)
// TODO(acolwell) : Extend sample rate cap to 96kHz for Level 5 content.
DCHECK_GT(frequency_, 0);
return std::min(2 * frequency_, 48000);
}
ChannelLayout AAC::GetChannelLayout(bool sbr_in_mimetype) const {
// Check for implicit signalling of HE-AAC and indicate stereo output
// if the mono channel configuration is signalled.
// See ISO-14496-3 Section 1.6.6.1.2 for details about this special casing.
if (sbr_in_mimetype && channel_config_ == 1)
return CHANNEL_LAYOUT_STEREO;
return channel_layout_;
}
bool AAC::ConvertEsdsToADTS(std::vector<uint8_t>* buffer) const {
size_t size = buffer->size() + kADTSHeaderSize;
DCHECK(profile_ >= 1 && profile_ <= 4 && frequency_index_ != 0xf &&
channel_config_ <= 7);
// ADTS header uses 13 bits for packet size.
if (size >= (1 << 13))
return false;
std::vector<uint8_t>& adts = *buffer;
adts.insert(buffer->begin(), kADTSHeaderSize, 0);
adts[0] = 0xff;
adts[1] = 0xf1;
adts[2] = ((profile_ - 1) << 6) + (frequency_index_ << 2) +
(channel_config_ >> 2);
adts[3] = ((channel_config_ & 0x3) << 6) + (size >> 11);
adts[4] = (size & 0x7ff) >> 3;
adts[5] = ((size & 7) << 5) + 0x1f;
adts[6] = 0xfc;
return true;
}
// Currently this function only supports GASpecificConfig defined in
// ISO 14496 Part 3 Table 4.1 - Syntax of GASpecificConfig()
bool AAC::SkipDecoderGASpecificConfig(BitReader* bit_reader) const {
switch (profile_) {
case 1:
case 2:
case 3:
case 4:
case 6:
case 7:
case 17:
case 19:
case 20:
case 21:
case 22:
case 23:
return SkipGASpecificConfig(bit_reader);
default:
break;
}
return false;
}
bool AAC::SkipErrorSpecificConfig() const {
switch (profile_) {
case 17:
case 19:
case 20:
case 21:
case 22:
case 23:
case 24:
case 25:
case 26:
case 27:
return false;
default:
break;
}
return true;
}
// The following code is written according to ISO 14496 part 3 Table 4.1 -
// GASpecificConfig.
bool AAC::SkipGASpecificConfig(BitReader* bit_reader) const {
uint8_t extension_flag = 0;
uint8_t depends_on_core_coder;
uint16_t dummy;
RCHECK(bit_reader->ReadBits(1, &dummy)); // frameLengthFlag
RCHECK(bit_reader->ReadBits(1, &depends_on_core_coder));
if (depends_on_core_coder == 1)
RCHECK(bit_reader->ReadBits(14, &dummy)); // coreCoderDelay
RCHECK(bit_reader->ReadBits(1, &extension_flag));
RCHECK(channel_config_ != 0);
if (profile_ == 6 || profile_ == 20)
RCHECK(bit_reader->ReadBits(3, &dummy)); // layerNr
if (extension_flag) {
if (profile_ == 22) {
RCHECK(bit_reader->ReadBits(5, &dummy)); // numOfSubFrame
RCHECK(bit_reader->ReadBits(11, &dummy)); // layer_length
}
if (profile_ == 17 || profile_ == 19 || profile_ == 20 || profile_ == 23) {
RCHECK(bit_reader->ReadBits(3, &dummy)); // resilience flags
}
RCHECK(bit_reader->ReadBits(1, &dummy)); // extensionFlag3
}
return true;
}
} // namespace mp4_demuxer
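To make the bit packing in ConvertEsdsToADTS() easier to follow, here is the same 7-byte header computed by hand for one concrete configuration; the values are purely illustrative: AAC-LC (profile 2), 44100 Hz (frequency index 4), stereo (channel config 2), and a 200-byte raw frame, so the total packet size is 207 bytes.

#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const uint8_t profile = 2, frequency_index = 4, channel_config = 2;
  const size_t size = 200 + 7;  // raw frame + 7-byte ADTS header

  // Same packing as AAC::ConvertEsdsToADTS().
  uint8_t adts[7];
  adts[0] = 0xff;
  adts[1] = 0xf1;
  adts[2] = ((profile - 1) << 6) + (frequency_index << 2) + (channel_config >> 2);
  adts[3] = ((channel_config & 0x3) << 6) + (size >> 11);
  adts[4] = (size & 0x7ff) >> 3;
  adts[5] = ((size & 7) << 5) + 0x1f;
  adts[6] = 0xfc;

  // 0x50/0x80 carry profile, sample rate and channels; 0x19/0xff carry the
  // 13-bit packet size (207) plus the fixed trailing bits.
  assert(adts[2] == 0x50 && adts[3] == 0x80);
  assert(adts[4] == 0x19 && adts[5] == 0xff && adts[6] == 0xfc);
  return 0;
}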

View File

@@ -1,81 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_AAC_H_
#define MEDIA_MP4_AAC_H_
#include <vector>
#include "mp4_demuxer/basictypes.h"
#include "mp4_demuxer/channel_layout.h"
namespace mp4_demuxer {
class BitReader;
// This class parses the AAC information from decoder specific information
// embedded in the esds box in an ISO BMFF file.
// Please refer to ISO 14496 Part 3 Table 1.13 - Syntax of AudioSpecificConfig
// for more details.
class AAC {
public:
AAC();
~AAC();
// Parse the AAC config from the raw binary data embedded in esds box.
// The function will parse the data and get the ElementaryStreamDescriptor,
// then it will parse the ElementaryStreamDescriptor to get audio stream
// configurations.
bool Parse(const std::vector<uint8_t>& data);
// Gets the output sample rate for the AAC stream.
// |sbr_in_mimetype| should be set to true if the SBR mode is
// signalled in the mimetype (i.e. mp4a.40.5 in the codecs parameter).
// Returns the samples_per_second value that should be used in an
// AudioDecoderConfig.
int GetOutputSamplesPerSecond(bool sbr_in_mimetype) const;
// Gets the channel layout for the AAC stream.
// |sbr_in_mimetype| should be set to true if the SBR mode is
// signalled in the mimetype (i.e. mp4a.40.5 in the codecs parameter).
// Returns the channel_layout value that should be used in an
// AudioDecoderConfig.
ChannelLayout GetChannelLayout(bool sbr_in_mimetype) const;
// This function converts a raw AAC frame into an AAC frame with an ADTS
// header. On success, the function returns true and stores the converted data
// in the buffer. The function returns false on failure and leaves the buffer
// unchanged.
bool ConvertEsdsToADTS(std::vector<uint8_t>* buffer) const;
// Size in bytes of the ADTS header added by ConvertEsdsToADTS().
static const size_t kADTSHeaderSize = 7;
const std::vector<uint8_t>& AudioSpecificConfig() const;
private:
bool SkipDecoderGASpecificConfig(BitReader* bit_reader) const;
bool SkipErrorSpecificConfig() const;
bool SkipGASpecificConfig(BitReader* bit_reader) const;
// The following variables store the AAC specific configuration information
// that are used to generate the ADTS header.
uint8_t profile_;
uint8_t frequency_index_;
uint8_t channel_config_;
// The following variables store audio configuration information that
// can be used by Chromium. They are based on the AAC specific
// configuration but can be overridden by extensions in elementary
// stream descriptor.
int frequency_;
int extension_frequency_;
ChannelLayout channel_layout_;
std::vector<uint8_t> audio_specific_config_;
};
} // namespace mp4_demuxer
#endif // MEDIA_MP4_AAC_H_
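A sketch of the intended call sequence for this class (the function and variable names below are illustrative, not from the removed code): parse the decoder-specific info from the 'esds' box once, query the derived stream parameters, then prepend an ADTS header to each raw frame.

#include <vector>
#include "mp4_demuxer/aac.h"

// |audio_specific_config| would come from the esds box and |raw_aac_frame|
// from a sample payload; both are placeholders here.
bool PrepareAdtsFrame(const std::vector<uint8_t>& audio_specific_config,
                      std::vector<uint8_t>* raw_aac_frame,
                      bool sbr_in_mimetype) {
  mp4_demuxer::AAC aac;
  if (!aac.Parse(audio_specific_config))
    return false;

  // These are the values an AudioDecoderConfig would be built from.
  int samples_per_second = aac.GetOutputSamplesPerSecond(sbr_in_mimetype);
  mp4_demuxer::ChannelLayout layout = aac.GetChannelLayout(sbr_in_mimetype);
  (void)samples_per_second;
  (void)layout;

  // Prepends AAC::kADTSHeaderSize (7) bytes in place; fails for frames whose
  // total size would not fit in the 13-bit ADTS size field.
  return aac.ConvertEsdsToADTS(raw_aac_frame);
}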

View File

@@ -1,109 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/audio_decoder_config.h"
#include <sstream>
#include <string.h>
namespace mp4_demuxer {
static int SampleFormatToBitsPerChannel(SampleFormat sample_format) {
switch (sample_format) {
case kUnknownSampleFormat:
return 0;
case kSampleFormatU8:
return 8;
case kSampleFormatS16:
case kSampleFormatPlanarS16:
return 16;
case kSampleFormatS32:
case kSampleFormatF32:
case kSampleFormatPlanarF32:
return 32;
case kSampleFormatMax:
break;
}
//NOTREACHED() << "Invalid sample format provided: " << sample_format;
return 0;
}
AudioDecoderConfig::AudioDecoderConfig()
: codec_(kUnknownAudioCodec),
sample_format_(kUnknownSampleFormat),
bits_per_channel_(0),
channel_layout_(CHANNEL_LAYOUT_UNSUPPORTED),
samples_per_second_(0),
bytes_per_frame_(0),
is_encrypted_(false) {
}
AudioDecoderConfig::AudioDecoderConfig(AudioCodec codec,
SampleFormat sample_format,
ChannelLayout channel_layout,
int samples_per_second,
const uint8_t* extra_data,
size_t extra_data_size,
bool is_encrypted) {
Initialize(codec, sample_format, channel_layout, samples_per_second,
extra_data, extra_data_size, is_encrypted);
}
void AudioDecoderConfig::Initialize(AudioCodec codec,
SampleFormat sample_format,
ChannelLayout channel_layout,
int samples_per_second,
const uint8_t* extra_data,
size_t extra_data_size,
bool is_encrypted) {
CHECK((extra_data_size != 0) == (extra_data != NULL));
codec_ = codec;
channel_layout_ = channel_layout;
samples_per_second_ = samples_per_second;
sample_format_ = sample_format;
bits_per_channel_ = SampleFormatToBitsPerChannel(sample_format);
extra_data_.assign(extra_data, extra_data + extra_data_size);
is_encrypted_ = is_encrypted;
int channels = ChannelLayoutToChannelCount(channel_layout_);
bytes_per_frame_ = channels * bits_per_channel_ / 8;
}
AudioDecoderConfig::~AudioDecoderConfig() {}
bool AudioDecoderConfig::IsValidConfig() const {
return codec_ != kUnknownAudioCodec &&
channel_layout_ != CHANNEL_LAYOUT_UNSUPPORTED &&
bits_per_channel_ > 0 &&
bits_per_channel_ <= kMaxBitsPerSample &&
samples_per_second_ > 0 &&
samples_per_second_ <= kMaxSampleRate &&
sample_format_ != kUnknownSampleFormat;
}
bool AudioDecoderConfig::Matches(const AudioDecoderConfig& config) const {
return ((codec() == config.codec()) &&
(bits_per_channel() == config.bits_per_channel()) &&
(channel_layout() == config.channel_layout()) &&
(samples_per_second() == config.samples_per_second()) &&
(extra_data_size() == config.extra_data_size()) &&
(!extra_data() || !memcmp(extra_data(), config.extra_data(),
extra_data_size())) &&
(is_encrypted() == config.is_encrypted()) &&
(sample_format() == config.sample_format()));
}
std::string AudioDecoderConfig::AsHumanReadableString() const {
std::ostringstream s;
s << "codec: " << codec()
<< " bits/channel: " << bits_per_channel()
<< " samples/s: " << samples_per_second()
<< " has extra data? " << (extra_data() ? "true" : "false")
<< " encrypted? " << (is_encrypted() ? "true" : "false");
return s.str();
}
} // namespace mp4_demuxer
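A small worked example of the bookkeeping done in Initialize() above; the concrete parameters are illustrative. For stereo signed 16-bit audio, bytes_per_frame_ works out to 2 channels * 16 bits / 8 = 4 bytes.

#include <cassert>
#include <cstddef>
#include "mp4_demuxer/audio_decoder_config.h"

int main() {
  // Stereo, signed 16-bit, 44100 Hz, no extra data, not encrypted.
  mp4_demuxer::AudioDecoderConfig config(
      mp4_demuxer::kCodecAAC, mp4_demuxer::kSampleFormatS16,
      mp4_demuxer::CHANNEL_LAYOUT_STEREO, 44100,
      NULL, 0, false);

  assert(config.IsValidConfig());
  assert(config.bits_per_channel() == 16);
  assert(config.bytes_per_frame() == 4);  // 2 channels * 16 bits / 8
  return 0;
}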

View File

@@ -1,127 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
#define MEDIA_BASE_AUDIO_DECODER_CONFIG_H_
#include <vector>
#include "mp4_demuxer/basictypes.h"
#include "mp4_demuxer/channel_layout.h"
namespace mp4_demuxer {
enum AudioCodec {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a codec replace it with a dummy value; when adding a
// codec, do so at the bottom before kAudioCodecMax.
kUnknownAudioCodec = 0,
kCodecAAC,
kCodecMP3,
kCodecPCM,
kCodecVorbis,
kCodecFLAC,
kCodecAMR_NB,
kCodecAMR_WB,
kCodecPCM_MULAW,
kCodecGSM_MS,
kCodecPCM_S16BE,
kCodecPCM_S24BE,
kCodecOpus,
// DO NOT ADD RANDOM AUDIO CODECS!
//
// The only acceptable time to add a new codec is if there is production code
// that uses said codec in the same CL.
// Must always be last!
kAudioCodecMax
};
enum SampleFormat {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a sample format replace it with a dummy value; when
// adding a sample format, do so at the bottom before kSampleFormatMax.
kUnknownSampleFormat = 0,
kSampleFormatU8, // Unsigned 8-bit w/ bias of 128.
kSampleFormatS16, // Signed 16-bit.
kSampleFormatS32, // Signed 32-bit.
kSampleFormatF32, // Float 32-bit.
kSampleFormatPlanarS16, // Signed 16-bit planar.
kSampleFormatPlanarF32, // Float 32-bit planar.
// Must always be last!
kSampleFormatMax
};
// TODO(dalecurtis): FFmpeg API uses |bytes_per_channel| instead of
// |bits_per_channel|, we should switch over since bits are generally confusing
// to work with.
class AudioDecoderConfig {
public:
// Constructs an uninitialized object. Clients should call Initialize() with
// appropriate values before using.
AudioDecoderConfig();
// Constructs an initialized object. It is acceptable to pass in NULL for
// |extra_data|, otherwise the memory is copied.
AudioDecoderConfig(AudioCodec codec, SampleFormat sample_format,
ChannelLayout channel_layout, int samples_per_second,
const uint8_t* extra_data, size_t extra_data_size,
bool is_encrypted);
~AudioDecoderConfig();
// Resets the internal state of this object.
void Initialize(AudioCodec codec, SampleFormat sample_format,
ChannelLayout channel_layout, int samples_per_second,
const uint8_t* extra_data, size_t extra_data_size,
bool is_encrypted);
// Returns true if this object has appropriate configuration values, false
// otherwise.
bool IsValidConfig() const;
// Returns true if all fields in |config| match this config.
// Note: The contents of |extra_data_| are compared, not the raw pointers.
bool Matches(const AudioDecoderConfig& config) const;
AudioCodec codec() const { return codec_; }
int bits_per_channel() const { return bits_per_channel_; }
ChannelLayout channel_layout() const { return channel_layout_; }
int samples_per_second() const { return samples_per_second_; }
SampleFormat sample_format() const { return sample_format_; }
int bytes_per_frame() const { return bytes_per_frame_; }
// Optional byte data required to initialize audio decoders such as Vorbis
// codebooks.
const uint8_t* extra_data() const {
return extra_data_.empty() ? NULL : &extra_data_[0];
}
size_t extra_data_size() const { return extra_data_.size(); }
// Whether the audio stream is potentially encrypted.
// Note that in a potentially encrypted audio stream, individual buffers
// can be encrypted or not encrypted.
bool is_encrypted() const { return is_encrypted_; }
std::string AsHumanReadableString() const;
private:
AudioCodec codec_;
SampleFormat sample_format_;
int bits_per_channel_;
ChannelLayout channel_layout_;
int samples_per_second_;
int bytes_per_frame_;
std::vector<uint8_t> extra_data_;
bool is_encrypted_;
// Not using DISALLOW_COPY_AND_ASSIGN here intentionally to allow the compiler
// generated copy constructor and assignment operator. Since the extra data is
// typically small, the performance impact is minimal.
};
} // namespace mp4_demuxer
#endif // MEDIA_BASE_AUDIO_DECODER_CONFIG_H_

View File

@@ -1,89 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/avc.h"
#include <algorithm>
#include <vector>
#include "mp4_demuxer/box_definitions.h"
#include "mp4_demuxer/box_reader.h"
namespace mp4_demuxer {
static const uint8_t kAnnexBStartCode[] = {0, 0, 0, 1};
static const int kAnnexBStartCodeSize = 4;
static bool ConvertAVCToAnnexBInPlaceForLengthSize4(std::vector<uint8_t>* buf) {
const int kLengthSize = 4;
size_t pos = 0;
while (pos + kLengthSize < buf->size()) {
int nal_size = (*buf)[pos];
nal_size = (nal_size << 8) + (*buf)[pos+1];
nal_size = (nal_size << 8) + (*buf)[pos+2];
nal_size = (nal_size << 8) + (*buf)[pos+3];
std::copy(kAnnexBStartCode, kAnnexBStartCode + kAnnexBStartCodeSize,
buf->begin() + pos);
pos += kLengthSize + nal_size;
}
return pos == buf->size();
}
// static
bool AVC::ConvertFrameToAnnexB(int length_size, std::vector<uint8_t>* buffer) {
RCHECK(length_size == 1 || length_size == 2 || length_size == 4);
if (length_size == 4)
return ConvertAVCToAnnexBInPlaceForLengthSize4(buffer);
std::vector<uint8_t> temp;
temp.swap(*buffer);
buffer->reserve(temp.size() + 32);
size_t pos = 0;
while (pos + length_size < temp.size()) {
int nal_size = temp[pos];
if (length_size == 2) nal_size = (nal_size << 8) + temp[pos+1];
pos += length_size;
RCHECK(pos + nal_size <= temp.size());
buffer->insert(buffer->end(), kAnnexBStartCode,
kAnnexBStartCode + kAnnexBStartCodeSize);
buffer->insert(buffer->end(), temp.begin() + pos,
temp.begin() + pos + nal_size);
pos += nal_size;
}
return pos == temp.size();
}
// static
bool AVC::ConvertConfigToAnnexB(
const AVCDecoderConfigurationRecord& avc_config,
std::vector<uint8_t>* buffer) {
DCHECK(buffer->empty());
buffer->clear();
int total_size = 0;
for (size_t i = 0; i < avc_config.sps_list.size(); i++)
total_size += avc_config.sps_list[i].size() + kAnnexBStartCodeSize;
for (size_t i = 0; i < avc_config.pps_list.size(); i++)
total_size += avc_config.pps_list[i].size() + kAnnexBStartCodeSize;
buffer->reserve(total_size);
for (size_t i = 0; i < avc_config.sps_list.size(); i++) {
buffer->insert(buffer->end(), kAnnexBStartCode,
kAnnexBStartCode + kAnnexBStartCodeSize);
buffer->insert(buffer->end(), avc_config.sps_list[i].begin(),
avc_config.sps_list[i].end());
}
for (size_t i = 0; i < avc_config.pps_list.size(); i++) {
buffer->insert(buffer->end(), kAnnexBStartCode,
kAnnexBStartCode + kAnnexBStartCodeSize);
buffer->insert(buffer->end(), avc_config.pps_list[i].begin(),
avc_config.pps_list[i].end());
}
return true;
}
} // namespace mp4_demuxer
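A worked example of the length-prefix to start-code rewrite performed by ConvertFrameToAnnexB(), for a 2-byte length size; the input bytes are made up for illustration.

#include <cassert>
#include <cstdint>
#include <vector>
#include "mp4_demuxer/avc.h"

int main() {
  // One 3-byte NAL unit prefixed by a 2-byte big-endian length field (0x0003).
  std::vector<uint8_t> frame = {0x00, 0x03, 0x65, 0x88, 0x84};
  bool ok = mp4_demuxer::AVC::ConvertFrameToAnnexB(2, &frame);

  // The length prefix is replaced by the 4-byte Annex B start code 00 00 00 01.
  const std::vector<uint8_t> expected = {0x00, 0x00, 0x00, 0x01,
                                         0x65, 0x88, 0x84};
  assert(ok && frame == expected);
  return 0;
}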

View File

@@ -1,27 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_AVC_H_
#define MEDIA_MP4_AVC_H_
#include <vector>
#include "mp4_demuxer/basictypes.h"
namespace mp4_demuxer {
struct AVCDecoderConfigurationRecord;
class AVC {
public:
static bool ConvertFrameToAnnexB(int length_size, std::vector<uint8_t>* buffer);
static bool ConvertConfigToAnnexB(
const AVCDecoderConfigurationRecord& avc_config,
std::vector<uint8_t>* buffer);
};
} // namespace mp4_demuxer
#endif // MEDIA_MP4_AVC_H_

View File

@@ -1,176 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_BASIC_TYPES_H_
#define MEDIA_MP4_BASIC_TYPES_H_
#include <iostream>
#include <limits>
#include <stdint.h>
#include "prlog.h"
#ifdef PR_LOGGING
PRLogModuleInfo* GetDemuxerLog();
#endif
namespace mp4_demuxer {
// Define to enable logging.
//#define LOG_DEMUXER
#define kint32max std::numeric_limits<int32_t>::max()
#define kuint64max std::numeric_limits<uint64_t>::max()
#define kint64max std::numeric_limits<int64_t>::max()
#define OVERRIDE MOZ_OVERRIDE
#define WARN_UNUSED_RESULT
#define DCHECK(condition) \
{ \
if (!(condition)) {\
DMX_LOG("DCHECK Failed (%s) %s:%d\n", #condition, __FILE__, __LINE__); \
} \
}
#define CHECK(condition) { \
if (!(condition)) {\
DMX_LOG("CHECK Failed %s %s:%d\n", #condition, __FILE__, __LINE__); \
} \
}
#define DCHECK_LE(variable, value) DCHECK(variable <= value)
#define DCHECK_LT(variable, value) DCHECK(variable < value)
#define DCHECK_EQ(variable, value) DCHECK(variable == value)
#define DCHECK_GT(variable, value) DCHECK(variable > value)
#define DCHECK_GE(variable, value) DCHECK(variable >= value)
#define RCHECK(x) \
do { \
if (!(x)) { \
DMX_LOG("Failure while parsing MP4: %s %s:%d\n", #x, __FILE__, __LINE__); \
return false; \
} \
} while (0)
#define arraysize(f) (sizeof(f) / sizeof(*f))
#ifdef LOG_DEMUXER
#ifdef PR_LOGGING
#define DMX_LOG(...) PR_LOG(GetDemuxerLog(), PR_LOG_DEBUG, (__VA_ARGS__))
#else
#define DMX_LOG(...) (void)0
#endif
#else
// define DMX_LOG as 0, so that if(condition){DMX_LOG(...)} branches don't elicit
// a warning-as-error.
#define DMX_LOG(...) (void)0
#endif
// A macro to disallow the evil copy constructor and operator= functions
// This should be used in the private: declarations for a class
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
typedef int64_t Microseconds;
typedef int64_t Milliseconds;
#define MicrosecondsPerSecond (int64_t(1000000))
#define InfiniteMicroseconds (int64_t(-1))
#define InfiniteMilliseconds (int64_t(-1))
inline Microseconds MicrosecondsFromRational(int64_t numer, int64_t denom) {
DCHECK_LT((numer > 0 ? numer : -numer),
kint64max / MicrosecondsPerSecond);
return MicrosecondsPerSecond * numer / denom;
}
inline Milliseconds ToMilliseconds(Microseconds us) {
return (us == InfiniteMicroseconds) ? InfiniteMilliseconds : us / 1000;
}
class IntSize {
public:
IntSize() : w_(0), h_(0) {}
IntSize(const IntSize& i) : w_(i.w_), h_(i.h_) {}
IntSize(int32_t w, int32_t h) : w_(w), h_(h) {}
~IntSize() {};
int32_t width() const { return w_; }
int32_t height() const { return h_; }
int32_t GetArea() const { return w_ * h_; }
bool IsEmpty() const { return (w_ == 0) || (h_ == 0); }
private:
int32_t w_;
int32_t h_;
};
inline bool operator==(const IntSize& lhs, const IntSize& rhs) {
return lhs.width() == rhs.width() &&
lhs.height() == rhs.height();
}
class IntRect {
public:
IntRect() : x_(0), y_(0), w_(0), h_(0) {}
IntRect(const IntRect& i) : x_(i.x_), y_(i.y_), w_(i.w_), h_(i.h_) {}
IntRect(int32_t x, int32_t y, int32_t w, int32_t h) : x_(x), y_(y), w_(w), h_(h) {}
~IntRect() {};
IntSize size() const { return IntSize(w_, h_); }
int32_t x() const { return x_; }
int32_t y() const { return y_; }
int32_t width() const { return w_; }
int32_t height() const { return h_; }
int32_t GetArea() const { return w_ * h_; }
bool IsEmpty() const { return (w_ == 0) || (h_ == 0); }
int32_t right() const { return x() + width(); }
int32_t bottom() const { return y() + height(); }
private:
int32_t x_;
int32_t y_;
int32_t w_;
int32_t h_;
};
inline bool operator==(const IntRect& lhs, const IntRect& rhs) {
return lhs.x() == rhs.x() &&
lhs.y() == rhs.y() &&
lhs.width() == rhs.width() &&
lhs.height() == rhs.height();
}
enum {
// Maximum possible dimension (width or height) for any video.
kMaxDimension = (1 << 15) - 1, // 32767
// Maximum possible canvas size (width multiplied by height) for any video.
kMaxCanvas = (1 << (14 * 2)), // 16384 x 16384
// Total number of video frames which populate the pipeline.
kMaxVideoFrames = 4,
// The following limits are used by AudioParameters::IsValid().
//
// A few notes on sample rates of common formats:
// - AAC files are limited to 96 kHz.
// - MP3 files are limited to 48 kHz.
// - Vorbis used to be limited to 96 kHz, but no longer has that
// restriction.
// - Most PC audio hardware is limited to 192 kHz.
kMaxSampleRate = 192000,
kMinSampleRate = 3000,
kMaxChannels = 32,
kMaxBitsPerSample = 32,
kMaxSamplesPerPacket = kMaxSampleRate,
kMaxPacketSizeInBytes =
(kMaxBitsPerSample / 8) * kMaxChannels * kMaxSamplesPerPacket,
// This limit is used by ParamTraits<VideoCaptureParams>.
kMaxFramesPerSecond = 1000,
};
} // namespace mp4_demuxer
#endif // MEDIA_MP4_BASIC_TYPES_H_
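The time helpers above convert track times expressed in MP4 timescale units into microseconds and milliseconds. A short sanity-check sketch with illustrative numbers (building it standalone would also need the NSPR prlog.h pulled in by this header):

#include <cassert>
#include "mp4_demuxer/basictypes.h"

int main() {
  using namespace mp4_demuxer;
  // 90000 units at a timescale of 30000 units per second is three seconds.
  Microseconds us = MicrosecondsFromRational(90000, 30000);
  assert(us == 3 * MicrosecondsPerSecond);
  assert(ToMilliseconds(us) == 3000);
  return 0;
}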

View File

@@ -1,57 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/bit_reader.h"
#include <algorithm>
namespace mp4_demuxer {
BitReader::BitReader(const uint8_t* data, off_t size)
: data_(data), bytes_left_(size), num_remaining_bits_in_curr_byte_(0) {
DCHECK(data_ != nullptr && bytes_left_ > 0);
UpdateCurrByte();
}
BitReader::~BitReader() {}
int BitReader::bits_available() const {
return 8 * bytes_left_ + num_remaining_bits_in_curr_byte_;
}
bool BitReader::ReadBitsInternal(int num_bits, uint64_t* out) {
DCHECK_LE(num_bits, 64);
*out = 0;
while (num_remaining_bits_in_curr_byte_ != 0 && num_bits != 0) {
int bits_to_take = std::min(num_remaining_bits_in_curr_byte_, num_bits);
*out <<= bits_to_take;
*out += curr_byte_ >> (num_remaining_bits_in_curr_byte_ - bits_to_take);
num_bits -= bits_to_take;
num_remaining_bits_in_curr_byte_ -= bits_to_take;
curr_byte_ &= (1 << num_remaining_bits_in_curr_byte_) - 1;
if (num_remaining_bits_in_curr_byte_ == 0)
UpdateCurrByte();
}
return num_bits == 0;
}
void BitReader::UpdateCurrByte() {
DCHECK_EQ(num_remaining_bits_in_curr_byte_, 0);
if (bytes_left_ == 0)
return;
// Load a new byte and advance pointers.
curr_byte_ = *data_;
++data_;
--bytes_left_;
num_remaining_bits_in_curr_byte_ = 8;
}
} // namespace mp4_demuxer

View File

@@ -1,69 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_BIT_READER_H_
#define MEDIA_BASE_BIT_READER_H_
#include <sys/types.h>
#include "mp4_demuxer/basictypes.h"
namespace mp4_demuxer {
// A class to read bit streams.
class BitReader {
public:
// Initializes the reader to start reading at |data|; |size| is the size of
// |data| in bytes.
BitReader(const uint8_t* data, off_t size);
~BitReader();
// Reads the next |num_bits| bits from the stream and returns them in |*out|,
// right-aligned (the first bit read ends up at bit position |num_bits| - 1
// of |*out|).
// |num_bits| cannot be larger than the number of bits the type can hold.
// Returns false if the requested number of bits cannot be read (not enough
// bits left in the stream), true otherwise. Once false has been returned,
// further ReadBits() calls will keep returning false unless |num_bits| is 0.
// The type |T| has to be a primitive integer type.
template<typename T> bool ReadBits(int num_bits, T *out) {
DCHECK_LE(num_bits, static_cast<int>(sizeof(T) * 8));
uint64_t temp;
bool ret = ReadBitsInternal(num_bits, &temp);
*out = static_cast<T>(temp);
return ret;
}
// Returns the number of bits available for reading.
int bits_available() const;
private:
// Helper function used by ReadBits() to avoid inlining the bit-reading logic.
bool ReadBitsInternal(int num_bits, uint64_t* out);
// Advance to the next byte, loading it into curr_byte_.
// If the num_remaining_bits_in_curr_byte_ is 0 after this function returns,
// the stream has reached the end.
void UpdateCurrByte();
// Pointer to the next unread (not in curr_byte_) byte in the stream.
const uint8_t* data_;
// Bytes left in the stream (without the curr_byte_).
off_t bytes_left_;
// Contents of the current byte; first unread bit starting at position
// 8 - num_remaining_bits_in_curr_byte_ from MSB.
uint8_t curr_byte_;
// Number of bits remaining in curr_byte_
int num_remaining_bits_in_curr_byte_;
private:
DISALLOW_COPY_AND_ASSIGN(BitReader);
};
} // namespace mp4_demuxer
#endif // MEDIA_BASE_BIT_READER_H_
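A usage sketch for BitReader, reading the same leading fields AAC::Parse() consumes; the input 0x12 0x10 is the standard AudioSpecificConfig for AAC-LC at 44100 Hz, stereo.

#include <cassert>
#include <cstdint>
#include "mp4_demuxer/bit_reader.h"

int main() {
  // 5 bits object type, 4 bits frequency index, 4 bits channel configuration.
  const uint8_t data[] = {0x12, 0x10};
  mp4_demuxer::BitReader reader(data, sizeof(data));

  uint8_t object_type = 0, frequency_index = 0, channel_config = 0;
  bool ok = reader.ReadBits(5, &object_type) &&
            reader.ReadBits(4, &frequency_index) &&
            reader.ReadBits(4, &channel_config);

  assert(ok);
  assert(object_type == 2);              // AAC-LC
  assert(frequency_index == 4);          // 44100 Hz
  assert(channel_config == 2);           // stereo
  assert(reader.bits_available() == 3);  // 16 bits total, 13 consumed
  return 0;
}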

View File

@@ -1,754 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/box_definitions.h"
#include "mp4_demuxer/es_descriptor.h"
#include <iostream>
namespace mp4_demuxer {
FileType::FileType() {}
FileType::~FileType() {}
FourCC FileType::BoxType() const { return FOURCC_FTYP; }
bool FileType::Parse(BoxReader* reader) {
RCHECK(reader->ReadFourCC(&major_brand) && reader->Read4(&minor_version));
size_t num_brands = (reader->size() - reader->pos()) / sizeof(FourCC);
return reader->SkipBytes(sizeof(FourCC) * num_brands); // compatible_brands
}
ProtectionSystemSpecificHeader::ProtectionSystemSpecificHeader() {}
ProtectionSystemSpecificHeader::~ProtectionSystemSpecificHeader() {}
FourCC ProtectionSystemSpecificHeader::BoxType() const { return FOURCC_PSSH; }
bool ProtectionSystemSpecificHeader::Parse(BoxReader* reader) {
// Validate the box's contents and hang on to the system ID.
uint32_t size;
RCHECK(reader->ReadFullBoxHeader() &&
reader->ReadVec(&system_id, 16) &&
reader->Read4(&size) &&
reader->HasBytes(size));
// Copy the entire box, including the header, for passing to EME as initData.
DCHECK(raw_box.empty());
reader->ReadVec(&raw_box, size);
//raw_box.assign(reader->data(), reader->data() + size);
return true;
}
SampleAuxiliaryInformationOffset::SampleAuxiliaryInformationOffset() {}
SampleAuxiliaryInformationOffset::~SampleAuxiliaryInformationOffset() {}
FourCC SampleAuxiliaryInformationOffset::BoxType() const { return FOURCC_SAIO; }
bool SampleAuxiliaryInformationOffset::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader());
if (reader->flags() & 1)
RCHECK(reader->SkipBytes(8));
uint32_t count;
RCHECK(reader->Read4(&count) &&
reader->HasBytes(count * (reader->version() == 1 ? 8 : 4)));
offsets.resize(count);
for (uint32_t i = 0; i < count; i++) {
if (reader->version() == 1) {
RCHECK(reader->Read8(&offsets[i]));
} else {
RCHECK(reader->Read4Into8(&offsets[i]));
}
}
return true;
}
SampleAuxiliaryInformationSize::SampleAuxiliaryInformationSize()
: default_sample_info_size(0), sample_count(0) {
}
SampleAuxiliaryInformationSize::~SampleAuxiliaryInformationSize() {}
FourCC SampleAuxiliaryInformationSize::BoxType() const { return FOURCC_SAIZ; }
bool SampleAuxiliaryInformationSize::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader());
if (reader->flags() & 1)
RCHECK(reader->SkipBytes(8));
RCHECK(reader->Read1(&default_sample_info_size) &&
reader->Read4(&sample_count));
if (default_sample_info_size == 0)
return reader->ReadVec(&sample_info_sizes, sample_count);
return true;
}
OriginalFormat::OriginalFormat() : format(FOURCC_NULL) {}
OriginalFormat::~OriginalFormat() {}
FourCC OriginalFormat::BoxType() const { return FOURCC_FRMA; }
bool OriginalFormat::Parse(BoxReader* reader) {
return reader->ReadFourCC(&format);
}
SchemeType::SchemeType() : type(FOURCC_NULL), version(0) {}
SchemeType::~SchemeType() {}
FourCC SchemeType::BoxType() const { return FOURCC_SCHM; }
bool SchemeType::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader() &&
reader->ReadFourCC(&type) &&
reader->Read4(&version));
return true;
}
TrackEncryption::TrackEncryption()
: is_encrypted(false), default_iv_size(0) {
}
TrackEncryption::~TrackEncryption() {}
FourCC TrackEncryption::BoxType() const { return FOURCC_TENC; }
bool TrackEncryption::Parse(BoxReader* reader) {
uint8_t flag;
RCHECK(reader->ReadFullBoxHeader() &&
reader->SkipBytes(2) &&
reader->Read1(&flag) &&
reader->Read1(&default_iv_size) &&
reader->ReadVec(&default_kid, 16));
is_encrypted = (flag != 0);
if (is_encrypted) {
RCHECK(default_iv_size == 8 || default_iv_size == 16);
} else {
RCHECK(default_iv_size == 0);
}
return true;
}
SchemeInfo::SchemeInfo() {}
SchemeInfo::~SchemeInfo() {}
FourCC SchemeInfo::BoxType() const { return FOURCC_SCHI; }
bool SchemeInfo::Parse(BoxReader* reader) {
return reader->ScanChildren() && reader->ReadChild(&track_encryption);
}
ProtectionSchemeInfo::ProtectionSchemeInfo() {}
ProtectionSchemeInfo::~ProtectionSchemeInfo() {}
FourCC ProtectionSchemeInfo::BoxType() const { return FOURCC_SINF; }
bool ProtectionSchemeInfo::Parse(BoxReader* reader) {
RCHECK(reader->ScanChildren() &&
reader->ReadChild(&format) &&
reader->ReadChild(&type));
if (type.type == FOURCC_CENC)
RCHECK(reader->ReadChild(&info));
// Other protection schemes are silently ignored. Since the protection scheme
// type can't be determined until this box is opened, we return 'true' for
// non-CENC protection scheme types. It is the parent box's responsibility to
// ensure that this scheme type is a supported one.
return true;
}
MovieHeader::MovieHeader()
: creation_time(0),
modification_time(0),
timescale(0),
duration(0),
rate(-1),
volume(-1),
next_track_id(0) {}
MovieHeader::~MovieHeader() {}
FourCC MovieHeader::BoxType() const { return FOURCC_MVHD; }
bool MovieHeader::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader());
if (reader->version() == 1) {
RCHECK(reader->Read8(&creation_time) &&
reader->Read8(&modification_time) &&
reader->Read4(&timescale) &&
reader->Read8(&duration));
} else {
RCHECK(reader->Read4Into8(&creation_time) &&
reader->Read4Into8(&modification_time) &&
reader->Read4(&timescale) &&
reader->Read4Into8(&duration));
}
RCHECK(reader->Read4s(&rate) &&
reader->Read2s(&volume) &&
reader->SkipBytes(10) && // reserved
reader->SkipBytes(36) && // matrix
reader->SkipBytes(24) && // predefined zero
reader->Read4(&next_track_id));
return true;
}
TrackHeader::TrackHeader()
: creation_time(0),
modification_time(0),
track_id(0),
duration(0),
layer(-1),
alternate_group(-1),
volume(-1),
width(0),
height(0) {}
TrackHeader::~TrackHeader() {}
FourCC TrackHeader::BoxType() const { return FOURCC_TKHD; }
bool TrackHeader::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader());
if (reader->version() == 1) {
RCHECK(reader->Read8(&creation_time) &&
reader->Read8(&modification_time) &&
reader->Read4(&track_id) &&
reader->SkipBytes(4) && // reserved
reader->Read8(&duration));
} else {
RCHECK(reader->Read4Into8(&creation_time) &&
reader->Read4Into8(&modification_time) &&
reader->Read4(&track_id) &&
reader->SkipBytes(4) && // reserved
reader->Read4Into8(&duration));
}
RCHECK(reader->SkipBytes(8) && // reserved
reader->Read2s(&layer) &&
reader->Read2s(&alternate_group) &&
reader->Read2s(&volume) &&
reader->SkipBytes(2) && // reserved
reader->SkipBytes(36) && // matrix
reader->Read4(&width) &&
reader->Read4(&height));
width >>= 16;
height >>= 16;
return true;
}
SampleDescription::SampleDescription() : type(kInvalid) {}
SampleDescription::~SampleDescription() {}
FourCC SampleDescription::BoxType() const { return FOURCC_STSD; }
bool SampleDescription::Parse(BoxReader* reader) {
uint32_t count;
RCHECK(reader->SkipBytes(4) &&
reader->Read4(&count));
video_entries.clear();
audio_entries.clear();
// Note: this value is preset before scanning begins. See the comment in
// Media::Parse().
if (type == kVideo) {
RCHECK(reader->ReadAllChildren(&video_entries));
} else if (type == kAudio) {
RCHECK(reader->ReadAllChildren(&audio_entries));
}
return true;
}
SampleTable::SampleTable() {}
SampleTable::~SampleTable() {}
FourCC SampleTable::BoxType() const { return FOURCC_STBL; }
bool SampleTable::Parse(BoxReader* reader) {
return reader->ScanChildren() &&
reader->ReadChild(&description);
}
EditList::EditList() {}
EditList::~EditList() {}
FourCC EditList::BoxType() const { return FOURCC_ELST; }
bool EditList::Parse(BoxReader* reader) {
uint32_t count;
RCHECK(reader->ReadFullBoxHeader() && reader->Read4(&count));
if (reader->version() == 1) {
RCHECK(reader->HasBytes(count * 20));
} else {
RCHECK(reader->HasBytes(count * 12));
}
edits.resize(count);
for (std::vector<EditListEntry>::iterator edit = edits.begin();
edit != edits.end(); ++edit) {
if (reader->version() == 1) {
RCHECK(reader->Read8(&edit->segment_duration) &&
reader->Read8s(&edit->media_time));
} else {
RCHECK(reader->Read4Into8(&edit->segment_duration) &&
reader->Read4sInto8s(&edit->media_time));
}
RCHECK(reader->Read2s(&edit->media_rate_integer) &&
reader->Read2s(&edit->media_rate_fraction));
}
return true;
}
Edit::Edit() {}
Edit::~Edit() {}
FourCC Edit::BoxType() const { return FOURCC_EDTS; }
bool Edit::Parse(BoxReader* reader) {
return reader->ScanChildren() && reader->ReadChild(&list);
}
HandlerReference::HandlerReference() : type(kInvalid) {}
HandlerReference::~HandlerReference() {}
FourCC HandlerReference::BoxType() const { return FOURCC_HDLR; }
bool HandlerReference::Parse(BoxReader* reader) {
FourCC hdlr_type;
RCHECK(reader->SkipBytes(8) && reader->ReadFourCC(&hdlr_type));
// Note: remaining fields in box ignored
if (hdlr_type == FOURCC_VIDE) {
type = kVideo;
} else if (hdlr_type == FOURCC_SOUN) {
type = kAudio;
} else {
type = kInvalid;
}
return true;
}
AVCDecoderConfigurationRecord::AVCDecoderConfigurationRecord()
: version(0),
profile_indication(0),
profile_compatibility(0),
avc_level(0),
length_size(0) {}
AVCDecoderConfigurationRecord::~AVCDecoderConfigurationRecord() {}
FourCC AVCDecoderConfigurationRecord::BoxType() const { return FOURCC_AVCC; }
bool AVCDecoderConfigurationRecord::Parse(BoxReader* reader) {
RCHECK(reader->Read1(&version) && version == 1 &&
reader->Read1(&profile_indication) &&
reader->Read1(&profile_compatibility) &&
reader->Read1(&avc_level));
uint8_t length_size_minus_one;
RCHECK(reader->Read1(&length_size_minus_one) &&
(length_size_minus_one & 0xfc) == 0xfc);
length_size = (length_size_minus_one & 0x3) + 1;
uint8_t num_sps;
RCHECK(reader->Read1(&num_sps) && (num_sps & 0xe0) == 0xe0);
num_sps &= 0x1f;
sps_list.resize(num_sps);
for (int i = 0; i < num_sps; i++) {
uint16_t sps_length;
RCHECK(reader->Read2(&sps_length) &&
reader->ReadVec(&sps_list[i], sps_length));
}
uint8_t num_pps;
RCHECK(reader->Read1(&num_pps));
pps_list.resize(num_pps);
for (int i = 0; i < num_pps; i++) {
uint16_t pps_length;
RCHECK(reader->Read2(&pps_length) &&
reader->ReadVec(&pps_list[i], pps_length));
}
return true;
}
PixelAspectRatioBox::PixelAspectRatioBox() : h_spacing(1), v_spacing(1) {}
PixelAspectRatioBox::~PixelAspectRatioBox() {}
FourCC PixelAspectRatioBox::BoxType() const { return FOURCC_PASP; }
bool PixelAspectRatioBox::Parse(BoxReader* reader) {
RCHECK(reader->Read4(&h_spacing) &&
reader->Read4(&v_spacing));
return true;
}
VideoSampleEntry::VideoSampleEntry()
: format(FOURCC_NULL),
data_reference_index(0),
width(0),
height(0) {}
VideoSampleEntry::~VideoSampleEntry() {}
FourCC VideoSampleEntry::BoxType() const {
DMX_LOG("VideoSampleEntry should be parsed according to the "
"handler type recovered in its Media ancestor.\n");
return FOURCC_NULL;
}
bool VideoSampleEntry::Parse(BoxReader* reader) {
format = reader->type();
RCHECK(reader->SkipBytes(6) &&
reader->Read2(&data_reference_index) &&
reader->SkipBytes(16) &&
reader->Read2(&width) &&
reader->Read2(&height) &&
reader->SkipBytes(50));
RCHECK(reader->ScanChildren() &&
reader->MaybeReadChild(&pixel_aspect));
if (format == FOURCC_ENCV) {
// Continue scanning until a recognized protection scheme is found, or until
// we run out of protection schemes.
while (sinf.type.type != FOURCC_CENC) {
if (!reader->ReadChild(&sinf))
return false;
}
}
if (format == FOURCC_AVC1 ||
(format == FOURCC_ENCV && sinf.format.format == FOURCC_AVC1)) {
RCHECK(reader->ReadChild(&avcc));
}
return true;
}
ElementaryStreamDescriptor::ElementaryStreamDescriptor()
: object_type(kForbidden) {}
ElementaryStreamDescriptor::~ElementaryStreamDescriptor() {}
FourCC ElementaryStreamDescriptor::BoxType() const {
return FOURCC_ESDS;
}
bool ElementaryStreamDescriptor::Parse(BoxReader* reader) {
std::vector<uint8_t> data;
ESDescriptor es_desc;
RCHECK(reader->ReadFullBoxHeader());
RCHECK(reader->ReadVec(&data, reader->size() - reader->pos()));
RCHECK(es_desc.Parse(data));
object_type = es_desc.object_type();
RCHECK(aac.Parse(es_desc.decoder_specific_info()));
return true;
}
AudioSampleEntry::AudioSampleEntry()
: format(FOURCC_NULL),
data_reference_index(0),
channelcount(0),
samplesize(0),
samplerate(0) {}
AudioSampleEntry::~AudioSampleEntry() {}
FourCC AudioSampleEntry::BoxType() const {
DMX_LOG("AudioSampleEntry should be parsed according to the "
"handler type recovered in its Media ancestor.\n");
return FOURCC_NULL;
}
bool AudioSampleEntry::Parse(BoxReader* reader) {
format = reader->type();
RCHECK(reader->SkipBytes(6) &&
reader->Read2(&data_reference_index) &&
reader->SkipBytes(8) &&
reader->Read2(&channelcount) &&
reader->Read2(&samplesize) &&
reader->SkipBytes(4) &&
reader->Read4(&samplerate));
// Convert from 16.16 fixed point to integer
samplerate >>= 16;
RCHECK(reader->ScanChildren());
if (format == FOURCC_ENCA) {
// Continue scanning until a recognized protection scheme is found, or until
// we run out of protection schemes.
while (sinf.type.type != FOURCC_CENC) {
if (!reader->ReadChild(&sinf))
return false;
}
}
RCHECK(reader->ReadChild(&esds));
return true;
}
MediaHeader::MediaHeader()
: creation_time(0),
modification_time(0),
timescale(0),
duration(0) {}
MediaHeader::~MediaHeader() {}
FourCC MediaHeader::BoxType() const { return FOURCC_MDHD; }
bool MediaHeader::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader());
if (reader->version() == 1) {
RCHECK(reader->Read8(&creation_time) &&
reader->Read8(&modification_time) &&
reader->Read4(&timescale) &&
reader->Read8(&duration));
} else {
RCHECK(reader->Read4Into8(&creation_time) &&
reader->Read4Into8(&modification_time) &&
reader->Read4(&timescale) &&
reader->Read4Into8(&duration));
}
// Skip language information
return reader->SkipBytes(4);
}
MediaInformation::MediaInformation() {}
MediaInformation::~MediaInformation() {}
FourCC MediaInformation::BoxType() const { return FOURCC_MINF; }
bool MediaInformation::Parse(BoxReader* reader) {
return reader->ScanChildren() &&
reader->ReadChild(&sample_table);
}
Media::Media() {}
Media::~Media() {}
FourCC Media::BoxType() const { return FOURCC_MDIA; }
bool Media::Parse(BoxReader* reader) {
RCHECK(reader->ScanChildren() &&
reader->ReadChild(&header) &&
reader->ReadChild(&handler));
// Maddeningly, the HandlerReference box specifies how to parse the
// SampleDescription box, making the latter the only box (of those that we
// support) which cannot be parsed correctly on its own (or even with
// information from its strict ancestor tree). We thus copy the handler type
// to the sample description box *before* parsing it to provide this
// information while parsing.
information.sample_table.description.type = handler.type;
RCHECK(reader->ReadChild(&information));
return true;
}
Track::Track() {}
Track::~Track() {}
FourCC Track::BoxType() const { return FOURCC_TRAK; }
bool Track::Parse(BoxReader* reader) {
RCHECK(reader->ScanChildren() &&
reader->ReadChild(&header) &&
reader->ReadChild(&media) &&
reader->MaybeReadChild(&edit));
return true;
}
MovieExtendsHeader::MovieExtendsHeader() : fragment_duration(0) {}
MovieExtendsHeader::~MovieExtendsHeader() {}
FourCC MovieExtendsHeader::BoxType() const { return FOURCC_MEHD; }
bool MovieExtendsHeader::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader());
if (reader->version() == 1) {
RCHECK(reader->Read8(&fragment_duration));
} else {
RCHECK(reader->Read4Into8(&fragment_duration));
}
return true;
}
TrackExtends::TrackExtends()
: track_id(0),
default_sample_description_index(0),
default_sample_duration(0),
default_sample_size(0),
default_sample_flags(0) {}
TrackExtends::~TrackExtends() {}
FourCC TrackExtends::BoxType() const { return FOURCC_TREX; }
bool TrackExtends::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader() &&
reader->Read4(&track_id) &&
reader->Read4(&default_sample_description_index) &&
reader->Read4(&default_sample_duration) &&
reader->Read4(&default_sample_size) &&
reader->Read4(&default_sample_flags));
return true;
}
MovieExtends::MovieExtends() {}
MovieExtends::~MovieExtends() {}
FourCC MovieExtends::BoxType() const { return FOURCC_MVEX; }
bool MovieExtends::Parse(BoxReader* reader) {
header.fragment_duration = 0;
return reader->ScanChildren() &&
reader->MaybeReadChild(&header) &&
reader->ReadChildren(&tracks);
}
Movie::Movie() : fragmented(false) {}
Movie::~Movie() {}
FourCC Movie::BoxType() const { return FOURCC_MOOV; }
bool Movie::Parse(BoxReader* reader) {
return reader->ScanChildren() &&
reader->ReadChild(&header) &&
reader->ReadChildren(&tracks) &&
// Media Source specific: 'mvex' required
reader->ReadChild(&extends) &&
reader->MaybeReadChildren(&pssh);
}
TrackFragmentDecodeTime::TrackFragmentDecodeTime() : decode_time(0) {}
TrackFragmentDecodeTime::~TrackFragmentDecodeTime() {}
FourCC TrackFragmentDecodeTime::BoxType() const { return FOURCC_TFDT; }
bool TrackFragmentDecodeTime::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader());
if (reader->version() == 1)
return reader->Read8(&decode_time);
else
return reader->Read4Into8(&decode_time);
}
MovieFragmentHeader::MovieFragmentHeader() : sequence_number(0) {}
MovieFragmentHeader::~MovieFragmentHeader() {}
FourCC MovieFragmentHeader::BoxType() const { return FOURCC_MFHD; }
bool MovieFragmentHeader::Parse(BoxReader* reader) {
return reader->SkipBytes(4) && reader->Read4(&sequence_number);
}
TrackFragmentHeader::TrackFragmentHeader()
: track_id(0),
sample_description_index(0),
default_sample_duration(0),
default_sample_size(0),
default_sample_flags(0),
has_default_sample_flags(false) {}
TrackFragmentHeader::~TrackFragmentHeader() {}
FourCC TrackFragmentHeader::BoxType() const { return FOURCC_TFHD; }
bool TrackFragmentHeader::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader() && reader->Read4(&track_id));
// Media Source specific: reject tracks that set 'base-data-offset-present'.
// Although the Media Source requires that 'default-base-is-moof' (14496-12
// Amendment 2) be set, we omit this check as many otherwise-valid files in
// the wild don't set it.
//
// RCHECK((flags & 0x020000) && !(flags & 0x1));
RCHECK(!(reader->flags() & 0x1));
if (reader->flags() & 0x2) {
RCHECK(reader->Read4(&sample_description_index));
} else {
sample_description_index = 0;
}
if (reader->flags() & 0x8) {
RCHECK(reader->Read4(&default_sample_duration));
} else {
default_sample_duration = 0;
}
if (reader->flags() & 0x10) {
RCHECK(reader->Read4(&default_sample_size));
} else {
default_sample_size = 0;
}
if (reader->flags() & 0x20) {
RCHECK(reader->Read4(&default_sample_flags));
has_default_sample_flags = true;
} else {
has_default_sample_flags = false;
}
return true;
}
TrackFragmentRun::TrackFragmentRun()
: sample_count(0), data_offset(0) {}
TrackFragmentRun::~TrackFragmentRun() {}
FourCC TrackFragmentRun::BoxType() const { return FOURCC_TRUN; }
bool TrackFragmentRun::Parse(BoxReader* reader) {
RCHECK(reader->ReadFullBoxHeader() &&
reader->Read4(&sample_count));
const uint32_t flags = reader->flags();
bool data_offset_present = (flags & 0x1) != 0;
bool first_sample_flags_present = (flags & 0x4) != 0;
bool sample_duration_present = (flags & 0x100) != 0;
bool sample_size_present = (flags & 0x200) != 0;
bool sample_flags_present = (flags & 0x400) != 0;
bool sample_composition_time_offsets_present = (flags & 0x800) != 0;
if (data_offset_present) {
RCHECK(reader->Read4(&data_offset));
} else {
data_offset = 0;
}
uint32_t first_sample_flags;
if (first_sample_flags_present)
RCHECK(reader->Read4(&first_sample_flags));
int fields = sample_duration_present + sample_size_present +
sample_flags_present + sample_composition_time_offsets_present;
RCHECK(reader->HasBytes(fields * sample_count));
if (sample_duration_present)
sample_durations.resize(sample_count);
if (sample_size_present)
sample_sizes.resize(sample_count);
if (sample_flags_present)
sample_flags.resize(sample_count);
if (sample_composition_time_offsets_present)
sample_composition_time_offsets.resize(sample_count);
for (uint32_t i = 0; i < sample_count; ++i) {
if (sample_duration_present)
RCHECK(reader->Read4(&sample_durations[i]));
if (sample_size_present)
RCHECK(reader->Read4(&sample_sizes[i]));
if (sample_flags_present)
RCHECK(reader->Read4(&sample_flags[i]));
if (sample_composition_time_offsets_present)
RCHECK(reader->Read4s(&sample_composition_time_offsets[i]));
}
if (first_sample_flags_present) {
if (sample_flags.size() == 0) {
sample_flags.push_back(first_sample_flags);
} else {
sample_flags[0] = first_sample_flags;
}
}
return true;
}
TrackFragment::TrackFragment() {}
TrackFragment::~TrackFragment() {}
FourCC TrackFragment::BoxType() const { return FOURCC_TRAF; }
bool TrackFragment::Parse(BoxReader* reader) {
return reader->ScanChildren() &&
reader->ReadChild(&header) &&
// Media Source specific: 'tfdt' required
reader->ReadChild(&decode_time) &&
reader->MaybeReadChildren(&runs) &&
reader->MaybeReadChild(&auxiliary_offset) &&
reader->MaybeReadChild(&auxiliary_size);
}
MovieFragment::MovieFragment() {}
MovieFragment::~MovieFragment() {}
FourCC MovieFragment::BoxType() const { return FOURCC_MOOF; }
bool MovieFragment::Parse(BoxReader* reader) {
RCHECK(reader->ScanChildren() &&
reader->ReadChild(&header) &&
reader->ReadChildren(&tracks) &&
reader->MaybeReadChildren(&pssh));
return true;
}
} // namespace mp4_demuxer
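TrackFragmentRun::Parse() above is driven entirely by the 'trun' flag word. Decoding a representative flags value by hand (0x000701, chosen for illustration: data-offset plus per-sample duration, size and flags) looks like this:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t flags = 0x000701;
  bool data_offset_present = (flags & 0x1) != 0;
  bool first_sample_flags_present = (flags & 0x4) != 0;
  bool sample_duration_present = (flags & 0x100) != 0;
  bool sample_size_present = (flags & 0x200) != 0;
  bool sample_flags_present = (flags & 0x400) != 0;
  bool sample_composition_time_offsets_present = (flags & 0x800) != 0;

  assert(data_offset_present && sample_duration_present &&
         sample_size_present && sample_flags_present);
  assert(!first_sample_flags_present &&
         !sample_composition_time_offsets_present);
  return 0;
}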

View File

@@ -1,349 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_BOX_DEFINITIONS_H_
#define MEDIA_MP4_BOX_DEFINITIONS_H_
#include <string>
#include <vector>
#include "mp4_demuxer/box_reader.h"
#include "mp4_demuxer/basictypes.h"
#include "mp4_demuxer/aac.h"
#include "mp4_demuxer/avc.h"
#include "mp4_demuxer/box_reader.h"
#include "mp4_demuxer/fourccs.h"
namespace mp4_demuxer {
enum TrackType {
kInvalid = 0,
kVideo,
kAudio,
kHint
};
#define DECLARE_BOX_METHODS(T) \
T(); \
virtual ~T(); \
virtual bool Parse(BoxReader* reader) OVERRIDE; \
virtual FourCC BoxType() const OVERRIDE;
struct FileType : Box {
DECLARE_BOX_METHODS(FileType);
FourCC major_brand;
uint32_t minor_version;
};
struct ProtectionSystemSpecificHeader : Box {
DECLARE_BOX_METHODS(ProtectionSystemSpecificHeader);
std::vector<uint8_t> system_id;
std::vector<uint8_t> raw_box;
};
struct SampleAuxiliaryInformationOffset : Box {
DECLARE_BOX_METHODS(SampleAuxiliaryInformationOffset);
std::vector<uint64_t> offsets;
};
struct SampleAuxiliaryInformationSize : Box {
DECLARE_BOX_METHODS(SampleAuxiliaryInformationSize);
uint8_t default_sample_info_size;
uint32_t sample_count;
std::vector<uint8_t> sample_info_sizes;
};
struct OriginalFormat : Box {
DECLARE_BOX_METHODS(OriginalFormat);
FourCC format;
};
struct SchemeType : Box {
DECLARE_BOX_METHODS(SchemeType);
FourCC type;
uint32_t version;
};
struct TrackEncryption : Box {
DECLARE_BOX_METHODS(TrackEncryption);
// Note: this definition is specific to the CENC protection type.
bool is_encrypted;
uint8_t default_iv_size;
std::vector<uint8_t> default_kid;
};
struct SchemeInfo : Box {
DECLARE_BOX_METHODS(SchemeInfo);
TrackEncryption track_encryption;
};
struct ProtectionSchemeInfo : Box {
DECLARE_BOX_METHODS(ProtectionSchemeInfo);
OriginalFormat format;
SchemeType type;
SchemeInfo info;
};
struct MovieHeader : Box {
DECLARE_BOX_METHODS(MovieHeader);
uint64_t creation_time;
uint64_t modification_time;
uint32_t timescale;
uint64_t duration;
int32_t rate;
int16_t volume;
uint32_t next_track_id;
};
struct TrackHeader : Box {
DECLARE_BOX_METHODS(TrackHeader);
uint64_t creation_time;
uint64_t modification_time;
uint32_t track_id;
uint64_t duration;
int16_t layer;
int16_t alternate_group;
int16_t volume;
uint32_t width;
uint32_t height;
};
struct EditListEntry {
uint64_t segment_duration;
int64_t media_time;
int16_t media_rate_integer;
int16_t media_rate_fraction;
};
struct EditList : Box {
DECLARE_BOX_METHODS(EditList);
std::vector<EditListEntry> edits;
};
struct Edit : Box {
DECLARE_BOX_METHODS(Edit);
EditList list;
};
struct HandlerReference : Box {
DECLARE_BOX_METHODS(HandlerReference);
TrackType type;
};
struct AVCDecoderConfigurationRecord : Box {
DECLARE_BOX_METHODS(AVCDecoderConfigurationRecord);
uint8_t version;
uint8_t profile_indication;
uint8_t profile_compatibility;
uint8_t avc_level;
uint8_t length_size;
typedef std::vector<uint8_t> SPS;
typedef std::vector<uint8_t> PPS;
std::vector<SPS> sps_list;
std::vector<PPS> pps_list;
};
struct PixelAspectRatioBox : Box {
DECLARE_BOX_METHODS(PixelAspectRatioBox);
uint32_t h_spacing;
uint32_t v_spacing;
};
struct VideoSampleEntry : Box {
DECLARE_BOX_METHODS(VideoSampleEntry);
FourCC format;
uint16_t data_reference_index;
uint16_t width;
uint16_t height;
PixelAspectRatioBox pixel_aspect;
ProtectionSchemeInfo sinf;
// Currently expected to be present regardless of format.
AVCDecoderConfigurationRecord avcc;
};
struct ElementaryStreamDescriptor : Box {
DECLARE_BOX_METHODS(ElementaryStreamDescriptor);
uint8_t object_type;
AAC aac;
};
struct AudioSampleEntry : Box {
DECLARE_BOX_METHODS(AudioSampleEntry);
FourCC format;
uint16_t data_reference_index;
uint16_t channelcount;
uint16_t samplesize;
uint32_t samplerate;
ProtectionSchemeInfo sinf;
ElementaryStreamDescriptor esds;
};
struct SampleDescription : Box {
DECLARE_BOX_METHODS(SampleDescription);
TrackType type;
std::vector<VideoSampleEntry> video_entries;
std::vector<AudioSampleEntry> audio_entries;
};
struct SampleTable : Box {
DECLARE_BOX_METHODS(SampleTable);
// Media Source specific: we ignore many of the sub-boxes in this box,
// including some that are required to be present in the BMFF spec. This
// includes the 'stts', 'stsc', and 'stco' boxes, which must contain no
// samples for the file to be compliant.
SampleDescription description;
};
struct MediaHeader : Box {
DECLARE_BOX_METHODS(MediaHeader);
uint64_t creation_time;
uint64_t modification_time;
uint32_t timescale;
uint64_t duration;
};
struct MediaInformation : Box {
DECLARE_BOX_METHODS(MediaInformation);
SampleTable sample_table;
};
struct Media : Box {
DECLARE_BOX_METHODS(Media);
MediaHeader header;
HandlerReference handler;
MediaInformation information;
};
struct Track : Box {
DECLARE_BOX_METHODS(Track);
TrackHeader header;
Media media;
Edit edit;
};
struct MovieExtendsHeader : Box {
DECLARE_BOX_METHODS(MovieExtendsHeader);
uint64_t fragment_duration;
};
struct TrackExtends : Box {
DECLARE_BOX_METHODS(TrackExtends);
uint32_t track_id;
uint32_t default_sample_description_index;
uint32_t default_sample_duration;
uint32_t default_sample_size;
uint32_t default_sample_flags;
};
struct MovieExtends : Box {
DECLARE_BOX_METHODS(MovieExtends);
MovieExtendsHeader header;
std::vector<TrackExtends> tracks;
};
struct Movie : Box {
DECLARE_BOX_METHODS(Movie);
bool fragmented;
MovieHeader header;
MovieExtends extends;
std::vector<Track> tracks;
std::vector<ProtectionSystemSpecificHeader> pssh;
};
struct TrackFragmentDecodeTime : Box {
DECLARE_BOX_METHODS(TrackFragmentDecodeTime);
uint64_t decode_time;
};
struct MovieFragmentHeader : Box {
DECLARE_BOX_METHODS(MovieFragmentHeader);
uint32_t sequence_number;
};
struct TrackFragmentHeader : Box {
DECLARE_BOX_METHODS(TrackFragmentHeader);
uint32_t track_id;
uint32_t sample_description_index;
uint32_t default_sample_duration;
uint32_t default_sample_size;
uint32_t default_sample_flags;
// As 'flags' might be all zero, we cannot use zeroness alone to identify
// when default_sample_flags wasn't specified, unlike the other values.
bool has_default_sample_flags;
};
struct TrackFragmentRun : Box {
DECLARE_BOX_METHODS(TrackFragmentRun);
uint32_t sample_count;
uint32_t data_offset;
std::vector<uint32_t> sample_flags;
std::vector<uint32_t> sample_sizes;
std::vector<uint32_t> sample_durations;
std::vector<int32_t> sample_composition_time_offsets;
};
struct TrackFragment : Box {
DECLARE_BOX_METHODS(TrackFragment);
TrackFragmentHeader header;
std::vector<TrackFragmentRun> runs;
TrackFragmentDecodeTime decode_time;
SampleAuxiliaryInformationOffset auxiliary_offset;
SampleAuxiliaryInformationSize auxiliary_size;
};
struct MovieFragment : Box {
DECLARE_BOX_METHODS(MovieFragment);
MovieFragmentHeader header;
std::vector<TrackFragment> tracks;
std::vector<ProtectionSystemSpecificHeader> pssh;
};
#undef DECLARE_BOX
} // namespace mp4_demuxer
#endif // MEDIA_MP4_BOX_DEFINITIONS_H_

View File

@ -1,263 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/box_reader.h"
#include <string.h>
#include <algorithm>
#include <map>
#include <set>
#include <memory>
#include <iostream>
#include <assert.h>
#include "mp4_demuxer/box_definitions.h"
#include "mp4_demuxer/Streams.h"
using namespace std;
namespace mp4_demuxer {
Box::~Box() {}
bool StreamReader::Read1(uint8_t* v) {
RCHECK(HasBytes(1));
assert(start_ + pos_ <= stream_->Length());
uint32_t bytesRead = 0;
if (!stream_->ReadAt(start_ + pos_, v, 1, &bytesRead) || bytesRead != 1) {
return false;
}
pos_ += bytesRead;
return true;
}
// Internal implementation of multi-byte reads
template<typename T> bool StreamReader::Read(T* v) {
RCHECK(HasBytes(sizeof(T)));
T tmp = 0;
for (size_t i = 0; i < sizeof(T); i++) {
tmp <<= 8;
uint8_t byte;
Read1(&byte);
tmp += byte;
}
*v = tmp;
return true;
}
bool StreamReader::Read2(uint16_t* v) { return Read(v); }
bool StreamReader::Read2s(int16_t* v) { return Read(v); }
bool StreamReader::Read4(uint32_t* v) { return Read(v); }
bool StreamReader::Read4s(int32_t* v) { return Read(v); }
bool StreamReader::Read8(uint64_t* v) { return Read(v); }
bool StreamReader::Read8s(int64_t* v) { return Read(v); }
bool StreamReader::ReadFourCC(FourCC* v) {
return Read4(reinterpret_cast<uint32_t*>(v));
}
bool StreamReader::ReadVec(std::vector<uint8_t>* vec, int count) {
RCHECK(HasBytes(count));
vec->resize(count);
assert(start_ + pos_ <= stream_->Length());
uint32_t bytesRead = 0;
if (!stream_->ReadAt(start_ + pos_, vec->data(), count, &bytesRead)) {
return false;
}
pos_ += bytesRead;
return true;
}
bool StreamReader::SkipBytes(int bytes) {
RCHECK(HasBytes(bytes));
pos_ += bytes;
return true;
}
bool StreamReader::Read4Into8(uint64_t* v) {
uint32_t tmp;
RCHECK(Read4(&tmp));
*v = tmp;
return true;
}
bool StreamReader::Read4sInto8s(int64_t* v) {
// Beware of the need for sign extension.
int32_t tmp;
RCHECK(Read4s(&tmp));
*v = tmp;
return true;
}
int64_t StreamReader::size() const {
return size_;
}
int64_t StreamReader::pos() const {
return pos_;
}
BoxReader::BoxReader(Stream* stream, int64_t offset, int64_t size)
: StreamReader(stream, offset, size),
type_(FOURCC_NULL),
version_(0),
flags_(0),
scanned_(false) {
}
BoxReader::~BoxReader() {
if (scanned_ && !children_.empty()) {
for (ChildMap::iterator itr = children_.begin();
itr != children_.end(); ++itr) {
auto reader = itr->second;
DMX_LOG("Skipping unknown box: '%s' reader type'%s'\n",
FourCCToString(itr->first).c_str(),
FourCCToString(reader.type()).c_str());
}
}
}
// static
BoxReader* BoxReader::ReadTopLevelBox(Stream* stream,
int64_t offset,
bool* err) {
nsAutoPtr<BoxReader> reader(new BoxReader(stream, offset, stream->Length()));
if (!reader->ReadHeader(err))
return NULL;
if (!IsValidTopLevelBox(reader->type())) {
*err = true;
return NULL;
}
if (reader->size() <= stream->Length())
return reader.forget();
return NULL;
}
// static
bool BoxReader::StartTopLevelBox(Stream* stream,
int64_t offset,
FourCC* type,
int* box_size) {
assert(stream->Length() > offset);
BoxReader reader(stream, offset, stream->Length() - offset);
bool err = false;
if (!reader.ReadHeader(&err)) return false;
if (!IsValidTopLevelBox(reader.type()) || err) {
return false;
}
*type = reader.type();
*box_size = reader.size();
return true;
}
// static
bool BoxReader::IsValidTopLevelBox(const FourCC& type) {
switch (type) {
case FOURCC_FTYP:
case FOURCC_PDIN:
case FOURCC_BLOC:
case FOURCC_MOOV:
case FOURCC_MOOF:
case FOURCC_MFRA:
case FOURCC_MDAT:
case FOURCC_FREE:
case FOURCC_SKIP:
case FOURCC_META:
case FOURCC_MECO:
case FOURCC_STYP:
case FOURCC_SIDX:
case FOURCC_SSIX:
case FOURCC_PRFT:
return true;
default:
// Hex is used to show nonprintable characters and aid in debugging
DMX_LOG("Unrecognized top-level box type 0x%x\n", type);
return false;
}
}
bool BoxReader::ScanChildren() {
DCHECK(!scanned_);
scanned_ = true;
bool err = false;
while (pos() < size()) {
BoxReader child(stream_, start_ + pos_, size_ - pos_);
if (!child.ReadHeader(&err)) break;
assert(child.size() < size());
children_.insert(std::pair<FourCC, BoxReader>(child.type(), child));
pos_ += child.size();
}
DCHECK(!err);
return !err && pos() == size();
}
bool BoxReader::ReadChild(Box* child) {
DCHECK(scanned_);
FourCC child_type = child->BoxType();
ChildMap::iterator itr = children_.find(child_type);
if (itr == children_.end()) {
DMX_LOG("No child of type %s\n", FourCCToString(child_type).c_str());
}
RCHECK(itr != children_.end());
DMX_LOG("Found a %s box\n", FourCCToString(child_type).c_str());
RCHECK(child->Parse(&itr->second));
children_.erase(itr);
return true;
}
bool BoxReader::MaybeReadChild(Box* child) {
if (!children_.count(child->BoxType())) return true;
return ReadChild(child);
}
bool BoxReader::ReadFullBoxHeader() {
uint32_t vflags;
RCHECK(Read4(&vflags));
version_ = vflags >> 24;
flags_ = vflags & 0xffffff;
return true;
}
bool BoxReader::ReadHeader(bool* err) {
uint64_t size = 0;
*err = false;
if (!HasBytes(8)) return false;
CHECK(Read4Into8(&size) && ReadFourCC(&type_));
DMX_LOG("BoxReader::ReadHeader() read %s size=%d\n", FourCCToString(type_).c_str(), size);
if (size == 0) {
// Media Source specific: we do not support boxes that run to EOS.
*err = true;
return false;
} else if (size == 1) {
if (!HasBytes(8)) return false;
CHECK(Read8(&size));
}
// Implementation-specific: support for boxes larger than 2^31 has been
// removed.
if (size < static_cast<uint64_t>(pos_) ||
size > static_cast<uint64_t>(kint32max)) {
*err = true;
return false;
}
// Note that the pos_ head has advanced to the byte immediately after the
// header, which is where we want it.
size_ = size;
return true;
}
} // namespace mp4_demuxer

View File

@ -1,216 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_BOX_READER_H_
#define MEDIA_MP4_BOX_READER_H_
#include <map>
#include <vector>
#include "mp4_demuxer/basictypes.h"
#include "mp4_demuxer/fourccs.h"
#include "nsAutoPtr.h"
namespace mp4_demuxer {
class BoxReader;
class Stream;
struct Box {
virtual ~Box();
virtual bool Parse(BoxReader* reader) = 0;
virtual FourCC BoxType() const = 0;
};
class StreamReader {
public:
StreamReader(Stream* stream, int64_t offset, int64_t size)
: start_(offset), size_(size), pos_(0), stream_(stream) {}
bool HasBytes(int count) { return (pos() + count <= size()); }
// Read a value from the stream, performing endian correction, and advance the
// stream pointer.
bool Read1(uint8_t* v) WARN_UNUSED_RESULT;
bool Read2(uint16_t* v) WARN_UNUSED_RESULT;
bool Read2s(int16_t* v) WARN_UNUSED_RESULT;
bool Read4(uint32_t* v) WARN_UNUSED_RESULT;
bool Read4s(int32_t* v) WARN_UNUSED_RESULT;
bool Read8(uint64_t* v) WARN_UNUSED_RESULT;
bool Read8s(int64_t* v) WARN_UNUSED_RESULT;
bool ReadFourCC(FourCC* v) WARN_UNUSED_RESULT;
bool ReadVec(std::vector<uint8_t>* t, int count) WARN_UNUSED_RESULT;
// These variants read a 4-byte integer of the corresponding signedness and
// store it in the 8-byte return type.
bool Read4Into8(uint64_t* v) WARN_UNUSED_RESULT;
bool Read4sInto8s(int64_t* v) WARN_UNUSED_RESULT;
// Advance the stream by this many bytes.
bool SkipBytes(int nbytes) WARN_UNUSED_RESULT;
//const uint8_t* data() const { return buf_; }
int64_t size() const;
int64_t pos() const;
protected:
// The start offset of the box in the stream.
const int64_t start_;
// The size of the box in the stream.
int64_t size_;
// The offset from start_ at which the read cursor is.
// 0 initially.
int64_t pos_;
// The stream that we read from.
Stream* stream_;
template<typename T> bool Read(T* t) WARN_UNUSED_RESULT;
};
class BoxReader : public StreamReader {
public:
~BoxReader();
// Create a BoxReader from a stream. Note that this function may return NULL
// if an intact, complete box was not available in the stream. If |*err| is
// set, there was a stream-level error when creating the box; otherwise, NULL
// values are only expected when insufficient data is available.
//
// |stream| is retained but not owned, and must outlive the BoxReader instance.
static BoxReader* ReadTopLevelBox(Stream* stream,
int64_t offset,
bool* err);
// Read the box header at |offset| in the stream. This function returns true if
// the header is sane; note that it does not check whether the entire box is
// present in the stream before returning true.
static bool StartTopLevelBox(Stream* stream,
int64_t offset,
FourCC* type,
int* box_size);
// Returns true if |type| is recognized to be a top-level box, false
// otherwise. This returns true for some boxes which we do not parse.
// Helpful in debugging misaligned appends.
static bool IsValidTopLevelBox(const FourCC& type);
// Scan through all boxes within the current box, starting at the current
// buffer position. Must be called before any of the *Child functions work.
bool ScanChildren() WARN_UNUSED_RESULT;
// Read exactly one child box from the set of children. The type of the child
// will be determined by the BoxType() method of |child|.
bool ReadChild(Box* child) WARN_UNUSED_RESULT;
// Read one child if available. Returns false on error, true on successful
// read or on child absent.
bool MaybeReadChild(Box* child) WARN_UNUSED_RESULT;
// Read at least one child. False means error or no such child present.
template<typename T> bool ReadChildren(
std::vector<T>* children) WARN_UNUSED_RESULT;
// Read any number of children. False means error.
template<typename T> bool MaybeReadChildren(
std::vector<T>* children) WARN_UNUSED_RESULT;
// Read all children, regardless of FourCC. This is used from exactly one box,
// corresponding to a rather significant inconsistency in the BMFF spec.
// Note that this method is mutually exclusive with ScanChildren().
template<typename T> bool ReadAllChildren(
std::vector<T>* children) WARN_UNUSED_RESULT;
// Populate the values of 'version()' and 'flags()' from a full box header.
// Many boxes, but not all, use these values. This call should happen after
// the box has been initialized, and does not re-read the main box header.
bool ReadFullBoxHeader() WARN_UNUSED_RESULT;
FourCC type() const { return type_; }
uint8_t version() const { return version_; }
uint32_t flags() const { return flags_; }
private:
BoxReader(Stream* stream, int64_t offset, int64_t size);
// Must be called immediately after init. If the return is false, this
// indicates that the box header and its contents were not available in the
// stream or were nonsensical, and that the box must not be used further. In
// this case, if |*err| is false, the problem was simply a lack of data, and
// should only be an error condition if some higher-level component knows that
// no more data is coming (i.e. EOS or end of containing box). If |*err| is
// true, the error is unrecoverable and the stream should be aborted.
bool ReadHeader(bool* err);
FourCC type_;
uint8_t version_;
uint32_t flags_;
typedef std::multimap<FourCC, BoxReader> ChildMap;
// The set of child box FourCCs and their corresponding buffer readers. Only
// valid if scanned_ is true.
ChildMap children_;
bool scanned_;
};
// Template definitions
template<typename T> bool BoxReader::ReadChildren(std::vector<T>* children) {
RCHECK(MaybeReadChildren(children) && !children->empty());
return true;
}
template<typename T>
bool BoxReader::MaybeReadChildren(std::vector<T>* children) {
DCHECK(scanned_);
DCHECK(children->empty());
children->resize(1);
FourCC child_type = (*children)[0].BoxType();
ChildMap::iterator start_itr = children_.lower_bound(child_type);
ChildMap::iterator end_itr = children_.upper_bound(child_type);
children->resize(std::distance(start_itr, end_itr));
typename std::vector<T>::iterator child_itr = children->begin();
for (ChildMap::iterator itr = start_itr; itr != end_itr; ++itr) {
RCHECK(child_itr->Parse(&itr->second));
++child_itr;
}
children_.erase(start_itr, end_itr);
DMX_LOG("Found %d %s boxes\n",
children->size(),
FourCCToString(child_type).c_str());
return true;
}
template<typename T>
bool BoxReader::ReadAllChildren(std::vector<T>* children) {
DCHECK(!scanned_);
scanned_ = true;
bool err = false;
while (pos() < size()) {
BoxReader child_reader(stream_, start_ + pos_, size_ - pos_);
if (!child_reader.ReadHeader(&err)) break;
T child;
RCHECK(child.Parse(&child_reader));
children->push_back(child);
pos_ += child_reader.size();
}
return !err;
}
} // namespace mp4_demuxer
#endif // MEDIA_MP4_BOX_READER_H_
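For reference, a minimal sketch of how a Box subclass drives this reader (the wrapper struct is hypothetical; only MovieHeader, FOURCC_MOOV, and the reader calls are real types from this tree): a plain container box scans its children by FourCC, then reads the ones it cares about.

struct SketchMovieBox : Box {
  MovieHeader header;  // the 'mvhd' child, declared in box_definitions.h
  virtual bool Parse(BoxReader* reader) {
    RCHECK(reader->ScanChildren());     // index child boxes by FourCC
    return reader->ReadChild(&header);  // parse the mandatory 'mvhd' child
  }
  virtual FourCC BoxType() const { return FOURCC_MOOV; }
};

ReadTopLevelBox() returns a BoxReader positioned just past the size/type header, so a Parse() like the one above starts at the first child.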

View File

@ -1,51 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/cenc.h"
#include <cstring>
#include "mp4_demuxer/box_reader.h"
namespace mp4_demuxer {
FrameCENCInfo::FrameCENCInfo() {}
FrameCENCInfo::~FrameCENCInfo() {}
bool FrameCENCInfo::Parse(int iv_size, StreamReader* reader) {
const int kEntrySize = 6;
// Mandated by CENC spec
RCHECK(iv_size == 8 || iv_size == 16);
memset(iv, 0, sizeof(iv));
for (int i = 0; i < iv_size; i++)
RCHECK(reader->Read1(&iv[i]));
if (!reader->HasBytes(1)) return true;
uint16_t subsample_count;
RCHECK(reader->Read2(&subsample_count) &&
reader->HasBytes(subsample_count * kEntrySize));
subsamples.resize(subsample_count);
for (int i = 0; i < subsample_count; i++) {
uint16_t clear_bytes;
uint32_t cypher_bytes;
RCHECK(reader->Read2(&clear_bytes) &&
reader->Read4(&cypher_bytes));
subsamples[i].clear_bytes = clear_bytes;
subsamples[i].cypher_bytes = cypher_bytes;
}
return true;
}
size_t FrameCENCInfo::GetTotalSizeOfSubsamples() const {
size_t size = 0;
for (size_t i = 0; i < subsamples.size(); i++) {
size += subsamples[i].clear_bytes + subsamples[i].cypher_bytes;
}
return size;
}
} // namespace mp4_demuxer

View File

@ -1,29 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_CENC_H_
#define MEDIA_MP4_CENC_H_
#include <vector>
#include "mp4_demuxer/basictypes.h"
#include "mp4_demuxer/decrypt_config.h"
namespace mp4_demuxer {
class StreamReader;
struct FrameCENCInfo {
uint8_t iv[16];
std::vector<SubsampleEntry> subsamples;
FrameCENCInfo();
~FrameCENCInfo();
bool Parse(int iv_size, StreamReader* r);
size_t GetTotalSizeOfSubsamples() const;
};
} // namespace mp4_demuxer
#endif // MEDIA_MP4_CENC_H_
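A small sketch of the intended call pattern (the helper and its parameters are hypothetical; StreamReader and Stream come from box_reader.h and Streams.h, assumed included): the reader is positioned over one sample's auxiliary information, located via the 'saio'/'saiz' boxes, and |iv_size| comes from the track's 'tenc' box.

static bool ParseSampleAuxInfo(Stream* stream, int64_t aux_offset,
                               int64_t aux_size, int iv_size,
                               FrameCENCInfo* out) {
  // CENC only allows 8- or 16-byte IVs; FrameCENCInfo::Parse() enforces this.
  StreamReader reader(stream, aux_offset, aux_size);
  return out->Parse(iv_size, &reader);
}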

View File

@ -1,186 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/channel_layout.h"
#include "mp4_demuxer/basictypes.h"
namespace mp4_demuxer {
static const int kLayoutToChannels[] = {
0, // CHANNEL_LAYOUT_NONE
0, // CHANNEL_LAYOUT_UNSUPPORTED
1, // CHANNEL_LAYOUT_MONO
2, // CHANNEL_LAYOUT_STEREO
3, // CHANNEL_LAYOUT_2_1
3, // CHANNEL_LAYOUT_SURROUND
4, // CHANNEL_LAYOUT_4_0
4, // CHANNEL_LAYOUT_2_2
4, // CHANNEL_LAYOUT_QUAD
5, // CHANNEL_LAYOUT_5_0
6, // CHANNEL_LAYOUT_5_1
5, // CHANNEL_LAYOUT_5_0_BACK
6, // CHANNEL_LAYOUT_5_1_BACK
7, // CHANNEL_LAYOUT_7_0
8, // CHANNEL_LAYOUT_7_1
8, // CHANNEL_LAYOUT_7_1_WIDE
2, // CHANNEL_LAYOUT_STEREO_DOWNMIX
3, // CHANNEL_LAYOUT_2POINT1
4, // CHANNEL_LAYOUT_3_1
5, // CHANNEL_LAYOUT_4_1
6, // CHANNEL_LAYOUT_6_0
6, // CHANNEL_LAYOUT_6_0_FRONT
6, // CHANNEL_LAYOUT_HEXAGONAL
7, // CHANNEL_LAYOUT_6_1
7, // CHANNEL_LAYOUT_6_1_BACK
7, // CHANNEL_LAYOUT_6_1_FRONT
7, // CHANNEL_LAYOUT_7_0_FRONT
8, // CHANNEL_LAYOUT_7_1_WIDE_BACK
8, // CHANNEL_LAYOUT_OCTAGONAL
0, // CHANNEL_LAYOUT_DISCRETE
};
// The channel orderings for each layout as specified by FFmpeg. Each value
// represents the index of each channel in each layout. Values of -1 mean the
// channel at that index is not used for that layout. For example, the left
// side surround channel in FFmpeg's 5.1 layout is in the 5th position (because
// the order is L, R, C, LFE, LS, RS), so
// kChannelOrderings[CHANNEL_LAYOUT_5_1][SIDE_LEFT] = 4;
static const int kChannelOrderings[CHANNEL_LAYOUT_MAX][CHANNELS_MAX] = {
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_NONE
{ -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_UNSUPPORTED
{ -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_MONO
{ -1 , -1 , 0 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_STEREO
{ 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_2_1
{ 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , 2 , -1 , -1 },
// CHANNEL_LAYOUT_SURROUND
{ 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_4_0
{ 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , 3 , -1 , -1 },
// CHANNEL_LAYOUT_2_2
{ 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , 2 , 3 },
// CHANNEL_LAYOUT_QUAD
{ 0 , 1 , -1 , -1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_5_0
{ 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , 3 , 4 },
// CHANNEL_LAYOUT_5_1
{ 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 , 4 , 5 },
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_5_0_BACK
{ 0 , 1 , 2 , -1 , 3 , 4 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_5_1_BACK
{ 0 , 1 , 2 , 3 , 4 , 5 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_7_0
{ 0 , 1 , 2 , -1 , 5 , 6 , -1 , -1 , -1 , 3 , 4 },
// CHANNEL_LAYOUT_7_1
{ 0 , 1 , 2 , 3 , 6 , 7 , -1 , -1 , -1 , 4 , 5 },
// CHANNEL_LAYOUT_7_1_WIDE
{ 0 , 1 , 2 , 3 , -1 , -1 , 6 , 7 , -1 , 4 , 5 },
// CHANNEL_LAYOUT_STEREO_DOWNMIX
{ 0 , 1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_2POINT1
{ 0 , 1 , -1 , 2 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_3_1
{ 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_4_1
{ 0 , 1 , 2 , 4 , -1 , -1 , -1 , -1 , 3 , -1 , -1 },
// CHANNEL_LAYOUT_6_0
{ 0 , 1 , 2 , -1 , -1 , -1 , -1 , -1 , 5 , 3 , 4 },
// CHANNEL_LAYOUT_6_0_FRONT
{ 0 , 1 , -1 , -1 , -1 , -1 , 4 , 5 , -1 , 2 , 3 },
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
// CHANNEL_LAYOUT_HEXAGONAL
{ 0 , 1 , 2 , -1 , 3 , 4 , -1 , -1 , 5 , -1 , -1 },
// CHANNEL_LAYOUT_6_1
{ 0 , 1 , 2 , 3 , -1 , -1 , -1 , -1 , 6 , 4 , 5 },
// CHANNEL_LAYOUT_6_1_BACK
{ 0 , 1 , 2 , 3 , 4 , 5 , -1 , -1 , 6 , -1 , -1 },
// CHANNEL_LAYOUT_6_1_FRONT
{ 0 , 1 , -1 , 6 , -1 , -1 , 4 , 5 , -1 , 2 , 3 },
// CHANNEL_LAYOUT_7_0_FRONT
{ 0 , 1 , 2 , -1 , -1 , -1 , 5 , 6 , -1 , 3 , 4 },
// CHANNEL_LAYOUT_7_1_WIDE_BACK
{ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , -1 , -1 , -1 },
// CHANNEL_LAYOUT_OCTAGONAL
{ 0 , 1 , 2 , -1 , 5 , 6 , -1 , -1 , 7 , 3 , 4 },
// CHANNEL_LAYOUT_DISCRETE
{ -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 , -1 },
// FL | FR | FC | LFE | BL | BR | FLofC | FRofC | BC | SL | SR
};
int ChannelLayoutToChannelCount(ChannelLayout layout) {
DCHECK_LT(static_cast<size_t>(layout), arraysize(kLayoutToChannels));
return kLayoutToChannels[layout];
}
// Converts a channel count into a channel layout.
ChannelLayout GuessChannelLayout(int channels) {
switch (channels) {
case 1:
return CHANNEL_LAYOUT_MONO;
case 2:
return CHANNEL_LAYOUT_STEREO;
case 3:
return CHANNEL_LAYOUT_SURROUND;
case 4:
return CHANNEL_LAYOUT_QUAD;
case 5:
return CHANNEL_LAYOUT_5_0;
case 6:
return CHANNEL_LAYOUT_5_1;
case 7:
return CHANNEL_LAYOUT_6_1;
case 8:
return CHANNEL_LAYOUT_7_1;
default:
DMX_LOG("Unsupported channel count: %d\n", channels);
}
return CHANNEL_LAYOUT_UNSUPPORTED;
}
int ChannelOrder(ChannelLayout layout, Channels channel) {
DCHECK_LT(static_cast<size_t>(layout), arraysize(kChannelOrderings));
DCHECK_LT(static_cast<size_t>(channel), arraysize(kChannelOrderings[0]));
return kChannelOrderings[layout][channel];
}
} // namespace mp4_demuxer

View File

@ -1,134 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_CHANNEL_LAYOUT_H_
#define MEDIA_BASE_CHANNEL_LAYOUT_H_
namespace mp4_demuxer {
// Enumerates the various representations of the ordering of audio channels.
// Logged to UMA, so never reuse a value, always add new/greater ones!
enum ChannelLayout {
CHANNEL_LAYOUT_NONE = 0,
CHANNEL_LAYOUT_UNSUPPORTED = 1,
// Front C
CHANNEL_LAYOUT_MONO = 2,
// Front L, Front R
CHANNEL_LAYOUT_STEREO = 3,
// Front L, Front R, Back C
CHANNEL_LAYOUT_2_1 = 4,
// Front L, Front R, Front C
CHANNEL_LAYOUT_SURROUND = 5,
// Front L, Front R, Front C, Back C
CHANNEL_LAYOUT_4_0 = 6,
// Front L, Front R, Side L, Side R
CHANNEL_LAYOUT_2_2 = 7,
// Front L, Front R, Back L, Back R
CHANNEL_LAYOUT_QUAD = 8,
// Front L, Front R, Front C, Side L, Side R
CHANNEL_LAYOUT_5_0 = 9,
// Front L, Front R, Front C, Side L, Side R, LFE
CHANNEL_LAYOUT_5_1 = 10,
// Front L, Front R, Front C, Back L, Back R
CHANNEL_LAYOUT_5_0_BACK = 11,
// Front L, Front R, Front C, Back L, Back R, LFE
CHANNEL_LAYOUT_5_1_BACK = 12,
// Front L, Front R, Front C, Side L, Side R, Back L, Back R
CHANNEL_LAYOUT_7_0 = 13,
// Front L, Front R, Front C, Side L, Side R, LFE, Back L, Back R
CHANNEL_LAYOUT_7_1 = 14,
// Front L, Front R, Front C, Side L, Side R, LFE, Front LofC, Front RofC
CHANNEL_LAYOUT_7_1_WIDE = 15,
// Stereo L, Stereo R
CHANNEL_LAYOUT_STEREO_DOWNMIX = 16,
// Stereo L, Stereo R, LFE
CHANNEL_LAYOUT_2POINT1 = 17,
// Stereo L, Stereo R, Front C, LFE
CHANNEL_LAYOUT_3_1 = 18,
// Stereo L, Stereo R, Front C, Rear C, LFE
CHANNEL_LAYOUT_4_1 = 19,
// Stereo L, Stereo R, Front C, Side L, Side R, Back C
CHANNEL_LAYOUT_6_0 = 20,
// Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_6_0_FRONT = 21,
// Stereo L, Stereo R, Side L, Side R, Front C, Rear C.
CHANNEL_LAYOUT_HEXAGONAL = 22,
// Stereo L, Stereo R, Side L, Side R, Front C, Rear Center, LFE
CHANNEL_LAYOUT_6_1 = 23,
// Stereo L, Stereo R, Back L, Back R, Front C, Rear Center, LFE
CHANNEL_LAYOUT_6_1_BACK = 24,
// Stereo L, Stereo R, Side L, Side R, Front LofC, Front RofC, LFE
CHANNEL_LAYOUT_6_1_FRONT = 25,
// Front L, Front R, Front C, Side L, Side R, Front LofC, Front RofC
CHANNEL_LAYOUT_7_0_FRONT = 26,
// Front L, Front R, Front C, Back L, Back R, LFE, Front LofC, Front RofC
CHANNEL_LAYOUT_7_1_WIDE_BACK = 27,
// Front L, Front R, Front C, Side L, Side R, Rear C, Back L, Back R.
CHANNEL_LAYOUT_OCTAGONAL = 28,
// Channels are not explicitly mapped to speakers.
CHANNEL_LAYOUT_DISCRETE = 29,
// Total number of layouts.
CHANNEL_LAYOUT_MAX // Must always be last!
};
enum Channels {
LEFT = 0,
RIGHT,
CENTER,
LFE,
BACK_LEFT,
BACK_RIGHT,
LEFT_OF_CENTER,
RIGHT_OF_CENTER,
BACK_CENTER,
SIDE_LEFT,
SIDE_RIGHT,
CHANNELS_MAX
};
// Returns the expected channel position in an interleaved stream. Values of -1
// mean the channel at that index is not used for that layout. Values range
// from 0 to CHANNELS_MAX - 1.
int ChannelOrder(ChannelLayout layout, Channels channel);
// Returns the number of channels in a given ChannelLayout.
int ChannelLayoutToChannelCount(ChannelLayout layout);
// Given the number of channels, return the best layout,
// or return CHANNEL_LAYOUT_UNSUPPORTED if there is no good match.
ChannelLayout GuessChannelLayout(int channels);
} // namespace mp4_demuxer
#endif // MEDIA_BASE_CHANNEL_LAYOUT_H_
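A quick usage sketch (hypothetical caller) tying the three helpers together for the 5.1 case:

void DescribeFivePointOne() {
  ChannelLayout layout = GuessChannelLayout(6);     // CHANNEL_LAYOUT_5_1
  int count = ChannelLayoutToChannelCount(layout);  // 6
  int lfe_pos = ChannelOrder(layout, LFE);          // 3: order is L, R, C, LFE, SL, SR
  (void)count;
  (void)lfe_pos;
}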

View File

@ -1,25 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/decrypt_config.h"
namespace mp4_demuxer {
DecryptConfig::DecryptConfig(const std::string& key_id,
const std::string& iv,
const int data_offset,
const std::vector<SubsampleEntry>& subsamples)
: key_id_(key_id),
iv_(iv),
data_offset_(data_offset),
subsamples_(subsamples) {
DCHECK_GT(key_id.size(), 0u);
DCHECK(iv.size() == static_cast<size_t>(DecryptConfig::kDecryptionKeySize) ||
iv.empty());
DCHECK_GE(data_offset, 0);
}
DecryptConfig::~DecryptConfig() {}
} // namespace mp4_demuxer

View File

@ -1,78 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_DECRYPT_CONFIG_H_
#define MEDIA_BASE_DECRYPT_CONFIG_H_
#include <string>
#include <vector>
#include "mp4_demuxer/basictypes.h"
namespace mp4_demuxer {
// The Common Encryption spec provides for subsample encryption, where portions
// of a sample are set in cleartext. A SubsampleEntry specifies the number of
// clear and encrypted bytes in each subsample. For decryption, all of the
// encrypted bytes in a sample should be considered a single logical stream,
// regardless of how they are divided into subsamples, and the clear bytes
// should not be considered as part of decryption. This is logically equivalent
// to concatenating all 'cypher_bytes' portions of subsamples, decrypting that
// result, and then copying each byte from the decrypted block over the
// position of the corresponding encrypted byte.
struct SubsampleEntry {
uint32_t clear_bytes;
uint32_t cypher_bytes;
};
// Contains all information that a decryptor needs to decrypt a media sample.
class DecryptConfig {
public:
// Keys are always 128 bits.
static const int kDecryptionKeySize = 16;
// |key_id| is the ID that references the decryption key for this sample.
  // |iv| is the initialization vector defined by the encrypted format.
  // Currently |iv| must be 16 bytes as defined by WebM and ISO, or empty,
  // which signals an unencrypted frame.
// |data_offset| is the amount of data that should be discarded from the
// head of the sample buffer before applying subsample information. A
// decrypted buffer will be shorter than an encrypted buffer by this amount.
// |subsamples| defines the clear and encrypted portions of the sample as
// described above. A decrypted buffer will be equal in size to the sum
// of the subsample sizes.
//
// |data_offset| is applied before |subsamples|.
DecryptConfig(const std::string& key_id,
const std::string& iv,
const int data_offset,
const std::vector<SubsampleEntry>& subsamples);
~DecryptConfig();
const std::string& key_id() const { return key_id_; }
const std::string& iv() const { return iv_; }
int data_offset() const { return data_offset_; }
const std::vector<SubsampleEntry>& subsamples() const { return subsamples_; }
private:
const std::string key_id_;
// Initialization vector.
const std::string iv_;
// TODO(fgalligan): Remove |data_offset_| if there is no plan to use it in
// the future.
// Amount of data to be discarded before applying subsample information.
const int data_offset_;
// Subsample information. May be empty for some formats, meaning entire frame
// (less data ignored by data_offset_) is encrypted.
const std::vector<SubsampleEntry> subsamples_;
DISALLOW_COPY_AND_ASSIGN(DecryptConfig);
};
} // namespace mp4_demuxer
#endif // MEDIA_BASE_DECRYPT_CONFIG_H_
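Illustration only (all values are caller-supplied placeholders, not from a real stream): building a DecryptConfig for a sample whose leading bytes are clear and whose remainder is encrypted, as the demuxer does for subsample-encrypted AAC.

DecryptConfig* MakeSketchConfig(const std::string& key_id,
                                const std::string& iv,
                                uint32_t clear_header_bytes,
                                uint32_t encrypted_payload_bytes) {
  SubsampleEntry entry;
  entry.clear_bytes = clear_header_bytes;        // e.g. an ADTS header
  entry.cypher_bytes = encrypted_payload_bytes;  // the rest of the frame
  std::vector<SubsampleEntry> subsamples(1, entry);
  // |iv| must be 16 bytes, or empty for a clear sample; no data offset here.
  return new DecryptConfig(key_id, iv, 0, subsamples);
}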

View File

@ -1,106 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/es_descriptor.h"
#include "mp4_demuxer/bit_reader.h"
namespace mp4_demuxer {
// The elementary stream size is specified by up to 4 bytes.
// The MSB of a byte indicates if there are more bytes for the size.
static bool ReadESSize(BitReader* reader, uint32_t* size) {
uint8_t msb;
uint8_t byte;
*size = 0;
for (size_t i = 0; i < 4; ++i) {
RCHECK(reader->ReadBits(1, &msb));
RCHECK(reader->ReadBits(7, &byte));
*size = (*size << 7) + byte;
if (msb == 0)
break;
}
return true;
}
ESDescriptor::ESDescriptor()
: object_type_(kForbidden) {
}
ESDescriptor::~ESDescriptor() {}
bool ESDescriptor::Parse(const std::vector<uint8_t>& data) {
BitReader reader(&data[0], data.size());
uint8_t tag;
uint32_t size;
uint8_t stream_dependency_flag;
uint8_t url_flag;
uint8_t ocr_stream_flag;
uint16_t dummy;
RCHECK(reader.ReadBits(8, &tag));
RCHECK(tag == kESDescrTag);
RCHECK(ReadESSize(&reader, &size));
RCHECK(reader.ReadBits(16, &dummy)); // ES_ID
RCHECK(reader.ReadBits(1, &stream_dependency_flag));
RCHECK(reader.ReadBits(1, &url_flag));
RCHECK(!url_flag); // We don't support url flag
RCHECK(reader.ReadBits(1, &ocr_stream_flag));
RCHECK(reader.ReadBits(5, &dummy)); // streamPriority
if (stream_dependency_flag)
RCHECK(reader.ReadBits(16, &dummy)); // dependsOn_ES_ID
if (ocr_stream_flag)
RCHECK(reader.ReadBits(16, &dummy)); // OCR_ES_Id
RCHECK(ParseDecoderConfigDescriptor(&reader));
return true;
}
uint8_t ESDescriptor::object_type() const {
return object_type_;
}
const std::vector<uint8_t>& ESDescriptor::decoder_specific_info() const {
return decoder_specific_info_;
}
bool ESDescriptor::ParseDecoderConfigDescriptor(BitReader* reader) {
uint8_t tag;
uint32_t size;
uint64_t dummy;
RCHECK(reader->ReadBits(8, &tag));
RCHECK(tag == kDecoderConfigDescrTag);
RCHECK(ReadESSize(reader, &size));
RCHECK(reader->ReadBits(8, &object_type_));
RCHECK(reader->ReadBits(64, &dummy));
RCHECK(reader->ReadBits(32, &dummy));
RCHECK(ParseDecoderSpecificInfo(reader));
return true;
}
bool ESDescriptor::ParseDecoderSpecificInfo(BitReader* reader) {
uint8_t tag;
uint32_t size;
RCHECK(reader->ReadBits(8, &tag));
RCHECK(tag == kDecoderSpecificInfoTag);
RCHECK(ReadESSize(reader, &size));
decoder_specific_info_.resize(size);
for (uint32_t i = 0; i < size; ++i)
RCHECK(reader->ReadBits(8, &decoder_specific_info_[i]));
return true;
}
} // namespace mp4_demuxer

View File

@ -1,53 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_ES_DESCRIPTOR_H_
#define MEDIA_MP4_ES_DESCRIPTOR_H_
#include <vector>
#include "mp4_demuxer/basictypes.h"
namespace mp4_demuxer {
class BitReader;
// The following values are extracted from ISO 14496 Part 1 Table 5 -
// objectTypeIndication Values. Only values currently in use are included.
enum ObjectType {
kForbidden = 0,
kISO_14496_3 = 0x40, // MPEG4 AAC
kISO_13818_7_AAC_LC = 0x67 // MPEG2 AAC-LC
};
// This class parses the object type and decoder-specific information from an
// elementary stream descriptor, which is usually contained in an esds box.
// Please refer to ISO 14496 Part 1 7.2.6.5 for more details.
class ESDescriptor {
public:
ESDescriptor();
~ESDescriptor();
bool Parse(const std::vector<uint8_t>& data);
uint8_t object_type() const;
const std::vector<uint8_t>& decoder_specific_info() const;
private:
enum Tag {
kESDescrTag = 0x03,
kDecoderConfigDescrTag = 0x04,
kDecoderSpecificInfoTag = 0x05
};
bool ParseDecoderConfigDescriptor(BitReader* reader);
bool ParseDecoderSpecificInfo(BitReader* reader);
uint8_t object_type_;
std::vector<uint8_t> decoder_specific_info_;
};
} // namespace mp4_demuxer
#endif // MEDIA_MP4_ES_DESCRIPTOR_H_
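A minimal sketch of the expected call pattern (the helper name is hypothetical): the caller passes in the descriptor bytes carried by an 'esds' box and gets back the AudioSpecificConfig used to set up AAC decoding.

static bool ExtractAudioSpecificConfig(const std::vector<uint8_t>& esds_payload,
                                       std::vector<uint8_t>* asc) {
  ESDescriptor es;
  if (!es.Parse(esds_payload))
    return false;
  // The demuxer only accepts MPEG-4 AAC and MPEG-2 AAC-LC object types.
  if (es.object_type() != kISO_14496_3 &&
      es.object_type() != kISO_13818_7_AAC_LC)
    return false;
  *asc = es.decoder_specific_info();
  return true;
}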

View File

@ -1,97 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_FOURCCS_H_
#define MEDIA_MP4_FOURCCS_H_
#include <string>
namespace mp4_demuxer {
enum FourCC {
FOURCC_NULL = 0,
FOURCC_AVC1 = 0x61766331,
FOURCC_AVCC = 0x61766343,
FOURCC_BLOC = 0x626C6F63,
FOURCC_CENC = 0x63656e63,
FOURCC_CO64 = 0x636f3634,
FOURCC_CTTS = 0x63747473,
FOURCC_DINF = 0x64696e66,
FOURCC_EDTS = 0x65647473,
FOURCC_ELST = 0x656c7374,
FOURCC_ENCA = 0x656e6361,
FOURCC_ENCV = 0x656e6376,
FOURCC_ESDS = 0x65736473,
FOURCC_FREE = 0x66726565,
FOURCC_FRMA = 0x66726d61,
FOURCC_FTYP = 0x66747970,
FOURCC_HDLR = 0x68646c72,
FOURCC_HINT = 0x68696e74,
FOURCC_IODS = 0x696f6473,
FOURCC_MDAT = 0x6d646174,
FOURCC_MDHD = 0x6d646864,
FOURCC_MDIA = 0x6d646961,
FOURCC_MECO = 0x6d65636f,
FOURCC_MEHD = 0x6d656864,
FOURCC_META = 0x6d657461,
FOURCC_MFHD = 0x6d666864,
FOURCC_MFRA = 0x6d667261,
FOURCC_MINF = 0x6d696e66,
FOURCC_MOOF = 0x6d6f6f66,
FOURCC_MOOV = 0x6d6f6f76,
FOURCC_MP4A = 0x6d703461,
FOURCC_MP4V = 0x6d703476,
FOURCC_MVEX = 0x6d766578,
FOURCC_MVHD = 0x6d766864,
FOURCC_PASP = 0x70617370,
FOURCC_PDIN = 0x7064696e,
FOURCC_PRFT = 0x70726674,
FOURCC_PSSH = 0x70737368,
FOURCC_SAIO = 0x7361696f,
FOURCC_SAIZ = 0x7361697a,
FOURCC_SCHI = 0x73636869,
FOURCC_SCHM = 0x7363686d,
FOURCC_SDTP = 0x73647470,
FOURCC_SIDX = 0x73696478,
FOURCC_SINF = 0x73696e66,
FOURCC_SKIP = 0x736b6970,
FOURCC_SMHD = 0x736d6864,
FOURCC_SOUN = 0x736f756e,
FOURCC_SSIX = 0x73736978,
FOURCC_STBL = 0x7374626c,
FOURCC_STCO = 0x7374636f,
FOURCC_STSC = 0x73747363,
FOURCC_STSD = 0x73747364,
FOURCC_STSS = 0x73747373,
FOURCC_STSZ = 0x7374737a,
FOURCC_STTS = 0x73747473,
FOURCC_STYP = 0x73747970,
FOURCC_TENC = 0x74656e63,
FOURCC_TFDT = 0x74666474,
FOURCC_TFHD = 0x74666864,
FOURCC_TKHD = 0x746b6864,
FOURCC_TRAF = 0x74726166,
FOURCC_TRAK = 0x7472616b,
FOURCC_TREX = 0x74726578,
FOURCC_TRUN = 0x7472756e,
FOURCC_UDTA = 0x75647461,
FOURCC_UUID = 0x75756964,
FOURCC_VIDE = 0x76696465,
FOURCC_VMHD = 0x766d6864,
FOURCC_WIDE = 0x77696465,
};
const inline std::string FourCCToString(FourCC fourcc) {
char buf[5];
buf[0] = (fourcc >> 24) & 0xff;
buf[1] = (fourcc >> 16) & 0xff;
buf[2] = (fourcc >> 8) & 0xff;
buf[3] = (fourcc) & 0xff;
buf[4] = 0;
return std::string(buf);
}
} // namespace mp4_demuxer
#endif // MEDIA_MP4_FOURCCS_H_

View File

@ -1,525 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/mp4_demuxer.h"
#include "mp4_demuxer/Streams.h"
#include "mp4_demuxer/box_reader.h"
#include "mp4_demuxer/box_definitions.h"
#include "mp4_demuxer/basictypes.h"
#include "mp4_demuxer/es_descriptor.h"
#include "mp4_demuxer/video_util.h"
#include "mp4_demuxer/track_run_iterator.h"
#include "mp4_demuxer/audio_decoder_config.h"
#include "mp4_demuxer/video_decoder_config.h"
#include <assert.h>
using namespace std;
namespace mp4_demuxer {
MP4Sample::MP4Sample(Microseconds _decode_timestamp,
Microseconds _composition_timestamp,
Microseconds _duration,
int64_t _byte_offset,
std::vector<uint8_t>* _data,
TrackType _type,
DecryptConfig* _decrypt_config,
bool _is_sync_point)
: decode_timestamp(_decode_timestamp),
composition_timestamp(_composition_timestamp),
duration(_duration),
byte_offset(_byte_offset),
data(_data),
type(_type),
decrypt_config(_decrypt_config),
is_sync_point(_is_sync_point)
{
}
MP4Sample::~MP4Sample()
{
}
bool MP4Sample::is_encrypted() const {
return decrypt_config != nullptr;
}
MP4Demuxer::MP4Demuxer(Stream* stream)
: state_(kWaitingForInit),
stream_(stream),
stream_offset_(0),
duration_(InfiniteMicroseconds),
moof_head_(0),
mdat_tail_(0),
audio_track_id_(0),
video_track_id_(0),
audio_frameno(0),
video_frameno(0),
has_audio_(false),
has_sbr_(false),
is_audio_track_encrypted_(false),
has_video_(false),
is_video_track_encrypted_(false),
can_seek_(false)
{
}
MP4Demuxer::~MP4Demuxer()
{
}
bool MP4Demuxer::Init()
{
ChangeState(kParsingBoxes);
// Read from the stream until the moov box is read. This will have the
// header data that we need to initialize the decoders.
bool ok = true;
const int64_t length = stream_->Length();
while (ok &&
stream_offset_ < length &&
!moov_ &&
state_ == kParsingBoxes) {
ok = ParseBox();
}
return state_ >= kParsingBoxes &&
state_ < kError;
}
void MP4Demuxer::Reset() {
moov_ = nullptr;
runs_ = nullptr;
moof_head_ = 0;
mdat_tail_ = 0;
}
// TODO(xhwang): Figure out the init data type appropriately once it's spec'ed.
static const char kMp4InitDataType[] = "video/mp4";
bool MP4Demuxer::ParseMoov(BoxReader* reader) {
RCHECK(state_ < kError);
moov_ = new Movie();
RCHECK(moov_->Parse(reader));
runs_ = new TrackRunIterator(moov_.get());
has_audio_ = false;
has_video_ = false;
for (std::vector<Track>::const_iterator track = moov_->tracks.begin();
track != moov_->tracks.end(); ++track) {
// TODO(strobe): Only the first audio and video track present in a file are
// used. (Track selection is better accomplished via Source IDs, though, so
// adding support for track selection within a stream is low-priority.)
const SampleDescription& samp_descr =
track->media.information.sample_table.description;
// TODO(strobe): When codec reconfigurations are supported, detect and send
// a codec reconfiguration for fragments using a sample description index
// different from the previous one
size_t desc_idx = 0;
for (size_t t = 0; t < moov_->extends.tracks.size(); t++) {
const TrackExtends& trex = moov_->extends.tracks[t];
if (trex.track_id == track->header.track_id) {
desc_idx = trex.default_sample_description_index;
break;
}
}
RCHECK(desc_idx > 0);
desc_idx -= 1; // BMFF descriptor index is one-based
if (track->media.handler.type == kAudio && !audio_config_.IsValidConfig()) {
RCHECK(!samp_descr.audio_entries.empty());
// It is not uncommon to find otherwise-valid files with incorrect sample
// description indices, so we fail gracefully in that case.
if (desc_idx >= samp_descr.audio_entries.size())
desc_idx = 0;
const AudioSampleEntry& entry = samp_descr.audio_entries[desc_idx];
const AAC& aac = entry.esds.aac;
if (!(entry.format == FOURCC_MP4A ||
(entry.format == FOURCC_ENCA &&
entry.sinf.format.format == FOURCC_MP4A))) {
DMX_LOG("Unsupported audio format 0x%x in stsd box\n", entry.format);
return false;
}
int audio_type = entry.esds.object_type;
DMX_LOG("audio_type 0x%x\n", audio_type);
const std::vector<uint8_t>& asc = aac.AudioSpecificConfig();
if (asc.size() > 0) {
DMX_LOG("audio specific config:");
for (unsigned i=0; i<asc.size(); ++i) {
DMX_LOG(" 0x%x", asc[i]);
}
DMX_LOG("\n");
}
// Check if it is MPEG-4 AAC defined in ISO 14496 Part 3 or a
// supported MPEG-2 AAC variant.
if (audio_type != kISO_14496_3 && audio_type != kISO_13818_7_AAC_LC) {
DMX_LOG("Unsupported audio object type 0x%x in esds.\n", audio_type);
return false;
}
SampleFormat sample_format;
if (entry.samplesize == 8) {
sample_format = kSampleFormatU8;
} else if (entry.samplesize == 16) {
sample_format = kSampleFormatS16;
} else if (entry.samplesize == 32) {
sample_format = kSampleFormatS32;
} else {
DMX_LOG("Unsupported sample size.\n");
return false;
}
is_audio_track_encrypted_ = entry.sinf.info.track_encryption.is_encrypted;
DMX_LOG("is_audio_track_encrypted_: %d\n", is_audio_track_encrypted_);
// TODO(cpearce): Chromium checks the MIME type specified to see if it contains
// the codec info that tells us it's using SBR. We should check for that
// here too.
audio_config_.Initialize(kCodecAAC, sample_format,
aac.GetChannelLayout(has_sbr_),
aac.GetOutputSamplesPerSecond(has_sbr_),
&asc.front(),
asc.size(),
is_audio_track_encrypted_);
has_audio_ = true;
audio_track_id_ = track->header.track_id;
}
if (track->media.handler.type == kVideo && !video_config_.IsValidConfig()) {
RCHECK(!samp_descr.video_entries.empty());
if (desc_idx >= samp_descr.video_entries.size())
desc_idx = 0;
const VideoSampleEntry& entry = samp_descr.video_entries[desc_idx];
if (!(entry.format == FOURCC_AVC1 ||
(entry.format == FOURCC_ENCV &&
entry.sinf.format.format == FOURCC_AVC1))) {
DMX_LOG("Unsupported video format 0x%x in stsd box.\n", entry.format);
return false;
}
// TODO(strobe): Recover correct crop box
IntSize coded_size(entry.width, entry.height);
IntRect visible_rect(0, 0, coded_size.width(), coded_size.height());
IntSize natural_size = GetNaturalSize(visible_rect.size(),
entry.pixel_aspect.h_spacing,
entry.pixel_aspect.v_spacing);
is_video_track_encrypted_ = entry.sinf.info.track_encryption.is_encrypted;
DMX_LOG("is_video_track_encrypted_: %d\n", is_video_track_encrypted_);
video_config_.Initialize(kCodecH264, H264PROFILE_MAIN, VideoFrameFormat::YV12,
coded_size, visible_rect, natural_size,
// No decoder-specific buffer needed for AVC;
// SPS/PPS are embedded in the video stream
NULL, 0, is_video_track_encrypted_, true);
has_video_ = true;
video_track_id_ = track->header.track_id;
}
}
//RCHECK(config_cb_.Run(audio_config, video_config));
if (moov_->extends.header.fragment_duration > 0) {
duration_ = MicrosecondsFromRational(moov_->extends.header.fragment_duration,
moov_->header.timescale);
} else if (moov_->header.duration > 0 &&
moov_->header.duration != kuint64max) {
duration_ = MicrosecondsFromRational(moov_->header.duration,
moov_->header.timescale);
} else {
duration_ = InfiniteMicroseconds;
}
//if (!init_cb_.is_null())
// base::ResetAndReturn(&init_cb_).Run(true, duration);
return true;
}
Microseconds
MP4Demuxer::Duration() const {
return duration_;
}
bool MP4Demuxer::ParseMoof(BoxReader* reader) {
RCHECK(state_ < kError);
RCHECK(moov_.get()); // Must already have initialization segment
MovieFragment moof;
RCHECK(moof.Parse(reader));
RCHECK(runs_->Init(moof));
//new_segment_cb_.Run(runs_->GetMinDecodeTimestamp());
ChangeState(kEmittingSamples);
return true;
}
bool MP4Demuxer::ParseBox() {
RCHECK(state_ < kError);
bool err = false;
nsAutoPtr<BoxReader> reader(BoxReader::ReadTopLevelBox(stream_,
stream_offset_,
&err));
if (!reader || err) {
DMX_LOG("Failed to read box at offset=%lld", stream_offset_);
return false;
}
string type = FourCCToString(reader->type());
DMX_LOG("offset=%lld version=0x%x flags=0x%x size=%d",
stream_offset_, (uint32_t)reader->version(),
reader->flags(), reader->size());
if (reader->type() == FOURCC_MOOV) {
DMX_LOG("ParseMoov\n");
if (!ParseMoov(reader.get())) {
DMX_LOG("ParseMoov failed\n");
return false;
}
} else if (reader->type() == FOURCC_MOOF) {
DMX_LOG("MOOF encountered\n.");
moof_head_ = stream_offset_;
if (!ParseMoof(reader.get())) {
DMX_LOG("ParseMoof failed\n");
return false;
}
mdat_tail_ = stream_offset_ + reader->size();
}
stream_offset_ += reader->size();
return true;
}
bool MP4Demuxer::EmitSample(nsAutoPtr<MP4Sample>* sample) {
if (!runs_->IsRunValid()) {
ChangeState(kParsingBoxes);
//end_of_segment_cb_.Run();
return true;
}
if (!runs_->IsSampleValid()) {
runs_->AdvanceRun();
return true;
}
bool audio = has_audio_ && audio_track_id_ == runs_->track_id();
bool video = has_video_ && video_track_id_ == runs_->track_id();
// Skip this entire track if it's not one we're interested in
if (!audio && !video)
runs_->AdvanceRun();
// Attempt to cache the auxiliary information first. Aux info is usually
// placed in a contiguous block before the sample data, rather than being
// interleaved. If we didn't cache it, this would require that we retain the
// start of the segment buffer while reading samples. Aux info is typically
// quite small compared to sample data, so this pattern is useful on
// memory-constrained devices where the source buffer consumes a substantial
// portion of the total system memory.
if (runs_->AuxInfoNeedsToBeCached()) {
int64_t aux_info_offset = runs_->aux_info_offset() + moof_head_;
if (stream_->Length() - aux_info_offset < runs_->aux_info_size()) {
return false;
}
return runs_->CacheAuxInfo(stream_, moof_head_);
}
nsAutoPtr<DecryptConfig> decrypt_config;
std::vector<SubsampleEntry> subsamples;
if (runs_->is_encrypted()) {
runs_->GetDecryptConfig(decrypt_config);
subsamples = decrypt_config->subsamples();
}
nsAutoPtr<vector<uint8_t>> frame_buf(new vector<uint8_t>());
const int64_t sample_offset = runs_->sample_offset() + moof_head_;
StreamReader reader(stream_, sample_offset, runs_->sample_size());
reader.ReadVec(frame_buf, runs_->sample_size());
if (video) {
if (!PrepareAVCBuffer(runs_->video_description().avcc,
frame_buf, &subsamples)) {
DMX_LOG("Failed to prepare AVC sample for decode\n");
return false;
}
}
if (audio) {
if (!PrepareAACBuffer(runs_->audio_description().esds.aac,
frame_buf, &subsamples)) {
DMX_LOG("Failed to prepare AAC sample for decode\n");
return false;
}
}
const bool is_encrypted = (audio && is_audio_track_encrypted_) ||
(video && is_video_track_encrypted_);
assert(runs_->is_encrypted() == is_encrypted);
if (decrypt_config) {
if (!subsamples.empty()) {
// Create a new config with the updated subsamples.
decrypt_config = new DecryptConfig(decrypt_config->key_id(),
decrypt_config->iv(),
decrypt_config->data_offset(),
subsamples);
}
// else, use the existing config.
} else if (is_encrypted) {
// The media pipeline requires a DecryptConfig with an empty |iv|.
// TODO(ddorwin): Refactor so we do not need a fake key ID ("1");
decrypt_config = new DecryptConfig("1", "", 0, std::vector<SubsampleEntry>());
}
assert(audio || video);
*sample = new MP4Sample(runs_->dts(),
runs_->cts(),
runs_->duration(),
sample_offset,
frame_buf.forget(),
audio ? kAudio : kVideo,
decrypt_config.forget(),
runs_->is_keyframe());
runs_->AdvanceSample();
return true;
}
bool MP4Demuxer::PrepareAVCBuffer(
const AVCDecoderConfigurationRecord& avc_config,
std::vector<uint8_t>* frame_buf,
std::vector<SubsampleEntry>* subsamples) const {
// Convert the AVC NALU length fields to Annex B headers, as expected by
// decoding libraries. Since this may enlarge the size of the buffer, we also
// update the clear byte count for each subsample if encryption is used to
// account for the difference in size between the length prefix and Annex B
// start code.
RCHECK(AVC::ConvertFrameToAnnexB(avc_config.length_size, frame_buf));
if (!subsamples->empty()) {
const int nalu_size_diff = 4 - avc_config.length_size;
size_t expected_size = runs_->sample_size() +
subsamples->size() * nalu_size_diff;
RCHECK(frame_buf->size() == expected_size);
for (size_t i = 0; i < subsamples->size(); i++)
(*subsamples)[i].clear_bytes += nalu_size_diff;
}
if (runs_->is_keyframe()) {
// If this is a keyframe, we (re-)inject SPS and PPS headers at the start of
// a frame. If subsample info is present, we also update the clear byte
// count for that first subsample.
std::vector<uint8_t> param_sets;
RCHECK(AVC::ConvertConfigToAnnexB(avc_config, &param_sets));
frame_buf->insert(frame_buf->begin(),
param_sets.begin(), param_sets.end());
if (!subsamples->empty())
(*subsamples)[0].clear_bytes += param_sets.size();
}
return true;
}
bool MP4Demuxer::PrepareAACBuffer(const AAC& aac_config,
std::vector<uint8_t>* frame_buf,
std::vector<SubsampleEntry>* subsamples) const {
// Append an ADTS header to every audio sample.
RCHECK(aac_config.ConvertEsdsToADTS(frame_buf));
// As above, adjust subsample information to account for the headers. AAC is
// not required to use subsample encryption, so we may need to add an entry.
if (subsamples->empty()) {
SubsampleEntry entry;
entry.clear_bytes = AAC::kADTSHeaderSize;
entry.cypher_bytes = frame_buf->size() - AAC::kADTSHeaderSize;
subsamples->push_back(entry);
} else {
(*subsamples)[0].clear_bytes += AAC::kADTSHeaderSize;
}
return true;
}
// Demuxes the next sample from the stream, parsing boxes as needed. Sets
// |*end_of_stream| when the end of the stream is reached.
bool MP4Demuxer::Demux(nsAutoPtr<MP4Sample>* sample,
bool* end_of_stream)
{
RCHECK(state_ < kError);
assert(state_ > kWaitingForInit);
*end_of_stream = false;
const int64_t length = stream_->Length();
bool ok = true;
while (ok) {
if (state_ == kParsingBoxes) {
if (stream_offset_ < length) {
ok = ParseBox();
} else {
DMX_LOG("End of stream reached.\n");
*end_of_stream = true;
break;
}
} else {
DCHECK_EQ(kEmittingSamples, state_);
ok = EmitSample(sample);
if (ok && *sample) {
// Got a sample, return.
break;
}
}
}
if (!ok) {
DMX_LOG("Error demuxing stream\n");
ChangeState(kError);
return false;
}
return true;
}
void MP4Demuxer::ChangeState(State new_state) {
DMX_LOG("Demuxer changing state: %d\n", new_state);
state_ = new_state;
if (state_ == kError) {
Reset();
}
}
const AudioDecoderConfig&
MP4Demuxer::AudioConfig() const
{
return audio_config_;
}
const VideoDecoderConfig&
MP4Demuxer::VideoConfig() const
{
return video_config_;
}
bool
MP4Demuxer::HasAudio() const
{
return has_audio_;
}
bool
MP4Demuxer::HasVideo() const
{
return has_video_;
}
bool
MP4Demuxer::CanSeek() const
{
return can_seek_;
}
} // namespace mp4_demuxer

View File

@ -1,159 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_MP4DEMUXER_H
#define MEDIA_MP4_MP4DEMUXER_H
#include "mp4_demuxer/audio_decoder_config.h"
#include "mp4_demuxer/video_decoder_config.h"
#include "mp4_demuxer/decrypt_config.h"
#include "mp4_demuxer/box_definitions.h"
#include "nsAutoPtr.h"
#include <memory>
namespace mp4_demuxer {
class Stream;
class BoxReader;
struct Movie;
class TrackRunIterator;
struct AVCDecoderConfigurationRecord;
class AAC;
// Constructs an MP4 Sample. Note this assumes ownership of the |data| vector
// passed in.
struct MP4Sample {
MP4Sample(Microseconds decode_timestamp,
Microseconds composition_timestamp,
Microseconds duration,
int64_t byte_offset,
std::vector<uint8_t>* data,
TrackType type,
DecryptConfig* decrypt_config,
bool is_sync_point);
~MP4Sample();
const Microseconds decode_timestamp;
const Microseconds composition_timestamp;
const Microseconds duration;
// Offset of sample in byte stream.
const int64_t byte_offset;
// Raw demuxed data.
const nsAutoPtr<std::vector<uint8_t>> data;
// Is this an audio or video sample?
const TrackType type;
const nsAutoPtr<DecryptConfig> decrypt_config;
// Whether this is a keyframe or not.
const bool is_sync_point;
bool is_encrypted() const;
};
class MP4Demuxer {
public:
MP4Demuxer(Stream* stream);
~MP4Demuxer();
bool Init();
// Demuxes the next sample, parsing boxes as needed. Returns false on error;
// sets |*end_of_stream| when the stream is exhausted.
bool Demux(nsAutoPtr<MP4Sample>* sample,
bool* end_of_stream);
bool HasAudio() const;
const AudioDecoderConfig& AudioConfig() const;
bool HasVideo() const;
const VideoDecoderConfig& VideoConfig() const;
Microseconds Duration() const;
bool CanSeek() const;
private:
enum State {
kWaitingForInit,
kParsingBoxes,
kEmittingSamples,
kError
};
// Parses the bitstream. Returns false on error.
bool Parse(nsAutoPtr<MP4Sample>* sample,
bool& end_of_stream);
void ChangeState(State new_state);
// Return true on success, false on failure.
bool ParseBox();
bool ParseMoov(BoxReader* reader);
bool ParseMoof(BoxReader* reader);
void Reset();
bool EmitSample(nsAutoPtr<MP4Sample>* sample);
bool PrepareAACBuffer(const AAC& aac_config,
std::vector<uint8_t>* frame_buf,
std::vector<SubsampleEntry>* subsamples) const;
bool PrepareAVCBuffer(const AVCDecoderConfigurationRecord& avc_config,
std::vector<uint8_t>* frame_buf,
std::vector<SubsampleEntry>* subsamples) const;
State state_;
// Stream abstraction that we read from. It is the responsibility of the
// owner of the demuxer to ensure that it stays alive for the lifetime
// of the demuxer.
Stream* stream_;
int64_t stream_offset_;
Microseconds duration_;
// These two parameters are only valid in the |kEmittingSamples| state.
//
// |moof_head_| is the offset of the start of the most recently parsed moof
// block. All byte offsets in sample information are relative to this offset,
// as mandated by the Media Source spec.
int64_t moof_head_;
// |mdat_tail_| is the stream offset of the end of the current 'mdat' box.
// Valid iff it is greater than the head of the queue.
int64_t mdat_tail_;
nsAutoPtr<Movie> moov_;
nsAutoPtr<TrackRunIterator> runs_;
uint32_t audio_track_id_;
uint32_t video_track_id_;
uint32_t audio_frameno;
uint32_t video_frameno;
AudioDecoderConfig audio_config_;
VideoDecoderConfig video_config_;
bool has_audio_;
bool has_sbr_; // Initialized to false; SBR detection is not yet implemented (see TODO in ParseMoov()).
bool is_audio_track_encrypted_;
bool has_video_;
bool is_video_track_encrypted_;
bool can_seek_;
};
} // mp4_demuxer
#endif // MEDIA_MP4_MP4DEMUXER_H
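For context, a hedged end-to-end sketch of driving the demuxer (the pump function is hypothetical; |stream| is whatever Stream implementation the embedder provides):

bool PumpAllSamples(Stream* stream) {
  MP4Demuxer demuxer(stream);
  if (!demuxer.Init())           // parses boxes up to and including 'moov'
    return false;
  bool end_of_stream = false;
  while (!end_of_stream) {
    nsAutoPtr<MP4Sample> sample;
    if (!demuxer.Demux(&sample, &end_of_stream))
      return false;              // demuxer entered its error state
    if (sample) {
      // sample->data holds the frame (Annex B for video, ADTS for audio);
      // timestamps and duration are in microseconds.
    }
  }
  return true;
}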

View File

@ -1,451 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/track_run_iterator.h"
#include "mp4_demuxer/basictypes.h"
#include "mp4_demuxer/Streams.h"
#include <algorithm>
#include <memory>
#include <assert.h>
using namespace std;
namespace mp4_demuxer {
static const uint32_t kSampleIsDifferenceSampleFlagMask = 0x10000;
struct SampleInfo {
int size;
int duration;
int cts_offset;
bool is_keyframe;
};
struct TrackRunInfo {
uint32_t track_id;
std::vector<SampleInfo> samples;
int64_t timescale;
int64_t start_dts;
int64_t sample_start_offset;
bool is_audio;
const AudioSampleEntry* audio_description;
const VideoSampleEntry* video_description;
int64_t aux_info_start_offset; // Only valid if aux_info_total_size > 0.
int aux_info_default_size;
std::vector<uint8_t> aux_info_sizes; // Populated if default_size == 0.
int aux_info_total_size;
TrackRunInfo();
~TrackRunInfo();
};
TrackRunInfo::TrackRunInfo()
: track_id(0),
timescale(-1),
start_dts(-1),
sample_start_offset(-1),
is_audio(false),
aux_info_start_offset(-1),
aux_info_default_size(-1),
aux_info_total_size(-1) {
}
TrackRunInfo::~TrackRunInfo() {}
Microseconds TimeDeltaFromRational(int64_t numer, int64_t denom) {
DCHECK_LT((numer > 0 ? numer : -numer),
kint64max / MicrosecondsPerSecond);
return MicrosecondsPerSecond * numer / denom;
}
TrackRunIterator::TrackRunIterator(const Movie* moov)
: moov_(moov), sample_offset_(0) {
CHECK(moov);
}
TrackRunIterator::~TrackRunIterator() {}
static void PopulateSampleInfo(const TrackExtends& trex,
const TrackFragmentHeader& tfhd,
const TrackFragmentRun& trun,
const int64_t edit_list_offset,
const uint32_t i,
SampleInfo* sample_info) {
if (i < trun.sample_sizes.size()) {
sample_info->size = trun.sample_sizes[i];
} else if (tfhd.default_sample_size > 0) {
sample_info->size = tfhd.default_sample_size;
} else {
sample_info->size = trex.default_sample_size;
}
if (i < trun.sample_durations.size()) {
sample_info->duration = trun.sample_durations[i];
} else if (tfhd.default_sample_duration > 0) {
sample_info->duration = tfhd.default_sample_duration;
} else {
sample_info->duration = trex.default_sample_duration;
}
if (i < trun.sample_composition_time_offsets.size()) {
sample_info->cts_offset = trun.sample_composition_time_offsets[i];
} else {
sample_info->cts_offset = 0;
}
sample_info->cts_offset += edit_list_offset;
uint32_t flags;
if (i < trun.sample_flags.size()) {
flags = trun.sample_flags[i];
} else if (tfhd.has_default_sample_flags) {
flags = tfhd.default_sample_flags;
} else {
flags = trex.default_sample_flags;
}
sample_info->is_keyframe = !(flags & kSampleIsDifferenceSampleFlagMask);
}
// In well-structured encrypted media, each track run will be immediately
// preceded by its auxiliary information; this is the only optimal storage
// pattern in terms of minimum number of bytes from a serial stream needed to
// begin playback. It also allows us to optimize caching on memory-constrained
// architectures, because we can cache the relatively small auxiliary
// information for an entire run and then discard data from the input stream,
// instead of retaining the entire 'mdat' box.
//
// We optimize for this situation (with no loss of generality) by sorting track
// runs during iteration in order of their first data offset (either sample data
// or auxiliary data).
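// For example (offsets are illustrative only): a run whose aux info starts at
// offset 100 and whose samples start at offset 200 sorts ahead of a run with
// no aux info whose samples start at offset 150, because min(100, 200) = 100
// precedes min(kint64max, 150) = 150.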
class CompareMinTrackRunDataOffset {
public:
bool operator()(const TrackRunInfo& a, const TrackRunInfo& b) {
int64_t a_aux = a.aux_info_total_size ? a.aux_info_start_offset : kint64max;
int64_t b_aux = b.aux_info_total_size ? b.aux_info_start_offset : kint64max;
int64_t a_lesser = std::min(a_aux, a.sample_start_offset);
int64_t a_greater = std::max(a_aux, a.sample_start_offset);
int64_t b_lesser = std::min(b_aux, b.sample_start_offset);
int64_t b_greater = std::max(b_aux, b.sample_start_offset);
if (a_lesser == b_lesser) return a_greater < b_greater;
return a_lesser < b_lesser;
}
};
bool TrackRunIterator::Init(const MovieFragment& moof) {
runs_.clear();
for (size_t i = 0; i < moof.tracks.size(); i++) {
const TrackFragment& traf = moof.tracks[i];
const Track* trak = NULL;
for (size_t t = 0; t < moov_->tracks.size(); t++) {
if (moov_->tracks[t].header.track_id == traf.header.track_id)
trak = &moov_->tracks[t];
}
RCHECK(trak);
const TrackExtends* trex = NULL;
for (size_t t = 0; t < moov_->extends.tracks.size(); t++) {
if (moov_->extends.tracks[t].track_id == traf.header.track_id)
trex = &moov_->extends.tracks[t];
}
RCHECK(trex);
const SampleDescription& stsd =
trak->media.information.sample_table.description;
if (stsd.type != kAudio && stsd.type != kVideo) {
DMX_LOG("Skipping unhandled track type\n");
continue;
}
size_t desc_idx = traf.header.sample_description_index;
if (!desc_idx) desc_idx = trex->default_sample_description_index;
RCHECK(desc_idx > 0); // Descriptions are one-indexed in the file
desc_idx -= 1;
// Process edit list to remove CTS offset introduced in the presence of
// B-frames (those that contain a single edit with a nonnegative media
// time). Other uses of edit lists are not supported, as they are
// both uncommon and better served by higher-level protocols.
int64_t edit_list_offset = 0;
const std::vector<EditListEntry>& edits = trak->edit.list.edits;
if (!edits.empty()) {
if (edits.size() > 1)
DMX_LOG("Multi-entry edit box detected; some components ignored.\n");
if (edits[0].media_time < 0) {
DMX_LOG("Empty edit list entry ignored.\n");
} else {
edit_list_offset = -edits[0].media_time;
}
}
int64_t run_start_dts = traf.decode_time.decode_time;
int sample_count_sum = 0;
for (size_t j = 0; j < traf.runs.size(); j++) {
const TrackFragmentRun& trun = traf.runs[j];
TrackRunInfo tri;
tri.track_id = traf.header.track_id;
tri.timescale = trak->media.header.timescale;
tri.start_dts = run_start_dts;
tri.sample_start_offset = trun.data_offset;
tri.is_audio = (stsd.type == kAudio);
if (tri.is_audio) {
RCHECK(!stsd.audio_entries.empty());
if (desc_idx > stsd.audio_entries.size())
desc_idx = 0;
tri.audio_description = &stsd.audio_entries[desc_idx];
} else {
RCHECK(!stsd.video_entries.empty());
if (desc_idx > stsd.video_entries.size())
desc_idx = 0;
tri.video_description = &stsd.video_entries[desc_idx];
}
// Collect information from the auxiliary_offset entry with the same index
// in the 'saio' container as the current run's index in the 'trun'
// container, if it is present.
if (traf.auxiliary_offset.offsets.size() > j) {
// There should be an auxiliary info entry corresponding to each sample
// in the auxiliary offset entry's corresponding track run.
RCHECK(traf.auxiliary_size.sample_count >=
sample_count_sum + trun.sample_count);
tri.aux_info_start_offset = traf.auxiliary_offset.offsets[j];
tri.aux_info_default_size =
traf.auxiliary_size.default_sample_info_size;
if (tri.aux_info_default_size == 0) {
const std::vector<uint8_t>& sizes =
traf.auxiliary_size.sample_info_sizes;
tri.aux_info_sizes.insert(tri.aux_info_sizes.begin(),
sizes.begin() + sample_count_sum,
sizes.begin() + sample_count_sum + trun.sample_count);
}
// If the default info size is positive, find the total size of the aux
// info block from it, otherwise sum over the individual sizes of each
// aux info entry in the aux_offset entry.
if (tri.aux_info_default_size) {
tri.aux_info_total_size =
tri.aux_info_default_size * trun.sample_count;
} else {
tri.aux_info_total_size = 0;
for (size_t k = 0; k < trun.sample_count; k++) {
tri.aux_info_total_size += tri.aux_info_sizes[k];
}
}
} else {
tri.aux_info_start_offset = -1;
tri.aux_info_total_size = 0;
}
tri.samples.resize(trun.sample_count);
for (size_t k = 0; k < trun.sample_count; k++) {
PopulateSampleInfo(*trex, traf.header, trun, edit_list_offset,
k, &tri.samples[k]);
run_start_dts += tri.samples[k].duration;
}
runs_.push_back(tri);
sample_count_sum += trun.sample_count;
}
}
std::sort(runs_.begin(), runs_.end(), CompareMinTrackRunDataOffset());
run_itr_ = runs_.begin();
ResetRun();
return true;
}
void TrackRunIterator::AdvanceRun() {
++run_itr_;
ResetRun();
}
void TrackRunIterator::ResetRun() {
if (!IsRunValid()) return;
sample_dts_ = run_itr_->start_dts;
sample_offset_ = run_itr_->sample_start_offset;
sample_itr_ = run_itr_->samples.begin();
cenc_info_.clear();
}
void TrackRunIterator::AdvanceSample() {
DCHECK(IsSampleValid());
sample_dts_ += sample_itr_->duration;
sample_offset_ += sample_itr_->size;
++sample_itr_;
}
// This implementation only indicates a need for caching if CENC auxiliary
// info is available in the stream.
bool TrackRunIterator::AuxInfoNeedsToBeCached() {
DCHECK(IsRunValid());
return is_encrypted() && aux_info_size() > 0 && cenc_info_.size() == 0;
}
// This implementation currently only caches CENC auxiliary info.
bool TrackRunIterator::CacheAuxInfo(Stream* stream, int64_t moof_offset) {
RCHECK(AuxInfoNeedsToBeCached());
int64_t offset = aux_info_offset() + moof_offset;
if (stream->Length() - offset < aux_info_size()) {
return false;
}
assert(run_itr_ == runs_.begin());
cenc_info_.resize(run_itr_->samples.size());
int64_t pos = 0;
for (size_t i = 0; i < run_itr_->samples.size(); i++) {
int info_size = run_itr_->aux_info_default_size;
if (!info_size)
info_size = run_itr_->aux_info_sizes[i];
StreamReader reader(stream, offset + pos, info_size);
RCHECK(cenc_info_[i].Parse(track_encryption().default_iv_size, &reader));
pos += info_size;
}
return true;
}
bool TrackRunIterator::IsRunValid() const {
return run_itr_ != runs_.end();
}
bool TrackRunIterator::IsSampleValid() const {
return IsRunValid() && (sample_itr_ != run_itr_->samples.end());
}
// Because tracks are in sorted order and auxiliary information is cached when
// returning samples, it is guaranteed that no data will be required before the
// lesser of the minimum data offset of this track and the next in sequence.
// (The stronger condition - that no data is required before the minimum data
// offset of this track alone - is not guaranteed, because the BMFF spec does
// not have any inter-run ordering restrictions.)
int64_t TrackRunIterator::GetMaxClearOffset() {
int64_t offset = kint64max;
if (IsSampleValid()) {
offset = std::min(offset, sample_offset_);
if (AuxInfoNeedsToBeCached())
offset = std::min(offset, aux_info_offset());
}
if (run_itr_ != runs_.end()) {
std::vector<TrackRunInfo>::const_iterator next_run = run_itr_ + 1;
if (next_run != runs_.end()) {
offset = std::min(offset, next_run->sample_start_offset);
if (next_run->aux_info_total_size)
offset = std::min(offset, next_run->aux_info_start_offset);
}
}
if (offset == kint64max) return 0;
return offset;
}
Microseconds TrackRunIterator::GetMinDecodeTimestamp() {
Microseconds dts = kint64max;  // Sentinel; stays kint64max when there are no runs.
for (size_t i = 0; i < runs_.size(); i++) {
dts = std::min(dts, MicrosecondsFromRational(runs_[i].start_dts,
runs_[i].timescale));
}
return dts;
}
uint32_t TrackRunIterator::track_id() const {
DCHECK(IsRunValid());
return run_itr_->track_id;
}
bool TrackRunIterator::is_encrypted() const {
DCHECK(IsRunValid());
return track_encryption().is_encrypted;
}
int64_t TrackRunIterator::aux_info_offset() const {
return run_itr_->aux_info_start_offset;
}
int TrackRunIterator::aux_info_size() const {
return run_itr_->aux_info_total_size;
}
bool TrackRunIterator::is_audio() const {
DCHECK(IsRunValid());
return run_itr_->is_audio;
}
const AudioSampleEntry& TrackRunIterator::audio_description() const {
DCHECK(is_audio());
DCHECK(run_itr_->audio_description);
return *run_itr_->audio_description;
}
const VideoSampleEntry& TrackRunIterator::video_description() const {
DCHECK(!is_audio());
DCHECK(run_itr_->video_description);
return *run_itr_->video_description;
}
int64_t TrackRunIterator::sample_offset() const {
DCHECK(IsSampleValid());
return sample_offset_;
}
int TrackRunIterator::sample_size() const {
DCHECK(IsSampleValid());
return sample_itr_->size;
}
Microseconds TrackRunIterator::dts() const {
DCHECK(IsSampleValid());
return MicrosecondsFromRational(sample_dts_, run_itr_->timescale);
}
Microseconds TrackRunIterator::cts() const {
DCHECK(IsSampleValid());
return MicrosecondsFromRational(sample_dts_ + sample_itr_->cts_offset,
run_itr_->timescale);
}
Microseconds TrackRunIterator::duration() const {
DCHECK(IsSampleValid());
return MicrosecondsFromRational(sample_itr_->duration, run_itr_->timescale);
}
bool TrackRunIterator::is_keyframe() const {
DCHECK(IsSampleValid());
return sample_itr_->is_keyframe;
}
const TrackEncryption& TrackRunIterator::track_encryption() const {
if (is_audio())
return audio_description().sinf.info.track_encryption;
return video_description().sinf.info.track_encryption;
}
void TrackRunIterator::GetDecryptConfig(nsAutoPtr<DecryptConfig>& config) {
size_t sample_idx = sample_itr_ - run_itr_->samples.begin();
DCHECK(sample_idx < cenc_info_.size());
const FrameCENCInfo& cenc_info = cenc_info_[sample_idx];
DCHECK(is_encrypted() && !AuxInfoNeedsToBeCached());
if (!cenc_info.subsamples.empty() &&
(cenc_info.GetTotalSizeOfSubsamples() !=
static_cast<size_t>(sample_size()))) {
DMX_LOG("Incorrect CENC subsample size.\n");
return;
}
const std::vector<uint8_t>& kid = track_encryption().default_kid;
config = new DecryptConfig(
std::string(reinterpret_cast<const char*>(&kid[0]), kid.size()),
std::string(reinterpret_cast<const char*>(cenc_info.iv),
arraysize(cenc_info.iv)),
0, // No offset to start of media data in MP4 using CENC.
cenc_info.subsamples);
}
} // namespace mp4_demuxer
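For reference, a caller drives this iterator roughly as follows. This is only an illustrative sketch: DemuxFragment is a hypothetical helper, |stream| and |moof_offset| are supplied by the caller, and the reader code removed by this patch may have differed in detail.
void DemuxFragment(TrackRunIterator* iter, Stream* stream, int64_t moof_offset) {
  while (iter->IsRunValid()) {
    // For encrypted runs, the CENC aux info must be cached before any sample
    // in the run is consumed. (Note: the implementation above currently
    // asserts that this happens while positioned on the first run.)
    if (iter->AuxInfoNeedsToBeCached() &&
        !iter->CacheAuxInfo(stream, moof_offset)) {
      return;  // Not enough data buffered yet; retry later.
    }
    while (iter->IsSampleValid()) {
      // sample_offset()/sample_size() locate the sample bytes in the stream;
      // dts()/cts()/duration() give its timing in microseconds.
      if (iter->is_encrypted()) {
        nsAutoPtr<DecryptConfig> config;
        iter->GetDecryptConfig(config);
        // ... pass |config| along with the sample bytes to the decryptor ...
      }
      iter->AdvanceSample();
    }
    iter->AdvanceRun();
  }
}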

View File

@ -1,107 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_MP4_TRACK_RUN_ITERATOR_H_
#define MEDIA_MP4_TRACK_RUN_ITERATOR_H_
#include <vector>
#include <memory>
#include "mp4_demuxer/box_definitions.h"
#include "mp4_demuxer/cenc.h"
#include "nsAutoPtr.h"
namespace mp4_demuxer {
class DecryptConfig;
Microseconds MicrosecondsFromRational(int64_t numer, int64_t denom);
struct SampleInfo;
struct TrackRunInfo;
class TrackRunIterator {
public:
// Create a new TrackRunIterator. A reference to |moov| will be retained for
// the lifetime of this object.
TrackRunIterator(const Movie* moov);
~TrackRunIterator();
void Reset();
// Sets up the iterator to handle all the runs from the current fragment.
bool Init(const MovieFragment& moof);
// Returns true if the properties of the current run or sample are valid.
bool IsRunValid() const;
bool IsSampleValid() const;
// Advance the properties to refer to the next run or sample. Requires that
// the current sample be valid.
void AdvanceRun();
void AdvanceSample();
// Returns true if this track run has auxiliary information and has not yet
// been cached. Only valid if IsRunValid().
bool AuxInfoNeedsToBeCached();
// Caches the CENC auxiliary information for the current run by reading it
// from |stream| at aux_info_offset() + |moof_offset|. Returns true on
// success, false on error.
//bool CacheAuxInfo(const uint8_t* buf, int size);
bool CacheAuxInfo(Stream* stream, int64_t moof_offset);
// Returns the maximum buffer location at which no data earlier in the stream
// will be required in order to read the current or any subsequent sample. You
// may clear all data up to this offset before reading the current sample
// safely. Result is in the same units as offset() (for Media Source this is
// in bytes past the head of the MOOF box).
int64_t GetMaxClearOffset();
// Returns the minimum decode timestamp across all runs (kint64max if no runs are present).
Microseconds GetMinDecodeTimestamp();
// Property of the current run. Only valid if IsRunValid().
uint32_t track_id() const;
int64_t aux_info_offset() const;
int aux_info_size() const;
bool is_encrypted() const;
bool is_audio() const;
// Only one is valid, based on the value of is_audio().
const AudioSampleEntry& audio_description() const;
const VideoSampleEntry& video_description() const;
// Properties of the current sample. Only valid if IsSampleValid().
int64_t sample_offset() const;
int sample_size() const;
Microseconds dts() const;
Microseconds cts() const;
Microseconds duration() const;
bool is_keyframe() const;
// Only call when is_encrypted() is true and AuxInfoNeedsToBeCached() is
// false. Ownership of the resulting DecryptConfig passes to |config|.
void GetDecryptConfig(nsAutoPtr<DecryptConfig>& config);
private:
void ResetRun();
const TrackEncryption& track_encryption() const;
const Movie* moov_;
std::vector<TrackRunInfo> runs_;
std::vector<TrackRunInfo>::const_iterator run_itr_;
std::vector<SampleInfo>::const_iterator sample_itr_;
std::vector<FrameCENCInfo> cenc_info_;
int64_t sample_dts_;
int64_t sample_offset_;
DISALLOW_COPY_AND_ASSIGN(TrackRunIterator);
};
} // namespace mp4_demuxer
#endif // MEDIA_MP4_TRACK_RUN_ITERATOR_H_
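The clear-offset query was intended for buffer eviction. A minimal sketch, assuming a hypothetical InputBuffer type with an EvictDataBefore method (neither is part of this tree):
// Illustrative only: drop buffered bytes that no current or later sample needs.
void TrimBuffer(TrackRunIterator* iter, int64_t moof_offset, InputBuffer* buffer) {
  // GetMaxClearOffset() is relative to the head of the 'moof' box, so add the
  // box's absolute position before evicting.
  buffer->EvictDataBefore(moof_offset + iter->GetMaxClearOffset());
}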

View File

@ -1,159 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/video_decoder_config.h"
#include <sstream>
#include <string.h>
namespace mp4_demuxer {
VideoDecoderConfig::VideoDecoderConfig()
: codec_(kUnknownVideoCodec),
profile_(VIDEO_CODEC_PROFILE_UNKNOWN),
format_(VideoFrameFormat::INVALID),
is_encrypted_(false) {
}
VideoDecoderConfig::VideoDecoderConfig(VideoCodec codec,
VideoCodecProfile profile,
VideoFrameFormat format,
const IntSize& coded_size,
const IntRect& visible_rect,
const IntSize& natural_size,
const uint8_t* extra_data,
size_t extra_data_size,
bool is_encrypted) {
Initialize(codec, profile, format, coded_size, visible_rect, natural_size,
extra_data, extra_data_size, is_encrypted, true);
}
VideoDecoderConfig::~VideoDecoderConfig() {}
// Some videos just want to watch the world burn, with a height of 0; cap the
// resulting "infinite" aspect ratio.
static const int kInfiniteRatio = 99999;
// Common aspect ratios (multiplied by 100 and truncated) used for histogramming
// video sizes. These were taken on 20111103 from
// http://wikipedia.org/wiki/Aspect_ratio_(image)#Previous_and_currently_used_aspect_ratios
static const int kCommonAspectRatios100[] = {
100, 115, 133, 137, 143, 150, 155, 160, 166, 175, 177, 185, 200, 210, 220,
221, 235, 237, 240, 255, 259, 266, 276, 293, 400, 1200, kInfiniteRatio,
};
void VideoDecoderConfig::Initialize(VideoCodec codec,
VideoCodecProfile profile,
VideoFrameFormat format,
const IntSize& coded_size,
const IntRect& visible_rect,
const IntSize& natural_size,
const uint8_t* extra_data,
size_t extra_data_size,
bool is_encrypted,
bool record_stats) {
CHECK((extra_data_size != 0) == (extra_data != NULL));
codec_ = codec;
profile_ = profile;
format_ = format;
coded_size_ = coded_size;
visible_rect_ = visible_rect;
natural_size_ = natural_size;
extra_data_.assign(extra_data, extra_data + extra_data_size);
is_encrypted_ = is_encrypted;
}
bool VideoDecoderConfig::IsValidConfig() const {
return codec_ != kUnknownVideoCodec &&
natural_size_.width() > 0 &&
natural_size_.height() > 0 &&
// Copied from:
// VideoFrame::IsValidConfig(format_, coded_size_, visible_rect_, natural_size_)
format_ != VideoFrameFormat::INVALID &&
!coded_size_.IsEmpty() &&
coded_size_.GetArea() <= kMaxCanvas &&
coded_size_.width() <= kMaxDimension &&
coded_size_.height() <= kMaxDimension &&
!visible_rect_.IsEmpty() &&
visible_rect_.x() >= 0 && visible_rect_.y() >= 0 &&
visible_rect_.right() <= coded_size_.width() &&
visible_rect_.bottom() <= coded_size_.height() &&
!natural_size_.IsEmpty() &&
natural_size_.GetArea() <= kMaxCanvas &&
natural_size_.width() <= kMaxDimension &&
natural_size_.height() <= kMaxDimension;
}
bool VideoDecoderConfig::Matches(const VideoDecoderConfig& config) const {
return ((codec() == config.codec()) &&
(format() == config.format()) &&
(profile() == config.profile()) &&
(coded_size() == config.coded_size()) &&
(visible_rect() == config.visible_rect()) &&
(natural_size() == config.natural_size()) &&
(extra_data_size() == config.extra_data_size()) &&
(!extra_data() || !memcmp(extra_data(), config.extra_data(),
extra_data_size())) &&
(is_encrypted() == config.is_encrypted()));
}
std::string VideoDecoderConfig::AsHumanReadableString() const {
std::ostringstream s;
s << "codec: " << codec()
<< " format: " << format()
<< " profile: " << profile()
<< " coded size: [" << coded_size().width()
<< "," << coded_size().height() << "]"
<< " visible rect: [" << visible_rect().x()
<< "," << visible_rect().y()
<< "," << visible_rect().width()
<< "," << visible_rect().height() << "]"
<< " natural size: [" << natural_size().width()
<< "," << natural_size().height() << "]"
<< " has extra data? " << (extra_data() ? "true" : "false")
<< " encrypted? " << (is_encrypted() ? "true" : "false");
return s.str();
}
VideoCodec VideoDecoderConfig::codec() const {
return codec_;
}
VideoCodecProfile VideoDecoderConfig::profile() const {
return profile_;
}
VideoFrameFormat VideoDecoderConfig::format() const {
return format_;
}
IntSize VideoDecoderConfig::coded_size() const {
return coded_size_;
}
IntRect VideoDecoderConfig::visible_rect() const {
return visible_rect_;
}
IntSize VideoDecoderConfig::natural_size() const {
return natural_size_;
}
const uint8_t* VideoDecoderConfig::extra_data() const {
if (extra_data_.empty())
return NULL;
return &extra_data_[0];
}
size_t VideoDecoderConfig::extra_data_size() const {
return extra_data_.size();
}
bool VideoDecoderConfig::is_encrypted() const {
return is_encrypted_;
}
} // namespace mp4_demuxer
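As a usage sketch (the dimensions are example values, and the IntSize/IntRect constructor forms from basictypes.h are assumed here), a demuxer would populate and sanity-check a config along these lines:
bool MakeH264Config(const uint8_t* avcc, size_t avcc_size,
                    VideoDecoderConfig* out) {
  // |avcc|/|avcc_size| would come from the parsed 'avcC' box of the sample
  // entry; 1280x720 with no cropping or anamorphic scaling is illustrative.
  out->Initialize(kCodecH264, H264PROFILE_MAIN, VideoFrameFormat::I420,
                  IntSize(1280, 720),        // coded size
                  IntRect(0, 0, 1280, 720),  // visible rect
                  IntSize(1280, 720),        // natural size
                  avcc, avcc_size,
                  false,   // is_encrypted
                  false);  // record_stats
  return out->IsValidConfig();
}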

View File

@ -1,171 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
#define MEDIA_BASE_VIDEO_DECODER_CONFIG_H_
#include <string>
#include <vector>
#include "mp4_demuxer/basictypes.h"
namespace mp4_demuxer {
enum VideoCodec {
// These values are histogrammed over time; do not change their ordinal
// values. When deleting a codec replace it with a dummy value; when adding a
// codec, do so at the bottom (and update kVideoCodecMax).
kUnknownVideoCodec = 0,
kCodecH264,
kCodecVC1,
kCodecMPEG2,
kCodecMPEG4,
kCodecTheora,
kCodecVP8,
kCodecVP9,
// DO NOT ADD RANDOM VIDEO CODECS!
//
// The only acceptable time to add a new codec is if there is production code
// that uses said codec in the same CL.
kVideoCodecMax = kCodecVP9 // Must equal the last "real" codec above.
};
// Video stream profile. This *must* match PP_VideoDecoder_Profile.
// (enforced in webkit/plugins/ppapi/ppb_video_decoder_impl.cc)
enum VideoCodecProfile {
// Keep the values in this enum unique, as they imply format (h.264 vs. VP8,
// for example), and keep the values for a particular format grouped
// together for clarity.
VIDEO_CODEC_PROFILE_UNKNOWN = -1,
H264PROFILE_MIN = 0,
H264PROFILE_BASELINE = H264PROFILE_MIN,
H264PROFILE_MAIN = 1,
H264PROFILE_EXTENDED = 2,
H264PROFILE_HIGH = 3,
H264PROFILE_HIGH10PROFILE = 4,
H264PROFILE_HIGH422PROFILE = 5,
H264PROFILE_HIGH444PREDICTIVEPROFILE = 6,
H264PROFILE_SCALABLEBASELINE = 7,
H264PROFILE_SCALABLEHIGH = 8,
H264PROFILE_STEREOHIGH = 9,
H264PROFILE_MULTIVIEWHIGH = 10,
H264PROFILE_MAX = H264PROFILE_MULTIVIEWHIGH,
VP8PROFILE_MIN = 11,
VP8PROFILE_MAIN = VP8PROFILE_MIN,
VP8PROFILE_MAX = VP8PROFILE_MAIN,
VP9PROFILE_MIN = 12,
VP9PROFILE_MAIN = VP9PROFILE_MIN,
VP9PROFILE_MAX = VP9PROFILE_MAIN,
VIDEO_CODEC_PROFILE_MAX = VP9PROFILE_MAX,
};
// Surface formats roughly based on FOURCC labels, see:
// http://www.fourcc.org/rgb.php
// http://www.fourcc.org/yuv.php
enum VideoFrameFormat { // VideoFrame::Format
INVALID = 0, // Invalid format value. Used for error reporting.
RGB32 = 4, // 32bpp RGB packed with extra byte 8:8:8
YV12 = 6, // 12bpp YVU planar 1x1 Y, 2x2 VU samples
YV16 = 7, // 16bpp YVU planar 1x1 Y, 2x1 VU samples
EMPTY = 9, // An empty frame.
I420 = 11, // 12bpp YUV planar 1x1 Y, 2x2 UV samples.
NATIVE_TEXTURE = 12, // Native texture. Pixel-format agnostic.
#if defined(GOOGLE_TV)
HOLE = 13, // Hole frame.
#endif
YV12A = 14, // 20bpp YUVA planar 1x1 Y, 2x2 VU, 1x1 A samples.
};
class VideoDecoderConfig {
public:
// Constructs an uninitialized object. Clients should call Initialize() with
// appropriate values before using.
VideoDecoderConfig();
// Constructs an initialized object. It is acceptable to pass in NULL for
// |extra_data|, otherwise the memory is copied.
VideoDecoderConfig(VideoCodec codec,
VideoCodecProfile profile,
VideoFrameFormat format,
const IntSize& coded_size,
const IntRect& visible_rect,
const IntSize& natural_size,
const uint8_t* extra_data, size_t extra_data_size,
bool is_encrypted);
~VideoDecoderConfig();
// Resets the internal state of this object.
void Initialize(VideoCodec codec,
VideoCodecProfile profile,
VideoFrameFormat format,
const IntSize& coded_size,
const IntRect& visible_rect,
const IntSize& natural_size,
const uint8_t* extra_data, size_t extra_data_size,
bool is_encrypted,
bool record_stats);
// Returns true if this object has appropriate configuration values, false
// otherwise.
bool IsValidConfig() const;
// Returns true if all fields in |config| match this config.
// Note: The contents of |extra_data_| are compared not the raw pointers.
bool Matches(const VideoDecoderConfig& config) const;
// Returns a human-readable string describing |*this|. For debugging & test
// output only.
std::string AsHumanReadableString() const;
VideoCodec codec() const;
VideoCodecProfile profile() const;
// Video format used to determine YUV buffer sizes.
VideoFrameFormat format() const;
// Width and height of video frame immediately post-decode. Not all pixels
// in this region are valid.
IntSize coded_size() const;
// Region of |coded_size_| that is visible.
IntRect visible_rect() const;
// Final visible width and height of a video frame with aspect ratio taken
// into account.
IntSize natural_size() const;
// Optional byte data required to initialize video decoders, such as H.264
// AVCC data.
const uint8_t* extra_data() const;
size_t extra_data_size() const;
// Whether the video stream is potentially encrypted.
// Note that in a potentially encrypted video stream, individual buffers
// can be encrypted or not encrypted.
bool is_encrypted() const;
private:
VideoCodec codec_;
VideoCodecProfile profile_;
VideoFrameFormat format_;
IntSize coded_size_;
IntRect visible_rect_;
IntSize natural_size_;
std::vector<uint8_t> extra_data_;
bool is_encrypted_;
// Not using DISALLOW_COPY_AND_ASSIGN here intentionally to allow the compiler
// generated copy constructor and assignment operator. Since the extra data is
// typically small, the performance impact is minimal.
};
} // namespace mp4_demuxer
#endif // MEDIA_BASE_VIDEO_DECODER_CONFIG_H_

View File

@ -1,299 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "mp4_demuxer/video_util.h"
#include <cmath>
namespace mp4_demuxer {
IntSize GetNaturalSize(const IntSize& visible_size,
int aspect_ratio_numerator,
int aspect_ratio_denominator) {
if (aspect_ratio_denominator == 0 ||
aspect_ratio_numerator < 0 ||
aspect_ratio_denominator < 0)
return IntSize();
double aspect_ratio = aspect_ratio_numerator /
static_cast<double>(aspect_ratio_denominator);
int width = floor(visible_size.width() * aspect_ratio + 0.5);
int height = visible_size.height();
// An even width makes things easier for YV12 and appears to be the behavior
// expected by WebKit layout tests.
return IntSize(width & ~1, height);
}
/*
void CopyPlane(size_t plane, const uint8_t* source, int stride, int rows,
VideoFrame* frame) {
uint8_t* dest = frame->data(plane);
int dest_stride = frame->stride(plane);
// Clamp in case source frame has smaller stride.
int bytes_to_copy_per_row = std::min(frame->row_bytes(plane), stride);
// Clamp in case source frame has smaller height.
int rows_to_copy = std::min(frame->rows(plane), rows);
// Copy!
for (int row = 0; row < rows_to_copy; ++row) {
memcpy(dest, source, bytes_to_copy_per_row);
source += stride;
dest += dest_stride;
}
}
void CopyYPlane(const uint8_t* source, int stride, int rows, VideoFrame* frame) {
CopyPlane(VideoFrame::kYPlane, source, stride, rows, frame);
}
void CopyUPlane(const uint8_t* source, int stride, int rows, VideoFrame* frame) {
CopyPlane(VideoFrame::kUPlane, source, stride, rows, frame);
}
void CopyVPlane(const uint8_t* source, int stride, int rows, VideoFrame* frame) {
CopyPlane(VideoFrame::kVPlane, source, stride, rows, frame);
}
void CopyAPlane(const uint8_t* source, int stride, int rows, VideoFrame* frame) {
CopyPlane(VideoFrame::kAPlane, source, stride, rows, frame);
}
void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame) {
int rows_to_clear = std::min(frame->rows(VideoFrame::kAPlane), rows);
memset(frame->data(VideoFrame::kAPlane), 255,
frame->stride(VideoFrame::kAPlane) * rows_to_clear);
}
void FillYUV(VideoFrame* frame, uint8_t y, uint8_t u, uint8_t v) {
// Fill the Y plane.
uint8_t* y_plane = frame->data(VideoFrame::kYPlane);
int y_rows = frame->rows(VideoFrame::kYPlane);
int y_row_bytes = frame->row_bytes(VideoFrame::kYPlane);
for (int i = 0; i < y_rows; ++i) {
memset(y_plane, y, y_row_bytes);
y_plane += frame->stride(VideoFrame::kYPlane);
}
// Fill the U and V planes.
uint8_t* u_plane = frame->data(VideoFrame::kUPlane);
uint8_t* v_plane = frame->data(VideoFrame::kVPlane);
int uv_rows = frame->rows(VideoFrame::kUPlane);
int u_row_bytes = frame->row_bytes(VideoFrame::kUPlane);
int v_row_bytes = frame->row_bytes(VideoFrame::kVPlane);
for (int i = 0; i < uv_rows; ++i) {
memset(u_plane, u, u_row_bytes);
memset(v_plane, v, v_row_bytes);
u_plane += frame->stride(VideoFrame::kUPlane);
v_plane += frame->stride(VideoFrame::kVPlane);
}
}
static void LetterboxPlane(VideoFrame* frame,
int plane,
const gfx::Rect& view_area,
uint8_t fill_byte) {
uint8_t* ptr = frame->data(plane);
const int rows = frame->rows(plane);
const int row_bytes = frame->row_bytes(plane);
const int stride = frame->stride(plane);
CHECK_GE(stride, row_bytes);
CHECK_GE(view_area.x(), 0);
CHECK_GE(view_area.y(), 0);
CHECK_LE(view_area.right(), row_bytes);
CHECK_LE(view_area.bottom(), rows);
int y = 0;
for (; y < view_area.y(); y++) {
memset(ptr, fill_byte, row_bytes);
ptr += stride;
}
if (view_area.width() < row_bytes) {
for (; y < view_area.bottom(); y++) {
if (view_area.x() > 0) {
memset(ptr, fill_byte, view_area.x());
}
if (view_area.right() < row_bytes) {
memset(ptr + view_area.right(),
fill_byte,
row_bytes - view_area.right());
}
ptr += stride;
}
} else {
y += view_area.height();
ptr += stride * view_area.height();
}
for (; y < rows; y++) {
memset(ptr, fill_byte, row_bytes);
ptr += stride;
}
}
void LetterboxYUV(VideoFrame* frame, const gfx::Rect& view_area) {
DCHECK(!(view_area.x() & 1));
DCHECK(!(view_area.y() & 1));
DCHECK(!(view_area.width() & 1));
DCHECK(!(view_area.height() & 1));
DCHECK_EQ(frame->format(), VideoFrame::YV12);
LetterboxPlane(frame, VideoFrame::kYPlane, view_area, 0x00);
gfx::Rect half_view_area(view_area.x() / 2,
view_area.y() / 2,
view_area.width() / 2,
view_area.height() / 2);
LetterboxPlane(frame, VideoFrame::kUPlane, half_view_area, 0x80);
LetterboxPlane(frame, VideoFrame::kVPlane, half_view_area, 0x80);
}
void RotatePlaneByPixels(
const uint8_t* src,
uint8_t* dest,
int width,
int height,
int rotation, // Clockwise.
bool flip_vert,
bool flip_horiz) {
DCHECK((width > 0) && (height > 0) &&
((width & 1) == 0) && ((height & 1) == 0) &&
(rotation >= 0) && (rotation < 360) && (rotation % 90 == 0));
// Consolidate cases. Only 0 and 90 are left.
if (rotation == 180 || rotation == 270) {
rotation -= 180;
flip_vert = !flip_vert;
flip_horiz = !flip_horiz;
}
int num_rows = height;
int num_cols = width;
int src_stride = width;
// During pixel copying, the corresponding incremental of dest pointer
// when src pointer moves to next row.
int dest_row_step = width;
// During pixel copying, the corresponding incremental of dest pointer
// when src pointer moves to next column.
int dest_col_step = 1;
if (rotation == 0) {
if (flip_horiz) {
// Use pixel copying.
dest_col_step = -1;
if (flip_vert) {
// Rotation 180.
dest_row_step = -width;
dest += height * width - 1;
} else {
dest += width - 1;
}
} else {
if (flip_vert) {
// Fast copy by rows.
dest += width * (height - 1);
for (int row = 0; row < height; ++row) {
memcpy(dest, src, width);
src += width;
dest -= width;
}
} else {
memcpy(dest, src, width * height);
}
return;
}
} else if (rotation == 90) {
int offset;
if (width > height) {
offset = (width - height) / 2;
src += offset;
num_rows = num_cols = height;
} else {
offset = (height - width) / 2;
src += width * offset;
num_rows = num_cols = width;
}
dest_col_step = (flip_vert ? -width : width);
dest_row_step = (flip_horiz ? 1 : -1);
if (flip_horiz) {
if (flip_vert) {
dest += (width > height ? width * (height - 1) + offset :
width * (height - offset - 1));
} else {
dest += (width > height ? offset : width * offset);
}
} else {
if (flip_vert) {
dest += (width > height ? width * height - offset - 1 :
width * (height - offset) - 1);
} else {
dest += (width > height ? width - offset - 1 :
width * (offset + 1) - 1);
}
}
} else {
NOTREACHED();
}
// Copy pixels.
for (int row = 0; row < num_rows; ++row) {
const uint8_t* src_ptr = src;
uint8_t* dest_ptr = dest;
for (int col = 0; col < num_cols; ++col) {
*dest_ptr = *src_ptr++;
dest_ptr += dest_col_step;
}
src += src_stride;
dest += dest_row_step;
}
}
gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
const IntSize& content) {
int64_t x = static_cast<int64_t>(content.width()) * bounds.height();
int64_t y = static_cast<int64_t>(content.height()) * bounds.width();
IntSize letterbox(bounds.width(), bounds.height());
if (y < x)
letterbox.set_height(static_cast<int>(y / content.width()));
else
letterbox.set_width(static_cast<int>(x / content.height()));
gfx::Rect result = bounds;
result.ClampToCenteredSize(letterbox);
return result;
}
void CopyRGBToVideoFrame(const uint8_t* source,
int stride,
const gfx::Rect& region_in_frame,
VideoFrame* frame) {
const int kY = VideoFrame::kYPlane;
const int kU = VideoFrame::kUPlane;
const int kV = VideoFrame::kVPlane;
CHECK_EQ(frame->stride(kU), frame->stride(kV));
const int uv_stride = frame->stride(kU);
if (region_in_frame != gfx::Rect(frame->coded_size())) {
LetterboxYUV(frame, region_in_frame);
}
const int y_offset = region_in_frame.x()
+ (region_in_frame.y() * frame->stride(kY));
const int uv_offset = region_in_frame.x() / 2
+ (region_in_frame.y() / 2 * uv_stride);
ConvertRGB32ToYUV(source,
frame->data(kY) + y_offset,
frame->data(kU) + uv_offset,
frame->data(kV) + uv_offset,
region_in_frame.width(),
region_in_frame.height(),
stride,
frame->stride(kY),
uv_stride);
}
*/
} // namespace mp4_demuxer
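A worked example of the one function left live in this file (the numbers are illustrative, and the IntSize(width, height) constructor form from basictypes.h is assumed):
// 720x480 visible pixels with an 8:9 pixel aspect ratio:
//   width  = floor(720 * 8 / 9 + 0.5) & ~1 = 640
//   height = 480 (unchanged)
IntSize natural = GetNaturalSize(IntSize(720, 480), 8, 9);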

View File

@ -1,86 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_VIDEO_UTIL_H_
#define MEDIA_BASE_VIDEO_UTIL_H_
#include "mp4_demuxer/basictypes.h"
namespace mp4_demuxer {
class VideoFrame;
// Computes the size of |visible_size| for a given aspect ratio.
IntSize GetNaturalSize(const IntSize& visible_size,
int aspect_ratio_numerator,
int aspect_ratio_denominator);
/*
// Copies a plane of YUV(A) source into a VideoFrame object, taking into account
// source and destinations dimensions.
//
// NOTE: rows is *not* the same as height!
void CopyYPlane(const uint8_t* source, int stride, int rows,
VideoFrame* frame);
void CopyUPlane(const uint8_t* source, int stride, int rows,
VideoFrame* frame);
void CopyVPlane(const uint8_t* source, int stride, int rows,
VideoFrame* frame);
void CopyAPlane(const uint8_t* source, int stride, int rows,
VideoFrame* frame);
// Sets alpha plane values to be completely opaque (all 255's).
void MakeOpaqueAPlane(int stride, int rows, VideoFrame* frame);
// |plane| is one of VideoFrame::kYPlane, VideoFrame::kUPlane,
// VideoFrame::kVPlane or VideoFrame::kAPlane
void CopyPlane(size_t plane, const uint8_t* source, int stride,
int rows, VideoFrame* frame);
// Fills |frame| containing YUV data to the given color values.
void FillYUV(VideoFrame* frame, uint8_t y, uint8_t u, uint8_t v);
// Creates a border in |frame| such that all pixels outside of
// |view_area| are black. The size and position of |view_area|
// must be even to align correctly with the color planes.
// Only YV12 format video frames are currently supported.
void LetterboxYUV(VideoFrame* frame,
const gfx::Rect& view_area);
// Rotates |src| plane by |rotation| degree with possible flipping vertically
// and horizontally.
// |rotation| is limited to {0, 90, 180, 270}.
// |width| and |height| are expected to be even numbers.
// Both |src| and |dest| planes are packed and have same |width| and |height|.
// When |width| != |height| and rotated by 90/270, only the maximum square
// portion located in the center is rotated. For example, for width=640 and
// height=480, the rotated area is 480x480 located from row 0 through 479 and
// from column 80 through 559. The leftmost and rightmost 80 columns are
// ignored for both |src| and |dest|.
// The caller is responsible for blanking out the margin area.
void RotatePlaneByPixels(
const uint8_t* src,
uint8_t* dest,
int width,
int height,
int rotation, // Clockwise.
bool flip_vert,
bool flip_horiz);
// Return the largest centered rectangle with the same aspect ratio of |content|
// that fits entirely inside of |bounds|.
gfx::Rect ComputeLetterboxRegion(const gfx::Rect& bounds,
const IntSize& content);
// Copy an RGB bitmap into the specified |region_in_frame| of a YUV video frame.
// Fills the regions outside |region_in_frame| with black.
void CopyRGBToVideoFrame(const uint8_t* source,
int stride,
const gfx::Rect& region_in_frame,
VideoFrame* frame);
*/
} // namespace mp4_demuxer
#endif // MEDIA_BASE_VIDEO_UTIL_H_

View File

@ -10,46 +10,14 @@ EXPORTS += [
'PlatformDecoderModule.h',
]
#EXPORTS.mp4_demuxer += [
# 'demuxer/aac.h',
# 'demuxer/audio_decoder_config.h',
# 'demuxer/avc.h',
# 'demuxer/basictypes.h',
# 'demuxer/bit_reader.h',
# 'demuxer/box_definitions.h',
# 'demuxer/box_reader.h',
# 'demuxer/cenc.h',
# 'demuxer/channel_layout.h',
# 'demuxer/decrypt_config.h',
# 'demuxer/es_descriptor.h',
# 'demuxer/fourccs.h',
# 'demuxer/mp4_demuxer.h',
# 'demuxer/Streams.h',
# 'demuxer/track_run_iterator.h',
# 'demuxer/video_decoder_config.h',
# 'demuxer/video_util.h',
#]
#UNIFIED_SOURCES += [
SOURCES += [
UNIFIED_SOURCES += [
'BlankDecoderModule.cpp',
# 'demuxer/aac.cc',
# 'demuxer/audio_decoder_config.cc',
# 'demuxer/avc.cc',
# 'demuxer/bit_reader.cc',
# 'demuxer/box_definitions.cc',
# 'demuxer/box_reader.cc',
# 'demuxer/cenc.cc',
# 'demuxer/channel_layout.cc',
# 'demuxer/decrypt_config.cc',
# 'demuxer/es_descriptor.cc',
# 'demuxer/mp4_demuxer.cc',
# 'demuxer/track_run_iterator.cc',
# 'demuxer/video_decoder_config.cc',
# 'demuxer/video_util.cc',
'PlatformDecoderModule.cpp',
]
SOURCES += [
'MP4Decoder.cpp',
'MP4Reader.cpp',
'PlatformDecoderModule.cpp',
]
if CONFIG['MOZ_WMF']: