2014-07-24 16:30:00 -07:00
|
|
|
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
|
|
|
/* vim:set ts=2 sw=2 sts=2 et cindent: */
|
|
|
|
/* This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
|
|
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
|
|
|
|
|
|
|
#include "AppleUtils.h"
|
|
|
|
#include "MP4Reader.h"
|
|
|
|
#include "MP4Decoder.h"
|
2014-11-26 18:23:15 -08:00
|
|
|
#include "mp4_demuxer/Adts.h"
|
2014-07-24 16:30:00 -07:00
|
|
|
#include "mp4_demuxer/DecoderData.h"
|
|
|
|
#include "AppleATDecoder.h"
|
|
|
|
#include "prlog.h"
|
|
|
|
|
|
|
|
#ifdef PR_LOGGING
|
2014-08-21 16:20:00 -07:00
|
|
|
PRLogModuleInfo* GetAppleMediaLog();
|
|
|
|
#define LOG(...) PR_LOG(GetAppleMediaLog(), PR_LOG_DEBUG, (__VA_ARGS__))
|
2014-07-24 16:30:00 -07:00
|
|
|
#else
|
|
|
|
#define LOG(...)
|
|
|
|
#endif
|
2014-11-26 18:23:15 -08:00
|
|
|
#define FourCC2Str(n) ((char[5]){(char)(n >> 24), (char)(n >> 16), (char)(n >> 8), (char)(n), 0})
|
2014-07-24 16:30:00 -07:00
|
|
|
|
|
|
|
namespace mozilla {
|
|
|
|
|
|
|
|
AppleATDecoder::AppleATDecoder(const mp4_demuxer::AudioDecoderConfig& aConfig,
                               MediaTaskQueue* aAudioTaskQueue,
                               MediaDataDecoderCallback* aCallback)
  : mConfig(aConfig)
  , mFileStreamError(false)
  , mTaskQueue(aAudioTaskQueue)
  , mCallback(aCallback)
  , mConverter(nullptr)
  , mStream(nullptr)
{
  MOZ_COUNT_CTOR(AppleATDecoder);
  LOG("Creating Apple AudioToolbox decoder");
  LOG("Audio Decoder configuration: %s %d Hz %d channels %d bits per channel",
      mConfig.mime_type,
      mConfig.samples_per_second,
      mConfig.channel_count,
      mConfig.bits_per_sample);

  // Map the container mime type onto the CoreAudio format identifier.
  // A zero mFormatID marks an unsupported type; Init() rejects it later.
  if (strcmp(mConfig.mime_type, "audio/mpeg") == 0) {
    mFormatID = kAudioFormatMPEGLayer3;
  } else if (strcmp(mConfig.mime_type, "audio/mp4a-latm") == 0) {
    mFormatID = kAudioFormatMPEG4AAC;
  } else {
    mFormatID = 0;
  }
}
|
|
|
|
AppleATDecoder::~AppleATDecoder()
{
  MOZ_COUNT_DTOR(AppleATDecoder);
  // Shutdown() is responsible for disposing of the converter; by the time
  // we are destroyed it must already be gone.
  MOZ_ASSERT(!mConverter);
}
|
|
|
|
|
nsresult
AppleATDecoder::Init()
{
  // The constructor resolved the mime type to a CoreAudio format ID;
  // a non-zero ID means the stream type is one we can decode.
  if (mFormatID) {
    return NS_OK;
  }
  NS_ERROR("Non recognised format");
  return NS_ERROR_FAILURE;
}
|
|
|
|
|
nsresult
AppleATDecoder::Input(mp4_demuxer::MP4Sample* aSample)
{
  LOG("mp4 input sample %p %lld us %lld pts%s %llu bytes audio",
      aSample,
      aSample->duration,
      aSample->composition_timestamp,
      aSample->is_sync_point ? " keyframe" : "",
      (unsigned long long)aSample->size);

  // Decoding happens off this thread: wrap the sample in an nsAutoPtr
  // (transferring ownership to the runnable) and queue SubmitSample on
  // the audio task queue.
  mTaskQueue->Dispatch(
    NS_NewRunnableMethodWithArg<nsAutoPtr<mp4_demuxer::MP4Sample>>(
      this,
      &AppleATDecoder::SubmitSample,
      nsAutoPtr<mp4_demuxer::MP4Sample>(aSample)));

  return NS_OK;
}
|
|
|
|
nsresult
AppleATDecoder::Flush()
{
  LOG("Flushing AudioToolbox AAC decoder");
  mTaskQueue->Flush();
  mQueuedSamples.Clear();
  // The converter is created lazily by SetupDecoder() on the first sample,
  // so a Flush() arriving before any input (e.g. an early seek) would
  // otherwise hand a null converter to AudioConverterReset() and fail.
  if (mConverter) {
    OSStatus rv = AudioConverterReset(mConverter);
    if (rv) {
      LOG("Error %d resetting AudioConverter", rv);
      return NS_ERROR_FAILURE;
    }
  }
  return NS_OK;
}
|
|
|
|
nsresult
AppleATDecoder::Drain()
{
  LOG("Draining AudioToolbox AAC decoder");
  // Block until every queued decode task has run, then tell the caller
  // the drain is complete and reset the converter state via Flush().
  mTaskQueue->AwaitIdle();
  mCallback->DrainComplete();
  return Flush();
}
|
|
|
|
nsresult
AppleATDecoder::Shutdown()
{
  LOG("Shutdown: Apple AudioToolbox AAC decoder");
  mQueuedSamples.Clear();

  // The converter only exists once the first sample has been submitted.
  // Previously a null converter was passed to AudioConverterDispose(),
  // which errors out and — worse — returns early without ever closing
  // mStream below. Guard the dispose the same way the stream is guarded.
  if (mConverter) {
    OSStatus rv = AudioConverterDispose(mConverter);
    if (rv) {
      LOG("error %d disposing of AudioConverter", rv);
      return NS_ERROR_FAILURE;
    }
    mConverter = nullptr;
  }

  if (mStream) {
    OSStatus rv = AudioFileStreamClose(mStream);
    if (rv) {
      LOG("error %d disposing of AudioFileStream", rv);
      return NS_ERROR_FAILURE;
    }
    mStream = nullptr;
  }
  return NS_OK;
}
|
|
|
|
struct PassthroughUserData {
|
2014-11-19 06:03:30 -08:00
|
|
|
UInt32 mChannels;
|
2014-07-24 16:30:00 -07:00
|
|
|
UInt32 mDataSize;
|
2014-08-13 21:32:59 -07:00
|
|
|
const void* mData;
|
2014-11-19 06:03:30 -08:00
|
|
|
AudioStreamPacketDescription mPacket;
|
2014-07-24 16:30:00 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
// Error value we pass through the decoder to signal that nothing
|
2014-11-19 06:03:30 -08:00
|
|
|
// has gone wrong during decoding and we're done processing the packet.
|
|
|
|
const uint32_t kNoMoreDataErr = 'MOAR';
|
2014-07-24 16:30:00 -07:00
|
|
|
|
|
|
|
// AudioConverter input callback: feeds the one queued compressed packet
// held in PassthroughUserData to the converter, then signals completion
// with kNoMoreDataErr on the following call.
static OSStatus
_PassthroughInputDataCallback(AudioConverterRef aAudioConverter,
                              UInt32* aNumDataPackets /* in/out */,
                              AudioBufferList* aData /* in/out */,
                              AudioStreamPacketDescription** aPacketDesc,
                              void* aUserData)
{
  PassthroughUserData* user = static_cast<PassthroughUserData*>(aUserData);

  // The packet was fully handed over on a previous call; stop the pull.
  if (!user->mDataSize) {
    *aNumDataPackets = 0;
    return kNoMoreDataErr;
  }

  LOG("AudioConverter wants %u packets of audio data\n", *aNumDataPackets);

  if (aPacketDesc) {
    user->mPacket.mStartOffset = 0;
    user->mPacket.mVariableFramesInPacket = 0;
    user->mPacket.mDataByteSize = user->mDataSize;
    *aPacketDesc = &user->mPacket;
  }

  aData->mBuffers[0].mNumberChannels = user->mChannels;
  aData->mBuffers[0].mDataByteSize = user->mDataSize;
  aData->mBuffers[0].mData = const_cast<void*>(user->mData);

  // Mark the packet consumed so the next invocation returns the sentinel.
  user->mDataSize = 0;

  return noErr;
}
|
|
|
|
|
void
|
2014-11-19 06:03:30 -08:00
|
|
|
AppleATDecoder::SubmitSample(nsAutoPtr<mp4_demuxer::MP4Sample> aSample)
|
2014-11-26 18:23:15 -08:00
|
|
|
{
|
|
|
|
nsresult rv = NS_OK;
|
|
|
|
if (!mConverter) {
|
|
|
|
rv = SetupDecoder(aSample);
|
|
|
|
if (rv != NS_OK && rv != NS_ERROR_NOT_INITIALIZED) {
|
|
|
|
mCallback->Error();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mQueuedSamples.AppendElement(aSample);
|
|
|
|
|
|
|
|
if (rv == NS_OK) {
|
|
|
|
for (size_t i = 0; i < mQueuedSamples.Length(); i++) {
|
|
|
|
if (NS_FAILED(DecodeSample(mQueuedSamples[i]))) {
|
|
|
|
mQueuedSamples.Clear();
|
|
|
|
mCallback->Error();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mQueuedSamples.Clear();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mTaskQueue->IsEmpty()) {
|
|
|
|
mCallback->InputExhausted();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Decode one compressed sample through the AudioConverter and push the
// resulting PCM to the callback. Returns NS_OK (possibly with no output)
// or NS_ERROR_FAILURE on a converter/overflow error.
nsresult
AppleATDecoder::DecodeSample(mp4_demuxer::MP4Sample* aSample)
{
  // Accumulates all decoded PCM for this input packet.
  nsTArray<AudioDataValue> outputData;
  UInt32 channels = mOutputFormat.mChannelsPerFrame;
  // A multiple of the frame size close to a power of two, for efficient
  // allocation.
  const uint32_t MAX_AUDIO_FRAMES = 128;
  const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * channels;

  // Packet descriptions for the _decompressed_ output; required by the
  // API but otherwise ignored.
  nsAutoArrayPtr<AudioStreamPacketDescription>
    packets(new AudioStreamPacketDescription[MAX_AUDIO_FRAMES]);

  // The converter pulls input via a callback; this struct carries our
  // state into _PassthroughInputDataCallback.
  PassthroughUserData userData =
    { channels, (UInt32)aSample->size, aSample->data };

  // Scratch buffer for one round of decompressed audio.
  nsAutoArrayPtr<AudioDataValue> scratch(new AudioDataValue[maxDecodedSamples]);

  while (true) {
    AudioBufferList outBuffer;
    outBuffer.mNumberBuffers = 1;
    outBuffer.mBuffers[0].mNumberChannels = channels;
    outBuffer.mBuffers[0].mDataByteSize =
      maxDecodedSamples * sizeof(AudioDataValue);
    outBuffer.mBuffers[0].mData = scratch.get();

    // in: max packets we can accept; out: packets actually produced.
    UInt32 numFrames = MAX_AUDIO_FRAMES;

    OSStatus status = AudioConverterFillComplexBuffer(mConverter,
                                                      _PassthroughInputDataCallback,
                                                      &userData,
                                                      &numFrames /* in/out */,
                                                      &outBuffer,
                                                      packets.get());

    if (status && status != kNoMoreDataErr) {
      LOG("Error decoding audio stream: %d\n", status);
      return NS_ERROR_FAILURE;
    }

    if (numFrames) {
      outputData.AppendElements(scratch.get(), numFrames * channels);
      LOG("%d frames decoded", numFrames);
    }

    if (status == kNoMoreDataErr) {
      LOG("done processing compressed packet");
      break;
    }
  }

  if (outputData.IsEmpty()) {
    return NS_OK;
  }

  size_t numFrames = outputData.Length() / channels;
  int rate = mOutputFormat.mSampleRate;
  // Overflow-checked frames -> microseconds conversion.
  CheckedInt<Microseconds> duration = FramesToUsecs(numFrames, rate);
  if (!duration.isValid()) {
    NS_WARNING("Invalid count of accumulated audio samples");
    return NS_ERROR_FAILURE;
  }

  LOG("pushed audio at time %lfs; duration %lfs\n",
      (double)aSample->composition_timestamp / USECS_PER_S,
      (double)duration.value() / USECS_PER_S);

  // AudioData takes ownership of a freshly-allocated copy of the PCM.
  nsAutoArrayPtr<AudioDataValue> pcm(new AudioDataValue[outputData.Length()]);
  PodCopy(pcm.get(), &outputData[0], outputData.Length());
  nsRefPtr<AudioData> audio = new AudioData(aSample->byte_offset,
                                            aSample->composition_timestamp,
                                            duration.value(),
                                            numFrames,
                                            pcm.forget(),
                                            channels,
                                            rate);
  mCallback->Output(audio);
  return NS_OK;
}
|
|
2014-11-19 06:03:30 -08:00
|
|
|
// Build the input AudioStreamBasicDescription from the codec magic cookie
// (aExtraData). Falls back to a default description if the richer
// format-list queries fail; only the initial FormatInfo query is fatal.
nsresult
AppleATDecoder::GetInputAudioDescription(AudioStreamBasicDescription& aDesc,
                                         const Vector<uint8_t>& aExtraData)
{
  // Describe the stream to CoreAudio using the codec magic cookie.
  AudioFormatInfo formatInfo;
  PodZero(&formatInfo.mASBD);
  formatInfo.mASBD.mFormatID = mFormatID;
  if (mFormatID == kAudioFormatMPEG4AAC) {
    formatInfo.mASBD.mFormatFlags = mConfig.extended_profile;
  }
  formatInfo.mMagicCookieSize = aExtraData.length();
  formatInfo.mMagicCookie = aExtraData.begin();

  UInt32 formatListSize;
  // First, retrieve a default description via
  // kAudioFormatProperty_FormatInfo. This only fills in the
  // FramesPerPacket information the decoder needs, which depends on the
  // codec type and profile.
  aDesc.mFormatID = mFormatID;
  aDesc.mChannelsPerFrame = mConfig.channel_count;
  aDesc.mSampleRate = mConfig.samples_per_second;
  UInt32 inputFormatSize = sizeof(aDesc);
  OSStatus status = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                           0,
                                           NULL,
                                           &inputFormatSize,
                                           &aDesc);
  if (NS_WARN_IF(status)) {
    return NS_ERROR_FAILURE;
  }

  // Everything below is best-effort: if any step fails we keep the
  // default description created above and still report success.
  status = AudioFormatGetPropertyInfo(kAudioFormatProperty_FormatList,
                                      sizeof(formatInfo),
                                      &formatInfo,
                                      &formatListSize);
  if (status || (formatListSize % sizeof(AudioFormatListItem))) {
    return NS_OK;
  }
  size_t itemCount = formatListSize / sizeof(AudioFormatListItem);
  nsAutoArrayPtr<AudioFormatListItem> formatList(
    new AudioFormatListItem[itemCount]);

  status = AudioFormatGetProperty(kAudioFormatProperty_FormatList,
                                  sizeof(formatInfo),
                                  &formatInfo,
                                  &formatListSize,
                                  formatList);
  if (status) {
    return NS_OK;
  }
  LOG("found %u available audio stream(s)",
      formatListSize / sizeof(AudioFormatListItem));
  // Ask for the first playable format in the list: the highest quality
  // layer this platform is capable of decoding.
  UInt32 itemIndex;
  UInt32 indexSize = sizeof(itemIndex);
  status = AudioFormatGetProperty(kAudioFormatProperty_FirstPlayableFormatFromList,
                                  formatListSize,
                                  formatList,
                                  &indexSize,
                                  &itemIndex);
  if (status) {
    return NS_OK;
  }

  aDesc = formatList[itemIndex].mASBD;

  return NS_OK;
}
|
|
2014-11-26 18:23:15 -08:00
|
|
|
// Create the AudioConverter from the stream's input description. May
// return NS_ERROR_NOT_INITIALIZED to request that callers queue samples
// until an implicit SBR magic cookie has been found.
nsresult
AppleATDecoder::SetupDecoder(mp4_demuxer::MP4Sample* aSample)
{
  if (mFormatID == kAudioFormatMPEG4AAC &&
      mConfig.extended_profile == 2) {
    // An AAC-LC stream may signal SBR implicitly. Scan the sample for an
    // updated magic cookie to feed GetInputAudioDescription below.
    if (NS_SUCCEEDED(GetImplicitAACMagicCookie(aSample)) &&
        !mMagicCookie.length()) {
      // Nothing found yet; the caller will retry with a later sample.
      return NS_ERROR_NOT_INITIALIZED;
    }
    // On error, fall through and use the default stream description.
  }

  LOG("Initializing Apple AudioToolbox decoder");

  AudioStreamBasicDescription inputFormat;
  PodZero(&inputFormat);
  // Prefer the sniffed magic cookie; otherwise the demuxer's extra data.
  nsresult rv =
    GetInputAudioDescription(inputFormat,
                             mMagicCookie.length() ?
                                 mMagicCookie : mConfig.extra_data);
  if (NS_FAILED(rv)) {
    return rv;
  }

  // Fill in the output (PCM) format manually.
  PodZero(&mOutputFormat);
  mOutputFormat.mFormatID = kAudioFormatLinearPCM;
  mOutputFormat.mSampleRate = inputFormat.mSampleRate;
  mOutputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
#if defined(MOZ_SAMPLE_TYPE_FLOAT32)
  mOutputFormat.mBitsPerChannel = 32;
  mOutputFormat.mFormatFlags =
    kLinearPCMFormatFlagIsFloat |
    0;
#else
# error Unknown audio sample type
#endif
  // One sample per frame from the decoder.
  mOutputFormat.mFramesPerPacket = 1;
  mOutputFormat.mBytesPerPacket = mOutputFormat.mBytesPerFrame
    = mOutputFormat.mChannelsPerFrame * mOutputFormat.mBitsPerChannel / 8;

  OSStatus err = AudioConverterNew(&inputFormat, &mOutputFormat, &mConverter);
  if (err) {
    LOG("Error %d constructing AudioConverter", err);
    mConverter = nullptr;
    return NS_ERROR_FAILURE;
  }
  return NS_OK;
}
|
|
|
|
|
static void
|
|
|
|
_MetadataCallback(void* aAppleATDecoder,
|
|
|
|
AudioFileStreamID aStream,
|
|
|
|
AudioFileStreamPropertyID aProperty,
|
|
|
|
UInt32* aFlags)
|
|
|
|
{
|
|
|
|
AppleATDecoder* decoder = static_cast<AppleATDecoder*>(aAppleATDecoder);
|
|
|
|
LOG("MetadataCallback receiving: '%s'", FourCC2Str(aProperty));
|
|
|
|
if (aProperty == kAudioFileStreamProperty_MagicCookieData) {
|
|
|
|
UInt32 size;
|
|
|
|
Boolean writeable;
|
|
|
|
OSStatus rv = AudioFileStreamGetPropertyInfo(aStream,
|
|
|
|
aProperty,
|
|
|
|
&size,
|
|
|
|
&writeable);
|
|
|
|
if (rv) {
|
|
|
|
LOG("Couldn't get property info for '%s' (%s)",
|
|
|
|
FourCC2Str(aProperty), FourCC2Str(rv));
|
|
|
|
decoder->mFileStreamError = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
nsAutoArrayPtr<uint8_t> data(new uint8_t[size]);
|
|
|
|
rv = AudioFileStreamGetProperty(aStream, aProperty,
|
|
|
|
&size, data);
|
|
|
|
if (rv) {
|
|
|
|
LOG("Couldn't get property '%s' (%s)",
|
|
|
|
FourCC2Str(aProperty), FourCC2Str(rv));
|
|
|
|
decoder->mFileStreamError = true;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
decoder->mMagicCookie.append(data.get(), size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
_SampleCallback(void* aSBR,
|
|
|
|
UInt32 aNumBytes,
|
|
|
|
UInt32 aNumPackets,
|
|
|
|
const void* aData,
|
|
|
|
AudioStreamPacketDescription* aPackets)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
// Feed one ADTS-wrapped sample through an AudioFileStream parser so that
// CoreAudio can extract an implicit-SBR magic cookie (delivered via
// _MetadataCallback into mMagicCookie / mFileStreamError).
// Returns NS_ERROR_FAILURE on parse/open errors; NS_OK otherwise, even
// when no cookie has been found yet (callers check mMagicCookie).
nsresult
AppleATDecoder::GetImplicitAACMagicCookie(const mp4_demuxer::MP4Sample* aSample)
{
  // Prepend an ADTS header to the raw AAC payload; the parser needs it.
  mp4_demuxer::MP4Sample adtssample(*aSample);
  bool converted = mp4_demuxer::Adts::ConvertSample(mConfig.channel_count,
                                                    mConfig.frequency_index,
                                                    mConfig.aac_profile,
                                                    &adtssample);
  if (!converted) {
    NS_WARNING("Failed to apply ADTS header");
    return NS_ERROR_FAILURE;
  }
  if (!mStream) {
    // NOTE: the original code declared a second `rv` here, shadowing the
    // outer boolean result; distinct names keep the two statuses apart.
    OSStatus openErr = AudioFileStreamOpen(this,
                                           _MetadataCallback,
                                           _SampleCallback,
                                           kAudioFileAAC_ADTSType,
                                           &mStream);
    if (openErr) {
      NS_WARNING("Couldn't open AudioFileStream");
      return NS_ERROR_FAILURE;
    }
  }

  OSStatus status = AudioFileStreamParseBytes(mStream,
                                              adtssample.size,
                                              adtssample.data,
                                              0 /* discontinuity */);
  if (status) {
    NS_WARNING("Couldn't parse sample");
  }

  if (status || mFileStreamError || mMagicCookie.length()) {
    // We have decoded a magic cookie or an error occurred; either way
    // the stream parser is no longer needed.
    AudioFileStreamClose(mStream);
    mStream = nullptr;
  }

  return (mFileStreamError || status) ? NS_ERROR_FAILURE : NS_OK;
}
2014-07-24 16:30:00 -07:00
|
|
|
} // namespace mozilla
|