/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef MOZILLA_AUDIOSEGMENT_H_
#define MOZILLA_AUDIOSEGMENT_H_

#include "MediaSegment.h"
#include "nsISupportsImpl.h"
#include "AudioSampleFormat.h"
#include "SharedBuffer.h"

namespace mozilla {

class AudioStream;

/**
 * An AudioChunk represents a multi-channel buffer of audio samples.
 * It references an underlying ThreadSharedObject which manages the lifetime
 * of the buffer. An AudioChunk maintains its own duration and channel data
 * pointers so it can represent a subinterval of a buffer without copying.
 * An AudioChunk can store its individual channels anywhere; it maintains
 * separate pointers to each channel's buffer.
 */
struct AudioChunk {
  typedef mozilla::AudioSampleFormat SampleFormat;

  // Generic methods
  void SliceTo(TrackTicks aStart, TrackTicks aEnd)
  {
    NS_ASSERTION(aStart >= 0 && aStart < aEnd && aEnd <= mDuration,
                 "Slice out of bounds");
    if (mBuffer) {
      MOZ_ASSERT(aStart < INT32_MAX, "Can't slice beyond 32-bit sample lengths");
      for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) {
        mChannelData[channel] = AddAudioSampleOffset(mChannelData[channel],
            mBufferFormat, int32_t(aStart));
      }
    }
    mDuration = aEnd - aStart;
  }
  TrackTicks GetDuration() const { return mDuration; }
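  // Returns true if aOther is a contiguous continuation of this chunk: it must
  // reference the same buffer and, when the buffer is non-null, each of
  // aOther's channel pointers must start exactly mDuration samples past ours.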
  bool CanCombineWithFollowing(const AudioChunk& aOther) const
  {
    if (aOther.mBuffer != mBuffer) {
      return false;
    }
    if (mBuffer) {
      NS_ASSERTION(aOther.mBufferFormat == mBufferFormat,
                   "Wrong metadata about buffer");
      NS_ASSERTION(aOther.mChannelData.Length() == mChannelData.Length(),
                   "Mismatched channel count");
      if (mDuration > INT32_MAX) {
        return false;
      }
      for (uint32_t channel = 0; channel < mChannelData.Length(); ++channel) {
        if (aOther.mChannelData[channel] != AddAudioSampleOffset(mChannelData[channel],
            mBufferFormat, int32_t(mDuration))) {
          return false;
        }
      }
    }
    return true;
  }
  bool IsNull() const { return mBuffer == nullptr; }
  void SetNull(TrackTicks aDuration)
  {
    mBuffer = nullptr;
    mChannelData.Clear();
    mDuration = aDuration;
    mVolume = 1.0f;
  }

  TrackTicks mDuration; // in frames within the buffer
  nsRefPtr<ThreadSharedObject> mBuffer; // the buffer object whose lifetime is managed; null means data is all zeroes
  nsTArray<const void*> mChannelData; // one pointer per channel; empty if and only if mBuffer is null
  float mVolume; // volume multiplier to apply (1.0f if mBuffer is nonnull)
  SampleFormat mBufferFormat; // format of frames in mBuffer (only meaningful if mBuffer is nonnull)
};
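
// A minimal usage sketch (illustrative only; "chunk" below is a hypothetical
// 2-channel, 1024-frame AUDIO_FORMAT_FLOAT32 chunk):
//
//   chunk.SliceTo(256, 512);                 // now covers frames [256, 512)
//   MOZ_ASSERT(chunk.GetDuration() == 256);
//
// SliceTo only advances the per-channel pointers and shrinks mDuration; the
// underlying ThreadSharedObject buffer is neither copied nor modified.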

/**
 * A list of audio samples consisting of a sequence of slices of SharedBuffers.
 * The audio rate is determined by the track, not stored in this class.
 */
class AudioSegment : public MediaSegmentBase<AudioSegment, AudioChunk> {
public:
  typedef mozilla::AudioSampleFormat SampleFormat;

  AudioSegment() : MediaSegmentBase<AudioSegment, AudioChunk>(AUDIO) {}

  void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                    const nsTArray<const float*>& aChannelData,
                    int32_t aDuration)
  {
    AudioChunk* chunk = AppendChunk(aDuration);
    chunk->mBuffer = aBuffer;
    for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
      chunk->mChannelData.AppendElement(aChannelData[channel]);
    }
    chunk->mVolume = 1.0f;
    chunk->mBufferFormat = AUDIO_FORMAT_FLOAT32;
  }
  void AppendFrames(already_AddRefed<ThreadSharedObject> aBuffer,
                    const nsTArray<const int16_t*>& aChannelData,
                    int32_t aDuration)
  {
    AudioChunk* chunk = AppendChunk(aDuration);
    chunk->mBuffer = aBuffer;
    for (uint32_t channel = 0; channel < aChannelData.Length(); ++channel) {
      chunk->mChannelData.AppendElement(aChannelData[channel]);
    }
    chunk->mVolume = 1.0f;
    chunk->mBufferFormat = AUDIO_FORMAT_S16;
  }
  // Consumes aChunk, and returns a pointer to the persistent copy of aChunk
  // in the segment.
  AudioChunk* AppendAndConsumeChunk(AudioChunk* aChunk)
  {
    AudioChunk* chunk = AppendChunk(aChunk->mDuration);
    chunk->mBuffer = aChunk->mBuffer.forget();
    chunk->mChannelData.SwapElements(aChunk->mChannelData);
    chunk->mVolume = aChunk->mVolume;
    chunk->mBufferFormat = aChunk->mBufferFormat;
    return chunk;
  }
  void ApplyVolume(float aVolume);
  void WriteTo(AudioStream* aOutput);

  static Type StaticType() { return AUDIO; }
};
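
// A minimal usage sketch of appending one stereo float32 chunk (assumptions:
// SharedBuffer::Create()/Data() behave as declared in SharedBuffer.h; aFrames
// and the planar channel layout are hypothetical):
//
//   nsRefPtr<SharedBuffer> buffer =
//     SharedBuffer::Create(2 * aFrames * sizeof(float));
//   float* samples = static_cast<float*>(buffer->Data());
//   nsAutoTArray<const float*,2> channels;
//   channels.AppendElement(samples);            // left channel plane
//   channels.AppendElement(samples + aFrames);  // right channel plane
//   AudioSegment segment;
//   segment.AppendFrames(buffer.forget(), channels, aFrames);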

}

#endif /* MOZILLA_AUDIOSEGMENT_H_ */