Bug 1240417. Part 1 - add a writer class to encapsulate pointer arithmetic. r=kinetik.

JW Wang 2016-01-18 11:24:06 +08:00
parent cfd4156755
commit 293194d14e
4 changed files with 106 additions and 58 deletions

AudioStream.cpp

@@ -553,18 +553,18 @@ AudioStream::Downmix(AudioDataValue* aBuffer, uint32_t aFrames)
return true;
}
-long
-AudioStream::GetUnprocessed(void* aBuffer, long aFrames)
+void
+AudioStream::GetUnprocessed(AudioBufferWriter& aWriter)
{
mMonitor.AssertCurrentThreadOwns();
-uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
// Flush the timestretcher pipeline, if we were playing using a playback rate
// other than 1.0.
-uint32_t flushedFrames = 0;
if (mTimeStretcher && mTimeStretcher->numSamples()) {
-flushedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
-wpos += FramesToBytes(flushedFrames);
+auto timeStretcher = mTimeStretcher;
+aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
+return timeStretcher->receiveSamples(aPtr, aFrames);
+}, aWriter.Available());
// TODO: There might be still unprocessed samples in the stretcher.
// We should either remove or flush them so they won't be in the output
@@ -572,41 +572,35 @@ AudioStream::GetUnprocessed(void* aBuffer, long aFrames)
NS_WARN_IF(mTimeStretcher->numUnprocessedSamples() > 0);
}
-uint32_t toPopFrames = aFrames - flushedFrames;
-while (toPopFrames > 0) {
-UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
+while (aWriter.Available() > 0) {
+UniquePtr<Chunk> c = mDataSource.PopFrames(aWriter.Available());
if (c->Frames() == 0) {
break;
}
-MOZ_ASSERT(c->Frames() <= toPopFrames);
+MOZ_ASSERT(c->Frames() <= aWriter.Available());
if (Downmix(c->GetWritable(), c->Frames())) {
-memcpy(wpos, c->Data(), FramesToBytes(c->Frames()));
+aWriter.Write(c->Data(), c->Frames());
} else {
// Write silence if downmixing fails.
-memset(wpos, 0, FramesToBytes(c->Frames()));
+aWriter.WriteZeros(c->Frames());
}
-wpos += FramesToBytes(c->Frames());
-toPopFrames -= c->Frames();
}
-return aFrames - toPopFrames;
}
-long
-AudioStream::GetTimeStretched(void* aBuffer, long aFrames)
+void
+AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
mMonitor.AssertCurrentThreadOwns();
// We need to call the non-locking version, because we already have the lock.
if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
-return 0;
+return;
}
-uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
double playbackRate = static_cast<double>(mInRate) / mOutRate;
-uint32_t toPopFrames = ceil(aFrames * playbackRate);
+uint32_t toPopFrames = ceil(aWriter.Available() * playbackRate);
-while (mTimeStretcher->numSamples() < static_cast<uint32_t>(aFrames)) {
+while (mTimeStretcher->numSamples() < aWriter.Available()) {
UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
if (c->Frames() == 0) {
break;
@@ -623,9 +617,10 @@ AudioStream::GetTimeStretched(void* aBuffer, long aFrames)
}
}
-uint32_t receivedFrames = mTimeStretcher->receiveSamples(reinterpret_cast<AudioDataValue*>(wpos), aFrames);
-wpos += FramesToBytes(receivedFrames);
-return receivedFrames;
+auto timeStretcher = mTimeStretcher;
+aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
+return timeStretcher->receiveSamples(aPtr, aFrames);
+}, aWriter.Available());
}
long
@@ -633,16 +628,16 @@ AudioStream::DataCallback(void* aBuffer, long aFrames)
{
MonitorAutoLock mon(mMonitor);
MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
-uint32_t underrunFrames = 0;
-uint32_t servicedFrames = 0;
+auto writer = AudioBufferWriter(
+reinterpret_cast<AudioDataValue*>(aBuffer), mOutChannels, aFrames);
// FIXME: cubeb_pulse sometimes calls us before cubeb_stream_start() is called.
// We don't want to consume audio data until Start() is called by the client.
if (mState == INITIALIZED) {
NS_WARNING("data callback fires before cubeb_stream_start() is called");
mAudioClock.UpdateFrameHistory(0, aFrames);
-memset(aBuffer, 0, FramesToBytes(aFrames));
-return aFrames;
+return writer.WriteZeros(aFrames);
}
// NOTE: wasapi (others?) can call us back *after* stop()/Shutdown() (mState == SHUTDOWN)
@@ -654,33 +649,29 @@ AudioStream::DataCallback(void* aBuffer, long aFrames)
}
if (mInRate == mOutRate) {
-servicedFrames = GetUnprocessed(aBuffer, aFrames);
+GetUnprocessed(writer);
} else {
-servicedFrames = GetTimeStretched(aBuffer, aFrames);
+GetTimeStretched(writer);
}
-underrunFrames = aFrames - servicedFrames;
// Always send audible frames first, and silent frames later.
// Otherwise it will break the assumption of FrameHistory.
if (!mDataSource.Ended()) {
-mAudioClock.UpdateFrameHistory(servicedFrames, underrunFrames);
-uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
-memset(rpos, 0, FramesToBytes(underrunFrames));
-if (underrunFrames) {
+mAudioClock.UpdateFrameHistory(aFrames - writer.Available(), writer.Available());
+if (writer.Available() > 0) {
MOZ_LOG(gAudioStreamLog, LogLevel::Warning,
("AudioStream %p lost %d frames", this, underrunFrames));
("AudioStream %p lost %d frames", this, writer.Available()));
writer.WriteZeros(writer.Available());
}
-servicedFrames += underrunFrames;
} else {
// No more new data in the data source. Don't send silent frames so the
// cubeb stream can start draining.
-mAudioClock.UpdateFrameHistory(servicedFrames, 0);
+mAudioClock.UpdateFrameHistory(aFrames - writer.Available(), 0);
}
WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
-return servicedFrames;
+return aFrames - writer.Available();
}
void
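Worth noting about the DataCallback rewrite above: the explicit servicedFrames/underrunFrames counters disappear because both quantities can be read back from the writer at any point. The following is a simplified sketch of that accounting, not tree code; the helper name is made up and it assumes the AudioBufferWriter added to AudioStream.h below.

// Hypothetical helper illustrating the bookkeeping DataCallback now relies on.
// After GetUnprocessed()/GetTimeStretched() have consumed writer capacity:
//   frames serviced so far    == aFrames - aWriter.Available()
//   underrun (still unfilled) == aWriter.Available()
long FinishCallbackSketch(AudioBufferWriter& aWriter, long aFrames, bool aSourceEnded)
{
  if (!aSourceEnded && aWriter.Available() > 0) {
    // Pad the shortfall with silence so cubeb still gets a full buffer.
    aWriter.WriteZeros(aWriter.Available());
  }
  // If the source has ended, the shortfall is left unwritten, so the return
  // value reports a short buffer and the cubeb stream can start draining.
  return aFrames - aWriter.Available();
}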

AudioStream.h

@@ -148,6 +148,66 @@ private:
uint32_t mCount;
};
+/*
+* A bookkeeping class to track the read/write position of an audio buffer.
+*/
+class AudioBufferCursor {
+public:
+AudioBufferCursor(AudioDataValue* aPtr, uint32_t aChannels, uint32_t aFrames)
+: mPtr(aPtr), mChannels(aChannels), mFrames(aFrames) {}
+// Advance the cursor to account for frames that are consumed.
+uint32_t Advance(uint32_t aFrames) {
+MOZ_ASSERT(mFrames >= aFrames);
+mFrames -= aFrames;
+mPtr += mChannels * aFrames;
+return aFrames;
+}
+// The number of frames available for read/write in this buffer.
+uint32_t Available() const { return mFrames; }
+// Return a pointer where read/write should begin.
+AudioDataValue* Ptr() const { return mPtr; }
+protected:
+AudioDataValue* mPtr;
+const uint32_t mChannels;
+uint32_t mFrames;
+};
+/*
+* A helper class to encapsulate pointer arithmetic and provide means to modify
+* the underlying audio buffer.
+*/
+class AudioBufferWriter : private AudioBufferCursor {
+public:
+AudioBufferWriter(AudioDataValue* aPtr, uint32_t aChannels, uint32_t aFrames)
+: AudioBufferCursor(aPtr, aChannels, aFrames) {}
+uint32_t WriteZeros(uint32_t aFrames) {
+memset(mPtr, 0, sizeof(AudioDataValue) * mChannels * aFrames);
+return Advance(aFrames);
+}
+uint32_t Write(const AudioDataValue* aPtr, uint32_t aFrames) {
+memcpy(mPtr, aPtr, sizeof(AudioDataValue) * mChannels * aFrames);
+return Advance(aFrames);
+}
+// Provide a write function to update the audio buffer with the following
+// signature: uint32_t(AudioDataValue* aPtr, uint32_t aFrames)
+// aPtr: Pointer to the audio buffer.
+// aFrames: The number of frames available in the buffer.
+// return: The number of frames actually written by the function.
+template <typename Function>
+uint32_t Write(const Function& aFunction, uint32_t aFrames) {
+return Advance(aFunction(mPtr, aFrames));
+}
+using AudioBufferCursor::Available;
+};
// Access to a single instance of this class must be synchronized by
// callers, or made from a single thread. One exception is that access to
// GetPosition, GetPositionInFrames, SetVolume, and Get{Rate,Channels},
@@ -263,8 +323,8 @@ private:
// Return true if downmixing succeeds otherwise false.
bool Downmix(AudioDataValue* aBuffer, uint32_t aFrames);
-long GetUnprocessed(void* aBuffer, long aFrames);
-long GetTimeStretched(void* aBuffer, long aFrames);
+void GetUnprocessed(AudioBufferWriter& aWriter);
+void GetTimeStretched(AudioBufferWriter& aWriter);
void StartUnlocked();
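For orientation, here is a minimal usage sketch of the two helpers added above. It is illustrative only: the function name is made up, AudioDataValue is simply the sample type the build selects, and std::min comes from <algorithm>.

#include <algorithm>  // std::min

// Fill a callback buffer through the AudioBufferWriter defined above.
void FillBufferSketch(AudioDataValue* aBuffer, uint32_t aChannels, uint32_t aFrames,
                      const AudioDataValue* aDecoded, uint32_t aDecodedFrames)
{
  AudioBufferWriter writer(aBuffer, aChannels, aFrames);

  // Copy as many decoded frames as fit; Write() converts frames to samples
  // (channels * frames) internally and advances the position.
  writer.Write(aDecoded, std::min(writer.Available(), aDecodedFrames));

  // A producer that reports how many frames it actually delivered plugs into
  // the functor overload; the writer advances only by the returned count.
  writer.Write([](AudioDataValue* aPtr, uint32_t aAvailable) -> uint32_t {
    // A resampler or time stretcher would fill aPtr here.
    return 0;  // this sketch produces nothing
  }, writer.Available());

  // Pad whatever is left with silence.
  writer.WriteZeros(writer.Available());
}

The functor form is what the new GetUnprocessed()/GetTimeStretched() use with mTimeStretcher in AudioStream.cpp above: receiveSamples() returns the number of frames it produced, and the writer advances by exactly that amount.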

DecodedAudioDataSink.cpp

@@ -167,17 +167,12 @@ DecodedAudioDataSink::PopFrames(uint32_t aFrames)
{
class Chunk : public AudioStream::Chunk {
public:
-Chunk(AudioData* aBuffer, uint32_t aFrames, uint32_t aOffset)
-: mBuffer(aBuffer)
-, mFrames(aFrames)
-, mData(aBuffer->mAudioData.get() + aBuffer->mChannels * aOffset) {
-MOZ_ASSERT(aOffset + aFrames <= aBuffer->mFrames);
-}
+Chunk(AudioData* aBuffer, uint32_t aFrames, AudioDataValue* aData)
+: mBuffer(aBuffer), mFrames(aFrames), mData(aData) {}
Chunk() : mFrames(0), mData(nullptr) {}
const AudioDataValue* Data() const { return mData; }
uint32_t Frames() const { return mFrames; }
AudioDataValue* GetWritable() const { return mData; }
private:
const RefPtr<AudioData> mBuffer;
const uint32_t mFrames;
@@ -232,20 +227,22 @@ DecodedAudioDataSink::PopFrames(uint32_t aFrames)
return MakeUnique<SilentChunk>(framesToPop, mInfo.mChannels);
}
-mFramesPopped = 0;
mCurrentData = dont_AddRef(AudioQueue().PopFront().take()->As<AudioData>());
+mCursor = MakeUnique<AudioBufferCursor>(mCurrentData->mAudioData.get(),
+mCurrentData->mChannels,
+mCurrentData->mFrames);
}
-auto framesToPop = std::min(aFrames, mCurrentData->mFrames - mFramesPopped);
+auto framesToPop = std::min(aFrames, mCursor->Available());
SINK_LOG_V("playing audio at time=%lld offset=%u length=%u",
-mCurrentData->mTime, mFramesPopped, framesToPop);
+mCurrentData->mTime, mCurrentData->mFrames - mCursor->Available(), framesToPop);
UniquePtr<AudioStream::Chunk> chunk;
if (mCurrentData->mRate == mInfo.mRate &&
mCurrentData->mChannels == mInfo.mChannels) {
-chunk = MakeUnique<Chunk>(mCurrentData, framesToPop, mFramesPopped);
+chunk = MakeUnique<Chunk>(mCurrentData, framesToPop, mCursor->Ptr());
} else {
SINK_LOG_V("mismatched sample format mInfo=[%uHz/%u channels] audio=[%uHz/%u channels]",
mInfo.mRate, mInfo.mChannels, mCurrentData->mRate, mCurrentData->mChannels);
@@ -253,11 +250,11 @@ DecodedAudioDataSink::PopFrames(uint32_t aFrames)
}
mWritten += framesToPop;
-mFramesPopped += framesToPop;
+mCursor->Advance(framesToPop);
// All frames are popped. Reset mCurrentData so we can pop new elements from
// the audio queue in next calls to PopFrames().
-if (mFramesPopped == mCurrentData->mFrames) {
+if (mCursor->Available() == 0) {
mCurrentData = nullptr;
}
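The read side mirrors this: mCursor replaces the manual mAudioData.get() + mChannels * offset arithmetic that the old Chunk constructor and mFramesPopped performed. A small illustration, with a hypothetical function and consumer, assuming the AudioBufferCursor from AudioStream.h:

#include <algorithm>  // std::min

// Hypothetical consumer standing in for handing a Chunk to the AudioStream.
void ConsumeChunk(const AudioDataValue* aPtr, uint32_t aChannels, uint32_t aFrames);

// Walk a decoded buffer in bounded chunks, the way PopFrames() now does.
void DrainDecodedBufferSketch(AudioDataValue* aData, uint32_t aChannels,
                              uint32_t aFrames, uint32_t aMaxChunkFrames)
{
  AudioBufferCursor cursor(aData, aChannels, aFrames);
  while (cursor.Available() > 0) {
    uint32_t frames = std::min(aMaxChunkFrames, cursor.Available());
    // cursor.Ptr() is where this chunk starts, matching the
    // Chunk(mCurrentData, framesToPop, mCursor->Ptr()) call above.
    ConsumeChunk(cursor.Ptr(), aChannels, frames);
    cursor.Advance(frames);
  }
}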

DecodedAudioDataSink.h

@@ -97,8 +97,8 @@ private:
*/
// The AudioData at which AudioStream::DataSource is reading.
RefPtr<AudioData> mCurrentData;
-// The number of frames that have been popped from mCurrentData.
-uint32_t mFramesPopped = 0;
+// Keep track of the read position of mCurrentData.
+UniquePtr<AudioBufferCursor> mCursor;
// True if there is any error in processing audio data like overflow.
bool mErrored = false;
};