Files
UnrealEngineUWP/Engine/Source/Runtime/ALAudio/Private/ALAudioBuffer.cpp

191 lines
5.3 KiB
C++
Raw Normal View History

// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved.
/*
 * Integration note (2016-11-23): Copying //UE4/Dev-Build to //UE4/Dev-Main
 * (Source: //UE4/Dev-Build @ 3209340). #lockdown Nick.Penwarden, #rb none.
 *
 * MAJOR FEATURES + CHANGES — Change 3209340 on 2016/11/23 by Ben.Marsh
 * [CL 3209342 by Ben Marsh in Main branch]:
 *
 * Converted the UE4 codebase to an "include what you use" model, where every
 * header includes only the dependencies it needs rather than every source file
 * including large monolithic headers like Engine.h and UnrealEd.h. Measured
 * full rebuild times were around 2x faster using XGE on Windows, with
 * improvements of 25% or more for incremental and full rebuilds on most other
 * platforms.
 *
 * - Every header now includes everything it needs to compile.
 * - A CoreMinimal.h header provides the ubiquitous Core types (FString, FName,
 *   TArray, FVector, etc.). Most headers now include this first.
 * - A CoreTypes.h header sets up primitive UE4 types and build macros (int32,
 *   PLATFORM_WIN64, etc.). All headers in Core include this first, as does
 *   CoreMinimal.h.
 * - Every .cpp file includes its matching .h file first; this helps validate
 *   that each header includes everything it needs to compile.
 * - No engine code includes a monolithic header such as Engine.h or UnrealEd.h
 *   any more. Including one from engine code produces a warning; the headers
 *   still exist for compatibility with game projects and do not warn there.
 * - Only minor changes were made to internal games to accommodate this; the
 *   intent is for the transition to be as seamless as possible.
 * - No engine code explicitly includes a precompiled header any more. PCHs are
 *   still used, but are force-included on the compiler command line by
 *   UnrealBuildTool, allowing their contents to be tuned without breaking
 *   existing include dependencies.
 * - PCHs are generated by a tool for statistical coverage of the source files
 *   using them; the new shared PCHs are seeded with any header included by
 *   more than 15% of source files. The tool used for this transform is at
 *   Engine\Source\Programs\IncludeTool.
 */
#include "CoreMinimal.h"
#include "Stats/Stats.h"
#include "Audio.h"
#include "ALAudioDevice.h"
#include "AudioDecompress.h"
/*------------------------------------------------------------------------------------
FALSoundBuffer.
------------------------------------------------------------------------------------*/
/**
 * Constructor
 *
 * Initializes an empty sound buffer bound to the given device; BufferId
 * starts at 0 (no OpenAL buffer generated yet).
 *
 * @param InAudioDevice audio device this sound buffer is going to be attached to.
 */
FALSoundBuffer::FALSoundBuffer( FALAudioDevice* InAudioDevice )
	: FSoundBuffer(InAudioDevice)
	, BufferId(0)
{
}
/**
 * Destructor
 *
 * Releases the OpenAL buffer owned by this instance. If no buffer was ever
 * generated, BufferId is 0, which the OpenAL spec treats as a no-op for
 * alDeleteBuffers.
 */
FALSoundBuffer::~FALSoundBuffer( void )
{
// Delete AL buffers.
alDeleteBuffers(1, &BufferId);
}
/**
 * Static function used to create a buffer.
 *
 * @param AudioDevice	audio device to attach the created buffer to
 * @param InWave		USoundWave to use as template and wave source
 * @return FALSoundBuffer pointer if buffer creation succeeded, nullptr otherwise
 */
FALSoundBuffer* FALSoundBuffer::Init(FALAudioDevice* AudioDevice, USoundWave* InWave)
{
	// Can't create a buffer without any source data
	if (InWave == nullptr || InWave->NumChannels == 0)
	{
		return nullptr;
	}

	FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
	check(AudioDeviceManager != nullptr);

	FALSoundBuffer* Buffer = nullptr;

	switch (static_cast<EDecompressionType>(InWave->DecompressionType))
	{
	case DTYPE_Setup:
		// Has circumvented pre-cache mechanism - pre-cache now
		AudioDevice->Precache(InWave, true, false);

		// Recall this function with new decompression type
		return Init(AudioDevice, InWave);

	case DTYPE_Native:
		// Reuse an existing buffer registered for this resource, if any.
		if (InWave->ResourceID)
		{
			Buffer = static_cast<FALSoundBuffer*>(AudioDeviceManager->WaveBufferMap.FindRef(InWave->ResourceID));
		}
		// Create (or re-upload) the native buffer when none exists or its AL
		// buffer was never generated.
		if (!Buffer || !Buffer->BufferId)
		{
			CreateNativeBuffer(AudioDevice, InWave, Buffer);
		}
		break;

		// NOTE: a streaming path that always created a new queued buffer for
		// ogg vorbis data used to live here:
		//Buffer = CreateQueuedBuffer(AudioDevice, InWave);
		//break;

	case DTYPE_Invalid:
	case DTYPE_Preview:
	case DTYPE_Procedural:
	case DTYPE_RealTime:
	default:
		// Invalid will be set if the wave cannot be played.
		// Cast to int32 so the argument matches the %d varargs format specifier.
		UE_LOG(LogALAudio, Warning, TEXT("ALSoundBuffer wave '%s' has an invalid decompression type %d."),
			*InWave->GetName(), static_cast<int32>(InWave->DecompressionType));
		break;
	}

	if (Buffer == nullptr)
	{
		UE_LOG(LogALAudio, Warning, TEXT("ALSoundBuffer init failed for wave '%s', decompression type %d."),
			*InWave->GetName(), static_cast<int32>(InWave->DecompressionType));
	}

	return Buffer;
}
/**
 * Creates (or rebinds) the OpenAL buffer for a fully-decompressed sound wave
 * and uploads its PCM data to AL.
 *
 * @param AudioDevice	audio device the buffer is attached to
 * @param Wave			sound wave supplying the PCM (or raw WAV) data
 * @param Buffer		in/out: an existing buffer to rebind to AudioDevice, or
 *						receives a newly created and tracked buffer; reset to
 *						nullptr (after delete) if the AL upload fails
 */
void FALSoundBuffer::CreateNativeBuffer(FALAudioDevice* AudioDevice, USoundWave* Wave, FALSoundBuffer* &Buffer)
{
// Check if Buffer already exists
if (Buffer)
{
// Assign the new AudioDevice
Buffer->AudioDevice = AudioDevice;
}
else
{
SCOPE_CYCLE_COUNTER( STAT_AudioResourceCreationTime );
// Check to see if thread has finished decompressing on the other thread
if (Wave->AudioDecompressor)
{
// Block until the async decompression task completes, so RawPCMData is valid.
Wave->AudioDecompressor->EnsureCompletion();
// Remove the decompressor
delete Wave->AudioDecompressor;
Wave->AudioDecompressor = nullptr;
}
// Create new buffer and copy the wave's format description into it.
Buffer = new FALSoundBuffer(AudioDevice);
Buffer->InternalFormat = AudioDevice->GetInternalFormat(Wave->NumChannels);
Buffer->NumChannels = Wave->NumChannels;
Buffer->SampleRate = Wave->SampleRate;
// Register the new buffer with the device manager so it can be found again
// via WaveBufferMap by resource ID (see Init).
FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
check(AudioDeviceManager != nullptr);
AudioDeviceManager->TrackResource(Wave, Buffer);
}
// Generate the new OpenAL buffer
alGenBuffers(1, &Buffer->BufferId);
AudioDevice->alError(TEXT("RegisterSound"));
if (Wave->RawPCMData)
{
// Decompressed PCM is already available — upload it directly.
Buffer->BufferSize = Wave->RawPCMDataSize;
alBufferData(Buffer->BufferId, Buffer->InternalFormat, Wave->RawPCMData, Wave->RawPCMDataSize, Buffer->SampleRate);
// Free up the data if necessary
if (Wave->bDynamicResource)
{
FMemory::Free( Wave->RawPCMData );
Wave->RawPCMData = nullptr;
Wave->bDynamicResource = false;
}
}
else
{
// No decompressed PCM — fall back to the wave's raw bulk data.
// get the raw data
uint8* SoundData = reinterpret_cast<uint8*>(Wave->RawData.Lock(LOCK_READ_ONLY));
// it's (possibly) a pointer to a wave file, so skip over the header
int SoundDataSize = Wave->RawData.GetBulkDataSize();
// is there a wave header?
FWaveModInfo WaveInfo;
if (WaveInfo.ReadWaveInfo(SoundData, SoundDataSize))
{
// if so, modify the location and size of the sound data based on header
SoundData = WaveInfo.SampleDataStart;
SoundDataSize = WaveInfo.SampleDataSize;
}
// let the Buffer know the final size
Buffer->BufferSize = SoundDataSize;
// upload it
alBufferData( Buffer->BufferId, Buffer->InternalFormat, SoundData, Buffer->BufferSize, Buffer->SampleRate );
// unload it
Wave->RawData.Unlock();
}
// Treat an AL error or an empty upload as an unsupported format.
if (AudioDevice->alError(TEXT( "RegisterSound (buffer data)")) || (Buffer->BufferSize == 0))
{
Buffer->InternalFormat = 0;
}
if (Buffer->InternalFormat == 0)
{
UE_LOG(LogAudio, Log,TEXT( "Audio: sound format not supported for '%s' (%d)" ), *Wave->GetName(), Wave->NumChannels);
UE_LOG(LogALAudio, Warning, TEXT("ALSoundBuffer: sound format not supported for wave '%s'"), *Wave->GetName());
// Destroy the buffer on failure (the destructor releases the AL buffer).
// NOTE(review): the buffer was registered via TrackResource above; presumably
// the FSoundBuffer destructor untracks it — confirm against FAudioDeviceManager.
delete Buffer;
Buffer = nullptr;
}
}