You've already forked UnrealEngineUWP
mirror of
https://github.com/izzy2lost/UnrealEngineUWP.git
synced 2026-03-26 18:15:20 -07:00
- ICacheStore::Put() has updated documentation to reflect the requirements of partial records. - ICacheStore::Get() now takes a FCacheRecordPolicy, which is implicitly constructible from ECachePolicy, and allows setting the policy by payload. - ICacheStore::GetPayload() is replaced by ICacheStore::GetChunks(), which allows loading parts of payloads. - ICacheStore::CancelAll() is moved to ICache::CancelAll() because the cache can track requests at the top level and cancel them without exposing cancellation on individual cache stores. - ECachePolicy::SkipLocalCopy has been removed because it is difficult to reason about. - ECachePolicy::SkipData flags now have a documented meaning for put requests, to hint that record existence implies payload existence. - The filesystem and memory cache stores have been updated to support partial records, filtering of payloads, and loading parts of payloads. - Requesting part of a payload will decompress the entire payload for now, until compressed buffers expose a way to decompress only part. - Fixed a bug in FTexturePlatformData::AreDerivedMipsAvailable() that caused it to return false for structured cache keys. #rb Zousar.Shaker #rnx #preflight 615e03241ed62f0001b95454 #ROBOMERGE-OWNER: Devin.Doucette #ROBOMERGE-AUTHOR: devin.doucette #ROBOMERGE-SOURCE: CL 17748550 in //UE5/Release-5.0/... via CL 17748555 #ROBOMERGE-BOT: STARSHIP (Release-Engine-Staging -> Release-Engine-Test) (v879-17706426) #ROBOMERGE-CONFLICT from-shelf #ROBOMERGE[STARSHIP]: UE5-Main [CL 17748602 by Devin Doucette in ue5-release-engine-test branch]
582 lines
15 KiB
C++
582 lines
15 KiB
C++
// Copyright Epic Games, Inc. All Rights Reserved.
|
|
|
|
#include "MemoryDerivedDataBackend.h"
|
|
|
|
#include "Algo/Accumulate.h"
|
|
#include "Algo/AllOf.h"
|
|
#include "DerivedDataPayload.h"
|
|
#include "Misc/ScopeExit.h"
|
|
#include "Serialization/CompactBinary.h"
|
|
#include "Templates/UniquePtr.h"
|
|
|
|
namespace UE::DerivedData::Backends
|
|
{
|
|
|
|
// Constructs an in-memory derived-data cache backend.
// InName: display name used in logs/stats. InMaxCacheSize: byte budget for the
// cache (values <= 0 mean unlimited; see PutCachedData). bInCanBeDisabled: when
// true, existence checks always report false — see the comments on the
// bCanBeDisabled member declaration for the rationale.
// CurrentCacheSize starts at SerializationSpecificDataSize to account for the
// fixed overhead of the serialized (boot-cache) representation.
FMemoryDerivedDataBackend::FMemoryDerivedDataBackend(const TCHAR* InName, int64 InMaxCacheSize, bool bInCanBeDisabled)
	: Name(InName)
	, MaxCacheSize(InMaxCacheSize)
	, bDisabled( false )
	, CurrentCacheSize( SerializationSpecificDataSize )
	, bMaxSizeExceeded(false)
	, bCanBeDisabled(bInCanBeDisabled)
{
}
|
|
|
|
// Sets bShuttingDown before calling Disable() so that Disable()'s
// check(bCanBeDisabled || bShuttingDown) passes even for backends that were
// not constructed as disable-able; Disable() then frees all cached items.
FMemoryDerivedDataBackend::~FMemoryDerivedDataBackend()
{
	bShuttingDown = true;
	Disable();
}
|
|
|
|
bool FMemoryDerivedDataBackend::IsWritable() const
|
|
{
|
|
FScopeLock ScopeLock(&SynchronizationObject);
|
|
return !bDisabled;
|
|
}
|
|
|
|
/** Memory-resident storage always reports the fastest (local) speed class. */
FDerivedDataBackendInterface::ESpeedClass FMemoryDerivedDataBackend::GetSpeedClass() const
{
	// No I/O is involved, so this backend is unconditionally "Local".
	return ESpeedClass::Local;
}
|
|
|
|
bool FMemoryDerivedDataBackend::CachedDataProbablyExists(const TCHAR* CacheKey)
|
|
{
|
|
// See comments on the declaration of bCanBeDisabled variable.
|
|
if (bCanBeDisabled)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
COOK_STAT(auto Timer = UsageStats.TimeProbablyExists());
|
|
|
|
if (ShouldSimulateMiss(CacheKey))
|
|
{
|
|
return false;
|
|
}
|
|
|
|
if (bDisabled)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
FScopeLock ScopeLock(&SynchronizationObject);
|
|
bool Result = CacheItems.Contains(FString(CacheKey));
|
|
if (Result)
|
|
{
|
|
COOK_STAT(Timer.AddHit(0));
|
|
}
|
|
return Result;
|
|
}
|
|
|
|
bool FMemoryDerivedDataBackend::GetCachedData(const TCHAR* CacheKey, TArray<uint8>& OutData)
|
|
{
|
|
COOK_STAT(auto Timer = UsageStats.TimeGet());
|
|
|
|
if (ShouldSimulateMiss(CacheKey))
|
|
{
|
|
return false;
|
|
}
|
|
|
|
if (!bDisabled)
|
|
{
|
|
FScopeLock ScopeLock(&SynchronizationObject);
|
|
|
|
FCacheValue* Item = CacheItems.FindRef(FString(CacheKey));
|
|
if (Item)
|
|
{
|
|
OutData = Item->Data;
|
|
Item->Age = 0;
|
|
check(OutData.Num());
|
|
COOK_STAT(Timer.AddHit(OutData.Num()));
|
|
return true;
|
|
}
|
|
}
|
|
OutData.Empty();
|
|
return false;
|
|
}
|
|
|
|
/** Prefetch is trivially satisfied only when every key is already in memory. */
bool FMemoryDerivedDataBackend::TryToPrefetch(TConstArrayView<FString> CacheKeys)
{
	const auto HitMask = CachedDataProbablyExistsBatch(CacheKeys);
	return HitMask.CountSetBits() == CacheKeys.Num();
}
|
|
|
|
bool FMemoryDerivedDataBackend::WouldCache(const TCHAR* CacheKey, TArrayView<const uint8> InData)
|
|
{
|
|
if (bDisabled || bMaxSizeExceeded)
|
|
{
|
|
return false;
|
|
}
|
|
|
|
return true;
|
|
}
|
|
|
|
/**
 * Stores a copy of InData under CacheKey.
 * Returns Cached when the data is stored (or a value already exists for the
 * key — repeat puts are assumed to carry identical data), Skipped for a
 * simulated debug miss, and NotCached when disabled or when the configured
 * size budget would be exceeded. A budget failure latches bMaxSizeExceeded
 * so later WouldCache() calls fail fast until an item is removed.
 *
 * Fixes vs. previous revision: removed a redundant second CalcCacheValueSize
 * call whose result was discarded, reused the already-built Key instead of
 * constructing FString(CacheKey) twice, and corrected the log format
 * specifiers ("%d") that were paired with int64 arguments (now INT64_FMT,
 * matching the Put() overload below).
 */
FDerivedDataBackendInterface::EPutStatus FMemoryDerivedDataBackend::PutCachedData(const TCHAR* CacheKey, TArrayView<const uint8> InData, bool bPutEvenIfExists)
{
	COOK_STAT(auto Timer = UsageStats.TimePut());
	FScopeLock ScopeLock(&SynchronizationObject);

	if (ShouldSimulateMiss(CacheKey))
	{
		return EPutStatus::Skipped;
	}

	// Should never hit this as higher level code should be checking..
	if (!WouldCache(CacheKey, InData))
	{
		//UE_LOG(LogDerivedDataCache, Warning, TEXT("WouldCache was not called prior to attempted Put!"));
		return EPutStatus::NotCached;
	}

	FString Key(CacheKey);
	if (CacheItems.FindRef(Key))
	{
		// Any second attempt to push data should be identical data, so the
		// existing item is kept as-is.
		return EPutStatus::Cached;
	}

	FCacheValue* Val = new FCacheValue(InData);
	const int32 CacheValueSize = CalcCacheValueSize(Key, *Val);

	// Reject the put if it would push the cache past its configured budget.
	if (MaxCacheSize > 0 && (CurrentCacheSize + CacheValueSize) > MaxCacheSize)
	{
		delete Val;
		UE_LOG(LogDerivedDataCache, Display, TEXT("Failed to cache data. Maximum cache size reached. CurrentSize %" INT64_FMT " kb / MaxSize: %" INT64_FMT " kb"), CurrentCacheSize / 1024, MaxCacheSize / 1024);
		bMaxSizeExceeded = true;
		return EPutStatus::NotCached;
	}

	COOK_STAT(Timer.AddHit(InData.Num()));
	CacheItems.Add(Key, Val);
	CurrentCacheSize += CacheValueSize;
	return EPutStatus::Cached;
}
|
|
|
|
void FMemoryDerivedDataBackend::RemoveCachedData(const TCHAR* CacheKey, bool bTransient)
|
|
{
|
|
FScopeLock ScopeLock(&SynchronizationObject);
|
|
if (bDisabled || bTransient)
|
|
{
|
|
return;
|
|
}
|
|
FString Key(CacheKey);
|
|
FCacheValue* Item = NULL;
|
|
if (CacheItems.RemoveAndCopyValue(Key, Item))
|
|
{
|
|
CurrentCacheSize -= CalcCacheValueSize(Key, *Item);
|
|
bMaxSizeExceeded = false;
|
|
|
|
check(Item);
|
|
delete Item;
|
|
}
|
|
else
|
|
{
|
|
check(!Item);
|
|
}
|
|
}
|
|
|
|
bool FMemoryDerivedDataBackend::SaveCache(const TCHAR* Filename)
|
|
{
|
|
double StartTime = FPlatformTime::Seconds();
|
|
TUniquePtr<FArchive> SaverArchive(IFileManager::Get().CreateFileWriter(Filename, FILEWRITE_EvenIfReadOnly));
|
|
if (!SaverArchive)
|
|
{
|
|
UE_LOG(LogDerivedDataCache, Error, TEXT("Could not save memory cache %s."), Filename);
|
|
return false;
|
|
}
|
|
|
|
FArchive& Saver = *SaverArchive;
|
|
uint32 Magic = MemCache_Magic64;
|
|
Saver << Magic;
|
|
const int64 DataStartOffset = Saver.Tell();
|
|
{
|
|
FScopeLock ScopeLock(&SynchronizationObject);
|
|
check(!bDisabled);
|
|
for (TMap<FString, FCacheValue*>::TIterator It(CacheItems); It; ++It )
|
|
{
|
|
Saver << It.Key();
|
|
Saver << It.Value()->Age;
|
|
Saver << It.Value()->Data;
|
|
}
|
|
}
|
|
const int64 DataSize = Saver.Tell(); // Everything except the footer
|
|
int64 Size = DataSize;
|
|
uint32 Crc = MemCache_Magic64; // Crc takes more time than I want to spend FCrc::MemCrc_DEPRECATED(&Buffer[0], Size);
|
|
Saver << Size;
|
|
Saver << Crc;
|
|
|
|
check(SerializationSpecificDataSize + DataSize <= MaxCacheSize || MaxCacheSize <= 0);
|
|
|
|
UE_LOG(LogDerivedDataCache, Log, TEXT("Saved boot cache %4.2fs %lldMB %s."), float(FPlatformTime::Seconds() - StartTime), DataSize / (1024 * 1024), Filename);
|
|
return true;
|
|
}
|
|
|
|
/**
 * Loads a boot cache file previously written by SaveCache().
 * Supports both the legacy format (MemCache_Magic, 32-bit size footer) and
 * the 64-bit format (MemCache_Magic64). The footer (size + crc slot) is
 * validated BEFORE any items are deserialized; items whose incremented age
 * reaches MaxAge are dropped on load. Returns false on any validation
 * failure, leaving the cache with whatever was loaded so far untouched.
 */
bool FMemoryDerivedDataBackend::LoadCache(const TCHAR* Filename)
{
	double StartTime = FPlatformTime::Seconds();
	const int64 FileSize = IFileManager::Get().FileSize(Filename);
	if (FileSize < 0)
	{
		UE_LOG(LogDerivedDataCache, Warning, TEXT("Could not find memory cache %s."), Filename);
		return false;
	}
	// We test 3 * uint32 which is the old format (< SerializationSpecificDataSize). We'll test
	// against SerializationSpecificDataSize later when we read the magic number from the cache.
	if (FileSize < sizeof(uint32) * 3)
	{
		UE_LOG(LogDerivedDataCache, Error, TEXT("Memory cache was corrputed (short) %s."), Filename);
		return false;
	}
	// Refuse files that would blow far past the configured budget.
	if (FileSize > MaxCacheSize*2 && MaxCacheSize > 0)
	{
		UE_LOG(LogDerivedDataCache, Error, TEXT("Refusing to load DDC cache %s. Size exceeds doubled MaxCacheSize."), Filename);
		return false;
	}

	TUniquePtr<FArchive> LoaderArchive(IFileManager::Get().CreateFileReader(Filename));
	if (!LoaderArchive)
	{
		UE_LOG(LogDerivedDataCache, Warning, TEXT("Could not read memory cache %s."), Filename);
		return false;
	}

	FArchive& Loader = *LoaderArchive;
	uint32 Magic = 0;
	Loader << Magic;
	if (Magic != MemCache_Magic && Magic != MemCache_Magic64)
	{
		UE_LOG(LogDerivedDataCache, Error, TEXT("Memory cache was corrputed (magic) %s."), Filename);
		return false;
	}
	// Check the file size again, this time against the correct minimum size.
	if (Magic == MemCache_Magic64 && FileSize < SerializationSpecificDataSize)
	{
		UE_LOG(LogDerivedDataCache, Error, TEXT("Memory cache was corrputed (short) %s."), Filename);
		return false;
	}
	// Calculate expected DataSize based on the magic number (footer size difference)
	const int64 DataSize = FileSize - (Magic == MemCache_Magic64 ? (SerializationSpecificDataSize - sizeof(uint32)) : (sizeof(uint32) * 2));
	// Jump to the footer and validate it before trusting the item data.
	Loader.Seek(DataSize);
	int64 Size = 0;
	uint32 Crc = 0;
	if (Magic == MemCache_Magic64)
	{
		Loader << Size;
	}
	else
	{
		// Legacy footer stored the size as 32 bits.
		uint32 Size32 = 0;
		Loader << Size32;
		Size = (int64)Size32;
	}
	Loader << Crc;
	if (Size != DataSize)
	{
		UE_LOG(LogDerivedDataCache, Error, TEXT("Memory cache was corrputed (size) %s."), Filename);
		return false;
	}
	// The crc slot actually holds the magic value (see SaveCache), so it must
	// match the header magic.
	if ((Crc != MemCache_Magic && Crc != MemCache_Magic64) || Crc != Magic)
	{
		UE_LOG(LogDerivedDataCache, Warning, TEXT("Memory cache was corrputed (crc) %s."), Filename);
		return false;
	}
	// Seek to data start offset (skip magic number)
	Loader.Seek(sizeof(uint32));
	{
		TArray<uint8> Working;
		FScopeLock ScopeLock(&SynchronizationObject);
		check(!bDisabled);
		// Deserialize (key, age, data) triples until we reach the footer.
		while (Loader.Tell() < DataSize)
		{
			FString Key;
			int32 Age;
			Loader << Key;
			Loader << Age;
			// Items age by one on each load; those reaching MaxAge expire.
			Age++;
			Loader << Working;
			if (Age < MaxAge)
			{
				CacheItems.Add(Key, new FCacheValue(Working, Age));
			}
			Working.Reset();
		}
		// these are just a double check on ending correctly
		if (Magic == MemCache_Magic64)
		{
			Loader << Size;
		}
		else
		{
			uint32 Size32 = 0;
			Loader << Size32;
			Size = (int64)Size32;
		}
		Loader << Crc;
	}

	// NOTE(review): the whole file size (including header/footer) is charged
	// against the budget here — presumably a conservative bound; confirm.
	CurrentCacheSize = FileSize;
	CacheFilename = Filename;
	UE_LOG(LogDerivedDataCache, Log, TEXT("Loaded boot cache %4.2fs %lldMB %s."), float(FPlatformTime::Seconds() - StartTime), DataSize / (1024 * 1024), Filename);
	return true;
}
|
|
|
|
void FMemoryDerivedDataBackend::Disable()
|
|
{
|
|
check(bCanBeDisabled || bShuttingDown);
|
|
FScopeLock ScopeLock(&SynchronizationObject);
|
|
bDisabled = true;
|
|
for (TMap<FString,FCacheValue*>::TIterator It(CacheItems); It; ++It )
|
|
{
|
|
delete It.Value();
|
|
}
|
|
CacheItems.Empty();
|
|
|
|
CurrentCacheSize = SerializationSpecificDataSize;
|
|
}
|
|
|
|
/** Builds a single-node stats tree describing this backend's usage counters. */
TSharedRef<FDerivedDataCacheStatsNode> FMemoryDerivedDataBackend::GatherUsageStats() const
{
	const FString NodeName = FString::Printf(TEXT("%s.%s"), TEXT("MemoryBackend"), *CacheFilename);
	TSharedRef<FDerivedDataCacheStatsNode> Node = MakeShared<FDerivedDataCacheStatsNode>(this, NodeName);
	Node->Stats.Add(TEXT(""), UsageStats);
	return Node;
}
|
|
|
|
/** Adopts the given debug options (miss-simulation configuration). */
bool FMemoryDerivedDataBackend::ApplyDebugOptions(FBackendDebugOptions& InOptions)
{
	DebugOptions = InOptions;
	return true;
}
|
|
|
|
bool FMemoryDerivedDataBackend::ShouldSimulateMiss(const TCHAR* InKey)
|
|
{
|
|
if (DebugOptions.RandomMissRate == 0 && DebugOptions.SimulateMissTypes.IsEmpty())
|
|
{
|
|
return false;
|
|
}
|
|
|
|
const FName Key(InKey);
|
|
const uint32 Hash = GetTypeHash(Key);
|
|
|
|
if (FScopeLock Lock(&MissedKeysCS); DebugMissedKeys.ContainsByHash(Hash, Key))
|
|
{
|
|
return true;
|
|
}
|
|
|
|
if (DebugOptions.ShouldSimulateMiss(InKey))
|
|
{
|
|
FScopeLock Lock(&MissedKeysCS);
|
|
UE_LOG(LogDerivedDataCache, Verbose, TEXT("Simulating miss in %s for %s"), *GetName(), InKey);
|
|
DebugMissedKeys.AddByHash(Hash, Key);
|
|
return true;
|
|
}
|
|
|
|
return false;
|
|
}
|
|
|
|
bool FMemoryDerivedDataBackend::ShouldSimulateMiss(const FCacheKey& Key)
|
|
{
|
|
if (DebugOptions.RandomMissRate == 0 && DebugOptions.SimulateMissTypes.IsEmpty())
|
|
{
|
|
return false;
|
|
}
|
|
|
|
const uint32 Hash = GetTypeHash(Key);
|
|
|
|
if (FScopeLock Lock(&MissedKeysCS); DebugMissedCacheKeys.ContainsByHash(Hash, Key))
|
|
{
|
|
return true;
|
|
}
|
|
|
|
if (DebugOptions.ShouldSimulateMiss(Key))
|
|
{
|
|
FScopeLock Lock(&MissedKeysCS);
|
|
DebugMissedCacheKeys.AddByHash(Hash, Key);
|
|
return true;
|
|
}
|
|
|
|
return false;
|
|
}
|
|
|
|
/** Sums the uncompressed (raw) sizes of the value and attachment payloads. */
int64 FMemoryDerivedDataBackend::CalcRawCacheRecordSize(const FCacheRecord& Record) const
{
	uint64 TotalRawSize = Record.GetValuePayload().GetRawSize();
	for (const FPayload& Attachment : Record.GetAttachmentPayloads())
	{
		TotalRawSize += Attachment.GetRawSize();
	}
	return int64(TotalRawSize);
}
|
|
|
|
/**
 * Estimates the serialized size of a cache record: a fixed header, the
 * bucket name, the metadata, and each present payload's compressed size
 * plus a small per-payload overhead.
 */
int64 FMemoryDerivedDataBackend::CalcSerializedCacheRecordSize(const FCacheRecord& Record) const
{
	// Estimate the serialized size of the cache record.
	uint64 TotalSize = 20;
	TotalSize += Record.GetKey().Bucket.ToString().Len();
	TotalSize += Record.GetMeta().GetSize();

	// Null payloads contribute nothing; present ones add compressed size + 32.
	const auto PayloadSize = [](const FPayload& Payload) -> uint64
	{
		return Payload ? Payload.GetData().GetCompressedSize() + 32 : 0;
	};
	TotalSize += PayloadSize(Record.GetValuePayload());
	for (const FPayload& Attachment : Record.GetAttachmentPayloads())
	{
		TotalSize += PayloadSize(Attachment);
	}
	return int64(TotalSize);
}
|
|
|
|
/**
 * Stores cache records. Records whose value or attachments are present but
 * missing their data (partial records) are rejected with Error. A record
 * with neither a value nor attachments is treated specially (see below).
 * OnComplete is invoked exactly once per record with Ok or Error.
 */
void FMemoryDerivedDataBackend::Put(
	TConstArrayView<FCacheRecord> Records,
	FStringView Context,
	ECachePolicy Policy,
	IRequestOwner& Owner,
	FOnCachePutComplete&& OnComplete)
{
	for (const FCacheRecord& Record : Records)
	{
		const FCacheKey& Key = Record.GetKey();
		EStatus Status = EStatus::Error;
		// Guarantee a completion callback for this record on every exit path,
		// including the 'continue' statements below.
		ON_SCOPE_EXIT
		{
			if (OnComplete)
			{
				OnComplete({Key, Status});
			}
		};

		if (ShouldSimulateMiss(Key))
		{
			UE_LOG(LogDerivedDataCache, Verbose, TEXT("%s: Simulated miss for put of %s from '%.*s'"),
				*GetName(), *WriteToString<96>(Key), Context.Len(), Context.GetData());
			continue;
		}

		const FPayload& Value = Record.GetValuePayload();
		const TConstArrayView<FPayload> Attachments = Record.GetAttachmentPayloads();

		// Reject partial records: every present payload must carry its data.
		if ((Value && !Value.HasData()) || !Algo::AllOf(Attachments, &FPayload::HasData))
		{
			continue;
		}

		if (!Value && Attachments.IsEmpty())
		{
			// Record with no payloads at all.
			// NOTE(review): this subtracts the existing record's size from the
			// budget but does not remove or replace it in CacheRecords, and
			// reports Ok even when no record existed — confirm whether an
			// empty put is meant to delete/replace the stored record.
			FScopeLock ScopeLock(&SynchronizationObject);
			if (bDisabled)
			{
				continue;
			}
			if (const FCacheRecord* Existing = CacheRecords.Find(Key))
			{
				CurrentCacheSize -= CalcSerializedCacheRecordSize(*Existing);
				bMaxSizeExceeded = false;
			}
			Status = EStatus::Ok;
		}
		else
		{
			COOK_STAT(auto Timer = UsageStats.TimePut());
			// Estimate the size outside the critical section where possible.
			const int64 RecordSize = CalcSerializedCacheRecordSize(Record);

			FScopeLock ScopeLock(&SynchronizationObject);
			// An existing record for the key is kept and reported as Ok.
			Status = CacheRecords.Contains(Key) ? EStatus::Ok : EStatus::Error;
			if (bDisabled || Status == EStatus::Ok)
			{
				continue;
			}
			// Reject the put if it would exceed the configured size budget,
			// latching bMaxSizeExceeded until an item is removed.
			if (MaxCacheSize > 0 && (CurrentCacheSize + RecordSize) > MaxCacheSize)
			{
				UE_LOG(LogDerivedDataCache, Display, TEXT("Failed to cache data. Maximum cache size reached. CurrentSize %" INT64_FMT " KiB / MaxSize: %" INT64_FMT " KiB"), CurrentCacheSize / 1024, MaxCacheSize / 1024);
				bMaxSizeExceeded = true;
				continue;
			}

			CurrentCacheSize += RecordSize;
			CacheRecords.Add(Record);
			COOK_STAT(Timer.AddHit(RecordSize));
			Status = EStatus::Ok;
		}
	}
}
|
|
|
|
/**
 * Looks up each requested key and reports the stored record via OnComplete
 * with EStatus::Ok. Misses (including simulated debug misses) report an
 * empty record built from the key with EStatus::Error.
 */
void FMemoryDerivedDataBackend::Get(
	TConstArrayView<FCacheKey> Keys,
	FStringView Context,
	FCacheRecordPolicy Policy,
	IRequestOwner& Owner,
	FOnCacheGetComplete&& OnComplete)
{
	for (const FCacheKey& Key : Keys)
	{
		COOK_STAT(auto Timer = UsageStats.TimeGet());
		FOptionalCacheRecord Record;
		if (ShouldSimulateMiss(Key))
		{
			UE_LOG(LogDerivedDataCache, Verbose, TEXT("%s: Simulated miss for get of %s from '%.*s'"),
				*GetName(), *WriteToString<96>(Key), Context.Len(), Context.GetData());
		}
		else
		{
			// Copy the record out under the lock; callbacks run outside it.
			FScopeLock ScopeLock(&SynchronizationObject);
			if (const FCacheRecord* Found = CacheRecords.Find(Key))
			{
				Record = *Found;
			}
		}

		if (!Record)
		{
			if (OnComplete)
			{
				OnComplete({FCacheRecordBuilder(Key).Build(), EStatus::Error});
			}
			continue;
		}

		COOK_STAT(Timer.AddHit(CalcRawCacheRecordSize(Record.Get())));
		if (OnComplete)
		{
			OnComplete({MoveTemp(Record).Get(), EStatus::Ok});
		}
	}
}
|
|
|
|
/**
 * Serves byte ranges of payloads. For each chunk request, looks up the
 * record, fetches the payload by ID, decompresses it, and copies out the
 * window starting at RawOffset, clamped to the payload's raw size. Missing
 * records/payloads or offsets past the end report EStatus::Error.
 */
void FMemoryDerivedDataBackend::GetChunks(
	TConstArrayView<FCacheChunkRequest> Chunks,
	FStringView Context,
	IRequestOwner& Owner,
	FOnCacheGetChunkComplete&& OnComplete)
{
	for (const FCacheChunkRequest& Chunk : Chunks)
	{
		COOK_STAT(auto Timer = UsageStats.TimeGet());
		FPayload Payload;
		if (ShouldSimulateMiss(Chunk.Key))
		{
			UE_LOG(LogDerivedDataCache, Verbose, TEXT("%s: Simulated miss for get of %s from '%.*s'"),
				*GetName(), *WriteToString<96>(Chunk.Key, '/', Chunk.Id), Context.Len(), Context.GetData());
		}
		else
		{
			// Copy the payload handle out under the lock; the (potentially
			// expensive) decompression happens outside it.
			FScopeLock ScopeLock(&SynchronizationObject);
			if (const FCacheRecord* Found = CacheRecords.Find(Chunk.Key))
			{
				Payload = Found->GetAttachmentPayload(Chunk.Id);
			}
		}

		if (!Payload || Chunk.RawOffset > Payload.GetRawSize())
		{
			if (OnComplete)
			{
				OnComplete({Chunk.Key, Chunk.Id, Chunk.RawOffset, 0, {}, {}, EStatus::Error});
			}
			continue;
		}

		// Clamp the requested window to the bytes actually available.
		const uint64 CopySize = FMath::Min(Payload.GetRawSize() - Chunk.RawOffset, Chunk.RawSize);
		COOK_STAT(Timer.AddHit(CopySize));
		if (OnComplete)
		{
			// Decompresses the whole payload before copying the window out;
			// partial decompression is not yet supported.
			FUniqueBuffer Buffer = FUniqueBuffer::Alloc(CopySize);
			Payload.GetData().DecompressToComposite().CopyTo(Buffer, Chunk.RawOffset);
			OnComplete({Chunk.Key, Chunk.Id, Chunk.RawOffset, CopySize, Payload.GetRawHash(), Buffer.MoveToShared(), EStatus::Ok});
		}
	}
}
|
|
|
|
} // UE::DerivedData::Backends
|