2024-04-10 20:29:18 -04:00
// Copyright Epic Games, Inc. All Rights Reserved.
# include "UbaCacheServer.h"
2024-05-25 16:58:10 -04:00
# include "UbaCacheEntry.h"
2024-04-12 18:04:58 -04:00
# include "UbaCompactTables.h"
2024-04-10 20:29:18 -04:00
# include "UbaBinaryReaderWriter.h"
2024-05-28 16:55:11 -04:00
# include "UbaDirectoryIterator.h"
2024-04-10 20:29:18 -04:00
# include "UbaFileAccessor.h"
2024-09-10 10:26:02 -04:00
# include "UbaHashMap.h"
2024-04-10 20:29:18 -04:00
# include "UbaNetworkServer.h"
# include "UbaStorageServer.h"
2024-04-24 15:26:58 -04:00
//#include <oodle2.h>
2024-04-10 20:29:18 -04:00
2024-09-10 10:26:02 -04:00
// TODO
// - Fix so expiration time is set to oldest if overflowing and decreasing time didn't cause any deletes. That way we can make sure next maintenance will delete entries
// - Sort buckets by last maintenance time to make sure the long ones always get a slot first
// - Change so save happens when bucket is done in same work to minimize latency for the long ones
2024-08-24 14:04:20 -04:00
2024-08-10 17:26:42 -04:00
# if PLATFORM_WINDOWS
# define UBA_FORCEINLINE __forceinline
# else
# define UBA_FORCEINLINE inline __attribute__ ((always_inline))
# endif
2024-04-10 20:29:18 -04:00
namespace uba
{
2024-07-29 13:48:36 -04:00
static constexpr u32 CacheFileVersion = 6 ;
2024-05-25 16:58:10 -04:00
static constexpr u32 CacheFileCompatibilityVersion = 3 ;
2024-04-25 00:44:38 -04:00
bool IsCaseInsensitive ( u64 id ) { return ( id & ( 1ull < < 32 ) ) = = 0 ; }
2024-04-10 20:29:18 -04:00
2024-08-10 17:26:42 -04:00
	// Fixed-capacity bit set whose storage is allocated from a caller-provided
	// MemoryBlock. Used by maintenance to mark casKeyTable offsets whose cas
	// files were deleted. No internal locking; callers synchronize externally.
	struct BitArray
	{
		// Allocates zeroed storage for bitCount bits, rounded up to whole 64-bit words.
		void Init(MemoryBlock& memoryBlock, u32 bitCount)
		{
			u32 bytes = AlignUp((bitCount + 7) / 8, 8u); // Align up to 64 bits
			data = (u64*)memoryBlock.Allocate(bytes, 8, TC(""));
			memset(data, 0, bytes);
			count = bytes / 8; // count is in u64 words, not bits
		}

		// Sets the bit at bitIndex. Asserts when bitIndex is outside the initialized range.
		UBA_FORCEINLINE void Set(u32 bitIndex)
		{
			u32 index = bitIndex / 64;
			UBA_ASSERTF(index < count, TC("Out of bounds (%u/%u). Bit index: %u"), index, count, bitIndex);
			u32 bitOffset = bitIndex - index * 64;
			data[index] |= 1ull << bitOffset;
		}

		// Returns true if the bit at bitIndex is set. Asserts when out of range.
		UBA_FORCEINLINE bool IsSet(u32 bitIndex)
		{
			u32 index = bitIndex / 64;
			UBA_ASSERTF(index < count, TC("Out of bounds (%u/%u). Bit index: %u"), index, count, bitIndex);
			u32 bitOffset = bitIndex - index * 64;
			return (data[index] & (1ull << bitOffset)) != 0;
		}

		// Returns the total number of set bits across the whole array.
		UBA_FORCEINLINE u32 CountSetBits()
		{
			u64 bits = 0;
			for (u64 i = 0, e = count; i != e; ++i)
				bits += CountBits(data[i]);
			return u32(bits);
		}

		// Invokes func(bitIndex) for every set bit, in ascending index order.
		template<typename Func>
		void Traverse(const Func& func)
		{
			u32 index = 0;
			for (u64 i = 0, e = count; i != e; ++i)
			{
				u64 v = data[i];
				while (v)
				{
					u64 bitIndex = FindFirstBit(v);
					func(index + u32(bitIndex));
					v &= ~(1ull << bitIndex); // clear the bit we just reported
				}
				index += 64;
			}
		}

		// Branch-free SWAR population count of one 64-bit word.
		static constexpr UBA_FORCEINLINE u64 CountBits(u64 bits)
		{
			// https://en.wikipedia.org/wiki/Hamming_weight
			bits -= (bits >> 1) & 0x5555555555555555ull;
			bits = (bits & 0x3333333333333333ull) + ((bits >> 2) & 0x3333333333333333ull);
			bits = (bits + (bits >> 4)) & 0x0f0f0f0f0f0f0f0full;
			return (bits * 0x0101010101010101) >> 56;
		}

		// Index of the lowest set bit. Callers pass v != 0 (Traverse guarantees
		// this); note the portable fallback returns 0 for v == 0 whereas the
		// tzcnt-style intrinsics return 64.
		static UBA_FORCEINLINE u64 FindFirstBit(u64 v)
		{
#if PLATFORM_WINDOWS && (defined(_M_X64) || defined(_M_IX86))
			// Use TZCNT intrinsic on Windows x86/x64
			return _tzcnt_u64(v);
#elif PLATFORM_WINDOWS && defined(_M_ARM64)
			// Use the ARM64 equivalent
			return _CountTrailingZeros64(v);
#elif PLATFORM_LINUX && (defined(__x86_64__) || defined(__i386__))
			// Use GCC's built-in TZCNT equivalent for x86/x64
			return __builtin_ia32_tzcnt_u64(v);
#elif PLATFORM_LINUX && defined(__aarch64__)
			// Use the ARM64 equivalent
			return __builtin_ctzll(v);
#else
			// Portable binary-search fallback
			u64 pos = 0;
			if (v >= 1ull << 32) { v >>= 32; pos += 32; }
			if (v >= 1ull << 16) { v >>= 16; pos += 16; }
			if (v >= 1ull << 8) { v >>= 8; pos += 8; }
			if (v >= 1ull << 4) { v >>= 4; pos += 4; }
			if (v >= 1ull << 2) { v >>= 2; pos += 2; }
			if (v >= 1ull << 1) { pos += 1; }
			return pos;
#endif
		}

		u64* data = nullptr; // word storage, owned by the MemoryBlock passed to Init
		u32 count = 0;       // number of u64 words in data
	};
	// Per-connection state for one bucket id: each client gets its own
	// path/caskey tables and entry lookup, keyed by the same id as the
	// server-side Bucket.
	struct CacheServer::ConnectionBucket
	{
		// Bit 32 of the id selects path case sensitivity (see IsCaseInsensitive).
		ConnectionBucket(u64 i) : pathTable(CachePathTableMaxSize, CompactPathTable::V1, IsCaseInsensitive(i)), casKeyTable(CacheCasKeyTableMaxSize), id(i) {}
		CompactPathTable pathTable;
		CompactCasKeyTable casKeyTable;
		ReaderWriterLock cacheEntryLookupLock; // guards cacheEntryLookup
		UnorderedMap<CasKey, CacheEntry> cacheEntryLookup;
		u64 id;
	};
	// State tracked per connected cache client.
	struct CacheServer::Connection
	{
		u32 clientVersion; // cache protocol version of the client
		UnorderedMap<u64, ConnectionBucket> buckets; // keyed by bucket id
	};
	// A server-side cache bucket: the authoritative path/caskey tables and
	// cache-entry lookup, plus bookkeeping used by maintenance and saving.
	struct CacheServer::Bucket
	{
		// Bit 32 of the bucket id selects path case sensitivity (see IsCaseInsensitive).
		Bucket(u64 id) : m_pathTable(CachePathTableMaxSize, CompactPathTable::V1, IsCaseInsensitive(id)), m_casKeyTable(CacheCasKeyTableMaxSize) {}
		ReaderWriterLock m_cacheEntryLookupLock; // guards m_cacheEntryLookup
		UnorderedMap<CasKey, CacheEntries> m_cacheEntryLookup;
		CompactPathTable m_pathTable;
		CompactCasKeyTable m_casKeyTable;
		Atomic<u64> totalEntryCount; // recomputed during maintenance
		Atomic<u64> totalEntrySize;
		Atomic<bool> hasDeletedEntries;
		Atomic<bool> needsSave; // set when the bucket must be written to disk
		Atomic<u64> lastSavedTime; // file time relative to db creation time
		Atomic<u64> lastUsedTime;  // file time relative to db creation time
		u64 expirationTimeSeconds = 0; // entries unused longer than this get deleted (0 = no expiration)
		u32 index = ~0u;

		// Scratch state lazily allocated for a maintenance pass.
		struct MaintenanceContext
		{
			MemoryBlock memoryBlock;
			BitArray deletedOffsets; // casKeyTable offsets whose cas files were deleted
			bool isInitialized = false;
			bool shouldTest = false;
		}* m_maintenanceContext = nullptr;
	};
2024-04-10 20:29:18 -04:00
	// Returns a readable name for a cache message type, generated from the
	// UBA_CACHE_MESSAGES X-macro list. Used for logging/debugging.
	const tchar* ToString(CacheMessageType type)
	{
		switch (type)
		{
		#define UBA_CACHE_MESSAGE(x) case CacheMessageType_##x: return TC("")#x;
		UBA_CACHE_MESSAGES
		#undef UBA_CACHE_MESSAGE
		default:
			return TC("Unknown"); // Should never happen
		}
	}
2024-05-29 01:07:06 -04:00
	// Copies configuration out of the create info, normalizes the root dir and
	// registers the cache service with the network server.
	CacheServer::CacheServer(const CacheServerCreateInfo& info)
	: m_logger(info.logWriter, TC("UbaCacheServer"))
	, m_server(info.storage.GetServer())
	, m_storage(info.storage)
	{
		m_checkInputsForDeletedCas = info.checkInputsForDeletedCas;
		m_bootTime = GetTime();
		m_maintenanceReserveSize = info.maintenanceReserveSize;
		m_expirationTimeSeconds = info.expirationTimeSeconds;
		m_bucketCasTableMaxSize = info.bucketCasTableMaxSize;

		// Resolve root dir to a full path and normalize separators + trailing slash.
		m_rootDir.count = GetFullPathNameW(info.rootDir, m_rootDir.capacity, m_rootDir.data, NULL);
		m_rootDir.Replace('/', PathSeparator).EnsureEndsWithSlash();

		// First lambda dispatches incoming cache messages to HandleMessage;
		// second provides message-type names for logging.
		m_server.RegisterService(CacheServiceId,
			[this](const ConnectionInfo& connectionInfo, MessageInfo& messageInfo, BinaryReader& reader, BinaryWriter& writer)
			{
				return HandleMessage(connectionInfo, messageInfo.type, reader, writer);
			},
			[](u8 messageType)
			{
				return ToString(CacheMessageType(messageType));
			}
		);

		m_server.RegisterOnClientDisconnected(CacheServiceId, [this](const Guid& clientUid, u32 clientId)
			{
				OnDisconnected(clientId);
			});
	}
CacheServer : : ~ CacheServer ( )
{
}
2024-05-28 16:55:11 -04:00
	// Counters accumulated (possibly from multiple worker threads) while
	// loading buckets; reported in the post-load log line.
	struct CacheServer::LoadStats
	{
		Atomic<u32> totalPathTableSize;
		Atomic<u32> totalCasKeyTableSize;
		Atomic<u64> totalCacheEntryCount;
	};
2024-04-10 20:29:18 -04:00
	// Loads the cache database from disk. Returns true even when no database
	// exists or the on-disk version is unsupported (the server starts fresh).
	bool CacheServer::Load()
	{
		u64 startTime = GetTime();
		StringBuffer<> fileName(m_rootDir);
		fileName.EnsureEndsWithSlash().Append(TC("cachedb"));
		FileAccessor file(m_logger, fileName.data);
		if (!file.OpenMemoryRead(0, false))
		{
			// No existing database; start a new one and mark the db file for saving.
			m_logger.Detail(TC("No database found. Starting a new one at %s"), fileName.data);
			m_creationTime = GetSystemTimeAsFileTime();
			m_dbfileDirty = true;
			return true;
		}

		BinaryReader reader(file.GetData(), 0, file.GetSize());
		u32 databaseVersion = reader.ReadU32();
		if (databaseVersion < CacheFileCompatibilityVersion || databaseVersion > CacheFileVersion)
		{
			m_logger.Detail(TC("Can't load database of version %u. Starting a new one at %s"), databaseVersion, fileName.data);
			return true;
		}

		// Version 3 files did not store a creation time.
		if (databaseVersion == 3)
			m_creationTime = GetSystemTimeAsFileTime() - 1;
		else
			m_creationTime = reader.ReadU64();

		// Older formats get rewritten in the current format on next save.
		if (databaseVersion != CacheFileVersion)
			m_dbfileDirty = true;

		LoadStats stats;
		if (databaseVersion == 4)
		{
			// Version 4 stored all buckets inline in the cachedb file.
			u32 bucketCount = reader.ReadU32();
			while (bucketCount--)
			{
				Bucket& bucket = GetBucket(reader.ReadU64());
				LoadBucket(bucket, reader, databaseVersion, stats);
			}
		}
		else
		{
			// Newer versions keep one file per bucket under <rootDir>/buckets.
			// First enumerate the dir to create the buckets, then load the
			// bucket files in parallel.
			StringBuffer<MaxPath> bucketsDir(m_rootDir);
			bucketsDir.EnsureEndsWithSlash().Append(TC("buckets"));
			TraverseDir(m_logger, bucketsDir.data, [&](const DirectoryEntry& e)
				{
					StringBuffer<128> keyName;
					keyName.Append(e.name, e.nameLen);
					u64 id;
					if (!keyName.Parse(id))
						return; // skip files whose names are not bucket ids
					GetBucket(id);
				});

			m_server.ParallelFor(GetBucketWorkerCount(), m_buckets, [&](auto& it)
				{
					u64 key = it->first;

					StringBuffer<MaxPath> bucketFilename(bucketsDir);
					bucketFilename.EnsureEndsWithSlash().AppendValue(key);
					FileAccessor bucketFile(m_logger, bucketFilename.data);
					if (!bucketFile.OpenMemoryRead(0, false))
					{
						m_logger.Detail(TC("Failed to open bucket file %s"), bucketFilename.data);
						return;
					}
					BinaryReader reader(bucketFile.GetData(), 0, bucketFile.GetSize());
					u32 bucketVersion = reader.ReadU32(); // each bucket file carries its own version
					LoadBucket(it->second, reader, bucketVersion, stats);
				});
		}

		u64 duration = GetTime() - startTime;
		m_logger.Detail(TC("Database loaded from %s (v%u) in %s (%llu bucket(s) containing %s paths, %s keys, %s cache entries)"), fileName.data, databaseVersion, TimeToText(duration).str, m_buckets.size(), BytesToText(stats.totalPathTableSize).str, BytesToText(stats.totalCasKeyTableSize).str, CountToText(stats.totalCacheEntryCount.load()).str);
		return true;
	}
	// Deserializes one bucket (path table, caskey table, then cache entries)
	// from reader, accumulating sizes/counts into outStats. Read order must
	// match the write order in SaveBucket.
	bool CacheServer::LoadBucket(Bucket& bucket, BinaryReader& reader, u32 databaseVersion, LoadStats& outStats)
	{
		// Buckets loaded from an older format must be rewritten on next save.
		if (databaseVersion != CacheFileVersion)
			bucket.needsSave = true;

		bucket.expirationTimeSeconds = m_expirationTimeSeconds;

		u32 pathTableSize = reader.ReadU32();
		if (pathTableSize)
		{
			BinaryReader pathTableReader(reader.GetPositionData(), 0, pathTableSize);
			bucket.m_pathTable.ReadMem(pathTableReader, true);
			reader.Skip(pathTableSize);
		}
		outStats.totalPathTableSize += pathTableSize;

		u32 casKeyTableSize = reader.ReadU32();
		if (casKeyTableSize)
		{
			BinaryReader casKeyTableReader(reader.GetPositionData(), 0, casKeyTableSize);
			bucket.m_casKeyTable.ReadMem(casKeyTableReader, true);
			reader.Skip(casKeyTableSize);
		}
		outStats.totalCasKeyTableSize += casKeyTableSize;

		u32 entryLookupCount = reader.ReadU32();
		bucket.m_cacheEntryLookup.reserve(entryLookupCount);
		while (entryLookupCount--)
		{
			auto insres = bucket.m_cacheEntryLookup.try_emplace(reader.ReadCasKey());
			UBA_ASSERT(insres.second); // keys are unique in the file
			auto& cacheEntries = insres.first->second;
			cacheEntries.Read(m_logger, reader, databaseVersion);
			outStats.totalCacheEntryCount += cacheEntries.entries.size();
		}
		return true;
	}
bool CacheServer : : Save ( )
{
2024-05-29 01:07:06 -04:00
for ( auto & kv : m_buckets )
{
Bucket & bucket = kv . second ;
if ( bucket . lastSavedTime < bucket . lastUsedTime )
bucket . needsSave = true ;
}
2024-04-10 20:29:18 -04:00
return SaveNoLock ( ) ;
}
2024-05-28 16:55:11 -04:00
struct FileWriter
2024-04-10 20:29:18 -04:00
{
2024-05-28 16:55:11 -04:00
static constexpr u64 TempBufferSize = 1024 * 1024 ;
2024-04-10 20:29:18 -04:00
2024-05-28 16:55:11 -04:00
FileWriter ( Logger & l , const tchar * fn )
: logger ( l )
, fileName ( fn )
, tempFileName ( StringBuffer < MaxPath > ( fn ) . Append ( TC ( " .tmp " ) ) . data )
, file ( logger , tempFileName . c_str ( ) )
2024-04-10 20:29:18 -04:00
{
2024-05-28 16:55:11 -04:00
tempBuffer = ( u8 * ) malloc ( TempBufferSize ) ;
}
2024-04-12 18:04:58 -04:00
2024-05-28 16:55:11 -04:00
~ FileWriter ( )
{
free ( tempBuffer ) ;
}
2024-04-10 20:29:18 -04:00
2024-05-28 16:55:11 -04:00
void WriteBytes ( const void * data , u64 size )
{
u8 * readPos = ( u8 * ) data ;
u64 left = size ;
while ( left )
2024-04-10 20:29:18 -04:00
{
2024-05-28 16:55:11 -04:00
if ( tempBufferPos ! = TempBufferSize )
{
u64 toWrite = Min ( TempBufferSize - tempBufferPos , left ) ;
memcpy ( tempBuffer + tempBufferPos , readPos , toWrite ) ;
tempBufferPos + = toWrite ;
left - = toWrite ;
readPos + = toWrite ;
}
else
{
2024-06-07 16:29:55 -04:00
written + = tempBufferPos ;
2024-05-28 16:55:11 -04:00
success & = file . Write ( tempBuffer , tempBufferPos ) ;
tempBufferPos = 0 ;
}
2024-04-10 20:29:18 -04:00
}
}
2024-05-28 16:55:11 -04:00
template < typename T >
void Write ( const T & v )
{
WriteBytes ( & v , sizeof ( v ) ) ;
}
2024-04-10 20:29:18 -04:00
2024-05-28 16:55:11 -04:00
bool Create ( ) { return file . CreateWrite ( ) ; }
bool Close ( )
{
success & = file . Write ( tempBuffer , tempBufferPos ) ;
2024-06-07 16:29:55 -04:00
written + = tempBufferPos ;
2024-05-28 16:55:11 -04:00
if ( ! success )
return false ;
if ( ! file . Close ( ) )
return false ;
if ( ! MoveFileExW ( tempFileName . data ( ) , fileName . data ( ) , MOVEFILE_REPLACE_EXISTING ) )
return logger . Error ( TC ( " Can't move file from %s to %s (%s) " ) , tempFileName . data ( ) , fileName . data ( ) , LastErrorToText ( ) . data ) ;
return true ;
}
Logger & logger ;
bool success = true ;
u8 * tempBuffer = nullptr ;
u64 tempBufferPos = 0 ;
2024-06-07 16:29:55 -04:00
u64 written = 0 ;
2024-05-28 16:55:11 -04:00
TString fileName ;
TString tempFileName ;
FileAccessor file ;
} ;
	// Serializes one bucket to <rootDir>/buckets/<bucketId> (via FileWriter's
	// temp-file swap). Write order must match the read order in LoadBucket.
	bool CacheServer::SaveBucket(u64 bucketId, Bucket& bucket)
	{
		u64 saveStart = GetTime();
		StringBuffer<MaxPath> bucketsDir(m_rootDir);
		bucketsDir.EnsureEndsWithSlash().Append(TC("buckets"));
		if (!m_storage.CreateDirectory(bucketsDir.data))
			return false;
		bucketsDir.EnsureEndsWithSlash();
		StringBuffer<MaxPath> bucketsFile(bucketsDir);
		bucketsFile.AppendValue(bucketId);
		FileWriter file(m_logger, bucketsFile.data);
		if (!file.Create())
			return false;

		file.Write(CacheFileVersion);

		u32 pathTableSize = bucket.m_pathTable.GetSize();
		file.Write(pathTableSize);
		file.WriteBytes(bucket.m_pathTable.GetMemory(), pathTableSize);

		u32 casKeyTableSize = bucket.m_casKeyTable.GetSize();
		file.Write(casKeyTableSize);
		file.WriteBytes(bucket.m_casKeyTable.GetMemory(), casKeyTableSize);

		u32 entryLookupCount = u32(bucket.m_cacheEntryLookup.size());
		file.Write(entryLookupCount);
		Vector<u8> temp; // scratch buffer reused for every entry
		for (auto& kv2 : bucket.m_cacheEntryLookup)
		{
			file.Write(kv2.first);
			// Serialize into the scratch buffer first so the produced size can
			// be validated against GetTotalSize before writing.
			temp.resize(kv2.second.GetTotalSize(CacheNetworkVersion, true));
			BinaryWriter writer(temp.data(), 0, temp.size());
			kv2.second.Write(writer, CacheNetworkVersion, true);
			UBA_ASSERT(writer.GetPosition() == temp.size());
			file.WriteBytes(temp.data(), temp.size());
		}

		if (!file.Close())
			return false;

		// Save time is stored relative to the database creation time.
		bucket.lastSavedTime = GetSystemTimeAsFileTime() - m_creationTime;
		m_logger.Detail(TC("Bucket %u saved - %s (%s)"), bucket.index, BytesToText(file.written).str, TimeToText(GetTime() - saveStart).str);
		return true;
	}
bool CacheServer : : SaveNoLock ( )
{
if ( m_dbfileDirty )
{
StringBuffer < MaxPath > fileName ( m_rootDir ) ;
fileName . EnsureEndsWithSlash ( ) . Append ( TC ( " cachedb " ) ) ;
FileWriter file ( m_logger , fileName . data ) ;
if ( ! file . Create ( ) )
return false ;
file . Write ( CacheFileVersion ) ;
file . Write ( m_creationTime ) ;
if ( ! file . Close ( ) )
return false ;
m_dbfileDirty = false ;
}
StringBuffer < MaxPath > bucketsDir ( m_rootDir ) ;
bucketsDir . EnsureEndsWithSlash ( ) . Append ( TC ( " buckets " ) ) ;
if ( ! m_storage . CreateDirectory ( bucketsDir . data ) )
2024-04-10 20:29:18 -04:00
return false ;
2024-05-28 16:55:11 -04:00
bucketsDir . EnsureEndsWithSlash ( ) ;
2024-04-10 20:29:18 -04:00
2024-05-28 16:55:11 -04:00
Atomic < bool > success = true ;
2024-04-10 20:29:18 -04:00
2024-05-30 19:37:55 -04:00
for ( auto it = m_buckets . begin ( ) ; it ! = m_buckets . end ( ) ; )
{
Bucket & bucket = it - > second ;
if ( ! bucket . m_cacheEntryLookup . empty ( ) )
{
+ + it ;
continue ;
}
StringBuffer < MaxPath > bucketsFile ( m_rootDir ) ;
bucketsFile . EnsureEndsWithSlash ( ) . Append ( TC ( " buckets " ) ) . EnsureEndsWithSlash ( ) . AppendValue ( it - > first ) ;
DeleteFileW ( bucketsFile . data ) ;
m_logger . Detail ( TC ( " Bucket %u was empty. Deleted " ) , bucket . index ) ;
it = m_buckets . erase ( it ) ;
}
2024-05-28 16:55:11 -04:00
m_server . ParallelFor ( GetBucketWorkerCount ( ) , m_buckets , [ & , temp = Vector < u8 > ( ) ] ( auto & it ) mutable
{
Bucket & bucket = it - > second ;
if ( ! bucket . needsSave )
return ;
if ( SaveBucket ( it - > first , bucket ) )
bucket . needsSave = false ;
else
success = false ;
} ) ;
return success ;
2024-04-10 20:29:18 -04:00
}
2024-05-02 12:53:20 -04:00
bool CacheServer : : RunMaintenance ( bool force , const Function < bool ( ) > & shouldExit )
2024-04-10 20:29:18 -04:00
{
if ( m_addsSinceMaintenance = = 0 & & ! force )
return true ;
2024-05-23 19:16:15 -04:00
SCOPED_WRITE_LOCK ( m_connectionsLock , lock2 ) ;
if ( ! m_connections . empty ( ) )
return true ;
m_isRunningMaintenance = true ;
lock2 . Leave ( ) ;
auto g = MakeGuard ( [ & ] ( )
{
SCOPED_WRITE_LOCK ( m_connectionsLock , lock2 ) ;
m_isRunningMaintenance = false ;
} ) ;
2024-05-25 22:30:11 -04:00
2024-08-17 17:25:22 -04:00
//m_forceAllSteps = true;
2024-05-28 15:06:16 -04:00
bool forceAllSteps = m_forceAllSteps ;
m_forceAllSteps = false ;
2024-04-10 20:29:18 -04:00
2024-08-11 05:06:02 -04:00
u32 addsSinceMaintenance = m_addsSinceMaintenance ;
bool entriesAdded = addsSinceMaintenance ! = 0 ;
2024-07-26 16:44:00 -04:00
m_addsSinceMaintenance = 0 ;
u64 startTime = GetTime ( ) ;
if ( entriesAdded )
{
auto & storageStats = m_storage . Stats ( ) ;
u64 hits = m_cacheKeyHitCount ;
u64 miss = m_cacheKeyFetchCount - hits ;
m_logger . Info ( TC ( " Stats since boot (%s ago) " ) , TimeToText ( startTime - m_bootTime , true ) . str ) ;
2024-08-06 15:29:52 -04:00
m_logger . Info ( TC ( " CacheServer %s hits, %s misses " ) , CountToText ( hits ) . str , CountToText ( miss ) . str ) ;
2024-07-26 16:44:00 -04:00
u64 recvCount = storageStats . sendCas . count . load ( ) ;
u64 sendCount = storageStats . recvCas . count . load ( ) ;
2024-08-06 15:29:52 -04:00
m_logger . Info ( TC ( " StorageServer cas %s (%s) sent, %s (%s) received " ) , CountToText ( recvCount ) . str , BytesToText ( storageStats . sendCasBytesComp ) . str , CountToText ( sendCount ) . str , BytesToText ( storageStats . recvCasBytesComp ) . str ) ;
2024-07-26 16:44:00 -04:00
}
2024-05-25 22:30:11 -04:00
if ( m_shouldWipe )
{
m_shouldWipe = false ;
m_logger . Info ( TC ( " Obliterating database " ) ) ;
m_longestMaintenance = 0 ;
m_buckets . clear ( ) ;
forceAllSteps = true ;
m_creationTime = GetSystemTimeAsFileTime ( ) ;
}
else
{
2024-07-26 16:44:00 -04:00
TimeToText lastTime ( startTime - m_lastMaintenance , true ) ;
2024-08-11 05:06:02 -04:00
m_logger . Info ( TC ( " Maintenance started after %u added cache entries (Ran last time %s ago) " ) , addsSinceMaintenance , ( m_lastMaintenance ? lastTime . str : TC ( " <never> " ) ) ) ;
2024-05-25 22:30:11 -04:00
}
2024-07-26 16:44:00 -04:00
m_lastMaintenance = startTime ;
2024-04-10 20:29:18 -04:00
2024-05-26 18:07:50 -04:00
UnorderedSet < CasKey > deletedCasFiles ;
2024-04-10 20:29:18 -04:00
m_storage . HandleOverflow ( & deletedCasFiles ) ;
u64 deletedCasCount = deletedCasFiles . size ( ) ;
u64 totalCasSize = 0 ;
2024-05-29 01:07:06 -04:00
2024-09-10 10:26:02 -04:00
struct CasFileInfo { CasFileInfo ( u32 s = 0 ) : size ( s ) { } u32 size ; Atomic < bool > isUsed ; } ; // These are compressed cas, should never be over 4gb
// Existing cas entries can be more than 2 million entries.. which uses a lot of memory
constexpr u64 existingCasMemoryReserveSize = 192 * 1024 * 1024 ;
MemoryBlock existingCasMemoryBlock ;
if ( ! existingCasMemoryBlock . Init ( existingCasMemoryReserveSize , nullptr , true ) )
existingCasMemoryBlock . Init ( existingCasMemoryReserveSize ) ;
HashMap < CasKey , CasFileInfo > existingCas ;
2024-05-29 01:07:06 -04:00
2024-06-05 14:00:34 -04:00
m_storage . WaitForActiveWork ( ) ;
2024-09-10 10:26:02 -04:00
u64 totalCasCount ;
2024-04-10 20:29:18 -04:00
{
2024-08-16 12:38:26 -04:00
u64 collectCasStartTime = GetTime ( ) ;
u32 removedNonExisting = 0 ;
// TODO: Make this cleaner... (inside UbaStorage instead)
2024-05-29 01:07:06 -04:00
SCOPED_WRITE_LOCK ( m_storage . m_casLookupLock , lookupLock ) ;
2024-05-29 12:07:26 -04:00
2024-09-10 10:26:02 -04:00
totalCasCount = m_storage . m_casLookup . size ( ) ;
existingCas . Init ( existingCasMemoryBlock , totalCasCount ) ;
2024-05-29 12:07:26 -04:00
2024-05-29 02:20:36 -04:00
for ( auto i = m_storage . m_casLookup . begin ( ) , e = m_storage . m_casLookup . end ( ) ; i ! = e ; )
2024-05-29 01:07:06 -04:00
{
2024-05-29 02:20:36 -04:00
if ( i - > second . verified & & ! i - > second . exists )
{
2024-05-31 02:18:27 -04:00
m_storage . DetachEntry ( i - > second ) ;
2024-05-29 16:57:56 -04:00
+ + removedNonExisting ;
2024-05-29 02:20:36 -04:00
i = m_storage . m_casLookup . erase ( i ) ;
2024-05-29 12:07:26 -04:00
e = m_storage . m_casLookup . end ( ) ;
2024-05-29 02:20:36 -04:00
continue ;
}
totalCasSize + = i - > second . size ;
2024-09-10 10:26:02 -04:00
UBA_ASSERT ( i - > second . size < ~ 0u ) ;
existingCas . Insert ( i - > first ) . size = u32 ( i - > second . size ) ;
2024-05-29 02:20:36 -04:00
+ + i ;
2024-05-29 01:07:06 -04:00
}
2024-08-16 12:38:26 -04:00
lookupLock . Leave ( ) ;
if ( removedNonExisting )
m_logger . Detail ( TC ( " Removed %s cas entries (marked as not existing) " ) , CountToText ( removedNonExisting ) . str ) ;
2024-09-10 10:26:02 -04:00
m_logger . Detail ( TC ( " Found %s (%s) cas files and %s deleted by overflow (%s) " ) , CountToText ( existingCas . Size ( ) ) . str , BytesToText ( totalCasSize ) . str , CountToText ( deletedCasFiles . size ( ) ) . str , TimeToText ( GetTime ( ) - collectCasStartTime ) . str ) ;
2024-05-28 20:07:10 -04:00
}
2024-05-02 12:53:20 -04:00
if ( shouldExit ( ) )
return true ;
2024-08-17 17:25:22 -04:00
ReaderWriterLock globalStatsLock ;
2024-04-10 20:29:18 -04:00
u64 now = GetSystemTimeAsFileTime ( ) ;
2024-05-25 22:30:11 -04:00
u64 oldest = 0 ;
2024-06-07 16:29:55 -04:00
u64 longestUnused = 0 ;
2024-04-10 20:29:18 -04:00
2024-05-23 21:53:29 -04:00
u32 workerCount = m_server . GetWorkerCount ( ) ;
u32 workerCountToUse = workerCount > 0 ? workerCount - 1 : 0 ;
2024-05-26 01:56:11 -04:00
u32 workerCountToUseForBuckets = Min ( workerCountToUse , u32 ( m_buckets . size ( ) ) ) ;
2024-04-10 20:29:18 -04:00
2024-05-29 01:07:06 -04:00
Atomic < u64 > totalEntryCount ;
2024-04-12 18:04:58 -04:00
Atomic < u64 > deleteEntryCount ;
2024-05-29 01:07:06 -04:00
Atomic < u64 > expiredEntryCount ;
Atomic < u64 > overflowedEntryCount ;
Atomic < u64 > missingOutputEntryCount ;
Atomic < u64 > missingInputEntryCount ;
2024-05-26 18:07:50 -04:00
Atomic < u64 > activeDropCount ;
auto dropCasGuard = MakeGuard ( [ & ] ( ) { while ( activeDropCount ! = 0 ) Sleep ( 1 ) ; } ) ;
2024-04-10 20:29:18 -04:00
2024-08-17 17:25:22 -04:00
auto EnsureBucketContextInitialized = [ & ] ( Bucket & bucket )
{
auto & context = * bucket . m_maintenanceContext ;
if ( ! context . isInitialized )
{
if ( ! context . memoryBlock . Init ( m_maintenanceReserveSize , nullptr , true ) ) // Try to use large blocks
context . memoryBlock . Init ( m_maintenanceReserveSize , nullptr , false ) ;
context . deletedOffsets . Init ( context . memoryBlock , bucket . m_casKeyTable . GetSize ( ) ) ;
context . isInitialized = true ;
}
} ;
2024-07-25 01:44:30 -04:00
u32 deleteIteration = 0 ;
2024-04-10 20:29:18 -04:00
u64 deleteCacheEntriesStartTime = GetTime ( ) ;
do
{
2024-05-26 18:07:50 -04:00
bool checkInputsForDeletes = m_checkInputsForDeletedCas & & ! deletedCasFiles . empty ( ) ;
2024-05-25 22:30:11 -04:00
oldest = 0 ;
2024-06-07 16:29:55 -04:00
longestUnused = 0 ;
2024-05-29 01:07:06 -04:00
totalEntryCount = 0 ;
2024-04-12 18:04:58 -04:00
2024-05-26 01:56:11 -04:00
m_server . ParallelFor ( workerCountToUseForBuckets , m_buckets , [ & ] ( auto & it )
2024-04-10 20:29:18 -04:00
{
2024-04-12 18:04:58 -04:00
Bucket & bucket = it - > second ;
2024-08-17 17:25:22 -04:00
auto context = bucket . m_maintenanceContext ;
if ( ! context )
context = bucket . m_maintenanceContext = new Bucket : : MaintenanceContext ;
bool foundDeletedCasKey = false ;
for ( auto & cas : deletedCasFiles )
bucket . m_casKeyTable . TraverseOffsets ( cas , [ & ] ( u32 casKeyOffset )
{
EnsureBucketContextInitialized ( bucket ) ;
foundDeletedCasKey = true ;
context - > deletedOffsets . Set ( casKeyOffset ) ;
} ) ;
if ( ! foundDeletedCasKey )
checkInputsForDeletes = false ;
auto & deletedOffsets = context - > deletedOffsets ;
2024-04-12 18:04:58 -04:00
bucket . totalEntryCount = 0 ;
bucket . totalEntrySize = 0 ;
2024-07-25 01:28:05 -04:00
2024-05-26 18:07:50 -04:00
ReaderWriterLock keysToEraseLock ;
Vector < CasKey > keysToErase ;
2024-07-29 13:48:36 -04:00
u64 lastUseTimeLimit = 0 ; // This is the time relative to server startup time
2024-08-24 14:04:20 -04:00
if ( bucket . expirationTimeSeconds & & m_bucketCasTableMaxSize )
2024-07-25 01:28:05 -04:00
{
2024-08-24 14:04:20 -04:00
if ( deleteIteration = = 0 )
2024-07-25 01:28:05 -04:00
{
2024-08-24 14:04:20 -04:00
u64 bucketCasTableSize = bucket . m_casKeyTable . GetSize ( ) ;
u64 oldExpirationTime = bucket . expirationTimeSeconds ;
if ( bucketCasTableSize > = m_bucketCasTableMaxSize )
bucket . expirationTimeSeconds - = 60 * 60 ; // Decreased by one hour
else if ( bucket . expirationTimeSeconds < m_expirationTimeSeconds & & ( bucketCasTableSize + 1ull * 1024 * 1024 ) < m_bucketCasTableMaxSize )
bucket . expirationTimeSeconds + = 60 * 60 ; // Increased by one hour
if ( oldExpirationTime ! = bucket . expirationTimeSeconds )
m_logger . Info ( TC ( " %s expiration time for bucket %u to %s " ) , oldExpirationTime < bucket . expirationTimeSeconds ? TC ( " Increased " ) : TC ( " Decreased " ) , bucket . index , TimeToText ( MsToTime ( bucket . expirationTimeSeconds * 1000 ) , true ) . str ) ;
2024-07-25 01:28:05 -04:00
}
2024-08-24 14:04:20 -04:00
u64 secondsRunning = GetFileTimeAsSeconds ( now - m_creationTime ) ;
if ( secondsRunning > bucket . expirationTimeSeconds )
2024-07-25 01:28:05 -04:00
lastUseTimeLimit = ( now - m_creationTime ) - GetSecondsAsFileTime ( bucket . expirationTimeSeconds ) ;
}
2024-08-17 17:25:22 -04:00
m_server . ParallelFor ( workerCountToUse , bucket . m_cacheEntryLookup , [ & , touchedCas = Vector < Atomic < bool > * > ( ) ] ( auto & li ) mutable
2024-04-10 20:29:18 -04:00
{
2024-05-25 16:58:10 -04:00
CacheEntries & entries = li - > second ;
2024-04-12 18:04:58 -04:00
// There is currently no idea saving more than 256kb worth of entries per lookup key (because that is what fetch max returns).. so let's wipe out
// all the entries that overflow that number
2024-05-25 16:58:10 -04:00
u64 capacityLeft = SendMaxSize - 32 - entries . GetSharedSize ( ) ;
2024-04-10 20:29:18 -04:00
2024-05-26 18:07:50 -04:00
// Check if any offset has been deleted in shared offsets..
bool offsetDeletedInShared = false ;
auto & sharedOffsets = entries . sharedInputCasKeyOffsets ;
if ( checkInputsForDeletes )
{
BinaryReader reader2 ( sharedOffsets . data ( ) , 0 , sharedOffsets . size ( ) ) ;
while ( reader2 . GetLeft ( ) )
{
2024-08-17 17:25:22 -04:00
if ( ! deletedOffsets . IsSet ( u32 ( reader2 . Read7BitEncoded ( ) ) ) )
2024-05-26 18:07:50 -04:00
continue ;
offsetDeletedInShared = true ;
break ;
}
}
2024-04-12 18:04:58 -04:00
for ( auto i = entries . entries . begin ( ) , e = entries . entries . end ( ) ; i ! = e ; )
{
auto & entry = * i ;
bool deleteEntry = false ;
2024-07-29 13:48:36 -04:00
u64 neededSize = entries . GetEntrySize ( entry , CacheNetworkVersion , false ) ;
2024-04-12 18:04:58 -04:00
if ( neededSize > capacityLeft )
2024-04-10 20:29:18 -04:00
{
2024-04-12 18:04:58 -04:00
deleteEntry = true ;
capacityLeft = 0 ;
2024-05-29 01:07:06 -04:00
+ + overflowedEntryCount ;
}
if ( ! deleteEntry & & entry . creationTime < lastUseTimeLimit & & entry . lastUsedTime < lastUseTimeLimit )
{
deleteEntry = true ;
+ + expiredEntryCount ;
2024-04-10 20:29:18 -04:00
}
2024-04-12 18:04:58 -04:00
2024-05-25 16:58:10 -04:00
// This is an attempt at removing entries that has inputs that depends on other entries outputs.
// and that there is no point keeping them if the other entry is removed
// Example would be that there is no idea keeping entries that uses a pch if the entry producing the pch is gone
2024-05-26 18:07:50 -04:00
if ( checkInputsForDeletes )
2024-04-12 18:04:58 -04:00
{
2024-05-26 18:07:50 -04:00
if ( ! deleteEntry & & offsetDeletedInShared )
2024-04-12 18:04:58 -04:00
{
2024-05-26 18:07:50 -04:00
BinaryReader rangeReader ( entry . sharedInputCasKeyOffsetRanges . data ( ) , 0 , entry . sharedInputCasKeyOffsetRanges . size ( ) ) ;
while ( ! deleteEntry & & rangeReader . GetLeft ( ) )
2024-04-12 18:04:58 -04:00
{
2024-05-26 18:07:50 -04:00
u64 begin = rangeReader . Read7BitEncoded ( ) ;
u64 end = rangeReader . Read7BitEncoded ( ) ;
BinaryReader inputReader ( sharedOffsets . data ( ) + begin , 0 , end - begin ) ;
while ( inputReader . GetLeft ( ) )
{
2024-08-17 17:25:22 -04:00
if ( ! deletedOffsets . IsSet ( u32 ( inputReader . Read7BitEncoded ( ) ) ) )
2024-05-26 18:07:50 -04:00
continue ;
deleteEntry = true ;
2024-05-29 01:07:06 -04:00
+ + missingInputEntryCount ;
2024-05-26 18:07:50 -04:00
break ;
}
}
}
if ( ! deleteEntry )
{
auto & extraInputs = entry . extraInputCasKeyOffsets ;
BinaryReader extraReader ( extraInputs . data ( ) , 0 , extraInputs . size ( ) ) ;
while ( extraReader . GetLeft ( ) )
{
2024-08-17 17:25:22 -04:00
if ( ! deletedOffsets . IsSet ( u32 ( extraReader . Read7BitEncoded ( ) ) ) )
2024-05-26 18:07:50 -04:00
continue ;
2024-04-12 18:04:58 -04:00
deleteEntry = true ;
2024-05-29 01:07:06 -04:00
+ + missingInputEntryCount ;
2024-04-12 18:04:58 -04:00
break ;
}
}
}
2024-05-26 18:07:50 -04:00
if ( ! deleteEntry )
{
// Traverse outputs and check if cas files exists for each output, if not, delete entry.
touchedCas . clear ( ) ;
auto & outputs = entry . outputCasKeyOffsets ;
BinaryReader outputsReader ( outputs . data ( ) , 0 , outputs . size ( ) ) ;
while ( outputsReader . GetLeft ( ) )
{
u64 offset = outputsReader . Read7BitEncoded ( ) ;
CasKey casKey ;
bucket . m_casKeyTable . GetKey ( casKey , offset ) ;
UBA_ASSERT ( IsCompressed ( casKey ) ) ;
2024-09-10 10:26:02 -04:00
if ( auto value = existingCas . Find ( casKey ) )
2024-05-26 18:07:50 -04:00
{
2024-09-10 10:26:02 -04:00
touchedCas . push_back ( & value - > isUsed ) ;
2024-05-26 18:07:50 -04:00
continue ;
}
deleteEntry = true ;
2024-05-29 01:07:06 -04:00
+ + missingOutputEntryCount ;
2024-05-26 18:07:50 -04:00
break ;
}
}
2024-04-12 18:04:58 -04:00
// Remove entry from entries list and skip increasing ref count of cas files
if ( deleteEntry )
{
2024-05-28 15:06:16 -04:00
if ( i - > id = = entries . primaryId )
entries . primaryId = ~ 0u ;
2024-05-26 18:07:50 -04:00
bucket . hasDeletedEntries = true ;
2024-04-12 18:04:58 -04:00
+ + deleteEntryCount ;
i = entries . entries . erase ( i ) ;
e = entries . entries . end ( ) ;
continue ;
}
+ + bucket . totalEntryCount ;
capacityLeft - = neededSize ;
2024-08-17 17:25:22 -04:00
{
SCOPED_WRITE_LOCK ( globalStatsLock , l ) ;
if ( ! oldest | | entry . creationTime < oldest )
oldest = entry . creationTime ;
if ( ! longestUnused | | entry . lastUsedTime < longestUnused )
longestUnused = entry . lastUsedTime ;
}
2024-04-12 18:04:58 -04:00
2024-08-17 17:25:22 -04:00
for ( auto v : touchedCas )
* v = true ;
2024-04-12 18:04:58 -04:00
+ + i ;
2024-04-10 20:29:18 -04:00
}
2024-05-26 18:07:50 -04:00
if ( entries . entries . empty ( ) )
{
SCOPED_WRITE_LOCK ( keysToEraseLock , lock2 ) ;
keysToErase . push_back ( li - > first ) ;
}
else
2024-07-29 13:48:36 -04:00
bucket . totalEntrySize + = entries . GetTotalSize ( CacheNetworkVersion , false ) ;
2024-05-26 18:07:50 -04:00
} ) ;
for ( auto & key : keysToErase )
bucket . m_cacheEntryLookup . erase ( key ) ;
2024-05-29 01:07:06 -04:00
totalEntryCount + = bucket . totalEntryCount ;
2024-06-02 18:15:45 -04:00
} , TC ( " " ) , true ) ;
2024-04-10 20:29:18 -04:00
// Reset deleted cas files and update it again..
deletedCasFiles . clear ( ) ;
2024-09-10 10:26:02 -04:00
for ( auto i = existingCas . ValuesBegin ( ) , e = existingCas . ValuesEnd ( ) ; i ! = e ; + + i )
2024-04-10 20:29:18 -04:00
{
2024-09-10 10:26:02 -04:00
if ( i - > isUsed )
2024-04-10 20:29:18 -04:00
{
2024-09-10 10:26:02 -04:00
i - > isUsed = false ;
2024-04-10 20:29:18 -04:00
continue ;
}
2024-09-10 10:26:02 -04:00
const CasKey * key = existingCas . GetKey ( i ) ;
if ( ! key )
continue ;
deletedCasFiles . insert ( * key ) ;
2024-04-10 20:29:18 -04:00
+ + deletedCasCount ;
2024-09-10 10:26:02 -04:00
totalCasSize - = i - > size ;
existingCas . Erase ( * key ) ;
2024-04-10 20:29:18 -04:00
}
2024-05-26 18:07:50 -04:00
// Add drop cas as work so it can run in the background
2024-04-10 20:29:18 -04:00
for ( auto & casKey : deletedCasFiles )
2024-05-26 18:07:50 -04:00
{
+ + activeDropCount ;
m_server . AddWork ( [ & , key = casKey ] ( ) { m_storage . DropCasFile ( key , true , TC ( " " ) ) ; - - activeDropCount ; } , 1 , TC ( " " ) ) ;
}
2024-07-25 01:44:30 -04:00
+ + deleteIteration ;
2024-04-10 20:29:18 -04:00
}
while ( ! deletedCasFiles . empty ( ) ) ; // if cas files are deleted we need to do another loop and check cache entry inputs to see if files were inputs
2024-09-10 10:26:02 -04:00
existingCasMemoryBlock . Deinit ( ) ;
2024-05-29 01:07:06 -04:00
if ( overflowedEntryCount )
m_logger . Detail ( TC ( " Found %llu overflowed cache entries " ) , overflowedEntryCount . load ( ) ) ;
if ( expiredEntryCount )
2024-08-06 15:29:52 -04:00
m_logger . Detail ( TC ( " Found %llu expired cache entries " ) , expiredEntryCount . load ( ) ) ;
2024-05-29 01:07:06 -04:00
if ( missingOutputEntryCount )
m_logger . Detail ( TC ( " Found %llu cache entries with missing output cas " ) , missingOutputEntryCount . load ( ) ) ;
if ( missingInputEntryCount )
m_logger . Detail ( TC ( " Found %llu cache entries with missing input cas " ) , missingInputEntryCount . load ( ) ) ;
2024-08-16 12:38:26 -04:00
m_logger . Detail ( TC ( " Deleted %llu cas files and %llu cache entries over %u buckets (%s) " ) , deletedCasCount , deleteEntryCount . load ( ) , u32 ( m_buckets . size ( ) ) , TimeToText ( GetTime ( ) - deleteCacheEntriesStartTime ) . str ) ;
2024-04-10 20:29:18 -04:00
2024-05-02 12:53:20 -04:00
if ( shouldExit ( ) )
return true ;
2024-07-26 16:44:00 -04:00
u64 maxCommittedMemory = 0 ;
2024-05-26 01:56:11 -04:00
m_server . ParallelFor ( workerCountToUseForBuckets , m_buckets , [ & ] ( auto & it )
2024-04-10 20:29:18 -04:00
{
2024-04-12 18:04:58 -04:00
u64 bucketStartTime = GetTime ( ) ;
2024-04-10 20:29:18 -04:00
2024-04-12 18:04:58 -04:00
Bucket & bucket = it - > second ;
2024-08-17 17:25:22 -04:00
auto deleteContext = MakeGuard ( [ & ] ( ) { delete bucket . m_maintenanceContext ; bucket . m_maintenanceContext = nullptr ; } ) ;
2024-04-12 18:04:58 -04:00
2024-05-26 18:07:50 -04:00
if ( ! bucket . hasDeletedEntries & & ! forceAllSteps )
2024-04-10 20:29:18 -04:00
{
2024-08-06 15:29:52 -04:00
m_logger . Detail ( TC ( " Bucket %u skipped updating. (%s entries) " ) , bucket . index , CountToText ( bucket . totalEntryCount . load ( ) ) . str ) ;
2024-05-24 14:04:37 -04:00
return ;
}
2024-05-27 11:42:45 -04:00
bucket . hasDeletedEntries = false ;
2024-08-17 17:25:22 -04:00
EnsureBucketContextInitialized ( bucket ) ;
MemoryBlock & memoryBlock = bucket . m_maintenanceContext - > memoryBlock ;
2024-04-12 18:04:58 -04:00
2024-08-17 17:25:22 -04:00
BitArray usedCasKeyOffsets ;
usedCasKeyOffsets . Init ( memoryBlock , bucket . m_casKeyTable . GetSize ( ) ) ;
2024-05-24 14:04:37 -04:00
u64 collectUsedCasKeysStart = GetTime ( ) ;
// Collect all caskeys that are used by cache entries.
for ( auto & kv2 : bucket . m_cacheEntryLookup )
{
2024-05-25 16:58:10 -04:00
auto collectUsedCasKeyOffsets = [ & ] ( const Vector < u8 > & offsets )
{
BinaryReader reader2 ( offsets . data ( ) , 0 , offsets . size ( ) ) ;
while ( reader2 . GetLeft ( ) )
2024-08-10 17:26:42 -04:00
{
u32 offset = u32 ( reader2 . Read7BitEncoded ( ) ) ;
usedCasKeyOffsets . Set ( offset ) ;
}
2024-05-25 16:58:10 -04:00
} ;
collectUsedCasKeyOffsets ( kv2 . second . sharedInputCasKeyOffsets ) ;
2024-05-24 14:04:37 -04:00
for ( auto & entry : kv2 . second . entries )
2024-04-10 20:29:18 -04:00
{
2024-05-25 16:58:10 -04:00
collectUsedCasKeyOffsets ( entry . extraInputCasKeyOffsets ) ;
2024-05-24 14:04:37 -04:00
collectUsedCasKeyOffsets ( entry . outputCasKeyOffsets ) ;
2024-04-10 20:29:18 -04:00
}
2024-05-24 14:04:37 -04:00
}
2024-08-10 17:26:42 -04:00
u64 usedCasKeyOffsetsCount = usedCasKeyOffsets . CountSetBits ( ) ;
m_logger . Detail ( TC ( " Bucket %u Collected %s used caskeys. (%s) " ) , bucket . index , CountToText ( usedCasKeyOffsetsCount ) . str , TimeToText ( GetTime ( ) - collectUsedCasKeysStart ) . str ) ;
2024-04-10 20:29:18 -04:00
2024-05-24 14:04:37 -04:00
u64 recreatePathTableStart = GetTime ( ) ;
2024-04-10 20:29:18 -04:00
2024-05-24 14:04:37 -04:00
// Traverse all caskeys in caskey table and figure out which ones we can delete
2024-08-17 17:25:22 -04:00
BitArray usedPathOffsets ;
usedPathOffsets . Init ( memoryBlock , bucket . m_pathTable . GetSize ( ) ) ;
2024-04-10 20:29:18 -04:00
2024-08-10 17:26:42 -04:00
BinaryReader casKeyTableReader ( bucket . m_casKeyTable . GetMemory ( ) , 0 , bucket . m_casKeyTable . GetSize ( ) ) ;
usedCasKeyOffsets . Traverse ( [ & ] ( u32 casKeyOffset )
{
casKeyTableReader . SetPosition ( casKeyOffset ) ;
u32 pathOffset = u32 ( casKeyTableReader . Read7BitEncoded ( ) ) ;
usedPathOffsets . Set ( pathOffset ) ;
} ) ;
2024-05-24 14:04:37 -04:00
// Build new path table based on used offsets
2024-09-10 10:26:02 -04:00
HashMap2 < u32 , u32 > oldToNewPathOffset ;
2024-05-24 14:04:37 -04:00
u32 oldSize = bucket . m_pathTable . GetSize ( ) ;
{
CompactPathTable newPathTable ( CachePathTableMaxSize , CompactPathTable : : V1 , bucket . m_pathTable . GetPathCount ( ) , bucket . m_pathTable . GetSegmentCount ( ) ) ;
2024-09-10 10:26:02 -04:00
oldToNewPathOffset . Init ( memoryBlock , usedPathOffsets . CountSetBits ( ) ) ;
2024-05-24 14:04:37 -04:00
2024-08-10 17:26:42 -04:00
StringBuffer < > temp ;
usedPathOffsets . Traverse ( [ & ] ( u32 pathOffset )
2024-05-24 14:04:37 -04:00
{
2024-08-10 17:26:42 -04:00
bucket . m_pathTable . GetString ( temp . Clear ( ) , pathOffset ) ;
2024-08-16 02:14:46 -04:00
u32 newPathOffset = newPathTable . AddNoLock ( temp . data , temp . count ) ;
2024-05-24 14:04:37 -04:00
#if 0
StringBuffer < > test ;
2024-08-16 02:14:46 -04:00
newPathTable . GetString ( test , newPathOffset ) ;
2024-05-24 14:04:37 -04:00
UBA_ASSERT ( test . Equals ( temp . data ) ) ;
# endif
2024-08-16 02:14:46 -04:00
if ( pathOffset ! = newPathOffset )
2024-09-10 10:26:02 -04:00
oldToNewPathOffset . Insert ( pathOffset ) = newPathOffset ;
2024-08-10 17:26:42 -04:00
} ) ;
2024-05-24 14:04:37 -04:00
bucket . m_pathTable . Swap ( newPathTable ) ;
}
2024-05-28 18:03:47 -04:00
m_logger . Detail ( TC ( " Bucket %u Recreated path table. %s -> %s (%s) " ) , bucket . index , BytesToText ( oldSize ) . str , BytesToText ( bucket . m_pathTable . GetSize ( ) ) . str , TimeToText ( GetTime ( ) - recreatePathTableStart ) . str ) ;
2024-05-24 14:04:37 -04:00
// Build new caskey table based on used offsets
u64 recreateCasKeyTableStart = GetTime ( ) ;
2024-09-10 10:26:02 -04:00
HashMap2 < u32 , u32 > oldToNewCasKeyOffset ;
2024-05-24 14:04:37 -04:00
oldSize = bucket . m_casKeyTable . GetSize ( ) ;
{
2024-09-10 10:26:02 -04:00
oldToNewCasKeyOffset . Init ( memoryBlock , usedCasKeyOffsetsCount ) ;
2024-08-10 17:26:42 -04:00
CompactCasKeyTable newCasKeyTable ( CacheCasKeyTableMaxSize , usedCasKeyOffsetsCount ) ;
BinaryReader reader2 ( bucket . m_casKeyTable . GetMemory ( ) , 0 , bucket . m_casKeyTable . GetSize ( ) ) ;
usedCasKeyOffsets . Traverse ( [ & ] ( u32 casKeyOffset )
2024-04-10 20:29:18 -04:00
{
2024-08-10 17:26:42 -04:00
reader2 . SetPosition ( casKeyOffset ) ;
2024-05-24 14:04:37 -04:00
u32 oldPathOffset = u32 ( reader2 . Read7BitEncoded ( ) ) ;
CasKey casKey = reader2 . ReadCasKey ( ) ;
2024-08-16 02:14:46 -04:00
u32 newPathOffset = oldPathOffset ;
2024-09-10 10:26:02 -04:00
if ( auto value = oldToNewPathOffset . Find ( oldPathOffset ) )
newPathOffset = * value ;
2024-08-16 02:14:46 -04:00
u32 newCasKeyOffset = newCasKeyTable . Add ( casKey , newPathOffset ) ;
if ( casKeyOffset ! = newCasKeyOffset )
2024-09-10 10:26:02 -04:00
oldToNewCasKeyOffset . Insert ( casKeyOffset ) = newCasKeyOffset ;
2024-08-10 17:26:42 -04:00
} ) ;
2024-05-24 14:04:37 -04:00
bucket . m_casKeyTable . Swap ( newCasKeyTable ) ;
}
2024-05-28 18:03:47 -04:00
m_logger . Detail ( TC ( " Bucket %u Recreated caskey table. %s -> %s (%s) " ) , bucket . index , BytesToText ( oldSize ) . str , BytesToText ( bucket . m_casKeyTable . GetSize ( ) ) . str , TimeToText ( GetTime ( ) - recreateCasKeyTableStart ) . str ) ;
2024-04-12 18:04:58 -04:00
2024-09-10 10:26:02 -04:00
if ( oldToNewCasKeyOffset . Size ( ) > 0 )
2024-05-24 14:04:37 -04:00
{
2024-04-12 18:04:58 -04:00
// Update all casKeyOffsets
u64 updateEntriesStart = GetTime ( ) ;
2024-05-28 15:06:16 -04:00
m_server . ParallelFor ( workerCountToUse , bucket . m_cacheEntryLookup , [ & , temp = Vector < u32 > ( ) , temp2 = Vector < u8 > ( ) ] ( auto & it ) mutable
2024-04-12 18:04:58 -04:00
{
2024-05-28 15:06:16 -04:00
it - > second . UpdateEntries ( m_logger , oldToNewCasKeyOffset , temp , temp2 ) ;
2024-04-12 18:04:58 -04:00
} ) ;
2024-04-24 15:26:58 -04:00
#if 0
u8 * mem = bucket . m_pathTable . GetMemory ( ) ;
u64 memLeft = bucket . m_pathTable . GetSize ( ) ;
while ( memLeft )
{
u8 buffer [ 256 * 1024 ] ;
auto compressor = OodleLZ_Compressor_Kraken ;
auto compressionLevel = OodleLZ_CompressionLevel_SuperFast ;
u64 toCompress = Min ( memLeft , u64 ( 256 * 1024 - 128 ) ) ;
auto compressedBlockSize = OodleLZ_Compress ( compressor , mem , ( OO_SINTa ) toCompress , buffer , compressionLevel ) ;
( void ) compressedBlockSize ;
memLeft - = toCompress ;
}
# endif
2024-05-28 18:03:47 -04:00
m_logger . Detail ( TC ( " Bucket %u Updated cache entries with new tables (%s) " ) , bucket . index , TimeToText ( GetTime ( ) - updateEntriesStart ) . str ) ;
2024-04-10 20:29:18 -04:00
}
2024-05-28 16:55:11 -04:00
bucket . needsSave = true ;
2024-08-06 15:29:52 -04:00
m_logger . Info ( TC ( " Bucket %u Done (%s). CacheEntries: %s (%s) PathTable: %s CasTable: %s Expiration: %s " ) , bucket . index , TimeToText ( GetTime ( ) - bucketStartTime ) . str , CountToText ( bucket . totalEntryCount . load ( ) ) . str , BytesToText ( bucket . totalEntrySize . load ( ) ) . str , BytesToText ( bucket . m_pathTable . GetSize ( ) ) . str , BytesToText ( bucket . m_casKeyTable . GetSize ( ) ) . str , TimeToText ( MsToTime ( bucket . expirationTimeSeconds * 1000 ) , true ) . str ) ;
2024-07-26 16:44:00 -04:00
2024-08-17 17:25:22 -04:00
SCOPED_WRITE_LOCK ( globalStatsLock , l ) ;
2024-07-26 16:44:00 -04:00
maxCommittedMemory = Max ( maxCommittedMemory , memoryBlock . writtenSize ) ;
2024-06-02 18:15:45 -04:00
} , TC ( " " ) , true ) ;
2024-04-10 20:29:18 -04:00
2024-05-26 18:07:50 -04:00
// Need to make sure all cas entries are dropped before saving cas table
2024-05-26 23:28:21 -04:00
u64 dropStartTime = GetTime ( ) ;
2024-05-26 18:07:50 -04:00
dropCasGuard . Execute ( ) ;
2024-05-26 23:28:21 -04:00
u64 dropCasDuration = GetTime ( ) - dropStartTime ;
if ( TimeToMs ( dropCasDuration ) > 10 )
m_logger . Detail ( TC ( " Done deleting cas files (%s) " ) , TimeToText ( dropCasDuration ) . str ) ;
2024-05-26 18:07:50 -04:00
2024-04-10 20:29:18 -04:00
if ( entriesAdded | | deletedCasCount | | deleteEntryCount | | forceAllSteps )
{
u64 saveStart = GetTime ( ) ;
2024-05-28 18:03:47 -04:00
m_logger . Detail ( TC ( " Saving to disk " ) ) ;
2024-06-03 19:22:50 -04:00
Event saveCasEvent ( true ) ;
m_server . AddWork ( [ & ] ( ) { m_storage . SaveCasTable ( false , false ) ; saveCasEvent . Set ( ) ; } , 1 , TC ( " SaveCas " ) ) ;
2024-04-10 20:29:18 -04:00
SaveNoLock ( ) ;
2024-06-03 19:22:50 -04:00
saveCasEvent . IsSet ( ) ;
2024-05-28 18:03:47 -04:00
m_logger . Detail ( TC ( " Save Done (%s) " ) , TimeToText ( GetTime ( ) - saveStart ) . str ) ;
2024-04-10 20:29:18 -04:00
}
2024-05-25 22:30:11 -04:00
u64 oldestTime = oldest ? GetFileTimeAsTime ( now - ( m_creationTime + oldest ) ) : 0 ;
2024-06-07 16:29:55 -04:00
u64 longestUnusedTime = longestUnused ? GetFileTimeAsTime ( now - ( m_creationTime + longestUnused ) ) : 0 ;
2024-05-23 19:16:15 -04:00
u64 duration = GetTime ( ) - startTime ;
2024-09-10 10:26:02 -04:00
m_logger . Info ( TC ( " Maintenance done! (%s) CasFiles: %s (%s) Entries: %s Oldest: %s LongestUnused: %s MaintenanceMem: %s/%s " ) , TimeToText ( duration ) . str , CountToText ( totalCasCount - deletedCasCount ) . str , BytesToText ( totalCasSize ) . str , CountToText ( totalEntryCount . load ( ) ) . str , TimeToText ( oldestTime , true ) . str , TimeToText ( longestUnusedTime , true ) . str , BytesToText ( maxCommittedMemory ) . str , BytesToText ( m_maintenanceReserveSize ) . str ) ;
2024-05-23 19:16:15 -04:00
m_longestMaintenance = Max ( m_longestMaintenance , duration ) ;
2024-05-25 16:58:10 -04:00
2024-04-10 20:29:18 -04:00
return true ;
}
2024-05-09 15:06:47 -04:00
bool CacheServer : : ShouldShutdown ( )
{
if ( ! m_shutdownRequested )
return false ;
SCOPED_READ_LOCK ( m_connectionsLock , lock2 ) ;
if ( ! m_connections . empty ( ) | | m_addsSinceMaintenance )
return false ;
return true ;
}
2024-04-10 20:29:18 -04:00
void CacheServer : : OnDisconnected ( u32 clientId )
{
SCOPED_WRITE_LOCK ( m_connectionsLock , lock ) ;
m_connections . erase ( clientId ) ;
lock . Leave ( ) ;
}
2024-07-29 13:48:36 -04:00
CacheServer : : ConnectionBucket & CacheServer : : GetConnectionBucket ( const ConnectionInfo & connectionInfo , BinaryReader & reader , u32 * outClientVersion )
2024-04-25 00:44:38 -04:00
{
u64 id = reader . Read7BitEncoded ( ) ;
SCOPED_WRITE_LOCK ( m_connectionsLock , lock ) ;
auto & connection = m_connections [ connectionInfo . GetId ( ) ] ;
2024-07-29 13:48:36 -04:00
if ( outClientVersion )
* outClientVersion = connection . clientVersion ;
2024-04-25 00:44:38 -04:00
return connection . buckets . try_emplace ( id , id ) . first - > second ;
}
CacheServer : : Bucket & CacheServer : : GetBucket ( BinaryReader & reader )
{
u64 id = reader . Read7BitEncoded ( ) ;
2024-05-28 18:03:47 -04:00
return GetBucket ( id ) ;
2024-04-25 00:44:38 -04:00
}
2024-05-28 18:03:47 -04:00
CacheServer : : Bucket & CacheServer : : GetBucket ( u64 id )
{
SCOPED_WRITE_LOCK ( m_bucketsLock , bucketsLock ) ;
auto insres = m_buckets . try_emplace ( id , id ) ;
2024-07-25 01:28:05 -04:00
auto & bucket = insres . first - > second ;
if ( ! insres . second )
return bucket ;
bucket . index = u32 ( m_buckets . size ( ) - 1 ) ;
bucket . expirationTimeSeconds = m_expirationTimeSeconds ;
return bucket ;
2024-05-28 18:03:47 -04:00
}
2024-05-28 16:55:11 -04:00
u32 CacheServer : : GetBucketWorkerCount ( )
{
u32 workerCount = m_server . GetWorkerCount ( ) ;
u32 workerCountToUse = workerCount > 0 ? workerCount - 1 : 0 ;
return Min ( workerCountToUse , u32 ( m_buckets . size ( ) ) ) ;
}
2024-04-10 20:29:18 -04:00
bool CacheServer : : HandleMessage ( const ConnectionInfo & connectionInfo , u8 messageType , BinaryReader & reader , BinaryWriter & writer )
{
2024-05-23 19:16:15 -04:00
if ( messageType ! = CacheMessageType_Connect & & m_isRunningMaintenance )
return m_logger . Error ( TC ( " Can't handle network message %s while running maintenance mode " ) , ToString ( CacheMessageType ( messageType ) ) ) ;
2024-04-10 20:29:18 -04:00
switch ( messageType )
{
case CacheMessageType_Connect :
{
u32 clientVersion = reader . ReadU32 ( ) ;
2024-05-25 16:58:10 -04:00
if ( clientVersion < 3 | | clientVersion > CacheNetworkVersion )
2024-04-10 20:29:18 -04:00
return m_logger . Error ( TC ( " Different network versions. Client: %u, Server: %u. Disconnecting " ) , clientVersion , CacheNetworkVersion ) ;
2024-05-23 19:16:15 -04:00
SCOPED_WRITE_LOCK ( m_connectionsLock , lock ) ;
if ( m_isRunningMaintenance )
{
writer . WriteBool ( false ) ;
writer . WriteString ( TC ( " Running maintenance... " ) ) ;
}
writer . WriteBool ( true ) ;
2024-05-25 18:16:40 -04:00
auto insres = m_connections . try_emplace ( connectionInfo . GetId ( ) ) ;
auto & connection = insres . first - > second ;
connection . clientVersion = clientVersion ;
2024-04-10 20:29:18 -04:00
return true ;
}
case CacheMessageType_StorePathTable :
{
2024-04-25 00:44:38 -04:00
GetConnectionBucket ( connectionInfo , reader ) . pathTable . ReadMem ( reader , false ) ;
2024-04-10 20:29:18 -04:00
return true ;
}
case CacheMessageType_StoreCasTable :
{
2024-04-25 00:44:38 -04:00
GetConnectionBucket ( connectionInfo , reader ) . casKeyTable . ReadMem ( reader , false ) ;
2024-04-10 20:29:18 -04:00
return true ;
}
case CacheMessageType_StoreEntry :
{
2024-07-29 13:48:36 -04:00
u32 clientVersion ;
auto & bucket = GetConnectionBucket ( connectionInfo , reader , & clientVersion ) ;
return HandleStoreEntry ( bucket , reader , writer , clientVersion ) ;
2024-04-10 20:29:18 -04:00
}
case CacheMessageType_StoreEntryDone :
{
2024-04-25 00:44:38 -04:00
auto & connectionBucket = GetConnectionBucket ( connectionInfo , reader ) ;
2024-04-10 20:29:18 -04:00
CasKey cmdKey = reader . ReadCasKey ( ) ;
2024-06-03 14:32:06 -04:00
bool success = true ;
if ( reader . GetLeft ( ) )
success = reader . ReadBool ( ) ;
2024-04-12 18:04:58 -04:00
SCOPED_WRITE_LOCK ( connectionBucket . cacheEntryLookupLock , lock2 ) ;
auto findIt = connectionBucket . cacheEntryLookup . find ( cmdKey ) ;
2024-06-03 14:32:06 -04:00
if ( findIt = = connectionBucket . cacheEntryLookup . end ( ) )
return true ;
auto g = MakeGuard ( [ & ] ( ) { connectionBucket . cacheEntryLookup . erase ( findIt ) ; } ) ;
if ( ! success )
return true ;
2024-04-12 18:04:58 -04:00
2024-06-03 14:32:06 -04:00
u64 id = connectionBucket . id ;
Bucket & bucket = GetBucket ( id ) ;
2024-04-10 20:29:18 -04:00
2024-06-03 14:32:06 -04:00
SCOPED_WRITE_LOCK ( bucket . m_cacheEntryLookupLock , lock3 ) ;
auto insres = bucket . m_cacheEntryLookup . try_emplace ( cmdKey ) ;
auto & cacheEntries = insres . first - > second ;
lock3 . Leave ( ) ;
SCOPED_WRITE_LOCK ( cacheEntries . lock , lock4 ) ;
cacheEntries . entries . emplace_front ( std : : move ( findIt - > second ) ) ;
2024-04-10 20:29:18 -04:00
return true ;
}
case CacheMessageType_FetchPathTable :
return HandleFetchPathTable ( reader , writer ) ;
case CacheMessageType_FetchCasTable :
return HandleFetchCasTable ( reader , writer ) ;
case CacheMessageType_FetchEntries :
2024-05-25 16:58:10 -04:00
{
SCOPED_READ_LOCK ( m_connectionsLock , lock ) ;
u32 clientVersion = m_connections [ connectionInfo . GetId ( ) ] . clientVersion ;
lock . Leave ( ) ;
return HandleFetchEntries ( reader , writer , clientVersion ) ;
}
2024-05-23 19:16:15 -04:00
case CacheMessageType_ExecuteCommand :
return HandleExecuteCommand ( reader , writer ) ;
2024-04-10 20:29:18 -04:00
2024-05-25 16:58:10 -04:00
case CacheMessageType_ReportUsedEntry :
2024-07-29 13:48:36 -04:00
{
SCOPED_READ_LOCK ( m_connectionsLock , lock ) ;
u32 clientVersion = m_connections [ connectionInfo . GetId ( ) ] . clientVersion ;
lock . Leave ( ) ;
return HandleReportUsedEntry ( reader , writer , clientVersion ) ;
}
2024-05-09 15:06:47 -04:00
case CacheMessageType_RequestShutdown :
{
TString reason = reader . ReadString ( ) ;
m_logger . Info ( TC ( " Shutdown requested. Reason: %s " ) , reason . empty ( ) ? TC ( " Unknown " ) : reason . c_str ( ) ) ;
m_shutdownRequested = true ;
writer . WriteBool ( true ) ;
return true ;
}
2024-04-10 20:29:18 -04:00
default :
return false ;
}
}
	// Registers a cache entry sent by a client. Wire layout (after the bucket id
	// consumed by the caller):
	//   cmdKey, [inputCount (v5+)], outputCount, outputOffsets..., inputOffsets..., [logLines (v5+)]
	// Offsets refer to the client's per-connection cas/path tables and are
	// translated into the server bucket's tables here. Any output cas content the
	// server is missing is written back to the client as a list of offsets to upload.
	bool CacheServer::HandleStoreEntry(ConnectionBucket& connectionBucket, BinaryReader& reader, BinaryWriter& writer, u32 clientVersion)
	{
		CasKey cmdKey = reader.ReadCasKey();
		// ~0u keeps pre-v5 clients looping until the reader is exhausted; v5+
		// clients send the exact input count so log lines can follow the inputs.
		u64 inputCount = ~0u;
		if (clientVersion >= 5)
			inputCount = reader.Read7BitEncoded();
		u64 outputCount = reader.Read7BitEncoded();
		u64 index = 0;
		Set<u32> inputs; // server-side caskey offsets of all inputs (sorted, deduplicated)
		u64 bytesForInput = 0; // bytes needed to 7-bit-encode all input offsets
		u64 outputStartOffset = reader.GetPosition(); // outputs are re-read in a later pass
		u64 id = connectionBucket.id;
		Bucket& bucket = GetBucket(id);

		// First pass: skip over the outputs, translate each input offset from the
		// connection's tables into the server bucket's path/caskey tables.
		while (reader.GetLeft())
		{
			bool isInput = index++ >= outputCount;
			if (isInput && !inputCount--) // For client versions under 5 we will hit reader.GetLeft() == false first.
				break;
			u32 offset = u32(reader.Read7BitEncoded());
			if (!isInput)
				continue;
			CasKey casKey;
			StringBuffer<> path;
			connectionBucket.casKeyTable.GetPathAndKey(path, casKey, connectionBucket.pathTable, offset);
			UBA_ASSERT(path.count);
			u32 pathOffset = bucket.m_pathTable.Add(path.data, path.count);
			#if 0
			StringBuffer<> test;
			bucket.m_pathTable.GetString(test, pathOffset);
			UBA_ASSERT(test.Equals(path.data));
			#endif
			u32 casKeyOffset = bucket.m_casKeyTable.Add(casKey, pathOffset);
			auto insres = inputs.insert(casKeyOffset);
			if (!insres.second)
			{
				m_logger.Warning(TC("Input file %s exists more than once in cache entry"), path.data);
				continue;
			}
			bytesForInput += Get7BitEncodedCount(casKeyOffset);
			//m_logger.Info(TC("%s - %s"), path.data, CasKeyString(casKey).str);
		}

		// For client versions 5 and over we have log entries after the inputs
		Vector<u8> logLines;
		if (u64 logLinesSize = reader.GetLeft())
		{
			logLines.resize(logLinesSize);
			reader.ReadBytes(logLines.data(), logLinesSize);
		}

		// NOTE(review): inputCasKeyOffsets is written here but never read again in
		// this function (newEntry's inputs come from BuildInputs below) — looks
		// like a leftover; verify against the rest of the file before removing.
		Vector<u8> inputCasKeyOffsets;
		{
			inputCasKeyOffsets.resize(bytesForInput);
			BinaryWriter w2(inputCasKeyOffsets.data(), 0, inputCasKeyOffsets.size());
			for (u32 input : inputs)
				w2.Write7BitEncoded(input);
		}

		// Look up (or create) the entry list for this command key.
		SCOPED_WRITE_LOCK(bucket.m_cacheEntryLookupLock, lock);
		auto insres = bucket.m_cacheEntryLookup.try_emplace(cmdKey);
		auto& cacheEntries = insres.first->second;
		lock.Leave();
		SCOPED_WRITE_LOCK(cacheEntries.lock, lock2);

		// Create entry based on existing entry
		CacheEntry newEntry;
		cacheEntries.BuildInputs(newEntry, inputs);

		// Search for an existing entry with the exact same input set.
		List<CacheEntry>::iterator matchingEntry = cacheEntries.entries.end();
		for (auto i = cacheEntries.entries.begin(), e = cacheEntries.entries.end(); i != e; ++i)
		{
			if (i->sharedInputCasKeyOffsetRanges != newEntry.sharedInputCasKeyOffsetRanges || i->extraInputCasKeyOffsets != newEntry.extraInputCasKeyOffsets)
				continue;
			matchingEntry = i;
			break;
		}
		// Already exists
		if (matchingEntry != cacheEntries.entries.end())
		{
			// Same inputs: keep the old entry unless its outputs disagree with the
			// new ones; on disagreement the old entry is erased and replaced below.
			bool shouldOverwrite = false;
			// Map of output path -> caskey for the existing matching entry.
			Map<TString, CasKey> existing;
			BinaryReader r2(matchingEntry->outputCasKeyOffsets.data(), 0, matchingEntry->outputCasKeyOffsets.size());
			while (r2.GetLeft())
			{
				u32 existingOffset = u32(r2.Read7BitEncoded());
				CasKey casKey;
				StringBuffer<> path;
				bucket.m_casKeyTable.GetPathAndKey(path, casKey, bucket.m_pathTable, existingOffset);
				existing.try_emplace(path.data, casKey);
			}
			// Re-read the new entry's outputs from the message and compare.
			reader.SetPosition(outputStartOffset);
			u64 left = outputCount;
			while (left--)
			{
				u32 outputOffset = u32(reader.Read7BitEncoded());
				CasKey casKey;
				StringBuffer<> path;
				connectionBucket.casKeyTable.GetPathAndKey(path, casKey, connectionBucket.pathTable, outputOffset);
				auto findIt = existing.find(path.data);
				if (findIt == existing.end())
				{
					m_logger.Warning(TC("Existing cache entry matches input but does not match output (output file %s did not exist in existing cache entry)"), path.data);
					cacheEntries.entries.erase(matchingEntry);
					shouldOverwrite = true;
					break;
				}
				if (findIt->second != casKey)
				{
					//m_logger.Warning(TC("Existing cache entry matches input but does not match output (%s has different caskey)"), path.data);
					cacheEntries.entries.erase(matchingEntry);
					shouldOverwrite = true;
					break;
				}
			}
			if (!shouldOverwrite)
				return true;
		}
		// Second pass over outputs: translate offsets into the bucket tables and
		// collect which output cas files the server is missing.
		Set<u32> outputs;
		u64 bytesForOutput = 0;
		bool hasAllContent = true;
		reader.SetPosition(outputStartOffset);
		u64 left = outputCount;
		while (left--)
		{
			u32 outputOffset = u32(reader.Read7BitEncoded());
			CasKey casKey;
			StringBuffer<> path;
			connectionBucket.casKeyTable.GetPathAndKey(path, casKey, connectionBucket.pathTable, outputOffset);
			u32 pathOffset = bucket.m_pathTable.Add(path.data, path.count);
			#if 0
			StringBuffer<> test;
			bucket.m_pathTable.GetString(test, pathOffset);
			UBA_ASSERT(test.Equals(path.data));
			#endif
			u32 casKeyOffset = bucket.m_casKeyTable.Add(casKey, pathOffset);
			outputs.insert(casKeyOffset);
			bytesForOutput += Get7BitEncodedCount(casKeyOffset);
			if (!m_storage.EnsureCasFile(casKey, nullptr))
			{
				// Server lacks this content; tell the client to upload it.
				writer.Write7BitEncoded(outputOffset);
				hasAllContent = false;
			}
		}
		newEntry.outputCasKeyOffsets.resize(bytesForOutput);
		BinaryWriter w2(newEntry.outputCasKeyOffsets.data(), 0, newEntry.outputCasKeyOffsets.size());
		for (u32 output : outputs)
			w2.Write7BitEncoded(output);

		// Times are stored relative to server creation time to keep values small.
		newEntry.creationTime = GetSystemTimeAsFileTime() - m_creationTime;
		newEntry.id = cacheEntries.idCounter++;

		// Decide how to store log lines: empty, shared with sibling entries, or owned.
		if (logLines.empty())
		{
			newEntry.logLinesType = LogLinesType_Empty;
		}
		else if (cacheEntries.sharedLogLines.empty() && logLines.size() < 150) // If log line is very long it is most likely a warning that will be fixed
		{
			cacheEntries.sharedLogLines = std::move(logLines);
			newEntry.logLinesType = LogLinesType_Shared;
		}
		else
		{
			if (cacheEntries.sharedLogLines == logLines)
			{
				newEntry.logLinesType = LogLinesType_Shared;
			}
			else
			{
				newEntry.logLinesType = LogLinesType_Owned;
				newEntry.logLines = std::move(logLines);
			}
		}

		// If cache server has all content we can put the new cache entry directly in the lookup.. otherwise we'll have to wait until client has uploaded content
		if (hasAllContent)
		{
			cacheEntries.entries.emplace_front(std::move(newEntry));
		}
		else
		{
			// Parked in the connection bucket until CacheMessageType_StoreEntryDone
			// arrives for this cmdKey.
			SCOPED_WRITE_LOCK(connectionBucket.cacheEntryLookupLock, lock3);
			bool res = connectionBucket.cacheEntryLookup.try_emplace(cmdKey, std::move(newEntry)).second;
			UBA_ASSERT(res); (void)res;
		}
		//m_logger.Info(TC("Added new cache entry (%u inputs and %u outputs)"), u32(inputs.size()), outputCount);
		bucket.needsSave = true;
		++m_addsSinceMaintenance;
		return true;
	}
bool CacheServer : : HandleFetchPathTable ( BinaryReader & reader , BinaryWriter & writer )
{
2024-04-25 00:44:38 -04:00
Bucket & bucket = GetBucket ( reader ) ;
2024-04-10 20:29:18 -04:00
u32 haveSize = reader . ReadU32 ( ) ;
2024-04-12 18:04:58 -04:00
u32 size = bucket . m_pathTable . GetSize ( ) ;
2024-04-10 20:29:18 -04:00
writer . WriteU32 ( size ) ;
u32 toSend = Min ( u32 ( writer . GetCapacityLeft ( ) ) , size - haveSize ) ;
2024-04-12 18:04:58 -04:00
writer . WriteBytes ( bucket . m_pathTable . GetMemory ( ) + haveSize , toSend ) ;
2024-04-10 20:29:18 -04:00
return true ;
}
bool CacheServer : : HandleFetchCasTable ( BinaryReader & reader , BinaryWriter & writer )
{
2024-04-25 00:44:38 -04:00
Bucket & bucket = GetBucket ( reader ) ;
2024-04-10 20:29:18 -04:00
u32 haveSize = reader . ReadU32 ( ) ;
2024-04-12 18:04:58 -04:00
u32 size = bucket . m_casKeyTable . GetSize ( ) ;
2024-04-10 20:29:18 -04:00
writer . WriteU32 ( size ) ;
u32 toSend = Min ( u32 ( writer . GetCapacityLeft ( ) ) , size - haveSize ) ;
2024-04-12 18:04:58 -04:00
writer . WriteBytes ( bucket . m_casKeyTable . GetMemory ( ) + haveSize , toSend ) ;
2024-04-10 20:29:18 -04:00
return true ;
}
2024-05-25 16:58:10 -04:00
bool CacheServer : : HandleFetchEntries ( BinaryReader & reader , BinaryWriter & writer , u32 clientVersion )
2024-04-10 20:29:18 -04:00
{
2024-04-25 00:44:38 -04:00
Bucket & bucket = GetBucket ( reader ) ;
2024-04-10 20:29:18 -04:00
CasKey cmdKey = reader . ReadCasKey ( ) ;
2024-07-26 16:44:00 -04:00
+ + m_cacheKeyFetchCount ;
2024-05-25 16:58:10 -04:00
SCOPED_READ_LOCK ( bucket . m_cacheEntryLookupLock , lock ) ;
auto findIt = bucket . m_cacheEntryLookup . find ( cmdKey ) ;
if ( findIt = = bucket . m_cacheEntryLookup . end ( ) )
{
writer . WriteU16 ( 0 ) ;
return true ;
}
auto & cacheEntries = findIt - > second ;
lock . Leave ( ) ;
SCOPED_READ_LOCK ( cacheEntries . lock , lock2 ) ;
return cacheEntries . Write ( writer , clientVersion , false ) ;
}
2024-07-29 13:48:36 -04:00
bool CacheServer : : HandleReportUsedEntry ( BinaryReader & reader , BinaryWriter & writer , u32 clientVersion )
2024-05-25 16:58:10 -04:00
{
Bucket & bucket = GetBucket ( reader ) ;
CasKey cmdKey = reader . ReadCasKey ( ) ;
u64 entryId = reader . Read7BitEncoded ( ) ;
2024-04-10 20:29:18 -04:00
2024-07-26 16:44:00 -04:00
+ + m_cacheKeyHitCount ;
2024-04-12 18:04:58 -04:00
SCOPED_READ_LOCK ( bucket . m_cacheEntryLookupLock , lock ) ;
auto findIt = bucket . m_cacheEntryLookup . find ( cmdKey ) ;
if ( findIt = = bucket . m_cacheEntryLookup . end ( ) )
2024-04-10 20:29:18 -04:00
return true ;
auto & cacheEntries = findIt - > second ;
lock . Leave ( ) ;
2024-05-25 16:58:10 -04:00
SCOPED_WRITE_LOCK ( cacheEntries . lock , lock2 ) ;
2024-04-10 20:29:18 -04:00
for ( auto & entry : cacheEntries . entries )
{
2024-05-25 16:58:10 -04:00
if ( entryId ! = entry . id )
continue ;
2024-05-29 01:07:06 -04:00
u64 fileTime = GetSystemTimeAsFileTime ( ) - m_creationTime ;
entry . lastUsedTime = fileTime ;
bucket . lastUsedTime = fileTime ;
2024-07-29 13:48:36 -04:00
if ( clientVersion > = 5 & & entry . logLinesType = = LogLinesType_Owned )
if ( entry . logLines . size ( ) < = writer . GetCapacityLeft ( ) )
writer . WriteBytes ( entry . logLines . data ( ) , entry . logLines . size ( ) ) ;
2024-05-25 16:58:10 -04:00
break ;
2024-04-10 20:29:18 -04:00
}
return true ;
}
2024-05-23 19:16:15 -04:00
bool CacheServer : : HandleExecuteCommand ( BinaryReader & reader , BinaryWriter & writer )
2024-04-10 20:29:18 -04:00
{
2024-05-23 19:16:15 -04:00
StringBuffer < > command ;
reader . ReadString ( command ) ;
2024-04-10 20:29:18 -04:00
2024-05-23 19:16:15 -04:00
StringBuffer < > additionalInfo ;
reader . ReadString ( additionalInfo ) ;
2024-04-10 20:29:18 -04:00
StringBuffer < > tempFile ( m_storage . GetTempPath ( ) ) ;
Guid guid ;
CreateGuid ( guid ) ;
tempFile . Append ( GuidToString ( guid ) . str ) ;
FileAccessor file ( m_logger , tempFile . data ) ;
if ( ! file . CreateWrite ( ) )
return false ;
2024-05-23 19:16:15 -04:00
bool writeSuccess = true ;
auto Write = [ & ] ( const void * data , u64 size ) { writeSuccess & = file . Write ( data , size ) ; } ;
2024-04-10 20:29:18 -04:00
u8 bom [ ] = { 0xEF , 0xBB , 0xBF } ;
Write ( bom , sizeof ( bom ) ) ;
auto writeLine = [ & ] ( const tchar * text )
{
u8 buffer [ 1024 ] ;
BinaryWriter w ( buffer , 0 , sizeof ( buffer ) ) ;
w . WriteUtf8String ( text , TStrlen ( text ) ) ;
w . WriteUtf8String ( TC ( " \n " ) , 1 ) ;
Write ( buffer , w . GetPosition ( ) ) ;
} ;
2024-05-23 19:16:15 -04:00
StringBuffer < > line ;
2024-04-10 20:29:18 -04:00
2024-05-23 19:16:15 -04:00
if ( command . Equals ( TC ( " content " ) ) )
2024-04-10 20:29:18 -04:00
{
2024-05-23 19:16:15 -04:00
writeLine ( TC ( " UbaCache server summary " ) ) ;
2024-04-10 20:29:18 -04:00
2024-05-23 19:16:15 -04:00
StringBufferBase & filterString = additionalInfo ;
u64 now = GetSystemTimeAsFileTime ( ) ;
2024-05-25 16:58:10 -04:00
Vector < u8 > temp ;
2024-05-23 19:16:15 -04:00
SCOPED_READ_LOCK ( m_bucketsLock , bucketsLock ) ;
for ( auto & kv : m_buckets )
2024-04-10 20:29:18 -04:00
{
2024-05-23 19:16:15 -04:00
Bucket & bucket = kv . second ;
SCOPED_READ_LOCK ( bucket . m_cacheEntryLookupLock , lock2 ) ;
2024-04-12 18:04:58 -04:00
2024-05-23 19:16:15 -04:00
for ( auto & kv2 : bucket . m_cacheEntryLookup )
2024-04-12 18:04:58 -04:00
{
2024-05-23 19:16:15 -04:00
CacheEntries & entries = kv2 . second ;
SCOPED_READ_LOCK ( entries . lock , lock3 ) ;
Set < u32 > visibleIndices ;
if ( filterString . count )
{
u32 index = 0 ;
2024-05-25 16:58:10 -04:00
auto findString = [ & ] ( const Vector < u8 > & offsets )
{
BinaryReader reader2 ( offsets . data ( ) , 0 , offsets . size ( ) ) ;
while ( reader2 . GetLeft ( ) )
{
u64 offset = reader2 . Read7BitEncoded ( ) ;
CasKey casKey ;
StringBuffer < > path ;
bucket . m_casKeyTable . GetPathAndKey ( path , casKey , bucket . m_pathTable , offset ) ;
if ( path . Contains ( filterString . data ) )
return true ;
if ( Contains ( CasKeyString ( casKey ) . str , filterString . data ) )
return true ;
}
return false ;
} ;
2024-05-23 19:16:15 -04:00
for ( auto & entry : entries . entries )
{
2024-05-25 16:58:10 -04:00
entries . Flatten ( temp , entry ) ;
if ( findString ( temp ) | | findString ( entry . outputCasKeyOffsets ) )
2024-05-23 19:16:15 -04:00
visibleIndices . insert ( index ) ;
+ + index ;
}
if ( visibleIndices . empty ( ) )
continue ;
}
writeLine ( CasKeyString ( kv2 . first ) . str ) ;
2024-04-12 18:04:58 -04:00
u32 index = 0 ;
for ( auto & entry : entries . entries )
{
2024-05-23 19:16:15 -04:00
if ( ! visibleIndices . empty ( ) & & visibleIndices . find ( index ) = = visibleIndices . end ( ) )
{
+ + index ;
continue ;
}
2024-05-25 22:30:11 -04:00
u64 age = GetFileTimeAsTime ( now - entry . creationTime ) ;
2024-05-23 19:16:15 -04:00
writeLine ( line . Clear ( ) . Appendf ( TC ( " #%u (%s ago) " ) , index , TimeToText ( age , true ) . str ) . data ) ;
auto writeOffsets = [ & ] ( const Vector < u8 > & offsets )
2024-04-12 18:04:58 -04:00
{
BinaryReader reader2 ( offsets . data ( ) , 0 , offsets . size ( ) ) ;
while ( reader2 . GetLeft ( ) )
{
u64 offset = reader2 . Read7BitEncoded ( ) ;
CasKey casKey ;
StringBuffer < > path ;
bucket . m_casKeyTable . GetPathAndKey ( path , casKey , bucket . m_pathTable , offset ) ;
2024-05-23 19:16:15 -04:00
writeLine ( line . Clear ( ) . Appendf ( TC ( " %s - %s " ) , path . data , CasKeyString ( casKey ) . str ) . data ) ;
2024-04-12 18:04:58 -04:00
}
} ;
2024-05-23 19:16:15 -04:00
writeLine ( line . Clear ( ) . Append ( TC ( " Inputs: " ) ) . data ) ;
2024-05-25 16:58:10 -04:00
entries . Flatten ( temp , entry ) ;
writeOffsets ( temp ) ;
2024-05-23 19:16:15 -04:00
writeLine ( line . Clear ( ) . Append ( TC ( " Outputs: " ) ) . data ) ;
writeOffsets ( entry . outputCasKeyOffsets ) ;
2024-04-12 18:04:58 -04:00
+ + index ;
}
2024-04-10 20:29:18 -04:00
}
}
}
2024-05-23 19:16:15 -04:00
else if ( command . Equals ( TC ( " status " ) ) )
{
writeLine ( TC ( " UbaCacheServer status " ) ) ;
2024-05-25 22:30:11 -04:00
writeLine ( line . Clear ( ) . Appendf ( TC ( " CreationTime: %s ago " ) , TimeToText ( GetFileTimeAsTime ( GetSystemTimeAsFileTime ( ) - m_creationTime ) , true ) . str ) . data ) ;
2024-07-26 16:44:00 -04:00
writeLine ( line . Clear ( ) . Appendf ( TC ( " UpTime: %s " ) , TimeToText ( GetTime ( ) - m_bootTime , true ) . str ) . data ) ;
2024-05-23 19:16:15 -04:00
writeLine ( line . Clear ( ) . Appendf ( TC ( " Longest maintenance: %s " ) , TimeToText ( m_longestMaintenance ) . str ) . data ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " Buckets: " ) ) . data ) ;
u32 index = 0 ;
2024-04-10 20:29:18 -04:00
2024-05-25 22:30:11 -04:00
{
SCOPED_READ_LOCK ( m_bucketsLock , bucketsLock ) ;
for ( auto & kv : m_buckets )
{
Bucket & bucket = kv . second ;
SCOPED_READ_LOCK ( bucket . m_cacheEntryLookupLock , lock2 ) ;
u64 mostEntries = 0 ;
u64 lastUsed = 0 ;
2024-05-27 11:42:45 -04:00
u64 totalEntryCount = 0 ;
2024-05-25 22:30:11 -04:00
for ( auto & kv2 : bucket . m_cacheEntryLookup )
{
CacheEntries & entries = kv2 . second ;
SCOPED_READ_LOCK ( entries . lock , lock3 ) ;
2024-05-25 22:30:15 -04:00
mostEntries = Max ( mostEntries , u64 ( entries . entries . size ( ) ) ) ;
2024-05-25 22:30:11 -04:00
for ( auto & entry : entries . entries )
lastUsed = Max ( lastUsed , entry . lastUsedTime ) ;
2024-05-27 11:42:45 -04:00
totalEntryCount + = entries . entries . size ( ) ;
2024-05-25 22:30:11 -04:00
}
lock2 . Leave ( ) ;
u64 lastUsedTime = 0 ;
if ( lastUsed )
lastUsedTime = GetFileTimeAsTime ( GetSystemTimeAsFileTime ( ) - ( m_creationTime + lastUsed ) ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " #%u - %llu " ) , index + + , kv . first ) . data ) ;
2024-05-27 11:42:45 -04:00
writeLine ( line . Clear ( ) . Appendf ( TC ( " PathTable: %lls (%s) " ) , bucket . m_pathTable . GetPathCount ( ) , BytesToText ( bucket . m_pathTable . GetSize ( ) ) . str ) . data ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " CasKeyTable: %llu (%s) " ) , bucket . m_cacheEntryLookup . size ( ) , BytesToText ( bucket . m_casKeyTable . GetSize ( ) ) . str ) . data ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " TotalEntries: %llu " ) , totalEntryCount ) . data ) ;
2024-05-25 22:30:11 -04:00
writeLine ( line . Clear ( ) . Appendf ( TC ( " KeyMostEntries: %llu " ) , mostEntries ) . data ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " LastEntryUsed: %s ago " ) , TimeToText ( lastUsedTime , true ) . str ) . data ) ;
}
}
2024-05-23 19:16:15 -04:00
u64 totalCasSize = 0 ;
u64 totalCasCount = 0 ;
m_storage . TraverseAllCasFiles ( [ & ] ( const CasKey & casKey , u64 size ) { + + totalCasCount ; totalCasSize + = size ; } ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " CasDb: " ) ) . data ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " Count: %llu " ) , totalCasCount ) . data ) ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " Size: %s " ) , BytesToText ( totalCasSize ) . str ) . data ) ;
}
2024-05-25 22:30:11 -04:00
else if ( command . Equals ( TC ( " obliterate " ) ) )
{
m_shouldWipe = true ;
m_addsSinceMaintenance = 1 ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " Cache server database obliteration queued! " ) ) . data ) ;
}
2024-05-28 15:06:16 -04:00
else if ( command . Equals ( TC ( " maintenance " ) ) )
{
m_forceAllSteps = true ;
m_addsSinceMaintenance = 1 ;
writeLine ( line . Clear ( ) . Appendf ( TC ( " Cache server maintenance queued! " ) ) . data ) ;
}
2024-05-23 19:16:15 -04:00
else
{
writeLine ( line . Clear ( ) . Appendf ( TC ( " Unknown command: %s " ) , command . data ) . data ) ;
}
Write ( " " , 1 ) ;
if ( ! writeSuccess | | ! file . Close ( ) )
2024-04-10 20:29:18 -04:00
return false ;
CasKey key ;
2024-04-25 13:10:20 -04:00
bool deferCreation = false ;
bool fileIsCompressed = false ;
if ( ! m_storage . StoreCasFile ( key , tempFile . data , CasKeyZero , deferCreation , fileIsCompressed ) )
2024-04-10 20:29:18 -04:00
return false ;
writer . WriteCasKey ( key ) ;
DeleteFileW ( tempFile . data ) ;
return true ;
}
}