2022-05-18 09:58:59 -04:00
// Copyright Epic Games, Inc. All Rights Reserved.
# include "HttpClient.h"
2022-06-10 12:56:08 -04:00
# include "Algo/RemoveIf.h"
2022-06-03 09:12:05 -04:00
# include "Misc/AsciiSet.h"
# include "Misc/ScopeRWLock.h"
2022-05-18 09:58:59 -04:00
# include "Tasks/Task.h"
2022-06-08 09:44:27 -04:00
# if !defined(CURL_NO_OLDIES)
# define CURL_NO_OLDIES
# endif
2022-05-26 18:07:41 -04:00
# if PLATFORM_MICROSOFT
# include "Microsoft/AllowMicrosoftPlatformTypes.h"
# endif
# if defined(PLATFORM_CURL_INCLUDE)
# include PLATFORM_CURL_INCLUDE
# else
# include "curl/curl.h"
2022-06-03 09:12:05 -04:00
# endif // defined(PLATFORM_CURL_INCLUDE)
2022-05-26 18:07:41 -04:00
# if PLATFORM_MICROSOFT
# include "Microsoft/HideMicrosoftPlatformTypes.h"
# endif
2022-05-18 09:58:59 -04:00
namespace UE
{
2022-05-26 18:07:41 -04:00
namespace Http : : Private
{
2022-06-08 17:47:30 -04:00
// Default cap on simultaneous connections driven by the curl multi handle.
static constexpr uint32 MaxTotalConnections = 8;
// Polling interval, in milliseconds, used while waiting for a free pooled request.
static constexpr uint32 WaitIntervalMs = 10;
2022-05-26 18:07:41 -04:00
struct FHttpSharedDataInternals
{
CURLSH * CurlShare ;
CURLM * CurlMulti ;
2022-06-03 09:12:05 -04:00
TDepletableMpscQueue < CURL * > PendingRequestAdditions ;
2022-05-26 18:07:41 -04:00
FRWLock Locks [ CURL_LOCK_DATA_LAST ] ;
bool WriteLocked [ CURL_LOCK_DATA_LAST ] { } ;
} ;
struct FHttpSharedDataStatics
{
static void LockFn ( CURL * Handle , curl_lock_data Data , curl_lock_access Access , void * User )
{
FHttpSharedDataInternals * SharedDataInternals = ( ( FHttpSharedData * ) User ) - > Internals . Get ( ) ;
if ( Access = = CURL_LOCK_ACCESS_SHARED )
{
SharedDataInternals - > Locks [ Data ] . ReadLock ( ) ;
}
else
{
SharedDataInternals - > Locks [ Data ] . WriteLock ( ) ;
SharedDataInternals - > WriteLocked [ Data ] = true ;
}
}
static void UnlockFn ( CURL * Handle , curl_lock_data Data , void * User )
{
FHttpSharedDataInternals * SharedDataInternals = ( ( FHttpSharedData * ) User ) - > Internals . Get ( ) ;
if ( ! SharedDataInternals - > WriteLocked [ Data ] )
{
SharedDataInternals - > Locks [ Data ] . ReadUnlock ( ) ;
}
else
{
SharedDataInternals - > WriteLocked [ Data ] = false ;
SharedDataInternals - > Locks [ Data ] . WriteUnlock ( ) ;
}
}
} ;
2022-06-03 09:12:05 -04:00
} // Http::Private
void FHttpAccessToken : : SetToken ( const FStringView Token )
{
FWriteScopeLock WriteLock ( Lock ) ;
const FAnsiStringView Prefix = ANSITEXTVIEW ( " Authorization: Bearer " ) ;
const int32 TokenLen = FPlatformString : : ConvertedLength < ANSICHAR > ( Token . GetData ( ) , Token . Len ( ) ) ;
Header . Empty ( Prefix . Len ( ) + TokenLen ) ;
Header . Append ( Prefix . GetData ( ) , Prefix . Len ( ) ) ;
const int32 TokenIndex = Header . AddUninitialized ( TokenLen ) ;
FPlatformString : : Convert ( Header . GetData ( ) + TokenIndex , TokenLen , Token . GetData ( ) , Token . Len ( ) ) ;
Serial . fetch_add ( 1 , std : : memory_order_relaxed ) ;
2022-05-26 18:07:41 -04:00
}
2022-06-03 09:12:05 -04:00
/** Appends the token's cached "Authorization: Bearer ..." header to Builder under a shared read lock. */
FAnsiStringBuilderBase& operator<<(FAnsiStringBuilderBase& Builder, const FHttpAccessToken& Token)
{
	FReadScopeLock ReadLock(Token.Lock);
	return Builder.Append(Token.Header);
}
2022-06-10 12:56:08 -04:00
/**
 * Initializes the shared curl state: a share handle (DNS + SSL session reuse, guarded
 * by the lock callbacks) and a multi handle capped at the configured connection count.
 * @param OverrideMaxConnections  Non-zero to override Http::Private::MaxTotalConnections.
 */
FHttpSharedData::FHttpSharedData(uint32 OverrideMaxConnections)
	: PendingRequestEvent(EEventMode::AutoReset)
{
	Internals = MakePimpl<Http::Private::FHttpSharedDataInternals>();

	Internals->CurlShare = curl_share_init();
	curl_share_setopt(Internals->CurlShare, CURLSHOPT_USERDATA, this);
	curl_share_setopt(Internals->CurlShare, CURLSHOPT_LOCKFUNC, Http::Private::FHttpSharedDataStatics::LockFn);
	curl_share_setopt(Internals->CurlShare, CURLSHOPT_UNLOCKFUNC, Http::Private::FHttpSharedDataStatics::UnlockFn);
	// CURLSHOPT_SHARE is additive: each call enables sharing of one more data kind.
	curl_share_setopt(Internals->CurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_DNS);
	curl_share_setopt(Internals->CurlShare, CURLSHOPT_SHARE, CURL_LOCK_DATA_SSL_SESSION);

	Internals->CurlMulti = curl_multi_init();
	// curl_multi_setopt is variadic and reads numeric option values as long; pass long
	// explicitly instead of letting a uint32 promote to int (mismatched on LP64 targets).
	const long MaxConnections = static_cast<long>(OverrideMaxConnections ? OverrideMaxConnections : Http::Private::MaxTotalConnections);
	curl_multi_setopt(Internals->CurlMulti, CURLMOPT_MAX_TOTAL_CONNECTIONS, MaxConnections);
	// Keep the connection pool at exactly the number of total connections.
	curl_multi_setopt(Internals->CurlMulti, CURLMOPT_MAXCONNECTS, MaxConnections);
	curl_multi_setopt(Internals->CurlMulti, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
}
/** Requests service-thread shutdown, joins it if running, then tears down the curl handles. */
FHttpSharedData::~FHttpSharedData()
{
	bAsyncThreadShutdownRequested.store(true, std::memory_order_relaxed);
	if (AsyncServiceThread.IsJoinable())
	{
		AsyncServiceThread.Join();
	}
	// Safe to clean up only after the service thread has stopped touching these handles.
	curl_multi_cleanup(Internals->CurlMulti);
	curl_share_cleanup(Internals->CurlShare);
}
2022-05-26 18:07:41 -04:00
void FHttpSharedData : : AddRequest ( void * Curl )
2022-05-18 09:58:59 -04:00
{
2022-05-26 18:07:41 -04:00
if ( Internals - > PendingRequestAdditions . EnqueueAndReturnWasEmpty ( static_cast < CURL * > ( Curl ) ) )
2022-05-18 09:58:59 -04:00
{
PendingRequestEvent . Trigger ( ) ;
}
if ( ! bAsyncThreadStarting . load ( std : : memory_order_relaxed ) & & ! bAsyncThreadStarting . exchange ( true , std : : memory_order_relaxed ) )
{
2022-06-10 12:56:08 -04:00
AsyncServiceThread = FThread ( TEXT ( " HttpService " ) , [ this ] { ProcessAsyncRequests ( ) ; } , 64 * 1024 , TPri_Normal ) ;
2022-05-18 09:58:59 -04:00
}
}
2022-05-26 18:07:41 -04:00
void * FHttpSharedData : : GetCurlShare ( ) const
2022-05-18 09:58:59 -04:00
{
2022-05-26 18:07:41 -04:00
return Internals - > CurlShare ;
2022-05-18 09:58:59 -04:00
}
void FHttpSharedData : : ProcessAsyncRequests ( )
{
int ActiveTransfers = 0 ;
2022-06-10 12:56:08 -04:00
TArray < Tasks : : TTask < void > > PendingCompletionTasks ;
PendingCompletionTasks . Reserve ( 128 ) ;
2022-05-18 09:58:59 -04:00
2022-06-10 12:56:08 -04:00
auto ProcessPendingRequests = [ this , & ActiveTransfers , & PendingCompletionTasks ]
2022-05-18 09:58:59 -04:00
{
int CurrentActiveTransfers = - 1 ;
do
{
2022-05-26 18:07:41 -04:00
Internals - > PendingRequestAdditions . Deplete ( [ this , & ActiveTransfers ] ( CURL * Curl )
2022-06-03 09:12:05 -04:00
{
curl_multi_add_handle ( Internals - > CurlMulti , Curl ) ;
+ + ActiveTransfers ;
} ) ;
2022-05-18 09:58:59 -04:00
2022-05-26 18:07:41 -04:00
curl_multi_perform ( Internals - > CurlMulti , & CurrentActiveTransfers ) ;
2022-05-18 09:58:59 -04:00
if ( CurrentActiveTransfers = = 0 | | ActiveTransfers ! = CurrentActiveTransfers )
{
for ( ; ; )
{
2022-06-03 09:12:05 -04:00
int MsgsStillInQueue = 0 ; // may use that to impose some upper limit we may spend in that loop
2022-05-26 18:07:41 -04:00
CURLMsg * Message = curl_multi_info_read ( Internals - > CurlMulti , & MsgsStillInQueue ) ;
2022-05-18 09:58:59 -04:00
if ( ! Message )
{
break ;
}
// find out which requests have completed
if ( Message - > msg = = CURLMSG_DONE )
{
CURL * CompletedHandle = Message - > easy_handle ;
2022-05-26 18:07:41 -04:00
curl_multi_remove_handle ( Internals - > CurlMulti , CompletedHandle ) ;
2022-05-18 09:58:59 -04:00
void * PrivateData = nullptr ;
curl_easy_getinfo ( CompletedHandle , CURLINFO_PRIVATE , & PrivateData ) ;
FHttpRequest * CompletedRequest = ( FHttpRequest * ) PrivateData ;
if ( CompletedRequest )
{
// It is important that the CompleteAsync call doesn't happen on this thread as it is possible it will block waiting
// for a free HTTP request, and if that happens on this thread, we can deadlock as no HTTP requests will become
// available while this thread is blocked.
2022-06-10 12:56:08 -04:00
PendingCompletionTasks . Emplace ( Tasks : : Launch ( TEXT ( " FHttpRequest::CompleteAsync " ) , [ CompletedRequest , Result = Message - > data . result ] ( ) mutable
2022-05-18 09:58:59 -04:00
{
CompletedRequest - > CompleteAsync ( Result ) ;
2022-06-10 12:56:08 -04:00
} ) ) ;
2022-05-18 09:58:59 -04:00
}
}
}
ActiveTransfers = CurrentActiveTransfers ;
}
if ( CurrentActiveTransfers > 0 )
{
2022-05-26 18:07:41 -04:00
curl_multi_wait ( Internals - > CurlMulti , nullptr , 0 , 1 , nullptr ) ;
2022-05-18 09:58:59 -04:00
}
}
while ( CurrentActiveTransfers > 0 ) ;
} ;
do
{
ProcessPendingRequests ( ) ;
2022-06-10 12:56:08 -04:00
PendingCompletionTasks . SetNum ( Algo : : StableRemoveIf ( PendingCompletionTasks , [ ] ( const Tasks : : TTask < void > & Task ) { return Task . IsCompleted ( ) ; } ) , false ) ;
if ( ! PendingCompletionTasks . IsEmpty ( ) )
{
Tasks : : Wait ( PendingCompletionTasks , FTimespan : : FromMilliseconds ( 100 ) ) ;
}
else
{
PendingRequestEvent . Wait ( 100 ) ;
}
2022-05-18 09:58:59 -04:00
}
while ( ! FHttpSharedData : : bAsyncThreadShutdownRequested . load ( std : : memory_order_relaxed ) ) ;
// Process last requests before shutdown. May want these to be aborted instead.
ProcessPendingRequests ( ) ;
}
2022-06-03 09:12:05 -04:00
/**
 * Builds a fixed pool of PoolSize pre-allocated requests plus configuration for creating
 * up to InOverflowLimit additional overflow requests on demand.
 */
FHttpRequestPool::FHttpRequestPool(
	const FStringView InServiceUrl,
	FStringView InEffectiveServiceUrl,
	const FHttpAccessToken* const InAuthorizationToken,
	FHttpSharedData* const InSharedData,
	const uint32 PoolSize,
	const uint32 InOverflowLimit)
	: ActiveOverflowRequests(0)
	, OverflowLimit(InOverflowLimit)
{
	// Strip any trailing slash so request paths can be appended uniformly.
	InEffectiveServiceUrl = FAsciiSet::TrimSuffixWith(InEffectiveServiceUrl, "/");

	Pool.AddUninitialized(PoolSize);
	Requests.AddUninitialized(PoolSize);
	for (int32 Index = 0; Index < Pool.Num(); ++Index)
	{
		Pool[Index].Usage = 0u;
		// Placement-new each request into the pre-allocated Requests storage.
		Pool[Index].Request = new(&Requests[Index]) FHttpRequest(InServiceUrl, InEffectiveServiceUrl, InAuthorizationToken, InSharedData, /*bLogErrors*/ true);
	}

	// Retained so overflow requests can later be constructed with the same configuration.
	InitData = MakeUnique<FInitData>(InServiceUrl, InEffectiveServiceUrl, InAuthorizationToken, InSharedData);
}
/** Verifies every overflow and pooled request has been released before destruction. */
FHttpRequestPool::~FHttpRequestPool()
{
	check(ActiveOverflowRequests.load() == 0);
	for (const FEntry& Entry : Pool)
	{
		// No requests should be in use by now.
		check(Entry.Usage.load(std::memory_order_acquire) == 0u);
	}
}
/**
 * Attempts to claim a request without blocking.
 * Tries the pre-allocated pool first, then (if permitted) allocates an overflow request.
 * @return A reset request on success, or nullptr if none is available.
 */
FHttpRequest* FHttpRequestPool::GetFreeRequest(bool bUnboundedOverflow)
{
	// First pass: claim an idle pooled entry with a CAS from 0 -> 1 users.
	for (FEntry& Entry : Pool)
	{
		if (!Entry.Usage.load(std::memory_order_relaxed))
		{
			uint8 ExpectedUsage = 0u;
			if (Entry.Usage.compare_exchange_strong(ExpectedUsage, 1u))
			{
				Entry.Request->Reset();
				return Entry.Request;
			}
		}
	}

	if (bUnboundedOverflow || (OverflowLimit > 0))
	{
		// The use of two operations here (load, then fetch_add) implies that we can exceed the overflow limit because the combined operation
		// is not atomic. This is acceptable for our use case. If we wanted to enforce the hard limit, we could use a loop instead.
		if (bUnboundedOverflow || (ActiveOverflowRequests.load(std::memory_order_relaxed) < OverflowLimit))
		{
			// Create an overflow request (outside of the pre-allocated range of requests)
			ActiveOverflowRequests.fetch_add(1, std::memory_order_relaxed);
			return new FHttpRequest(InitData->ServiceUrl, InitData->EffectiveServiceUrl, InitData->AccessToken, InitData->SharedData, true);
		}
	}

	return nullptr;
}
/**
 * Blocks until a request can be claimed. Waiting threads enqueue themselves in a FIFO so
 * requests are handed out in the order threads began waiting.
 */
FHttpRequest* FHttpRequestPool::WaitForFreeRequest(bool bUnboundedOverflow)
{
	TRACE_CPUPROFILER_EVENT_SCOPE(HttpDDC_WaitForFreeRequest);
	FHttpRequest* Request = GetFreeRequest(bUnboundedOverflow);
	if (Request == nullptr)
	{
		// Make it fair by allowing each thread to register itself in a FIFO
		// so that the first thread to start waiting is the first one to get a request.
		FWaiter* Waiter = new FWaiter(this);
		Waiter->AddRef(); // One ref for the thread that will dequeue
		Waiter->AddRef(); // One ref for us
		Waiters.enqueue(Waiter);

		while (!Waiter->Wait(Http::Private::WaitIntervalMs))
		{
			// While waiting, allow us to check if a race occurred and a request has been freed
			// between the time we checked for free requests and the time we queued ourself as a Waiter.
			if ((Request = GetFreeRequest(bUnboundedOverflow)) != nullptr)
			{
				// We abandon the FWaiter, it will be freed by the next dequeue
				// and if it has a request, it will be queued back to the pool.
				Waiter->Release();
				return Request;
			}
		}

		// A releasing thread handed us its request directly via the waiter.
		Request = Waiter->Request.exchange(nullptr);
		Request->Reset();
		Waiter->Release();
	}
	check(Request);
	return Request;
}
void FHttpRequestPool : : ReleaseRequestToPool ( FHttpRequest * Request )
{
if ( ( Request < Requests . GetData ( ) ) | | ( Request > = ( Requests . GetData ( ) + Requests . Num ( ) ) ) )
{
// For overflow requests (outside of the pre-allocated range of requests), just delete it immediately
delete Request ;
ActiveOverflowRequests . fetch_sub ( 1 , std : : memory_order_relaxed ) ;
return ;
}
2022-06-03 09:12:05 -04:00
for ( FEntry & Entry : Pool )
2022-05-18 09:58:59 -04:00
{
2022-06-03 09:12:05 -04:00
if ( Entry . Request = = Request )
2022-05-18 09:58:59 -04:00
{
// If only 1 user is remaining, we can give it to a waiter
// instead of releasing it back to the pool.
2022-06-03 09:12:05 -04:00
if ( Entry . Usage = = 1u )
2022-05-18 09:58:59 -04:00
{
if ( FWaiter * Waiter = Waiters . dequeue ( ) )
{
Waiter - > Request = Request ;
Waiter - > Trigger ( ) ;
Waiter - > Release ( ) ;
return ;
}
}
2022-06-03 09:12:05 -04:00
Entry . Usage - - ;
2022-05-18 09:58:59 -04:00
return ;
}
}
2022-06-03 09:12:05 -04:00
checkNoEntry ( ) ;
2022-05-18 09:58:59 -04:00
}
void FHttpRequestPool : : MakeRequestShared ( FHttpRequest * Request , uint8 Users )
{
2022-06-03 09:12:05 -04:00
// Overflow requests (outside of the pre-allocated range of requests), cannot be made shared
check ( ( Request > = Requests . GetData ( ) ) & & ( Request < ( Requests . GetData ( ) + Requests . Num ( ) ) ) ) ;
2022-05-18 09:58:59 -04:00
check ( Users ! = 0 ) ;
2022-06-03 09:12:05 -04:00
for ( FEntry & Entry : Pool )
2022-05-18 09:58:59 -04:00
{
2022-06-03 09:12:05 -04:00
if ( Entry . Request = = Request )
2022-05-18 09:58:59 -04:00
{
2022-06-03 09:12:05 -04:00
Entry . Usage = Users ;
2022-05-18 09:58:59 -04:00
return ;
}
}
2022-06-03 09:12:05 -04:00
checkNoEntry ( ) ;
2022-05-18 09:58:59 -04:00
}
} // UE