Files
UnrealEngineUWP/Engine/Source/Runtime/PakFile/Private/IPlatformFilePak.cpp
Robert Manuszewski 17538be4f8 Copying //UE4/Dev-Core to //UE4/Dev-Main (Source: //UE4/Dev-Core @ 3151653)
#lockdown Nick.Penwarden

==========================
MAJOR FEATURES + CHANGES
==========================

Change 2975891 on 2016/05/12 by Gil.Gribb

	merged in new async stuff from dev-rendering.

Change 2976695 on 2016/05/13 by Gil.Gribb

	updated precache list

Change 2977030 on 2016/05/13 by Gil.Gribb

	Added time slicing to CreateAsyncPackagesFromQueue, radically reduced the frequency of "precache trimming" and changed a few things in the test rig and logging

Change 2977090 on 2016/05/13 by Gil.Gribb

	Fixed module manager threading and added cmd line param to force async loading thread.

Change 2977292 on 2016/05/13 by Gil.Gribb

	check for thread safety in looking at asset registry

Change 2977296 on 2016/05/13 by Gil.Gribb

	removed some super-expensive check()s from precacher

Change 2978368 on 2016/05/16 by Gil.Gribb

	Move several expensive bools inside of the basic tests inside of FLinkerLoad::Preload, saves a fraction of a second.

Change 2978414 on 2016/05/16 by Gil.Gribb

	Added support and testing for unmounting pak files to the pak precacher.

Change 2978446 on 2016/05/16 by Gil.Gribb

	Allow linker listing in non-shipping builds

Change 2978550 on 2016/05/16 by Gil.Gribb

	Allowed some linker spew in non-shipping builds (instead of debug builds). Some tweak to help track down the music.uasset leak.

Change 2979952 on 2016/05/17 by Robert.Manuszewski

	Merging //UE4/Dev-Core @ 2979938 to Dev-UE-30519-LoadTimes

Change 2984927 on 2016/05/20 by Gil.Gribb

	fix a few bugs with an mcp repro

Change 2984951 on 2016/05/20 by Gil.Gribb

	fixed issues with USE_NEW_ASYNC_IO = 0

Change 2985296 on 2016/05/20 by Gil.Gribb

	Fixed several bugs with the MCP boot test

Change 2987956 on 2016/05/24 by Robert.Manuszewski

	Fixing leaked linkers created by blocking load requests during async loading.

Change 2987959 on 2016/05/24 by Joe.Conley

	Enable load timings in block loading also (in addition to async loading).

Change 3017713 on 2016/06/17 by Robert.Manuszewski

	Removing GUseSeekFreeLoading.

Change 3017722 on 2016/06/17 by Robert.Manuszewski

	Renaming LOAD_SeekFree flag to LOAD_Async to better reflect its current purpose.

Change 3017833 on 2016/06/17 by Robert.Manuszewski

	Merging //UE4/Dev-Core to Dev-UE-30519-LoadTimes (//Tasks/Dev-Core/Dev-UE-30519-LoadTimes)

Change 3017840 on 2016/06/17 by Robert.Manuszewski

	Re-doing Dev-Core changes to Delegates 2/2

Change 3022872 on 2016/06/22 by Gil.Gribb

	reorder memory trim and deleting loaders

Change 3059218 on 2016/07/21 by Robert.Manuszewski

	Fixing compilation errors - adding missing load time tracker stats.

Change 3064508 on 2016/07/26 by Robert.Manuszewski

	Removing blocking loading path in cooked builds. LoadPackage will now use the async path.

Change 3066312 on 2016/07/27 by Gil.Gribb

	Event driven loader, first pass

Change 3066785 on 2016/07/27 by Gil.Gribb

	Removed check...searching forward for export fusion can release a node

Change 3068118 on 2016/07/28 by Gil.Gribb

	critical bug fixes for the event driven loader

Change 3068333 on 2016/07/28 by Gil.Gribb

	correctly handle the case where a file is rejected after loading the summary

Change 3069618 on 2016/07/28 by Robert.Manuszewski

	Merging //UE4/Dev-Core to Dev-UE-30519-LoadTimes (//Tasks/Dev-Core/Dev-UE-30519-LoadTimes)

Change 3069901 on 2016/07/29 by Robert.Manuszewski

	Fixing a hang when loading QA-Blueprints level

Change 3070171 on 2016/07/29 by Gil.Gribb

	fixed CDO cyclic dependencies

Change 3075288 on 2016/08/03 by Gil.Gribb

	misc fixes to the event driven loader

Change 3077332 on 2016/08/04 by Robert.Manuszewski

	Fixing checkSlow asserts caused by new loading code not being flagged as IsInAsyncLoadThread() and CreateSynchEvent deprecation warning.

Change 3078113 on 2016/08/04 by Gil.Gribb

	implemented "nicks rule" and undid some previous material and world hacks needed without it.

Change 3079480 on 2016/08/05 by Gil.Gribb

	fixes and tweaks on event driven loader

Change 3080135 on 2016/08/07 by Gil.Gribb

	misc fixes for event driven loader, now with reasonable memory

Change 3083722 on 2016/08/10 by Robert.Manuszewski

	Fixing hangs when async loading packages.

Change 3091747 on 2016/08/17 by Gil.Gribb

	Fix all hitches in streaming load that were regressions.

Change 3093258 on 2016/08/18 by Gil.Gribb

	Fix bug that caused an assert when packages fail to load for certain reasons (like loading an uncooked file).

Change 3095719 on 2016/08/20 by Gil.Gribb

	reenable async loading thread and cleanup and bug fixes

Change 3096350 on 2016/08/22 by Gil.Gribb

	tweak task priorities a bit to minimize precaching memory

Change 3096355 on 2016/08/22 by Gil.Gribb

	add support for precaching for "loose files" in the generic async layer.

Change 3098091 on 2016/08/23 by Gil.Gribb

	Split header into a separate file and disabled a bad optimization in the bulk data.

Change 3099783 on 2016/08/24 by Gil.Gribb

	rework dependency graph to be much, much faster. About half done.

Change 3100995 on 2016/08/25 by Gil.Gribb

	fixed bugs with streaming texture from .uexp and cook time check that should have been runtime only

Change 3101369 on 2016/08/25 by Gil.Gribb

	fixed bug with blueprints in the new loader.

Change 3102793 on 2016/08/26 by Gil.Gribb

	PS4 - fixed small block memcpy to actually be inline

Change 3103785 on 2016/08/27 by Gil.Gribb

	fixed case bug with pak order. devirtualized flinkerload::serialize, made sure -fileopenlog is not heavily skewed

Change 3104884 on 2016/08/29 by Gil.Gribb

	fixed a BP bug and tweaked the -fileopenlog behavior to do leaf assets DFS

Change 3105266 on 2016/08/29 by Ben.Zeigler

	Editor build compilation fix

Change 3105774 on 2016/08/30 by Gil.Gribb

	add checks to locate cases where we try to use something that isn't loaded yet

Change 3107794 on 2016/08/31 by Gil.Gribb

	fixed a bug with BP's not loading the parent CDO soon enough

Change 3114278 on 2016/09/06 by Gil.Gribb

	looping loads for paragon load test

Change 3114311 on 2016/09/06 by Ben.Zeigler

	Fix linux compile

Change 3114350 on 2016/09/06 by Ben.Zeigler

	Linux supports fast unaligned int reads

Change 3116169 on 2016/09/07 by Ben.Zeigler

	Force enable separate bulk data cooking when using split cooked files, end-of-exp-file doesn't make sense with the new cook scheme and will crash at runtime

Change 3116538 on 2016/09/07 by Gil.Gribb

	add dependencies for CDO subobjects

Change 3116596 on 2016/09/07 by Ben.Zeigler

	Change crash to warning when trying to load an import to a missing native class, can happen with editor only classes.

Change 3116855 on 2016/09/07 by Ben.Zeigler

	Move cook dialog down a bit so I can cook without constant dialogs popping up

Change 3117452 on 2016/09/08 by Robert.Manuszewski

	Fixing hang when suspending async loading with the async loading thread enabled.

Change 3119255 on 2016/09/09 by Robert.Manuszewski

	Removing texture allocations from PackageFileSummary as they were not used by anything.

Change 3119303 on 2016/09/09 by Gil.Gribb

	Fixed font issue by making all all bulk data either inline or in a ubulk. Added support for compressed packages.

Change 3120324 on 2016/09/09 by Ben.Zeigler

	Fix Cook warnings. Skip transient and client/server only objects when adding dependencies, and mark ShapeComponent BodySetups as properly transient.

Change 3121960 on 2016/09/12 by Ben.Zeigler

	Add RandomizeLoadOrder CVar to randomize the package serial number it uses for sorting async loads

Change 3122635 on 2016/09/13 by Gil.Gribb

	reworked searching disk warning and minor change to the background tasks used for decompression

Change 3122743 on 2016/09/13 by Gil.Gribb

	added some checks around memory accounting

Change 3123395 on 2016/09/13 by Ben.Zeigler

	Enable MallocBinned2 by default on cooked windows builds, similar to how PS4 works.
	Disabled thread pool cache clearing on windows, the threading function it was using is very slow on windows specifically

Change 3124748 on 2016/09/14 by Gil.Gribb

	Store template in import/export table and refer to it for each export to avoid calling GetArchetypeFromRequiredInfo. Minor fix for some NeedLoadForCLient etc stuff on landscape and CDOs. Fix texture streamer minmips stuff.

Change 3125153 on 2016/09/14 by Gil.Gribb

	don't put transient objects in the import map

Change 3126668 on 2016/09/15 by Gil.Gribb

	Fix critical bug with imports not waiting for the corresponding export to serialize. Fixed paragon test rig to run longer looping by flushing the renderer. Made random mode more random.

Change 3126755 on 2016/09/15 by Gil.Gribb

	ooops, test rig fix

Change 3127408 on 2016/09/15 by Ben.Zeigler

	Back out changelist 3123395, restoring windows memory to 4.13 setup

Change 3127409 on 2016/09/15 by Ben.Zeigler

	Remove Memory trim from FlushAsyncLoading, because it gets called much more often in new flow and is slow on some platforms

Change 3127948 on 2016/09/16 by Gil.Gribb

	Added a check() on any attempt to serialize a pointer to something that hasn't been created yet. This will help us find missing dependencies. There is an exception to this related to CDOs.

Change 3128094 on 2016/09/16 by Robert.Manuszewski

	Fixing exports referenced by weak object pointers not being added to the preload dependency list of the exports that depend on them.

	+ Moved weak object pointer serialization to FArchive operator << to be able to override its behavior when cooking.

Change 3128148 on 2016/09/16 by Robert.Manuszewski

	Gil's mod to how we detect exports with missing dependencies

Change 3129052 on 2016/09/16 by Ben.Zeigler

	Add Missing Serialize helpers for WeakObjectPtrs, fixes crash with replicating weak objects

Change 3129053 on 2016/09/16 by Ben.Zeigler

	Fake integrate CL #3123581 from Dev-Framework, to correctly handle detecting components as editor only even when they have collision. Fixes crashes with blueprint editor only components that depend on native templates

Change 3129630 on 2016/09/17 by Gil.Gribb

	better logging for missing dependencies and properly ifdef'd the CDO primitive comp hack

Change 3130178 on 2016/09/19 by Robert.Manuszewski

	Use the correct macro (COOK_FOR_EVENT_DRIVEN_LOAD instead of USE_NEW_ASYNC_IO) for SavePackage changes from CL #3128094

Change 3130224 on 2016/09/19 by Robert.Manuszewski

	Compile error fix

Change 3130391 on 2016/09/19 by Gil.Gribb

	Add cook time fatal errors, and undid a previous change we don't seem to need relating to editor only CDOs

Change 3130484 on 2016/09/19 by Gil.Gribb

	fixed botched GetArchetypeFromRequiredInfo

Change 3131966 on 2016/09/20 by Robert.Manuszewski

	Making the new event driven loader disabled by default. It's now also configurable via project settings (under Streaming Settings -> Event Driven Loader Enabled).

	Enabled the event driven loader for a few internal projects.

Change 3132035 on 2016/09/20 by Gil.Gribb

	fix dynamic switch on new loader

Change 3132041 on 2016/09/20 by Robert.Manuszewski

	Fix for packages not being saved to disk when cooking with event driven loader disabled.

Change 3132195 on 2016/09/20 by Robert.Manuszewski

	Enabling the event driven loader for Zen

Change 3133870 on 2016/09/21 by Graeme.Thornton

	Config files now enable the event driven loader with the correct cvar name

Change 3135812 on 2016/09/22 by Gil.Gribb

	fixed some bugs with GC during streaming

Change 3136102 on 2016/09/22 by Robert.Manuszewski

	Release GC lock when FlushingAsyncLoading when running GC.

Change 3136633 on 2016/09/22 by Gil.Gribb

	fix bug with linkers finishing before other things linked their imports

Change 3138002 on 2016/09/23 by Robert.Manuszewski

	Added an assert that will prevent content cooked for the event driven loader to be loaded by game builds that have the EDL disabled.

Change 3138012 on 2016/09/23 by Gil.Gribb

	Improved the fix to prevent packages from finishing before external imports have linked. Async load object libraries.

Change 3138031 on 2016/09/23 by Gil.Gribb

	do not preload obj libs in editor

Change 3139176 on 2016/09/24 by Gil.Gribb

	fixed another bug with an attempt to call GetArchetypeFromRequiredInfo

Change 3139459 on 2016/09/26 by Robert.Manuszewski

	Merging //UE4/Release-4.13 to Dev-LoadTimes (//Tasks/UE4/Dev-LoadTimes)

Change 3139668 on 2016/09/26 by Gil.Gribb

	change some checks to errors on bad bulk data loads

Change 3141127 on 2016/09/27 by Robert.Manuszewski

	Preventing linkers from being detached too early when async loading.

Change 3141129 on 2016/09/27 by Robert.Manuszewski

	Releasing GC Lock before calling post GC callbacks to allow StaticFindObject use in these callbacks

Change 3142048 on 2016/09/27 by Robert.Manuszewski

	Changing async loading code to not close DelayedLinkerClosePackages linkers until the async package that triggered their creation has finished loading.

Change 3143132 on 2016/09/28 by Gil.Gribb

	fixed text render comp, which has some editor only issues. Fixes a runtime crash and adds a cooktime warning.

Change 3143198 on 2016/09/28 by Gil.Gribb

	fixed it so that bogus loads of bulk data are warned but do not crash

Change 3143287 on 2016/09/28 by Robert.Manuszewski

	UBT will now invalidate its makefiles if ini files are newer than the makefile (ini files may contain global build settings).

	+ Android toolchain will add hashed command line values to the action response filenames to actually allow it to detect compiler command line changes when detecting actions to execute

Change 3143344 on 2016/09/28 by Robert.Manuszewski

	Make UAT pass the project filename to UBT when build non-code projects so that UBT can parse all ini files.

Change 3143865 on 2016/09/28 by Gil.Gribb

	iffy fix for the net load assert in paragon, plus a few checks and one bit of code removed that should never be hit in the EDL, but makes no sense

Change 3144683 on 2016/09/29 by Graeme.Thornton

	Minor refactor of pak file non-filename stuff
	 - Don't check for file existing before running through the security delegate
	 - Default behaviour when using new IO is to reject uasset/umap/ubulk/uexp files immediately. Can be disabled by setting EXCLUDE_NONPAK_UE_EXTENSIONS to 0 in project .build.cs

Change 3144745 on 2016/09/29 by Graeme.Thornton

	Orion non-pak file whitelisting is enabled for all cooked game only builds now, rather than just clients

Change 3144780 on 2016/09/29 by Gil.Gribb

	use poison proxy on non-test/shipping builds

Change 3144819 on 2016/09/29 by Gil.Gribb

	added a few asserts and added an improved fix for the net crash

Change 3145414 on 2016/09/29 by Gil.Gribb

	fixed android assert....not sure why I need that block of code.

Change 3146502 on 2016/09/30 by Robert.Manuszewski

	Fix for GPU hang from MarcusW

Change 3146774 on 2016/09/30 by Robert.Manuszewski

	Fixing a crash when constantly streaming levels in and out caused by keeping references to objects (levels) that were requested to be streamed out.

	- Removed FAsyncObjectsReferencer. References will now be owned by FAsyncPackage
	- UGCObjectReferencer is now more thread safe

Change 3148008 on 2016/10/01 by Gil.Gribb

	add additional error for attempting to create an object from a class that needs to be loaded

Change 3148009 on 2016/10/01 by Gil.Gribb

	fix very old threading bug whereby the ASL and GT would attempt to use the same static array

Change 3148222 on 2016/10/02 by Robert.Manuszewski

	Fix for an assert when an FGCObject is removed when purging UObjects

Change 3148229 on 2016/10/02 by Gil.Gribb

	disable assert that was crashing paragon ps4

Change 3148409 on 2016/10/03 by Robert.Manuszewski

	Allow another case for removing FGCObjects while in GC.

Change 3148416 on 2016/10/03 by Robert.Manuszewski

	Merging //UE4/Release-4.13 to Dev-LoadTimes (//Tasks/UE4/Dev-LoadTimes)

Change 3149566 on 2016/10/03 by Ben.Zeigler

	#jira UE-36664 Fix issue where objects loaded during async loading could be added to the wrong package's object list, if a time slice ended at the wrong point

Change 3149913 on 2016/10/04 by Gil.Gribb

	better broadcast

Change 2889560 on 2016/03/02 by Steven.Hutton

	Packages for scheduled tasks.

Change 2889566 on 2016/03/02 by Steven.Hutton

	Remaining nuget packages for hangfire, unity and scheduled tasks.

Change 2980458 on 2016/05/17 by Chris.Wood

	Attempt to fix crash report submission problems from CRP to CR website
	[UE-30257] - Crashreports are sometimes missing file attachments

	Passing crash GUID so that website can easily check for duplicates in future
	Increased request timeout for AddCrash to be longer than website database timeout
	Logging retries for future visibility
	CRP v.1.1.6

Change 3047870 on 2016/07/13 by Steven.Hutton

	Updated CRW to entity framework with repository models. #rb none

Change 3126265 on 2016/09/15 by Steve.Robb

	Fix for TCString::Strspn.

Change 3126266 on 2016/09/15 by Steve.Robb

	Alternative fix for GitHub 2698: Fix one bug : Parsing command "Enable True" is invalid.

	#jira UE-34670

Change 3126268 on 2016/09/15 by Steve.Robb

	UWorld can no longer be extended by users.
	UHT now handles final class declarations.

	#jira UE-35708

Change 3126273 on 2016/09/15 by Steve.Robb

	A further attempt to catch uninitialized pointers supplied to the GC.

	#jira UE-34361

Change 3130042 on 2016/09/19 by Steve.Robb

	Super for USTRUCTs.

	Suggested here: https://udn.unrealengine.com/questions/310461/automatically-typedef-super-for-ustructs.html

Change 3131861 on 2016/09/20 by Steven.Hutton

	Reconciling work for view engine changes #rb none

Change 3131862 on 2016/09/20 by Steve.Robb

	Removal of THasOperatorEquals and THasOperatorNotEquals from Platform.h, which should have happened as part of CL# 3045963.

Change 3131863 on 2016/09/20 by Steven.Hutton

	Adding packages #rb none

Change 3131869 on 2016/09/20 by Steve.Robb

	Improved error message for enum classes with a missing base:

	Error: Missing base specifier for enum class 'EMyEnum' - did you mean ': uint8'?

Change 3132046 on 2016/09/20 by Graeme.Thornton

	Fix for cvar thread access assert in FLandscapeComponentGrassData serialization function
	 - This function can be called from the async thread so access CVarGrassDiscardDataOnLoad with GetValueOnAnyThread() rather than GetValueOnGameThread()

Change 3133201 on 2016/09/20 by Ben.Zeigler

	Reorganize WindowsPlatformMemory and MacPlatformMemory to work like LinuxPlatformMemory where there is an enum to select the allocator, and move some of it up to GenericPlatformMemory
	Add command line options to select malloc at runtime for Windows and Linux, I don't know how Mac options work
	Improve the performance of BroadcastSlow_OnlyUseForSpecialPurposes on windows, but there are cases where it occasionally stalls for a few seconds waiting for the flush
	Add MallocBinned2 as an option for mac, linux, and windows, but default to off due to some threading issues

Change 3133722 on 2016/09/21 by Graeme.Thornton

	Cooker forces a shader compilation flush when it detects that it has passed the max memory budget

Change 3133756 on 2016/09/21 by Steve.Robb

	Refactor of TrimPrecedingAndTrailing to avoid a call to FString::Mid with a negative count, which is now illegal.

	#jira UE-36163

Change 3134182 on 2016/09/21 by Steve.Robb

	GitHub #1986: Don't show warnings and errors in console twice with UCommandlet::LogToConsole == true

	#jira UE-25915

Change 3134306 on 2016/09/21 by Ben.Zeigler

	Fix it so FMallocBinned2::Trim skips task threads on desktop platforms, they are too slow and don't allocate much memory
	Enable MallocBinned2 as default binned malloc on Windows
	Remove the -Run command line check as it was removed from the old version as well

Change 3135569 on 2016/09/22 by Graeme.Thornton

	Don't create material resources if we are in a build that can never render
	 - Saves a few MB of memory

Change 3135652 on 2016/09/22 by Steve.Robb

	New async-loading-thread-safe IsA implementation.

	#jira UECORE-298

Change 3135692 on 2016/09/22 by Steven.Hutton

	Minor bug fixes to view pages #rb none

Change 3135990 on 2016/09/22 by Robert.Manuszewski

	Adding ENGINE_API to FStripDataFlags so that it can be used outside of the Engine module.

Change 3136020 on 2016/09/22 by Steve.Robb

	Display a meaningful error and shutdown if Core modules fail to load.

	https://udn.unrealengine.com/questions/312063/mac-unrealheadertool-failing-randomly.html

Change 3136107 on 2016/09/22 by Chris.Wood

	Added S3 file upload to output stage of Crash Report Process (v.1.1.26)
	[UE-35991] - Crash Report Process to write crash files to S3

	Also adds OOM alerts to CRP.
	Also disk space alerts changed to 5% free space and repeat once every 30 minutes instead of 10 minutes.

Change 3137562 on 2016/09/23 by Steve.Robb

	TUniquePtr<T[]> support.

Change 3138030 on 2016/09/23 by Steve.Robb

	Virtual UProperty functions moved out of headers into .cpp files to ease iteration.

Change 3140381 on 2016/09/26 by Chris.Wood

	Disabled uploads via CRRs while leaving services switched on to avoid crashes in some clients.
	[UETOOL-1005] - Turn off CrashReportReceivers

Change 3141150 on 2016/09/27 by Steve.Robb

	Invoke support for TFunction.

Change 3141151 on 2016/09/27 by Steve.Robb

	UBoolProperty now supports hashing and is therefore usable as a TSet element or TMap key.
	FText is now prevented from being a TSet element or TMap key.
	UTextProperty::GetCPPTypeForwardDeclaration implementation moved to the .cpp file.

	#jira UE-36051
	#jira UE-36053

Change 3141440 on 2016/09/27 by Chris.Wood

	Removed legacy queues and unnecessary duplication checks from Crash Report Process (v1.2.0)
	[UE-36246] - CRP scalability: Simplify CRP inputs to DataRouter/S3 only

Change 3142999 on 2016/09/28 by Chris.Wood

	Added dedicated PS4 crash queue to Crash Report Process (v1.2.1)

Change 3144831 on 2016/09/29 by Steve.Robb

	InternalPrecache now flags the archive as in-error so that it can be checked by a caller, rather than popping up a dialog box and asserting.

	#jira https://jira.it.epicgames.net/browse/OPP-6036

Change 3145184 on 2016/09/29 by Robert.Manuszewski

	FScopedCreateImportCounter will now always store the current linker and restore the previous one when it exits.

Change 3148432 on 2016/10/03 by Robert.Manuszewski

	Thread safety fixes for the async log writer + made the async log writer flush its archive more often.

Change 3148661 on 2016/10/03 by Graeme.Thornton

	Fixing merge of IsNonPakFilenameAllowed()
	 - Removed directory search stuff... we pass everything to the delegate now anyway

Change 3149669 on 2016/10/03 by Ben.Zeigler

	Lower verbosity of warnings from deleting native properties. These cases do not cause any problems and are not fixable without resaving the content after it has started warning. I checked Jira history and neither of these warnings has ever found a real bug, but has caused a lot of content to be resaved unnecessarily.

Change 3149670 on 2016/10/03 by Ben.Zeigler

	Merge CL #3149566 from Dev-LoadTimes
	#jira UE-36664 Fix issue where objects loaded during async loading could be added to the wrong package's object list, if a time slice ended at the wrong point

Change 3149835 on 2016/10/04 by Graeme.Thornton

	Thread safety fix for SkyLightComponent
	 - Add to global update list from PostLoad rather than PostInitProperties so that it happens on the game thread, and not the async loading thread (if enabled)

Change 3149836 on 2016/10/04 by Graeme.Thornton

	Thread safety fix for ReflectionCaptureComponent
	 - Add to global update list from PostLoad rather than PostInitProperties so that it happens on the game thread, and not the async loading thread (if enabled)

Change 3149959 on 2016/10/04 by Robert.Manuszewski

	Allow import packages to be missing if they're on the KnownMissingPackages list

Change 3150023 on 2016/10/04 by Steven.Hutton

	Updating jira strings. #rb none

Change 3150050 on 2016/10/04 by Steve.Robb

	MakeShared now returns a TSharedRef (which is implicitly convertible to TSharedPtr) rather than a TSharedPtr (which is not implicitly convertible to TSharedRef), for ease of use and because MakeShared can't return a null pointer anyway.

Change 3150110 on 2016/10/04 by Robert.Manuszewski

	Allow UGCObjectReferencer::AddObjects to happen during BeginDestroy and FinishDestroy. It's fine as long as we're not adding new objects during reachability analysis.

Change 3150120 on 2016/10/04 by Gil.Gribb

	fix task graph/binned2 broadcast for PS4

Change 3150195 on 2016/10/04 by Robert.Manuszewski

	Fixing WEX crash
	#jira UE-36801

Change 3150212 on 2016/10/04 by Robert.Manuszewski

	Increasing compiler memory limit to fix CIS errors
	#jira UE-36795

Change 3151583 on 2016/10/05 by Robert.Manuszewski

	Temporarily switching to the old IsA path

	#jira UE-36803

Change 3151642 on 2016/10/05 by Steve.Robb

	Dependency fixes for GameFeedback modules.

Change 3151653 on 2016/10/05 by Robert.Manuszewski

	Maybe fix for crash on the Mac

	#jira UE-36846

[CL 3152539 by Robert Manuszewski in Main branch]
2016-10-05 16:51:01 -04:00

3897 lines
113 KiB
C++

// Copyright 1998-2016 Epic Games, Inc. All Rights Reserved.
#include "PakFilePrivatePCH.h"
#include "IPlatformFilePak.h"
#include "SecureHash.h"
#include "FileManagerGeneric.h"
#include "ModuleManager.h"
#include "IPlatformFileModule.h"
#include "IOBase.h"
#include "BigInt.h"
#include "SignedArchiveReader.h"
#include "PublicKey.inl"
#include "AES.h"
#include "GenericPlatformChunkInstall.h"
#include "AsyncFileHandle.h"
DEFINE_LOG_CATEGORY(LogPakFile);
DEFINE_STAT(STAT_PakFile_Read);
DEFINE_STAT(STAT_PakFile_NumOpenHandles);
#ifndef EXCLUDE_NONPAK_UE_EXTENSIONS
#define EXCLUDE_NONPAK_UE_EXTENSIONS USE_NEW_ASYNC_IO // Use .Build.cs file to disable this if the game relies on accessing loose files
#endif
FFilenameSecurityDelegate& FPakPlatformFile::GetFilenameSecurityDelegate()
{
	// Function-local static: a single shared delegate instance, lazily
	// constructed on first use and handed back by reference to every caller.
	static FFilenameSecurityDelegate FilenameSecurityDelegateSingleton;
	return FilenameSecurityDelegateSingleton;
}
#if USE_NEW_ASYNC_IO && !IS_PROGRAM && !WITH_EDITOR
#define USE_PAK_PRECACHE (1) // you can turn this off to use the async IO stuff without the precache
#else
#define USE_PAK_PRECACHE (0)
#endif
/**
* Precaching
*/
#if USE_PAK_PRECACHE
#include "TaskGraphInterfaces.h"
// Cache block granularity in bytes (64 KB).
#define PAK_CACHE_GRANULARITY (64 * 1024)
// Hard upper bound on requests in flight to the lower-level filesystem (also caps pakcache.MaxRequestsToLowerLevel).
#define PAK_CACHE_MAX_REQUESTS (8)
// Requests whose priorities differ by at most this much are eligible to be merged.
#define PAK_CACHE_MAX_PRIORITY_DIFFERENCE_MERGE (AIOP_Normal - AIOP_Precache)
// Extra (expensive) validation follows the engine-wide DO_CHECK setting.
#define PAK_EXTRA_CHECKS DO_CHECK
DECLARE_MEMORY_STAT(TEXT("PakCache Current"), STAT_PakCacheMem, STATGROUP_Memory);
DECLARE_MEMORY_STAT(TEXT("PakCache High Water"), STAT_PakCacheHighWater, STATGROUP_Memory);
// Master switch for the pak precache; when > 0 the cache is active.
static int32 GPakCache_Enable = 1;
static FAutoConsoleVariableRef CVar_Enable(
	TEXT("pakcache.Enable"),
	GPakCache_Enable,
	TEXT("If > 0, then enable the pak cache.")
);

// Maximum number of IO requests handed down to the OS filesystem at once.
static int32 GPakCache_MaxRequestsToLowerLevel = 2;
static FAutoConsoleVariableRef CVar_MaxRequestsToLowerLevel(
	TEXT("pakcache.MaxRequestsToLowerLevel"),
	GPakCache_MaxRequestsToLowerLevel,
	TEXT("Controls the maximum number of IO requests submitted to the OS filesystem at one time. Limited by PAK_CACHE_MAX_REQUESTS.")
);

// Maximum size of a single request submitted to the OS filesystem, in KB.
static int32 GPakCache_MaxRequestSizeToLowerLevelKB = 1024;
static FAutoConsoleVariableRef CVar_MaxRequestSizeToLowerLevelKB(
	// Fixed typo: was "pakcache.MaxRequestSizeToLowerLevellKB" (doubled 'l'),
	// which made the cvar unreachable under its intended, documented name.
	TEXT("pakcache.MaxRequestSizeToLowerLevelKB"),
	GPakCache_MaxRequestSizeToLowerLevelKB,
	TEXT("Controls the maximum size (in KB) of IO requests submitted to the OS filesystem.")
);

//@todoio this won't work with UT etc, need *_API or a real api to work with the pak precacher.
// When false, precache-priority requests are rejected by the pak layer.
bool GPakCache_AcceptPrecacheRequests = true;
class FPakPrecacher;
// A pak-file index (top 16 bits) and byte offset (low 48 bits) packed into one
// sortable 64-bit key; see MakeJoinedRequest / GetRequestOffset below.
typedef uint64 FJoinedOffsetAndPakIndex;
static FORCEINLINE uint16 GetRequestPakIndexLow(FJoinedOffsetAndPakIndex Joined)
{
	// The pak index occupies the top 16 bits of the joined 64-bit key.
	const uint64 PakIndexBits = Joined >> 48;
	return uint16(PakIndexBits & 0xffff);
}
static FORCEINLINE int64 GetRequestOffset(FJoinedOffsetAndPakIndex Joined)
{
	// The byte offset occupies the low 48 bits of the joined key.
	const uint64 OffsetMask = (1ull << 48) - 1; // == 0xffffffffffff
	return int64(Joined & OffsetMask);
}
// Packs a pak index (top 16 bits) and a byte offset (low 48 bits) into one key.
static FORCEINLINE FJoinedOffsetAndPakIndex MakeJoinedRequest(uint16 PakIndex, int64 Offset)
{
	check(Offset >= 0);
	// New upper-bound check: an offset of 2^48 or more would bleed into the
	// pak-index bits and silently corrupt the joined key.
	check(Offset < (int64(1) << 48));
	return (FJoinedOffsetAndPakIndex(PakIndex) << 48) | Offset;
}
// Sentinel: slot zero of every interval-tree allocator is reserved so that an
// index value of zero can always mean "no node / end of list".
enum
{
IntervalTreeInvalidIndex = 0
};
typedef uint32 TIntervalTreeIndex; // this is the arg type of TSparseArray::operator[]
// Each TIntervalTreeAllocator instance consumes one salt value (max 4; see its ctor).
static uint32 GNextSalt = 1;
// This is like TSparseArray, only a bit safer and I needed some restrictions on resizing.
// Pool allocator for interval-tree items. Returned indices carry a 2-bit
// per-allocator "salt" in their top bits so that an index from the wrong
// allocator is caught by the checks in Get/Free/CheckIndex.
template<class TItem>
class TIntervalTreeAllocator
{
	TArray<TItem> Items;
	TArray<int32> FreeItems; //@todo make this into a linked list through the existing items
	uint32 Salt;     // per-allocator tag stored in the top two bits of every returned index
	uint32 SaltMask; // mask isolating those salt bits
public:
	TIntervalTreeAllocator()
	{
		check(GNextSalt < 4); // only four distinct salts fit in the top two bits
		Salt = (GNextSalt++) << 30;
		SaltMask = MAX_uint32 << 30;
		verify((Alloc() & ~SaltMask) == IntervalTreeInvalidIndex); // we want this to always have element zero so we can figure out an index from a pointer
	}
	// Allocates a default-constructed TItem and returns its salted index.
	inline TIntervalTreeIndex Alloc()
	{
		int32 Result;
		if (FreeItems.Num())
		{
			Result = FreeItems.Pop();
		}
		else
		{
			Result = Items.Num();
			Items.AddUninitialized();
		}
		// Placement-new: both AddUninitialized slots and recycled free-list
		// slots contain raw (destructed) memory.
		new ((void*)&Items[Result]) TItem();
		return Result | Salt; // fixed: stray second semicolon removed
	}
	// Guarantees the next NeededNewNum allocations will not reallocate Items,
	// so raw TItem pointers/references stay valid during a batched insert
	// (see AddToIntervalTree_Dangerous).
	void EnsureNoRealloc(int32 NeededNewNum)
	{
		if (FreeItems.Num() + Items.GetSlack() < NeededNewNum)
		{
			Items.Reserve(Items.Num() + NeededNewNum);
		}
	}
	FORCEINLINE TItem& Get(TIntervalTreeIndex InIndex)
	{
		TIntervalTreeIndex Index = InIndex & ~SaltMask;
		// Note: the original also tested "Index >= 0", a tautology on an
		// unsigned type; it has been dropped (same behavior, no warning).
		check((InIndex & SaltMask) == Salt && Index != IntervalTreeInvalidIndex && Index < (uint32)Items.Num()); //&& !FreeItems.Contains(Index));
		return Items[Index];
	}
	// Destroys the item and returns its slot to the free list. When only the
	// reserved element zero remains live, all storage is released and the
	// sentinel element is re-seeded.
	FORCEINLINE void Free(TIntervalTreeIndex InIndex)
	{
		TIntervalTreeIndex Index = InIndex & ~SaltMask;
		check((InIndex & SaltMask) == Salt && Index != IntervalTreeInvalidIndex && Index < (uint32)Items.Num()); //&& !FreeItems.Contains(Index));
		Items[Index].~TItem();
		FreeItems.Push(Index);
		if (FreeItems.Num() + 1 == Items.Num())
		{
			// get rid of everything to restore memory coherence
			Items.Empty();
			FreeItems.Empty();
			verify((Alloc() & ~SaltMask) == IntervalTreeInvalidIndex); // we want this to always have element zero so we can figure out an index from a pointer
		}
	}
	// Validates a salted index without dereferencing it.
	FORCEINLINE void CheckIndex(TIntervalTreeIndex InIndex)
	{
		TIntervalTreeIndex Index = InIndex & ~SaltMask;
		check((InIndex & SaltMask) == Salt && Index != IntervalTreeInvalidIndex && Index < (uint32)Items.Num()); // && !FreeItems.Contains(Index));
	}
};
// A node of the interval tree: three salted indices into the shared allocators.
// Leaf slots double as linked-list heads, hence the "...OrRootOf..." naming.
class FIntervalTreeNode
{
public:
	TIntervalTreeIndex LeftChildOrRootOfLeftList;
	TIntervalTreeIndex RootOfOnList;
	TIntervalTreeIndex RightChildOrRootOfRightList;

	FIntervalTreeNode()
		: LeftChildOrRootOfLeftList(IntervalTreeInvalidIndex)
		, RootOfOnList(IntervalTreeInvalidIndex)
		, RightChildOrRootOfRightList(IntervalTreeInvalidIndex)
	{
	}

	~FIntervalTreeNode()
	{
		// All children and lists must already be detached; this routine does
		// not handle recursive destruction.
		const bool bFullyDetached =
			LeftChildOrRootOfLeftList == IntervalTreeInvalidIndex
			&& RootOfOnList == IntervalTreeInvalidIndex
			&& RightChildOrRootOfRightList == IntervalTreeInvalidIndex;
		check(bFullyDetached);
	}
};
// Shared allocator for all interval-tree nodes (element zero is the reserved invalid node).
static TIntervalTreeAllocator<FIntervalTreeNode> GIntervalTreeNodeNodeAllocator;
static FORCEINLINE uint64 HighBit(uint64 x)
{
	// Isolates bit 63: the result is non-zero exactly when the top bit is set.
	return x & 0x8000000000000000ull;
}
static FORCEINLINE bool IntervalsIntersect(uint64 Min1, uint64 Max1, uint64 Min2, uint64 Max2)
{
	// Closed intervals [Min1,Max1] and [Min2,Max2] overlap iff each one starts
	// no later than the other ends (De Morgan of the original disjoint test).
	return Min1 <= Max2 && Min2 <= Max1;
}
template<typename TItem>
// this routine assume that the pointers remain valid even though we are reallocating
// Iteratively descends the tree, steering by the bit of the (shifted) interval
// bounds at the current depth. An item whose bounds disagree at the current
// bit ("crosses middle") is linked onto that node's on-list; an item still
// agreeing at MaxShift is linked onto the node's left or right leaf list.
// "_Dangerous" because it holds a TItem reference across node allocations —
// callers must pre-reserve via EnsureNoRealloc (see AddToIntervalTree).
static void AddToIntervalTree_Dangerous(
TIntervalTreeIndex* RootNode,
TIntervalTreeAllocator<TItem>& Allocator,
TIntervalTreeIndex Index,
uint64 MinInterval,
uint64 MaxInterval,
uint32 CurrentShift,
uint32 MaxShift
)
{
while (true)
{
// Lazily create the node at this level on first touch.
if (*RootNode == IntervalTreeInvalidIndex)
{
*RootNode = GIntervalTreeNodeNodeAllocator.Alloc();
}
// Extract the bit of each bound that corresponds to the current depth.
int64 MinShifted = HighBit(MinInterval << CurrentShift);
int64 MaxShifted = HighBit(MaxInterval << CurrentShift);
FIntervalTreeNode& Root = GIntervalTreeNodeNodeAllocator.Get(*RootNode);
if (MinShifted == MaxShifted && CurrentShift < MaxShift)
{
// Both bounds lie on the same side: descend one level.
CurrentShift++;
RootNode = (!MinShifted) ? &Root.LeftChildOrRootOfLeftList : &Root.RightChildOrRootOfRightList;
}
else
{
TItem& Item = Allocator.Get(Index);
if (MinShifted != MaxShifted) // crosses middle
{
// Link onto this node's on-list (head insertion).
Item.Next = Root.RootOfOnList;
Root.RootOfOnList = Index;
}
else // we are at the leaf
{
// Max depth reached: link onto the appropriate side's leaf list.
if (!MinShifted)
{
Item.Next = Root.LeftChildOrRootOfLeftList;
Root.LeftChildOrRootOfLeftList = Index;
}
else
{
Item.Next = Root.RightChildOrRootOfRightList;
Root.RightChildOrRootOfRightList = Index;
}
}
return;
}
}
}
// Safe entry point for insertion: reserves enough node-allocator headroom for the
// worst-case descent depth so AddToIntervalTree_Dangerous cannot trigger a reallocation
// while it holds pointers into the node pool.
template<typename TItem>
static void AddToIntervalTree(
TIntervalTreeIndex* RootNode,
TIntervalTreeAllocator<TItem>& Allocator,
TIntervalTreeIndex Index,
uint32 StartShift,
uint32 MaxShift
)
{
GIntervalTreeNodeNodeAllocator.EnsureNoRealloc(1 + MaxShift - StartShift);
TItem& Item = Allocator.Get(Index);
check(Item.Next == IntervalTreeInvalidIndex); // the item must not already be linked into any list
// The interval is the item's inclusive byte range within its pak.
uint64 MinInterval = GetRequestOffset(Item.OffsetAndPakIndex);
uint64 MaxInterval = MinInterval + Item.Size - 1;
AddToIntervalTree_Dangerous(RootNode, Allocator, Index, MinInterval, MaxInterval, StartShift, MaxShift);
}
// Searches the singly linked list starting at *Iter for the item with index Index.
// If found, splices it out (clearing its Next link) and returns true; otherwise false.
// MinInterval/MaxInterval are accepted for signature symmetry with the other scan
// helpers but are not consulted here.
template<typename TItem>
static FORCEINLINE bool ScanNodeListForRemoval(
	TIntervalTreeIndex* Iter,
	TIntervalTreeAllocator<TItem>& Allocator,
	TIntervalTreeIndex Index,
	uint64 MinInterval,
	uint64 MaxInterval
)
{
	for (TIntervalTreeIndex* Link = Iter; *Link != IntervalTreeInvalidIndex; )
	{
		TItem& Candidate = Allocator.Get(*Link);
		if (*Link != Index)
		{
			Link = &Candidate.Next;
			continue;
		}
		// Found it: unlink from the list and detach.
		*Link = Candidate.Next;
		Candidate.Next = IntervalTreeInvalidIndex;
		return true;
	}
	return false;
}
// Recursively removes item Index (covering [MinInterval, MaxInterval]) from the tree.
// Descends exactly the way AddToIntervalTree_Dangerous inserted it, then prunes any
// node left completely empty on the way back out. Returns true if the item was found.
template<typename TItem>
static bool RemoveFromIntervalTree(
TIntervalTreeIndex* RootNode,
TIntervalTreeAllocator<TItem>& Allocator,
TIntervalTreeIndex Index,
uint64 MinInterval,
uint64 MaxInterval,
uint32 CurrentShift,
uint32 MaxShift
)
{
bool bResult = false;
if (*RootNode != IntervalTreeInvalidIndex)
{
// Test the current address bit of each endpoint (bit 63 after shifting left).
int64 MinShifted = HighBit(MinInterval << CurrentShift);
int64 MaxShifted = HighBit(MaxInterval << CurrentShift);
FIntervalTreeNode& Root = GIntervalTreeNodeNodeAllocator.Get(*RootNode);
if (!MinShifted && !MaxShifted)
{
// Entire interval on the left of the midpoint: leaf list at max depth, else recurse left.
if (CurrentShift == MaxShift)
{
bResult = ScanNodeListForRemoval(&Root.LeftChildOrRootOfLeftList, Allocator, Index, MinInterval, MaxInterval);
}
else
{
bResult = RemoveFromIntervalTree(&Root.LeftChildOrRootOfLeftList, Allocator, Index, MinInterval, MaxInterval, CurrentShift + 1, MaxShift);
}
}
else if (!MinShifted && MaxShifted)
{
// Interval straddles the midpoint: it lives on this node's "on" list.
bResult = ScanNodeListForRemoval(&Root.RootOfOnList, Allocator, Index, MinInterval, MaxInterval);
}
else
{
// Entire interval on the right of the midpoint.
if (CurrentShift == MaxShift)
{
bResult = ScanNodeListForRemoval(&Root.RightChildOrRootOfRightList, Allocator, Index, MinInterval, MaxInterval);
}
else
{
bResult = RemoveFromIntervalTree(&Root.RightChildOrRootOfRightList, Allocator, Index, MinInterval, MaxInterval, CurrentShift + 1, MaxShift);
}
}
if (bResult)
{
// If the removal emptied this node completely, free it and clear the parent's link.
if (Root.LeftChildOrRootOfLeftList == IntervalTreeInvalidIndex && Root.RootOfOnList == IntervalTreeInvalidIndex && Root.RightChildOrRootOfRightList == IntervalTreeInvalidIndex)
{
check(&Root == &GIntervalTreeNodeNodeAllocator.Get(*RootNode));
GIntervalTreeNodeNodeAllocator.Free(*RootNode);
*RootNode = IntervalTreeInvalidIndex;
}
}
}
return bResult;
}
// Convenience overload: derives the item's inclusive byte range from its
// OffsetAndPakIndex / Size and forwards to the recursive removal routine.
template<typename TItem>
static bool RemoveFromIntervalTree(
	TIntervalTreeIndex* RootNode,
	TIntervalTreeAllocator<TItem>& Allocator,
	TIntervalTreeIndex Index,
	uint32 StartShift,
	uint32 MaxShift
)
{
	TItem& Item = Allocator.Get(Index);
	const uint64 FirstByte = GetRequestOffset(Item.OffsetAndPakIndex);
	const uint64 LastByte = FirstByte + Item.Size - 1;
	return RemoveFromIntervalTree(RootNode, Allocator, Index, FirstByte, LastByte, StartShift, MaxShift);
}
// Walks a node list calling Func on every item overlapping [MinInterval, MaxInterval].
// When Func returns true the item is treated as removed (Func may have freed it), so it
// is unlinked using the Next value captured BEFORE the call; the item's memory must not
// be touched afterwards.
template<typename TItem>
static FORCEINLINE void ScanNodeListForRemovalFunc(
TIntervalTreeIndex* Iter,
TIntervalTreeAllocator<TItem>& Allocator,
uint64 MinInterval,
uint64 MaxInterval,
TFunctionRef<bool(TIntervalTreeIndex)> Func
)
{
while (*Iter != IntervalTreeInvalidIndex)
{
TItem& Item = Allocator.Get(*Iter);
uint64 Offset = uint64(GetRequestOffset(Item.OffsetAndPakIndex));
uint64 LastByte = Offset + uint64(Item.Size) - 1;
// save the value and then clear it.
TIntervalTreeIndex NextIndex = Item.Next;
if (IntervalsIntersect(MinInterval, MaxInterval, Offset, LastByte) && Func(*Iter))
{
*Iter = NextIndex; // this may have already be deleted, so cannot rely on the memory block
}
else
{
Iter = &Item.Next;
}
}
}
// Visits every item overlapping [MinInterval, MaxInterval] in the tree rooted at
// *RootNode; Func returning true means "remove this item" (it is unlinked by
// ScanNodeListForRemovalFunc). [MinNode, MaxNode] is the byte range covered by the
// current node; nodes emptied by the removals are freed on the way back out.
template<typename TItem>
static void MaybeRemoveOverlappingNodesInIntervalTree(
TIntervalTreeIndex* RootNode,
TIntervalTreeAllocator<TItem>& Allocator,
uint64 MinInterval,
uint64 MaxInterval,
uint64 MinNode,
uint64 MaxNode,
uint32 CurrentShift,
uint32 MaxShift,
TFunctionRef<bool(TIntervalTreeIndex)> Func
)
{
if (*RootNode != IntervalTreeInvalidIndex)
{
int64 MinShifted = HighBit(MinInterval << CurrentShift);
int64 MaxShifted = HighBit(MaxInterval << CurrentShift);
FIntervalTreeNode& Root = GIntervalTreeNodeNodeAllocator.Get(*RootNode);
// First byte belonging to this node's right half.
uint64 Center = (MinNode + MaxNode + 1) >> 1;
//UE_LOG(LogTemp, Warning, TEXT("Exploring Node %X [%d, %d] %d%d interval %llX %llX node interval %llX %llX center %llX "), *RootNode, CurrentShift, MaxShift, !!MinShifted, !!MaxShifted, MinInterval, MaxInterval, MinNode, MaxNode, Center);
if (!MinShifted)
{
// Query extends into the left half: scan the leaf list or recurse (clamping the query to the left half).
if (CurrentShift == MaxShift)
{
//UE_LOG(LogTemp, Warning, TEXT("LeftBottom %X [%d, %d] %d%d"), *RootNode, CurrentShift, MaxShift, !!MinShifted, !!MaxShifted);
ScanNodeListForRemovalFunc(&Root.LeftChildOrRootOfLeftList, Allocator, MinInterval, MaxInterval, Func);
}
else
{
//UE_LOG(LogTemp, Warning, TEXT("LeftRecur %X [%d, %d] %d%d"), *RootNode, CurrentShift, MaxShift, !!MinShifted, !!MaxShifted);
MaybeRemoveOverlappingNodesInIntervalTree(&Root.LeftChildOrRootOfLeftList, Allocator, MinInterval, FMath::Min(MaxInterval, Center - 1), MinNode, Center - 1, CurrentShift + 1, MaxShift, Func);
}
}
// Items straddling this node's midpoint may overlap the query regardless of the endpoint bits.
//UE_LOG(LogTemp, Warning, TEXT("Center %X [%d, %d] %d%d"), *RootNode, CurrentShift, MaxShift, !!MinShifted, !!MaxShifted);
ScanNodeListForRemovalFunc(&Root.RootOfOnList, Allocator, MinInterval, MaxInterval, Func);
if (MaxShifted)
{
// Query extends into the right half.
if (CurrentShift == MaxShift)
{
//UE_LOG(LogTemp, Warning, TEXT("RightBottom %X [%d, %d] %d%d"), *RootNode, CurrentShift, MaxShift, !!MinShifted, !!MaxShifted);
ScanNodeListForRemovalFunc(&Root.RightChildOrRootOfRightList, Allocator, MinInterval, MaxInterval, Func);
}
else
{
//UE_LOG(LogTemp, Warning, TEXT("RightRecur %X [%d, %d] %d%d"), *RootNode, CurrentShift, MaxShift, !!MinShifted, !!MaxShifted);
MaybeRemoveOverlappingNodesInIntervalTree(&Root.RightChildOrRootOfRightList, Allocator, FMath::Max(MinInterval, Center), MaxInterval, Center, MaxNode, CurrentShift + 1, MaxShift, Func);
}
}
//UE_LOG(LogTemp, Warning, TEXT("Done Exploring Node %X [%d, %d] %d%d interval %llX %llX node interval %llX %llX center %llX "), *RootNode, CurrentShift, MaxShift, !!MinShifted, !!MaxShifted, MinInterval, MaxInterval, MinNode, MaxNode, Center);
// Free this node if the removals emptied it.
if (Root.LeftChildOrRootOfLeftList == IntervalTreeInvalidIndex && Root.RootOfOnList == IntervalTreeInvalidIndex && Root.RightChildOrRootOfRightList == IntervalTreeInvalidIndex)
{
check(&Root == &GIntervalTreeNodeNodeAllocator.Get(*RootNode));
GIntervalTreeNodeNodeAllocator.Free(*RootNode);
*RootNode = IntervalTreeInvalidIndex;
}
}
}
// Walks the singly linked list starting at Iter, invoking Func on every item whose byte
// range overlaps [MinInterval, MaxInterval]. Stops and returns false as soon as Func
// returns false; returns true if the whole list was visited.
template<typename TItem>
static FORCEINLINE bool ScanNodeList(
	TIntervalTreeIndex Iter,
	TIntervalTreeAllocator<TItem>& Allocator,
	uint64 MinInterval,
	uint64 MaxInterval,
	TFunctionRef<bool(TIntervalTreeIndex)> Func
)
{
	for (; Iter != IntervalTreeInvalidIndex; )
	{
		TItem& ListItem = Allocator.Get(Iter);
		const uint64 FirstByte = uint64(GetRequestOffset(ListItem.OffsetAndPakIndex));
		const uint64 LastByte = FirstByte + uint64(ListItem.Size) - 1;
		if (IntervalsIntersect(MinInterval, MaxInterval, FirstByte, LastByte) && !Func(Iter))
		{
			return false;
		}
		// Read Next after Func, matching the original traversal order.
		Iter = ListItem.Next;
	}
	return true;
}
// Read-only traversal: calls Func on every item overlapping [MinInterval, MaxInterval].
// [MinNode, MaxNode] is the byte range covered by the current node. Func returning false
// aborts the whole traversal and the function returns false; otherwise returns true.
template<typename TItem>
static bool OverlappingNodesInIntervalTree(
TIntervalTreeIndex RootNode,
TIntervalTreeAllocator<TItem>& Allocator,
uint64 MinInterval,
uint64 MaxInterval,
uint64 MinNode,
uint64 MaxNode,
uint32 CurrentShift,
uint32 MaxShift,
TFunctionRef<bool(TIntervalTreeIndex)> Func
)
{
if (RootNode != IntervalTreeInvalidIndex)
{
int64 MinShifted = HighBit(MinInterval << CurrentShift);
int64 MaxShifted = HighBit(MaxInterval << CurrentShift);
FIntervalTreeNode& Root = GIntervalTreeNodeNodeAllocator.Get(RootNode);
// First byte belonging to this node's right half.
uint64 Center = (MinNode + MaxNode + 1) >> 1;
if (!MinShifted)
{
// Query extends into the left half: scan the leaf list or recurse with the query clamped to the left half.
if (CurrentShift == MaxShift)
{
if (!ScanNodeList(Root.LeftChildOrRootOfLeftList, Allocator, MinInterval, MaxInterval, Func))
{
return false;
}
}
else
{
if (!OverlappingNodesInIntervalTree(Root.LeftChildOrRootOfLeftList, Allocator, MinInterval, FMath::Min(MaxInterval, Center - 1), MinNode, Center - 1, CurrentShift + 1, MaxShift, Func))
{
return false;
}
}
}
// Items straddling this node's midpoint must always be scanned.
if (!ScanNodeList(Root.RootOfOnList, Allocator, MinInterval, MaxInterval, Func))
{
return false;
}
if (MaxShifted)
{
// Query extends into the right half.
if (CurrentShift == MaxShift)
{
if (!ScanNodeList(Root.RightChildOrRootOfRightList, Allocator, MinInterval, MaxInterval, Func))
{
return false;
}
}
else
{
if (!OverlappingNodesInIntervalTree(Root.RightChildOrRootOfRightList, Allocator, FMath::Max(MinInterval, Center), MaxInterval, Center, MaxNode, CurrentShift + 1, MaxShift, Func))
{
return false;
}
}
}
}
return true;
}
// Like ScanNodeList, but MaxInterval is a live reference: Func may shrink it during the
// scan, so later items in the same list are tested against the reduced upper bound.
template<typename TItem>
static bool ScanNodeListWithShrinkingInterval(
TIntervalTreeIndex Iter,
TIntervalTreeAllocator<TItem>& Allocator,
uint64 MinInterval,
uint64& MaxInterval,
TFunctionRef<bool(TIntervalTreeIndex)> Func
)
{
while (Iter != IntervalTreeInvalidIndex)
{
TItem& Item = Allocator.Get(Iter);
uint64 Offset = uint64(GetRequestOffset(Item.OffsetAndPakIndex));
uint64 LastByte = Offset + uint64(Item.Size) - 1;
//UE_LOG(LogTemp, Warning, TEXT("Test Overlap %llu %llu %llu %llu"), MinInterval, MaxInterval, Offset, LastByte);
// MaxInterval is re-read each iteration because Func may have shrunk it.
if (IntervalsIntersect(MinInterval, MaxInterval, Offset, LastByte))
{
//UE_LOG(LogTemp, Warning, TEXT("Overlap %llu %llu %llu %llu"), MinInterval, MaxInterval, Offset, LastByte);
if (!Func(Iter))
{
return false;
}
}
Iter = Item.Next;
}
return true;
}
// Like OverlappingNodesInIntervalTree, but MaxInterval is a live reference that Func may
// shrink as matches are found (used to hunt for the lowest candidate offset). Because
// MaxInterval keeps changing, it cannot be clamped when recursing; instead it is clamped
// to the node range only for the bit tests, and the right-side test is re-evaluated
// after the "on" list scan in case the bound shrank.
template<typename TItem>
static bool OverlappingNodesInIntervalTreeWithShrinkingInterval(
TIntervalTreeIndex RootNode,
TIntervalTreeAllocator<TItem>& Allocator,
uint64 MinInterval,
uint64& MaxInterval,
uint64 MinNode,
uint64 MaxNode,
uint32 CurrentShift,
uint32 MaxShift,
TFunctionRef<bool(TIntervalTreeIndex)> Func
)
{
if (RootNode != IntervalTreeInvalidIndex)
{
int64 MinShifted = HighBit(MinInterval << CurrentShift);
int64 MaxShifted = HighBit(FMath::Min(MaxInterval, MaxNode) << CurrentShift); // since MaxInterval is changing, we cannot clamp it during recursion.
FIntervalTreeNode& Root = GIntervalTreeNodeNodeAllocator.Get(RootNode);
// First byte belonging to this node's right half.
uint64 Center = (MinNode + MaxNode + 1) >> 1;
if (!MinShifted)
{
if (CurrentShift == MaxShift)
{
if (!ScanNodeListWithShrinkingInterval(Root.LeftChildOrRootOfLeftList, Allocator, MinInterval, MaxInterval, Func))
{
return false;
}
}
else
{
if (!OverlappingNodesInIntervalTreeWithShrinkingInterval(Root.LeftChildOrRootOfLeftList, Allocator, MinInterval, MaxInterval, MinNode, Center - 1, CurrentShift + 1, MaxShift, Func)) // since MaxInterval is changing, we cannot clamp it during recursion.
{
return false;
}
}
}
if (!ScanNodeListWithShrinkingInterval(Root.RootOfOnList, Allocator, MinInterval, MaxInterval, Func))
{
return false;
}
// Re-test the right side: the scans above may have shrunk MaxInterval below Center.
MaxShifted = HighBit(FMath::Min(MaxInterval, MaxNode) << CurrentShift); // since MaxInterval is changing, we cannot clamp it during recursion.
if (MaxShifted)
{
if (CurrentShift == MaxShift)
{
if (!ScanNodeListWithShrinkingInterval(Root.RightChildOrRootOfRightList, Allocator, MinInterval, MaxInterval, Func))
{
return false;
}
}
else
{
if (!OverlappingNodesInIntervalTreeWithShrinkingInterval(Root.RightChildOrRootOfRightList, Allocator, FMath::Max(MinInterval, Center), MaxInterval, Center, MaxNode, CurrentShift + 1, MaxShift, Func))
{
return false;
}
}
}
}
return true;
}
// Marks, in the qword bit array Bits, every granularity-sized page of the query range
// [MinInterval, MaxInterval] that item Index covers. Bit k of the array corresponds to
// the page starting at MinInterval + (k << BytesToBitsShift); MinInterval is expected to
// be page-aligned by the callers.
template<typename TItem>
static void MaskInterval(
TIntervalTreeIndex Index,
TIntervalTreeAllocator<TItem>& Allocator,
uint64 MinInterval,
uint64 MaxInterval,
uint32 BytesToBitsShift,
uint64* Bits
)
{
TItem& Item = Allocator.Get(Index);
uint64 Offset = uint64(GetRequestOffset(Item.OffsetAndPakIndex));
uint64 LastByte = Offset + uint64(Item.Size) - 1;
// Intersect the item's byte range with the query range.
uint64 InterMinInterval = FMath::Max(MinInterval, Offset);
uint64 InterMaxInterval = FMath::Min(MaxInterval, LastByte);
if (InterMinInterval <= InterMaxInterval)
{
// Convert the intersection to inclusive page (bit) indices relative to MinInterval.
uint32 FirstBit = uint32((InterMinInterval - MinInterval) >> BytesToBitsShift);
uint32 LastBit = uint32((InterMaxInterval - MinInterval) >> BytesToBitsShift);
uint32 FirstQWord = FirstBit >> 6;
uint32 LastQWord = LastBit >> 6;
uint32 FirstBitQWord = FirstBit & 63;
uint32 LastBitQWord = LastBit & 63;
if (FirstQWord == LastQWord)
{
// Entire run lies in one qword: intersect the low and high masks.
Bits[FirstQWord] |= ((MAX_uint64 << FirstBitQWord) & (MAX_uint64 >> (63 - LastBitQWord)));
}
else
{
// Partial first qword, full middle qwords, partial last qword.
Bits[FirstQWord] |= (MAX_uint64 << FirstBitQWord);
for (uint32 QWordIndex = FirstQWord + 1; QWordIndex < LastQWord; QWordIndex++)
{
Bits[QWordIndex] = MAX_uint64;
}
Bits[LastQWord] |= (MAX_uint64 >> (63 - LastBitQWord));
}
}
}
// Convenience traversal: visits every item overlapping [MinInterval, MaxInterval] and
// marks the granularity pages each one covers into the Bits qword array (see
// MaskInterval). The visitor never aborts the traversal.
template<typename TItem>
static void OverlappingNodesInIntervalTreeMask(
	TIntervalTreeIndex RootNode,
	TIntervalTreeAllocator<TItem>& Allocator,
	uint64 MinInterval,
	uint64 MaxInterval,
	uint64 MinNode,
	uint64 MaxNode,
	uint32 CurrentShift,
	uint32 MaxShift,
	uint32 BytesToBitsShift,
	uint64* Bits
)
{
	auto MarkPages = [&Allocator, MinInterval, MaxInterval, BytesToBitsShift, Bits](TIntervalTreeIndex Index) -> bool
	{
		MaskInterval(Index, Allocator, MinInterval, MaxInterval, BytesToBitsShift, Bits);
		return true; // never abort; we want every overlapping item
	};
	OverlappingNodesInIntervalTree(RootNode, Allocator, MinInterval, MaxInterval, MinNode, MaxNode, CurrentShift, MaxShift, MarkPages);
}
// Base class for anything that issues read requests to FPakPrecacher. The precacher
// (a friend) fills in the bookkeeping members when a request is made and invokes
// RequestIsComplete() once the requested bytes are available.
class IPakRequestor
{
	friend class FPakPrecacher;
	FJoinedOffsetAndPakIndex OffsetAndPakIndex = MAX_uint64; // MAX_uint64 == invalid; used for searching and filled in when the request is made
	uint64 UniqueID = 0;
	TIntervalTreeIndex InRequestIndex = IntervalTreeInvalidIndex;
public:
	IPakRequestor() = default;
	virtual ~IPakRequestor() = default;
	// Called by the precacher when the request's data is ready; default does nothing.
	virtual void RequestIsComplete()
	{
	}
};
static FPakPrecacher* PakPrecacherSingleton = nullptr;
class FPakPrecacher
{
// Bucket a request lives in within FPakData::InRequests (see AddRequest for the rules).
enum class EInRequestStatus
{
Complete, // every byte of the request is covered by completed cache blocks
Waiting, // some needed bytes have no cache block yet
InFlight, // all remaining bytes are covered by blocks currently being read
Num
};
// Bucket a cache block lives in within FPakData::CacheBlocks.
enum class EBlockStatus
{
InFlight, // the lower-level read for this block has not completed
Complete, // the block's memory holds valid data
Num
};
// The platform file layer that actually services reads.
IPlatformFile* LowerLevel;
// Guards the cached state; see the comment at the "private:" section below.
FCriticalSection CachedFilesScopeLock;
// Joined offset+pak of the most recent read issued; used as the read head when choosing the next block.
FJoinedOffsetAndPakIndex LastReadRequest;
// Source of the per-request UniqueID values keyed into OutstandingRequests.
uint64 NextUniqueID;
// Current and high-water totals of allocated cache-block bytes.
int64 BlockMemory;
int64 BlockMemoryHighWater;
// A contiguous run of pak bytes that is either being read or resident in memory.
// Linked into FPakData::CacheBlocks interval trees through Index / Next.
struct FCacheBlock
{
	// Pak index and starting byte offset packed into one sortable key.
	FJoinedOffsetAndPakIndex OffsetAndPakIndex = 0;
	// Number of bytes this block covers.
	int64 Size = 0;
	// Heap buffer owning the block's bytes; freed by ClearBlock.
	uint8 *Memory = nullptr;
	// Number of live requests overlapping this block; reclaimed when it drops to zero (see RemoveRequest).
	uint32 InRequestRefCount = 0;
	// This block's own slot in the cache-block allocator.
	TIntervalTreeIndex Index = IntervalTreeInvalidIndex;
	// Intrusive list link used by the interval tree.
	TIntervalTreeIndex Next = IntervalTreeInvalidIndex;
	// Blocks start life as in-flight reads.
	EBlockStatus Status = EBlockStatus::InFlight;
};
// Bookkeeping for one outstanding caller request against a single pak file.
struct FPakInRequest
{
	// Pak index and starting byte offset packed into one sortable key.
	FJoinedOffsetAndPakIndex OffsetAndPakIndex = 0;
	// Number of bytes requested.
	int64 Size = 0;
	// The requestor notified via RequestIsComplete(); nulled when the request is cleared.
	IPakRequestor* Owner = nullptr;
	// Key into FPakPrecacher::OutstandingRequests; 0 when dead.
	uint64 UniqueID = 0;
	// This request's own slot in the request allocator.
	TIntervalTreeIndex Index = IntervalTreeInvalidIndex;
	// Intrusive list link used by the interval tree.
	TIntervalTreeIndex Next = IntervalTreeInvalidIndex;
	// Priority bucket (AIOP_*) the request is filed under.
	EAsyncIOPriority Priority = AIOP_MIN;
	// Which FPakData::InRequests bucket currently holds the request.
	EInRequestStatus Status = EInRequestStatus::Waiting;
};
// Per-pak bookkeeping: the async file handle plus the interval trees of requests
// (bucketed by priority and status) and cache blocks, and the precomputed shift values
// that drive interval-tree descent for this pak's size.
struct FPakData
{
IAsyncReadFileHandle* Handle;
int64 TotalSize;
// Largest offset representable at the tree root: MAX_uint64 >> StartShift.
uint64 MaxNode;
// Left-shift that brings the top bit of the largest byte offset to bit 63; descent starts here.
uint32 StartShift;
// Deepest shift: tree leaves resolve to PAK_CACHE_GRANULARITY-sized pages.
uint32 MaxShift;
// log2(PAK_CACHE_GRANULARITY): converts byte offsets to page/bit indices.
uint32 BytesToBitsShift;
FName Name;
// Interval-tree roots for requests, bucketed [priority][status], and for cache blocks [status].
TIntervalTreeIndex InRequests[AIOP_NUM][(int32)EInRequestStatus::Num];
TIntervalTreeIndex CacheBlocks[(int32)EBlockStatus::Num];
FPakData(IAsyncReadFileHandle* InHandle, FName InName, int64 InTotalSize)
: Handle(InHandle)
, TotalSize(InTotalSize)
, StartShift(0)
, MaxShift(0)
, BytesToBitsShift(0)
, Name(InName)
{
check(Handle && TotalSize > 0 && Name != NAME_None);
// All trees start empty.
for (int32 Index = 0; Index < AIOP_NUM; Index++)
{
for (int32 IndexInner = 0; IndexInner < (int32)EInRequestStatus::Num; IndexInner++)
{
InRequests[Index][IndexInner] = IntervalTreeInvalidIndex;
}
}
for (int32 IndexInner = 0; IndexInner < (int32)EBlockStatus::Num; IndexInner++)
{
CacheBlocks[IndexInner] = IntervalTreeInvalidIndex;
}
// Largest valid byte offset, clamped up so even tiny paks span at least one granularity page.
uint64 StartingLastByte = FMath::Max((uint64)TotalSize, (uint64)PAK_CACHE_GRANULARITY);
StartingLastByte--;
{
// StartShift = number of leading zero bits above the top set bit of the last offset.
uint64 LastByte = StartingLastByte;
while (!HighBit(LastByte))
{
LastByte <<= 1;
StartShift++;
}
}
{
// Derive BytesToBitsShift = log2(granularity) and MaxShift so descent stops at page resolution.
uint64 LastByte = StartingLastByte;
uint64 Block = (uint64)PAK_CACHE_GRANULARITY;
while (Block)
{
Block >>= 1;
LastByte >>= 1;
BytesToBitsShift++;
}
BytesToBitsShift--;
check(1 << BytesToBitsShift == PAK_CACHE_GRANULARITY);
MaxShift = StartShift;
while (LastByte)
{
LastByte >>= 1;
MaxShift++;
}
MaxNode = MAX_uint64 >> StartShift;
// The root's range must cover the pak but only just (top bit meaningful on the first test).
check(MaxNode >= StartingLastByte && (MaxNode >> 1) < StartingLastByte);
//UE_LOG(LogTemp, Warning, TEXT("Test %d %llX %llX "), MaxShift, (uint64(PAK_CACHE_GRANULARITY) << (MaxShift + 1)), (uint64(PAK_CACHE_GRANULARITY) << MaxShift));
check(MaxShift && (uint64(PAK_CACHE_GRANULARITY) << (MaxShift + 1)) == 0 && (uint64(PAK_CACHE_GRANULARITY) << MaxShift) != 0);
}
}
};
// Maps pak file name to its index in CachedPakData.
TMap<FName, uint16> CachedPaks;
TArray<FPakData> CachedPakData;
// Allocators backing the request / cache-block interval-tree items.
TIntervalTreeAllocator<FPakInRequest> InRequestAllocator;
TIntervalTreeAllocator<FCacheBlock> CacheBlockAllocator;
// UniqueID -> request allocator index for every live request (removed in ClearRequest).
TMap<uint64, TIntervalTreeIndex> OutstandingRequests;
// Pairs an async read handle issued to the lower level with the cache block it fills.
struct FRequestToLower
{
IAsyncReadRequest* RequestHandle;
TIntervalTreeIndex BlockIndex;
FRequestToLower()
: RequestHandle(nullptr)
, BlockIndex(IntervalTreeInvalidIndex)
{
}
};
// Fixed slots for reads currently issued to the lower level; the active count is limited
// by GPakCache_MaxRequestsToLowerLevel (clamped to PAK_CACHE_MAX_REQUESTS in the ctor).
FRequestToLower RequestsToLower[PAK_CACHE_MAX_REQUESTS];
// NOTE(review): presumably completed lower-level requests awaiting deferred deletion — usage is outside this excerpt; confirm.
TArray<IAsyncReadRequest*> RequestsToDelete;
// NOTE(review): appears to track notification re-entrancy depth — incremented outside this excerpt; confirm.
int32 NotifyRecursion;
// Completion callback handed to the lower level; bound to NewRequestsToLowerComplete in the ctor.
FAsyncFileCallBack CallbackFromLower;
// Simple diagnostic counters.
uint32 Loads;
uint32 Frees;
uint64 LoadSize;
public:
// Creates the singleton if it does not exist yet. The interlocked CAS publishes the new
// instance atomically; the verify asserts that no other thread installed one
// concurrently (the CAS returns the previous value, which must be null).
static void Init(IPlatformFile* InLowerLevel)
{
if (!PakPrecacherSingleton)
{
verify(!FPlatformAtomics::InterlockedCompareExchangePointer((void**)&PakPrecacherSingleton, new FPakPrecacher(InLowerLevel), nullptr));
}
check(PakPrecacherSingleton);
}
// Tears down the singleton. The interlocked CAS claims the pointer (and nulls the
// global) so that exactly one caller performs the teardown even under races. We then
// wait up to 10 seconds for the precacher to drain; if it never goes idle we
// deliberately leak it rather than destroy state that in-flight IO may still touch.
static void Shutdown()
{
	if (PakPrecacherSingleton)
	{
		FPakPrecacher* LocalPakPrecacherSingleton = PakPrecacherSingleton;
		if (LocalPakPrecacherSingleton && LocalPakPrecacherSingleton == FPlatformAtomics::InterlockedCompareExchangePointer((void**)&PakPrecacherSingleton, nullptr, LocalPakPrecacherSingleton))
		{
			double StartTime = FPlatformTime::Seconds();
			while (!LocalPakPrecacherSingleton->IsProbablyIdle())
			{
				FPlatformProcess::SleepNoStats(0.001f);
				if (FPlatformTime::Seconds() - StartTime > 10.0)
				{
					UE_LOG(LogPakFile, Error, TEXT("FPakPrecacher was not idle after 10s, exiting anyway and leaking."));
					return;
				}
			}
			// Bug fix: the successful CAS above already set PakPrecacherSingleton to nullptr,
			// so the previous "delete PakPrecacherSingleton;" deleted a null pointer and
			// always leaked the instance. Delete the claimed local pointer instead.
			delete LocalPakPrecacherSingleton;
		}
	}
	check(!PakPrecacherSingleton);
}
// Accessor for the singleton; Init must have been called first.
static FPakPrecacher& Get()
{
	FPakPrecacher* Singleton = PakPrecacherSingleton;
	check(Singleton);
	return *Singleton;
}
// Wraps the given lower-level platform file. Clamps the "max requests to lower level"
// cvar into [1, NumberOfIOWorkerThreadsToSpawn] (and asserts it fits the fixed
// RequestsToLower array), then binds the completion callback handed to the lower level.
FPakPrecacher(IPlatformFile* InLowerLevel)
: LowerLevel(InLowerLevel)
, LastReadRequest(0)
, NextUniqueID(1)
, BlockMemory(0)
, BlockMemoryHighWater(0)
, NotifyRecursion(0)
, Loads(0)
, Frees(0)
, LoadSize(0)
{
check(LowerLevel && FPlatformProcess::SupportsMultithreading());
GPakCache_MaxRequestsToLowerLevel = FMath::Max(FMath::Min(FPlatformMisc::NumberOfIOWorkerThreadsToSpawn(), GPakCache_MaxRequestsToLowerLevel), 1);
check(GPakCache_MaxRequestsToLowerLevel <= PAK_CACHE_MAX_REQUESTS);
CallbackFromLower =
[this](bool bWasCanceled, IAsyncReadRequest* Request)
{
NewRequestsToLowerComplete(bWasCanceled, Request);
};
}
// Returns the wrapped platform file layer; never null after construction.
IPlatformFile* GetLowerLevelHandle()
{
	IPlatformFile* Result = LowerLevel;
	check(Result);
	return Result;
}
// Whether new precache-priority (AIOP_Precache) requests should be accepted right now,
// as driven by the GPakCache_AcceptPrecacheRequests setting.
bool HasEnoughRoomForPrecache()
{
	return !!GPakCache_AcceptPrecacheRequests;
}
private: // below here we assume CachedFilesScopeLock until we get to the next section
// Extracts the pak index from a joined offset+pak key, asserting it refers to a mounted pak.
uint16 GetRequestPakIndex(FJoinedOffsetAndPakIndex OffsetAndPakIndex)
{
	const uint16 PakIndex = GetRequestPakIndexLow(OffsetAndPakIndex);
	check(PakIndex < CachedPakData.Num());
	return PakIndex;
}
// Returns the joined offset of the first granularity page of request NewIndex that is
// not already covered by a complete or in-flight cache block, or MAX_uint64 if the whole
// request is covered. When ReadHead refers to the same pak, the request is first trimmed
// so only pages at or after the read head are considered (keeps reads moving forward).
FJoinedOffsetAndPakIndex FirstUnfilledBlockForRequest(TIntervalTreeIndex NewIndex, FJoinedOffsetAndPakIndex ReadHead = 0)
{
// CachedFilesScopeLock is locked
FPakInRequest& Request = InRequestAllocator.Get(NewIndex);
uint16 PakIndex = GetRequestPakIndex(Request.OffsetAndPakIndex);
int64 Offset = GetRequestOffset(Request.OffsetAndPakIndex);
int64 Size = Request.Size;
FPakData& Pak = CachedPakData[PakIndex];
check(Offset + Request.Size <= Pak.TotalSize && Size > 0 && Request.Priority >= AIOP_MIN && Request.Priority <= AIOP_MAX && Request.Status != EInRequestStatus::Complete && Request.Owner);
if (PakIndex != GetRequestPakIndex(ReadHead))
{
// this is in a different pak, so we ignore the read head position
ReadHead = 0;
}
if (ReadHead)
{
// trim to the right of the read head
int64 Trim = FMath::Max(Offset, GetRequestOffset(ReadHead)) - Offset;
Offset += Trim;
Size -= Trim;
}
// Function-local scratch; safe only because callers hold CachedFilesScopeLock.
static TArray<uint64> InFlightOrDone;
// Expand the request to whole granularity pages; one bit per page.
int64 FirstByte = AlignDown(Offset, PAK_CACHE_GRANULARITY);
int64 LastByte = Align(Offset + Size, PAK_CACHE_GRANULARITY) - 1;
uint32 NumBits = (PAK_CACHE_GRANULARITY + LastByte - FirstByte) / PAK_CACHE_GRANULARITY;
uint32 NumQWords = (NumBits + 63) >> 6;
InFlightOrDone.Reset();
InFlightOrDone.AddZeroed(NumQWords);
if (NumBits != NumQWords * 64)
{
// Pre-set the padding bits past the end so they never look "unfilled".
uint32 Extras = NumQWords * 64 - NumBits;
InFlightOrDone[NumQWords - 1] = (MAX_uint64 << (64 - Extras));
}
// Mark pages covered by completed blocks...
if (Pak.CacheBlocks[(int32)EBlockStatus::Complete] != IntervalTreeInvalidIndex)
{
OverlappingNodesInIntervalTreeMask<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::Complete],
CacheBlockAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
Pak.BytesToBitsShift,
&InFlightOrDone[0]
);
}
// ...and, for requests still waiting, by in-flight blocks as well.
if (Request.Status == EInRequestStatus::Waiting && Pak.CacheBlocks[(int32)EBlockStatus::InFlight] != IntervalTreeInvalidIndex)
{
OverlappingNodesInIntervalTreeMask<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::InFlight],
CacheBlockAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
Pak.BytesToBitsShift,
&InFlightOrDone[0]
);
}
// Find the first zero bit: that page still needs a read.
for (uint32 Index = 0; Index < NumQWords; Index++)
{
if (InFlightOrDone[Index] != MAX_uint64)
{
uint64 Mask = InFlightOrDone[Index];
int64 FinalOffset = FirstByte + PAK_CACHE_GRANULARITY * 64 * Index;
while (Mask & 1)
{
FinalOffset += PAK_CACHE_GRANULARITY;
Mask >>= 1;
}
return MakeJoinedRequest(PakIndex, FinalOffset);
}
}
return MAX_uint64;
}
// Classifies and files the freshly created request NewIndex. Builds a page-coverage
// bitmask from the pak's completed blocks (bumping their refcounts); if fully covered
// the request is Complete, otherwise the in-flight blocks are consulted the same way to
// decide between InFlight and Waiting. The request is then inserted into its
// [priority][status] interval tree. Returns true iff the request completed immediately
// (owner already notified); a Waiting result kicks StartNextRequest.
bool AddRequest(TIntervalTreeIndex NewIndex)
{
// CachedFilesScopeLock is locked
FPakInRequest& Request = InRequestAllocator.Get(NewIndex);
uint16 PakIndex = GetRequestPakIndex(Request.OffsetAndPakIndex);
int64 Offset = GetRequestOffset(Request.OffsetAndPakIndex);
FPakData& Pak = CachedPakData[PakIndex];
check(Offset + Request.Size <= Pak.TotalSize && Request.Size > 0 && Request.Priority >= AIOP_MIN && Request.Priority <= AIOP_MAX && Request.Status == EInRequestStatus::Waiting && Request.Owner);
// Function-local scratch; safe only because callers hold CachedFilesScopeLock.
static TArray<uint64> InFlightOrDone;
// Expand the request to whole granularity pages; one bit per page.
int64 FirstByte = AlignDown(Offset, PAK_CACHE_GRANULARITY);
int64 LastByte = Align(Offset + Request.Size, PAK_CACHE_GRANULARITY) - 1;
uint32 NumBits = (PAK_CACHE_GRANULARITY + LastByte - FirstByte) / PAK_CACHE_GRANULARITY;
uint32 NumQWords = (NumBits + 63) >> 6;
InFlightOrDone.Reset();
InFlightOrDone.AddZeroed(NumQWords);
if (NumBits != NumQWords * 64)
{
// Pre-set the padding bits past the end so they never look "uncovered".
uint32 Extras = NumQWords * 64 - NumBits;
InFlightOrDone[NumQWords - 1] = (MAX_uint64 << (64 - Extras));
}
// Pass 1: completed blocks. Optimistically mark Complete, take a refcount on every
// overlapping block, then downgrade to Waiting if any page is still uncovered.
if (Pak.CacheBlocks[(int32)EBlockStatus::Complete] != IntervalTreeInvalidIndex)
{
Request.Status = EInRequestStatus::Complete;
OverlappingNodesInIntervalTree<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::Complete],
CacheBlockAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, &Pak, FirstByte, LastByte](TIntervalTreeIndex Index) -> bool
{
CacheBlockAllocator.Get(Index).InRequestRefCount++;
MaskInterval(Index, CacheBlockAllocator, FirstByte, LastByte, Pak.BytesToBitsShift, &InFlightOrDone[0]);
return true;
}
);
for (uint32 Index = 0; Index < NumQWords; Index++)
{
if (InFlightOrDone[Index] != MAX_uint64)
{
Request.Status = EInRequestStatus::Waiting;
break;
}
}
}
// Pass 2: if still not fully covered, see whether in-flight blocks cover the remainder.
if (Request.Status == EInRequestStatus::Waiting)
{
if (Pak.CacheBlocks[(int32)EBlockStatus::InFlight] != IntervalTreeInvalidIndex)
{
Request.Status = EInRequestStatus::InFlight;
OverlappingNodesInIntervalTree<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::InFlight],
CacheBlockAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, &Pak, FirstByte, LastByte](TIntervalTreeIndex Index) -> bool
{
CacheBlockAllocator.Get(Index).InRequestRefCount++;
MaskInterval(Index, CacheBlockAllocator, FirstByte, LastByte, Pak.BytesToBitsShift, &InFlightOrDone[0]);
return true;
}
);
for (uint32 Index = 0; Index < NumQWords; Index++)
{
if (InFlightOrDone[Index] != MAX_uint64)
{
Request.Status = EInRequestStatus::Waiting;
break;
}
}
}
}
else
{
// Sanity check: a Complete request must not overlap any in-flight block.
#if PAK_EXTRA_CHECKS
OverlappingNodesInIntervalTree<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::InFlight],
CacheBlockAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, &Pak, FirstByte, LastByte](TIntervalTreeIndex Index) -> bool
{
check(0); // if we are complete, then how come there are overlapping in flight blocks?
return true;
}
);
#endif
}
// File the request under its final [priority][status] bucket.
{
AddToIntervalTree<FPakInRequest>(
&Pak.InRequests[Request.Priority][(int32)Request.Status],
InRequestAllocator,
NewIndex,
Pak.StartShift,
Pak.MaxShift
);
}
check(&Request == &InRequestAllocator.Get(NewIndex));
if (Request.Status == EInRequestStatus::Complete)
{
NotifyComplete(NewIndex);
return true;
}
else if (Request.Status == EInRequestStatus::Waiting)
{
StartNextRequest();
}
return false;
}
// Releases a cache block: frees its memory (updating the byte accounting and stats),
// detaches its list link, and returns its slot to the cache-block allocator. The block
// must already be unlinked from its interval tree.
void ClearBlock(FCacheBlock &Block)
{
if (Block.Memory)
{
check(Block.Size);
BlockMemory -= Block.Size;
DEC_MEMORY_STAT_BY(STAT_PakCacheMem, Block.Size);
check(BlockMemory >= 0);
FMemory::Free(Block.Memory);
Block.Memory = nullptr;
}
Block.Next = IntervalTreeInvalidIndex;
CacheBlockAllocator.Free(Block.Index);
}
// Final teardown of a request: captures its ID and allocator slot, wipes every field
// (Status left at Num as a "dead" marker), drops it from OutstandingRequests, and frees
// the slot. The request must already be unlinked from its interval tree.
void ClearRequest(FPakInRequest& DoneRequest)
{
uint64 Id = DoneRequest.UniqueID;
TIntervalTreeIndex Index = DoneRequest.Index;
DoneRequest.OffsetAndPakIndex = 0;
DoneRequest.Size = 0;
DoneRequest.Owner = nullptr;
DoneRequest.UniqueID = 0;
DoneRequest.Index = IntervalTreeInvalidIndex;
DoneRequest.Next = IntervalTreeInvalidIndex;
DoneRequest.Priority = AIOP_MIN;
DoneRequest.Status = EInRequestStatus::Num;
verify(OutstandingRequests.Remove(Id) == 1);
InRequestAllocator.Free(Index);
}
// Removes a request entirely: unlinks it from its [priority][status] tree, drops its
// reference on every overlapping completed block (freeing blocks whose refcount hits
// zero) and on every overlapping in-flight block, then clears the request itself.
void RemoveRequest(TIntervalTreeIndex Index)
{
// CachedFilesScopeLock is locked
FPakInRequest& Request = InRequestAllocator.Get(Index);
uint16 PakIndex = GetRequestPakIndex(Request.OffsetAndPakIndex);
int64 Offset = GetRequestOffset(Request.OffsetAndPakIndex);
int64 Size = Request.Size;
FPakData& Pak = CachedPakData[PakIndex];
check(Offset + Request.Size <= Pak.TotalSize && Request.Size > 0 && Request.Priority >= AIOP_MIN && Request.Priority <= AIOP_MAX && int32(Request.Status) >= 0 && int32(Request.Status) < int32(EInRequestStatus::Num));
if (RemoveFromIntervalTree<FPakInRequest>(&Pak.InRequests[Request.Priority][(int32)Request.Status], InRequestAllocator, Index, Pak.StartShift, Pak.MaxShift))
{
// Completed blocks: decrement the refcount and reclaim blocks no request needs anymore
// (returning true tells the traversal to unlink the freed block from the tree).
MaybeRemoveOverlappingNodesInIntervalTree<FCacheBlock>(
&Pak.CacheBlocks[(int32)EBlockStatus::Complete],
CacheBlockAllocator,
Offset,
Offset + Size - 1,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this](TIntervalTreeIndex BlockIndex) -> bool
{
FCacheBlock &Block = CacheBlockAllocator.Get(BlockIndex);
check(Block.InRequestRefCount);
if (!--Block.InRequestRefCount)
{
ClearBlock(Block);
return true;
}
return false;
}
);
// In-flight blocks: only drop the reference; the block stays until its read completes.
OverlappingNodesInIntervalTree<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::InFlight],
CacheBlockAllocator,
Offset,
Offset + Size - 1,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this](TIntervalTreeIndex BlockIndex) -> bool
{
FCacheBlock &Block = CacheBlockAllocator.Get(BlockIndex);
check(Block.InRequestRefCount);
Block.InRequestRefCount--;
return true;
}
);
}
else
{
check(0); // not found
}
ClearRequest(Request);
}
// Notifies the owner of a completed request. Cross-checks that the owner's bookkeeping
// (UniqueID, request index, offset key) still matches this request before calling
// RequestIsComplete(); any mismatch is a hard error.
void NotifyComplete(TIntervalTreeIndex RequestIndex)
{
// CachedFilesScopeLock is locked
FPakInRequest& Request = InRequestAllocator.Get(RequestIndex);
uint16 PakIndex = GetRequestPakIndex(Request.OffsetAndPakIndex);
int64 Offset = GetRequestOffset(Request.OffsetAndPakIndex);
FPakData& Pak = CachedPakData[PakIndex];
check(Offset + Request.Size <= Pak.TotalSize && Request.Size > 0 && Request.Priority >= AIOP_MIN && Request.Priority <= AIOP_MAX && Request.Status == EInRequestStatus::Complete);
check(Request.Owner && Request.UniqueID);
if (Request.Status == EInRequestStatus::Complete && Request.UniqueID == Request.Owner->UniqueID && RequestIndex == Request.Owner->InRequestIndex && Request.OffsetAndPakIndex == Request.Owner->OffsetAndPakIndex)
{
UE_LOG(LogPakFile, Verbose, TEXT("FPakReadRequest[%016llX, %016llX) Notify complete"), Request.OffsetAndPakIndex, Request.OffsetAndPakIndex + Request.Size);
Request.Owner->RequestIsComplete();
return;
}
else
{
check(0); // request should have been found
}
}
// Chooses the next byte range to read from the lower level. Scans priorities from
// AIOP_MAX down; for each, pass 0 prefers offsets at/after LastReadRequest (forward,
// seek-friendly reads) and pass 1 wraps back to the start. Among the waiting requests it
// picks the smallest first-unfilled page offset, shrinking the search bound as better
// candidates are found. Sets OutPriority to the winning priority and returns the joined
// offset, or MAX_uint64 when there is nothing to read.
FJoinedOffsetAndPakIndex GetNextBlock(EAsyncIOPriority& OutPriority)
{
bool bAcceptingPrecacheRequests = HasEnoughRoomForPrecache();
// CachedFilesScopeLock is locked
uint16 BestPakIndex = 0;
FJoinedOffsetAndPakIndex BestNext = MAX_uint64;
OutPriority = AIOP_MIN;
bool bAnyOutstanding = false;
for (EAsyncIOPriority Priority = AIOP_MAX;; Priority = EAsyncIOPriority(int32(Priority) - 1))
{
// Stop before precache-priority work when precache requests are not being accepted
// and higher-priority buckets already had outstanding entries.
if (Priority == AIOP_Precache && !bAcceptingPrecacheRequests && bAnyOutstanding)
{
break;
}
for (int32 Pass = 0; ; Pass++)
{
// Pass 0 starts the scan at the read head; pass 1 restarts from offset 0 / pak 0.
FJoinedOffsetAndPakIndex LocalLastReadRequest = Pass ? 0 : LastReadRequest;
uint16 PakIndex = GetRequestPakIndex(LocalLastReadRequest);
int64 Offset = GetRequestOffset(LocalLastReadRequest);
check(Offset <= CachedPakData[PakIndex].TotalSize);
for (; BestNext == MAX_uint64 && PakIndex < CachedPakData.Num(); PakIndex++)
{
FPakData& Pak = CachedPakData[PakIndex];
if (Pak.InRequests[Priority][(int32)EInRequestStatus::Complete] != IntervalTreeInvalidIndex)
{
bAnyOutstanding = true;
}
if (Pak.InRequests[Priority][(int32)EInRequestStatus::Waiting] != IntervalTreeInvalidIndex)
{
// Only look below the best candidate found so far in this pak.
uint64 Limit = uint64(Pak.TotalSize - 1);
if (BestNext != MAX_uint64 && GetRequestPakIndex(BestNext) == PakIndex)
{
Limit = GetRequestOffset(BestNext) - 1;
}
OverlappingNodesInIntervalTreeWithShrinkingInterval<FPakInRequest>(
Pak.InRequests[Priority][(int32)EInRequestStatus::Waiting],
InRequestAllocator,
uint64(Offset),
Limit,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, &Pak, &BestNext, &BestPakIndex, PakIndex, &Limit, LocalLastReadRequest](TIntervalTreeIndex Index) -> bool
{
FJoinedOffsetAndPakIndex First = FirstUnfilledBlockForRequest(Index, LocalLastReadRequest);
check(LocalLastReadRequest != 0 || First != MAX_uint64); // if there was not trimming, and this thing is in the waiting list, then why was no start block found?
if (First < BestNext)
{
// New best candidate: shrink the traversal's search bound through Limit.
BestNext = First;
BestPakIndex = PakIndex;
Limit = GetRequestOffset(BestNext) - 1;
}
return true; // always have to keep going because we want the smallest one
}
);
}
}
if (!LocalLastReadRequest)
{
break; // this was a full pass
}
}
if (Priority == AIOP_MIN || BestNext != MAX_uint64)
{
OutPriority = Priority;
break;
}
}
return BestNext;
}
// Allocates and launches one new cache block read against the lower-level file system.
// Picks the most urgent unfilled request region via GetNextBlock(), builds a bitmask of
// PAK_CACHE_GRANULARITY-sized pages that are already complete/in-flight versus requested,
// trims the new block to the leading contiguous run of pages that are requested but not
// yet covered, moves any waiting requests the block fully satisfies to InFlight, and
// calls StartBlockTask(). Returns false when there is nothing left to fetch.
bool AddNewBlock()
{
// CachedFilesScopeLock is locked
EAsyncIOPriority RequestPriority;
FJoinedOffsetAndPakIndex BestNext = GetNextBlock(RequestPriority);
if (BestNext == MAX_uint64)
{
return false; // no waiting request anywhere
}
uint16 PakIndex = GetRequestPakIndex(BestNext);
int64 Offset = GetRequestOffset(BestNext);
FPakData& Pak = CachedPakData[PakIndex];
check(Offset < Pak.TotalSize);
// Candidate window: granularity-aligned, starting at (or just before) the target offset,
// capped by the configured maximum lower-level request size and by the end of the pak.
int64 FirstByte = AlignDown(Offset, PAK_CACHE_GRANULARITY);
int64 LastByte = FMath::Min(Align(FirstByte + (GPakCache_MaxRequestSizeToLowerLevelKB * 1024), PAK_CACHE_GRANULARITY) - 1, Pak.TotalSize - 1);
check(FirstByte >= 0 && LastByte < Pak.TotalSize && LastByte >= 0 && LastByte >= FirstByte);
// One bit per granularity page in [FirstByte, LastByte], packed into qwords.
uint32 NumBits = (PAK_CACHE_GRANULARITY + LastByte - FirstByte) / PAK_CACHE_GRANULARITY;
uint32 NumQWords = (NumBits + 63) >> 6;
static TArray<uint64> InFlightOrDone;
InFlightOrDone.Reset();
InFlightOrDone.AddZeroed(NumQWords);
if (NumBits != NumQWords * 64)
{
// Mark the pages past the end of the window as already covered so they can never extend the block.
uint32 Extras = NumQWords * 64 - NumBits;
InFlightOrDone[NumQWords - 1] = (MAX_uint64 << (64 - Extras));
}
// Set bits for pages already held by completed cache blocks...
if (Pak.CacheBlocks[(int32)EBlockStatus::Complete] != IntervalTreeInvalidIndex)
{
OverlappingNodesInIntervalTreeMask<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::Complete],
CacheBlockAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
Pak.BytesToBitsShift,
&InFlightOrDone[0]
);
}
// ...and by blocks whose reads are still in flight.
if (Pak.CacheBlocks[(int32)EBlockStatus::InFlight] != IntervalTreeInvalidIndex)
{
OverlappingNodesInIntervalTreeMask<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::InFlight],
CacheBlockAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
Pak.BytesToBitsShift,
&InFlightOrDone[0]
);
}
// Gather the pages wanted by waiting requests at the chosen priority and at any priority
// within PAK_CACHE_MAX_PRIORITY_DIFFERENCE_MERGE of it, so nearby work merges into one read.
static TArray<uint64> Requested;
Requested.Reset();
Requested.AddZeroed(NumQWords);
for (EAsyncIOPriority Priority = AIOP_MAX;; Priority = EAsyncIOPriority(int32(Priority) - 1))
{
if (Priority + PAK_CACHE_MAX_PRIORITY_DIFFERENCE_MERGE < RequestPriority)
{
break;
}
if (Pak.InRequests[Priority][(int32)EInRequestStatus::Waiting] != IntervalTreeInvalidIndex)
{
OverlappingNodesInIntervalTreeMask<FPakInRequest>(
Pak.InRequests[Priority][(int32)EInRequestStatus::Waiting],
InRequestAllocator,
FirstByte,
LastByte,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
Pak.BytesToBitsShift,
&Requested[0]
);
}
if (Priority == AIOP_MIN)
{
break;
}
}
// Shrink the block to the leading contiguous run of pages that are requested and not
// already covered. A qword equal to MAX_uint64 means the whole word is needed, so keep
// scanning; otherwise the run ends inside this word and Size is the bytes before it plus
// the contiguous low set bits.
int64 Size = PAK_CACHE_GRANULARITY * 64 * NumQWords;
for (uint32 Index = 0; Index < NumQWords; Index++)
{
uint64 NotAlreadyInFlightAndRequested = ((~InFlightOrDone[Index]) & Requested[Index]);
if (NotAlreadyInFlightAndRequested != MAX_uint64)
{
Size = PAK_CACHE_GRANULARITY * 64 * Index;
while (NotAlreadyInFlightAndRequested & 1)
{
Size += PAK_CACHE_GRANULARITY;
NotAlreadyInFlightAndRequested >>= 1;
}
break;
}
}
check(Size > 0 && Size <= (GPakCache_MaxRequestSizeToLowerLevelKB * 1024));
// Clamp to the window end (the last page may extend past LastByte).
Size = FMath::Min(FirstByte + Size, LastByte + 1) - FirstByte;
// Create the new cache block record and register it as in-flight.
TIntervalTreeIndex NewIndex = CacheBlockAllocator.Alloc();
FCacheBlock& Block = CacheBlockAllocator.Get(NewIndex);
Block.Index = NewIndex;
Block.InRequestRefCount = 0;
Block.Memory = nullptr;
Block.OffsetAndPakIndex = MakeJoinedRequest(PakIndex, FirstByte);
Block.Size = Size;
Block.Status = EBlockStatus::InFlight;
AddToIntervalTree<FCacheBlock>(
&Pak.CacheBlocks[(int32)EBlockStatus::InFlight],
CacheBlockAllocator,
NewIndex,
Pak.StartShift,
Pak.MaxShift
);
// Every overlapping waiting request takes a reference on the block; requests whose every
// page is now covered by some block are pulled out of Waiting and collected for promotion.
TArray<TIntervalTreeIndex> Inflights;
for (EAsyncIOPriority Priority = AIOP_MAX;; Priority = EAsyncIOPriority(int32(Priority) - 1))
{
if (Pak.InRequests[Priority][(int32)EInRequestStatus::Waiting] != IntervalTreeInvalidIndex)
{
MaybeRemoveOverlappingNodesInIntervalTree<FPakInRequest>(
&Pak.InRequests[Priority][(int32)EInRequestStatus::Waiting],
InRequestAllocator,
uint64(FirstByte),
uint64(FirstByte + Size - 1),
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, &Block, &Inflights](TIntervalTreeIndex RequestIndex) -> bool
{
Block.InRequestRefCount++;
if (FirstUnfilledBlockForRequest(RequestIndex) == MAX_uint64)
{
InRequestAllocator.Get(RequestIndex).Next = IntervalTreeInvalidIndex;
Inflights.Add(RequestIndex);
return true;
}
return false;
}
);
}
#if PAK_EXTRA_CHECKS
OverlappingNodesInIntervalTree<FPakInRequest>(
Pak.InRequests[Priority][(int32)EInRequestStatus::InFlight],
InRequestAllocator,
uint64(FirstByte),
uint64(FirstByte + Size - 1),
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[](TIntervalTreeIndex) -> bool
{
check(0); // if this is in flight, then why does it overlap my new block
return false;
}
);
OverlappingNodesInIntervalTree<FPakInRequest>(
Pak.InRequests[Priority][(int32)EInRequestStatus::Complete],
InRequestAllocator,
uint64(FirstByte),
uint64(FirstByte + Size - 1),
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[](TIntervalTreeIndex) -> bool
{
check(0); // if this is complete, then why does it overlap my new block
return false;
}
);
#endif
if (Priority == AIOP_MIN)
{
break;
}
}
// Promote the fully-covered requests to the InFlight trees at their own priority.
for (TIntervalTreeIndex Fli : Inflights)
{
FPakInRequest& CompReq = InRequestAllocator.Get(Fli);
CompReq.Status = EInRequestStatus::InFlight;
AddToIntervalTree(&Pak.InRequests[CompReq.Priority][(int32)EInRequestStatus::InFlight], InRequestAllocator, Fli, Pak.StartShift, Pak.MaxShift);
}
StartBlockTask(Block);
return true;
}
// Returns the index of a free slot in RequestsToLower (a slot with no live
// lower-level request handle), or -1 when every permitted slot is busy.
int32 OpenTaskSlot()
{
	for (int32 SlotIndex = 0; SlotIndex < GPakCache_MaxRequestsToLowerLevel; SlotIndex++)
	{
		if (!RequestsToLower[SlotIndex].RequestHandle)
		{
			return SlotIndex;
		}
	}
	return -1;
}
// Returns true if any cached pak holds at least one request at the given
// status, at any priority level.
bool HasRequestsAtStatus(EInRequestStatus Status)
{
	for (uint16 PakIndex = 0; PakIndex < CachedPakData.Num(); PakIndex++)
	{
		FPakData& Pak = CachedPakData[PakIndex];
		EAsyncIOPriority Priority = AIOP_MAX;
		while (true)
		{
			if (Pak.InRequests[Priority][(int32)Status] != IntervalTreeInvalidIndex)
			{
				return true;
			}
			if (Priority == AIOP_MIN)
			{
				break;
			}
			Priority = EAsyncIOPriority(int32(Priority) - 1);
		}
	}
	return false;
}
// A new lower-level read can start only when a task slot is free AND at least
// one request is still waiting for data.
bool CanStartAnotherTask()
{
	return OpenTaskSlot() >= 0 && HasRequestsAtStatus(EInRequestStatus::Waiting);
}
void ClearOldBlockTasks()
{
if (!NotifyRecursion)
{
for (IAsyncReadRequest* Elem : RequestsToDelete)
{
Elem->WaitCompletion();
delete Elem;
}
RequestsToDelete.Empty();
}
}
// Issues the actual lower-level async read for an in-flight cache block,
// occupying one RequestsToLower slot. Updates LastReadRequest (consumed by the
// block-selection scan) and the load counters. Caller must ensure a slot is
// free (AddNewBlock goes through CanStartAnotherTask first).
void StartBlockTask(FCacheBlock& Block)
{
// CachedFilesScopeLock is locked
int32 IndexToFill = OpenTaskSlot();
if (IndexToFill < 0)
{
check(0);
return;
}
EAsyncIOPriority Priority = AIOP_Normal; // the lower level requests are not prioritized at the moment
check(Block.Status == EBlockStatus::InFlight);
UE_LOG(LogPakFile, Verbose, TEXT("FPakReadRequest[%016llX, %016llX) StartBlockTask"), Block.OffsetAndPakIndex, Block.OffsetAndPakIndex + Block.Size);
uint16 PakIndex = GetRequestPakIndex(Block.OffsetAndPakIndex);
FPakData& Pak = CachedPakData[PakIndex];
// Record the block index before issuing the read so the completion callback can find it.
RequestsToLower[IndexToFill].BlockIndex = Block.Index;
check(&CacheBlockAllocator.Get(RequestsToLower[IndexToFill].BlockIndex) == &Block);
RequestsToLower[IndexToFill].RequestHandle = Pak.Handle->ReadRequest(GetRequestOffset(Block.OffsetAndPakIndex), Block.Size, Priority, &CallbackFromLower);
// Remember where the newest read ends; GetNextBlock uses this to bias block selection.
LastReadRequest = Block.OffsetAndPakIndex + Block.Size;
Loads++;
LoadSize += Block.Size;
}
// Handles completion of a lower-level read for one cache block: moves the block
// from the InFlight tree to Complete, takes ownership of the result memory, then
// promotes any in-flight user requests that are now fully satisfied to Complete
// and notifies their owners. Called from NewRequestsToLowerComplete with
// CachedFilesScopeLock held.
void CompleteRequest(bool bWasCanceled, IAsyncReadRequest* Request, TIntervalTreeIndex BlockIndex)
{
FCacheBlock& Block = CacheBlockAllocator.Get(BlockIndex);
uint16 PakIndex = GetRequestPakIndex(Block.OffsetAndPakIndex);
int64 Offset = GetRequestOffset(Block.OffsetAndPakIndex);
FPakData& Pak = CachedPakData[PakIndex];
check(!Block.Memory && Block.Size);
check(!bWasCanceled); // this is doable, but we need to transition requests back to waiting, inflight etc.
if (!RemoveFromIntervalTree<FCacheBlock>(&Pak.CacheBlocks[(int32)EBlockStatus::InFlight], CacheBlockAllocator, Block.Index, Pak.StartShift, Pak.MaxShift))
{
check(0);
}
if (Block.InRequestRefCount == 0 || bWasCanceled)
{
// Nobody wants this data any more; drop the block immediately.
ClearBlock(Block);
}
else
{
// Take the read result and move the memory accounting from the async-file
// stat over to the pak cache stat.
Block.Memory = Request->GetReadResults();
check(Block.Memory && Block.Size);
BlockMemory += Block.Size;
check(BlockMemory > 0);
DEC_MEMORY_STAT_BY(STAT_AsyncFileMemory, Block.Size);
INC_MEMORY_STAT_BY(STAT_PakCacheMem, Block.Size);
if (BlockMemory > BlockMemoryHighWater)
{
BlockMemoryHighWater = BlockMemory;
SET_MEMORY_STAT(STAT_PakCacheHighWater, BlockMemoryHighWater);
#if 1
// Debug spew: print only when the high water mark crosses a new MB boundary.
static int64 LastPrint = 0;
if (BlockMemoryHighWater / 1024 / 1024 != LastPrint)
{
LastPrint = BlockMemoryHighWater / 1024 / 1024;
FPlatformMisc::LowLevelOutputDebugStringf(TEXT("Precache HighWater %dMB\r\n"), int32(LastPrint));
}
#endif
}
Block.Status = EBlockStatus::Complete;
AddToIntervalTree<FCacheBlock>(
&Pak.CacheBlocks[(int32)EBlockStatus::Complete],
CacheBlockAllocator,
Block.Index,
Pak.StartShift,
Pak.MaxShift
);
// Any in-flight request overlapping this block that now has no unfilled pages
// is pulled out of InFlight and collected for promotion to Complete.
TArray<TIntervalTreeIndex> Completeds;
for (EAsyncIOPriority Priority = AIOP_MAX;; Priority = EAsyncIOPriority(int32(Priority) - 1))
{
if (Pak.InRequests[Priority][(int32)EInRequestStatus::InFlight] != IntervalTreeInvalidIndex)
{
MaybeRemoveOverlappingNodesInIntervalTree<FPakInRequest>(
&Pak.InRequests[Priority][(int32)EInRequestStatus::InFlight],
InRequestAllocator,
uint64(Offset),
uint64(Offset + Block.Size - 1),
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, &Completeds](TIntervalTreeIndex RequestIndex) -> bool
{
if (FirstUnfilledBlockForRequest(RequestIndex) == MAX_uint64)
{
InRequestAllocator.Get(RequestIndex).Next = IntervalTreeInvalidIndex;
Completeds.Add(RequestIndex);
return true;
}
return false;
}
);
}
if (Priority == AIOP_MIN)
{
break;
}
}
// Move the finished requests to the Complete trees and tell their owners.
for (TIntervalTreeIndex Comp : Completeds)
{
FPakInRequest& CompReq = InRequestAllocator.Get(Comp);
CompReq.Status = EInRequestStatus::Complete;
AddToIntervalTree(&Pak.InRequests[CompReq.Priority][(int32)EInRequestStatus::Complete], InRequestAllocator, Comp, Pak.StartShift, Pak.MaxShift);
NotifyComplete(Comp); // potentially scary recursion here
}
}
}
// Kicks off one more lower-level read if a slot is free and work is waiting;
// returns whether a new block was actually started.
bool StartNextRequest()
{
	return CanStartAnotherTask() ? AddNewBlock() : false;
}
// Copies the bytes for a completed request out of the overlapping Complete cache
// blocks into the caller-supplied buffer. The live (#else) path leaves blocks and
// the request record in place so the data stays cached for later precache hits;
// the disabled (#if 0) path shows the one-pass consume-and-free variant.
// Asserts every byte of the request was covered. CachedFilesScopeLock must be held.
bool GetCompletedRequestData(FPakInRequest& DoneRequest, uint8* Result)
{
// CachedFilesScopeLock is locked
check(DoneRequest.Status == EInRequestStatus::Complete);
uint16 PakIndex = GetRequestPakIndex(DoneRequest.OffsetAndPakIndex);
int64 Offset = GetRequestOffset(DoneRequest.OffsetAndPakIndex);
int64 Size = DoneRequest.Size;
FPakData& Pak = CachedPakData[PakIndex];
check(Offset + DoneRequest.Size <= Pak.TotalSize && DoneRequest.Size > 0 && DoneRequest.Priority >= AIOP_MIN && DoneRequest.Priority <= AIOP_MAX && DoneRequest.Status == EInRequestStatus::Complete);
int64 BytesCopied = 0;
#if 0 // this path removes the block in one pass, however, this is not what we want because it wrecks precaching, if we change back GetCompletedRequest needs to maybe start a new request and the logic of the IAsyncFile read needs to change
MaybeRemoveOverlappingNodesInIntervalTree<FCacheBlock>(
&Pak.CacheBlocks[(int32)EBlockStatus::Complete],
CacheBlockAllocator,
Offset,
Offset + Size - 1,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, Offset, Size, &BytesCopied, Result, &Pak](TIntervalTreeIndex BlockIndex) -> bool
{
FCacheBlock &Block = CacheBlockAllocator.Get(BlockIndex);
int64 BlockOffset = GetRequestOffset(Block.OffsetAndPakIndex);
check(Block.Memory && Block.Size && BlockOffset >= 0 && BlockOffset + Block.Size <= Pak.TotalSize);
int64 OverlapStart = FMath::Max(Offset, BlockOffset);
int64 OverlapEnd = FMath::Min(Offset + Size, BlockOffset + Block.Size);
check(OverlapEnd > OverlapStart);
BytesCopied += OverlapEnd - OverlapStart;
FMemory::Memcpy(Result + OverlapStart - Offset, Block.Memory + OverlapStart - BlockOffset, OverlapEnd - OverlapStart);
check(Block.InRequestRefCount);
if (!--Block.InRequestRefCount)
{
ClearBlock(Block);
return true;
}
return false;
}
);
if (!RemoveFromIntervalTree<FPakInRequest>(&Pak.InRequests[DoneRequest.Priority][(int32)EInRequestStatus::Complete], InRequestAllocator, DoneRequest.Index, Pak.StartShift, Pak.MaxShift))
{
check(0); // not found
}
ClearRequest(DoneRequest);
#else
// Copy the overlapping slice of every completed block into the result buffer;
// blocks are left alive so the cached data can service later requests.
OverlappingNodesInIntervalTree<FCacheBlock>(
Pak.CacheBlocks[(int32)EBlockStatus::Complete],
CacheBlockAllocator,
Offset,
Offset + Size - 1,
0,
Pak.MaxNode,
Pak.StartShift,
Pak.MaxShift,
[this, Offset, Size, &BytesCopied, Result, &Pak](TIntervalTreeIndex BlockIndex) -> bool
{
FCacheBlock &Block = CacheBlockAllocator.Get(BlockIndex);
int64 BlockOffset = GetRequestOffset(Block.OffsetAndPakIndex);
check(Block.Memory && Block.Size && BlockOffset >= 0 && BlockOffset + Block.Size <= Pak.TotalSize);
int64 OverlapStart = FMath::Max(Offset, BlockOffset);
int64 OverlapEnd = FMath::Min(Offset + Size, BlockOffset + Block.Size);
check(OverlapEnd > OverlapStart);
BytesCopied += OverlapEnd - OverlapStart;
FMemory::Memcpy(Result + OverlapStart - Offset, Block.Memory + OverlapStart - BlockOffset, OverlapEnd - OverlapStart);
return true;
}
);
#endif
check(BytesCopied == Size); // the completed blocks must cover the whole request
return true;
}
///// Below here are the thread entrypoints
// Completion callback from the lower-level async file system (via CallbackFromLower).
// Finds the slot that issued this request, completes its cache block, frees the slot,
// queues the request object for deferred deletion (NotifyRecursion blocks immediate
// deletion because notifications may recurse back in here), and tries to start the
// next read.
void NewRequestsToLowerComplete(bool bWasCanceled, IAsyncReadRequest* Request)
{
FScopeLock Lock(&CachedFilesScopeLock);
ClearOldBlockTasks();
NotifyRecursion++;
for (int32 Index = 0; Index < PAK_CACHE_MAX_REQUESTS; Index++) // we loop over them all in case GPakCache_MaxRequestsToLowerLevel is changing
{
if (RequestsToLower[Index].RequestHandle == Request)
{
CompleteRequest(bWasCanceled, Request, RequestsToLower[Index].BlockIndex);
RequestsToLower[Index].RequestHandle = nullptr;
// Deletion is deferred to ClearOldBlockTasks; we are inside the request's own callback.
RequestsToDelete.Add(Request);
RequestsToLower[Index].BlockIndex = IntervalTreeInvalidIndex;
StartNextRequest();
NotifyRecursion--;
return;
}
}
StartNextRequest();
NotifyRecursion--;
check(0); // not found?
}
public:
// Registers a new read request with the precacher. Lazily opens and registers the
// pak file on first use (returns false if the lower-level open fails). Allocates an
// FPakInRequest, links it to the Owner so completion can be delivered, records it in
// OutstandingRequests, and hands it to AddRequest. The HOT/COLD log reflects
// AddRequest's return value (presumably whether cached data already covers the
// request - AddRequest is defined elsewhere; confirm there). Returns true once queued.
bool QueueRequest(IPakRequestor* Owner, FName File, int64 PakFileSize, int64 Offset, int64 Size, EAsyncIOPriority Priority)
{
check(Owner && File != NAME_None && Size > 0 && Offset >= 0 && Offset < PakFileSize && Priority >= AIOP_MIN && Priority <= AIOP_MAX);
FScopeLock Lock(&CachedFilesScopeLock);
uint16* PakIndexPtr = CachedPaks.Find(File);
if (!PakIndexPtr)
{
// First request against this pak: open an async handle and register it.
check(CachedPakData.Num() < MAX_uint16);
IAsyncReadFileHandle* Handle = LowerLevel->OpenAsyncRead(*File.ToString());
if (!Handle)
{
return false;
}
CachedPakData.Add(FPakData(Handle, File, PakFileSize));
PakIndexPtr = &CachedPaks.Add(File, CachedPakData.Num() - 1);
UE_LOG(LogPakFile, Log, TEXT("New pak file %s added to pak precacher."), *File.ToString());
}
uint16 PakIndex = *PakIndexPtr;
FPakData& Pak = CachedPakData[PakIndex];
check(Pak.Name == File && Pak.TotalSize == PakFileSize && Pak.Handle);
// Build the request record and cross-link it with the owner for later lookup.
TIntervalTreeIndex RequestIndex = InRequestAllocator.Alloc();
FPakInRequest& Request = InRequestAllocator.Get(RequestIndex);
FJoinedOffsetAndPakIndex RequestOffsetAndPakIndex = MakeJoinedRequest(PakIndex, Offset);
Request.OffsetAndPakIndex = RequestOffsetAndPakIndex;
Request.Size = Size;
Request.Priority = Priority;
Request.Status = EInRequestStatus::Waiting;
Request.Owner = Owner;
Request.UniqueID = NextUniqueID++;
Request.Index = RequestIndex;
check(Request.Next == IntervalTreeInvalidIndex);
Owner->OffsetAndPakIndex = Request.OffsetAndPakIndex;
Owner->UniqueID = Request.UniqueID;
Owner->InRequestIndex = RequestIndex;
check(!OutstandingRequests.Contains(Request.UniqueID));
OutstandingRequests.Add(Request.UniqueID, RequestIndex);
if (AddRequest(RequestIndex))
{
UE_LOG(LogPakFile, Verbose, TEXT("FPakReadRequest[%016llX, %016llX) QueueRequest HOT"), RequestOffsetAndPakIndex, RequestOffsetAndPakIndex + Request.Size);
}
else
{
UE_LOG(LogPakFile, Verbose, TEXT("FPakReadRequest[%016llX, %016llX) QueueRequest COLD"), RequestOffsetAndPakIndex, RequestOffsetAndPakIndex + Request.Size);
}
return true;
}
// Copies the data of the owner's completed request into UserSuppliedMemory.
// Returns false when the request is no longer outstanding (it was canceled).
bool GetCompletedRequest(IPakRequestor* Owner, uint8* UserSuppliedMemory)
{
	check(Owner);
	FScopeLock Lock(&CachedFilesScopeLock);
	ClearOldBlockTasks();
	static_assert(IntervalTreeInvalidIndex == 0, "FindRef will return 0 for something not found");
	TIntervalTreeIndex LiveIndex = OutstandingRequests.FindRef(Owner->UniqueID);
	if (!LiveIndex)
	{
		return false; // canceled
	}
	FPakInRequest& Request = InRequestAllocator.Get(LiveIndex);
	check(Owner == Request.Owner && Request.Status == EInRequestStatus::Complete && Request.UniqueID == Request.Owner->UniqueID && LiveIndex == Request.Owner->InRequestIndex && Request.OffsetAndPakIndex == Request.Owner->OffsetAndPakIndex);
	return GetCompletedRequestData(Request, UserSuppliedMemory);
}
// Removes the owner's request from the precacher if it is still outstanding,
// then (in either case) tries to start the next read while the lock is held.
void CancelRequest(IPakRequestor* Owner)
{
	check(Owner);
	FScopeLock Lock(&CachedFilesScopeLock);
	ClearOldBlockTasks();
	static_assert(IntervalTreeInvalidIndex == 0, "FindRef will return 0 for something not found");
	if (TIntervalTreeIndex LiveIndex = OutstandingRequests.FindRef(Owner->UniqueID))
	{
		FPakInRequest& Request = InRequestAllocator.Get(LiveIndex);
		check(Owner == Request.Owner && Request.UniqueID == Request.Owner->UniqueID && LiveIndex == Request.Owner->InRequestIndex && Request.OffsetAndPakIndex == Request.Owner->OffsetAndPakIndex);
		RemoveRequest(LiveIndex);
	}
	StartNextRequest();
}
bool IsProbablyIdle() // nothing to prevent new requests from being made before I return
{
	FScopeLock Lock(&CachedFilesScopeLock);
	if (HasRequestsAtStatus(EInRequestStatus::Waiting))
	{
		return false;
	}
	return !HasRequestsAtStatus(EInRequestStatus::InFlight);
}
// Unregisters a pak file from the precacher. If any cache block or request (at
// any status/priority) still overlaps the pak, the unmount is refused (and a
// check fires); otherwise the async handle is closed and trailing dead entries
// are trimmed from CachedPakData. The four tree scans shared one identical
// visitor body, now factored into a single lambda.
// NOTE(review): the scans query [0, Offset + Pak.TotalSize - 1] where Offset is
// a joined pak/offset value while other scans in this class use pak-local byte
// ranges; the range covers the whole pak either way, but confirm the bounds
// were intended.
void Unmount(FName PakFile)
{
	FScopeLock Lock(&CachedFilesScopeLock);
	uint16* PakIndexPtr = CachedPaks.Find(PakFile);
	if (!PakIndexPtr)
	{
		UE_LOG(LogPakFile, Log, TEXT("Pak file %s was never used, so nothing to unmount"), *PakFile.ToString());
		return; // never used for anything, nothing to check or clean up
	}
	uint16 PakIndex = *PakIndexPtr;
	FPakData& Pak = CachedPakData[PakIndex];
	int64 Offset = MakeJoinedRequest(PakIndex, 0);
	bool bHasOutstandingRequests = false;
	// Shared visitor: any node found in any of the trees means the pak is still busy.
	auto ReportOutstanding = [&bHasOutstandingRequests](TIntervalTreeIndex BlockIndex) -> bool
	{
		check(!"Pak cannot be unmounted with outstanding requests");
		bHasOutstandingRequests = true;
		return false;
	};
	OverlappingNodesInIntervalTree<FCacheBlock>(
		Pak.CacheBlocks[(int32)EBlockStatus::Complete],
		CacheBlockAllocator,
		0,
		Offset + Pak.TotalSize - 1,
		0,
		Pak.MaxNode,
		Pak.StartShift,
		Pak.MaxShift,
		ReportOutstanding
	);
	OverlappingNodesInIntervalTree<FCacheBlock>(
		Pak.CacheBlocks[(int32)EBlockStatus::InFlight],
		CacheBlockAllocator,
		0,
		Offset + Pak.TotalSize - 1,
		0,
		Pak.MaxNode,
		Pak.StartShift,
		Pak.MaxShift,
		ReportOutstanding
	);
	for (EAsyncIOPriority Priority = AIOP_MAX;; Priority = EAsyncIOPriority(int32(Priority) - 1))
	{
		OverlappingNodesInIntervalTree<FPakInRequest>(
			Pak.InRequests[Priority][(int32)EInRequestStatus::InFlight],
			InRequestAllocator,
			0,
			Offset + Pak.TotalSize - 1,
			0,
			Pak.MaxNode,
			Pak.StartShift,
			Pak.MaxShift,
			ReportOutstanding
		);
		OverlappingNodesInIntervalTree<FPakInRequest>(
			Pak.InRequests[Priority][(int32)EInRequestStatus::Complete],
			InRequestAllocator,
			0,
			Offset + Pak.TotalSize - 1,
			0,
			Pak.MaxNode,
			Pak.StartShift,
			Pak.MaxShift,
			ReportOutstanding
		);
		OverlappingNodesInIntervalTree<FPakInRequest>(
			Pak.InRequests[Priority][(int32)EInRequestStatus::Waiting],
			InRequestAllocator,
			0,
			Offset + Pak.TotalSize - 1,
			0,
			Pak.MaxNode,
			Pak.StartShift,
			Pak.MaxShift,
			ReportOutstanding
		);
		if (Priority == AIOP_MIN)
		{
			break;
		}
	}
	if (!bHasOutstandingRequests)
	{
		UE_LOG(LogPakFile, Log, TEXT("Pak file %s removed from pak precacher."), *PakFile.ToString());
		CachedPaks.Remove(PakFile);
		check(Pak.Handle);
		delete Pak.Handle;
		Pak.Handle = nullptr;
		// Trim trailing dead entries; entries before a live one must stay so the
		// pak indices stored in joined offsets remain valid.
		int32 NumToTrim = 0;
		for (int32 Index = CachedPakData.Num() - 1; Index >= 0; Index--)
		{
			if (!CachedPakData[Index].Handle)
			{
				NumToTrim++;
			}
			else
			{
				break;
			}
		}
		if (NumToTrim)
		{
			CachedPakData.RemoveAt(CachedPakData.Num() - NumToTrim, NumToTrim);
		}
	}
	else
	{
		UE_LOG(LogPakFile, Log, TEXT("Pak file %s was NOT removed from pak precacher because it had outstanding requests."), *PakFile.ToString());
	}
}
// these are not threadsafe and should only be used for synthetic testing
// Total bytes issued to the lower-level file system (accumulated in StartBlockTask).
uint64 GetLoadSize()
{
return LoadSize;
}
// Number of lower-level read requests issued (incremented in StartBlockTask).
uint32 GetLoads()
{
return Loads;
}
// Number of cache blocks freed; WaitPrecache asserts this stays constant during its wait.
uint32 GetFrees()
{
return Frees;
}
// Debug helper: waits for the precacher to look idle, then logs whether any
// requests remain at any status along with the total cached block memory.
void DumpBlocks()
{
	while (!FPakPrecacher::Get().IsProbablyIdle())
	{
		QUICK_SCOPE_CYCLE_COUNTER(STAT_WaitDumpBlocks);
		FPlatformProcess::SleepNoStats(0.001f);
	}
	FScopeLock Lock(&CachedFilesScopeLock);
	const bool bOutstanding = HasRequestsAtStatus(EInRequestStatus::Waiting) || HasRequestsAtStatus(EInRequestStatus::InFlight) || HasRequestsAtStatus(EInRequestStatus::Complete);
	if (bOutstanding)
	{
		UE_LOG(LogPakFile, Log, TEXT("PakCache has outstanding requests with %llu total memory."), BlockMemory);
	}
	else
	{
		UE_LOG(LogPakFile, Log, TEXT("PakCache has no outstanding requests with %llu total memory."), BlockMemory);
	}
}
};
// Console command body for "pak.WaitPrecache": spins until the precacher is
// idle, then logs blocks read, bytes and effective bandwidth over the wait.
// Synthetic-test helper; the Frees check asserts nothing was discarded
// while waiting. Timing math now stays in double instead of narrowing
// FPlatformTime::Seconds() results to float.
static void WaitPrecache(const TArray<FString>& Args)
{
	uint32 Frees = FPakPrecacher::Get().GetFrees();
	uint32 Loads = FPakPrecacher::Get().GetLoads();
	uint64 LoadSize = FPakPrecacher::Get().GetLoadSize();
	double StartTime = FPlatformTime::Seconds();
	while (!FPakPrecacher::Get().IsProbablyIdle())
	{
		check(Frees == FPakPrecacher::Get().GetFrees()); // otherwise we are discarding things, which is not what we want for this synthetic test
		QUICK_SCOPE_CYCLE_COUNTER(STAT_WaitPrecache);
		FPlatformProcess::SleepNoStats(0.001f);
	}
	// Deltas over the waiting period.
	Loads = FPakPrecacher::Get().GetLoads() - Loads;
	LoadSize = FPakPrecacher::Get().GetLoadSize() - LoadSize;
	double TimeSpent = FPlatformTime::Seconds() - StartTime;
	double LoadSizeMB = double(LoadSize) / (1024.0 * 1024.0);
	double MBs = LoadSizeMB / TimeSpent;
	UE_LOG(LogPakFile, Log, TEXT("Loaded %4d blocks (align %4dKB) totalling %7.2fMB in %4.2fs = %6.2fMB/s"), Loads, PAK_CACHE_GRANULARITY / 1024, LoadSizeMB, TimeSpent, MBs);
}
// Registers the "pak.WaitPrecache" console command (see WaitPrecache above).
static FAutoConsoleCommand WaitPrecacheCmd(
TEXT("pak.WaitPrecache"),
TEXT("Debug command to wait on the pak precache."),
FConsoleCommandWithArgsDelegate::CreateStatic(&WaitPrecache)
);
// Console command body for "pak.DumpBlocks": forwards to the precacher singleton.
static void DumpBlocks(const TArray<FString>& Args)
{
FPakPrecacher::Get().DumpBlocks();
}
// Registers the "pak.DumpBlocks" console command (see DumpBlocks above).
static FAutoConsoleCommand DumpBlocksCmd(
TEXT("pak.DumpBlocks"),
TEXT("Debug command to spew the outstanding blocks."),
FConsoleCommandWithArgsDelegate::CreateStatic(&DumpBlocks)
);
// Guards WaitEvent creation/triggering in the read-request classes below.
static FCriticalSection FPakReadRequestEvent;
class FPakAsyncReadFileHandle;
// Processing layers applied to raw pak data: uncompress(unencrypt(checksig())))
// A single read serviced by FPakPrecacher. Implements IAsyncReadRequest for the
// caller and IPakRequestor so the precacher can deliver completion. Internal
// requests (bInternalRequest) are issued by FPakAsyncReadFileHandle to fetch raw
// compressed/encrypted/signed blocks and always want the memory back.
class FPakReadRequest : public IAsyncReadRequest, public IPakRequestor
{
int64 Offset; // absolute offset within the pak file
int64 BytesToRead;
FEvent* WaitEvent; // created on demand in WaitCompletionImpl, guarded by FPakReadRequestEvent
int32 BlockIndex; // compression block index for internal requests, else -1
EAsyncIOPriority Priority;
bool bRequestOutstanding;
bool bNeedsRemoval; // true until canceled; destructor must unregister from the precacher
bool bInternalRequest; // we are using this internally to deal with compressed, encrypted and signed, so we want the memory back from a precache request.
public:
// Queues the read immediately; if the precacher cannot service it (pak failed
// to open), the request completes at once with no data.
FPakReadRequest(FName InPakFile, int64 PakFileSize, FAsyncFileCallBack* CompleteCallback, int64 InOffset, int64 InBytesToRead, EAsyncIOPriority InPriority, uint8* UserSuppliedMemory, bool bInInternalRequest = false, int32 InBlockIndex = -1)
: IAsyncReadRequest(CompleteCallback, false, UserSuppliedMemory)
, Offset(InOffset)
, BytesToRead(InBytesToRead)
, WaitEvent(nullptr)
, BlockIndex(InBlockIndex)
, Priority(InPriority)
, bRequestOutstanding(true)
, bNeedsRemoval(true)
, bInternalRequest(bInInternalRequest)
{
check(Offset >= 0 && BytesToRead > 0);
check(bInternalRequest || Priority > AIOP_Precache || !bUserSuppliedMemory); // you never get bits back from a precache request, so why supply memory?
if (!FPakPrecacher::Get().QueueRequest(this, InPakFile, PakFileSize, Offset, BytesToRead, Priority))
{
bRequestOutstanding = false;
SetComplete();
}
}
// Unregisters from the precacher if still needed and frees result memory the
// caller never took (possible after a cancel race).
virtual ~FPakReadRequest()
{
if (bNeedsRemoval)
{
FPakPrecacher::Get().CancelRequest(this);
}
if (Memory && !bUserSuppliedMemory)
{
// this can happen with a race on cancel, it is ok, they didn't take the memory, free it now
DEC_MEMORY_STAT_BY(STAT_AsyncFileMemory, BytesToRead);
FMemory::Free(Memory);
}
Memory = nullptr;
}
// IAsyncReadRequest Interface
// Blocks until RequestIsComplete triggers the event (or the time limit passes).
// The event is created under FPakReadRequestEvent only if the request is still
// outstanding at that point.
// NOTE(review): on the timed path the event is returned to the pool without
// re-checking bRequestOutstanding under the lock; a completion racing with a
// timeout could trigger a recycled event - confirm against later engine fixes.
virtual void WaitCompletionImpl(float TimeLimitSeconds) override
{
if (bRequestOutstanding)
{
{
FScopeLock Lock(&FPakReadRequestEvent);
if (bRequestOutstanding)
{
check(!WaitEvent);
WaitEvent = FPlatformProcess::GetSynchEventFromPool(true);
}
}
if (WaitEvent)
{
if (TimeLimitSeconds == 0.0f)
{
WaitEvent->Wait();
check(!bRequestOutstanding);
}
else
{
WaitEvent->Wait(TimeLimitSeconds * 1000.0f);
}
FPlatformProcess::ReturnSynchEventToPool(WaitEvent);
WaitEvent = nullptr;
}
}
}
// Cancels with the precacher and completes immediately; bNeedsRemoval is
// cleared so the destructor does not cancel a second time.
virtual void CancelImpl() override
{
check(!WaitEvent); // you canceled from a different thread that you waited from
FPakPrecacher::Get().CancelRequest(this);
bNeedsRemoval = false;
if (bRequestOutstanding)
{
bRequestOutstanding = false;
SetComplete();
}
check(!WaitEvent); // you canceled from a different thread that you waited from
}
// IPakRequestor Interface
// Called by the precacher when the data is ready. Pure precache requests (and
// canceled ones) take no memory; otherwise allocate (or use the user buffer)
// and copy the bytes out, then complete and wake any waiter.
virtual void RequestIsComplete() override
{
check(bRequestOutstanding);
if (!bCanceled && (bInternalRequest || Priority > AIOP_Precache))
{
if (!bUserSuppliedMemory)
{
check(!Memory);
Memory = (uint8*)FMemory::Malloc(BytesToRead);
INC_MEMORY_STAT_BY(STAT_AsyncFileMemory, BytesToRead);
}
else
{
check(Memory);
}
if (!FPakPrecacher::Get().GetCompletedRequest(this, Memory))
{
check(bCanceled);
}
}
SetComplete();
FScopeLock Lock(&FPakReadRequestEvent);
bRequestOutstanding = false;
if (WaitEvent)
{
WaitEvent->Trigger();
}
}
// Compression block index for internal requests (-1 for ordinary reads).
int32 GetBlockIndex()
{
return BlockIndex;
}
};
class FPakSizeRequest : public IAsyncReadRequest
{
public:
FPakSizeRequest(FAsyncFileCallBack* CompleteCallback, int64 InFileSize)
: IAsyncReadRequest(CompleteCallback, true, nullptr)
{
Size = InFileSize;
SetComplete();
}
virtual void WaitCompletionImpl(float TimeLimitSeconds) override
{
}
virtual void CancelImpl()
{
}
};
// Per-compression-block bookkeeping for FPakAsyncReadFileHandle: the raw read,
// its buffers before and after CPU processing, and a refcount of overlapping
// user requests. (RawRequest now initialized with nullptr, consistent with the
// other pointer members.)
struct FCachedAsyncBlock
{
	FPakReadRequest* RawRequest;
	uint8* Raw; // compressed, encrypted and/or signature not checked
	uint8* Processed; // decompressed, deencrypted and signature checked
	FGraphEventRef CPUWorkGraphEvent; // completion event of the decompress/decrypt task
	int32 RawSize; // size of the raw (compressed) data
	int32 ProcessedSize; // size after processing
	int32 RefCount; // number of live user requests touching this block
	bool bInFlight; // a raw read has been started for this block
	FCachedAsyncBlock()
		: RawRequest(nullptr)
		, Raw(nullptr)
		, Processed(nullptr)
		, RawSize(0)
		, ProcessedSize(0)
		, RefCount(0)
		, bInFlight(false)
	{
	}
};
// A read against a compressed/encrypted pak entry. FPakAsyncReadFileHandle (the
// Owner) services it by issuing per-block raw reads plus CPU work; the methods
// declared at the bottom are defined later in this file, next to the owner.
class FPakProcessedReadRequest : public IAsyncReadRequest
{
FPakAsyncReadFileHandle* Owner;
int64 Offset; // offset within the uncompressed file
int64 BytesToRead;
FEvent* WaitEvent; // created on demand in WaitCompletionImpl, guarded by FPakReadRequestEvent
FThreadSafeCounter CompleteRace; // this is used to resolve races with natural completion and cancel; there can be only one.
EAsyncIOPriority Priority;
bool bRequestOutstanding;
bool bHasCancelled;
bool bHasCompleted;
public:
// Does not start any IO itself; the owner wires up the needed blocks after
// constructing this (see FPakAsyncReadFileHandle::ReadRequest).
FPakProcessedReadRequest(FPakAsyncReadFileHandle* InOwner, FAsyncFileCallBack* CompleteCallback, int64 InOffset, int64 InBytesToRead, EAsyncIOPriority InPriority, uint8* UserSuppliedMemory)
: IAsyncReadRequest(CompleteCallback, false, UserSuppliedMemory)
, Owner(InOwner)
, Offset(InOffset)
, BytesToRead(InBytesToRead)
, WaitEvent(nullptr)
, Priority(InPriority)
, bRequestOutstanding(true)
, bHasCancelled(false)
, bHasCompleted(false)
{
check(Offset >= 0 && BytesToRead > 0);
check(Priority > AIOP_Precache || !bUserSuppliedMemory); // you never get bits back from a precache request, so why supply memory?
}
// Releases block references held on our behalf and frees memory the caller
// never took (possible after a cancel race).
virtual ~FPakProcessedReadRequest()
{
DoneWithRawRequests();
if (Memory && !bUserSuppliedMemory)
{
// this can happen with a race on cancel, it is ok, they didn't take the memory, free it now
DEC_MEMORY_STAT_BY(STAT_AsyncFileMemory, BytesToRead);
FMemory::Free(Memory);
}
Memory = nullptr;
}
// IAsyncReadRequest Interface
// Blocks until RequestIsComplete triggers the event (or the time limit passes);
// same double-checked event creation pattern as FPakReadRequest.
virtual void WaitCompletionImpl(float TimeLimitSeconds) override
{
if (bRequestOutstanding)
{
{
FScopeLock Lock(&FPakReadRequestEvent);
if (bRequestOutstanding)
{
check(!WaitEvent);
WaitEvent = FPlatformProcess::GetSynchEventFromPool(true);
}
}
if (WaitEvent)
{
if (TimeLimitSeconds == 0.0f)
{
WaitEvent->Wait();
check(!bRequestOutstanding);
}
else
{
WaitEvent->Wait(TimeLimitSeconds * 1000.0f);
}
FPlatformProcess::ReturnSynchEventToPool(WaitEvent);
WaitEvent = nullptr;
}
}
}
// Cancels the outstanding raw block reads and completes immediately.
virtual void CancelImpl() override
{
check(!WaitEvent); // you canceled from a different thread that you waited from
if (bRequestOutstanding)
{
CancelRawRequests();
bRequestOutstanding = false;
SetComplete();
}
check(!WaitEvent); // you canceled from a different thread that you waited from
}
// Called by the owner once every needed block is processed; gathers the result
// bytes (unless precache-only or canceled), completes, and wakes any waiter.
void RequestIsComplete()
{
check(bRequestOutstanding);
if (!bCanceled && Priority > AIOP_Precache)
{
GatherResults();
}
SetComplete();
FScopeLock Lock(&FPakReadRequestEvent);
bRequestOutstanding = false;
if (WaitEvent)
{
WaitEvent->Trigger();
}
}
// Defined after FPakAsyncReadFileHandle, which these need to see.
void GatherResults();
void DoneWithRawRequests();
bool CheckCompletion(const FPakEntry& FileEntry, int32 BlockIndex, TArray<FCachedAsyncBlock>& Blocks);
void CancelRawRequests();
};
// Console-configurable task/thread priority for FAsyncIOCPUWorkTask below.
FAutoConsoleTaskPriority CPrio_AsyncIOCPUWorkTaskPriority(
TEXT("TaskGraph.TaskPriorities.AsyncIOCPUWork"),
TEXT("Task and thread priority for decompression, decryption and signature checking of async IO from a pak file."),
ENamedThreads::BackgroundThreadPriority, // if we have background priority task threads, then use them...
ENamedThreads::NormalTaskPriority, // .. at normal task priority
ENamedThreads::NormalTaskPriority // if we don't have background threads, then use normal priority threads at normal task priority instead
);
// Task-graph task that performs the CPU side of an async pak read (decompress /
// decrypt / signature check) for one block of a FPakAsyncReadFileHandle.
// DoTask is defined later, after the owner class. (Fixed the stat identifier
// typo: FsyncIOCPUWorkTask -> FAsyncIOCPUWorkTask.)
class FAsyncIOCPUWorkTask
{
	FPakAsyncReadFileHandle& Owner;
	int32 BlockIndex; // which compression block of the owner this task processes
public:
	FORCEINLINE FAsyncIOCPUWorkTask(FPakAsyncReadFileHandle& InOwner, int32 InBlockIndex)
		: Owner(InOwner)
		, BlockIndex(InBlockIndex)
	{
	}
	static FORCEINLINE TStatId GetStatId()
	{
		RETURN_QUICK_DECLARE_CYCLE_STAT(FAsyncIOCPUWorkTask, STATGROUP_TaskGraphTasks);
	}
	// Run on background task threads when available (see CPrio_ above).
	static FORCEINLINE ENamedThreads::Type GetDesiredThread()
	{
		return CPrio_AsyncIOCPUWorkTaskPriority.Get();
	}
	FORCEINLINE static ESubsequentsMode::Type GetSubsequentsMode()
	{
		return ESubsequentsMode::TrackSubsequents;
	}
	void DoTask(ENamedThreads::Type CurrentThread, const FGraphEventRef& MyCompletionGraphEvent);
};
class FPakAsyncReadFileHandle final : public IAsyncReadFileHandle
{
FName PakFile;
int64 PakFileSize;
int64 OffsetInPak;
int64 CompressedFileSize;
int64 UncompressedFileSize;
const FPakEntry* FileEntry;
TSet<FPakProcessedReadRequest*> LiveRequests;
TArray<FCachedAsyncBlock> Blocks;
FAsyncFileCallBack ReadCallbackFunction;
FCriticalSection CriticalSection;
int32 NumLiveRawRequests;
public:
// Captures the entry's location inside the pak: OffsetInPak points just past the
// entry's serialized header. For compressed entries, computes the compressed
// span from the last compression block and allocates one FCachedAsyncBlock per
// compression block; for uncompressed entries, compressed size == uncompressed size.
FPakAsyncReadFileHandle(const FPakEntry* InFileEntry, FPakFile* InPakFile, const TCHAR* Filename)
: PakFile(InPakFile->GetFilenameName())
, PakFileSize(InPakFile->TotalSize())
, FileEntry(InFileEntry)
, NumLiveRawRequests(0)
{
OffsetInPak = FileEntry->Offset + FileEntry->GetSerializedSize(InPakFile->GetInfo().Version);
UncompressedFileSize = FileEntry->UncompressedSize;
CompressedFileSize = FileEntry->UncompressedSize; // default for uncompressed entries; overwritten below when compressed
if (FileEntry->CompressionMethod != COMPRESS_None && UncompressedFileSize)
{
check(FileEntry->CompressionBlocks.Num());
CompressedFileSize = FileEntry->CompressionBlocks.Last().CompressedEnd - OffsetInPak;
check(CompressedFileSize > 0);
const int32 CompressionBlockSize = FileEntry->CompressionBlockSize;
check((UncompressedFileSize + CompressionBlockSize - 1) / CompressionBlockSize == FileEntry->CompressionBlocks.Num());
Blocks.AddDefaulted(FileEntry->CompressionBlocks.Num());
}
UE_LOG(LogPakFile, Verbose, TEXT("FPakPlatformFile::OpenAsyncRead[%016llX, %016llX) %s"), OffsetInPak, OffsetInPak + CompressedFileSize, Filename);
check(PakFileSize > 0 && OffsetInPak + CompressedFileSize <= PakFileSize && OffsetInPak >= 0);
// Bound once so every raw block read shares the same completion callback.
ReadCallbackFunction = [this](bool bWasCancelled, IAsyncReadRequest* Request)
{
RawReadCallback(bWasCancelled, Request);
};
}
// All user requests and raw block reads must be finished and deleted first.
~FPakAsyncReadFileHandle()
{
check(!LiveRequests.Num()); // must delete all requests before you delete the handle
check(!NumLiveRawRequests); // must delete all requests before you delete the handle
}
// The uncompressed size is already known, so a size request completes immediately.
virtual IAsyncReadRequest* SizeRequest(FAsyncFileCallBack* CompleteCallback = nullptr) override
{
return new FPakSizeRequest(CompleteCallback, UncompressedFileSize);
}
// Creates a read over [Offset, Offset + BytesToRead) of the uncompressed file.
// Uncompressed entries go straight to a precacher-backed FPakReadRequest.
// Compressed entries get an FPakProcessedReadRequest: each touched compression
// block is ref-counted, raw reads are started for blocks not yet in flight, and
// if every block is already processed the request completes immediately.
// (Fixed a missing semicolon after the LiveRequests check, and initialized Result.)
virtual IAsyncReadRequest* ReadRequest(int64 Offset, int64 BytesToRead, EAsyncIOPriority Priority = AIOP_Normal, FAsyncFileCallBack* CompleteCallback = nullptr, uint8* UserSuppliedMemory = nullptr) override
{
	if (BytesToRead == MAX_int64)
	{
		// Read to the end of the file.
		BytesToRead = UncompressedFileSize - Offset;
	}
	check(Offset + BytesToRead <= UncompressedFileSize && Offset >= 0);
	if (FileEntry->CompressionMethod == COMPRESS_None)
	{
		check(Offset + BytesToRead + OffsetInPak <= PakFileSize);
		check(!Blocks.Num());
		return new FPakReadRequest(PakFile, PakFileSize, CompleteCallback, OffsetInPak + Offset, BytesToRead, Priority, UserSuppliedMemory);
	}
	bool bAnyUnfinished = false;
	FPakProcessedReadRequest* Result = nullptr;
	{
		FScopeLock ScopedLock(&CriticalSection);
		check(Blocks.Num());
		int32 FirstBlock = Offset / FileEntry->CompressionBlockSize;
		int32 LastBlock = (Offset + BytesToRead - 1) / FileEntry->CompressionBlockSize;
		check(FirstBlock >= 0 && FirstBlock < Blocks.Num() && LastBlock >= 0 && LastBlock < Blocks.Num() && FirstBlock <= LastBlock);
		Result = new FPakProcessedReadRequest(this, CompleteCallback, Offset, BytesToRead, Priority, UserSuppliedMemory);
		for (int32 BlockIndex = FirstBlock; BlockIndex <= LastBlock; BlockIndex++)
		{
			FCachedAsyncBlock& Block = Blocks[BlockIndex];
			Block.RefCount++;
			if (!Block.bInFlight)
			{
				StartBlock(BlockIndex, Priority);
				bAnyUnfinished = true;
			}
			if (!Block.Processed)
			{
				bAnyUnfinished = true;
			}
		}
		if (Result)
		{
			check(!LiveRequests.Contains(Result));
			LiveRequests.Add(Result);
		}
		if (!bAnyUnfinished)
		{
			// All blocks already processed; complete while still holding the lock.
			Result->RequestIsComplete();
		}
	}
	return Result;
}
/** Marks the given compression block as in-flight and issues the raw read for its compressed bytes. */
void StartBlock(int32 BlockIndex, EAsyncIOPriority Priority)
{
	FCachedAsyncBlock& BlockToStart = Blocks[BlockIndex];
	BlockToStart.bInFlight = true;
	// The block must be completely idle before a raw read may be started for it.
	check(!BlockToStart.RawRequest && !BlockToStart.Processed && !BlockToStart.Raw && !BlockToStart.CPUWorkGraphEvent.GetReference() && !BlockToStart.ProcessedSize && !BlockToStart.RawSize);
	const int64 CompressedStart = FileEntry->CompressionBlocks[BlockIndex].CompressedStart;
	const int64 CompressedEnd = FileEntry->CompressionBlocks[BlockIndex].CompressedEnd;
	BlockToStart.RawSize = CompressedEnd - CompressedStart;
	NumLiveRawRequests++;
	// The internal raw request reports back through ReadCallbackFunction, tagged with BlockIndex.
	BlockToStart.RawRequest = new FPakReadRequest(PakFile, PakFileSize, &ReadCallbackFunction, CompressedStart, BlockToStart.RawSize, Priority, nullptr, true, BlockIndex);
}
// Completion callback for a raw (compressed) block read. On success it takes
// ownership of the raw buffer, computes the block's uncompressed size, and
// dispatches a task-graph job to decompress it; on cancel it just zeroes RawSize.
// Deliberately does not take CriticalSection (see note below).
void RawReadCallback(bool bWasCancelled, IAsyncReadRequest* InRequest)
{
FPakReadRequest* Request = static_cast<FPakReadRequest*>(InRequest);
// Causes a deadlock, hopefully not needed as we are only referencing the block.
// Potential problem is with cancel
// FScopeLock ScopedLock(&CriticalSection);
int32 BlockIndex = Request->GetBlockIndex();
check(BlockIndex >= 0 && BlockIndex < Blocks.Num());
FCachedAsyncBlock& Block = Blocks[BlockIndex];
check((Block.RawRequest == Request || (!Block.RawRequest && Block.RawSize)) // we still might be in the constructor so the assignment hasn't happened yet
&& !Block.Processed && !Block.Raw);
if (bWasCancelled)
{
// Cancelled: clear RawSize so the block no longer looks like it has a pending payload.
Block.RawSize = 0;
}
else
{
Block.Raw = Request->GetReadResults();
check(Block.Raw);
// Every block decompresses to a full CompressionBlockSize except possibly the last one,
// which holds the remainder of the uncompressed size.
Block.ProcessedSize = FileEntry->CompressionBlockSize;
if (BlockIndex == Blocks.Num() - 1)
{
Block.ProcessedSize = FileEntry->UncompressedSize % FileEntry->CompressionBlockSize;
if (!Block.ProcessedSize)
{
Block.ProcessedSize = FileEntry->CompressionBlockSize; // last block was a full block
}
}
check(Block.ProcessedSize);
// Hand decompression to the task graph; DoProcessing(BlockIndex) runs on a worker thread.
Block.CPUWorkGraphEvent = TGraphTask<FAsyncIOCPUWorkTask>::CreateTask().ConstructAndDispatchWhenReady(*this, BlockIndex);
}
}
// Worker-thread decompression of one block. Decompresses outside the lock, then
// under the lock retires the raw IO request and either publishes the processed
// buffer (completing any read requests this block was the last piece of) or, if
// every referencing request was cancelled meanwhile (RefCount == 0), discards it.
void DoProcessing(int32 BlockIndex)
{
check(BlockIndex >= 0 && BlockIndex < Blocks.Num());
FCachedAsyncBlock& Block = Blocks[BlockIndex];
check(Block.Raw && Block.RawSize && !Block.Processed);
check(Block.ProcessedSize);
INC_MEMORY_STAT_BY(STAT_AsyncFileMemory, Block.ProcessedSize);
uint8* Output = (uint8*)FMemory::Malloc(Block.ProcessedSize);
FCompression::UncompressMemory((ECompressionFlags)FileEntry->CompressionMethod, Output, Block.ProcessedSize, Block.Raw, Block.RawSize, false, FPlatformMisc::GetPlatformCompression()->GetCompressionBitWindow());
// The compressed payload is no longer needed once decompression has run.
FMemory::Free(Block.Raw);
Block.Raw = nullptr;
DEC_MEMORY_STAT_BY(STAT_AsyncFileMemory, Block.RawSize);
Block.RawSize = 0;
{
FScopeLock ScopedLock(&CriticalSection);
if (Block.RawRequest)
{
// Retire the raw IO request now that its data has been consumed.
Block.RawRequest->WaitCompletion();
delete Block.RawRequest;
Block.RawRequest = nullptr;
NumLiveRawRequests--;
}
if (Block.RefCount > 0)
{
// Publish the result and wake any request for which this was the last missing block.
Block.Processed = Output;
for (FPakProcessedReadRequest* Req : LiveRequests)
{
if (Req->CheckCompletion(*FileEntry, BlockIndex, Blocks))
{
Req->RequestIsComplete();
}
}
}
else
{
// must have been canceled, clean up
FMemory::Free(Output);
Output = nullptr;
check(Block.ProcessedSize);
DEC_MEMORY_STAT_BY(STAT_AsyncFileMemory, Block.ProcessedSize);
Block.ProcessedSize = 0;
Block.CPUWorkGraphEvent = nullptr;
Block.bInFlight = false;
}
}
}
void ClearBlock(FCachedAsyncBlock& Block)
{
check(!Block.RawRequest);
Block.RawRequest = nullptr;
check(!Block.CPUWorkGraphEvent.GetReference() || Block.CPUWorkGraphEvent->IsComplete());
Block.CPUWorkGraphEvent = nullptr;
if (Block.Raw)
{
// this was a cancel, clean it up now
FMemory::Free(Block.Raw);
Block.Raw = nullptr;
check(Block.RawSize);
DEC_MEMORY_STAT_BY(STAT_AsyncFileMemory, Block.RawSize);
}
Block.RawSize = 0;
if (Block.Processed)
{
FMemory::Free(Block.Processed);
Block.Processed = nullptr;
check(Block.ProcessedSize);
DEC_MEMORY_STAT_BY(STAT_AsyncFileMemory, Block.ProcessedSize);
}
Block.ProcessedSize = 0;
Block.bInFlight = false;
}
// Drops one read request's references on every block its byte range overlapped.
// Blocks whose ref count reaches zero get their raw IO cancelled; if no CPU
// decompress task is still running they are fully recycled here, otherwise
// DoProcessing will observe RefCount == 0 and clean up itself.
void RemoveRequest(FPakProcessedReadRequest* Req, int64 Offset, int64 BytesToRead)
{
FScopeLock ScopedLock(&CriticalSection);
check(LiveRequests.Contains(Req));
LiveRequests.Remove(Req);
int32 FirstBlock = Offset / FileEntry->CompressionBlockSize;
int32 LastBlock = (Offset + BytesToRead - 1) / FileEntry->CompressionBlockSize;
check(FirstBlock >= 0 && FirstBlock < Blocks.Num() && LastBlock >= 0 && LastBlock < Blocks.Num() && FirstBlock <= LastBlock);
for (int32 BlockIndex = FirstBlock; BlockIndex <= LastBlock; BlockIndex++)
{
FCachedAsyncBlock& Block = Blocks[BlockIndex];
check(Block.RefCount > 0);
if (!--Block.RefCount)
{
if (Block.RawRequest)
{
// Nobody wants this block anymore: cancel and retire its raw read.
Block.RawRequest->Cancel();
Block.RawRequest->WaitCompletion();
delete Block.RawRequest;
Block.RawRequest = nullptr;
NumLiveRawRequests--;
}
if (!Block.CPUWorkGraphEvent.GetReference() || Block.CPUWorkGraphEvent->IsComplete())
{
ClearBlock(Block);
}
}
}
}
// Copies the already-decompressed bytes for [Offset, Offset + BytesToRead) out of
// the cached blocks into Memory, clipping the first and last block to the
// requested range. Runs without the lock on the presumption that the caller's
// ref count on each block keeps it alive (see original note below).
void GatherResults(uint8* Memory, int64 Offset, int64 BytesToRead)
{
// no lock here, I don't think it is needed because we have a ref count.
int32 FirstBlock = Offset / FileEntry->CompressionBlockSize;
int32 LastBlock = (Offset + BytesToRead - 1) / FileEntry->CompressionBlockSize;
check(FirstBlock >= 0 && FirstBlock < Blocks.Num() && LastBlock >= 0 && LastBlock < Blocks.Num() && FirstBlock <= LastBlock);
for (int32 BlockIndex = FirstBlock; BlockIndex <= LastBlock; BlockIndex++)
{
FCachedAsyncBlock& Block = Blocks[BlockIndex];
check(Block.RefCount > 0 && Block.Processed && Block.ProcessedSize);
int64 BlockStart = int64(BlockIndex) * int64(FileEntry->CompressionBlockSize);
int64 BlockEnd = BlockStart + Block.ProcessedSize;
int64 SrcOffset = 0;
int64 DestOffset = BlockStart - Offset;
if (DestOffset < 0)
{
// Request starts inside this block: skip the leading bytes of the block.
SrcOffset -= DestOffset;
DestOffset = 0;
}
int64 CopySize = Block.ProcessedSize;
if (DestOffset + CopySize > BytesToRead)
{
// Request ends inside this block: trim the tail.
CopySize = BytesToRead - DestOffset;
}
if (SrcOffset + CopySize > Block.ProcessedSize)
{
CopySize = Block.ProcessedSize - SrcOffset;
}
check(CopySize > 0 && DestOffset >= 0 && DestOffset + CopySize <= BytesToRead);
check(SrcOffset >= 0 && SrcOffset + CopySize <= Block.ProcessedSize);
FMemory::Memcpy(Memory + DestOffset, Block.Processed + SrcOffset, CopySize);
check(Block.RefCount > 0);
}
}
};
/** Cancels this request. CompleteRace arbitrates cancel-vs-complete: only the first caller to bump it from zero performs the teardown. */
void FPakProcessedReadRequest::CancelRawRequests()
{
	if (CompleteRace.Increment() != 1)
	{
		// Normal completion won the race; nothing to do.
		return;
	}
	Owner->RemoveRequest(this, Offset, BytesToRead);
	bHasCancelled = true;
}
/** Assembles the final uncompressed result, allocating the destination buffer unless the caller supplied one. */
void FPakProcessedReadRequest::GatherResults()
{
	if (CompleteRace.Increment() != 1)
	{
		// A cancel won the race; skip assembling results.
		return;
	}
	if (!bUserSuppliedMemory)
	{
		check(!Memory);
		Memory = (uint8*)FMemory::Malloc(BytesToRead);
		INC_MEMORY_STAT_BY(STAT_AsyncFileMemory, BytesToRead);
	}
	check(Memory);
	Owner->GatherResults(Memory, Offset, BytesToRead);
}
/** Releases this request's block references, unless a cancel already did so. */
void FPakProcessedReadRequest::DoneWithRawRequests()
{
	if (bHasCancelled)
	{
		// CancelRawRequests already removed us; dropping the refs twice would underflow.
		return;
	}
	Owner->RemoveRequest(this, Offset, BytesToRead);
}
bool FPakProcessedReadRequest::CheckCompletion(const FPakEntry& FileEntry, int32 BlockIndex, TArray<FCachedAsyncBlock>& Blocks)
{
if (!bRequestOutstanding || bHasCompleted)
{
return false;
}
{
int64 BlockStart = int64(BlockIndex) * int64(FileEntry.CompressionBlockSize);
int64 BlockEnd = int64(BlockIndex + 1) * int64(FileEntry.CompressionBlockSize);
if (Offset >= BlockEnd || Offset + BytesToRead <= BlockStart)
{
return false;
}
}
int32 FirstBlock = Offset / FileEntry.CompressionBlockSize;
int32 LastBlock = (Offset + BytesToRead - 1) / FileEntry.CompressionBlockSize;
check(FirstBlock >= 0 && FirstBlock < Blocks.Num() && LastBlock >= 0 && LastBlock < Blocks.Num() && FirstBlock <= LastBlock);
for (int32 MyBlockIndex = FirstBlock; MyBlockIndex <= LastBlock; MyBlockIndex++)
{
FCachedAsyncBlock& Block = Blocks[MyBlockIndex];
if (!Block.Processed)
{
return false;
}
}
bHasCompleted = true;
return true;
}
// Task-graph entry point: decompress the captured block on a worker thread.
void FAsyncIOCPUWorkTask::DoTask(ENamedThreads::Type CurrentThread, const FGraphEventRef& MyCompletionGraphEvent)
{
Owner.DoProcessing(BlockIndex);
}
#endif
#if USE_NEW_ASYNC_IO
/**
 * Opens an async read handle for a file. Files that live in a mounted pak (when the
 * precache is enabled and multithreading is available) get the precached pak handle;
 * everything else falls through to the wrapped platform file implementation.
 */
IAsyncReadFileHandle* FPakPlatformFile::OpenAsyncRead(const TCHAR* Filename)
{
#if USE_PAK_PRECACHE
	if (FPlatformProcess::SupportsMultithreading() && GPakCache_Enable > 0)
	{
		FPakFile* OwningPak = NULL;
		const FPakEntry* Entry = FindFileInPakFiles(Filename, &OwningPak);
		if (Entry && OwningPak && OwningPak->GetFilenameName() != NAME_None)
		{
			return new FPakAsyncReadFileHandle(Entry, OwningPak, Filename);
		}
	}
#endif
	// Loose file, pak cache disabled, or single-threaded: use the default path.
	return IPlatformFile::OpenAsyncRead(Filename);
}
#endif // USE_NEW_ASYNC_IO
/**
 * Encryption policy for pak readers: aligns reads to AES block boundaries and
 * decrypts data in place (only when an AES_KEY is compiled into the build).
 */
class FPakSimpleEncryption
{
public:
enum
{
// AES operates on fixed-size blocks, so reads must cover whole blocks.
Alignment = FAES::AESBlockSize,
};
// Rounds a read size up to a whole number of AES blocks.
static FORCEINLINE int64 AlignReadRequest(int64 Size)
{
return Align(Size, Alignment);
}
// Decrypts Size bytes in place; a no-op unless AES_KEY is defined.
static FORCEINLINE void DecryptBlock(void* Data, int64 Size)
{
#ifdef AES_KEY
FAES::DecryptData((uint8*)Data, Size);
#endif
}
};
/**
 * Thread-local scratch memory for pak decompression. Buffers grow on demand,
 * never shrink, and are reused across Serialize calls on the same thread.
 */
class FCompressionScratchBuffers : public TThreadSingleton<FCompressionScratchBuffers>
{
public:
	FCompressionScratchBuffers()
		: TempBufferSize(0)
		, ScratchBufferSize(0)
	{}
	// Current capacity of TempBuffer in bytes (0 until first use).
	int64 TempBufferSize;
	// Holds one uncompressed block when output cannot go straight to the destination.
	TAutoPtr<uint8> TempBuffer;
	// Current capacity of ScratchBuffer in bytes (0 until first use).
	int64 ScratchBufferSize;
	// Holds raw compressed block data (double-buffered by the reader policy).
	TAutoPtr<uint8> ScratchBuffer;
	// Grows both buffers as needed so they can hold at least the requested sizes.
	void EnsureBufferSpace(int64 CompressionBlockSize, int64 ScratchSize)
	{
		if (TempBufferSize < CompressionBlockSize)
		{
			TempBufferSize = CompressionBlockSize;
			TempBuffer.Reset((uint8*)FMemory::Malloc(TempBufferSize));
		}
		if (ScratchBufferSize < ScratchSize)
		{
			ScratchBufferSize = ScratchSize;
			ScratchBuffer.Reset((uint8*)FMemory::Malloc(ScratchBufferSize));
		}
	}
};
/**
 * Class to handle correctly reading from a compressed file within a pak.
 * Serialize() pipelines work: while one block decompresses on a task thread,
 * the next block's compressed bytes are read from the pak on the calling thread.
 */
template< typename EncryptionPolicy = FPakNoEncryption >
class FPakCompressedReaderPolicy
{
public:
// Async-task payload: decrypts (per policy) and decompresses one block, then
// optionally copies a sub-range of the result to its final destination.
class FPakUncompressTask : public FNonAbandonableTask
{
public:
uint8* UncompressedBuffer;
int32 UncompressedSize;
uint8* CompressedBuffer;
int32 CompressedSize;
ECompressionFlags Flags;
void* CopyOut;
int64 CopyOffset;
int64 CopyLength;
void DoWork()
{
// Decrypt and Uncompress from memory to memory.
int64 EncryptionSize = EncryptionPolicy::AlignReadRequest(CompressedSize);
EncryptionPolicy::DecryptBlock(CompressedBuffer, EncryptionSize);
FCompression::UncompressMemory(Flags, UncompressedBuffer, UncompressedSize, CompressedBuffer, CompressedSize, false, FPlatformMisc::GetPlatformCompression()->GetCompressionBitWindow());
if (CopyOut)
{
// Destination was a scratch buffer; copy the requested sub-range to the caller.
FMemory::Memcpy(CopyOut, UncompressedBuffer+CopyOffset, CopyLength);
}
}
FORCEINLINE TStatId GetStatId() const
{
// TODO: This is called too early in engine startup.
return TStatId();
//RETURN_QUICK_DECLARE_CYCLE_STAT(FPakUncompressTask, STATGROUP_ThreadPoolAsyncTasks);
}
};
FPakCompressedReaderPolicy(const FPakFile& InPakFile, const FPakEntry& InPakEntry, FArchive* InPakReader)
: PakFile(InPakFile)
, PakEntry(InPakEntry)
, PakReader(InPakReader)
{
}
/** Pak file that own this file data */
const FPakFile& PakFile;
/** Pak file entry for this file. */
const FPakEntry& PakEntry;
/** Pak file archive to read the data from. */
FArchive* PakReader;
// Callers see the uncompressed size of the file, not the on-disk size.
FORCEINLINE int64 FileSize() const
{
return PakEntry.UncompressedSize;
}
// Reads Length uncompressed bytes starting at uncompressed offset DesiredPosition into V.
void Serialize(int64 DesiredPosition, void* V, int64 Length)
{
const int32 CompressionBlockSize = PakEntry.CompressionBlockSize;
uint32 CompressionBlockIndex = DesiredPosition / CompressionBlockSize;
uint8* WorkingBuffers[2];
// Byte offset into the first block at which the requested range starts.
int64 DirectCopyStart = DesiredPosition % PakEntry.CompressionBlockSize;
FAsyncTask<FPakUncompressTask> UncompressTask;
FCompressionScratchBuffers& ScratchSpace = FCompressionScratchBuffers::Get();
bool bStartedUncompress = false;
// Worst-case compressed block size, padded to the encryption policy's read alignment.
int64 WorkingBufferRequiredSize = FCompression::CompressMemoryBound((ECompressionFlags)PakEntry.CompressionMethod,CompressionBlockSize, FPlatformMisc::GetPlatformCompression()->GetCompressionBitWindow());
WorkingBufferRequiredSize = EncryptionPolicy::AlignReadRequest(WorkingBufferRequiredSize);
// Two working buffers so a read and a decompress can overlap.
ScratchSpace.EnsureBufferSpace(CompressionBlockSize, WorkingBufferRequiredSize*2);
WorkingBuffers[0] = ScratchSpace.ScratchBuffer;
WorkingBuffers[1] = ScratchSpace.ScratchBuffer + WorkingBufferRequiredSize;
while (Length > 0)
{
const FPakCompressedBlock& Block = PakEntry.CompressionBlocks[CompressionBlockIndex];
int64 Pos = CompressionBlockIndex * CompressionBlockSize;
int64 CompressedBlockSize = Block.CompressedEnd-Block.CompressedStart;
int64 UncompressedBlockSize = FMath::Min<int64>(PakEntry.UncompressedSize-Pos, PakEntry.CompressionBlockSize);
int64 ReadSize = EncryptionPolicy::AlignReadRequest(CompressedBlockSize);
int64 WriteSize = FMath::Min<int64>(UncompressedBlockSize - DirectCopyStart, Length);
// Read this block's compressed bytes into the buffer the in-flight task is NOT using.
PakReader->Seek(Block.CompressedStart);
PakReader->Serialize(WorkingBuffers[CompressionBlockIndex & 1],ReadSize);
if (bStartedUncompress)
{
// Wait for the previous block's decompress before reusing the task object.
UncompressTask.EnsureCompletion();
bStartedUncompress = false;
}
FPakUncompressTask& TaskDetails = UncompressTask.GetTask();
if (DirectCopyStart == 0 && Length >= CompressionBlockSize)
{
// Block can be decompressed directly into output buffer
TaskDetails.Flags = (ECompressionFlags)PakEntry.CompressionMethod;
TaskDetails.UncompressedBuffer = (uint8*)V;
TaskDetails.UncompressedSize = UncompressedBlockSize;
TaskDetails.CompressedBuffer = WorkingBuffers[CompressionBlockIndex & 1];
TaskDetails.CompressedSize = CompressedBlockSize;
TaskDetails.CopyOut = nullptr;
}
else
{
// Block needs to be copied from a working buffer
TaskDetails.Flags = (ECompressionFlags)PakEntry.CompressionMethod;
TaskDetails.UncompressedBuffer = (uint8*)ScratchSpace.TempBuffer;
TaskDetails.UncompressedSize = UncompressedBlockSize;
TaskDetails.CompressedBuffer = WorkingBuffers[CompressionBlockIndex & 1];
TaskDetails.CompressedSize = CompressedBlockSize;
TaskDetails.CopyOut = V;
TaskDetails.CopyOffset = DirectCopyStart;
TaskDetails.CopyLength = WriteSize;
}
// The final block runs synchronously; earlier blocks overlap with the next read.
if (Length == WriteSize)
{
UncompressTask.StartSynchronousTask();
}
else
{
UncompressTask.StartBackgroundTask();
}
bStartedUncompress = true;
V = (void*)((uint8*)V + WriteSize);
Length -= WriteSize;
DirectCopyStart = 0;
++CompressionBlockIndex;
}
if(bStartedUncompress)
{
UncompressTask.EnsureCompletion();
}
}
};
/**
 * Compares an expected pak entry (A, from the index) against an actual one
 * (B, e.g. re-serialized ahead of the payload), logging every field that differs.
 * Returns true only when all checked fields match.
 */
bool FPakEntry::VerifyPakEntriesMatch(const FPakEntry& FileEntryA, const FPakEntry& FileEntryB)
{
	bool bEntriesMatch = true;
	if (FileEntryA.Size != FileEntryB.Size)
	{
		UE_LOG(LogPakFile, Error, TEXT("Pak header file size mismatch, got: %lld, expected: %lld"), FileEntryB.Size, FileEntryA.Size);
		bEntriesMatch = false;
	}
	if (FileEntryA.UncompressedSize != FileEntryB.UncompressedSize)
	{
		UE_LOG(LogPakFile, Error, TEXT("Pak header uncompressed file size mismatch, got: %lld, expected: %lld"), FileEntryB.UncompressedSize, FileEntryA.UncompressedSize);
		bEntriesMatch = false;
	}
	if (FileEntryA.CompressionMethod != FileEntryB.CompressionMethod)
	{
		UE_LOG(LogPakFile, Error, TEXT("Pak header file compression method mismatch, got: %d, expected: %d"), FileEntryB.CompressionMethod, FileEntryA.CompressionMethod);
		bEntriesMatch = false;
	}
	if (FMemory::Memcmp(FileEntryA.Hash, FileEntryB.Hash, sizeof(FileEntryA.Hash)) != 0)
	{
		UE_LOG(LogPakFile, Error, TEXT("Pak file hash does not match its index entry"));
		bEntriesMatch = false;
	}
	return bEntriesMatch;
}
/**
 * Returns true if access to the given loose (non-pak) file is permitted.
 * Extensions that should only ever ship inside a pak are rejected first, then a
 * project-supplied security delegate (if bound) gets the final say.
 */
bool FPakPlatformFile::IsNonPakFilenameAllowed(const FString& InFilename)
{
	bool bAllowed = true;
#if EXCLUDE_NONPAK_UE_EXTENSIONS
	// Cooked asset extensions are never expected outside a pak.
	FName Ext = FName(*FPaths::GetExtension(InFilename));
	bAllowed = !ExcludedNonPakExtensions.Contains(Ext);
#endif
	FFilenameSecurityDelegate& FilenameSecurityDelegate = GetFilenameSecurityDelegate();
	if (bAllowed)
	{
		if (FilenameSecurityDelegate.IsBound())
		{
			// Fixed: removed a stray double semicolon at the end of this statement.
			bAllowed = FilenameSecurityDelegate.Execute(*InFilename);
		}
	}
	return bAllowed;
}
#if IS_PROGRAM
// Program-only constructor: opens the pak through the file manager (no lower-level
// platform file) and parses it immediately. bIsValid reflects the outcome.
FPakFile::FPakFile(const TCHAR* Filename, bool bIsSigned)
: PakFilename(Filename)
, PakFilenameName(Filename)
, CachedTotalSize(0)
, bSigned(bIsSigned)
, bIsValid(false)
{
FArchive* Reader = GetSharedReader(NULL);
if (Reader)
{
Timestamp = IFileManager::Get().GetTimeStamp(Filename);
Initialize(Reader);
}
}
#endif
// Main constructor: opens the pak through the supplied lower-level platform file
// and parses trailer + index immediately. bIsValid reflects the outcome.
FPakFile::FPakFile(IPlatformFile* LowerLevel, const TCHAR* Filename, bool bIsSigned)
: PakFilename(Filename)
, PakFilenameName(Filename)
, CachedTotalSize(0)
, bSigned(bIsSigned)
, bIsValid(false)
{
FArchive* Reader = GetSharedReader(LowerLevel);
if (Reader)
{
Timestamp = LowerLevel->GetTimeStamp(Filename);
Initialize(Reader);
}
}
#if WITH_EDITOR
// Editor-only constructor: parses a pak directly from an existing archive
// (no filename/timestamp; signing disabled).
FPakFile::FPakFile(FArchive* Archive)
: bSigned(false)
, bIsValid(false)
{
Initialize(Archive);
}
#endif
// Nothing explicit to release: the per-thread readers in ReaderMap are held by
// TAutoPtr and are destroyed with the map.
FPakFile::~FPakFile()
{
}
/** Opens a raw reader for the pak via the file manager and wraps it for signature checking when required. */
FArchive* FPakFile::CreatePakReader(const TCHAR* Filename)
{
	return SetupSignedPakReader(IFileManager::Get().CreateFileReader(Filename), Filename);
}
/** Wraps an already-open file handle in an archive reader, then applies signature checking when required. */
FArchive* FPakFile::CreatePakReader(IFileHandle& InHandle, const TCHAR* Filename)
{
	return SetupSignedPakReader(new FArchiveFileReaderGeneric(&InHandle, Filename, InHandle.Size()), Filename);
}
// Wraps the raw pak reader in an FSignedArchiveReader when signature checking is
// active (bSigned, -signedpak/-signed on the command line, or unconditionally in
// USING_SIGNED_CONTENT builds). The shared FChunkCacheWorker is created lazily.
// Returns the (possibly wrapped) archive.
FArchive* FPakFile::SetupSignedPakReader(FArchive* ReaderArchive, const TCHAR* Filename)
{
if (FPlatformProperties::RequiresCookedData())
{
#if !USING_SIGNED_CONTENT
if (bSigned || FParse::Param(FCommandLine::Get(), TEXT("signedpak")) || FParse::Param(FCommandLine::Get(), TEXT("signed")))
#endif
{
if (!Decryptor.IsValid())
{
Decryptor = new FChunkCacheWorker(ReaderArchive, Filename);
}
ReaderArchive = new FSignedArchiveReader(ReaderArchive, Decryptor);
}
}
return ReaderArchive;
}
// Parses the pak trailer (FPakInfo) from the end of the archive, validates the
// magic number and version (fatal on mismatch), then loads the index and marks
// the pak valid. The -checkpak switch additionally runs a full content check.
void FPakFile::Initialize(FArchive* Reader)
{
CachedTotalSize = Reader->TotalSize();
if (CachedTotalSize < Info.GetSerializedSize())
{
UE_LOG(LogPakFile, Fatal, TEXT("Corrupted pak file '%s' (too short). Verify your installation."), *PakFilename);
}
else
{
// Serialize trailer and check if everything is as expected.
Reader->Seek(CachedTotalSize - Info.GetSerializedSize());
Info.Serialize(*Reader);
UE_CLOG(Info.Magic != FPakInfo::PakFile_Magic, LogPakFile, Fatal, TEXT("Trailing magic number (%ud) in '%s' is different than the expected one. Verify your installation."), Info.Magic, *PakFilename);
UE_CLOG(!(Info.Version >= FPakInfo::PakFile_Version_Initial && Info.Version <= FPakInfo::PakFile_Version_Latest), LogPakFile, Fatal, TEXT("Invalid pak file version (%d) in '%s'. Verify your installation."), Info.Version, *PakFilename);
LoadIndex(Reader);
// LoadIndex should crash in case of an error, so just assume everything is ok if we got here.
bIsValid = true;
if (FParse::Param(FCommandLine::Get(), TEXT("checkpak")))
{
ensure(Check());
}
}
}
// Loads and validates the pak index: reads Info.IndexSize bytes at Info.IndexOffset,
// verifies their SHA1 against Info.IndexHash, then deserializes the mount point and
// every file entry, building the directory map (including empty parent directories
// up to the mount point). Any corruption is a fatal log.
void FPakFile::LoadIndex(FArchive* Reader)
{
if (CachedTotalSize < (Info.IndexOffset + Info.IndexSize))
{
UE_LOG(LogPakFile, Fatal, TEXT("Corrupted index offset in pak file."));
}
else
{
// Load index into memory first.
Reader->Seek(Info.IndexOffset);
TArray<uint8> IndexData;
IndexData.AddUninitialized(Info.IndexSize);
Reader->Serialize(IndexData.GetData(), Info.IndexSize);
FMemoryReader IndexReader(IndexData);
// Check SHA1 value.
uint8 IndexHash[20];
FSHA1::HashBuffer(IndexData.GetData(), IndexData.Num(), IndexHash);
if (FMemory::Memcmp(IndexHash, Info.IndexHash, sizeof(IndexHash)) != 0)
{
UE_LOG(LogPakFile, Fatal, TEXT("Corrupted index in pak file (CRC mismatch)."));
}
// Read the default mount point and all entries.
int32 NumEntries = 0;
IndexReader << MountPoint;
IndexReader << NumEntries;
MakeDirectoryFromPath(MountPoint);
// Allocate enough memory to hold all entries (and not reallocate while they're being added to it).
// NOTE: the directory Index stores raw pointers into Files, so Files must never regrow below.
Files.Empty(NumEntries);
for (int32 EntryIndex = 0; EntryIndex < NumEntries; EntryIndex++)
{
// Serialize from memory.
FPakEntry Entry;
FString Filename;
IndexReader << Filename;
Entry.Serialize(IndexReader, Info.Version);
// Add new file info.
Files.Add(Entry);
// Construct Index of all directories in pak file.
FString Path = FPaths::GetPath(Filename);
MakeDirectoryFromPath(Path);
FPakDirectory* Directory = Index.Find(Path);
if (Directory != NULL)
{
Directory->Add(Filename, &Files.Last());
}
else
{
FPakDirectory NewDirectory;
NewDirectory.Add(Filename, &Files.Last());
Index.Add(Path, NewDirectory);
// add the parent directories up to the mount point
while (MountPoint != Path)
{
// Strip the trailing '/' then cut back to the previous separator.
Path = Path.Left(Path.Len()-1);
int32 Offset = 0;
if (Path.FindLastChar('/', Offset))
{
Path = Path.Left(Offset);
MakeDirectoryFromPath(Path);
if (Index.Find(Path) == NULL)
{
FPakDirectory ParentDirectory;
Index.Add(Path, ParentDirectory);
}
}
else
{
// No separator left: stop walking at the mount point.
Path = MountPoint;
}
}
}
}
}
}
/**
 * Full integrity check of the pak: for every file, re-reads the inline copy of its
 * index entry and the raw payload, verifying the entry against the index and the
 * payload against the stored SHA1 hash. Logs each mismatch; returns true when no
 * errors were found.
 */
bool FPakFile::Check()
{
	UE_LOG(LogPakFile, Display, TEXT("Checking pak file \"%s\". This may take a while..."), *PakFilename);
	FArchive& PakReader = *GetSharedReader(NULL);
	int32 ErrorCount = 0;
	int32 FileCount = 0;
	for (FPakFile::FFileIterator It(*this); It; ++It, ++FileCount)
	{
		const FPakEntry& Entry = It.Info();
		void* FileContents = FMemory::Malloc(Entry.Size);
		PakReader.Seek(Entry.Offset);
		// Each payload is preceded by a serialized copy of its index entry; verify they agree.
		// (Removed an unused local "SerializedCrcTest" that was never read.)
		FPakEntry EntryInfo;
		EntryInfo.Serialize(PakReader, GetInfo().Version);
		if (EntryInfo != Entry)
		{
			UE_LOG(LogPakFile, Error, TEXT("Serialized hash mismatch for \"%s\"."), *It.Filename());
			ErrorCount++;
		}
		PakReader.Serialize(FileContents, Entry.Size);
		uint8 TestHash[20];
		FSHA1::HashBuffer(FileContents, Entry.Size, TestHash);
		if (FMemory::Memcmp(TestHash, Entry.Hash, sizeof(TestHash)) != 0)
		{
			UE_LOG(LogPakFile, Error, TEXT("Hash mismatch for \"%s\"."), *It.Filename());
			ErrorCount++;
		}
		else
		{
			UE_LOG(LogPakFile, Display, TEXT("\"%s\" OK."), *It.Filename());
		}
		FMemory::Free(FileContents);
	}
	if (ErrorCount == 0)
	{
		UE_LOG(LogPakFile, Display, TEXT("Pak file \"%s\" healthy, %d files checked."), *PakFilename, FileCount);
	}
	else
	{
		// Fixed log typo: "ouf of" -> "out of".
		UE_LOG(LogPakFile, Display, TEXT("Pak file \"%s\" corrupted (%d errors out of %d files checked.)."), *PakFilename, ErrorCount, FileCount);
	}
	return ErrorCount == 0;
}
// Returns (creating on first use) a per-thread FArchive for this pak. Readers are
// cached in ReaderMap keyed by thread id so concurrent threads never share an
// archive's seek position. Fatal if the pak cannot be opened.
FArchive* FPakFile::GetSharedReader(IPlatformFile* LowerLevel)
{
uint32 Thread = FPlatformTLS::GetCurrentThreadId();
FArchive* PakReader = NULL;
{
// Fast path: reuse this thread's existing reader if one was already created.
FScopeLock ScopedLock(&CriticalSection);
TAutoPtr<FArchive>* ExistingReader = ReaderMap.Find(Thread);
if (ExistingReader)
{
PakReader = *ExistingReader;
}
}
if (!PakReader)
{
// Create a new FArchive reader and pass it to the new handle.
if (LowerLevel != NULL)
{
IFileHandle* PakHandle = LowerLevel->OpenRead(*GetFilename());
if (PakHandle)
{
PakReader = CreatePakReader(*PakHandle, *GetFilename());
}
}
else
{
// No platform file supplied; go through the file manager instead.
PakReader = CreatePakReader(*GetFilename());
}
if (!PakReader)
{
UE_LOG(LogPakFile, Fatal, TEXT("Unable to create pak \"%s\" handle"), *GetFilename());
}
{
// Register the new reader for this thread (map takes ownership via TAutoPtr).
FScopeLock ScopedLock(&CriticalSection);
ReaderMap.Emplace(Thread, PakReader);
}
}
return PakReader;
}
#if !UE_BUILD_SHIPPING
/** Self-registering console-command handler for pak operations: Mount, Unmount, PakList. */
class FPakExec : private FSelfRegisteringExec
{
	FPakPlatformFile& PlatformFile;
public:
	FPakExec(FPakPlatformFile& InPlatformFile)
		: PlatformFile(InPlatformFile)
	{}
	/** Console commands **/
	virtual bool Exec( UWorld* InWorld, const TCHAR* Cmd, FOutputDevice& Ar ) override
	{
		// One uniform else-if chain: each command consumes its token and delegates.
		if (FParse::Command(&Cmd, TEXT("Mount")))
		{
			PlatformFile.HandleMountCommand(Cmd, Ar);
			return true;
		}
		else if (FParse::Command(&Cmd, TEXT("Unmount")))
		{
			PlatformFile.HandleUnmountCommand(Cmd, Ar);
			return true;
		}
		else if (FParse::Command(&Cmd, TEXT("PakList")))
		{
			PlatformFile.HandlePakListCommand(Cmd, Ar);
			return true;
		}
		return false;
	}
};
static TAutoPtr<FPakExec> GPakExec;
/** Console handler: "Mount <PakFilename> [MountPoint]" — mounts a pak at runtime with default order 0. */
void FPakPlatformFile::HandleMountCommand(const TCHAR* Cmd, FOutputDevice& Ar)
{
	const FString PakFilename = FParse::Token(Cmd, false);
	if (PakFilename.IsEmpty())
	{
		return;
	}
	const FString MountPoint = FParse::Token(Cmd, false);
	Mount(*PakFilename, 0, MountPoint.IsEmpty() ? NULL : *MountPoint);
}
/** Console handler: "Unmount <PakFilename>" — unmounts a previously mounted pak. */
void FPakPlatformFile::HandleUnmountCommand(const TCHAR* Cmd, FOutputDevice& Ar)
{
	const FString PakFilename = FParse::Token(Cmd, false);
	if (PakFilename.IsEmpty())
	{
		return;
	}
	Unmount(*PakFilename);
}
/** Console handler: "PakList" — logs every mounted pak and its mount point. */
void FPakPlatformFile::HandlePakListCommand(const TCHAR* Cmd, FOutputDevice& Ar)
{
	TArray<FPakListEntry> Paks;
	GetMountedPaks(Paks);
	// Iterate by const reference: the previous by-value range-for copied each FPakListEntry.
	for (const auto& Pak : Paks)
	{
		Ar.Logf(TEXT("%s Mounted to %s"), *Pak.PakFile->GetFilename(), *Pak.PakFile->GetMountPoint());
	}
}
#endif // !UE_BUILD_SHIPPING
// Default-initializes the pak layer; the real setup happens in Initialize().
FPakPlatformFile::FPakPlatformFile()
: LowerLevel(NULL)
, bSigned(false)
{
}
// Tears down the pak layer: unbinds the mount delegates, shuts down the precacher,
// flushes legacy async IO (old IO path only), then deletes every mounted pak under
// the pak-list lock.
FPakPlatformFile::~FPakPlatformFile()
{
FCoreDelegates::OnMountPak.Unbind();
FCoreDelegates::OnUnmountPak.Unbind();
#if USE_PAK_PRECACHE
FPakPrecacher::Shutdown();
#endif
#if !USE_NEW_ASYNC_IO
// We need to flush async IO... if it hasn't been shut down already.
if (FIOSystem::HasShutdown() == false)
{
FIOSystem& IOSystem = FIOSystem::Get();
IOSystem.BlockTillAllRequestsFinishedAndFlushHandles();
}
#endif
{
FScopeLock ScopedLock(&PakListCritical);
for (int32 PakFileIndex = 0; PakFileIndex < PakFiles.Num(); PakFileIndex++)
{
delete PakFiles[PakFileIndex].PakFile;
PakFiles[PakFileIndex].PakFile = nullptr;
}
}
}
/**
 * Recursively collects all *.pak files under Directory into OutPakFiles. On
 * platforms with chunked installs, "pakchunkN-*.pak" files whose chunk is not yet
 * installed are skipped.
 */
void FPakPlatformFile::FindPakFilesInDirectory(IPlatformFile* LowLevelFile, const TCHAR* Directory, TArray<FString>& OutPakFiles)
{
	// Helper class to find all pak files.
	class FPakSearchVisitor : public IPlatformFile::FDirectoryVisitor
	{
		TArray<FString>& FoundPakFiles;
		IPlatformChunkInstall* ChunkInstall;
	public:
		FPakSearchVisitor(TArray<FString>& InFoundPakFiles, IPlatformChunkInstall* InChunkInstall)
			: FoundPakFiles(InFoundPakFiles)
			, ChunkInstall(InChunkInstall)
		{}
		virtual bool Visit(const TCHAR* FilenameOrDirectory, bool bIsDirectory)
		{
			if (bIsDirectory == false)
			{
				FString Filename(FilenameOrDirectory);
				if (FPaths::GetExtension(Filename) == TEXT("pak"))
				{
					// if a platform supports chunk style installs, make sure that the chunk a pak file resides in is actually fully installed before accepting pak files from it
					if (ChunkInstall)
					{
						FString ChunkIdentifier(TEXT("pakchunk"));
						FString BaseFilename = FPaths::GetBaseFilename(Filename);
						if (BaseFilename.StartsWith(ChunkIdentifier))
						{
							const int32 StartOfChunkIndex = ChunkIdentifier.Len();
							int32 DelimiterIndex = 0;
							// Fix: FindChar's result was previously ignored; a name with no '-'
							// left DelimiterIndex at 0, yielding an empty chunk-number string
							// (parsed as chunk 0). Fall back to the rest of the base name.
							if (!BaseFilename.FindChar(TEXT('-'), DelimiterIndex))
							{
								DelimiterIndex = BaseFilename.Len();
							}
							FString ChunkNumberString = BaseFilename.Mid(StartOfChunkIndex, DelimiterIndex - StartOfChunkIndex);
							int32 ChunkNumber = 0;
							TTypeFromString<int32>::FromString(ChunkNumber, *ChunkNumberString);
							if (ChunkInstall->GetChunkLocation(ChunkNumber) == EChunkLocation::NotAvailable)
							{
								// Chunk not installed yet: skip this pak but keep iterating.
								return true;
							}
						}
					}
					FoundPakFiles.Add(Filename);
				}
			}
			return true;
		}
	};
	// Find all pak files.
	FPakSearchVisitor Visitor(OutPakFiles, FPlatformMisc::GetPlatformChunkInstall());
	LowLevelFile->IterateDirectoryRecursively(Directory, Visitor);
}
/** Collects pak files from every folder in PakFolders into OutPakFiles. */
void FPakPlatformFile::FindAllPakFiles(IPlatformFile* LowLevelFile, const TArray<FString>& PakFolders, TArray<FString>& OutPakFiles)
{
	// Scan each candidate directory in turn; results accumulate in OutPakFiles.
	for (const FString& PakFolder : PakFolders)
	{
		FindPakFilesInDirectory(LowLevelFile, *PakFolder, OutPakFiles);
	}
}
// Builds the list of directories to search for pak files: optional -pakdir=
// folders from the command line ('*'-separated, non-shipping builds only) plus
// the hardcoded game content, game saved, and engine content Paks/ directories.
void FPakPlatformFile::GetPakFolders(const TCHAR* CmdLine, TArray<FString>& OutPakFolders)
{
#if !UE_BUILD_SHIPPING
// Command line folders
FString PakDirs;
if (FParse::Value(CmdLine, TEXT("-pakdir="), PakDirs))
{
TArray<FString> CmdLineFolders;
PakDirs.ParseIntoArray(CmdLineFolders, TEXT("*"), true);
OutPakFolders.Append(CmdLineFolders);
}
#endif
// @todo plugin urgent: Needs to handle plugin Pak directories, too
// Hardcoded locations
OutPakFolders.Add(FString::Printf(TEXT("%sPaks/"), *FPaths::GameContentDir()));
OutPakFolders.Add(FString::Printf(TEXT("%sPaks/"), *FPaths::GameSavedDir()));
OutPakFolders.Add(FString::Printf(TEXT("%sPaks/"), *FPaths::EngineContentDir()));
}
/** Returns true when at least one pak file exists in any of the given folders. */
bool FPakPlatformFile::CheckIfPakFilesExist(IPlatformFile* LowLevelFile, const TArray<FString>& PakFolders)
{
	TArray<FString> DiscoveredPaks;
	FindAllPakFiles(LowLevelFile, PakFolders, DiscoveredPaks);
	return DiscoveredPaks.Num() != 0;
}
// Decides whether the pak layer should wrap the given platform file: true when
// explicitly requested (-Pak/-Signedpak/-Signed), or when cooked data is required,
// -NoPak was not passed, and at least one pak file exists on disk. Always true in
// USING_SIGNED_CONTENT builds.
bool FPakPlatformFile::ShouldBeUsed(IPlatformFile* Inner, const TCHAR* CmdLine) const
{
#if !USING_SIGNED_CONTENT
bool Result = FParse::Param(CmdLine, TEXT("Pak")) || FParse::Param(CmdLine, TEXT("Signedpak")) || FParse::Param(CmdLine, TEXT("Signed"));
if (FPlatformProperties::RequiresCookedData() && !Result && !FParse::Param(CmdLine, TEXT("NoPak")))
{
TArray<FString> PakFolders;
GetPakFolders(CmdLine, PakFolders);
Result = CheckIfPakFilesExist(Inner, PakFolders);
}
return Result;
#else
return true;
#endif
}
// One-time setup of the pak platform-file layer: records the lower-level platform
// file, configures signing, finds and mounts pak files (unless running against a
// cook-on-the-fly file server), registers console commands and mount delegates,
// and initializes the pak precacher where supported. Returns true on success.
bool FPakPlatformFile::Initialize(IPlatformFile* Inner, const TCHAR* CmdLine)
{
// Inner is required.
check(Inner != NULL);
LowerLevel = Inner;
#if EXCLUDE_NONPAK_UE_EXTENSIONS
// Extensions for file types that should only ever be in a pak file. Used to stop unnecessary access to the lower level platform file
ExcludedNonPakExtensions.Add(TEXT("uasset"));
ExcludedNonPakExtensions.Add(TEXT("umap"));
ExcludedNonPakExtensions.Add(TEXT("ubulk"));
ExcludedNonPakExtensions.Add(TEXT("uexp"));
#endif
#if !USING_SIGNED_CONTENT
bSigned = FParse::Param(CmdLine, TEXT("Signedpak")) || FParse::Param(CmdLine, TEXT("Signed"));
if (!bSigned)
{
// Even if -signed is not provided in the command line, use signed reader if the hardcoded key is non-zero.
FEncryptionKey DecryptionKey;
DecryptionKey.Exponent.Parse(DECRYPTION_KEY_EXPONENT);
DecryptionKey.Modulus.Parse(DECRYPTION_KEY_MODULUS);
bSigned = !DecryptionKey.Exponent.IsZero() && !DecryptionKey.Modulus.IsZero();
}
#else
bSigned = true;
#endif
bool bMountPaks = true;
TArray<FString> PaksToLoad;
#if !UE_BUILD_SHIPPING
// Optionally get a list of pak filenames to load, only these paks will be mounted
FString CmdLinePaksToLoad;
if (FParse::Value(CmdLine, TEXT("-paklist="), CmdLinePaksToLoad))
{
CmdLinePaksToLoad.ParseIntoArray(PaksToLoad, TEXT("+"), true);
}
//if we are using a fileserver, then dont' mount paks automatically. We only want to read files from the server.
FString FileHostIP;
const bool bCookOnTheFly = FParse::Value(FCommandLine::Get(), TEXT("filehostip"), FileHostIP);
bMountPaks = !bCookOnTheFly;
#endif
if (bMountPaks)
{
// Find and mount pak files from the specified directories.
TArray<FString> PakFolders;
GetPakFolders(CmdLine, PakFolders);
TArray<FString> FoundPakFiles;
FindAllPakFiles(LowerLevel, PakFolders, FoundPakFiles);
// Sort in descending order.
FoundPakFiles.Sort(TGreater<FString>());
// Mount all found pak files
for (int32 PakFileIndex = 0; PakFileIndex < FoundPakFiles.Num(); PakFileIndex++)
{
const FString& PakFilename = FoundPakFiles[PakFileIndex];
bool bLoadPak = true;
// Honor the -paklist= whitelist when one was provided.
if (PaksToLoad.Num() && !PaksToLoad.Contains(FPaths::GetBaseFilename(PakFilename)))
{
bLoadPak = false;
}
if (bLoadPak)
{
// hardcode default load ordering of game main pak -> game content -> engine content -> saved dir
// would be better to make this config but not even the config system is initialized here so we can't do that
uint32 PakOrder = 0;
if (PakFilename.StartsWith(FString::Printf(TEXT("%sPaks/%s-"), *FPaths::GameContentDir(), FApp::GetGameName())))
{
PakOrder = 4;
}
else if (PakFilename.StartsWith(FPaths::GameContentDir()))
{
PakOrder = 3;
}
else if (PakFilename.StartsWith(FPaths::EngineContentDir()))
{
PakOrder = 2;
}
else if (PakFilename.StartsWith(FPaths::GameSavedDir()))
{
PakOrder = 1;
}
Mount(*PakFilename, PakOrder);
}
}
}
#if !UE_BUILD_SHIPPING
GPakExec = new FPakExec(*this);
#endif // !UE_BUILD_SHIPPING
// Allow runtime mounting/unmounting of paks through the core delegates.
FCoreDelegates::OnMountPak.BindRaw(this, &FPakPlatformFile::HandleMountPakDelegate);
FCoreDelegates::OnUnmountPak.BindRaw(this, &FPakPlatformFile::HandleUnmountPakDelegate);
#if USE_PAK_PRECACHE
if (!WITH_EDITOR && FPlatformProcess::SupportsMultithreading())
{
FPakPrecacher::Init(LowerLevel);
}
else
{
// Editor builds or single-threaded platforms: disable the pak cache entirely.
GPakCache_Enable = 0;
}
#endif
return !!LowerLevel;
}
bool FPakPlatformFile::Mount(const TCHAR* InPakFilename, uint32 PakOrder, const TCHAR* InPath /*= NULL*/)
{
bool bSuccess = false;
TSharedPtr<IFileHandle> PakHandle = MakeShareable(LowerLevel->OpenRead(InPakFilename));
if (PakHandle.IsValid())
{
FPakFile* Pak = new FPakFile(LowerLevel, InPakFilename, bSigned);
if (Pak->IsValid())
{
if (InPath != NULL)
{
Pak->SetMountPoint(InPath);
}
FString PakFilename = InPakFilename;
if ( PakFilename.EndsWith(TEXT("_P.pak")) )
{
PakOrder += 100;
}
{
// Add new pak file
FScopeLock ScopedLock(&PakListCritical);
FPakListEntry Entry;
Entry.ReadOrder = PakOrder;
Entry.PakFile = Pak;
PakFiles.Add(Entry);
PakFiles.StableSort();
}
bSuccess = true;
}
else
{
UE_LOG(LogPakFile, Warning, TEXT("Failed to mount pak \"%s\", pak is invalid."), InPakFilename);
}
}
else
{
UE_LOG(LogPakFile, Warning, TEXT("Pak \"%s\" does not exist!"), InPakFilename);
}
return bSuccess;
}
bool FPakPlatformFile::Unmount(const TCHAR* InPakFilename)
{
#if USE_PAK_PRECACHE
if (GPakCache_Enable)
{
FPakPrecacher::Get().Unmount(InPakFilename);
}
#endif
{
FScopeLock ScopedLock(&PakListCritical);
for (int32 PakIndex = 0; PakIndex < PakFiles.Num(); PakIndex++)
{
if (PakFiles[PakIndex].PakFile->GetFilename() == InPakFilename)
{
delete PakFiles[PakIndex].PakFile;
PakFiles.RemoveAt(PakIndex);
return true;
}
}
}
return false;
}
/**
 * Creates a file handle for reading an entry out of a pak, selecting the reader
 * policy (compressed / encrypted / plain) that matches the entry's flags.
 *
 * @param Filename  Name of the file being opened (not used by handle construction here).
 * @param PakFile   Pak that contains the entry.
 * @param FileEntry Entry describing the file inside the pak.
 * @return Newly allocated handle; caller takes ownership.
 */
IFileHandle* FPakPlatformFile::CreatePakFileHandle(const TCHAR* Filename, FPakFile* PakFile, const FPakEntry* FileEntry)
{
	FArchive* PakReader = PakFile->GetSharedReader(LowerLevel);
	// Passed through to the handle ctor as in the original code; presumably lets the
	// handle clean up its reader — confirm against FPakFileHandle.
	const bool bTakeOwnership = true;

	// Compression info is only trustworthy on pak versions that store it.
	const bool bCompressed = FileEntry->CompressionMethod != COMPRESS_None
		&& PakFile->GetInfo().Version >= FPakInfo::PakFile_Version_CompressionEncryption;

	IFileHandle* Handle = nullptr;
	if (bCompressed && FileEntry->bEncrypted)
	{
		Handle = new FPakFileHandle< FPakCompressedReaderPolicy<FPakSimpleEncryption> >(*PakFile, *FileEntry, PakReader, bTakeOwnership);
	}
	else if (bCompressed)
	{
		Handle = new FPakFileHandle< FPakCompressedReaderPolicy<> >(*PakFile, *FileEntry, PakReader, bTakeOwnership);
	}
	else if (FileEntry->bEncrypted)
	{
		Handle = new FPakFileHandle< FPakReaderPolicy<FPakSimpleEncryption> >(*PakFile, *FileEntry, PakReader, bTakeOwnership);
	}
	else
	{
		Handle = new FPakFileHandle<>(*PakFile, *FileEntry, PakReader, bTakeOwnership);
	}
	return Handle;
}
bool FPakPlatformFile::HandleMountPakDelegate(const FString& PakFilePath, uint32 PakOrder, IPlatformFile::FDirectoryVisitor* Visitor)
{
bool bReturn = Mount(*PakFilePath, PakOrder);
if (bReturn && Visitor != nullptr)
{
TArray<FPakListEntry> Paks;
GetMountedPaks(Paks);
// Find the single pak we just mounted
for (auto Pak : Paks)
{
if (PakFilePath == Pak.PakFile->GetFilename())
{
// Get a list of all of the files in the pak
for (FPakFile::FFileIterator It(*Pak.PakFile); It; ++It)
{
Visitor->Visit(*It.Filename(), false);
}
return true;
}
}
}
return bReturn;
}
bool FPakPlatformFile::HandleUnmountPakDelegate(const FString& PakFilePath)
{
return Unmount(*PakFilePath);
}
/**
 * Opens a file for reading. Pak contents take priority; if the file is not in
 * any mounted pak (and signed content is not enforced), falls back to the
 * wrapped lower-level file system when the filename is allowed.
 *
 * @param Filename    File to open.
 * @param bAllowWrite Passed through to the lower-level open on the fallback path.
 * @return Handle on success, or nullptr if the file could not be opened.
 */
IFileHandle* FPakPlatformFile::OpenRead(const TCHAR* Filename, bool bAllowWrite)
{
	FPakFile* PakFile = nullptr;
	const FPakEntry* FileEntry = FindFileInPakFiles(Filename, &PakFile);
	if (FileEntry != nullptr)
	{
		return CreatePakFileHandle(Filename, PakFile, FileEntry);
	}
#if !USING_SIGNED_CONTENT
	// Default to the wrapped file system for loose files.
	if (IsNonPakFilenameAllowed(Filename))
	{
		return LowerLevel->OpenRead(Filename, bAllowWrite);
	}
#endif
	return nullptr;
}
/**
 * Copies FileSize bytes from Source to Dest through the caller-provided buffer,
 * one BufferSize-sized chunk at a time.
 *
 * @param Dest       Destination handle, written sequentially.
 * @param Source     Source handle, read sequentially.
 * @param FileSize   Total number of bytes to copy.
 * @param Buffer     Scratch buffer of at least BufferSize bytes.
 * @param BufferSize Chunk size for each read/write pair.
 * @return true if every chunk was read and written successfully.
 */
bool FPakPlatformFile::BufferedCopyFile(IFileHandle& Dest, IFileHandle& Source, const int64 FileSize, uint8* Buffer, const int64 BufferSize) const
{
	for (int64 BytesLeft = FileSize; BytesLeft > 0; )
	{
		const int64 ChunkSize = FMath::Min(BufferSize, BytesLeft);
		// Short-circuit: the write is skipped if the read fails, matching the
		// original read-then-write, bail-on-first-failure behavior.
		if (!(Source.Read(Buffer, ChunkSize) && Dest.Write(Buffer, ChunkSize)))
		{
			return false;
		}
		BytesLeft -= ChunkSize;
	}
	return true;
}
bool FPakPlatformFile::CopyFile(const TCHAR* To, const TCHAR* From, EPlatformFileRead ReadFlags, EPlatformFileWrite WriteFlags)
{
bool Result = false;
FPakFile* PakFile = NULL;
const FPakEntry* FileEntry = FindFileInPakFiles(From, &PakFile);
if (FileEntry != NULL)
{
// Copy from pak to LowerLevel->
// Create handles both files.
TAutoPtr<IFileHandle> DestHandle(LowerLevel->OpenWrite(To, false, (WriteFlags & EPlatformFileWrite::AllowRead) != EPlatformFileWrite::None));
TAutoPtr<IFileHandle> SourceHandle(CreatePakFileHandle(From, PakFile, FileEntry));
if (DestHandle.IsValid() && SourceHandle.IsValid())
{
const int64 BufferSize = 64 * 1024; // Copy in 64K chunks.
uint8* Buffer = (uint8*)FMemory::Malloc(BufferSize);
Result = BufferedCopyFile(*DestHandle, *SourceHandle, SourceHandle->Size(), Buffer, BufferSize);
FMemory::Free(Buffer);
}
}
else
{
Result = LowerLevel->CopyFile(To, From, ReadFlags, WriteFlags);
}
return Result;
}
/**
 * Computes the hash used for pak chunk validation: a CRC32 over the raw bytes.
 *
 * @param InData     Pointer to the chunk data.
 * @param InDataSize Number of bytes to hash.
 * @return CRC32 of the buffer.
 */
uint32 ComputePakChunkHash(const uint8* InData, const int64 InDataSize)
{
	const uint32 ChunkCrc = FCrc::MemCrc32(InData, InDataSize);
	return ChunkCrc;
}
/**
 * Module for the pak file.
 *
 * Exposes the pak-aware platform file implementation to the module manager so
 * the engine can layer it over the native file system.
 */
class FPakFileModule : public IPlatformFileModule
{
public:
	virtual IPlatformFile* GetPlatformFile() override
	{
		// Function-local static: a single FPakPlatformFile instance shared by all
		// callers, destroyed automatically (via TScopedPointer) at static teardown.
		static TScopedPointer<IPlatformFile> AutoDestroySingleton(new FPakPlatformFile());
		return AutoDestroySingleton.GetOwnedPointer();
	}
};
IMPLEMENT_MODULE(FPakFileModule, PakFile);