Bug 1177278 - Large OOMs in CacheFileMetadata::WriteMetadata, r=honzab

This commit is contained in:
Michal Novotny 2015-07-22 15:23:16 +02:00
parent 1143947892
commit 6de49b5bc3
5 changed files with 85 additions and 21 deletions

View File

@ -32,6 +32,9 @@ namespace net {
// Initial elements buffer size.
#define kInitialBufSize 64
// Max size of elements in bytes.
#define kMaxElementsSize 64*1024
#define kCacheEntryVersion 1
#define NOW_SECONDS() (uint32_t(PR_Now() / PR_USEC_PER_SEC))
@ -236,6 +239,17 @@ CacheFileMetadata::ReadMetadata(CacheFileMetadataListener *aListener)
return NS_OK;
}
uint32_t
CacheFileMetadata::CalcMetadataSize(uint32_t aElementsSize, uint32_t aHashCount)
{
  // Computes the total on-disk size of the metadata blob for the given
  // elements size and chunk-hash count. The layout mirrors what
  // WriteMetadata() serializes, in order:
  //   metadata hash | chunk-hash array | fixed header | key + NUL |
  //   elements | trailing offset
  uint32_t total = sizeof(uint32_t);                  // hash of the metadata
  total += aHashCount * sizeof(CacheHash::Hash16_t);  // array of chunk hashes
  total += sizeof(CacheFileMetadataHeader);           // metadata header
  total += mKey.Length() + 1;                         // key with trailing null
  total += aElementsSize;                             // elements
  total += sizeof(uint32_t);                          // offset
  return total;
}
nsresult
CacheFileMetadata::WriteMetadata(uint32_t aOffset,
CacheFileMetadataListener *aListener)
@ -250,10 +264,11 @@ CacheFileMetadata::WriteMetadata(uint32_t aOffset,
mIsDirty = false;
mWriteBuf = static_cast<char *>(moz_xmalloc(sizeof(uint32_t) +
mHashCount * sizeof(CacheHash::Hash16_t) +
sizeof(CacheFileMetadataHeader) + mKey.Length() + 1 +
mElementsSize + sizeof(uint32_t)));
mWriteBuf = static_cast<char *>(malloc(CalcMetadataSize(mElementsSize,
mHashCount)));
if (!mWriteBuf) {
return NS_ERROR_OUT_OF_MEMORY;
}
char *p = mWriteBuf + sizeof(uint32_t);
memcpy(p, mHashArray, mHashCount * sizeof(CacheHash::Hash16_t));
@ -406,6 +421,8 @@ CacheFileMetadata::SetElement(const char *aKey, const char *aValue)
MarkDirty();
nsresult rv;
const uint32_t keySize = strlen(aKey) + 1;
char *pos = const_cast<char *>(GetElement(aKey));
@ -431,7 +448,10 @@ CacheFileMetadata::SetElement(const char *aKey, const char *aValue)
// Update the value in place
newSize -= oldValueSize;
EnsureBuffer(newSize);
rv = EnsureBuffer(newSize);
if (NS_FAILED(rv)) {
return rv;
}
// Move the remainder to the right place
pos = mBuf + offset;
@ -439,7 +459,10 @@ CacheFileMetadata::SetElement(const char *aKey, const char *aValue)
} else {
// allocate new meta data element
newSize += keySize;
EnsureBuffer(newSize);
rv = EnsureBuffer(newSize);
if (NS_FAILED(rv)) {
return rv;
}
// Add after last element
pos = mBuf + mElementsSize;
@ -665,7 +688,7 @@ CacheFileMetadata::OnDataRead(CacheFileHandle *aHandle, char *aBuf,
if (realOffset >= size) {
LOG(("CacheFileMetadata::OnDataRead() - Invalid realOffset, creating "
"empty metadata. [this=%p, realOffset=%d, size=%lld]", this,
"empty metadata. [this=%p, realOffset=%u, size=%lld]", this,
realOffset, size));
InitEmptyMetadata();
@ -675,6 +698,21 @@ CacheFileMetadata::OnDataRead(CacheFileHandle *aHandle, char *aBuf,
return NS_OK;
}
uint32_t maxHashCount = size / kChunkSize;
uint32_t maxMetadataSize = CalcMetadataSize(kMaxElementsSize, maxHashCount);
if (size - realOffset > maxMetadataSize) {
LOG(("CacheFileMetadata::OnDataRead() - Invalid realOffset, metadata would "
"be too big, creating empty metadata. [this=%p, realOffset=%u, "
"maxMetadataSize=%u, size=%lld]", this, realOffset, maxMetadataSize,
size));
InitEmptyMetadata();
mListener.swap(listener);
listener->OnMetadataRead(NS_OK);
return NS_OK;
}
uint32_t usedOffset = size - mBufSize;
if (realOffset < usedOffset) {
@ -932,9 +970,13 @@ CacheFileMetadata::CheckElements(const char *aBuf, uint32_t aSize)
return NS_OK;
}
void
nsresult
CacheFileMetadata::EnsureBuffer(uint32_t aSize)
{
if (aSize > kMaxElementsSize) {
return NS_ERROR_FAILURE;
}
if (mBufSize < aSize) {
if (mAllocExactSize) {
// If this is not the only allocation, use power of two for following
@ -955,11 +997,17 @@ CacheFileMetadata::EnsureBuffer(uint32_t aSize)
aSize = kInitialBufSize;
}
char *newBuf = static_cast<char *>(realloc(mBuf, aSize));
if (!newBuf) {
return NS_ERROR_OUT_OF_MEMORY;
}
mBufSize = aSize;
mBuf = static_cast<char *>(moz_xrealloc(mBuf, mBufSize));
mBuf = newBuf;
DoMemoryReport(MemoryUsage());
}
return NS_OK;
}
nsresult

View File

@ -121,6 +121,7 @@ public:
nsresult GetKey(nsACString &_retval);
nsresult ReadMetadata(CacheFileMetadataListener *aListener);
uint32_t CalcMetadataSize(uint32_t aElementsSize, uint32_t aHashCount);
nsresult WriteMetadata(uint32_t aOffset,
CacheFileMetadataListener *aListener);
nsresult SyncReadMetadata(nsIFile *aFile);
@ -171,7 +172,7 @@ private:
void InitEmptyMetadata();
nsresult ParseMetadata(uint32_t aMetaOffset, uint32_t aBufOffset, bool aHaveKey);
nsresult CheckElements(const char *aBuf, uint32_t aSize);
void EnsureBuffer(uint32_t aSize);
nsresult EnsureBuffer(uint32_t aSize);
nsresult ParseKey(const nsACString &aKey);
nsRefPtr<CacheFileHandle> mHandle;

View File

@ -103,6 +103,18 @@ CacheFileOutputStream::Write(const char * aBuf, uint32_t aCount,
return NS_ERROR_FILE_TOO_BIG;
}
// We use a 64-bit offset when accessing the file, but unfortunately the
// metadata offset is 32-bit, so we cannot handle data bigger than 4GB.
if (mPos + aCount > PR_UINT32_MAX) {
LOG(("CacheFileOutputStream::Write() - Entry's size exceeds 4GB while it "
"isn't too big according to CacheObserver::EntryIsTooBig(). Failing "
"and dooming the entry. [this=%p]", this));
mFile->DoomLocked(nullptr);
CloseWithStatusLocked(NS_ERROR_FILE_TOO_BIG);
return NS_ERROR_FILE_TOO_BIG;
}
*_retval = aCount;
while (aCount) {

View File

@ -65,11 +65,11 @@ bool CacheObserver::sSmartCacheSizeEnabled = kDefaultSmartCacheSizeEnabled;
static uint32_t const kDefaultPreloadChunkCount = 4;
uint32_t CacheObserver::sPreloadChunkCount = kDefaultPreloadChunkCount;
static uint32_t const kDefaultMaxMemoryEntrySize = 4 * 1024; // 4 MB
uint32_t CacheObserver::sMaxMemoryEntrySize = kDefaultMaxMemoryEntrySize;
static int32_t const kDefaultMaxMemoryEntrySize = 4 * 1024; // 4 MB
int32_t CacheObserver::sMaxMemoryEntrySize = kDefaultMaxMemoryEntrySize;
static uint32_t const kDefaultMaxDiskEntrySize = 50 * 1024; // 50 MB
uint32_t CacheObserver::sMaxDiskEntrySize = kDefaultMaxDiskEntrySize;
static int32_t const kDefaultMaxDiskEntrySize = 50 * 1024; // 50 MB
int32_t CacheObserver::sMaxDiskEntrySize = kDefaultMaxDiskEntrySize;
static uint32_t const kDefaultMaxDiskChunksMemoryUsage = 10 * 1024; // 10MB
uint32_t CacheObserver::sMaxDiskChunksMemoryUsage = kDefaultMaxDiskChunksMemoryUsage;
@ -170,9 +170,9 @@ CacheObserver::AttachToPreferences()
mozilla::Preferences::AddUintVarCache(
&sPreloadChunkCount, "browser.cache.disk.preload_chunk_count", kDefaultPreloadChunkCount);
mozilla::Preferences::AddUintVarCache(
mozilla::Preferences::AddIntVarCache(
&sMaxDiskEntrySize, "browser.cache.disk.max_entry_size", kDefaultMaxDiskEntrySize);
mozilla::Preferences::AddUintVarCache(
mozilla::Preferences::AddIntVarCache(
&sMaxMemoryEntrySize, "browser.cache.memory.max_entry_size", kDefaultMaxMemoryEntrySize);
mozilla::Preferences::AddUintVarCache(
@ -472,9 +472,12 @@ CacheStorageEvictHelper::ClearStorage(bool const aPrivate,
bool const CacheObserver::EntryIsTooBig(int64_t aSize, bool aUsingDisk)
{
// If custom limit is set, check it.
int64_t preferredLimit = aUsingDisk
? static_cast<int64_t>(sMaxDiskEntrySize) << 10
: static_cast<int64_t>(sMaxMemoryEntrySize) << 10;
int64_t preferredLimit = aUsingDisk ? sMaxDiskEntrySize : sMaxMemoryEntrySize;
// do not convert to bytes when the limit is -1, which means no limit
if (preferredLimit > 0) {
preferredLimit <<= 10;
}
if (preferredLimit != -1 && aSize > preferredLimit)
return true;

View File

@ -90,8 +90,8 @@ private:
static uint32_t sDiskFreeSpaceHardLimit;
static bool sSmartCacheSizeEnabled;
static uint32_t sPreloadChunkCount;
static uint32_t sMaxMemoryEntrySize;
static uint32_t sMaxDiskEntrySize;
static int32_t sMaxMemoryEntrySize;
static int32_t sMaxDiskEntrySize;
static uint32_t sMaxDiskChunksMemoryUsage;
static uint32_t sMaxDiskPriorityChunksMemoryUsage;
static uint32_t sCompressionLevel;