Bug 1013395 - HTTP cache v2: have a limit for write backlog, r=honzab

This commit is contained in:
Michal Novotny 2014-07-10 07:59:29 +02:00
parent f059293f00
commit 5b6c995de2
15 changed files with 391 additions and 95 deletions

View File

@ -58,6 +58,17 @@ pref("browser.cache.memory.enable", true);
// Max-size (in KB) for entries in memory cache. Set to -1 for no limit.
// (Note: entries bigger than 90% of the mem-cache are never cached)
pref("browser.cache.memory.max_entry_size", 5120);
// Memory limit (in kB) for new cache data not yet written to disk. Writes to
// the cache are buffered and written to disk in the background with low
// priority. With slow persistent storage these buffers may grow when data
// arrives from the network faster than it can be written out. When the limit
// is exceeded, new writes simply fail. We have two buckets, one for important
// data (priority) such as html, css, fonts and js, and one for other data such
// as images, video, etc.
// Note: 0 means no limit.
pref("browser.cache.disk.max_chunks_memory_usage", 10240);
pref("browser.cache.disk.max_priority_chunks_memory_usage", 10240);
pref("browser.cache.disk_cache_ssl", true);
// 0 = once-per-session, 1 = each-time, 2 = never, 3 = when-appropriate/automatically
pref("browser.cache.check_doc_frequency", 3);

View File

@ -13,6 +13,7 @@
#include <algorithm>
#include "nsComponentManagerUtils.h"
#include "nsProxyRelease.h"
#include "mozilla/Telemetry.h"
// When CACHE_CHUNKS is defined we always cache unused chunks in mCacheChunks.
// When it is not defined, we always release the chunks ASAP, i.e. we cache
@ -178,25 +179,17 @@ CacheFile::CacheFile()
, mReady(false)
, mMemoryOnly(false)
, mOpenAsMemoryOnly(false)
, mPriority(false)
, mDataAccessed(false)
, mDataIsDirty(false)
, mWritingMetadata(false)
, mPreloadWithoutInputStreams(true)
, mPreloadChunkCount(0)
, mStatus(NS_OK)
, mDataSize(-1)
, mOutput(nullptr)
{
LOG(("CacheFile::CacheFile() [this=%p]", this));
// Some consumers (at least nsHTTPCompressConv) assume that Read() can read
// as much data as was announced by Available().
// CacheFileInputStream::Available() also uses preloaded chunks to compute the
// number of available bytes in the input stream, so we have to make sure the
// preloadChunkCount won't change during the CacheFile's lifetime. Otherwise we
// could release some cached chunks that were used to calculate the available
// bytes but would no longer be available during a later call to
// CacheFileInputStream::Read().
mPreloadChunkCount = CacheObserver::PreloadChunkCount();
}
CacheFile::~CacheFile()
@ -224,9 +217,21 @@ CacheFile::Init(const nsACString &aKey,
mKey = aKey;
mOpenAsMemoryOnly = mMemoryOnly = aMemoryOnly;
mPriority = aPriority;
// Some consumers (at least nsHTTPCompressConv) assume that Read() can read
// as much data as was announced by Available().
// CacheFileInputStream::Available() also uses preloaded chunks to compute the
// number of available bytes in the input stream, so we have to make sure the
// preloadChunkCount won't change during the CacheFile's lifetime. Otherwise we
// could release some cached chunks that were used to calculate the available
// bytes but would no longer be available during a later call to
// CacheFileInputStream::Read().
mPreloadChunkCount = CacheObserver::PreloadChunkCount();
LOG(("CacheFile::Init() [this=%p, key=%s, createNew=%d, memoryOnly=%d, "
"listener=%p]", this, mKey.get(), aCreateNew, aMemoryOnly, aCallback));
"priority=%d, listener=%p]", this, mKey.get(), aCreateNew, aMemoryOnly,
aPriority, aCallback));
if (mMemoryOnly) {
MOZ_ASSERT(!aCallback);
@ -250,8 +255,9 @@ CacheFile::Init(const nsACString &aKey,
flags = CacheFileIOManager::CREATE;
}
if (aPriority)
if (mPriority) {
flags |= CacheFileIOManager::PRIORITY;
}
mOpeningFile = true;
mListener = aCallback;
@ -307,7 +313,6 @@ CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk *aChunk)
if (NS_FAILED(aResult)) {
SetError(aResult);
CacheFileIOManager::DoomFile(mHandle, nullptr);
}
if (HaveChunkListeners(index)) {
@ -334,7 +339,6 @@ CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
if (NS_FAILED(aResult)) {
SetError(aResult);
CacheFileIOManager::DoomFile(mHandle, nullptr);
}
if (NS_SUCCEEDED(aResult) && !aChunk->IsDirty()) {
@ -494,6 +498,9 @@ CacheFile::OnFileOpened(CacheFileHandle *aHandle, nsresult aResult)
}
else {
mHandle = aHandle;
if (NS_FAILED(mStatus)) {
CacheFileIOManager::DoomFile(mHandle, nullptr);
}
if (mMetadata) {
InitIndexEntry();
@ -1082,7 +1089,7 @@ CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
return NS_ERROR_UNEXPECTED;
}
chunk = new CacheFileChunk(this, aIndex);
chunk = new CacheFileChunk(this, aIndex, aCaller == WRITER);
mChunks.Put(aIndex, chunk);
chunk->mActiveChunk = true;
@ -1113,14 +1120,14 @@ CacheFile::GetChunkLocked(uint32_t aIndex, ECallerType aCaller,
} else if (off == mDataSize) {
if (aCaller == WRITER) {
// this listener is going to write to the chunk
chunk = new CacheFileChunk(this, aIndex);
chunk = new CacheFileChunk(this, aIndex, true);
mChunks.Put(aIndex, chunk);
chunk->mActiveChunk = true;
LOG(("CacheFile::GetChunkLocked() - Created new empty chunk %p [this=%p]",
chunk.get(), this));
chunk->InitNew(this);
chunk->InitNew();
mMetadata->SetHash(aIndex, chunk->Hash());
if (HaveChunkListeners(aIndex)) {
@ -1340,6 +1347,10 @@ CacheFile::DeactivateChunk(CacheFileChunk *aChunk)
}
#endif
if (NS_FAILED(chunk->GetStatus())) {
SetError(chunk->GetStatus());
}
if (NS_FAILED(mStatus)) {
// Don't write any chunk to disk since this entry will be doomed
LOG(("CacheFile::DeactivateChunk() - Releasing chunk because of status "
@ -1364,7 +1375,6 @@ CacheFile::DeactivateChunk(CacheFileChunk *aChunk)
RemoveChunkInternal(chunk, false);
SetError(rv);
CacheFileIOManager::DoomFile(mHandle, nullptr);
return rv;
}
@ -1460,12 +1470,40 @@ CacheFile::BytesFromChunk(uint32_t aIndex)
return std::min(advance, tail);
}
static uint32_t
StatusToTelemetryEnum(nsresult aStatus)
{
if (NS_SUCCEEDED(aStatus)) {
return 0;
}
switch (aStatus) {
case NS_BASE_STREAM_CLOSED:
return 0; // Log this as a success
case NS_ERROR_OUT_OF_MEMORY:
return 2;
case NS_ERROR_FILE_DISK_FULL:
return 3;
case NS_ERROR_FILE_CORRUPTED:
return 4;
case NS_ERROR_FILE_NOT_FOUND:
return 5;
case NS_BINDING_ABORTED:
return 6;
default:
return 1; // other error
}
NS_NOTREACHED("We should never get here");
}
nsresult
CacheFile::RemoveInput(CacheFileInputStream *aInput)
CacheFile::RemoveInput(CacheFileInputStream *aInput, nsresult aStatus)
{
CacheFileAutoLock lock(this);
LOG(("CacheFile::RemoveInput() [this=%p, input=%p]", this, aInput));
LOG(("CacheFile::RemoveInput() [this=%p, input=%p, status=0x%08x]", this,
aInput, aStatus));
DebugOnly<bool> found;
found = mInputs.RemoveElement(aInput);
@ -1480,15 +1518,19 @@ CacheFile::RemoveInput(CacheFileInputStream *aInput)
// chunks that won't be used anymore.
mCachedChunks.Enumerate(&CacheFile::CleanUpCachedChunks, this);
Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_INPUT_STREAM_STATUS,
StatusToTelemetryEnum(aStatus));
return NS_OK;
}
nsresult
CacheFile::RemoveOutput(CacheFileOutputStream *aOutput)
CacheFile::RemoveOutput(CacheFileOutputStream *aOutput, nsresult aStatus)
{
AssertOwnsLock();
LOG(("CacheFile::RemoveOutput() [this=%p, output=%p]", this, aOutput));
LOG(("CacheFile::RemoveOutput() [this=%p, output=%p, status=0x%08x]", this,
aOutput, aStatus));
if (mOutput != aOutput) {
LOG(("CacheFile::RemoveOutput() - This output was already removed, ignoring"
@ -1507,6 +1549,9 @@ CacheFile::RemoveOutput(CacheFileOutputStream *aOutput)
// Notify close listener as the last action
aOutput->NotifyCloseListener();
Telemetry::Accumulate(Telemetry::NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS,
StatusToTelemetryEnum(aStatus));
return NS_OK;
}
@ -1849,8 +1894,13 @@ CacheFile::PadChunkWithZeroes(uint32_t aChunkIdx)
void
CacheFile::SetError(nsresult aStatus)
{
AssertOwnsLock();
if (NS_SUCCEEDED(mStatus)) {
mStatus = aStatus;
if (mHandle) {
CacheFileIOManager::DoomFile(mHandle, nullptr);
}
}
}

View File

@ -145,8 +145,8 @@ private:
int64_t BytesFromChunk(uint32_t aIndex);
nsresult RemoveInput(CacheFileInputStream *aInput);
nsresult RemoveOutput(CacheFileOutputStream *aOutput);
nsresult RemoveInput(CacheFileInputStream *aInput, nsresult aStatus);
nsresult RemoveOutput(CacheFileOutputStream *aOutput, nsresult aStatus);
nsresult NotifyChunkListener(CacheFileChunkListener *aCallback,
nsIEventTarget *aTarget,
nsresult aResult,
@ -192,6 +192,7 @@ private:
bool mReady;
bool mMemoryOnly;
bool mOpenAsMemoryOnly;
bool mPriority;
bool mDataAccessed;
bool mDataIsDirty;
bool mWritingMetadata;

View File

@ -101,7 +101,8 @@ NS_INTERFACE_MAP_BEGIN(CacheFileChunk)
NS_INTERFACE_MAP_ENTRY(nsISupports)
NS_INTERFACE_MAP_END_THREADSAFE
CacheFileChunk::CacheFileChunk(CacheFile *aFile, uint32_t aIndex)
CacheFileChunk::CacheFileChunk(CacheFile *aFile, uint32_t aIndex,
bool aInitByWriter)
: CacheMemoryConsumer(aFile->mOpenAsMemoryOnly ? MEMORY_ONLY : DONT_REPORT)
, mIndex(aIndex)
, mState(INITIAL)
@ -109,6 +110,9 @@ CacheFileChunk::CacheFileChunk(CacheFile *aFile, uint32_t aIndex)
, mIsDirty(false)
, mActiveChunk(false)
, mDataSize(0)
, mReportedAllocation(0)
, mLimitAllocation(!aFile->mOpenAsMemoryOnly && aInitByWriter)
, mIsPriority(aFile->mPriority)
, mBuf(nullptr)
, mBufSize(0)
, mRWBuf(nullptr)
@ -116,7 +120,8 @@ CacheFileChunk::CacheFileChunk(CacheFile *aFile, uint32_t aIndex)
, mReadHash(0)
, mFile(aFile)
{
LOG(("CacheFileChunk::CacheFileChunk() [this=%p]", this));
LOG(("CacheFileChunk::CacheFileChunk() [this=%p, index=%u, initByWriter=%d]",
this, aIndex, aInitByWriter));
MOZ_COUNT_CTOR(CacheFileChunk);
}
@ -129,33 +134,32 @@ CacheFileChunk::~CacheFileChunk()
free(mBuf);
mBuf = nullptr;
mBufSize = 0;
ChunkAllocationChanged();
}
if (mRWBuf) {
free(mRWBuf);
mRWBuf = nullptr;
mRWBufSize = 0;
ChunkAllocationChanged();
}
}
void
CacheFileChunk::InitNew(CacheFileChunkListener *aCallback)
CacheFileChunk::InitNew()
{
mFile->AssertOwnsLock();
LOG(("CacheFileChunk::InitNew() [this=%p, listener=%p]", this, aCallback));
LOG(("CacheFileChunk::InitNew() [this=%p]", this));
MOZ_ASSERT(mState == INITIAL);
MOZ_ASSERT(NS_SUCCEEDED(mStatus));
MOZ_ASSERT(!mBuf);
MOZ_ASSERT(!mRWBuf);
MOZ_ASSERT(!mIsDirty);
MOZ_ASSERT(mDataSize == 0);
mBuf = static_cast<char *>(moz_xmalloc(kMinBufSize));
mBufSize = kMinBufSize;
mDataSize = 0;
mState = READY;
mIsDirty = true;
DoMemoryReport(MemorySize());
}
nsresult
@ -169,14 +173,28 @@ CacheFileChunk::Read(CacheFileHandle *aHandle, uint32_t aLen,
this, aHandle, aLen, aCallback));
MOZ_ASSERT(mState == INITIAL);
MOZ_ASSERT(NS_SUCCEEDED(mStatus));
MOZ_ASSERT(!mBuf);
MOZ_ASSERT(!mRWBuf);
MOZ_ASSERT(aLen);
nsresult rv;
mRWBuf = static_cast<char *>(moz_xmalloc(aLen));
mRWBufSize = aLen;
mState = READING;
if (CanAllocate(aLen)) {
mRWBuf = static_cast<char *>(moz_malloc(aLen));
if (mRWBuf) {
mRWBufSize = aLen;
ChunkAllocationChanged();
}
}
if (!mRWBuf) {
// Allocation was denied or failed
SetError(NS_ERROR_OUT_OF_MEMORY);
return mStatus;
}
DoMemoryReport(MemorySize());
@ -186,7 +204,6 @@ CacheFileChunk::Read(CacheFileHandle *aHandle, uint32_t aLen,
rv = mIndex ? NS_ERROR_FILE_CORRUPTED : NS_ERROR_FILE_NOT_FOUND;
SetError(rv);
} else {
mState = READING;
mListener = aCallback;
mDataSize = aLen;
mReadHash = aHash;
@ -205,12 +222,14 @@ CacheFileChunk::Write(CacheFileHandle *aHandle,
this, aHandle, aCallback));
MOZ_ASSERT(mState == READY);
MOZ_ASSERT(NS_SUCCEEDED(mStatus));
MOZ_ASSERT(!mRWBuf);
MOZ_ASSERT(mBuf);
MOZ_ASSERT(mDataSize); // Don't write chunk when it is empty
nsresult rv;
mState = WRITING;
mRWBuf = mBuf;
mRWBufSize = mBufSize;
mBuf = nullptr;
@ -221,7 +240,6 @@ CacheFileChunk::Write(CacheFileHandle *aHandle,
if (NS_WARN_IF(NS_FAILED(rv))) {
SetError(rv);
} else {
mState = WRITING;
mListener = aCallback;
mIsDirty = false;
}
@ -324,11 +342,10 @@ CacheFileChunk::Hash()
{
mFile->AssertOwnsLock();
MOZ_ASSERT(mBuf);
MOZ_ASSERT(!mListener);
MOZ_ASSERT(IsReady());
return CacheHash::Hash16(BufForReading(), mDataSize);
return CacheHash::Hash16(mDataSize ? BufForReading() : nullptr, mDataSize);
}
uint32_t
@ -349,7 +366,7 @@ CacheFileChunk::UpdateDataSize(uint32_t aOffset, uint32_t aLen, bool aEOF)
// UpdateDataSize() is called only when we've written some data to the chunk
// and we never write data anymore once some error occurs.
MOZ_ASSERT(mState != ERROR);
MOZ_ASSERT(NS_SUCCEEDED(mStatus));
LOG(("CacheFileChunk::UpdateDataSize() [this=%p, offset=%d, len=%d, EOF=%d]",
this, aOffset, aLen, aEOF));
@ -413,19 +430,22 @@ CacheFileChunk::OnDataWritten(CacheFileHandle *aHandle, const char *aBuf,
if (NS_WARN_IF(NS_FAILED(aResult))) {
SetError(aResult);
} else {
mState = READY;
}
mState = READY;
if (!mBuf) {
mBuf = mRWBuf;
mBufSize = mRWBufSize;
mRWBuf = nullptr;
mRWBufSize = 0;
} else {
free(mRWBuf);
mRWBuf = nullptr;
mRWBufSize = 0;
ChunkAllocationChanged();
}
mRWBuf = nullptr;
mRWBufSize = 0;
DoMemoryReport(MemorySize());
@ -473,23 +493,56 @@ CacheFileChunk::OnDataRead(CacheFileHandle *aHandle, char *aBuf,
this));
// Merge data with write buffer
if (mRWBufSize < mBufSize) {
mRWBuf = static_cast<char *>(moz_xrealloc(mRWBuf, mBufSize));
mRWBufSize = mBufSize;
}
if (mRWBufSize >= mBufSize) {
// The new data will fit into the buffer that contains data read
// from the disk. Simply copy the valid pieces.
mValidityMap.Log();
for (uint32_t i = 0; i < mValidityMap.Length(); i++) {
if (mValidityMap[i].Offset() + mValidityMap[i].Len() > mBufSize) {
MOZ_CRASH("Unexpected error in validity map!");
}
memcpy(mRWBuf + mValidityMap[i].Offset(),
mBuf + mValidityMap[i].Offset(), mValidityMap[i].Len());
}
mValidityMap.Clear();
mValidityMap.Log();
for (uint32_t i = 0 ; i < mValidityMap.Length() ; i++) {
memcpy(mRWBuf + mValidityMap[i].Offset(),
mBuf + mValidityMap[i].Offset(), mValidityMap[i].Len());
}
mValidityMap.Clear();
free(mBuf);
mBuf = mRWBuf;
mBufSize = mRWBufSize;
mRWBuf = nullptr;
mRWBufSize = 0;
ChunkAllocationChanged();
} else {
// Buffer holding the new data is larger. Use it as the destination
// buffer to avoid reallocating mRWBuf. We need to copy those pieces
// from mRWBuf which are not valid in mBuf.
uint32_t invalidOffset = 0;
uint32_t invalidLength;
mValidityMap.Log();
for (uint32_t i = 0; i < mValidityMap.Length(); i++) {
MOZ_ASSERT(invalidOffset <= mValidityMap[i].Offset());
invalidLength = mValidityMap[i].Offset() - invalidOffset;
if (invalidLength > 0) {
if (invalidOffset + invalidLength > mRWBufSize) {
MOZ_CRASH("Unexpected error in validity map!");
}
memcpy(mBuf + invalidOffset, mRWBuf + invalidOffset,
invalidLength);
}
invalidOffset = mValidityMap[i].Offset() + mValidityMap[i].Len();
}
if (invalidOffset < mRWBufSize) {
invalidLength = mRWBufSize - invalidOffset;
memcpy(mBuf + invalidOffset, mRWBuf + invalidOffset,
invalidLength);
}
mValidityMap.Clear();
free(mBuf);
mBuf = mRWBuf;
mBufSize = mRWBufSize;
mRWBuf = nullptr;
mRWBufSize = 0;
free(mRWBuf);
mRWBuf = nullptr;
mRWBufSize = 0;
ChunkAllocationChanged();
}
DoMemoryReport(MemorySize());
}
@ -500,10 +553,9 @@ CacheFileChunk::OnDataRead(CacheFileHandle *aHandle, char *aBuf,
aResult = mIndex ? NS_ERROR_FILE_CORRUPTED : NS_ERROR_FILE_NOT_FOUND;
SetError(aResult);
mDataSize = 0;
} else {
mState = READY;
}
mState = READY;
mListener.swap(listener);
}
@ -560,13 +612,14 @@ CacheFileChunk::GetStatus()
void
CacheFileChunk::SetError(nsresult aStatus)
{
if (NS_SUCCEEDED(mStatus)) {
MOZ_ASSERT(mState != ERROR);
mStatus = aStatus;
mState = ERROR;
} else {
MOZ_ASSERT(mState == ERROR);
MOZ_ASSERT(NS_FAILED(aStatus));
if (NS_FAILED(mStatus)) {
// Remember only the first error code.
return;
}
mStatus = aStatus;
}
char *
@ -576,6 +629,7 @@ CacheFileChunk::BufForWriting() const
MOZ_ASSERT(mBuf); // Writer should always first call EnsureBufSize()
MOZ_ASSERT(NS_SUCCEEDED(mStatus));
MOZ_ASSERT((mState == READY && !mRWBuf) ||
(mState == WRITING && mRWBuf) ||
(mState == READING && mRWBuf));
@ -594,17 +648,18 @@ CacheFileChunk::BufForReading() const
return mBuf ? mBuf : mRWBuf;
}
void
MOZ_WARN_UNUSED_RESULT nsresult
CacheFileChunk::EnsureBufSize(uint32_t aBufSize)
{
mFile->AssertOwnsLock();
// EnsureBufSize() is called only when we want to write some data to the chunk
// and we never write data anymore once some error occurs.
MOZ_ASSERT(mState != ERROR);
MOZ_ASSERT(NS_SUCCEEDED(mStatus));
if (mBufSize >= aBufSize)
return;
if (mBufSize >= aBufSize) {
return NS_OK;
}
bool copy = false;
if (!mBuf && mState == WRITING) {
@ -629,13 +684,27 @@ CacheFileChunk::EnsureBufSize(uint32_t aBufSize)
const uint32_t maxBufSize = kChunkSize;
aBufSize = clamped(aBufSize, minBufSize, maxBufSize);
mBuf = static_cast<char *>(moz_xrealloc(mBuf, aBufSize));
if (!CanAllocate(aBufSize - mBufSize)) {
SetError(NS_ERROR_OUT_OF_MEMORY);
return mStatus;
}
char *newBuf = static_cast<char *>(moz_realloc(mBuf, aBufSize));
if (!newBuf) {
SetError(NS_ERROR_OUT_OF_MEMORY);
return mStatus;
}
mBuf = newBuf;
mBufSize = aBufSize;
ChunkAllocationChanged();
if (copy)
memcpy(mBuf, mRWBuf, mRWBufSize);
DoMemoryReport(MemorySize());
return NS_OK;
}
// Memory reporting
@ -657,5 +726,50 @@ CacheFileChunk::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
}
bool
CacheFileChunk::CanAllocate(uint32_t aSize)
{
if (!mLimitAllocation) {
return true;
}
LOG(("CacheFileChunk::CanAllocate() [this=%p, size=%u]", this, aSize));
uint32_t limit = CacheObserver::MaxDiskChunksMemoryUsage(mIsPriority);
if (limit == 0) {
return true;
}
uint32_t usage = ChunksMemoryUsage();
if (usage + aSize > limit) {
LOG(("CacheFileChunk::CanAllocate() - Returning false. [this=%p]", this));
return false;
}
return true;
}
void
CacheFileChunk::ChunkAllocationChanged()
{
if (!mLimitAllocation) {
return;
}
ChunksMemoryUsage() -= mReportedAllocation;
mReportedAllocation = mBufSize + mRWBufSize;
ChunksMemoryUsage() += mReportedAllocation;
LOG(("CacheFileChunk::ChunkAllocationChanged() - %s chunks usage %u "
"[this=%p]", mIsPriority ? "Priority" : "Normal",
static_cast<uint32_t>(ChunksMemoryUsage()), this));
}
mozilla::Atomic<uint32_t>& CacheFileChunk::ChunksMemoryUsage()
{
static mozilla::Atomic<uint32_t> chunksMemoryUsage(0);
static mozilla::Atomic<uint32_t> prioChunksMemoryUsage(0);
return mIsPriority ? prioChunksMemoryUsage : chunksMemoryUsage;
}
} // net
} // mozilla
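
Taken together, CanAllocate(), ChunkAllocationChanged() and ChunksMemoryUsage() keep a shared per-bucket counter equal to the sum of mBufSize + mRWBufSize over all limited chunks. A condensed stand-alone sketch of that check-grow-republish pattern; the class and helper names below are invented for illustration and are not part of the patch:

  #include <atomic>
  #include <cstdint>
  #include <cstdlib>

  static std::atomic<uint32_t> gBucketUsage{0};  // shared counter for one bucket

  class LimitedBuffer {
  public:
    explicit LimitedBuffer(uint32_t aLimitBytes) : mLimit(aLimitBytes) {}
    ~LimitedBuffer() { free(mData); Republish(0); }

    // Grow only if the whole bucket stays under the limit, mirroring
    // CanAllocate() followed by ChunkAllocationChanged() above.
    bool EnsureSize(uint32_t aSize)
    {
      if (aSize <= mSize) {
        return true;
      }
      if (mLimit && gBucketUsage.load() + (aSize - mSize) > mLimit) {
        return false;  // over budget -> caller reports NS_ERROR_OUT_OF_MEMORY
      }
      char *p = static_cast<char *>(realloc(mData, aSize));
      if (!p) {
        return false;  // genuine OOM
      }
      mData = p;
      Republish(aSize);  // counter += (new allocation - old allocation)
      return true;
    }

  private:
    void Republish(uint32_t aNewSize)
    {
      gBucketUsage -= mSize;
      gBucketUsage += aNewSize;
      mSize = aNewSize;
    }

    char *mData = nullptr;
    uint32_t mSize = 0;
    uint32_t mLimit;
  };

The real code publishes mBufSize + mRWBufSize instead of a single size and picks one of two counters based on mIsPriority.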

View File

@ -70,9 +70,9 @@ public:
NS_DECL_THREADSAFE_ISUPPORTS
bool DispatchRelease();
CacheFileChunk(CacheFile *aFile, uint32_t aIndex);
CacheFileChunk(CacheFile *aFile, uint32_t aIndex, bool aInitByWriter);
void InitNew(CacheFileChunkListener *aCallback);
void InitNew();
nsresult Read(CacheFileHandle *aHandle, uint32_t aLen,
CacheHash::Hash16_t aHash,
CacheFileChunkListener *aCallback);
@ -103,7 +103,7 @@ public:
char * BufForWriting() const;
const char * BufForReading() const;
void EnsureBufSize(uint32_t aBufSize);
nsresult EnsureBufSize(uint32_t aBufSize);
uint32_t MemorySize() const { return sizeof(CacheFileChunk) + mRWBufSize + mBufSize; }
// Memory reporting
@ -117,12 +117,15 @@ private:
virtual ~CacheFileChunk();
bool CanAllocate(uint32_t aSize);
void ChunkAllocationChanged();
mozilla::Atomic<uint32_t>& ChunksMemoryUsage();
enum EState {
INITIAL = 0,
READING = 1,
WRITING = 2,
READY = 3,
ERROR = 4
READY = 3
};
uint32_t mIndex;
@ -135,6 +138,11 @@ private:
// lock.
uint32_t mDataSize;
uint32_t mReportedAllocation;
bool const mLimitAllocation : 1; // Whether this chunk respects the limit on
// disk chunk memory usage.
bool const mIsPriority : 1;
char *mBuf;
uint32_t mBufSize;

View File

@ -28,7 +28,7 @@ CacheFileInputStream::Release()
}
if (count == 1) {
mFile->RemoveInput(this);
mFile->RemoveInput(this, mStatus);
}
return count;
@ -153,13 +153,15 @@ CacheFileInputStream::ReadSegments(nsWriteSegmentFun aWriter, void *aClosure,
int64_t canRead;
const char *buf;
CanRead(&canRead, &buf);
if (NS_FAILED(mStatus)) {
return mStatus;
}
if (canRead < 0) {
// file was truncated ???
MOZ_ASSERT(false, "SetEOF is currenty not implemented?!");
rv = NS_OK;
}
else if (canRead > 0) {
} else if (canRead > 0) {
uint32_t toRead = std::min(static_cast<uint32_t>(canRead), aCount);
// We need to release the lock to avoid lock re-entering unless the
@ -199,8 +201,7 @@ CacheFileInputStream::ReadSegments(nsWriteSegmentFun aWriter, void *aClosure,
}
rv = NS_OK;
}
else {
} else {
if (mFile->mOutput)
rv = NS_BASE_STREAM_WOULD_BLOCK;
else {
@ -542,7 +543,14 @@ CacheFileInputStream::CanRead(int64_t *aCanRead, const char **aBuf)
uint32_t chunkOffset = mPos - (mPos / kChunkSize) * kChunkSize;
*aCanRead = mChunk->DataSize() - chunkOffset;
*aBuf = mChunk->BufForReading() + chunkOffset;
if (*aCanRead > 0) {
*aBuf = mChunk->BufForReading() + chunkOffset;
} else {
*aBuf = nullptr;
if (NS_FAILED(mChunk->GetStatus())) {
CloseWithStatusLocked(mChunk->GetStatus());
}
}
LOG(("CacheFileInputStream::CanRead() [this=%p, canRead=%lld]",
this, *aCanRead));
@ -603,6 +611,12 @@ CacheFileInputStream::MaybeNotifyListener()
int64_t canRead;
const char *buf;
CanRead(&canRead, &buf);
if (NS_FAILED(mStatus)) {
// CanRead() called CloseWithStatusLocked() which called
// MaybeNotifyListener() so the listener was already notified. Stop here.
MOZ_ASSERT(!mCallback);
return;
}
if (canRead > 0) {
if (!(mCallbackFlags & WAIT_CLOSURE_ONLY))

View File

@ -46,6 +46,9 @@ private:
nsresult CloseWithStatusLocked(nsresult aStatus);
void ReleaseChunk();
void EnsureCorrectChunk(bool aReleaseOnly);
// CanRead() returns a negative value when the output stream truncates the data
// before the input stream's mPos.
void CanRead(int64_t *aCanRead, const char **aBuf);
void NotifyListener();
void MaybeNotifyListener();

View File

@ -27,7 +27,7 @@ CacheFileOutputStream::Release()
mRefCnt = 1;
{
CacheFileAutoLock lock(mFile);
mFile->RemoveOutput(this);
mFile->RemoveOutput(this, mStatus);
}
delete (this);
return 0;
@ -107,15 +107,23 @@ CacheFileOutputStream::Write(const char * aBuf, uint32_t aCount,
while (aCount) {
EnsureCorrectChunk(false);
if (NS_FAILED(mStatus))
if (NS_FAILED(mStatus)) {
return mStatus;
}
FillHole();
if (NS_FAILED(mStatus)) {
return mStatus;
}
uint32_t chunkOffset = mPos - (mPos / kChunkSize) * kChunkSize;
uint32_t canWrite = kChunkSize - chunkOffset;
uint32_t thisWrite = std::min(static_cast<uint32_t>(canWrite), aCount);
mChunk->EnsureBufSize(chunkOffset + thisWrite);
nsresult rv = mChunk->EnsureBufSize(chunkOffset + thisWrite);
if (NS_FAILED(rv)) {
CloseWithStatusLocked(rv);
return rv;
}
memcpy(mChunk->BufForWriting() + chunkOffset, aBuf, thisWrite);
mPos += thisWrite;
@ -194,7 +202,7 @@ CacheFileOutputStream::CloseWithStatusLocked(nsresult aStatus)
NotifyListener();
}
mFile->RemoveOutput(this);
mFile->RemoveOutput(this, mStatus);
return NS_OK;
}
@ -390,7 +398,12 @@ CacheFileOutputStream::FillHole()
LOG(("CacheFileOutputStream::FillHole() - Zeroing hole in chunk %d, range "
"%d-%d [this=%p]", mChunk->Index(), mChunk->DataSize(), pos - 1, this));
mChunk->EnsureBufSize(pos);
nsresult rv = mChunk->EnsureBufSize(pos);
if (NS_FAILED(rv)) {
CloseWithStatusLocked(rv);
return;
}
memset(mChunk->BufForWriting() + mChunk->DataSize(), 0,
pos - mChunk->DataSize());

View File

@ -2283,7 +2283,7 @@ CacheIndex::ProcessJournalEntry(CacheIndexEntry *aEntry, void* aClosure)
{
CacheIndex *index = static_cast<CacheIndex *>(aClosure);
LOG(("CacheFile::ProcessJournalEntry() [hash=%08x%08x%08x%08x%08x]",
LOG(("CacheIndex::ProcessJournalEntry() [hash=%08x%08x%08x%08x%08x]",
LOGSHA1(aEntry->Hash())));
CacheIndexEntry *entry = index->mIndex.GetEntry(*aEntry->Hash());

View File

@ -65,6 +65,12 @@ uint32_t CacheObserver::sMaxMemoryEntrySize = kDefaultMaxMemoryEntrySize;
static uint32_t const kDefaultMaxDiskEntrySize = 50 * 1024; // 50 MB
uint32_t CacheObserver::sMaxDiskEntrySize = kDefaultMaxDiskEntrySize;
static uint32_t const kDefaultMaxDiskChunksMemoryUsage = 10 * 1024; // 10MB
uint32_t CacheObserver::sMaxDiskChunksMemoryUsage = kDefaultMaxDiskChunksMemoryUsage;
static uint32_t const kDefaultMaxDiskPriorityChunksMemoryUsage = 10 * 1024; // 10MB
uint32_t CacheObserver::sMaxDiskPriorityChunksMemoryUsage = kDefaultMaxDiskPriorityChunksMemoryUsage;
static uint32_t const kDefaultCompressionLevel = 1;
uint32_t CacheObserver::sCompressionLevel = kDefaultCompressionLevel;
@ -152,6 +158,11 @@ CacheObserver::AttachToPreferences()
mozilla::Preferences::AddUintVarCache(
&sMaxMemoryEntrySize, "browser.cache.memory.max_entry_size", kDefaultMaxMemoryEntrySize);
mozilla::Preferences::AddUintVarCache(
&sMaxDiskChunksMemoryUsage, "browser.cache.disk.max_chunks_memory_usage", kDefaultMaxDiskChunksMemoryUsage);
mozilla::Preferences::AddUintVarCache(
&sMaxDiskPriorityChunksMemoryUsage, "browser.cache.disk.max_priority_chunks_memory_usage", kDefaultMaxDiskPriorityChunksMemoryUsage);
// http://mxr.mozilla.org/mozilla-central/source/netwerk/cache/nsCacheEntryDescriptor.cpp#367
mozilla::Preferences::AddUintVarCache(
&sCompressionLevel, "browser.cache.compression_level", kDefaultCompressionLevel);

View File

@ -46,6 +46,9 @@ class CacheObserver : public nsIObserver
{ return sMaxMemoryEntrySize << 10; }
static uint32_t const MaxDiskEntrySize() // result in bytes.
{ return sMaxDiskEntrySize << 10; }
static uint32_t const MaxDiskChunksMemoryUsage(bool aPriority) // result in bytes.
{ return aPriority ? sMaxDiskPriorityChunksMemoryUsage << 10
: sMaxDiskChunksMemoryUsage << 10; }
static uint32_t const CompressionLevel()
{ return sCompressionLevel; }
static uint32_t const HalfLifeSeconds()
@ -75,6 +78,8 @@ private:
static uint32_t sPreloadChunkCount;
static uint32_t sMaxMemoryEntrySize;
static uint32_t sMaxDiskEntrySize;
static uint32_t sMaxDiskChunksMemoryUsage;
static uint32_t sMaxDiskPriorityChunksMemoryUsage;
static uint32_t sCompressionLevel;
static uint32_t sHalfLifeHours;
static int32_t sHalfLifeExperiment;

View File

@ -76,9 +76,10 @@ namespace {
(loadFlags & (nsIRequest::LOAD_BYPASS_CACHE | \
nsICachingChannel::LOAD_BYPASS_LOCAL_CACHE))
#define CACHE_FILE_GONE(result) \
#define RECOVER_FROM_CACHE_FILE_ERROR(result) \
((result) == NS_ERROR_FILE_NOT_FOUND || \
(result) == NS_ERROR_FILE_CORRUPTED)
(result) == NS_ERROR_FILE_CORRUPTED || \
(result) == NS_ERROR_OUT_OF_MEMORY)
static NS_DEFINE_CID(kStreamListenerTeeCID, NS_STREAMLISTENERTEE_CID);
static NS_DEFINE_CID(kStreamTransportServiceCID,
@ -4927,9 +4928,10 @@ nsHttpChannel::OnStartRequest(nsIRequest *request, nsISupports *ctxt)
NS_WARNING("No response head in OnStartRequest");
}
// cache file could be deleted on our behalf, reload from network here.
if (mCacheEntry && mCachePump && CACHE_FILE_GONE(mStatus)) {
LOG((" cache file gone, reloading from server"));
// The cache file could have been deleted on our behalf, it could contain
// errors, or it could have failed to allocate memory; reload from the network
// here.
if (mCacheEntry && mCachePump && RECOVER_FROM_CACHE_FILE_ERROR(mStatus)) {
LOG((" cache file error, reloading from server"));
mCacheEntry->AsyncDoom(nullptr);
nsresult rv = StartRedirectChannelToURI(mURI, nsIChannelEventSink::REDIRECT_INTERNAL);
if (NS_SUCCEEDED(rv))

View File

@ -0,0 +1,51 @@
Components.utils.import('resource://gre/modules/LoadContextInfo.jsm');
function gen_200k()
{
var i;
var data="0123456789ABCDEFGHIJLKMNO";
for (i=0; i<13; i++)
data+=data;
return data;
}
// Keep the output stream of the first entry in a global variable so that the
// CacheFile and its buffer aren't released before we write the data to the
// second entry.
var oStr;
function run_test()
{
do_get_profile();
if (!newCacheBackEndUsed()) {
do_check_true(true, "This test doesn't run when the old cache back end is used since the behavior is different");
return;
}
var prefBranch = Cc["@mozilla.org/preferences-service;1"].
getService(Ci.nsIPrefBranch);
// set max chunks memory so that only one full chunk fits within the limit
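// (a chunk is kChunkSize = 256 kB, so a 300 kB cap fits one full chunk but not two)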
prefBranch.setIntPref("browser.cache.disk.max_chunks_memory_usage", 300);
asyncOpenCacheEntry("http://a/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
function(status, entry) {
do_check_eq(status, Cr.NS_OK);
oStr = entry.openOutputStream(0);
var data = gen_200k();
do_check_eq(data.length, oStr.write(data, data.length));
asyncOpenCacheEntry("http://b/", "disk", Ci.nsICacheStorage.OPEN_NORMALLY, null,
function(status, entry) {
do_check_eq(status, Cr.NS_OK);
var oStr2 = entry.openOutputStream(0);
do_check_throws_nsIException(() => oStr2.write(data, data.length), 'NS_ERROR_OUT_OF_MEMORY');
finish_cache2_test();
}
);
}
);
do_test_pending();
}

View File

@ -64,6 +64,7 @@ skip-if = os == "android"
[test_cache2-24-exists.js]
# Bug 675039, comment 6: "The difference is that the memory cache is disabled in Armv6 builds."
skip-if = os == "android"
[test_cache2-25-chunk-memory-limit.js]
[test_cache2-26-no-outputstream-open.js]
# GC, which this patch depends on, doesn't work well on Android.
skip-if = os == "android"

View File

@ -6178,6 +6178,18 @@
"extended_statistics_ok": true,
"description": "Time spent to open an existing cache entry"
},
"NETWORK_CACHE_V2_OUTPUT_STREAM_STATUS": {
"expires_in_version": "never",
"kind": "enumerated",
"n_values": "7",
"description": "Final status of the CacheFileOutputStream (0=ok, 1=other error, 2=out of memory, 3=disk full, 4=file corrupted, 5=file not found, 6=binding aborted)"
},
"NETWORK_CACHE_V2_INPUT_STREAM_STATUS": {
"expires_in_version": "never",
"kind": "enumerated",
"n_values": "7",
"description": "Final status of the CacheFileInputStream (0=ok, 1=other error, 2=out of memory, 3=disk full, 4=file corrupted, 5=file not found, 6=binding aborted)"
},
"SQLITEBRIDGE_PROVIDER_PASSWORDS_LOCKED": {
"expires_in_version": "never",
"kind": "enumerated",