Bug 1066726 - Concurrent HTTP cache read and write issues, r=michal+jduell

Honza Bambas 2014-09-16 15:51:50 +02:00
parent ee43f2e8a8
commit 68d2a7c257
7 changed files with 258 additions and 8 deletions

View File

@@ -678,6 +678,19 @@ CacheFile::OpenInputStream(nsIInputStream **_retval)
return NS_ERROR_NOT_AVAILABLE;
}
if (NS_FAILED(mStatus)) {
LOG(("CacheFile::OpenInputStream() - CacheFile is in a failure state "
"[this=%p, status=0x%08x]", this, mStatus));
// Don't allow opening the input stream when this CacheFile is in
// a failed state. This is the only way to protect consumers correctly
// from reading a broken entry. When the file is in the failed state,
// it's also doomed, so reopening the entry won't make any difference -
// the data will remain inaccessible. Note that for files that are merely
// doomed, we must still allow reading the data.
return mStatus;
}
// Once we open input stream we no longer allow preloading of chunks without
// input stream, i.e. we will no longer keep first few chunks preloaded when
// the last input stream is closed.
@@ -1566,6 +1579,13 @@ CacheFile::RemoveOutput(CacheFileOutputStream *aOutput, nsresult aStatus)
if (!mMemoryOnly)
WriteMetadataIfNeededLocked();
// Make sure the CacheFile status is set to a failure when the output stream
// is closed with a fatal error. This way the failure state of this entry is
// propagated to end consumers correctly and without any window for races.
if (NS_SUCCEEDED(mStatus) && NS_FAILED(aStatus) && aStatus != NS_BASE_STREAM_CLOSED) {
mStatus = aStatus;
}
// Notify close listener as the last action
aOutput->NotifyCloseListener();
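
Taken together, the two CacheFile hunks above form one contract: the fatal close status recorded in RemoveOutput() is what later makes OpenInputStream() refuse to hand out readers, so a concurrent reader can never observe a silently truncated entry. A minimal JavaScript sketch of that contract (an illustrative model only, not the real CacheFile C++ API; the status values are placeholders):

    // Placeholder status values, not the real nsresult codes.
    const NS_OK = "NS_OK";
    const NS_BASE_STREAM_CLOSED = "NS_BASE_STREAM_CLOSED";
    const NS_ERROR_FILE_TOO_BIG = "NS_ERROR_FILE_TOO_BIG";

    function makeEntryModel() {
      let status = NS_OK;
      return {
        // Models CacheFile::RemoveOutput(): remember the first fatal error,
        // ignoring the ordinary "stream closed" status.
        removeOutput(closeStatus) {
          if (status === NS_OK && closeStatus !== NS_OK &&
              closeStatus !== NS_BASE_STREAM_CLOSED) {
            status = closeStatus;
          }
        },
        // Models CacheFile::OpenInputStream(): never hand out a reader for
        // an entry that is in a failure state.
        openInputStream() {
          if (status !== NS_OK) {
            throw new Error("entry is in a failure state: " + status);
          }
          return "input stream";
        },
      };
    }

    // A writer hits the max-entry-size limit and closes with a fatal error;
    // a concurrent reader then fails to open an input stream and must fall
    // back to the network instead of delivering a truncated body.
    const entry = makeEntryModel();
    entry.removeOutput(NS_ERROR_FILE_TOO_BIG);
    try {
      entry.openInputStream();
    } catch (e) {
      console.log(e.message); // "entry is in a failure state: NS_ERROR_FILE_TOO_BIG"
    }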

View File

@@ -896,13 +896,13 @@ nsHttpChannel::CallOnStartRequest()
mResponseHead->SetContentCharset(mContentCharsetHint);
if (mResponseHead && mCacheEntry) {
-// If we have a cache entry, set its predicted size to ContentLength to
+// If we have a cache entry, set its predicted size to TotalEntitySize to
// avoid caching an entry that will exceed the max size limit.
rv = mCacheEntry->SetPredictedDataSize(
-mResponseHead->ContentLength());
+mResponseHead->TotalEntitySize());
if (NS_ERROR_FILE_TOO_BIG == rv) {
-mCacheEntry = nullptr;
-LOG((" entry too big, throwing away"));
+// Don't throw the entry away, we will need it later.
+LOG((" entry too big"));
} else {
NS_ENSURE_SUCCESS(rv, rv);
}
@@ -2063,12 +2063,13 @@ nsHttpChannel::IsResumable(int64_t partialLen, int64_t contentLength,
}
nsresult
-nsHttpChannel::MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength)
+nsHttpChannel::MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength,
+                                          bool ignoreMissingPartialLen)
{
// Be pessimistic
mIsPartialRequest = false;
-if (!IsResumable(partialLen, contentLength))
+if (!IsResumable(partialLen, contentLength, ignoreMissingPartialLen))
return NS_ERROR_NOT_RESUMABLE;
// looks like a partial entry we can reuse; add If-Range
@@ -5149,7 +5150,9 @@ nsHttpChannel::OnStopRequest(nsIRequest *request, nsISupports *ctxt, nsresult st
else if (contentLength != int64_t(-1) && contentLength != size) {
LOG((" concurrent cache entry write has been interrupted"));
mCachedResponseHead = Move(mResponseHead);
-rv = MaybeSetupByteRangeRequest(size, contentLength);
+// Ignore zero partial length because we also want to resume when
+// no data at all has been read from the cache.
+rv = MaybeSetupByteRangeRequest(size, contentLength, true);
if (NS_SUCCEEDED(rv) && mIsPartialRequest) {
// Prevent read from cache again
mCachedContentIsValid = 0;
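
At the HTTP level, the interrupted concurrent write is recovered from by turning the reader into a ranged network request. A hedged JavaScript sketch of that decision, using a hypothetical helper name (buildResumeRequest is not part of the patch, and the real IsResumable() checks more conditions, such as Accept-Ranges and a strong validator):

    // Given how many bytes the reader got from the cache before the writer
    // was interrupted, decide whether to resume from the network and, if so,
    // which conditional range headers to send.
    function buildResumeRequest(bytesReadFromCache, contentLength, etag) {
      // Mirrors the point of ignoreMissingPartialLen: a partial length of 0
      // is acceptable, because the reader may not have received any cached
      // data at all.
      const resumable = contentLength >= 0 && bytesReadFromCache < contentLength;
      if (!resumable) {
        return null; // not resumable; re-request the whole entity instead
      }
      return {
        "Range": "bytes=" + bytesReadFromCache + "-",
        // If-Range guards against the entity having changed between requests.
        "If-Range": etag,
      };
    }

    // Zero bytes read from the cache is still a valid resume point.
    console.log(buildResumeRequest(0, 13, '"Just testing"'));
    // => { Range: "bytes=0-", "If-Range": '"Just testing"' }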

View File

@@ -325,7 +325,8 @@ private:
bool MustValidateBasedOnQueryUrl() const;
bool IsResumable(int64_t partialLen, int64_t contentLength,
bool ignoreMissingPartialLen = false) const;
-nsresult MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength);
+nsresult MaybeSetupByteRangeRequest(int64_t partialLen, int64_t contentLength,
+                                    bool ignoreMissingPartialLen = false);
nsresult SetupByteRangeRequest(int64_t partialLen);
nsresult OpenCacheInputStream(nsICacheEntry* cacheEntry, bool startBuffering,
bool checkingAppCacheEntry);

View File

@@ -0,0 +1,72 @@
/*
Checks that the concurrent cache read/write works when the write is interrupted because of max-entry-size limits.
This test uses a resumable response.
- with a profile, set max-entry-size to 0
- first channel makes a request for a resumable response
- second channel makes a request for the same resource, concurrent read happens
- first channel sets the predicted data size on the entry, which gets the entry doomed
- second channel now must engage the interrupted concurrent write algorithm and read the content again from the network
- both channels must deliver full content w/o errors
*/
Cu.import("resource://testing-common/httpd.js");
Cu.import("resource://gre/modules/Services.jsm");
XPCOMUtils.defineLazyGetter(this, "URL", function() {
return "http://localhost:" + httpServer.identity.primaryPort;
});
var httpServer = null;
function make_channel(url, callback, ctx) {
var ios = Cc["@mozilla.org/network/io-service;1"].
getService(Ci.nsIIOService);
return ios.newChannel(url, "", null);
}
const responseBody = "response body";
function contentHandler(metadata, response)
{
response.setHeader("Content-Type", "text/plain");
response.setHeader("ETag", "Just testing");
response.setHeader("Cache-Control", "max-age=99999");
response.setHeader("Accept-Ranges", "bytes");
response.setHeader("Content-Length", "" + responseBody.length);
if (metadata.hasHeader("If-Range")) {
response.setStatusLine(metadata.httpVersion, 206, "Partial Content");
response.setHeader("Content-Range", "0-12/13");
}
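// The resumed request from the second channel carries If-Range (set up by
// MaybeSetupByteRangeRequest); the 13-byte "response body" covers bytes
// 0-12, hence the "0-12/13" range above.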
response.bodyOutputStream.write(responseBody, responseBody.length);
}
function run_test()
{
do_get_profile();
Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 0);
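// max_entry_size is expressed in kilobytes (cf. the "1 kB" limit in
// test_partial_response_entry_size_smart_shrink.js); 0 means any write,
// however small, exceeds the limit and interrupts the cache entry write.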
httpServer = new HttpServer();
httpServer.registerPathHandler("/content", contentHandler);
httpServer.start(-1);
var chan1 = make_channel(URL + "/content");
chan1.asyncOpen(new ChannelListener(firstTimeThrough, null), null);
var chan2 = make_channel(URL + "/content");
chan2.asyncOpen(new ChannelListener(secondTimeThrough, null), null);
do_test_pending();
}
function firstTimeThrough(request, buffer)
{
do_check_eq(buffer, responseBody);
}
function secondTimeThrough(request, buffer)
{
do_check_eq(buffer, responseBody);
httpServer.stop(do_test_finished);
}

View File

@@ -0,0 +1,72 @@
/*
Checks that the concurrent cache read/write works when the write is interrupted because of max-entry-size limits.
This test uses a non-resumable response.
- with a profile, set max-entry-size to 0
- first channel makes a request for a non-resumable (chunked) response
- second channel makes a request for the same resource, concurrent read is bypassed (non-resumable response)
- first channel writes the first bytes to the cache output stream, but that fails because of the max-entry-size limit and the entry is doomed
- cache entry output stream is closed
- second channel gets the entry, opening the input stream must fail
- second channel must read the content again from the network
- both channels must deliver full content w/o errors
*/
Cu.import("resource://testing-common/httpd.js");
Cu.import("resource://gre/modules/Services.jsm");
XPCOMUtils.defineLazyGetter(this, "URL", function() {
return "http://localhost:" + httpServer.identity.primaryPort;
});
var httpServer = null;
function make_channel(url, callback, ctx) {
var ios = Cc["@mozilla.org/network/io-service;1"].
getService(Ci.nsIIOService);
return ios.newChannel(url, "", null);
}
const responseBody = "c\r\ndata reached\r\n3\r\nhej\r\n0\r\n\r\n";
const responseBodyDecoded = "data reachedhej";
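// Chunked framing of responseBody, for reference:
//   "c\r\n" + "data reached" (0xc = 12 bytes) + "\r\n"
//   "3\r\n" + "hej"          (3 bytes)        + "\r\n"
//   "0\r\n\r\n"              (terminating zero-length chunk)
// which decodes to responseBodyDecoded, "data reachedhej" (15 bytes).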
function contentHandler(metadata, response)
{
response.seizePower();
response.write("HTTP/1.1 200 OK\r\n");
response.write("Content-Type: text/plain\r\n");
response.write("Transfer-Encoding: chunked\r\n");
response.write("\r\n");
response.write(responseBody);
response.finish();
}
function run_test()
{
do_get_profile();
Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 0);
httpServer = new HttpServer();
httpServer.registerPathHandler("/content", contentHandler);
httpServer.start(-1);
var chan1 = make_channel(URL + "/content");
chan1.asyncOpen(new ChannelListener(firstTimeThrough, null, CL_ALLOW_UNKNOWN_CL), null);
var chan2 = make_channel(URL + "/content");
chan2.asyncOpen(new ChannelListener(secondTimeThrough, null, CL_ALLOW_UNKNOWN_CL), null);
do_test_pending();
}
function firstTimeThrough(request, buffer)
{
do_check_eq(buffer, responseBodyDecoded);
}
function secondTimeThrough(request, buffer)
{
do_check_eq(buffer, responseBodyDecoded);
httpServer.stop(do_test_finished);
}

View File

@@ -0,0 +1,79 @@
/*
This is only a crash test. We load partial content and cache it. Then we shrink the limit
on a single cache entry's size so that the next request for the rest of the content
will hit that limit and doom/remove the entry. We change the limit manually here, but in
reality it is adjusted by the cache smart-size logic.
*/
Cu.import("resource://testing-common/httpd.js");
Cu.import("resource://gre/modules/Services.jsm");
XPCOMUtils.defineLazyGetter(this, "URL", function() {
return "http://localhost:" + httpServer.identity.primaryPort;
});
var httpServer = null;
function make_channel(url, callback, ctx) {
var ios = Cc["@mozilla.org/network/io-service;1"].
getService(Ci.nsIIOService);
return ios.newChannel(url, "", null);
}
// Build a 2 kB response (8 bytes doubled 8 times: 8 * 2^8 = 2048 bytes)
var responseBody = "response";
for (var i = 0; i < 8; ++i) responseBody += responseBody;
function contentHandler(metadata, response)
{
response.setHeader("Content-Type", "text/plain", false);
response.setHeader("ETag", "range");
response.setHeader("Accept-Ranges", "bytes");
response.setHeader("Cache-Control", "max-age=360000");
if (!metadata.hasHeader("If-Range")) {
response.setHeader("Content-Length", responseBody.length + "");
response.processAsync();
var slice = responseBody.slice(0, 100);
response.bodyOutputStream.write(slice, slice.length);
response.finish();
} else {
var slice = responseBody.slice(100);
response.setStatusLine(metadata.httpVersion, 206, "Partial Content");
response.setHeader("Content-Range",
(responseBody.length - slice.length).toString() + "-" +
(responseBody.length - 1).toString() + "/" +
(responseBody.length).toString());
response.setHeader("Content-Length", slice.length + "");
response.bodyOutputStream.write(slice, slice.length);
}
}
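// For reference: responseBody is 2048 bytes and the first response served a
// 100-byte slice, so the resumed (If-Range) response above carries
// Content-Range "100-2047/2048" and Content-Length 1948.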
function run_test()
{
httpServer = new HttpServer();
httpServer.registerPathHandler("/content", contentHandler);
httpServer.start(-1);
var chan = make_channel(URL + "/content");
chan.asyncOpen(new ChannelListener(firstTimeThrough, null, CL_EXPECT_LATE_FAILURE), null);
do_test_pending();
}
function firstTimeThrough(request, buffer)
{
// Change the single cache entry limit to 1 kB. This emulates a smart-size change.
Services.prefs.setIntPref("browser.cache.disk.max_entry_size", 1);
var chan = make_channel(URL + "/content");
chan.asyncOpen(new ChannelListener(finish_test, null), null);
}
function finish_test(request, buffer)
{
do_check_eq(buffer, responseBody);
httpServer.stop(do_test_finished);
}

View File

@@ -65,6 +65,9 @@ skip-if = true
[test_cache2-28a-OPEN_SECRETLY.js]
# This test will be fixed in bug 1067931
skip-if = true
[test_cache2-28-concurrent_read_resumable_entry_size_zero.js]
[test_cache2-29-concurrent_read_non-resumable_entry_size_zero.js]
[test_partial_response_entry_size_smart_shrink.js]
[test_304_responses.js]
[test_cacheForOfflineUse_no-store.js]
[test_307_redirect.js]