Bug 1197847 - Disallow folded headers in h2. r=mcmanus

This also fixes a number of situations in which we could get our HPACK
compression state out of sync with the server, which would be Very Bad.
Nicholas Hurley 2015-09-22 19:58:14 -07:00
parent 5c70c463e3
commit 312dfeef47
5 changed files with 251 additions and 48 deletions
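
For context, the rule the decompressor enforces below is simple: a decoded header value must not contain an obs-fold, i.e. a line feed followed by a space or tab. A minimal standalone sketch of that check (plain C++ with std::string; the patch itself uses Gecko's nsACString and FindChar) might look like:

    #include <string>

    // Returns true when a decoded header value contains a folded
    // continuation line (LF followed by SP or HTAB), which the patch
    // below rejects with a stream-level error rather than tearing
    // down the whole session.
    static bool ContainsFoldedHeader(const std::string& value)
    {
      for (size_t i = 0; i + 1 < value.size(); ++i) {
        if (value[i] == '\n' && (value[i + 1] == ' ' || value[i + 1] == '\t')) {
          return true;
        }
      }
      return false;
    }

Errors like this one, which do not touch the dynamic table, are surfaced as NS_ERROR_ILLEGAL_VALUE and handled by resetting the stream; errors from table-modifying representations remain session-fatal, since the peer's table state has already advanced.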


@@ -360,7 +360,9 @@ Http2Decompressor::DecodeHeaderBlock(const uint8_t *data, uint32_t datalen,
   mIsPush = isPush;
 
   nsresult rv = NS_OK;
+  nsresult softfail_rv = NS_OK;
   while (NS_SUCCEEDED(rv) && (mOffset < datalen)) {
+    bool modifiesTable = true;
     if (mData[mOffset] & 0x80) {
       rv = DoIndexed();
       LOG(("Decompressor state after indexed"));
@@ -371,16 +373,37 @@ Http2Decompressor::DecodeHeaderBlock(const uint8_t *data, uint32_t datalen,
       rv = DoContextUpdate();
       LOG(("Decompressor state after context update"));
     } else if (mData[mOffset] & 0x10) {
+      modifiesTable = false;
       rv = DoLiteralNeverIndexed();
       LOG(("Decompressor state after literal never index"));
     } else {
+      modifiesTable = false;
       rv = DoLiteralWithoutIndex();
       LOG(("Decompressor state after literal without index"));
     }
     DumpState();
+
+    if (rv == NS_ERROR_ILLEGAL_VALUE) {
+      if (modifiesTable) {
+        // Unfortunately, we can't count on our peer now having the same state
+        // as us, so let's terminate the session and we can try again later.
+        return NS_ERROR_FAILURE;
+      }
+      // This is an http-level error that we can handle by resetting the stream
+      // in the upper layers. Let's note that we saw this, then continue
+      // decompressing until we either hit the end of the header block or find a
+      // hard failure. That way we won't get an inconsistent compression state
+      // with the server.
+      softfail_rv = rv;
+      rv = NS_OK;
+    }
   }
 
-  return rv;
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  return softfail_rv;
 }
 
 nsresult
@@ -407,7 +430,8 @@ Http2Decompressor::DecodeInteger(uint32_t prefixLen, uint32_t &accum)
   if (mOffset >= mDataLen) {
     NS_WARNING("Ran out of data to decode integer");
-    return NS_ERROR_ILLEGAL_VALUE;
+    // This is session-fatal.
+    return NS_ERROR_FAILURE;
   }
 
   bool chainBit = mData[mOffset] & 0x80;
   accum += (mData[mOffset] & 0x7f) * factor;
@@ -419,12 +443,16 @@ Http2Decompressor::DecodeInteger(uint32_t prefixLen, uint32_t &accum)
     // really big offsets are just trawling for overflows
     if (accum >= 0x800000) {
       NS_WARNING("Decoding integer >= 0x800000");
-      return NS_ERROR_ILLEGAL_VALUE;
+      // This is not strictly fatal to the session, but given the fact that
+      // the value is way too large to be reasonable, let's just tell our peer
+      // to go away.
+      return NS_ERROR_FAILURE;
     }
 
     if (mOffset >= mDataLen) {
       NS_WARNING("Ran out of data to decode integer");
-      return NS_ERROR_ILLEGAL_VALUE;
+      // This is session-fatal.
+      return NS_ERROR_FAILURE;
     }
     chainBit = mData[mOffset] & 0x80;
     accum += (mData[mOffset] & 0x7f) * factor;
@@ -538,7 +566,8 @@ Http2Decompressor::OutputHeader(uint32_t index)
   // bounds check
   if (mHeaderTable.Length() <= index) {
     LOG(("Http2Decompressor::OutputHeader index too large %u", index));
-    return NS_ERROR_ILLEGAL_VALUE;
+    // This is session-fatal.
+    return NS_ERROR_FAILURE;
   }
 
   return OutputHeader(mHeaderTable[index]->mName,
@@ -550,8 +579,10 @@ Http2Decompressor::CopyHeaderString(uint32_t index, nsACString &name)
 {
   // NWGH - make this < index
   // bounds check
-  if (mHeaderTable.Length() <= index)
-    return NS_ERROR_ILLEGAL_VALUE;
+  if (mHeaderTable.Length() <= index) {
+    // This is session-fatal.
+    return NS_ERROR_FAILURE;
+  }
 
   name = mHeaderTable[index]->mName;
   return NS_OK;
@@ -560,8 +591,10 @@ Http2Decompressor::CopyHeaderString(uint32_t index, nsACString &name)
 nsresult
 Http2Decompressor::CopyStringFromInput(uint32_t bytes, nsACString &val)
 {
-  if (mOffset + bytes > mDataLen)
-    return NS_ERROR_ILLEGAL_VALUE;
+  if (mOffset + bytes > mDataLen) {
+    // This is session-fatal.
+    return NS_ERROR_FAILURE;
+  }
 
   val.Assign(reinterpret_cast<const char *>(mData) + mOffset, bytes);
   mOffset += bytes;
@@ -584,21 +617,21 @@ Http2Decompressor::DecodeFinalHuffmanCharacter(HuffmanIncomingTable *table,
   if (entry->mPtr) {
     // Can't chain to another table when we're all out of bits in the encoding
    LOG(("DecodeFinalHuffmanCharacter trying to chain when we're out of bits"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   if (bitsLeft < entry->mPrefixLen) {
     // We don't have enough bits to actually make a match, this is some sort of
     // invalid coding
     LOG(("DecodeFinalHuffmanCharacter does't have enough bits to match"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   // This is a character!
   if (entry->mValue == 256) {
     // EOS
     LOG(("DecodeFinalHuffmanCharacter actually decoded an EOS"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   c = static_cast<uint8_t>(entry->mValue & 0xFF);
   bitsLeft -= entry->mPrefixLen;
@@ -644,7 +677,7 @@ Http2Decompressor::DecodeHuffmanCharacter(HuffmanIncomingTable *table,
     // TODO - does this get me into trouble in the new world?
     // No info left in input to try to consume, we're done
     LOG(("DecodeHuffmanCharacter all out of bits to consume, can't chain"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   // We might get lucky here!
@@ -657,7 +690,7 @@ Http2Decompressor::DecodeHuffmanCharacter(HuffmanIncomingTable *table,
 
   if (entry->mValue == 256) {
     LOG(("DecodeHuffmanCharacter found an actual EOS"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   c = static_cast<uint8_t>(entry->mValue & 0xFF);
@@ -680,7 +713,7 @@ Http2Decompressor::CopyHuffmanStringFromInput(uint32_t bytes, nsACString &val)
 {
   if (mOffset + bytes > mDataLen) {
     LOG(("CopyHuffmanStringFromInput not enough data"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   uint32_t bytesRead = 0;
@@ -704,7 +737,7 @@ Http2Decompressor::CopyHuffmanStringFromInput(uint32_t bytes, nsACString &val)
 
   if (bytesRead > bytes) {
     LOG(("CopyHuffmanStringFromInput read more bytes than was allowed!"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   if (bitsLeft) {
@@ -719,7 +752,7 @@ Http2Decompressor::CopyHuffmanStringFromInput(uint32_t bytes, nsACString &val)
 
   if (bitsLeft > 7) {
     LOG(("CopyHuffmanStringFromInput more than 7 bits of padding"));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   if (bitsLeft) {
@@ -730,7 +763,7 @@ Http2Decompressor::CopyHuffmanStringFromInput(uint32_t bytes, nsACString &val)
 
     if (bits != mask) {
       LOG(("CopyHuffmanStringFromInput ran out of data but found possible "
           "non-EOS symbol"));
-      return NS_ERROR_ILLEGAL_VALUE;
+      return NS_ERROR_FAILURE;
     }
   }
@@ -749,13 +782,14 @@ Http2Decompressor::DoIndexed()
   uint32_t index;
   nsresult rv = DecodeInteger(7, index);
-  if (NS_FAILED(rv))
+  if (NS_FAILED(rv)) {
     return rv;
+  }
 
   LOG(("HTTP decompressor indexed entry %u\n", index));
 
   if (index == 0) {
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   // NWGH - remove this line, since we'll keep everything 1-indexed
   index--; // Internally, we 0-index everything, since this is, y'know, C++
@@ -775,8 +809,9 @@ Http2Decompressor::DoLiteralInternal(nsACString &name, nsACString &value,
   // first let's get the name
   uint32_t index;
   nsresult rv = DecodeInteger(namePrefixLen, index);
-  if (NS_FAILED(rv))
+  if (NS_FAILED(rv)) {
     return rv;
+  }
 
   bool isHuffmanEncoded;
@@ -801,8 +836,9 @@ Http2Decompressor::DoLiteralInternal(nsACString &name, nsACString &value,
     LOG(("Http2Decompressor::DoLiteralInternal indexed name %d %s",
          index, name.BeginReading()));
   }
-  if (NS_FAILED(rv))
+  if (NS_FAILED(rv)) {
     return rv;
+  }
 
   // now the value
   uint32_t valueLen;
@@ -815,8 +851,19 @@ Http2Decompressor::DoLiteralInternal(nsACString &name, nsACString &value,
       rv = CopyStringFromInput(valueLen, value);
     }
   }
-  if (NS_FAILED(rv))
+  if (NS_FAILED(rv)) {
     return rv;
+  }
+
+  int32_t newline = 0;
+  while ((newline = value.FindChar('\n', newline)) != -1) {
+    if (value[newline + 1] == ' ' || value[newline + 1] == '\t') {
+      LOG(("Http2Decompressor::Disallowing folded header value %s",
+           value.BeginReading()));
+      return NS_ERROR_ILLEGAL_VALUE;
+    }
+    ++newline;
+  }
   LOG(("Http2Decompressor::DoLiteralInternal value %s", value.BeginReading()));
   return NS_OK;
 }
@@ -833,8 +880,9 @@ Http2Decompressor::DoLiteralWithoutIndex()
   LOG(("HTTP decompressor literal without index %s %s\n",
        name.get(), value.get()));
 
-  if (NS_SUCCEEDED(rv))
+  if (NS_SUCCEEDED(rv)) {
     rv = OutputHeader(name, value);
+  }
   return rv;
 }
@@ -846,10 +894,12 @@ Http2Decompressor::DoLiteralWithIncremental()
 
   nsAutoCString name, value;
   nsresult rv = DoLiteralInternal(name, value, 6);
-  if (NS_SUCCEEDED(rv))
+  if (NS_SUCCEEDED(rv)) {
     rv = OutputHeader(name, value);
-  if (NS_FAILED(rv))
+  }
+  if (NS_FAILED(rv)) {
     return rv;
+  }
 
   uint32_t room = nvPair(name, value).Size();
   if (room > mMaxBuffer) {
@@ -884,8 +934,9 @@ Http2Decompressor::DoLiteralNeverIndexed()
   LOG(("HTTP decompressor literal never indexed %s %s\n",
        name.get(), value.get()));
 
-  if (NS_SUCCEEDED(rv))
+  if (NS_SUCCEEDED(rv)) {
     rv = OutputHeader(name, value);
+  }
   return rv;
 }
@@ -899,8 +950,9 @@ Http2Decompressor::DoContextUpdate()
   uint32_t newMaxSize;
   nsresult rv = DecodeInteger(5, newMaxSize);
   LOG(("Http2Decompressor::DoContextUpdate new maximum size %u", newMaxSize));
-  if (NS_FAILED(rv))
+  if (NS_FAILED(rv)) {
     return rv;
+  }
 
   return mCompressor->SetMaxBufferSizeInternal(newMaxSize);
 }
@@ -946,13 +998,15 @@ Http2Compressor::EncodeHeaderBlock(const nsCString &nvInput,
     int32_t startIndex = crlfIndex + 2;
 
     crlfIndex = nvInput.Find("\r\n", false, startIndex);
-    if (crlfIndex == -1)
+    if (crlfIndex == -1) {
       break;
+    }
 
     int32_t colonIndex = nvInput.Find(":", false, startIndex,
                                       crlfIndex - startIndex);
-    if (colonIndex == -1)
+    if (colonIndex == -1) {
       break;
+    }
 
     nsDependentCSubstring name = Substring(beginBuffer + startIndex,
                                            beginBuffer + colonIndex);
@@ -999,8 +1053,9 @@ Http2Compressor::EncodeHeaderBlock(const nsCString &nvInput,
     if (name.EqualsLiteral("content-length")) {
       int64_t len;
       nsCString tmp(value);
-      if (nsHttp::ParseInt64(tmp.get(), nullptr, &len))
+      if (nsHttp::ParseInt64(tmp.get(), nullptr, &len)) {
         mParsedContentLength = len;
+      }
     }
 
     if (name.EqualsLiteral("cookie")) {
@@ -1134,8 +1189,9 @@ Http2Compressor::EncodeInteger(uint32_t prefixLen, uint32_t val)
     q = val / 128;
     r = val % 128;
     tmp = r;
-    if (q)
+    if (q) {
       tmp |= 0x80; // chain bit
+    }
     val = q;
     mOutput->Append(reinterpret_cast<char *>(&tmp), 1);
   } while (q);
@@ -1304,7 +1360,7 @@ nsresult
 Http2Compressor::SetMaxBufferSizeInternal(uint32_t maxBufferSize)
 {
   if (maxBufferSize > mMaxBufferSetting) {
-    return NS_ERROR_ILLEGAL_VALUE;
+    return NS_ERROR_FAILURE;
   }
 
   uint32_t removedCount = 0;


@@ -1284,6 +1284,9 @@ Http2Session::RecvHeaders(Http2Session *self)
     self->CleanupStream(self->mInputFrameDataStream, rv, PROTOCOL_ERROR);
     self->ResetDownstreamState();
     rv = NS_OK;
+  } else if (NS_FAILED(rv)) {
+    // This is fatal to the session.
+    self->mGoAwayReason = COMPRESSION_ERROR;
   }
   return rv;
 }
@@ -1688,8 +1691,17 @@ Http2Session::RecvPushPromise(Http2Session *self)
     return NS_OK;
   }
 
-  if (NS_FAILED(rv))
+  if (rv == NS_ERROR_ILLEGAL_VALUE) {
+    // This means the decompression completed ok, but there was a problem with
+    // the decoded headers. Reset the stream and go away.
+    self->GenerateRstStream(PROTOCOL_ERROR, promisedID);
+    delete pushedStream;
+    return NS_OK;
+  } else if (NS_FAILED(rv)) {
+    // This is fatal to the session.
+    self->mGoAwayReason = COMPRESSION_ERROR;
     return rv;
+  }
 
   // Ownership of the pushed stream is by the transaction hash, just as it
   // is for a client initiated stream. Errors that aren't fatal to the


@@ -994,7 +994,7 @@ Http2Stream::ConvertResponseHeaders(Http2Decompressor *decompressor,
                                            aHeadersOut, false);
   if (NS_FAILED(rv)) {
     LOG3(("Http2Stream::ConvertResponseHeaders %p decode Error\n", this));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return rv;
   }
 
   nsAutoCString statusString;
@@ -1055,7 +1055,7 @@ Http2Stream::ConvertPushHeaders(Http2Decompressor *decompressor,
                                            aHeadersOut, true);
   if (NS_FAILED(rv)) {
     LOG3(("Http2Stream::ConvertPushHeaders %p Error\n", this));
-    return NS_ERROR_ILLEGAL_VALUE;
+    return rv;
   }
 
   nsCString method;


@@ -47,15 +47,21 @@ Http2CheckListener.prototype = {
   shouldBeHttp2 : true,
   accum : 0,
   expected: -1,
+  shouldSucceed: true,
 
   onStartRequest: function testOnStartRequest(request, ctx) {
     this.onStartRequestFired = true;
-    if (!Components.isSuccessCode(request.status))
+    if (this.shouldSucceed && !Components.isSuccessCode(request.status)) {
       do_throw("Channel should have a success code! (" + request.status + ")");
+    } else if (!this.shouldSucceed && Components.isSuccessCode(request.status)) {
+      do_throw("Channel succeeded unexpectedly!");
+    }
 
     do_check_true(request instanceof Components.interfaces.nsIHttpChannel);
-    do_check_eq(request.responseStatus, 200);
-    do_check_eq(request.requestSucceeded, true);
+    do_check_eq(request.requestSucceeded, this.shouldSucceed);
+    if (this.shouldSucceed) {
+      do_check_eq(request.responseStatus, 200);
+    }
   },
 
   onDataAvailable: function testOnDataAvailable(request, ctx, stream, off, cnt) {
@@ -67,12 +73,16 @@ Http2CheckListener.prototype = {
   onStopRequest: function testOnStopRequest(request, ctx, status) {
     do_check_true(this.onStartRequestFired);
-    do_check_true(Components.isSuccessCode(status));
-    do_check_true(this.onDataAvailableFired);
-    do_check_true(this.isHttp2Connection == this.shouldBeHttp2);
     if (this.expected != -1) {
       do_check_eq(this.accum, this.expected);
     }
+    if (this.shouldSucceed) {
+      do_check_true(Components.isSuccessCode(status));
+      do_check_true(this.onDataAvailableFired);
+      do_check_true(this.isHttp2Connection == this.shouldBeHttp2);
+    } else {
+      do_check_false(Components.isSuccessCode(status));
+    }
 
     run_next_test();
     do_test_finished();
@@ -804,6 +814,57 @@ function test_http2_continuations() {
   chan.asyncOpen(listener, chan);
 }
 
+function Http2IllegalHpackValidationListener() { }
+Http2IllegalHpackValidationListener.prototype = new Http2CheckListener();
+Http2IllegalHpackValidationListener.prototype.shouldGoAway = false;
+Http2IllegalHpackValidationListener.prototype.onStopRequest = function (request, ctx, status) {
+  var wentAway = (request.getResponseHeader('X-Did-Goaway') === 'yes');
+  do_check_eq(wentAway, this.shouldGoAway);
+
+  do_check_true(this.onStartRequestFired);
+  do_check_true(this.onDataAvailableFired);
+  do_check_true(this.isHttp2Connection == this.shouldBeHttp2);
+
+  run_next_test();
+  do_test_finished();
+};
+
+function Http2IllegalHpackListener() { }
+Http2IllegalHpackListener.prototype = new Http2CheckListener();
+Http2IllegalHpackListener.prototype.shouldGoAway = false;
+Http2IllegalHpackListener.prototype.onStopRequest = function (request, ctx, status) {
+  var chan = makeChan("https://localhost:" + serverPort + "/illegalhpack_validate");
+  var listener = new Http2IllegalHpackValidationListener();
+  listener.shouldGoAway = this.shouldGoAway;
+  chan.asyncOpen(listener, null);
+};
+
+function test_http2_illegalhpacksoft() {
+  var chan = makeChan("https://localhost:" + serverPort + "/illegalhpacksoft");
+  var listener = new Http2IllegalHpackListener();
+  listener.shouldGoAway = false;
+  listener.shouldSucceed = false;
+  chan.asyncOpen(listener, null);
+}
+
+function test_http2_illegalhpackhard() {
+  var chan = makeChan("https://localhost:" + serverPort + "/illegalhpackhard");
+  var listener = new Http2IllegalHpackListener();
+  listener.shouldGoAway = true;
+  listener.shouldSucceed = false;
+  chan.asyncOpen(listener, null);
+}
+
+function test_http2_folded_header() {
+  var chan = makeChan("https://localhost:" + serverPort + "/foldedheader");
+  chan.loadGroup = loadGroup;
+  var listener = new Http2CheckListener();
+  listener.shouldSucceed = false;
+  chan.asyncOpen(listener, null);
+}
+
 function test_complete() {
   resetPrefs();
   do_test_pending();
@@ -843,6 +904,9 @@ var tests = [ test_http2_post_big
             , test_http2_pushapi_1
             , test_http2_continuations
             , test_http2_blocking_download
+            , test_http2_illegalhpacksoft
+            , test_http2_illegalhpackhard
+            , test_http2_folded_header
             // Add new tests above here - best to add new tests before h1
             // streams get too involved
             // These next two must always come in this order
@@ -858,7 +922,7 @@ var current_test = 0;
 function run_next_test() {
   if (current_test < tests.length) {
     dump("starting test number " + current_test + "\n");
     tests[current_test]();
     current_test++;
     do_test_pending();


@@ -108,11 +108,11 @@ moreData.prototype = {
     content = generateContent(1024*1024);
     this.resp.write(content); // 1mb chunk
     this.iter--;
     if (!this.iter) {
       this.resp.end();
     } else {
       setTimeout(executeRunLater, 1, this);
     }
   }
 };
@@ -120,12 +120,54 @@ function executeRunLater(arg) {
   arg.onTimeout();
 }
 
+var Compressor = http2_compression.Compressor;
+var HeaderSetCompressor = http2_compression.HeaderSetCompressor;
+var originalCompressHeaders = Compressor.prototype.compress;
+
+function insertSoftIllegalHpack(headers) {
+  var originalCompressed = originalCompressHeaders.apply(this, headers);
+  var illegalLiteral = new Buffer([
+      0x00, // Literal, no index
+      0x08, // Name: not huffman encoded, 8 bytes long
+      0x3a, 0x69, 0x6c, 0x6c, 0x65, 0x67, 0x61, 0x6c, // :illegal
+      0x10, // Value: not huffman encoded, 16 bytes long
+      // REALLY NOT LEGAL
+      0x52, 0x45, 0x41, 0x4c, 0x4c, 0x59, 0x20, 0x4e, 0x4f, 0x54, 0x20, 0x4c, 0x45, 0x47, 0x41, 0x4c
+  ]);
+  var newBufferLength = originalCompressed.length + illegalLiteral.length;
+  var concatenated = new Buffer(newBufferLength);
+  originalCompressed.copy(concatenated, 0);
+  illegalLiteral.copy(concatenated, originalCompressed.length);
+  return concatenated;
+}
+
+function insertHardIllegalHpack(headers) {
+  var originalCompressed = originalCompressHeaders.apply(this, headers);
+  // Now we have to add an invalid header
+  var illegalIndexed = HeaderSetCompressor.integer(5000, 7);
+  // The above returns an array of buffers, but there's only one buffer, so
+  // get rid of the array.
+  illegalIndexed = illegalIndexed[0];
+  // Set the first bit to 1 to signal this is an indexed representation
+  illegalIndexed[0] |= 0x80;
+  var newBufferLength = originalCompressed.length + illegalIndexed.length;
+  var concatenated = new Buffer(newBufferLength);
+  originalCompressed.copy(concatenated, 0);
+  illegalIndexed.copy(concatenated, originalCompressed.length);
+  return concatenated;
+}
+
 var h11required_conn = null;
 var h11required_header = "yes";
 var didRst = false;
 var rstConnection = null;
+var illegalheader_conn = null;
 
 function handleRequest(req, res) {
+  // We do this first to ensure nothing goes wonky in our tests that don't want
+  // the headers to have something illegal in them
+  Compressor.prototype.compress = originalCompressHeaders;
+
   var u = url.parse(req.url);
   var content = getHttpContent(u.pathname);
   var push, push1, push1a, push2, push3;
@@ -572,6 +614,35 @@ function handleRequest(req, res) {
     return;
   }
 
+  else if (u.pathname === "/illegalhpacksoft") {
+    // This will cause the compressor to compress a header that is not legal,
+    // but only affects the stream, not the session.
+    illegalheader_conn = req.stream.connection;
+    Compressor.prototype.compress = insertSoftIllegalHpack;
+    // Fall through to the default response behavior
+  }
+
+  else if (u.pathname === "/illegalhpackhard") {
+    // This will cause the compressor to insert an HPACK instruction that will
+    // cause a session failure.
+    Compressor.prototype.compress = insertHardIllegalHpack;
+    // Fall through to default response behavior
+  }
+
+  else if (u.pathname === "/illegalhpack_validate") {
+    if (req.stream.connection === illegalheader_conn) {
+      res.setHeader('X-Did-Goaway', 'no');
+    } else {
+      res.setHeader('X-Did-Goaway', 'yes');
+    }
+    // Fall through to the default response behavior
+  }
+
+  else if (u.pathname === "/foldedheader") {
+    res.setHeader('X-Folded-Header', 'this is\n folded');
+    // Fall through to the default response behavior
+  }
+
   res.setHeader('Content-Type', 'text/html');
   if (req.httpVersionMajor != 2) {
     res.setHeader('Connection', 'close');