Bug 1244861 - Gzip outgoing telemetry pings. r=rnewman

This commit adds GzipNonChunkedCompressingEntity, which is necessary because
the telemetry servers don't support chunked uploading, which is what the
built-in GzipCompressingEntity does.
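
For reference, a caller opts into the non-chunked gzip path through the new BaseResource
hints, roughly as the TelemetryUploadService change below does (a minimal sketch; uploadUri,
delegate, and pingEntity are placeholders, and setup and error handling are elided):

    final BaseResource resource = new BaseResource(uploadUri); // uploadUri: a java.net.URI (placeholder).
    resource.delegate = delegate;                              // delegate: the resource delegate (placeholder).
    resource.setShouldCompressUploadedEntity(true);            // gzip the request body.
    resource.setShouldChunkUploadsHint(false);                 // ask for Content-Length instead of chunking.
    resource.post(pingEntity);                                 // pingEntity: the ping body as an HttpEntity (placeholder).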

I tested this on my local device: logs showed successful uploads to both the
testing gzip server and the official telemetry server. My data correctly appears
on the former; I did not check the latter.

MozReview-Commit-ID: 4bCNiRYyqFD
Michael Comella 2016-02-17 18:20:20 -08:00
parent d0f4eaa603
commit efd611eed3
4 changed files with 131 additions and 0 deletions


@@ -912,6 +912,7 @@ sync_java_files = [TOPSRCDIR + '/mobile/android/services/src/main/java/org/mozil
'sync/net/BearerAuthHeaderProvider.java',
'sync/net/BrowserIDAuthHeaderProvider.java',
'sync/net/ConnectionMonitorThread.java',
'sync/net/GzipNonChunkedCompressingEntity.java',
'sync/net/HandleProgressException.java',
'sync/net/HawkAuthHeaderProvider.java',
'sync/net/HMACAuthHeaderProvider.java',


@@ -200,6 +200,8 @@ public class TelemetryUploadService extends BackgroundService {
delegate.setResource(resource);
resource.delegate = delegate;
resource.setShouldCompressUploadedEntity(true);
resource.setShouldChunkUploadsHint(false); // Telemetry servers don't support chunking.
// We're in a background thread so we don't have any reason to do this asynchronously.
// If we tried, onStartCommand would return and IntentService might stop itself before we finish.


@@ -30,6 +30,7 @@ import ch.boye.httpclientandroidlib.HttpResponse;
import ch.boye.httpclientandroidlib.HttpVersion;
import ch.boye.httpclientandroidlib.client.AuthCache;
import ch.boye.httpclientandroidlib.client.ClientProtocolException;
import ch.boye.httpclientandroidlib.client.entity.GzipCompressingEntity;
import ch.boye.httpclientandroidlib.client.methods.HttpDelete;
import ch.boye.httpclientandroidlib.client.methods.HttpGet;
import ch.boye.httpclientandroidlib.client.methods.HttpPatch;
@@ -80,6 +81,10 @@ public class BaseResource implements Resource {
protected HttpRequestBase request;
public final String charset = "utf-8";
private boolean shouldGzipCompress = false;
// A hint as to whether uploaded payloads should be chunked. Defaults to true so we use GzipCompressingEntity, which is built-in functionality.
private boolean shouldChunkUploadsHint = true;
/**
* We have very few writes (observers tend to be installed around sync
* sessions) and many iterations (every HTTP request iterates observers), so
@@ -162,6 +167,34 @@
return this.getURI().getHost();
}
/**
* Causes the Resource to compress the uploaded entity payload in requests with payloads (e.g. post, put).
* @param shouldCompress true if the entity should be compressed, false otherwise
*/
public void setShouldCompressUploadedEntity(final boolean shouldCompress) {
shouldGzipCompress = shouldCompress;
}
/**
* Causes the Resource to chunk the uploaded entity payload in requests with payloads (e.g. post, put).
* Note: this flag is only a hint - chunking is not guaranteed.
*
* This hint currently only takes effect when gzip compression is enabled.
*
* @param shouldChunk true if the transfer should be chunked, false otherwise
*/
public void setShouldChunkUploadsHint(final boolean shouldChunk) {
shouldChunkUploadsHint = shouldChunk;
}
private HttpEntity getMaybeCompressedEntity(final HttpEntity entity) {
if (!shouldGzipCompress) {
return entity;
}
return shouldChunkUploadsHint ? new GzipCompressingEntity(entity) : new GzipNonChunkedCompressingEntity(entity);
}
/**
* This shuts up HttpClient, which will otherwise debug log about there
* being no auth cache in the context.
@@ -365,6 +398,7 @@ public class BaseResource implements Resource {
@Override
public void post(HttpEntity body) {
Logger.debug(LOG_TAG, "HTTP POST " + this.uri.toASCIIString());
body = getMaybeCompressedEntity(body);
HttpPost request = new HttpPost(this.uri);
request.setEntity(body);
this.go(request);
@@ -373,6 +407,7 @@
@Override
public void patch(HttpEntity body) {
Logger.debug(LOG_TAG, "HTTP PATCH " + this.uri.toASCIIString());
body = getMaybeCompressedEntity(body);
HttpPatch request = new HttpPatch(this.uri);
request.setEntity(body);
this.go(request);
@@ -381,6 +416,7 @@
@Override
public void put(HttpEntity body) {
Logger.debug(LOG_TAG, "HTTP PUT " + this.uri.toASCIIString());
body = getMaybeCompressedEntity(body);
HttpPut request = new HttpPut(this.uri);
request.setEntity(body);
this.go(request);
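
To illustrate the difference in request framing (a hypothetical snippet, not part of this
commit): GzipCompressingEntity reports an unknown content length, so HttpClient falls back to
Transfer-Encoding: chunked, whereas the new wrapper buffers the gzipped bytes up front and can
report a concrete Content-Length.

    // Hypothetical illustration only; assumes:
    //   import ch.boye.httpclientandroidlib.HttpEntity;
    //   import ch.boye.httpclientandroidlib.client.entity.GzipCompressingEntity;
    //   import ch.boye.httpclientandroidlib.entity.ByteArrayEntity;
    //   import org.mozilla.gecko.sync.net.GzipNonChunkedCompressingEntity;
    final HttpEntity payload = new ByteArrayEntity("example ping".getBytes());
    new GzipCompressingEntity(payload).getContentLength();           // -1: length unknown, so the request is chunked.
    new GzipNonChunkedCompressingEntity(payload).getContentLength(); // gzipped size in bytes, sent as Content-Length.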


@@ -0,0 +1,92 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package org.mozilla.gecko.sync.net;
import ch.boye.httpclientandroidlib.HttpEntity;
import ch.boye.httpclientandroidlib.client.entity.GzipCompressingEntity;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
/**
* Wrapping entity that compresses content when {@link #writeTo writing}.
*
* This differs from {@link GzipCompressingEntity} in that it does not chunk
* the sent data, therefore replacing the "Transfer-Encoding" HTTP header with
* the "Content-Length" header required by some servers.
*
* However, to measure the content length, the gzipped content is temporarily
* buffered in memory, so be careful how much content you send!
*/
public class GzipNonChunkedCompressingEntity extends GzipCompressingEntity {
final int MAX_BUFFER_SIZE_BYTES = 10 * 1000 * 1000; // 10 MB.
private byte[] gzippedContent;
public GzipNonChunkedCompressingEntity(final HttpEntity entity) {
super(entity);
}
/**
* @return content length for gzipped content or -1 if there is an error
*/
@Override
public long getContentLength() {
try {
initBuffer();
} catch (final IOException e) {
// GzipCompressingEntity always returns -1 in which case a 'Content-Length' header is omitted.
// Presumably, without it the request will fail (either client-side or server-side).
return -1;
}
return gzippedContent.length;
}
@Override
public boolean isChunked() {
// "Content-Length" & chunked encoding are mutually exclusive:
// https://en.wikipedia.org/wiki/Chunked_transfer_encoding
return false;
}
@Override
public InputStream getContent() throws IOException {
initBuffer();
return new ByteArrayInputStream(gzippedContent);
}
@Override
public void writeTo(final OutputStream outstream) throws IOException {
initBuffer();
outstream.write(gzippedContent);
}
private void initBuffer() throws IOException {
if (gzippedContent != null) {
return;
}
final long unzippedContentLength = wrappedEntity.getContentLength();
if (unzippedContentLength > MAX_BUFFER_SIZE_BYTES) {
throw new IOException(
"Wrapped entity content length, " + unzippedContentLength + " bytes, exceeds max: " + MAX_BUFFER_SIZE_BYTES);
}
// The gzipped content should need a smaller buffer than this, but it's more
// efficient to allocate a single slightly-too-large buffer than to allocate
// twice when the gzipped content outgrows the stream's default buffer.
final ByteArrayOutputStream s = new ByteArrayOutputStream((int) unzippedContentLength);
try {
super.writeTo(s);
} finally {
s.close();
}
gzippedContent = s.toByteArray();
}
}
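
One behavioral note, shown as a hypothetical sketch (not part of this commit): if the wrapped
entity reports a length above MAX_BUFFER_SIZE_BYTES, initBuffer() throws, so getContentLength()
falls back to -1 and the eventual writeTo() fails rather than buffering an oversized ping.

    // Hypothetical sketch of the size guard; imports as in the snippet above,
    // plus java.io.ByteArrayOutputStream and java.io.IOException.
    final byte[] big = new byte[11 * 1000 * 1000]; // larger than MAX_BUFFER_SIZE_BYTES.
    final HttpEntity oversized = new GzipNonChunkedCompressingEntity(new ByteArrayEntity(big));
    oversized.getContentLength(); // -1: initBuffer() failed, so no Content-Length can be provided.
    try {
        oversized.writeTo(new ByteArrayOutputStream());
    } catch (final IOException e) {
        // Thrown here as well; the oversized payload is never buffered and the upload fails.
    }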