Bug 1021419: Implement per-table update and gethash requests

Monica Chew 2014-06-12 10:24:03 -07:00
parent 6e7a2fe51b
commit a78d3a2975
21 changed files with 498 additions and 487 deletions

View File

@ -102,8 +102,6 @@ add_test(function test_safebrowsing_update() {
var streamUpdater = Cc["@mozilla.org/url-classifier/streamupdater;1"]
.getService(Ci.nsIUrlClassifierStreamUpdater);
streamUpdater.updateUrl = URL + safebrowsingUpdatePath;
function onSuccess() {
run_next_test();
}
@ -115,7 +113,7 @@ add_test(function test_safebrowsing_update() {
}
streamUpdater.downloadUpdates("test-phish-simple,test-malware-simple", "",
onSuccess, onUpdateError, onDownloadError);
URL + safebrowsingUpdatePath, onSuccess, onUpdateError, onDownloadError);
});
add_test(function test_non_safebrowsing_cookie() {

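For reference, the shape of the migrated call, as a minimal sketch in the same xpcshell style (the URL, tables, and callbacks are the test's own; the commented-out lines show the removed attribute-based style):

var streamUpdater = Cc["@mozilla.org/url-classifier/streamupdater;1"]
                      .getService(Ci.nsIUrlClassifierStreamUpdater);
// Old style (removed by this commit): configure the singleton, then download.
//   streamUpdater.updateUrl = URL + safebrowsingUpdatePath;
//   streamUpdater.downloadUpdates(tables, body,
//                                 onSuccess, onUpdateError, onDownloadError);
// New style: the update URL travels with each request, so different tables
// can be fetched from different servers.
streamUpdater.downloadUpdates("test-phish-simple,test-malware-simple", "",
                              URL + safebrowsingUpdatePath,
                              onSuccess, onUpdateError, onDownloadError);
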
View File

@ -150,9 +150,9 @@
#define NS_URLCLASSIFIERDBSERVICE_CID \
{ 0x8a389f21, 0xf821, 0x4e29, { 0x9c, 0x6b, 0x3d, 0xe6, 0xf3, 0x3c, 0xd7, 0xcf} }
// {79e6b710-ce68-4639-ac6b-7d293af424a1}
// e1797597-f4d6-4dd3-a1e1-745ad352cd80
#define NS_URLCLASSIFIERSTREAMUPDATER_CID \
{ 0x79e6b710, 0xce68, 0x4639, { 0xac, 0x6b, 0x7d, 0x29, 0x3a, 0xf4, 0x24, 0xa1} }
{ 0xe1797597, 0xf4d6, 0x4dd3, { 0xa1, 0xe1, 0x74, 0x5a, 0xd3, 0x52, 0xcd, 0x80 }}
// {b7b2ccec-7912-4ea6-a548-b038447004bd}
#define NS_URLCLASSIFIERUTILS_CID \

View File

@ -205,7 +205,6 @@ add_test(function test_local_list() {
let streamUpdater = Cc["@mozilla.org/url-classifier/streamupdater;1"]
.getService(Ci.nsIUrlClassifierStreamUpdater);
streamUpdater.updateUrl = "http://localhost:4444/downloads";
// Load up some update chunks for the safebrowsing server to serve.
// This chunk contains the hash of blocklisted.com/.
@ -228,6 +227,7 @@ add_test(function test_local_list() {
streamUpdater.downloadUpdates(
"goog-downloadwhite-digest256,goog-badbinurl-shavar",
"goog-downloadwhite-digest256,goog-badbinurl-shavar;\n",
"http://localhost:4444/downloads",
updateSuccess, handleError, handleError);
});

View File

@ -257,7 +257,6 @@ function waitForUpdates() {
let streamUpdater = Cc["@mozilla.org/url-classifier/streamupdater;1"]
.getService(Ci.nsIUrlClassifierStreamUpdater);
streamUpdater.updateUrl = "http://localhost:4444/downloads";
// Load up some update chunks for the safebrowsing server to serve. This
// particular chunk contains the hash of whitelisted.com/ and
@ -280,6 +279,7 @@ function waitForUpdates() {
streamUpdater.downloadUpdates(
"goog-downloadwhite-digest256",
"goog-downloadwhite-digest256;\n",
"http://localhost:4444/downloads",
updateSuccess, handleError, handleError);
return deferred.promise;
}

View File

@ -318,7 +318,7 @@ Classifier::ApplyUpdates(nsTArray<TableUpdate*>* aUpdates)
nsresult rv = BackupTables();
NS_ENSURE_SUCCESS(rv, rv);
LOG(("Applying table updates."));
LOG(("Applying %d table updates.", aUpdates->Length()));
for (uint32_t i = 0; i < aUpdates->Length(); i++) {
// Previous ApplyTableUpdates() may have consumed this update.
@ -576,8 +576,9 @@ Classifier::ApplyTableUpdates(nsTArray<TableUpdate*>* aUpdates,
nsAutoPtr<HashStore> store(new HashStore(aTable, mStoreDirectory));
if (!store)
if (!store) {
return NS_ERROR_FAILURE;
}
// take the quick exit if there is no valid update for us
// (common case)

View File

@ -54,16 +54,16 @@ this.SafeBrowsing = {
let listManager = Cc["@mozilla.org/url-classifier/listmanager;1"].
getService(Ci.nsIUrlListManager);
for (let i = 0; i < phishingLists.length; ++i) {
listManager.registerTable(phishingLists[i], false);
listManager.registerTable(phishingLists[i], this.updateURL, this.gethashURL);
}
for (let i = 0; i < malwareLists.length; ++i) {
listManager.registerTable(malwareLists[i], false);
listManager.registerTable(malwareLists[i], this.updateURL, this.gethashURL);
}
for (let i = 0; i < downloadBlockLists.length; ++i) {
listManager.registerTable(downloadBlockLists[i], false);
listManager.registerTable(downloadBlockLists[i], this.updateURL, this.gethashURL);
}
for (let i = 0; i < downloadAllowLists.length; ++i) {
listManager.registerTable(downloadAllowLists[i], false);
listManager.registerTable(downloadAllowLists[i], this.updateURL, this.gethashURL);
}
this.addMozEntries();
@ -134,15 +134,8 @@ this.SafeBrowsing = {
this.updateURL = this.updateURL.replace("SAFEBROWSING_ID", clientID);
this.gethashURL = this.gethashURL.replace("SAFEBROWSING_ID", clientID);
let listManager = Cc["@mozilla.org/url-classifier/listmanager;1"].
getService(Ci.nsIUrlListManager);
listManager.setUpdateUrl(this.updateURL);
listManager.setGethashUrl(this.gethashURL);
},
controlUpdateChecking: function() {
log("phishingEnabled:", this.phishingEnabled, "malwareEnabled:", this.malwareEnabled);

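With setUpdateUrl and setGethashUrl gone, registration itself binds a table to its endpoints. A condensed sketch of what this enables (the second table name and its URLs are hypothetical; the first call mirrors the loops above):

let listManager = Cc["@mozilla.org/url-classifier/listmanager;1"]
                    .getService(Ci.nsIUrlListManager);
// Provider A's table, using the URLs read from the safebrowsing prefs.
listManager.registerTable("goog-phish-shavar", this.updateURL, this.gethashURL);
// A second provider's table can now coexist with its own endpoints.
listManager.registerTable("example-track-simple",
                          "https://lists.example.com/update",
                          "https://lists.example.com/gethash");
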
View File

@ -2,7 +2,6 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
// This is the only implementation of nsIUrlListManager.
// A class that manages lists, namely white and black lists for
// phishing or malware protection. The ListManager knows how to fetch,
@ -13,6 +12,17 @@
// TODO more comprehensive update tests, for example add unittest check
// that the listmanagers tables are properly written on updates
// Log only if browser.safebrowsing.debug is true
var debug = false;
function log(...stuff) {
if (!debug) {
return;
}
let msg = "listmanager: " + stuff.join(" ");
dump(msg + "\n");
}
function QueryAdapter(callback) {
this.callback_ = callback;
};
@ -28,46 +38,31 @@ QueryAdapter.prototype.handleResponse = function(value) {
* @constructor
*/
function PROT_ListManager() {
this.debugZone = "listmanager";
G_debugService.enableZone(this.debugZone);
this.currentUpdateChecker_ = null; // set when we toggle updates
this.prefs_ = new G_Preferences();
this.updateInterval = this.prefs_.getPref("urlclassifier.updateinterval", 30 * 60) * 1000;
this.updateserverURL_ = null;
this.gethashURL_ = null;
this.isTesting_ = false;
// A map of tableNames to objects of type
// { updateUrl: <updateUrl>, gethashUrl: <gethashUrl> }
this.tablesData = {};
// A map of updateUrls to maps of tables requiring updates, e.g.
// { safebrowsing-update-url: { goog-phish-shavar: true,
// goog-malware-shavar: true } }
this.needsUpdate_ = {};
this.observerServiceObserver_ = new G_ObserverServiceObserver(
'quit-application',
BindToObject(this.shutdown_, this),
true /*only once*/);
this.cookieObserver_ = new G_ObserverServiceObserver(
'cookie-changed',
BindToObject(this.cookieChanged_, this),
false);
/* Backoff interval should be between 30 and 60 minutes. */
var backoffInterval = 30 * 60 * 1000;
backoffInterval += Math.floor(Math.random() * (30 * 60 * 1000));
this.requestBackoff_ = new RequestBackoff(2 /* max errors */,
60*1000 /* retry interval, 1 min */,
4 /* num requests */,
60*60*1000 /* request time, 60 min */,
backoffInterval /* backoff interval, 60 min */,
8*60*60*1000 /* max backoff, 8hr */);
this.updateCheckers_ = {};
this.requestBackoffs_ = {};
this.dbService_ = Cc["@mozilla.org/url-classifier/dbservice;1"]
.getService(Ci.nsIUrlClassifierDBService);
this.hashCompleter_ = Cc["@mozilla.org/url-classifier/hashcompleter;1"]
.getService(Ci.nsIUrlClassifierHashCompleter);
debug = this.prefs_.getPref("browser.safebrowsing.debug");
}
/**
@ -80,53 +75,53 @@ PROT_ListManager.prototype.shutdown_ = function() {
}
}
/**
* Set the url we check for updates. If the new url is valid and different,
* update our table list.
*
* After setting the update url, the caller is responsible for registering
* tables and then toggling update checking. All the code for this logic is
* currently in browser/components/safebrowsing. Maybe it should be part of
* the listmanager?
*/
PROT_ListManager.prototype.setUpdateUrl = function(url) {
G_Debug(this, "Set update url: " + url);
if (url != this.updateserverURL_) {
this.updateserverURL_ = url;
this.requestBackoff_.reset();
// Remove old tables which probably aren't valid for the new provider.
for (var name in this.tablesData) {
delete this.tablesData[name];
}
}
}
/**
* Set the gethash url.
*/
PROT_ListManager.prototype.setGethashUrl = function(url) {
G_Debug(this, "Set gethash url: " + url);
if (url != this.gethashURL_) {
this.gethashURL_ = url;
this.hashCompleter_.gethashUrl = url;
}
}
/**
* Register a new table
* @param tableName - the name of the table
* @param opt_requireMac true if a mac is required on update, false otherwise
* @param updateUrl - the url for updating the table
* @param gethashUrl - the url for fetching hash completions
* @returns true if the table could be created; false otherwise
*/
PROT_ListManager.prototype.registerTable = function(tableName,
opt_requireMac) {
PROT_ListManager.prototype.registerTable = function(tableName,
updateUrl,
gethashUrl) {
log("registering " + tableName + " with " + updateUrl);
if (!updateUrl) {
log("Can't register table " + tableName + " without updateUrl");
return false;
}
this.tablesData[tableName] = {};
this.tablesData[tableName].needsUpdate = false;
this.tablesData[tableName].updateUrl = updateUrl;
this.tablesData[tableName].gethashUrl = gethashUrl;
// Keep track of all of our update URLs.
if (!this.needsUpdate_[updateUrl]) {
this.needsUpdate_[updateUrl] = {};
/* Backoff interval should be between 30 and 60 minutes. */
var backoffInterval = 30 * 60 * 1000;
backoffInterval += Math.floor(Math.random() * (30 * 60 * 1000));
log("Creating request backoff for " + updateUrl);
this.requestBackoffs_[updateUrl] = new RequestBackoff(2 /* max errors */,
60*1000 /* retry interval, 1 min */,
4 /* num requests */,
60*60*1000 /* request time, 60 min */,
backoffInterval /* backoff interval, 30-60 min */,
8*60*60*1000 /* max backoff, 8hr */);
}
this.needsUpdate_[updateUrl][tableName] = false;
return true;
}
PROT_ListManager.prototype.getGethashUrl = function(tableName) {
if (this.tablesData[tableName] && this.tablesData[tableName].gethashUrl) {
return this.tablesData[tableName].gethashUrl;
}
return "";
}
/**
* Enable updates for some tables
* @param tables - an array of table names that need updating
@ -135,13 +130,14 @@ PROT_ListManager.prototype.enableUpdate = function(tableName) {
var changed = false;
var table = this.tablesData[tableName];
if (table) {
G_Debug(this, "Enabling table updates for " + tableName);
table.needsUpdate = true;
log("Enabling table updates for " + tableName);
this.needsUpdate_[table.updateUrl][tableName] = true;
changed = true;
}
if (changed === true)
if (changed) {
this.maybeToggleUpdateChecking();
}
}
/**
@ -152,42 +148,30 @@ PROT_ListManager.prototype.disableUpdate = function(tableName) {
var changed = false;
var table = this.tablesData[tableName];
if (table) {
G_Debug(this, "Disabling table updates for " + tableName);
table.needsUpdate = false;
log("Disabling table updates for " + tableName);
this.needsUpdate_[table.updateUrl][tableName] = false;
changed = true;
}
if (changed === true)
if (changed) {
this.maybeToggleUpdateChecking();
}
}
/**
* Determine if we have some tables that need updating.
*/
PROT_ListManager.prototype.requireTableUpdates = function() {
for (var type in this.tablesData) {
// Tables that need updating even if other tables dont require it
if (this.tablesData[type].needsUpdate)
for (var name in this.tablesData) {
// Tables that need updating even if other tables don't require it
if (this.needsUpdate_[this.tablesData[name].updateUrl][name]) {
return true;
}
}
return false;
}
/**
* Start managing the lists we know about. We don't do this automatically
* when the listmanager is instantiated because the profile directory
* (where we store the lists) might not be available.
*/
PROT_ListManager.prototype.maybeStartManagingUpdates = function() {
if (this.isTesting_)
return;
// We might have been told about tables already, so see if we should be
// actually updating.
this.maybeToggleUpdateChecking();
}
/**
* Acts as a nsIUrlClassifierCallback for getTables.
*/
@ -197,25 +181,40 @@ PROT_ListManager.prototype.kickoffUpdate_ = function (onDiskTableData)
var initialUpdateDelay = 3000;
// Check if any table registered for updates has ever been downloaded.
var diskTablesAreUpdating = false;
var updatingExisting = false;
for (var tableName in this.tablesData) {
if (this.tablesData[tableName].needsUpdate) {
if (this.needsUpdate_[this.tablesData[tableName].updateUrl][tableName]) {
if (onDiskTableData.indexOf(tableName) != -1) {
diskTablesAreUpdating = true;
updatingExisting = true;
}
}
}
// If the user has never downloaded tables, do the check now.
// If the user has tables, add a fuzz of a few minutes.
if (diskTablesAreUpdating) {
// Add a fuzz of 0-5 minutes.
initialUpdateDelay += Math.floor(Math.random() * (5 * 60 * 1000));
log("needsUpdate: " + JSON.stringify(this.needsUpdate_, undefined, 2));
for (var url in this.needsUpdate_) {
// If the user has tables, add a fuzz of a few minutes.
if (updatingExisting) {
// Add a fuzz of 0-5 minutes.
initialUpdateDelay += Math.floor(Math.random() * (5 * 60 * 1000));
}
// If we haven't already kicked off updates for this updateUrl, set a
// repeating timer for it. The delay will be reset either on updateSuccess
// to this.updateinterval, or backed off on downloadError.
if (!this.updateCheckers_[url]) {
this.updateCheckers_[url] =
new G_Alarm(BindToObject(this.checkForUpdates, this, url),
initialUpdateDelay, true /* repeating */);
}
}
}
this.currentUpdateChecker_ =
new G_Alarm(BindToObject(this.checkForUpdates, this),
initialUpdateDelay);
PROT_ListManager.prototype.stopUpdateCheckers = function() {
log("Stopping updates");
for (var updateUrl in this.updateCheckers_) {
this.updateCheckers_[updateUrl].cancel();
this.updateCheckers_[updateUrl] = null;
}
}
/**
@ -223,70 +222,21 @@ PROT_ListManager.prototype.kickoffUpdate_ = function (onDiskTableData)
* Wardens may call us with new tables that need to be updated.
*/
PROT_ListManager.prototype.maybeToggleUpdateChecking = function() {
// If we are testing or dont have an application directory yet, we should
// not start reading tables from disk or schedule remote updates
if (this.isTesting_)
return;
// We update tables if we have some tables that want updates. If there
// are no tables that want to be updated - we dont need to check anything.
if (this.requireTableUpdates() === true) {
G_Debug(this, "Starting managing lists");
this.startUpdateChecker();
if (this.requireTableUpdates()) {
log("Starting managing lists");
// Multiple wardens can ask us to reenable updates at the same time, but we
// really just need to schedule a single update.
if (!this.currentUpdateChecker && !this.startingUpdate_) {
// Get the list of existing tables from the DBService before making any
// update requests.
if (!this.startingUpdate_) {
this.startingUpdate_ = true;
// check the current state of tables in the database
this.dbService_.getTables(BindToObject(this.kickoffUpdate_, this));
}
} else {
G_Debug(this, "Stopping managing lists (if currently active)");
this.stopUpdateChecker(); // Cancel pending updates
}
}
/**
* Start periodic checks for updates. Idempotent.
* We want to distribute update checks evenly across the update period (an
* hour). The first update is scheduled for a random time between 0.5 and 1.5
* times the update interval.
*/
PROT_ListManager.prototype.startUpdateChecker = function() {
this.stopUpdateChecker();
// Schedule the first check for between 15 and 45 minutes.
var repeatingUpdateDelay = this.updateInterval / 2;
repeatingUpdateDelay += Math.floor(Math.random() * this.updateInterval);
this.updateChecker_ = new G_Alarm(BindToObject(this.initialUpdateCheck_,
this),
repeatingUpdateDelay);
}
/**
* Callback for the first update check.
* We go ahead and check for table updates, then start a regular timer (once
* every update interval).
*/
PROT_ListManager.prototype.initialUpdateCheck_ = function() {
this.checkForUpdates();
this.updateChecker_ = new G_Alarm(BindToObject(this.checkForUpdates, this),
this.updateInterval, true /* repeat */);
}
/**
* Stop checking for updates. Idempotent.
*/
PROT_ListManager.prototype.stopUpdateChecker = function() {
if (this.updateChecker_) {
this.updateChecker_.cancel();
this.updateChecker_ = null;
}
// Cancel the oneoff check from maybeToggleUpdateChecking.
if (this.currentUpdateChecker_) {
this.currentUpdateChecker_.cancel();
this.currentUpdateChecker_ = null;
log("Stopping managing lists (if currently active)");
this.stopUpdateCheckers(); // Cancel pending updates
}
}
@ -303,13 +253,13 @@ PROT_ListManager.prototype.stopUpdateChecker = function() {
*/
PROT_ListManager.prototype.safeLookup = function(key, callback) {
try {
G_Debug(this, "safeLookup: " + key);
log("safeLookup: " + key);
var cb = new QueryAdapter(callback);
this.dbService_.lookup(key,
BindToObject(cb.handleResponse, cb),
true);
} catch(e) {
G_Debug(this, "safeLookup masked failure for key " + key + ": " + e);
log("safeLookup masked failure for key " + key + ": " + e);
callback.handleEvent("");
}
}
@ -317,24 +267,22 @@ PROT_ListManager.prototype.safeLookup = function(key, callback) {
/**
* Updates our internal tables from the update server
*
* @returns true when a new request was scheduled, false if an old request
* was still pending.
* @param updateUrl: request updates for tables associated with that url.
* Returns false immediately if the url is empty.
*/
PROT_ListManager.prototype.checkForUpdates = function() {
// Allow new updates to be scheduled from maybeToggleUpdateChecking()
this.currentUpdateChecker_ = null;
if (!this.updateserverURL_) {
G_Debug(this, 'checkForUpdates: no update server url');
PROT_ListManager.prototype.checkForUpdates = function(updateUrl) {
log("checkForUpdates with " + updateUrl);
// See if we've triggered the request backoff logic.
if (!updateUrl) {
return false;
}
// See if we've triggered the request backoff logic.
if (!this.requestBackoff_.canMakeRequest())
if (!this.requestBackoffs_[updateUrl] ||
!this.requestBackoffs_[updateUrl].canMakeRequest()) {
return false;
}
// Grab the current state of the tables from the database
this.dbService_.getTables(BindToObject(this.makeUpdateRequest_, this));
this.dbService_.getTables(BindToObject(this.makeUpdateRequest_, this,
updateUrl));
return true;
}
@ -344,56 +292,76 @@ PROT_ListManager.prototype.checkForUpdates = function() {
* @param tableData List of table data already in the database, in the form
* tablename;<chunk ranges>\n
*/
PROT_ListManager.prototype.makeUpdateRequest_ = function(tableData) {
var tableList;
var tableNames = {};
PROT_ListManager.prototype.makeUpdateRequest_ = function(updateUrl, tableData) {
log("this.tablesData: " + JSON.stringify(this.tablesData, undefined, 2));
log("existing chunks: " + tableData + "\n");
// Disallow blank updateUrls
if (!updateUrl) {
return;
}
// An object of the form
// { tableList: comma-separated list of tables to request,
// tableNames: map of tables that need updating,
// request: list of tables and existing chunk ranges from tableData
// }
var streamerMap = { tableList: null, tableNames: {}, request: "" };
for (var tableName in this.tablesData) {
if (this.tablesData[tableName].needsUpdate)
tableNames[tableName] = true;
if (!tableList) {
tableList = tableName;
// Skip tables not matching this update url
if (this.tablesData[tableName].updateUrl != updateUrl) {
continue;
}
if (this.needsUpdate_[this.tablesData[tableName].updateUrl][tableName]) {
streamerMap.tableNames[tableName] = true;
}
if (!streamerMap.tableList) {
streamerMap.tableList = tableName;
} else {
tableList += "," + tableName;
streamerMap.tableList += "," + tableName;
}
}
var request = "";
// For each table already in the database, include the chunk data from
// the database
// Build the request. For each table already in the database, include the
// chunk data from the database
var lines = tableData.split("\n");
for (var i = 0; i < lines.length; i++) {
var fields = lines[i].split(";");
if (tableNames[fields[0]]) {
request += lines[i] + "\n";
delete tableNames[fields[0]];
var name = fields[0];
if (streamerMap.tableNames[name]) {
streamerMap.request += lines[i] + "\n";
delete streamerMap.tableNames[name];
}
}
// For each requested table that didn't have chunk data in the database,
// request it fresh
for (var tableName in tableNames) {
request += tableName + ";\n";
for (let tableName in streamerMap.tableNames) {
streamerMap.request += tableName + ";\n";
}
G_Debug(this, 'checkForUpdates: scheduling request..');
log("update request: " + JSON.stringify(streamerMap, undefined, 2) + "\n");
if (Object.keys(streamerMap.tableNames).length > 0) {
this.makeUpdateRequestForEntry_(updateUrl, streamerMap.tableList,
streamerMap.request);
}
}
PROT_ListManager.prototype.makeUpdateRequestForEntry_ = function(updateUrl,
tableList,
request) {
log("makeUpdateRequestForEntry_: request " + request + " update: " +
updateUrl + " tablelist: " + tableList + "\n");
var streamer = Cc["@mozilla.org/url-classifier/streamupdater;1"]
.getService(Ci.nsIUrlClassifierStreamUpdater);
try {
streamer.updateUrl = this.updateserverURL_;
} catch (e) {
G_Debug(this, 'invalid url');
return;
}
this.requestBackoff_.noteRequest();
this.requestBackoffs_[updateUrl].noteRequest();
if (!streamer.downloadUpdates(tableList,
request,
BindToObject(this.updateSuccess_, this),
BindToObject(this.updateError_, this),
BindToObject(this.downloadError_, this))) {
G_Debug(this, "pending update, wait until later");
if (!streamer.downloadUpdates(
tableList,
request,
updateUrl,
BindToObject(this.updateSuccess_, this, tableList, updateUrl),
BindToObject(this.updateError_, this, tableList, updateUrl),
BindToObject(this.downloadError_, this, tableList, updateUrl))) {
log("pending update, wait until later");
}
}
@ -402,26 +370,34 @@ PROT_ListManager.prototype.makeUpdateRequest_ = function(tableData) {
* @param waitForUpdate String The number of seconds that the client should
* wait before requesting again.
*/
PROT_ListManager.prototype.updateSuccess_ = function(waitForUpdate) {
G_Debug(this, "update success: " + waitForUpdate);
PROT_ListManager.prototype.updateSuccess_ = function(tableList, updateUrl,
waitForUpdate) {
log("update success for " + tableList + " from " + updateUrl + ": " +
waitForUpdate + "\n");
if (waitForUpdate) {
var delay = parseInt(waitForUpdate, 10);
// As long as the delay is something sane (5 minutes or more), update
// our delay time for requesting updates
if (delay >= (5 * 60) && this.updateChecker_)
this.updateChecker_.setDelay(delay * 1000);
// our delay time for requesting updates. Setting the delay requires a
// repeating timer, so always use one.
if (delay >= (5 * 60) && this.updateCheckers_[updateUrl]) {
log("Waiting " + delay);
this.updateCheckers_[updateUrl].setDelay(delay * 1000);
} else {
log("Ignoring delay from server, waiting " + this.updateInterval);
this.updateCheckers_[updateUrl].setDelay(this.updateInterval);
}
}
// Let the backoff object know that we completed successfully.
this.requestBackoff_.noteServerResponse(200);
this.requestBackoffs_[updateUrl].noteServerResponse(200);
}
/**
* Callback function if the update request failed.
* @param result String The error code of the failure
*/
PROT_ListManager.prototype.updateError_ = function(result) {
G_Debug(this, "update error: " + result);
PROT_ListManager.prototype.updateError_ = function(table, updateUrl, result) {
log("update error for " + table + " from " + updateUrl + ": " + result + "\n");
// XXX: there was some trouble applying the updates.
}
@ -429,34 +405,22 @@ PROT_ListManager.prototype.updateError_ = function(result) {
* Callback function when the download failed
* @param status String http status or an empty string if connection refused.
*/
PROT_ListManager.prototype.downloadError_ = function(status) {
G_Debug(this, "download error: " + status);
PROT_ListManager.prototype.downloadError_ = function(table, updateUrl, status) {
log("download error for " + table + ": " + status + "\n");
// If status is empty, then we assume that we got an NS_CONNECTION_REFUSED
// error. In this case, we treat it as an http 500 error.
if (!status) {
status = 500;
}
status = parseInt(status, 10);
this.requestBackoff_.noteServerResponse(status);
if (this.requestBackoff_.isErrorStatus(status)) {
this.requestBackoffs_[updateUrl].noteServerResponse(status);
if (this.requestBackoffs_[updateUrl].isErrorStatus(status)) {
// Schedule an update for when our backoff is complete
this.currentUpdateChecker_ =
new G_Alarm(BindToObject(this.checkForUpdates, this),
this.requestBackoff_.nextRequestDelay());
this.updateCheckers_[updateUrl].setDelay(
this.requestBackoffs_[updateUrl].nextRequestDelay());
}
}
/**
* Called when cookies are cleared
*/
PROT_ListManager.prototype.cookieChanged_ = function(subject, topic, data) {
if (data != "cleared")
return;
G_Debug(this, "cookies cleared");
}
PROT_ListManager.prototype.QueryInterface = function(iid) {
if (iid.equals(Ci.nsISupports) ||
iid.equals(Ci.nsIUrlListManager) ||

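After two registrations against different providers, the manager's bookkeeping takes the shape sketched below (names and URLs hypothetical). This is the structure the JSON.stringify logging in kickoffUpdate_ and makeUpdateRequest_ prints: tablesData is keyed by table, needsUpdate_ by update URL, and each needsUpdate_ key owns one G_Alarm and one RequestBackoff.

// this.tablesData:
// { "goog-phish-shavar":    { needsUpdate: false,
//                             updateUrl:  "https://a.example/update",
//                             gethashUrl: "https://a.example/gethash" },
//   "example-track-simple": { needsUpdate: false,
//                             updateUrl:  "https://b.example/update",
//                             gethashUrl: "https://b.example/gethash" } }
// this.needsUpdate_:
// { "https://a.example/update": { "goog-phish-shavar": true },
//   "https://b.example/update": { "example-track-simple": false } }
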
View File

@ -21,7 +21,7 @@ const HTTP_TEMPORARY_REDIRECT = 307;
* @param retryIncrement Time (ms) for each retry before backing off.
* @param maxRequests Number the number of requests needed to trigger backoff
* @param requestPeriod Number time (ms) in which maxRequests have to occur to
* trigger the backoff behavior
* trigger the backoff behavior (0 to disable maxRequests)
* @param timeoutIncrement Number time (ms) of the starting timeout period;
* we double this time for consecutive errors
* @param maxTimeout Number time (ms) maximum timeout period
@ -45,7 +45,7 @@ function RequestBackoff(maxErrors, retryIncrement,
}
/**
* Reset the object for reuse.
* Reset the object for reuse. This deliberately doesn't clear requestTimes_.
*/
RequestBackoff.prototype.reset = function() {
this.numErrors_ = 0;

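Both consumers drive a RequestBackoff instance the same way, one per update or gethash URL. A minimal sketch of the expected call sequence, using the listmanager's constructor arguments (its backoff interval is randomized between 30 and 60 minutes; 45 stands in here):

var backoff = new RequestBackoff(2 /* max errors */,
                                 60 * 1000 /* retry interval, 1 min */,
                                 4 /* num requests */,
                                 60 * 60 * 1000 /* request time, 60 min */,
                                 45 * 60 * 1000 /* backoff interval */,
                                 8 * 60 * 60 * 1000 /* max backoff, 8hr */);
if (backoff.canMakeRequest()) {
  backoff.noteRequest();            // record the attempt before opening a channel
  // ... issue the HTTP request, then report the resulting status code ...
  backoff.noteServerResponse(status);
  if (backoff.isErrorStatus(status)) {
    // Wait at least this many milliseconds before trying this URL again.
    let delayMs = backoff.nextRequestDelay();
  }
}
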
View File

@ -46,21 +46,20 @@ interface nsIUrlClassifierHashCompleterCallback : nsISupports
* This is only ever used for testing and should absolutely be deleted (I
* think).
*/
[scriptable, uuid(ade9b72b-3562-44f5-aba6-e63246be53ae)]
[scriptable, uuid(231fb2ad-ea8a-4e63-a331-eafc3b434811)]
interface nsIUrlClassifierHashCompleter : nsISupports
{
/**
* Request a completed hash.
* Request a completed hash from the given gethash url.
*
* @param partialHash
* The 32-bit hash encountered by the url-classifier.
* @param gethashUrl
* The gethash url to use.
* @param callback
* An nsIUrlClassifierCompleterCallback instance.
*/
void complete(in ACString partialHash,
in ACString gethashUrl,
in nsIUrlClassifierHashCompleterCallback callback);
/**
* The URL for the gethash request
*/
attribute ACString gethashUrl;
};

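A sketch of a caller against the revised interface (the prefix and URL are placeholders; the callback shape follows nsIUrlClassifierHashCompleterCallback as exercised by test_hashcompleter.js later in this commit):

let completer = Cc["@mozilla.org/url-classifier/hashcompleter;1"]
                  .getService(Ci.nsIUrlClassifierHashCompleter);
completer.complete(fullHash.substring(0, 4),        // 32-bit partial hash
                   "http://localhost:8080/gethash", // per-table gethash URL
                   {
  completion: function(hash, table, chunkId) {
    // Invoked once for each matching full-length hash in the response.
  },
  completionFinished: function(status) {
    // Invoked exactly once when the request settles, success or failure.
  }
});
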
View File

@ -11,22 +11,17 @@
* downloading the whole update and then updating the sqlite database, we
* update tables as the data is streaming in.
*/
[scriptable, uuid(79e6b710-ce68-4639-ac6b-7d293af424a1)]
[scriptable, uuid(e1797597-f4d6-4dd3-a1e1-745ad352cd80)]
interface nsIUrlClassifierStreamUpdater : nsISupports
{
/**
* The Url to download from. Should be plain ascii text.
*/
attribute ACString updateUrl;
/**
* Try to download updates from updateUrl. Only one instance of this
* runs at a time, so we return false if another instance is already
* running.
* This is used in nsIUrlListManager as well as in testing.
* Try to download updates from updateUrl. If an update is already in
* progress, queues the requested update. This is used in nsIUrlListManager
* as well as in testing.
* @param aRequestTables Comma-separated list of tables included in this
* update.
* @param aRequestBody The body for the request.
* @param aUpdateUrl The plaintext url from which to request updates.
* @param aSuccessCallback Called after a successful update.
* @param aUpdateErrorCallback Called for problems applying the update
* @param aDownloadErrorCallback Called if we get an http error or a
@ -34,6 +29,7 @@ interface nsIUrlClassifierStreamUpdater : nsISupports
*/
boolean downloadUpdates(in ACString aRequestTables,
in ACString aRequestBody,
in ACString aUpdateUrl,
in nsIUrlClassifierCallback aSuccessCallback,
in nsIUrlClassifierCallback aUpdateErrorCallback,
in nsIUrlClassifierCallback aDownloadErrorCallback);

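Because downloadUpdates now queues concurrent work instead of rejecting it, two providers can request updates back to back. A sketch of the behavior implemented in nsUrlClassifierStreamUpdater.cpp below (URLs and the second table name are hypothetical):

let updater = Cc["@mozilla.org/url-classifier/streamupdater;1"]
                .getService(Ci.nsIUrlClassifierStreamUpdater);
// Starts immediately; downloadUpdates returns true.
updater.downloadUpdates("goog-phish-shavar", "goog-phish-shavar;\n",
                        "https://a.example/update",
                        onSuccess, onUpdateError, onDownloadError);
// Arrives while the first is in flight: returns false, is appended to
// mPendingRequests, and is replayed by FetchNextRequest() after the first
// update succeeds.
updater.downloadUpdates("example-track-simple", "example-track-simple;\n",
                        "https://b.example/update",
                        onSuccess, onUpdateError, onDownloadError);
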
View File

@ -18,29 +18,29 @@ interface nsIUrlListManagerCallback : nsISupports {
};
[scriptable, uuid(62484bb5-bf7e-4988-9055-8803b16b48a1)]
[scriptable, uuid(653afdc4-ad4e-4ee0-8375-c576e85ebc40)]
interface nsIUrlListManager : nsISupports
{
/**
* Set the URL we check for updates.
* Get the gethash url for this table
*/
void setUpdateUrl(in ACString url);
ACString getGethashUrl(in ACString tableName);
/**
* Set the URL that we will query for complete hashes after a partial
* hash match.
*/
void setGethashUrl(in ACString url);
/**
* Add a table to the list of tables we are managing. The name is a
* Add a table to the list of tables we are managing. The name is a
* string of the format provider_name-semantic_type-table_type. For
* example, goog-white-enchash or goog-black-url.
* @param tableName A string of the format
* provider_name-semantic_type-table_type. For example,
* goog-white-enchash or goog-black-url.
* @param updateUrl The URL from which to fetch updates.
* @param gethashUrl The URL from which to fetch hash completions.
*/
boolean registerTable(in ACString tableName);
boolean registerTable(in ACString tableName,
in ACString updateUrl,
in ACString gethashUrl);
/**
* Turn on update checking for a table. I.e., during the next server
* Turn on update checking for a table. I.e., during the next server
* check, download updates for this table.
*/
void enableUpdate(in ACString tableName);
@ -57,6 +57,4 @@ interface nsIUrlListManager : nsISupports
*/
void safeLookup(in nsIPrincipal key,
in nsIUrlListManagerCallback cb);
void checkForUpdates();
};

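The contract, condensed from the listmanager.js implementation earlier in this commit (table names and URLs are illustrative):

let listManager = Cc["@mozilla.org/url-classifier/listmanager;1"]
                    .getService(Ci.nsIUrlListManager);
listManager.registerTable("test-phish-simple", "", "");    // => false, no updateUrl
listManager.registerTable("test-phish-simple",
                          "http://localhost:4444/downloads",
                          "http://localhost:4444/gethash"); // => true
listManager.getGethashUrl("test-phish-simple"); // => "http://localhost:4444/gethash"
listManager.getGethashUrl("no-such-table");     // => "" for unregistered tables
listManager.enableUpdate("test-phish-simple");  // arms the per-URL update timer
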
View File

@ -94,7 +94,7 @@ static bool gShuttingDownThread = false;
static mozilla::Atomic<int32_t> gFreshnessGuarantee(CONFIRM_AGE_DEFAULT_SEC);
// -------------------------------------------------------------------------
// Actual worker implemenatation
// Actual worker implementation
class nsUrlClassifierDBServiceWorker MOZ_FINAL :
public nsIUrlClassifierDBServiceWorker
{
@ -428,8 +428,9 @@ nsUrlClassifierDBServiceWorker::BeginUpdate(nsIUrlClassifierUpdateObserver *obse
{
LOG(("nsUrlClassifierDBServiceWorker::BeginUpdate [%s]", PromiseFlatCString(tables).get()));
if (gShuttingDownThread)
if (gShuttingDownThread) {
return NS_ERROR_NOT_INITIALIZED;
}
NS_ENSURE_STATE(!mUpdateObserver);
@ -523,8 +524,10 @@ nsUrlClassifierDBServiceWorker::UpdateStream(const nsACString& chunk)
NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::FinishStream()
{
if (gShuttingDownThread)
if (gShuttingDownThread) {
LOG(("shutting down"));
return NS_ERROR_NOT_INITIALIZED;
}
NS_ENSURE_STATE(mInStream);
NS_ENSURE_STATE(mUpdateObserver);
@ -822,13 +825,26 @@ nsUrlClassifierLookupCallback::LookupComplete(nsTArray<LookupResult>* results)
// We will complete partial matches and matches that are stale.
if (!result.Confirmed()) {
nsCOMPtr<nsIUrlClassifierHashCompleter> completer;
nsCString gethashUrl;
nsresult rv;
nsCOMPtr<nsIUrlListManager> listManager = do_GetService(
"@mozilla.org/url-classifier/listmanager;1", &rv);
NS_ENSURE_SUCCESS(rv, rv);
rv = listManager->GetGethashUrl(result.mTableName, gethashUrl);
NS_ENSURE_SUCCESS(rv, rv);
// We only allow empty gethashUrls for test- tables
if (gethashUrl.IsEmpty()) {
MOZ_ASSERT(
StringBeginsWith(result.mTableName, NS_LITERAL_CSTRING("test-")),
"Only test tables may have empty gethash urls");
}
if (mDBService->GetCompleter(result.mTableName,
getter_AddRefs(completer))) {
nsAutoCString partialHash;
partialHash.Assign(reinterpret_cast<char*>(&result.hash.prefix),
PREFIX_SIZE);
nsresult rv = completer->Complete(partialHash, this);
nsresult rv = completer->Complete(partialHash, gethashUrl, this);
if (NS_SUCCEEDED(rv)) {
mPendingCompletions++;
}
@ -1342,8 +1358,10 @@ nsUrlClassifierDBService::BeginUpdate(nsIUrlClassifierUpdateObserver *observer,
{
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
if (mInUpdate)
if (mInUpdate) {
LOG(("Already updating, not available"));
return NS_ERROR_NOT_AVAILABLE;
}
mInUpdate = true;

View File

@ -13,6 +13,7 @@
#include "nsIObserver.h"
#include "nsUrlClassifierPrefixSet.h"
#include "nsIUrlClassifierHashCompleter.h"
#include "nsIUrlListManager.h"
#include "nsIUrlClassifierDBService.h"
#include "nsIURIClassifier.h"
#include "nsToolkitCompsCID.h"

View File

@ -14,51 +14,37 @@ const COMPLETE_LENGTH = 32;
const PARTIAL_LENGTH = 4;
// These backoff related constants are taken from v2 of the Google Safe Browsing
// API.
// API. All times are in milliseconds.
// BACKOFF_ERRORS: the number of errors incurred until we start to back off.
// BACKOFF_INTERVAL: the initial time, in seconds, to wait once we start backing
// BACKOFF_INTERVAL: the initial time to wait once we start backing
// off.
// BACKOFF_MAX: as the backoff time doubles after each failure, this is a
// ceiling on the time to wait, in seconds.
// BACKOFF_TIME: length of the interval of time, in seconds, during which errors
// are taken into account.
// ceiling on the time to wait.
const BACKOFF_ERRORS = 2;
const BACKOFF_INTERVAL = 30 * 60;
const BACKOFF_MAX = 8 * 60 * 60;
const BACKOFF_TIME = 5 * 60;
const BACKOFF_INTERVAL = 30 * 60 * 1000;
const BACKOFF_MAX = 8 * 60 * 60 * 1000;
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/Services.jsm");
function HashCompleter() {
// This is a HashCompleterRequest and is used by multiple calls to |complete|
// in succession to avoid unnecessarily creating requests. Once it has been
// started, this is set to null again.
// The current HashCompleterRequest in flight. Once it is started, it is set
// to null. It may be used by multiple calls to |complete| in succession to
// avoid creating multiple requests to the same gethash URL.
this._currentRequest = null;
// A map of gethashUrls to HashCompleterRequests that haven't yet begun.
this._pendingRequests = {};
// A map of gethash URLs to RequestBackoff objects.
this._backoffs = {};
// Whether we have been informed of a shutdown by the xpcom-shutdown event.
this._shuttingDown = false;
// All of these backoff properties are different per completer as the DB
// service keeps one completer per table.
//
// _backoff tells us whether we are "backing off" from making requests.
// It is set in |noteServerResponse| and set after a number of failures.
this._backoff = false;
// _backoffTime tells us how long we should wait (in seconds) before making
// another request.
this._backoffTime = 0;
// _nextRequestTime is the earliest time at which we are allowed to make
// another request by the backoff policy. It is measured in milliseconds.
this._nextRequestTime = 0;
// A list of the times at which a failed request was made, recorded in
// |noteServerResponse|. Sorted by oldest to newest and its length is clamped
// by BACKOFF_ERRORS.
this._errorTimes = [];
Services.obs.addObserver(this, "xpcom-shutdown", true);
}
HashCompleter.prototype = {
classID: Components.ID("{9111de73-9322-4bfc-8b65-2b727f3e6ec8}"),
QueryInterface: XPCOMUtils.generateQI([Ci.nsIUrlClassifierHashCompleter,
@ -70,108 +56,91 @@ HashCompleter.prototype = {
// This is mainly how the HashCompleter interacts with other components.
// Even though it only takes one partial hash and callback, subsequent
// calls are made into the same HTTP request by using a thread dispatch.
complete: function HC_complete(aPartialHash, aCallback) {
if (!this._currentRequest) {
this._currentRequest = new HashCompleterRequest(this);
// It's possible for getHashUrl to not be set, usually at start-up.
if (this._getHashUrl) {
Services.tm.currentThread.dispatch(this, Ci.nsIThread.DISPATCH_NORMAL);
}
complete: function HC_complete(aPartialHash, aGethashUrl, aCallback) {
if (!aGethashUrl) {
throw Cr.NS_ERROR_NOT_INITIALIZED;
}
this._currentRequest.add(aPartialHash, aCallback);
if (!this._currentRequest) {
this._currentRequest = new HashCompleterRequest(this, aGethashUrl);
}
if (this._currentRequest.gethashUrl == aGethashUrl) {
this._currentRequest.add(aPartialHash, aCallback);
} else {
if (!this._pendingRequests[aGethashUrl]) {
this._pendingRequests[aGethashUrl] =
new HashCompleterRequest(this, aGethashUrl);
}
this._pendingRequests[aGethashUrl].add(aPartialHash, aCallback);
}
if (!this._backoffs[aGethashUrl]) {
// Initialize request backoffs separately, since requests are deleted
// after they are dispatched.
var jslib = Cc["@mozilla.org/url-classifier/jslib;1"]
.getService().wrappedJSObject;
this._backoffs[aGethashUrl] = new jslib.RequestBackoff(
BACKOFF_ERRORS /* max errors */,
60*1000 /* retry interval, 1 min */,
10 /* keep track of max requests */,
0 /* don't throttle on successful requests per time period */,
BACKOFF_INTERVAL /* backoff interval, 30 min */,
BACKOFF_MAX /* max backoff, 8hr */);
}
// Start off this request. Without dispatching to a thread, every call to
// complete makes an individual HTTP request.
Services.tm.currentThread.dispatch(this, Ci.nsIThread.DISPATCH_NORMAL);
},
// This is called when either the getHashUrl has been set or after a few calls
// to |complete|. It starts off the HTTP request by making a |begin| call
// to the HashCompleterRequest.
run: function HC_run() {
// This is called after several calls to |complete|, or after the
// currentRequest has finished. It starts off the HTTP request by making a
// |begin| call to the HashCompleterRequest.
run: function() {
// Clear everything on shutdown
if (this._shuttingDown) {
this._currentRequest = null;
this._pendingRequests = null;
for (var url in this._backoffs) {
this._backoffs[url] = null;
}
throw Cr.NS_ERROR_NOT_INITIALIZED;
}
if (!this._currentRequest) {
return;
// If we don't have an in-flight request, make one
let pendingUrls = Object.keys(this._pendingRequests);
if (!this._currentRequest && (pendingUrls.length > 0)) {
let nextUrl = pendingUrls[0];
this._currentRequest = this._pendingRequests[nextUrl];
delete this._pendingRequests[nextUrl];
}
if (!this._getHashUrl) {
throw Cr.NS_ERROR_NOT_INITIALIZED;
}
let url = this._getHashUrl;
let uri = Services.io.newURI(url, null, null);
this._currentRequest.setURI(uri);
// If |begin| fails, we should get rid of our request.
try {
this._currentRequest.begin();
}
finally {
this._currentRequest = null;
}
},
get gethashUrl() {
return this._getHashUrl;
},
// Because we hold off on making a request until we have a valid getHashUrl,
// we kick off the process here.
set gethashUrl(aNewUrl) {
this._getHashUrl = aNewUrl;
if (this._currentRequest) {
Services.tm.currentThread.dispatch(this, Ci.nsIThread.DISPATCH_NORMAL);
try {
this._currentRequest.begin();
} finally {
// If |begin| fails, we should get rid of our request.
this._currentRequest = null;
}
}
},
// This handles all the logic about setting a back off time based on
// server responses. It should only be called once in the life time of a
// request.
// The logic behind backoffs is documented in the Google Safe Browsing API,
// the general idea is that a completer should go into backoff mode after
// BACKOFF_ERRORS errors in the last BACKOFF_TIME seconds. From there,
// we do not make a request for BACKOFF_INTERVAL seconds and for every failed
// request after that we double how long to wait, to a maximum of BACKOFF_MAX.
// Note that even in the case of a successful response we still keep a history
// of past errors.
noteServerResponse: function HC_noteServerResponse(aSuccess) {
if (aSuccess) {
this._backoff = false;
this._nextRequestTime = 0;
this._backoffTime = 0;
return;
}
let now = Date.now();
// We only alter the size of |_errorTimes| here, so we can guarantee that
// its length is at most BACKOFF_ERRORS.
this._errorTimes.push(now);
if (this._errorTimes.length > BACKOFF_ERRORS) {
this._errorTimes.shift();
}
if (this._backoff) {
this._backoffTime *= 2;
} else if (this._errorTimes.length == BACKOFF_ERRORS &&
((now - this._errorTimes[0]) / 1000) <= BACKOFF_TIME) {
this._backoff = true;
this._backoffTime = BACKOFF_INTERVAL;
}
if (this._backoff) {
this._backoffTime = Math.min(this._backoffTime, BACKOFF_MAX);
this._nextRequestTime = now + (this._backoffTime * 1000);
}
// Pass the server response status to the RequestBackoff for the given
// gethashUrl and fetch the next pending request, if there is one.
finishRequest: function(url, aStatus) {
this._backoffs[url].noteServerResponse(aStatus);
Services.tm.currentThread.dispatch(this, Ci.nsIThread.DISPATCH_NORMAL);
},
// This is not included on the interface but is used to communicate to the
// HashCompleterRequest. It returns a time in milliseconds.
get nextRequestTime() {
return this._nextRequestTime;
// Returns true if we can make a request from the given url, false otherwise.
canMakeRequest: function(aGethashUrl) {
return this._backoffs[aGethashUrl].canMakeRequest();
},
// Notifies the RequestBackoff of a new request so we can throttle based on
// max requests/time period. This must be called before a channel is opened,
// and finishRequest must be called once the response is received.
noteRequest: function(aGethashUrl) {
return this._backoffs[aGethashUrl].noteRequest();
},
observe: function HC_observe(aSubject, aTopic, aData) {
@ -181,20 +150,18 @@ HashCompleter.prototype = {
},
};
function HashCompleterRequest(aCompleter) {
function HashCompleterRequest(aCompleter, aGethashUrl) {
// HashCompleter object that created this HashCompleterRequest.
this._completer = aCompleter;
// The internal set of hashes and callbacks that this request corresponds to.
this._requests = [];
// URI to query for hash completions. Largely comes from the
// browser.safebrowsing.gethashURL pref.
this._uri = null;
// nsIChannel that the hash completion query is transmitted over.
this._channel = null;
// Response body of hash completion. Created in onDataAvailable.
this._response = "";
// Whether we have been informed of a shutdown by the xpcom-shutdown event.
this._shuttingDown = false;
this.gethashUrl = aGethashUrl;
}
HashCompleterRequest.prototype = {
QueryInterface: XPCOMUtils.generateQI([Ci.nsIRequestObserver,
@ -213,9 +180,11 @@ HashCompleterRequest.prototype = {
},
// This initiates the HTTP request. It can fail due to backoff timings and
// will notify all callbacks as necessary.
// will notify all callbacks as necessary. We notify the backoff object on
// begin.
begin: function HCR_begin() {
if (Date.now() < this._completer.nextRequestTime) {
if (!this._completer.canMakeRequest(this.gethashUrl)) {
dump("hashcompleter: Can't make request to " + this.gethashUrl + "\n");
this.notifyFailure(Cr.NS_ERROR_ABORT);
return;
}
@ -224,6 +193,9 @@ HashCompleterRequest.prototype = {
try {
this.openChannel();
// Notify the RequestBackoff if opening the channel succeeded. At this
// point, finishRequest must be called.
this._completer.noteRequest(this.gethashUrl);
}
catch (err) {
this.notifyFailure(err);
@ -231,16 +203,13 @@ HashCompleterRequest.prototype = {
}
},
setURI: function HCR_setURI(aURI) {
this._uri = aURI;
},
// Creates an nsIChannel for the request and fills the body.
openChannel: function HCR_openChannel() {
let loadFlags = Ci.nsIChannel.INHIBIT_CACHING |
Ci.nsIChannel.LOAD_BYPASS_CACHE;
let channel = Services.io.newChannelFromURI(this._uri);
let uri = Services.io.newURI(this.gethashUrl, null, null);
let channel = Services.io.newChannelFromURI(uri);
channel.loadFlags = loadFlags;
this._channel = channel;
@ -306,12 +275,13 @@ HashCompleterRequest.prototype = {
let start = 0;
let length = this._response.length;
while (start != length)
while (start != length) {
start = this.handleTable(start);
}
},
// This parses a table entry in the response body and calls |handleItem|
// for complete hash in the table entry.
// for complete hash in the table entry.
handleTable: function HCR_handleTable(aStart) {
let body = this._response.substring(aStart);
@ -404,16 +374,21 @@ HashCompleterRequest.prototype = {
throw Cr.NS_ERROR_ABORT;
}
// Default HTTP status to service unavailable, in case we can't retrieve
// the true status from the channel.
let httpStatus = 503;
if (Components.isSuccessCode(aStatusCode)) {
let channel = aRequest.QueryInterface(Ci.nsIHttpChannel);
let success = channel.requestSucceeded;
httpStatus = channel.responseStatus;
if (!success) {
aStatusCode = Cr.NS_ERROR_ABORT;
}
}
let success = Components.isSuccessCode(aStatusCode);
this._completer.noteServerResponse(success);
// Notify the RequestBackoff once a response is received.
this._completer.finishRequest(this.gethashUrl, httpStatus);
if (success) {
try {

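The net effect of the dispatch logic above is per-URL batching: completions for the gethash URL currently in flight join that request, while other URLs wait their turn. A sketch (prefixes, callbacks, and URLs are placeholders):

// Same URL: both prefixes are coalesced into a single HTTP request.
completer.complete(prefixA, "https://a.example/gethash", callbackA);
completer.complete(prefixB, "https://a.example/gethash", callbackB);
// Different URL: parked in _pendingRequests until finishRequest() reports the
// first response and run() promotes it to _currentRequest.
completer.complete(prefixC, "https://b.example/gethash", callbackC);
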
View File

@ -39,13 +39,13 @@ static const PRLogModuleInfo *gUrlClassifierStreamUpdaterLog = nullptr;
nsUrlClassifierStreamUpdater::nsUrlClassifierStreamUpdater()
: mIsUpdating(false), mInitialized(false), mDownloadError(false),
mBeganStream(false), mUpdateUrl(nullptr), mChannel(nullptr)
mBeganStream(false), mChannel(nullptr)
{
#if defined(PR_LOGGING)
if (!gUrlClassifierStreamUpdaterLog)
gUrlClassifierStreamUpdaterLog = PR_NewLogModule("UrlClassifierStreamUpdater");
#endif
LOG(("nsUrlClassifierStreamUpdater init [this=%p]", this));
}
NS_IMPL_ISUPPORTS(nsUrlClassifierStreamUpdater,
@ -76,33 +76,20 @@ nsUrlClassifierStreamUpdater::DownloadDone()
///////////////////////////////////////////////////////////////////////////////
// nsIUrlClassifierStreamUpdater implementation
NS_IMETHODIMP
nsUrlClassifierStreamUpdater::GetUpdateUrl(nsACString & aUpdateUrl)
{
if (mUpdateUrl) {
mUpdateUrl->GetSpec(aUpdateUrl);
} else {
aUpdateUrl.Truncate();
}
return NS_OK;
}
NS_IMETHODIMP
nsUrlClassifierStreamUpdater::SetUpdateUrl(const nsACString & aUpdateUrl)
{
LOG(("Update URL is %s\n", PromiseFlatCString(aUpdateUrl).get()));
nsresult rv = NS_NewURI(getter_AddRefs(mUpdateUrl), aUpdateUrl);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
nsresult
nsUrlClassifierStreamUpdater::FetchUpdate(nsIURI *aUpdateUrl,
const nsACString & aRequestBody,
const nsACString & aStreamTable)
{
#ifdef DEBUG
{
nsCString spec;
aUpdateUrl->GetSpec(spec);
LOG(("Fetching update %s from %s", aRequestBody.Data(), spec.get()));
}
#endif
nsresult rv;
uint32_t loadFlags = nsIChannel::INHIBIT_CACHING |
nsIChannel::LOAD_BYPASS_CACHE;
@ -165,24 +152,33 @@ nsUrlClassifierStreamUpdater::FetchUpdate(const nsACString & aUpdateUrl,
NS_IMETHODIMP
nsUrlClassifierStreamUpdater::DownloadUpdates(
const nsACString &aRequestTables,
const nsACString &aRequestBody,
nsIUrlClassifierCallback *aSuccessCallback,
nsIUrlClassifierCallback *aUpdateErrorCallback,
nsIUrlClassifierCallback *aDownloadErrorCallback,
bool *_retval)
const nsACString &aRequestTables,
const nsACString &aRequestBody,
const nsACString &aUpdateUrl,
nsIUrlClassifierCallback *aSuccessCallback,
nsIUrlClassifierCallback *aUpdateErrorCallback,
nsIUrlClassifierCallback *aDownloadErrorCallback,
bool *_retval)
{
NS_ENSURE_ARG(aSuccessCallback);
NS_ENSURE_ARG(aUpdateErrorCallback);
NS_ENSURE_ARG(aDownloadErrorCallback);
if (mIsUpdating) {
LOG(("already updating, skipping update"));
LOG(("Already updating, queueing update %s from %s", aRequestBody.Data(),
aUpdateUrl.Data()));
*_retval = false;
PendingRequest *request = mPendingRequests.AppendElement();
request->mTables = aRequestTables;
request->mRequest = aRequestBody;
request->mUrl = aUpdateUrl;
request->mSuccessCallback = aSuccessCallback;
request->mUpdateErrorCallback = aUpdateErrorCallback;
request->mDownloadErrorCallback = aDownloadErrorCallback;
return NS_OK;
}
if (!mUpdateUrl) {
if (aUpdateUrl.IsEmpty()) {
NS_ERROR("updateUrl not set");
return NS_ERROR_NOT_INITIALIZED;
}
@ -208,10 +204,20 @@ nsUrlClassifierStreamUpdater::DownloadUpdates(
rv = mDBService->BeginUpdate(this, aRequestTables);
if (rv == NS_ERROR_NOT_AVAILABLE) {
LOG(("already updating, skipping update"));
LOG(("Service busy, already updating, queuing update %s from %s",
aRequestBody.Data(), aUpdateUrl.Data()));
*_retval = false;
PendingRequest *request = mPendingRequests.AppendElement();
request->mTables = aRequestTables;
request->mRequest = aRequestBody;
request->mUrl = aUpdateUrl;
request->mSuccessCallback = aSuccessCallback;
request->mUpdateErrorCallback = aUpdateErrorCallback;
request->mDownloadErrorCallback = aDownloadErrorCallback;
return NS_OK;
} else if (NS_FAILED(rv)) {
}
if (NS_FAILED(rv)) {
return rv;
}
@ -222,14 +228,10 @@ nsUrlClassifierStreamUpdater::DownloadUpdates(
mIsUpdating = true;
*_retval = true;
nsAutoCString urlSpec;
mUpdateUrl->GetAsciiSpec(urlSpec);
LOG(("FetchUpdate: %s", urlSpec.get()));
LOG(("FetchUpdate: %s", aUpdateUrl.Data()));
//LOG(("requestBody: %s", aRequestBody.Data()));
LOG(("Calling into FetchUpdate"));
return FetchUpdate(mUpdateUrl, aRequestBody, EmptyCString());
return FetchUpdate(aUpdateUrl, aRequestBody, EmptyCString());
}
///////////////////////////////////////////////////////////////////////////////
@ -290,6 +292,34 @@ nsUrlClassifierStreamUpdater::FetchNext()
return NS_OK;
}
nsresult
nsUrlClassifierStreamUpdater::FetchNextRequest()
{
if (mPendingRequests.Length() == 0) {
LOG(("No more requests, returning"));
return NS_OK;
}
PendingRequest &request = mPendingRequests[0];
LOG(("Stream updater: fetching next request: %s, %s",
request.mTables.get(), request.mUrl.get()));
bool dummy;
DownloadUpdates(
request.mTables,
request.mRequest,
request.mUrl,
request.mSuccessCallback,
request.mUpdateErrorCallback,
request.mDownloadErrorCallback,
&dummy);
request.mSuccessCallback = nullptr;
request.mUpdateErrorCallback = nullptr;
request.mDownloadErrorCallback = nullptr;
mPendingRequests.RemoveElementAt(0);
return NS_OK;
}
NS_IMETHODIMP
nsUrlClassifierStreamUpdater::StreamFinished(nsresult status,
uint32_t requestedDelay)
@ -334,6 +364,9 @@ nsUrlClassifierStreamUpdater::UpdateSuccess(uint32_t requestedTimeout)
if (successCallback) {
successCallback->HandleEvent(strTimeout);
}
// Now fetch the next request
LOG(("stream updater: calling into fetch next request"));
FetchNextRequest();
return NS_OK;
}

View File

@ -63,17 +63,29 @@ private:
// Fetches the next table, from mPendingUpdates.
nsresult FetchNext();
// Fetches the next request, from mPendingRequests
nsresult FetchNextRequest();
bool mIsUpdating;
bool mInitialized;
bool mDownloadError;
bool mBeganStream;
nsCOMPtr<nsIURI> mUpdateUrl;
nsCString mStreamTable;
nsCOMPtr<nsIChannel> mChannel;
nsCOMPtr<nsIUrlClassifierDBService> mDBService;
nsCOMPtr<nsITimer> mTimer;
struct PendingRequest {
nsCString mTables;
nsCString mRequest;
nsCString mUrl;
nsCOMPtr<nsIUrlClassifierCallback> mSuccessCallback;
nsCOMPtr<nsIUrlClassifierCallback> mUpdateErrorCallback;
nsCOMPtr<nsIUrlClassifierCallback> mDownloadErrorCallback;
};
nsTArray<PendingRequest> mPendingRequests;
struct PendingUpdate {
nsCString mUrl;
nsCString mTable;

View File

@ -174,9 +174,8 @@ function doStreamUpdate(updateText, success, failure, downloadFailure) {
if (!downloadFailure)
downloadFailure = failure;
streamUpdater.updateUrl = dataUpdate;
streamUpdater.downloadUpdates("test-phish-simple,test-malware-simple", "",
success, failure, downloadFailure);
dataUpdate, success, failure, downloadFailure);
}
var gAssertions = {

View File

@ -102,7 +102,6 @@ function handleError(aEvent) {
add_test(function test_update() {
let streamUpdater = Cc["@mozilla.org/url-classifier/streamupdater;1"]
.getService(Ci.nsIUrlClassifierStreamUpdater);
streamUpdater.updateUrl = "http://localhost:4444/downloads";
// Load up some update chunks for the safebrowsing server to serve.
registerTableUpdate("goog-downloadwhite-digest256", "data/digest1.chunk");
@ -119,6 +118,7 @@ add_test(function test_update() {
streamUpdater.downloadUpdates(
"goog-downloadwhite-digest256",
"goog-downloadwhite-digest256;\n",
"http://localhost:4444/downloads",
updateSuccess, handleError, handleError);
});

View File

@ -15,6 +15,8 @@
// to be COMPLETE_LENGTH.
// expectCompletion: boolean indicating whether the server should respond
// with a full hash.
// forceServerError: boolean indicating whether the server should respond
// with a 503.
// table: name of the table that the hash corresponds to. Only needs to be set
// if a completion is expected.
// chunkId: positive integer corresponding to the chunk that the hash belongs
@ -114,12 +116,12 @@ let multipleResponsesCompletionSet = [
}
];
// The fifth completion set is added at runtime by addRandomCompletionSet.
// The fifth completion set is added at runtime by getRandomCompletionSet.
// Each completion in the set only has one response and its purpose is to
// provide an easy way to test the HashCompleter handling an arbitrarily large
// completion set (determined by SIZE_OF_RANDOM_SET).
const SIZE_OF_RANDOM_SET = 16;
function addRandomCompletionSet() {
function getRandomCompletionSet(forceServerError) {
let completionSet = [];
let hashPrefixes = [];
@ -128,7 +130,7 @@ function addRandomCompletionSet() {
let rand = new LFSRgenerator(seed);
for (let i = 0; i < SIZE_OF_RANDOM_SET; i++) {
let completion = {};
let completion = { expectCompletion: false, forceServerError: false, _finished: false };
// Generate a random 256 bit hash. First we get a random number and then
// convert it to a string.
@ -145,7 +147,11 @@ function addRandomCompletionSet() {
hashPrefixes.push(prefix);
completion.hash = hash;
completion.expectCompletion = rand.nextNum(1) == 1;
if (!forceServerError) {
completion.expectCompletion = rand.nextNum(1) == 1;
} else {
completion.forceServerError = true;
}
if (completion.expectCompletion) {
// Generate a random alpha-numeric string of length at most 6 for the
// table name.
@ -153,11 +159,10 @@ function addRandomCompletionSet() {
completion.chunkId = rand.nextNum(16);
}
completionSet.push(completion);
}
completionSets.push(completionSet);
return completionSet;
}
let completionSets = [basicCompletionSet, falseCompletionSet,
@ -177,8 +182,22 @@ const COMPLETE_LENGTH = 32;
let completer = Cc["@mozilla.org/url-classifier/hashcompleter;1"].
getService(Ci.nsIUrlClassifierHashCompleter);
let gethashUrl;
// Expected highest completion set for which the server sends a response.
let expectedMaxServerCompletionSet = 0;
let maxServerCompletionSet = 0;
function run_test() {
addRandomCompletionSet();
// Generate a random completion set that returns successful responses.
completionSets.push(getRandomCompletionSet(false));
// We backoff after receiving an error, so requests shouldn't reach the
// server after that.
expectedMaxServerCompletionSet = completionSets.length;
// Generate some completion sets that return 503s.
for (let j = 0; j < 10; ++j) {
completionSets.push(getRandomCompletionSet(true));
}
// Fix up the completions before running the test.
for each (let completionSet in completionSets) {
@ -204,32 +223,26 @@ function run_test() {
const SERVER_PORT = 8080;
server.start(SERVER_PORT);
completer.gethashUrl = "http://localhost:" + SERVER_PORT + SERVER_PATH;
runNextCompletion();
}
function doneCompletionSet() {
do_check_eq(finishedCompletions, completionSets[currentCompletionSet].length);
for each (let completion in completionSets[currentCompletionSet])
do_check_true(completion._finished);
gethashUrl = "http://localhost:" + SERVER_PORT + SERVER_PATH;
runNextCompletion();
}
function runNextCompletion() {
// The server relies on currentCompletionSet to send the correct response, so
// don't increment it until we start the new set of callbacks.
currentCompletionSet++;
finishedCompletions = 0;
if (currentCompletionSet >= completionSets.length) {
finish();
return;
}
dump("Now on completion set index " + currentCompletionSet + "\n");
dump("Now on completion set index " + currentCompletionSet + ", length " +
completionSets[currentCompletionSet].length + "\n");
// Number of finished completions for this set.
finishedCompletions = 0;
for each (let completion in completionSets[currentCompletionSet]) {
completer.complete(completion.hash.substring(0,4),
completer.complete(completion.hash.substring(0,4), gethashUrl,
(new callback(completion)));
}
}
@ -251,6 +264,9 @@ function hashCompleterServer(aRequest, aResponse) {
function responseForCompletion(x) {
return x.table + ":" + x.chunkId + ":" + x.hash.length + "\n" + x.hash;
}
// As per the spec, a server should respond with a 204 if there are no
// full-length hashes that match the prefixes.
let httpStatus = 204;
for each (let completion in completionSets[currentCompletionSet]) {
if (completion.expectCompletion &&
(completedHashes.indexOf(completion.hash) == -1)) {
@ -261,24 +277,28 @@ function hashCompleterServer(aRequest, aResponse) {
else
responseText += responseForCompletion(completion);
}
if (completion.forceServerError) {
httpStatus = 503;
}
}
// As per the spec, a server should respond with a 204 if there are no
// full-length hashes that match the prefixes.
if (responseText)
dump("Server sending response for " + currentCompletionSet + "\n");
maxServerCompletionSet = currentCompletionSet;
if (responseText && httpStatus != 503) {
aResponse.write(responseText);
else
aResponse.setStatusLine(null, 204, null);
} else {
aResponse.setStatusLine(null, httpStatus, null);
}
}
function callback(completion) {
this._completion = completion;
}
callback.prototype = {
completion: function completion(hash, table, chunkId, trusted) {
do_check_true(this._completion.expectCompletion);
if (this._completion.multipleCompletions) {
for each (let completion in this._completion.completions) {
if (completion.hash == hash) {
@ -306,16 +326,20 @@ callback.prototype = {
},
completionFinished: function completionFinished(status) {
finishedCompletions++;
do_check_eq(!!this._completion.expectCompletion, !!this._completed);
this._completion._finished = true;
finishedCompletions++;
if (finishedCompletions == completionSets[currentCompletionSet].length)
doneCompletionSet();
// currentCompletionSet can mutate before all of the callbacks are complete.
if (currentCompletionSet < completionSets.length &&
finishedCompletions == completionSets[currentCompletionSet].length) {
runNextCompletion();
}
},
};
function finish() {
do_check_eq(expectedMaxServerCompletionSet, maxServerCompletionSet);
server.stop(function() {
do_test_finished();
});

View File

@ -20,7 +20,7 @@ QueryInterface: function(iid)
return this;
},
complete: function(partialHash, cb)
complete: function(partialHash, gethashUrl, cb)
{
this.queries.push(partialHash);
var fragments = this.fragments;

View File

@ -10,7 +10,7 @@ support-files =
[test_dbservice.js]
[test_hashcompleter.js]
# Bug 752243: Profile cleanup frequently fails
skip-if = os == "mac" || os == "linux"
#skip-if = os == "mac" || os == "linux"
[test_partial.js]
[test_prefixset.js]
[test_streamupdater.js]