/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Url Classifier code
 *
 * The Initial Developer of the Original Code is
 * Google Inc.
 * Portions created by the Initial Developer are Copyright (C) 2006
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Tony Chang <tony@ponderer.org> (original author)
 *   Brett Wilson <brettw@gmail.com>
 *   Dave Camp <dcamp@mozilla.com>
 *   David Dahl <ddahl@mozilla.com>
 *   Gian-Carlo Pascutto <gpascutto@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */
#include "nsAutoPtr.h"
|
2007-07-25 23:38:43 -07:00
|
|
|
#include "nsCOMPtr.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "mozIStorageService.h"
|
|
|
|
#include "mozIStorageConnection.h"
|
|
|
|
#include "mozIStorageStatement.h"
|
2007-07-25 23:38:43 -07:00
|
|
|
#include "mozStorageHelper.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "mozStorageCID.h"
|
|
|
|
#include "nsAppDirectoryServiceDefs.h"
|
|
|
|
#include "nsCRT.h"
|
2008-04-15 15:39:44 -07:00
|
|
|
#include "nsDataHashtable.h"
|
2007-07-25 23:38:43 -07:00
|
|
|
#include "nsICryptoHash.h"
|
2008-02-27 00:51:02 -08:00
|
|
|
#include "nsICryptoHMAC.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "nsIDirectoryService.h"
|
2008-03-18 12:45:40 -07:00
|
|
|
#include "nsIKeyModule.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "nsIObserverService.h"
|
2011-05-03 10:39:28 -07:00
|
|
|
#include "nsIPermissionManager.h"
|
2007-08-31 16:18:46 -07:00
|
|
|
#include "nsIPrefBranch.h"
|
|
|
|
#include "nsIPrefBranch2.h"
|
|
|
|
#include "nsIPrefService.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "nsIProperties.h"
|
|
|
|
#include "nsToolkitCompsCID.h"
|
2007-07-25 23:38:43 -07:00
|
|
|
#include "nsIUrlClassifierUtils.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "nsUrlClassifierDBService.h"
|
2008-02-27 00:51:02 -08:00
|
|
|
#include "nsUrlClassifierUtils.h"
|
2011-08-19 08:50:04 -07:00
|
|
|
#include "nsUrlClassifierProxies.h"
|
2007-08-31 16:18:46 -07:00
|
|
|
#include "nsURILoader.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "nsString.h"
|
2009-01-21 20:15:34 -08:00
|
|
|
#include "nsReadableUtils.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "nsTArray.h"
|
2007-07-25 23:38:43 -07:00
|
|
|
#include "nsNetUtil.h"
|
|
|
|
#include "nsNetCID.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "nsThreadUtils.h"
|
|
|
|
#include "nsXPCOMStrings.h"
|
Rollup of bug 645263 and bug 646259: Switch to mozilla:: sync primitives. r=cjones,dbaron,doublec,ehsan src=bsmedberg
Bug 645263, part 0: Count sync primitive ctor/dtors. r=dbaron
Bug 645263, part 1: Migrate content/media to mozilla:: sync primitives. r=doublec
Bug 645263, part 2: Migrate modules/plugin to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 3: Migrate nsComponentManagerImpl to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 4: Migrate everything else to mozilla:: sync primitives. r=dbaron
Bug 645263, part 5: Remove nsAutoLock.*. sr=bsmedberg
Bug 645263, part 6: Make editor test be nicer to deadlock detector. r=ehsan
Bug 645263, part 7: Disable tracemalloc backtraces for xpcshell tests. r=dbaron
Bug 646259: Fix nsCacheService to use a CondVar for notifying. r=cjones
2011-03-31 21:29:02 -07:00
|
|
|
#include "mozilla/Mutex.h"
|
2011-09-08 13:17:34 -07:00
|
|
|
#include "mozilla/Telemetry.h"
|
2007-03-22 10:30:00 -07:00
|
|
|
#include "prlog.h"
|
|
|
|
#include "prprf.h"
|
2008-01-29 12:57:18 -08:00
|
|
|
#include "prnetdb.h"
|
2007-07-25 23:38:43 -07:00
|
|
|
#include "zlib.h"
|
|
|
|
|

// Needed to interpret mozIStorageConnection::GetLastError
#include <sqlite3.h>

using namespace mozilla;

/**
 * The DBService stores a set of Fragments.  A fragment is one URL
 * fragment containing two or more domain components and some number
 * of path components.
 *
 * Fragment examples:
 *   example.com/
 *   www.example.com/foo/bar
 *   www.mail.example.com/mail
 *
 * Fragments are described in the "Simplified Regular Expression Lookup"
 * section of the protocol document at
 * http://code.google.com/p/google-safe-browsing/wiki/Protocolv2Spec
 *
 * A fragment is associated with a domain.  The domain for a given
 * fragment is the three-host-component domain of the fragment (two
 * host components for URLs with only two components) with a trailing
 * slash.  So for the fragments listed above, the domains are
 * example.com/, www.example.com/ and mail.example.com/.
 *
 * Fragments and domains are hashed in the database.  The hash is described
 * in the protocol document, but it's basically a truncated SHA256 hash.
 *
 * A (table, chunk id, domain key, fragment) tuple is referred to as
 * an Entry.
 */

// NSPR_LOG_MODULES=UrlClassifierDbService:5
#if defined(PR_LOGGING)
static const PRLogModuleInfo *gUrlClassifierDbServiceLog = nsnull;
#define LOG(args) PR_LOG(gUrlClassifierDbServiceLog, PR_LOG_DEBUG, args)
#define LOG_ENABLED() PR_LOG_TEST(gUrlClassifierDbServiceLog, 4)
#else
#define LOG(args)
#define LOG_ENABLED() (false)
#endif

// Schema versioning: note that we don't bother to migrate between different
// versions of the schema; we just start fetching the data fresh with each
// migration.

// The database filename is updated when there is an incompatible
// schema change and we expect both implementations to continue
// accessing the same database (such as between stable versions of the
// platform).
#define DATABASE_FILENAME "urlclassifier3.sqlite"

// The implementation version is updated during development when we
// want to change schema, or to recover from updating bugs.  When an
// implementation version change is detected, the database is scrapped
// and we start over.
#define IMPLEMENTATION_VERSION 7

// Name of the persistent PrefixSet storage
#define PREFIXSET_FILENAME  "urlclassifier.pset"

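// Maximum number of host and path components considered when a URL is
// expanded into lookup fragments.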
#define MAX_HOST_COMPONENTS 5
#define MAX_PATH_COMPONENTS 4

// Updates will fail if fed chunks larger than this
#define MAX_CHUNK_SIZE (1024 * 1024)

// Prefs for implementing nsIURIClassifier to block page loads
#define CHECK_MALWARE_PREF      "browser.safebrowsing.malware.enabled"
#define CHECK_MALWARE_DEFAULT   false

#define CHECK_PHISHING_PREF     "browser.safebrowsing.enabled"
#define CHECK_PHISHING_DEFAULT  false

#define GETHASH_NOISE_PREF      "urlclassifier.gethashnoise"
#define GETHASH_NOISE_DEFAULT   4

#define GETHASH_TABLES_PREF     "urlclassifier.gethashtables"

#define CONFIRM_AGE_PREF        "urlclassifier.confirm-age"
#define CONFIRM_AGE_DEFAULT_SEC (45 * 60)

#define UPDATE_CACHE_SIZE_PREF    "urlclassifier.updatecachemax"
#define UPDATE_CACHE_SIZE_DEFAULT -1

#define LOOKUP_CACHE_SIZE_PREF    "urlclassifier.lookupcachemax"
#define LOOKUP_CACHE_SIZE_DEFAULT -1

// Amount of time to spend updating before committing and delaying, in
// seconds.  This is checked after each update stream, so the actual
// time spent can be higher than this, depending on update stream size.
#define UPDATE_WORKING_TIME         "urlclassifier.workingtime"
#define UPDATE_WORKING_TIME_DEFAULT 5

// The amount of time to delay after hitting UPDATE_WORKING_TIME, in
// seconds.
#define UPDATE_DELAY_TIME           "urlclassifier.updatetime"
#define UPDATE_DELAY_TIME_DEFAULT   60

class nsUrlClassifierDBServiceWorker;

// Singleton instance.
static nsUrlClassifierDBService* sUrlClassifierDBService;

nsIThread* nsUrlClassifierDBService::gDbBackgroundThread = nsnull;

// Once we've committed to shutting down, don't do work in the background
// thread.
static bool gShuttingDownThread = false;

static PRInt32 gFreshnessGuarantee = CONFIRM_AGE_DEFAULT_SEC;

static PRInt32 gUpdateCacheSize = UPDATE_CACHE_SIZE_DEFAULT;
static PRInt32 gLookupCacheSize = LOOKUP_CACHE_SIZE_DEFAULT;

static PRInt32 gWorkingTimeThreshold = UPDATE_WORKING_TIME_DEFAULT;
static PRInt32 gDelayTime = UPDATE_DELAY_TIME_DEFAULT;

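// Split a comma-separated list of table names into an array of names.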
static void
SplitTables(const nsACString& str, nsTArray<nsCString>& tables)
{
  tables.Clear();

  nsACString::const_iterator begin, iter, end;
  str.BeginReading(begin);
  str.EndReading(end);
  while (begin != end) {
    iter = begin;
    FindCharInReadable(',', iter, end);
    tables.AppendElement(Substring(begin, iter));
    begin = iter;
    if (begin != end)
      begin++;
  }
}

// -------------------------------------------------------------------------
// Hash class implementation

// A convenience wrapper around the potentially-truncated hash for a
// domain or fragment.

template <PRUint32 S>
struct nsUrlClassifierHash
{
  static const PRUint32 sHashSize = S;
  typedef nsUrlClassifierHash<S> self_type;
  PRUint8 buf[S];

  nsresult FromPlaintext(const nsACString& plainText, nsICryptoHash *hash) {
    // From the protocol doc:
    // Each entry in the chunk is composed of the 128 most significant bits
    // of the SHA 256 hash of a suffix/prefix expression.

    nsresult rv = hash->Init(nsICryptoHash::SHA256);
    NS_ENSURE_SUCCESS(rv, rv);

    rv = hash->Update
      (reinterpret_cast<const PRUint8*>(plainText.BeginReading()),
       plainText.Length());
    NS_ENSURE_SUCCESS(rv, rv);

    nsCAutoString hashed;
    rv = hash->Finish(false, hashed);
    NS_ENSURE_SUCCESS(rv, rv);

    NS_ASSERTION(hashed.Length() >= sHashSize,
                 "not enough characters in the hash");

    memcpy(buf, hashed.BeginReading(), sHashSize);

    return NS_OK;
  }

  void Assign(const nsACString& str) {
    NS_ASSERTION(str.Length() >= sHashSize,
                 "string must be at least sHashSize characters long");
    memcpy(buf, str.BeginReading(), sHashSize);
  }

  void Clear() {
    memset(buf, 0, sizeof(buf));
  }

  const bool operator==(const self_type& hash) const {
    return (memcmp(buf, hash.buf, sizeof(buf)) == 0);
  }
  const bool operator!=(const self_type& hash) const {
    return !(*this == hash);
  }
  const bool operator<(const self_type& hash) const {
    return memcmp(buf, hash.buf, sizeof(self_type)) < 0;
  }
  const bool StartsWith(const nsUrlClassifierHash<PARTIAL_LENGTH>& hash) const {
    NS_ASSERTION(sHashSize >= PARTIAL_LENGTH, "nsUrlClassifierHash must be at least PARTIAL_LENGTH bytes long");
    return memcmp(buf, hash.buf, PARTIAL_LENGTH) == 0;
  }
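  // Interpret the leading bytes of the hash as a 32-bit integer prefix.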
  PRUint32 ToUint32() const {
    return *(reinterpret_cast<const PRUint32*>(buf));
  }
};

typedef nsUrlClassifierHash<DOMAIN_LENGTH> nsUrlClassifierDomainHash;
typedef nsUrlClassifierHash<PARTIAL_LENGTH> nsUrlClassifierPartialHash;
typedef nsUrlClassifierHash<COMPLETE_LENGTH> nsUrlClassifierCompleteHash;

// -------------------------------------------------------------------------
// Entry class implementation

// This class represents one entry in the classifier database.  It consists
// of a table id, a chunk id, a domain hash, and a partial or complete hash.
class nsUrlClassifierEntry
{
public:
  nsUrlClassifierEntry()
    : mId(-1)
    , mHavePartial(false)
    , mHaveComplete(false)
    , mTableId(0)
    , mChunkId(0)
    , mAddChunkId(0)
    {}
  ~nsUrlClassifierEntry() {}

  // Check that this entry could potentially match the complete hash.
  bool Match(const nsUrlClassifierCompleteHash &hash);

  // Check that the sub entry should apply to this entry.
  bool SubMatch(const nsUrlClassifierEntry& sub);

  // Clear out the entry structure
  void Clear();

  // Set the partial hash for this domain.
  void SetHash(const nsUrlClassifierPartialHash &partialHash) {
    mPartialHash = partialHash;
    mHavePartial = true;
  }

  // Set the complete hash for this domain.
  void SetHash(const nsUrlClassifierCompleteHash &completeHash) {
    mCompleteHash = completeHash;
    mHaveComplete = true;
  }

  bool operator== (const nsUrlClassifierEntry& entry) const {
    return ! (mTableId != entry.mTableId ||
              mChunkId != entry.mChunkId ||
              mHavePartial != entry.mHavePartial ||
              (mHavePartial && mPartialHash != entry.mPartialHash) ||
              mHaveComplete != entry.mHaveComplete ||
              (mHaveComplete && mCompleteHash != entry.mCompleteHash));
  }

  bool operator< (const nsUrlClassifierEntry& entry) const {
    return (mTableId < entry.mTableId ||
            mChunkId < entry.mChunkId ||
            (mHavePartial && !entry.mHavePartial) ||
            (mHavePartial && mPartialHash < entry.mPartialHash) ||
            (mHaveComplete && !entry.mHaveComplete) ||
            (mHaveComplete && mCompleteHash < entry.mCompleteHash));
  }

  PRInt64 mId;

  nsUrlClassifierDomainHash mKey;

  bool mHavePartial;
  nsUrlClassifierPartialHash mPartialHash;

  bool mHaveComplete;
  nsUrlClassifierCompleteHash mCompleteHash;

  PRUint32 mTableId;
  PRUint32 mChunkId;
  PRUint32 mAddChunkId;
};

bool
nsUrlClassifierEntry::Match(const nsUrlClassifierCompleteHash &hash)
{
  if (mHaveComplete)
    return mCompleteHash == hash;

  if (mHavePartial)
    return hash.StartsWith(mPartialHash);

  return false;
}

bool
nsUrlClassifierEntry::SubMatch(const nsUrlClassifierEntry &subEntry)
{
  if ((mTableId != subEntry.mTableId) || (mChunkId != subEntry.mAddChunkId))
    return false;

  if (subEntry.mHaveComplete)
    return mHaveComplete && mCompleteHash == subEntry.mCompleteHash;

  if (subEntry.mHavePartial)
    return mHavePartial && mPartialHash == subEntry.mPartialHash;

  return false;
}

void
nsUrlClassifierEntry::Clear()
{
  mId = -1;
  mHavePartial = false;
  mHaveComplete = false;
}

// -------------------------------------------------------------------------
// Lookup result class implementation

// This helper class wraps a nsUrlClassifierEntry found during a lookup.
class nsUrlClassifierLookupResult
{
public:
  nsUrlClassifierLookupResult() : mConfirmed(false), mNoise(false) {
    mLookupFragment.Clear();
  }
  ~nsUrlClassifierLookupResult() {}

  bool operator==(const nsUrlClassifierLookupResult &result) const {
    // Don't need to compare table name, it's contained by id in the entry.
    return (mLookupFragment == result.mLookupFragment &&
            mConfirmed == result.mConfirmed &&
            mEntry == result.mEntry);
  }

  bool operator<(const nsUrlClassifierLookupResult &result) const {
    // Don't need to compare table name, it's contained by id in the entry.
    return (mLookupFragment < result.mLookupFragment ||
            mConfirmed < result.mConfirmed ||
            mEntry < result.mEntry);
  }

  // The hash that matched this entry.
  nsUrlClassifierCompleteHash mLookupFragment;

  // The entry that was found during the lookup.
  nsUrlClassifierEntry mEntry;

  // TRUE if the lookup matched a complete hash (not just a partial
  // one).
  bool mConfirmed;

  // TRUE if this lookup is gethash noise.  Does not represent an actual
  // result.
  bool mNoise;

  // The table name associated with mEntry.mTableId.
  nsCString mTableName;
};

// -------------------------------------------------------------------------
// Store class implementation

// This class mediates access to the classifier and chunk entry tables.
class nsUrlClassifierStore
{
public:
  nsUrlClassifierStore() {}
  virtual ~nsUrlClassifierStore() {}

  // Initialize the statements for the store.
  nsresult Init(nsUrlClassifierDBServiceWorker *worker,
                mozIStorageConnection *connection,
                const nsACString& entriesTableName);
  // Shut down the store.
  void Close();

  // Read an entry from a database statement
  virtual bool ReadStatement(mozIStorageStatement* statement,
                             nsUrlClassifierEntry& entry);

  // Prepare a statement to write this entry to the database
  virtual nsresult BindStatement(const nsUrlClassifierEntry& entry,
                                 mozIStorageStatement* statement);

  // Read the entry with a given ID from the database
  nsresult ReadEntry(PRInt64 id, nsUrlClassifierEntry& entry, bool *exists);

  // Remove an entry from the database
  nsresult DeleteEntry(nsUrlClassifierEntry& entry);

  // Write an entry to the database
  nsresult WriteEntry(nsUrlClassifierEntry& entry);

  // Update an entry in the database.  The entry must already exist in the
  // database or this method will fail.
  nsresult UpdateEntry(nsUrlClassifierEntry& entry);

  // Remove all entries for a given table/chunk pair from the database.
  nsresult Expire(PRUint32 tableId,
                  PRUint32 chunkNum);

  // Read a certain number of rows adjacent to the requested rowid that
  // don't have complete hash data.
  nsresult ReadNoiseEntries(PRInt64 rowID,
                            PRUint32 numRequested,
                            bool before,
                            nsTArray<nsUrlClassifierEntry> &entries);

  // Ask the db for a random number.  This is temporary, and should be
  // replaced with nsIRandomGenerator when bug 419739 is fixed.
  nsresult RandomNumber(PRInt64 *randomNum);

  // Return an array with all Prefixes known
  nsresult ReadPrefixes(nsTArray<PRUint32>& array, PRUint32 aKey);

protected:
  nsresult ReadEntries(mozIStorageStatement *statement,
                       nsTArray<nsUrlClassifierEntry>& entries);
  nsUrlClassifierDBServiceWorker *mWorker;
  nsCOMPtr<mozIStorageConnection> mConnection;

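  // Look up an entry by its row id.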
  nsCOMPtr<mozIStorageStatement> mLookupWithIDStatement;

  nsCOMPtr<mozIStorageStatement> mInsertStatement;
  nsCOMPtr<mozIStorageStatement> mUpdateStatement;
  nsCOMPtr<mozIStorageStatement> mDeleteStatement;
  nsCOMPtr<mozIStorageStatement> mExpireStatement;

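  // Statements used by ReadNoiseEntries to pick partial-hash rows adjacent
  // to a given row id (wrapping around the ends of the table if needed).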
  nsCOMPtr<mozIStorageStatement> mPartialEntriesStatement;
  nsCOMPtr<mozIStorageStatement> mPartialEntriesAfterStatement;
  nsCOMPtr<mozIStorageStatement> mLastPartialEntriesStatement;
  nsCOMPtr<mozIStorageStatement> mPartialEntriesBeforeStatement;

  nsCOMPtr<mozIStorageStatement> mRandomStatement;
  nsCOMPtr<mozIStorageStatement> mAllPrefixStatement;
};

nsresult
nsUrlClassifierStore::Init(nsUrlClassifierDBServiceWorker *worker,
                           mozIStorageConnection *connection,
                           const nsACString& entriesName)
{
  mWorker = worker;
  mConnection = connection;

  nsresult rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesName +
     NS_LITERAL_CSTRING(" WHERE id=?1"),
     getter_AddRefs(mLookupWithIDStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("DELETE FROM ") + entriesName +
     NS_LITERAL_CSTRING(" WHERE id=?1"),
     getter_AddRefs(mDeleteStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("DELETE FROM ") + entriesName +
     NS_LITERAL_CSTRING(" WHERE table_id=?1 AND chunk_id=?2"),
     getter_AddRefs(mExpireStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesName +
     NS_LITERAL_CSTRING(" WHERE complete_data ISNULL"
                        " LIMIT ?1"),
     getter_AddRefs(mPartialEntriesStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesName +
     NS_LITERAL_CSTRING(" WHERE id > ?1 AND complete_data ISNULL"
                        " LIMIT ?2"),
     getter_AddRefs(mPartialEntriesAfterStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesName +
     NS_LITERAL_CSTRING(" WHERE complete_data ISNULL"
                        " ORDER BY id DESC LIMIT ?1"),
     getter_AddRefs(mLastPartialEntriesStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesName +
     NS_LITERAL_CSTRING(" WHERE id < ?1 AND complete_data ISNULL"
                        " ORDER BY id DESC LIMIT ?2"),
     getter_AddRefs(mPartialEntriesBeforeStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT abs(random())"),
     getter_AddRefs(mRandomStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement(NS_LITERAL_CSTRING("SELECT domain, partial_data, complete_data FROM ")
                                    + entriesName,
                                    getter_AddRefs(mAllPrefixStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

void
nsUrlClassifierStore::Close()
{
  mLookupWithIDStatement = nsnull;

  mInsertStatement = nsnull;
  mUpdateStatement = nsnull;
  mDeleteStatement = nsnull;
  mExpireStatement = nsnull;

  mPartialEntriesStatement = nsnull;
  mPartialEntriesAfterStatement = nsnull;
  mPartialEntriesBeforeStatement = nsnull;
  mLastPartialEntriesStatement = nsnull;
  mRandomStatement = nsnull;

  mAllPrefixStatement = nsnull;

  mConnection = nsnull;
}

bool
nsUrlClassifierStore::ReadStatement(mozIStorageStatement* statement,
                                    nsUrlClassifierEntry& entry)
{
  entry.mId = statement->AsInt64(0);

  PRUint32 size;
  const PRUint8* blob = statement->AsSharedBlob(1, &size);
  if (!blob || (size != DOMAIN_LENGTH))
    return false;
  memcpy(entry.mKey.buf, blob, DOMAIN_LENGTH);

  blob = statement->AsSharedBlob(2, &size);
  if (!blob || size == 0) {
    entry.mHavePartial = false;
  } else {
    if (size != PARTIAL_LENGTH)
      return false;
    entry.mHavePartial = true;
    memcpy(entry.mPartialHash.buf, blob, PARTIAL_LENGTH);
  }

  blob = statement->AsSharedBlob(3, &size);
  if (!blob || size == 0) {
    entry.mHaveComplete = false;
  } else {
    if (size != COMPLETE_LENGTH)
      return false;
    entry.mHaveComplete = true;
    memcpy(entry.mCompleteHash.buf, blob, COMPLETE_LENGTH);
  }

  // If we only have a partial entry, and that partial entry matches the
  // domain, we don't save the extra copy to the database.
  if (!(entry.mHavePartial || entry.mHaveComplete)) {
    entry.SetHash(entry.mKey);
  }

  entry.mChunkId = statement->AsInt32(4);
  entry.mTableId = statement->AsInt32(5);

  return true;
}

nsresult
nsUrlClassifierStore::BindStatement(const nsUrlClassifierEntry &entry,
                                    mozIStorageStatement* statement)
{
  nsresult rv;

  if (entry.mId == -1)
    rv = statement->BindNullByIndex(0);
  else
    rv = statement->BindInt64ByIndex(0, entry.mId);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = statement->BindBlobByIndex(1, entry.mKey.buf, DOMAIN_LENGTH);
  NS_ENSURE_SUCCESS(rv, rv);

  if (entry.mHavePartial) {
    // If we only have a partial entry and that entry matches the domain,
    // we'll save some space by only storing the domain hash.
    if (!entry.mHaveComplete && entry.mKey == entry.mPartialHash) {
      rv = statement->BindNullByIndex(2);
    } else {
      rv = statement->BindBlobByIndex(2, entry.mPartialHash.buf,
                                      PARTIAL_LENGTH);
    }
  } else {
    rv = statement->BindNullByIndex(2);
  }
  NS_ENSURE_SUCCESS(rv, rv);

  if (entry.mHaveComplete) {
    rv = statement->BindBlobByIndex(3, entry.mCompleteHash.buf, COMPLETE_LENGTH);
  } else {
    rv = statement->BindNullByIndex(3);
  }
  NS_ENSURE_SUCCESS(rv, rv);

  rv = statement->BindInt32ByIndex(4, entry.mChunkId);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = statement->BindInt32ByIndex(5, entry.mTableId);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

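// Step through a prepared statement, appending one entry to the array for
// each result row.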
nsresult
nsUrlClassifierStore::ReadEntries(mozIStorageStatement *statement,
                                  nsTArray<nsUrlClassifierEntry>& entries)
{
  bool exists;
  nsresult rv = statement->ExecuteStep(&exists);
  NS_ENSURE_SUCCESS(rv, rv);

  while (exists) {
    nsUrlClassifierEntry *entry = entries.AppendElement();
    if (!entry) {
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ReadStatement(statement, *entry))
      return NS_ERROR_FAILURE;

    statement->ExecuteStep(&exists);
  }

  return NS_OK;
}

nsresult
nsUrlClassifierStore::ReadEntry(PRInt64 id,
                                nsUrlClassifierEntry& entry,
                                bool *exists)
{
  entry.Clear();

  mozStorageStatementScoper scoper(mLookupWithIDStatement);

  nsresult rv = mLookupWithIDStatement->BindInt64ByIndex(0, id);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mLookupWithIDStatement->ExecuteStep(exists);
  NS_ENSURE_SUCCESS(rv, rv);

  if (*exists) {
    if (!ReadStatement(mLookupWithIDStatement, entry))
      return NS_ERROR_FAILURE;
  }

  return NS_OK;
}

nsresult
nsUrlClassifierStore::ReadNoiseEntries(PRInt64 rowID,
                                       PRUint32 numRequested,
                                       bool before,
                                       nsTArray<nsUrlClassifierEntry> &entries)
{
  if (numRequested == 0) {
    return NS_OK;
  }

  mozIStorageStatement *statement =
    before ? mPartialEntriesBeforeStatement : mPartialEntriesAfterStatement;
  mozStorageStatementScoper scoper(statement);

  nsresult rv = statement->BindInt64ByIndex(0, rowID);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = statement->BindInt32ByIndex(1, numRequested);
  NS_ENSURE_SUCCESS(rv, rv);

  PRUint32 length = entries.Length();
  rv = ReadEntries(statement, entries);
  NS_ENSURE_SUCCESS(rv, rv);

  PRUint32 numRead = entries.Length() - length;

  if (numRead >= numRequested)
    return NS_OK;

  // If we didn't get enough entries, we need the search to wrap around from
  // beginning to end (or vice-versa)

  mozIStorageStatement *wraparoundStatement =
    before ? mPartialEntriesStatement : mLastPartialEntriesStatement;
  mozStorageStatementScoper wraparoundScoper(wraparoundStatement);

  rv = wraparoundStatement->BindInt32ByIndex(0, numRequested - numRead);
  NS_ENSURE_SUCCESS(rv, rv);

  return ReadEntries(wraparoundStatement, entries);
}

nsresult
nsUrlClassifierStore::RandomNumber(PRInt64 *randomNum)
{
  mozStorageStatementScoper randScoper(mRandomStatement);
  bool exists;
  nsresult rv = mRandomStatement->ExecuteStep(&exists);
  NS_ENSURE_SUCCESS(rv, rv);
  if (!exists)
    return NS_ERROR_NOT_AVAILABLE;

  *randomNum = mRandomStatement->AsInt64(0);

  return NS_OK;
}

// -------------------------------------------------------------------------
// nsUrlClassifierAddStore class implementation

// This class accesses the moz_classifier table.
class nsUrlClassifierAddStore: public nsUrlClassifierStore
{
public:
  nsUrlClassifierAddStore() {};
  virtual ~nsUrlClassifierAddStore() {};

  nsresult Init(nsUrlClassifierDBServiceWorker *worker,
                mozIStorageConnection *connection,
                const nsACString& entriesTableName);

  void Close();

  // Read the entries for a given key/table/chunk from the database
  nsresult ReadAddEntries(const nsUrlClassifierDomainHash& key,
                          PRUint32 tableId,
                          PRUint32 chunkId,
                          nsTArray<nsUrlClassifierEntry>& entry);

  // Read the entries for a given host key from the database.
  nsresult ReadAddEntries(const nsUrlClassifierDomainHash& key,
                          nsTArray<nsUrlClassifierEntry>& entry);

protected:
  nsCOMPtr<mozIStorageStatement> mLookupStatement;
  nsCOMPtr<mozIStorageStatement> mLookupWithChunkStatement;
};

nsresult
nsUrlClassifierAddStore::Init(nsUrlClassifierDBServiceWorker *worker,
                              mozIStorageConnection *connection,
                              const nsACString &entriesTableName)
{
  nsresult rv = nsUrlClassifierStore::Init(worker, connection,
                                           entriesTableName);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("INSERT OR REPLACE INTO ") + entriesTableName +
     NS_LITERAL_CSTRING(" VALUES (?1, ?2, ?3, ?4, ?5, ?6)"),
     getter_AddRefs(mInsertStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("UPDATE ") + entriesTableName +
     NS_LITERAL_CSTRING(" SET domain=?2, partial_data=?3, "
                        " complete_data=?4, chunk_id=?5, table_id=?6"
                        " WHERE id=?1"),
     getter_AddRefs(mUpdateStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesTableName +
     NS_LITERAL_CSTRING(" WHERE domain=?1"),
     getter_AddRefs(mLookupStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesTableName +
     NS_LITERAL_CSTRING(" WHERE domain=?1 AND table_id=?2 AND chunk_id=?3"),
     getter_AddRefs(mLookupWithChunkStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

void
nsUrlClassifierAddStore::Close()
{
  nsUrlClassifierStore::Close();

  mLookupStatement = nsnull;
  mLookupWithChunkStatement = nsnull;
}

nsresult
nsUrlClassifierAddStore::ReadAddEntries(const nsUrlClassifierDomainHash& hash,
                                        PRUint32 tableId,
                                        PRUint32 chunkId,
                                        nsTArray<nsUrlClassifierEntry>& entries)
{
  mozStorageStatementScoper scoper(mLookupWithChunkStatement);

  nsresult rv = mLookupWithChunkStatement->BindBlobByIndex
                  (0, hash.buf, DOMAIN_LENGTH);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mLookupWithChunkStatement->BindInt32ByIndex(1, tableId);
  NS_ENSURE_SUCCESS(rv, rv);
  rv = mLookupWithChunkStatement->BindInt32ByIndex(2, chunkId);
  NS_ENSURE_SUCCESS(rv, rv);

  return ReadEntries(mLookupWithChunkStatement, entries);
}

nsresult
nsUrlClassifierAddStore::ReadAddEntries(const nsUrlClassifierDomainHash& hash,
                                        nsTArray<nsUrlClassifierEntry>& entries)
{
  mozStorageStatementScoper scoper(mLookupStatement);

  nsresult rv = mLookupStatement->BindBlobByIndex
                  (0, hash.buf, DOMAIN_LENGTH);
  NS_ENSURE_SUCCESS(rv, rv);

  return ReadEntries(mLookupStatement, entries);
}

// -------------------------------------------------------------------------
// nsUrlClassifierSubStore class implementation

// This class accesses the moz_subs table.
class nsUrlClassifierSubStore : public nsUrlClassifierStore
{
public:
  nsUrlClassifierSubStore() {};
  virtual ~nsUrlClassifierSubStore() {};

  nsresult Init(nsUrlClassifierDBServiceWorker *worker,
                mozIStorageConnection *connection,
                const nsACString& entriesTableName);

  void Close();

  // Read an entry from a database statement
  virtual bool ReadStatement(mozIStorageStatement* statement,
                             nsUrlClassifierEntry& entry);

  // Prepare a statement to write this entry to the database
  virtual nsresult BindStatement(const nsUrlClassifierEntry& entry,
                                 mozIStorageStatement* statement);

  // Read sub entries for a given add chunk
  nsresult ReadSubEntries(PRUint32 tableId, PRUint32 chunkId,
                          nsTArray<nsUrlClassifierEntry> &subEntry);

  // Expire sub entries for a given add chunk
  nsresult ExpireAddChunk(PRUint32 tableId, PRUint32 chunkId);

protected:
  nsCOMPtr<mozIStorageStatement> mLookupWithAddChunkStatement;
  nsCOMPtr<mozIStorageStatement> mExpireAddChunkStatement;
};

nsresult
nsUrlClassifierSubStore::Init(nsUrlClassifierDBServiceWorker *worker,
                              mozIStorageConnection *connection,
                              const nsACString &entriesTableName)
{
  nsresult rv = nsUrlClassifierStore::Init(worker, connection,
                                           entriesTableName);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("INSERT OR REPLACE INTO ") + entriesTableName +
     NS_LITERAL_CSTRING(" VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"),
     getter_AddRefs(mInsertStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("UPDATE ") + entriesTableName +
     NS_LITERAL_CSTRING(" SET domain=?2, partial_data=?3, complete_data=?4,"
                        " chunk_id=?5, table_id=?6, add_chunk_id=?7"
                        " WHERE id=?1"),
     getter_AddRefs(mUpdateStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT * FROM ") + entriesTableName +
     NS_LITERAL_CSTRING(" WHERE table_id=?1 AND add_chunk_id=?2"),
     getter_AddRefs(mLookupWithAddChunkStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mConnection->CreateStatement
    (NS_LITERAL_CSTRING("DELETE FROM ") + entriesTableName +
     NS_LITERAL_CSTRING(" WHERE table_id=?1 AND add_chunk_id=?2"),
     getter_AddRefs(mExpireAddChunkStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

bool
nsUrlClassifierSubStore::ReadStatement(mozIStorageStatement* statement,
                                       nsUrlClassifierEntry& entry)
{
  if (!nsUrlClassifierStore::ReadStatement(statement, entry))
    return false;

  entry.mAddChunkId = statement->AsInt32(6);
  return true;
}

nsresult
nsUrlClassifierSubStore::BindStatement(const nsUrlClassifierEntry& entry,
                                       mozIStorageStatement* statement)
{
  nsresult rv = nsUrlClassifierStore::BindStatement(entry, statement);
  NS_ENSURE_SUCCESS(rv, rv);

  return statement->BindInt32ByIndex(6, entry.mAddChunkId);
}

nsresult
nsUrlClassifierSubStore::ReadSubEntries(PRUint32 tableId, PRUint32 addChunkId,
                                        nsTArray<nsUrlClassifierEntry>& entries)
{
  mozStorageStatementScoper scoper(mLookupWithAddChunkStatement);

  nsresult rv = mLookupWithAddChunkStatement->BindInt32ByIndex(0, tableId);
  NS_ENSURE_SUCCESS(rv, rv);
  rv = mLookupWithAddChunkStatement->BindInt32ByIndex(1, addChunkId);
  NS_ENSURE_SUCCESS(rv, rv);

  return ReadEntries(mLookupWithAddChunkStatement, entries);
}

nsresult
nsUrlClassifierSubStore::ExpireAddChunk(PRUint32 tableId, PRUint32 addChunkId)
{
  mozStorageStatementScoper scoper(mExpireAddChunkStatement);

  nsresult rv = mExpireAddChunkStatement->BindInt32ByIndex(0, tableId);
  NS_ENSURE_SUCCESS(rv, rv);
  rv = mExpireAddChunkStatement->BindInt32ByIndex(1, addChunkId);
  NS_ENSURE_SUCCESS(rv, rv);

  return mExpireAddChunkStatement->Execute();
}

void
nsUrlClassifierSubStore::Close()
{
  nsUrlClassifierStore::Close();
  mLookupWithAddChunkStatement = nsnull;
  mExpireAddChunkStatement = nsnull;
}

// Similar to GetKey(), but if the domain contains three or more components,
// two keys will be returned:
//   hostname.com/foo/bar -> [hostname.com]
//   mail.hostname.com/foo/bar -> [hostname.com, mail.hostname.com]
//   www.mail.hostname.com/foo/bar -> [hostname.com, mail.hostname.com]
static nsresult GetHostKeys(const nsACString &spec,
                            nsTArray<nsCString> &hostKeys);

// Take a lookup string (www.hostname.com/path/to/resource.html) and
// expand it into the set of fragments that should be searched for in an
// entry.
static nsresult GetLookupFragments(const nsCSubstring& spec,
                                   nsTArray<nsCString>& fragments);

// Check for a canonicalized IP address.
static bool IsCanonicalizedIP(const nsACString& host);

// Get the database key for a given URI.  This is the top three
// domain components if they exist, otherwise the top two.
//  hostname.com/foo/bar -> hostname.com
//  mail.hostname.com/foo/bar -> mail.hostname.com
//  www.mail.hostname.com/foo/bar -> mail.hostname.com
static nsresult GetKey(const nsACString& spec, nsUrlClassifierDomainHash& hash,
                       nsICryptoHash * aCryptoHash);

// We have both a prefix and a domain.  Drop the domain, but
// hash the domain, the prefix and a random value together,
// ensuring any collisions happen at different points for
// different users.
static nsresult KeyedHash(PRUint32 aPref, PRUint32 aDomain,
                          PRUint32 aKey, PRUint32 *aOut);

// -------------------------------------------------------------------------
// Actual worker implementation
class nsUrlClassifierDBServiceWorker : public nsIUrlClassifierDBServiceWorker
{
public:
  nsUrlClassifierDBServiceWorker();

  NS_DECL_ISUPPORTS
  NS_DECL_NSIURLCLASSIFIERDBSERVICE
  NS_DECL_NSIURLCLASSIFIERDBSERVICEWORKER

  // Initialize, called in the main thread
  nsresult Init(PRInt32 gethashNoise,
                nsRefPtr<nsUrlClassifierPrefixSet> & prefSet);

  // Queue a lookup for the worker to perform, called in the main thread.
  nsresult QueueLookup(const nsACString& lookupKey,
                       nsIUrlClassifierLookupCallback* callback);

  // Handle any queued-up lookups.  We call this function during long-running
  // update operations to prevent lookups from blocking for too long.
  nsresult HandlePendingLookups();

private:
  // No subclassing
  ~nsUrlClassifierDBServiceWorker();

  // Disallow copy constructor
  nsUrlClassifierDBServiceWorker(nsUrlClassifierDBServiceWorker&);

  // Try to open the db, DATABASE_FILENAME.
  nsresult OpenDb();

  // Create tables in the db if they don't exist.
  nsresult MaybeCreateTables(mozIStorageConnection* connection);

  nsresult GetTableName(PRUint32 tableId, nsACString& table);
  nsresult GetTableId(const nsACString& table, PRUint32* tableId);

  // Decompress a zlib'ed chunk (used for -exp tables)
  nsresult InflateChunk(nsACString& chunk);

  // Expand a shavar chunk into its individual entries
  nsresult GetShaEntries(PRUint32 tableId,
                         PRUint32 chunkType,
                         PRUint32 chunkNum,
                         PRUint32 domainSize,
                         PRUint32 hashSize,
                         nsACString& chunk,
                         nsTArray<nsUrlClassifierEntry>& entries);

  // Expand a chunk into its individual entries
  nsresult GetChunkEntries(const nsACString& table,
                           PRUint32 tableId,
                           PRUint32 chunkType,
                           PRUint32 chunkNum,
                           PRUint32 hashSize,
                           nsACString& chunk,
                           nsTArray<nsUrlClassifierEntry>& entries);

  // Parse one stringified range of chunks of the form "n" or "n-m" from a
  // comma-separated list of chunks.  Upon return, 'begin' will point to the
  // next range of chunks in the list of chunks.
  bool ParseChunkRange(nsACString::const_iterator &begin,
                       const nsACString::const_iterator &end,
                       PRUint32 *first, PRUint32 *last);

  // Expand a stringified chunk list into an array of ints.
  nsresult ParseChunkList(const nsACString& chunkStr,
                          nsTArray<PRUint32>& chunks);

  // Join an array of ints into a stringified chunk list.
  nsresult JoinChunkList(nsTArray<PRUint32>& chunks, nsCString& chunkStr);

  // List the add/subtract chunks that have been applied to a table
  nsresult GetChunkLists(PRUint32 tableId,
                         nsACString& addChunks,
                         nsACString& subChunks);

  // Set the list of add/subtract chunks that have been applied to a table
  nsresult SetChunkLists(PRUint32 tableId,
                         const nsACString& addChunks,
                         const nsACString& subChunks);

  // Cache the list of add/subtract chunks applied to the table, optionally
  // parsing the add or sub lists.  These lists are cached while updating
  // tables to avoid excessive database reads/writes and parsing.
  nsresult CacheChunkLists(PRUint32 tableId,
                           bool parseAdds,
                           bool parseSubs);

  // Clear the cached list of add/subtract chunks.
  void ClearCachedChunkLists();

  // Flush the cached add/subtract lists to the database.
  nsresult FlushChunkLists();

  // Inserts a chunk id into the list, sorted.  Returns TRUE if the
  // number was successfully added, FALSE if the chunk already exists.
  bool InsertChunkId(nsTArray<PRUint32>& chunks, PRUint32 chunkNum);

  // Add a list of entries to the database, merging with
  // existing entries as necessary
  nsresult AddChunk(PRUint32 tableId, PRUint32 chunkNum,
                    nsTArray<nsUrlClassifierEntry>& entries);

  // Expire an add chunk
  nsresult ExpireAdd(PRUint32 tableId, PRUint32 chunkNum);

  // Subtract a list of entries from the database
  nsresult SubChunk(PRUint32 tableId, PRUint32 chunkNum,
                    nsTArray<nsUrlClassifierEntry>& entries);

  // Expire a subtract chunk
  nsresult ExpireSub(PRUint32 tableId, PRUint32 chunkNum);

  // Handle line-oriented control information from a stream update
  nsresult ProcessResponseLines(bool* done);
  // Handle chunk data from a stream update
  nsresult ProcessChunk(bool* done);
2008-05-07 13:33:45 -07:00
|
|
|
// Sets up a transaction and begins counting update time.
|
|
|
|
nsresult SetupUpdate();
|
|
|
|
|
|
|
|
// Applies the current transaction and resets the update/working times.
|
|
|
|
nsresult ApplyUpdate();
|
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
// Reset the in-progress update stream
|
|
|
|
void ResetStream();
|
|
|
|
|
|
|
|
// Reset the in-progress update
|
2007-07-25 23:38:43 -07:00
|
|
|
void ResetUpdate();
|
|
|
|
|
|
|
|
// Look for a given lookup string (www.hostname.com/path/to/resource.html)
|
2009-02-09 22:40:39 -08:00
|
|
|
// Returns a list of entries that match.
|
|
|
|
nsresult Check(const nsCSubstring& spec,
|
|
|
|
nsTArray<nsUrlClassifierLookupResult>& results);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
// Perform a classifier lookup for a given url.
|
2008-01-29 12:57:18 -08:00
|
|
|
nsresult DoLookup(const nsACString& spec, nsIUrlClassifierLookupCallback* c);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2008-02-26 21:51:28 -08:00
|
|
|
// Add entries to the results.
|
|
|
|
nsresult AddNoise(PRInt64 nearID,
|
|
|
|
PRInt32 count,
|
|
|
|
nsTArray<nsUrlClassifierLookupResult>& results);
|
|
|
|
|
2011-09-08 13:16:59 -07:00
|
|
|
// Construct a Prefix Set with known prefixes
|
|
|
|
nsresult LoadPrefixSet(nsCOMPtr<nsIFile> & aFile);
|
|
|
|
nsresult ConstructPrefixSet();
|
2011-09-08 13:15:18 -07:00
|
|
|
|
2011-09-08 13:15:37 -07:00
|
|
|
// Set the SQLite cache size
|
|
|
|
nsresult SetCacheSize(mozIStorageConnection * aConnection,
|
|
|
|
PRInt32 aCacheSize);
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsCOMPtr<nsIFile> mDBFile;
|
2011-09-08 13:16:59 -07:00
|
|
|
nsCOMPtr<nsIFile> mPSFile;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
nsCOMPtr<nsICryptoHash> mCryptoHash;
|
2007-03-22 10:30:00 -07:00
|
|
|
|
|
|
|
// Holds a connection to the Db. We lazily initialize this because it has
|
|
|
|
// to be created in the background thread (currently mozStorageConnection
|
|
|
|
// isn't thread safe).
|
2007-07-25 23:38:43 -07:00
|
|
|
nsCOMPtr<mozIStorageConnection> mConnection;
|
|
|
|
|
2008-01-12 13:32:01 -08:00
|
|
|
// The main collection of entries. This is the store that will be checked
|
|
|
|
// when classifying a URL.
|
2008-01-29 12:57:18 -08:00
|
|
|
nsUrlClassifierAddStore mMainStore;
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-01-12 13:32:01 -08:00
|
|
|
// The collection of subs waiting for their accompanying add.
|
2008-01-29 12:57:18 -08:00
|
|
|
nsUrlClassifierSubStore mPendingSubStore;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
nsCOMPtr<mozIStorageStatement> mGetChunkListsStatement;
|
|
|
|
nsCOMPtr<mozIStorageStatement> mSetChunkListsStatement;
|
|
|
|
|
|
|
|
nsCOMPtr<mozIStorageStatement> mGetTablesStatement;
|
|
|
|
nsCOMPtr<mozIStorageStatement> mGetTableIdStatement;
|
|
|
|
nsCOMPtr<mozIStorageStatement> mGetTableNameStatement;
|
|
|
|
nsCOMPtr<mozIStorageStatement> mInsertTableIdStatement;
|
2010-07-01 10:57:13 -07:00
|
|
|
nsCOMPtr<mozIStorageStatement> mGetPageSizeStatement;
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
// Stores the last time a given table was updated.
|
|
|
|
nsDataHashtable<nsCStringHashKey, PRInt64> mTableFreshness;
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
// We receive data in small chunks that may be broken in the middle of
|
|
|
|
// a line. So we save the last partial line here.
|
|
|
|
nsCString mPendingStreamUpdate;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
PRInt32 mUpdateWait;
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool mResetRequested;
|
|
|
|
bool mGrewCache;
|
2008-02-29 16:46:43 -08:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
enum {
|
|
|
|
STATE_LINE,
|
|
|
|
STATE_CHUNK
|
|
|
|
} mState;
|
|
|
|
|
|
|
|
enum {
|
|
|
|
CHUNK_ADD,
|
|
|
|
CHUNK_SUB
|
|
|
|
} mChunkType;
|
|
|
|
|
|
|
|
PRUint32 mChunkNum;
|
2008-01-29 12:57:18 -08:00
|
|
|
PRUint32 mHashSize;
|
2007-07-25 23:38:43 -07:00
|
|
|
PRUint32 mChunkLen;
|
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
// List of tables included in this update.
|
|
|
|
nsTArray<nsCString> mUpdateTables;
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsCString mUpdateTable;
|
|
|
|
PRUint32 mUpdateTableId;
|
|
|
|
|
|
|
|
nsresult mUpdateStatus;
|
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
nsCOMPtr<nsIUrlClassifierUpdateObserver> mUpdateObserver;
|
2011-09-28 23:19:26 -07:00
|
|
|
bool mInStream;
|
|
|
|
bool mPrimaryStream;
|
2008-01-12 14:22:03 -08:00
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool mHaveCachedLists;
|
2007-12-03 20:49:22 -08:00
|
|
|
PRUint32 mCachedListsTable;
|
|
|
|
nsCAutoString mCachedSubsStr;
|
|
|
|
nsCAutoString mCachedAddsStr;
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool mHaveCachedAddChunks;
|
2007-12-03 20:49:22 -08:00
|
|
|
nsTArray<PRUint32> mCachedAddChunks;
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool mHaveCachedSubChunks;
|
2007-12-03 20:49:22 -08:00
|
|
|
nsTArray<PRUint32> mCachedSubChunks;
|
|
|
|
|
2008-02-27 00:51:02 -08:00
|
|
|
// The client key with which the data from the server will be MAC'ed.
|
|
|
|
nsCString mUpdateClientKey;
|
|
|
|
|
|
|
|
// The MAC stated by the server.
|
|
|
|
nsCString mServerMAC;
|
|
|
|
|
2008-05-07 13:33:45 -07:00
|
|
|
// Start time of the current update interval. This will be reset
|
|
|
|
// every time we apply the update.
|
|
|
|
PRIntervalTime mUpdateStartTime;
|
|
|
|
|
2008-02-27 00:51:02 -08:00
|
|
|
nsCOMPtr<nsICryptoHMAC> mHMAC;
|
2008-02-26 21:51:28 -08:00
|
|
|
// The number of noise entries to add to the set of lookup results.
|
|
|
|
PRInt32 mGethashNoise;
|
2007-12-03 20:49:22 -08:00
|
|
|
|
2011-09-08 13:15:18 -07:00
|
|
|
// Set of prefixes known to be in the database
|
2011-09-08 13:15:27 -07:00
|
|
|
nsRefPtr<nsUrlClassifierPrefixSet> mPrefixSet;
|
2008-10-19 20:07:46 -07:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
// Pending lookups are stored in a queue for processing. The queue
|
|
|
|
// is protected by mPendingLookupLock.
|
Rollup of bug 645263 and bug 646259: Switch to mozilla:: sync primitives. r=cjones,dbaron,doublec,ehsan src=bsmedberg
Bug 645263, part 0: Count sync primitive ctor/dtors. r=dbaron
Bug 645263, part 1: Migrate content/media to mozilla:: sync primitives. r=doublec
Bug 645263, part 2: Migrate modules/plugin to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 3: Migrate nsComponentManagerImpl to mozilla:: sync primitives. sr=bsmedberg
Bug 645263, part 4: Migrate everything else to mozilla:: sync primitives. r=dbaron
Bug 645263, part 5: Remove nsAutoLock.*. sr=bsmedberg
Bug 645263, part 6: Make editor test be nicer to deadlock detector. r=ehsan
Bug 645263, part 7: Disable tracemalloc backtraces for xpcshell tests. r=dbaron
Bug 646259: Fix nsCacheService to use a CondVar for notifying. r=cjones
2011-03-31 21:29:02 -07:00
|
|
|
Mutex mPendingLookupLock;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
class PendingLookup {
|
|
|
|
public:
|
|
|
|
nsCString mKey;
|
2008-01-29 12:57:18 -08:00
|
|
|
nsCOMPtr<nsIUrlClassifierLookupCallback> mCallback;
|
2007-07-25 23:38:43 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
// list of pending lookups
|
|
|
|
nsTArray<PendingLookup> mPendingLookups;
|
2007-03-22 10:30:00 -07:00
|
|
|
};
|
|
|
|
|
2008-04-07 23:18:35 -07:00
|
|
|
NS_IMPL_THREADSAFE_ISUPPORTS2(nsUrlClassifierDBServiceWorker,
|
|
|
|
nsIUrlClassifierDBServiceWorker,
|
|
|
|
nsIUrlClassifierDBService)
|
2007-03-22 10:30:00 -07:00
|
|
|
|
|
|
|
nsUrlClassifierDBServiceWorker::nsUrlClassifierDBServiceWorker()
|
2008-01-29 12:57:18 -08:00
|
|
|
: mUpdateWait(0)
|
2011-10-17 07:59:28 -07:00
|
|
|
, mResetRequested(false)
|
|
|
|
, mGrewCache(false)
|
2008-01-29 12:57:18 -08:00
|
|
|
, mState(STATE_LINE)
|
|
|
|
, mChunkType(CHUNK_ADD)
|
|
|
|
, mChunkNum(0)
|
|
|
|
, mHashSize(0)
|
|
|
|
, mChunkLen(0)
|
|
|
|
, mUpdateTableId(0)
|
|
|
|
, mUpdateStatus(NS_OK)
|
2011-10-17 07:59:28 -07:00
|
|
|
, mInStream(false)
|
|
|
|
, mPrimaryStream(false)
|
|
|
|
, mHaveCachedLists(false)
|
2007-12-03 20:49:22 -08:00
|
|
|
, mCachedListsTable(PR_UINT32_MAX)
|
2011-10-17 07:59:28 -07:00
|
|
|
, mHaveCachedAddChunks(false)
|
|
|
|
, mHaveCachedSubChunks(false)
|
2008-05-07 13:33:45 -07:00
|
|
|
, mUpdateStartTime(0)
|
2008-02-26 21:51:28 -08:00
|
|
|
, mGethashNoise(0)
|
2011-09-08 13:15:18 -07:00
|
|
|
, mPrefixSet(0)
|
2011-03-31 21:29:02 -07:00
|
|
|
, mPendingLookupLock("nsUrlClassifierDBServerWorker.mPendingLookupLock")
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
|
|
|
}
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
nsUrlClassifierDBServiceWorker::~nsUrlClassifierDBServiceWorker()
|
|
|
|
{
|
2007-07-25 23:38:43 -07:00
|
|
|
NS_ASSERTION(!mConnection,
|
2007-03-22 10:30:00 -07:00
|
|
|
"Db connection not closed, leaking memory! Call CloseDb "
|
|
|
|
"to close the connection.");
|
2007-07-25 20:11:43 -07:00
|
|
|
}
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsresult
|
2011-09-08 13:15:18 -07:00
|
|
|
nsUrlClassifierDBServiceWorker::Init(PRInt32 gethashNoise,
|
2011-09-08 13:15:27 -07:00
|
|
|
nsRefPtr<nsUrlClassifierPrefixSet> & prefSet)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
2008-02-26 21:51:28 -08:00
|
|
|
mGethashNoise = gethashNoise;
|
2011-09-08 13:15:18 -07:00
|
|
|
mPrefixSet = prefSet;
|
2008-02-26 21:51:28 -08:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
// Compute database filename
|
2007-07-25 18:49:20 -07:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
// Because we dump raw integers into the database, this database isn't
|
|
|
|
// portable between machine types, so store it in the local profile dir.
|
|
|
|
nsresult rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_LOCAL_50_DIR,
|
|
|
|
getter_AddRefs(mDBFile));
|
2008-01-04 22:38:30 -08:00
|
|
|
|
|
|
|
if (NS_FAILED(rv)) {
|
|
|
|
rv = NS_GetSpecialDirectory(NS_APP_USER_PROFILE_50_DIR,
|
|
|
|
getter_AddRefs(mDBFile));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NS_FAILED(rv)) return NS_ERROR_NOT_AVAILABLE;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2011-09-08 13:16:59 -07:00
|
|
|
rv = mDBFile->Clone(getter_AddRefs(mPSFile));
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
rv = mDBFile->Append(NS_LITERAL_STRING(DATABASE_FILENAME));
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-09-08 13:16:59 -07:00
|
|
|
rv = mPSFile->Append(NS_LITERAL_STRING(PREFIXSET_FILENAME));
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
ResetUpdate();
|
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
mTableFreshness.Init();
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::QueueLookup(const nsACString& spec,
|
2008-01-29 12:57:18 -08:00
|
|
|
nsIUrlClassifierLookupCallback* callback)
|
2007-07-25 20:11:43 -07:00
|
|
|
{
|
2011-03-31 21:29:02 -07:00
|
|
|
MutexAutoLock lock(mPendingLookupLock);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
PendingLookup* lookup = mPendingLookups.AppendElement();
|
|
|
|
if (!lookup) return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
|
|
|
lookup->mKey = spec;
|
|
|
|
lookup->mCallback = callback;
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2008-10-19 20:07:46 -07:00
|
|
|
nsresult
|
2011-09-08 13:17:14 -07:00
|
|
|
nsUrlClassifierDBService::CheckClean(const nsACString &spec,
|
2011-09-28 23:19:26 -07:00
|
|
|
bool *clean)
|
2008-10-19 20:07:46 -07:00
|
|
|
{
|
2011-09-08 13:17:34 -07:00
|
|
|
Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_PS_LOOKUP_TIME> timer;
|
|
|
|
|
2011-09-08 13:17:14 -07:00
|
|
|
// Get the set of fragments to look up.
|
|
|
|
nsTArray<nsCString> fragments;
|
|
|
|
nsresult rv = GetLookupFragments(spec, fragments);
|
2008-10-19 20:07:46 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-09-08 13:17:25 -07:00
|
|
|
PRUint32 prefixkey;
|
|
|
|
rv = mPrefixSet->GetKey(&prefixkey);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
*clean = true;
|
2008-10-19 20:07:46 -07:00
|
|
|
|
2011-09-08 13:17:14 -07:00
|
|
|
for (PRUint32 i = 0; i < fragments.Length(); i++) {
|
|
|
|
nsUrlClassifierDomainHash fragmentKeyHash;
|
|
|
|
fragmentKeyHash.FromPlaintext(fragments[i], mHash);
|
2011-09-08 13:15:27 -07:00
|
|
|
|
2011-09-08 13:17:25 -07:00
|
|
|
// Find the corresponding host key
|
|
|
|
nsUrlClassifierDomainHash hostkey;
|
|
|
|
rv = GetKey(fragments[i], hostkey, mHash);
|
|
|
|
if (NS_FAILED(rv)) {
|
|
|
|
/* This happens for hosts on the local network;
|
|
|
|
we can't check these against the DB */
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
PRUint32 hostprefix = hostkey.ToUint32();
|
2011-09-08 13:17:14 -07:00
|
|
|
PRUint32 fragkey = fragmentKeyHash.ToUint32();
|
2011-09-08 13:17:25 -07:00
|
|
|
PRUint32 codedkey;
|
|
|
|
rv = KeyedHash(fragkey, hostprefix, prefixkey, &codedkey);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
2011-09-08 13:15:27 -07:00
|
|
|
|
2011-10-20 07:48:11 -07:00
|
|
|
bool found = false;
|
2011-09-28 23:19:26 -07:00
|
|
|
bool ready = false; /* opportunistic probe */
|
2011-09-08 13:17:25 -07:00
|
|
|
rv = mPrefixSet->Probe(codedkey, prefixkey, &ready, &found);
|
2011-09-08 13:15:27 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
2011-09-08 13:17:14 -07:00
|
|
|
LOG(("CheckClean Probed %X ready: %d found: %d ",
|
2011-09-08 13:17:25 -07:00
|
|
|
codedkey, ready, found));
|
2011-09-08 13:15:27 -07:00
|
|
|
if (found || !ready) {
|
2011-10-17 07:59:28 -07:00
|
|
|
*clean = false;
|
2008-10-19 20:07:46 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2011-09-08 13:17:14 -07:00
|
|
|
static nsresult GetHostKeys(const nsACString &spec,
|
|
|
|
nsTArray<nsCString> &hostKeys)
|
|
|
|
{
|
|
|
|
nsACString::const_iterator begin, end, iter;
|
|
|
|
spec.BeginReading(begin);
|
|
|
|
spec.EndReading(end);
|
|
|
|
|
|
|
|
iter = begin;
|
|
|
|
if (!FindCharInReadable('/', iter, end)) {
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
const nsCSubstring& host = Substring(begin, iter);
|
|
|
|
|
|
|
|
if (IsCanonicalizedIP(host)) {
|
|
|
|
nsCString *key = hostKeys.AppendElement();
|
|
|
|
if (!key)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
|
|
|
key->Assign(host);
|
|
|
|
key->Append("/");
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsTArray<nsCString> hostComponents;
|
|
|
|
ParseString(PromiseFlatCString(host), '.', hostComponents);
|
|
|
|
|
|
|
|
if (hostComponents.Length() < 2) {
|
|
|
|
// no host or top-level host; this won't match anything in the db
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
// First check with two domain components
|
|
|
|
PRInt32 last = PRInt32(hostComponents.Length()) - 1;
|
|
|
|
nsCString *lookupHost = hostKeys.AppendElement();
|
|
|
|
if (!lookupHost)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
|
|
|
lookupHost->Assign(hostComponents[last - 1]);
|
|
|
|
lookupHost->Append(".");
|
|
|
|
lookupHost->Append(hostComponents[last]);
|
|
|
|
lookupHost->Append("/");
|
|
|
|
|
|
|
|
// Now check with three domain components
|
|
|
|
if (hostComponents.Length() > 2) {
|
|
|
|
nsCString *lookupHost2 = hostKeys.AppendElement();
|
|
|
|
if (!lookupHost2)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
lookupHost2->Assign(hostComponents[last - 2]);
|
|
|
|
lookupHost2->Append(".");
|
|
|
|
lookupHost2->Append(*lookupHost);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsresult
|
2011-09-08 13:17:14 -07:00
|
|
|
GetLookupFragments(const nsACString& spec,
|
|
|
|
nsTArray<nsCString>& fragments)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
|
|
|
fragments.Clear();
|
|
|
|
|
|
|
|
nsACString::const_iterator begin, end, iter;
|
|
|
|
spec.BeginReading(begin);
|
|
|
|
spec.EndReading(end);
|
|
|
|
|
|
|
|
iter = begin;
|
|
|
|
if (!FindCharInReadable('/', iter, end)) {
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
const nsCSubstring& host = Substring(begin, iter++);
|
2007-10-01 16:53:53 -07:00
|
|
|
nsCAutoString path;
|
|
|
|
path.Assign(Substring(iter, end));
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* From the protocol doc:
|
2007-10-01 16:53:53 -07:00
|
|
|
* For the hostname, the client will try at most 5 different strings. They
|
2007-07-25 23:38:43 -07:00
|
|
|
* are:
|
|
|
|
* a) The exact hostname of the url
|
|
|
|
* b) The 4 hostnames formed by starting with the last 5 components and
|
|
|
|
* successively removing the leading component. The top-level component
|
2011-06-29 22:02:15 -07:00
|
|
|
* can be skipped. This is not done if the hostname is a numerical IP.
|
2007-07-25 23:38:43 -07:00
|
|
|
*/
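// Rough illustration (hypothetical URL, not from the protocol doc): for the
// host "a.b.c.example.test" the loop below tries the exact host plus
// "example.test", "c.example.test" and "b.example.test", capped at
// MAX_HOST_COMPONENTS and never the bare top-level domain on its own.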
|
2009-01-21 20:15:34 -08:00
|
|
|
nsTArray<nsCString> hosts;
|
|
|
|
hosts.AppendElement(host);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2011-06-29 22:02:15 -07:00
|
|
|
if (!IsCanonicalizedIP(host)) {
|
2007-07-25 23:38:43 -07:00
|
|
|
host.BeginReading(begin);
|
2011-06-29 22:02:15 -07:00
|
|
|
host.EndReading(end);
|
|
|
|
int numHostComponents = 0;
|
|
|
|
while (RFindInReadable(NS_LITERAL_CSTRING("."), begin, end) &&
|
|
|
|
numHostComponents < MAX_HOST_COMPONENTS) {
|
|
|
|
// don't bother checking toplevel domains
|
|
|
|
if (++numHostComponents >= 2) {
|
|
|
|
host.EndReading(iter);
|
|
|
|
hosts.AppendElement(Substring(end, iter));
|
|
|
|
}
|
|
|
|
end = begin;
|
|
|
|
host.BeginReading(begin);
|
|
|
|
}
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* From the protocol doc:
|
2007-10-01 16:53:53 -07:00
|
|
|
* For the path, the client will also try at most 6 different strings.
|
2007-07-25 23:38:43 -07:00
|
|
|
* They are:
|
2007-10-01 16:53:53 -07:00
|
|
|
* a) the exact path of the url, including query parameters
|
|
|
|
* b) the exact path of the url, without query parameters
|
|
|
|
* c) the 4 paths formed by starting at the root (/) and
|
2007-07-25 23:38:43 -07:00
|
|
|
* successively appending path components, including a trailing
|
|
|
|
* slash. This behavior should only extend up to the next-to-last
|
|
|
|
* path component, that is, a trailing slash should never be
|
|
|
|
* appended that was not present in the original url.
|
|
|
|
*/
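// Rough illustration (hypothetical URL, not from the protocol doc): for the
// spec "host.example.test/1/2.html?param=1" the code below collects the
// paths "1/2.html", "1/", "1/2.html?param=1" and "", which later combine
// with each host into keys such as "host.example.test/1/2.html" and
// "host.example.test/".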
|
2009-01-21 20:15:34 -08:00
|
|
|
nsTArray<nsCString> paths;
|
2011-06-29 22:01:34 -07:00
|
|
|
nsCAutoString pathToAdd;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2011-06-29 22:01:34 -07:00
|
|
|
path.BeginReading(begin);
|
2007-10-01 16:53:53 -07:00
|
|
|
path.EndReading(end);
|
2011-06-29 22:01:34 -07:00
|
|
|
iter = begin;
|
2007-10-01 16:53:53 -07:00
|
|
|
if (FindCharInReadable('?', iter, end)) {
|
2011-06-29 22:01:34 -07:00
|
|
|
pathToAdd = Substring(begin, iter);
|
|
|
|
paths.AppendElement(pathToAdd);
|
|
|
|
end = iter;
|
2007-10-01 16:53:53 -07:00
|
|
|
}
|
|
|
|
|
2011-06-29 22:01:34 -07:00
|
|
|
int numPathComponents = 1;
|
2007-07-25 23:38:43 -07:00
|
|
|
iter = begin;
|
|
|
|
while (FindCharInReadable('/', iter, end) &&
|
2011-06-29 22:01:34 -07:00
|
|
|
numPathComponents < MAX_PATH_COMPONENTS) {
|
2007-07-25 23:38:43 -07:00
|
|
|
iter++;
|
2011-06-29 22:01:34 -07:00
|
|
|
pathToAdd.Assign(Substring(begin, iter));
|
|
|
|
paths.AppendElement(pathToAdd);
|
|
|
|
numPathComponents++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we haven't already done so, add the full path
|
|
|
|
if (!pathToAdd.Equals(path)) {
|
|
|
|
paths.AppendElement(path);
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
2011-06-29 22:01:34 -07:00
|
|
|
// Check an empty path (for whole-domain blacklist entries)
|
|
|
|
paths.AppendElement(EmptyCString());
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2009-01-21 20:15:34 -08:00
|
|
|
for (PRUint32 hostIndex = 0; hostIndex < hosts.Length(); hostIndex++) {
|
|
|
|
for (PRUint32 pathIndex = 0; pathIndex < paths.Length(); pathIndex++) {
|
2008-10-19 20:07:46 -07:00
|
|
|
nsCString key;
|
2009-01-21 20:15:34 -08:00
|
|
|
key.Assign(hosts[hostIndex]);
|
2007-07-25 23:38:43 -07:00
|
|
|
key.Append('/');
|
2009-01-21 20:15:34 -08:00
|
|
|
key.Append(paths[pathIndex]);
|
2007-07-25 23:38:43 -07:00
|
|
|
LOG(("Checking %s", key.get()));
|
|
|
|
|
2008-10-19 20:07:46 -07:00
|
|
|
fragments.AppendElement(key);
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
2011-09-08 13:15:18 -07:00
|
|
|
nsUrlClassifierDBServiceWorker::Check(const nsACString& spec,
|
|
|
|
nsTArray<nsUrlClassifierLookupResult>& results)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
2011-09-08 13:17:14 -07:00
|
|
|
PRInt64 now = (PR_Now() / PR_USEC_PER_SEC);
|
|
|
|
|
|
|
|
// Get list of host keys to look up
|
|
|
|
nsAutoTArray<nsCString, 2> lookupHosts;
|
|
|
|
nsresult rv = GetHostKeys(spec, lookupHosts);
|
|
|
|
|
|
|
|
nsTArray<nsUrlClassifierEntry> mCachedEntries;
|
|
|
|
|
|
|
|
// Gather host's prefixes
|
|
|
|
for (PRUint32 i = 0; i < lookupHosts.Length(); i++) {
|
|
|
|
// Find the corresponding host key
|
|
|
|
nsUrlClassifierDomainHash hostKey;
|
2011-09-08 13:17:25 -07:00
|
|
|
nsresult rv = GetKey(lookupHosts[i], hostKey, mCryptoHash);
|
2011-09-08 13:17:14 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
// Read the entries for this fragment's host from SQLite
|
|
|
|
mMainStore.ReadAddEntries(hostKey, mCachedEntries);
|
|
|
|
}
|
|
|
|
|
2008-10-19 20:07:46 -07:00
|
|
|
// Now get the set of fragments to look up.
|
|
|
|
nsTArray<nsCString> fragments;
|
2011-09-08 13:17:14 -07:00
|
|
|
rv = GetLookupFragments(spec, fragments);
|
2007-07-25 23:38:43 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2008-10-19 20:07:46 -07:00
|
|
|
// Now check each lookup fragment against the entries in the DB.
|
|
|
|
for (PRUint32 i = 0; i < fragments.Length(); i++) {
|
|
|
|
nsUrlClassifierCompleteHash lookupHash;
|
|
|
|
lookupHash.FromPlaintext(fragments[i], mCryptoHash);
|
2008-04-15 15:39:44 -07:00
|
|
|
|
2011-09-08 13:17:14 -07:00
|
|
|
for (PRUint32 j = 0; j < mCachedEntries.Length(); j++) {
|
|
|
|
nsUrlClassifierEntry &entry = mCachedEntries[j];
|
|
|
|
if (entry.Match(lookupHash)) {
|
|
|
|
// If the entry doesn't contain a complete hash, we need to
|
|
|
|
// save it here so that it can be compared against the
|
|
|
|
// complete hash. However, we don't set entry.mHaveComplete
|
|
|
|
// because it isn't a verified part of the entry yet.
|
|
|
|
nsUrlClassifierLookupResult *result = results.AppendElement();
|
|
|
|
if (!result)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
2008-04-15 15:39:44 -07:00
|
|
|
|
2011-09-08 13:17:14 -07:00
|
|
|
result->mLookupFragment = lookupHash;
|
|
|
|
result->mEntry = entry;
|
2008-04-15 15:39:44 -07:00
|
|
|
|
2011-09-08 13:17:14 -07:00
|
|
|
// Fill in the table name.
|
|
|
|
GetTableName(entry.mTableId, result->mTableName);
|
2008-04-15 15:39:44 -07:00
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool fresh;
|
2011-09-08 13:17:14 -07:00
|
|
|
PRInt64 tableUpdateTime;
|
|
|
|
if (mTableFreshness.Get(result->mTableName, &tableUpdateTime)) {
|
|
|
|
LOG(("tableUpdateTime: %lld, now: %lld, freshnessGuarantee: %d\n",
|
|
|
|
tableUpdateTime, now, gFreshnessGuarantee));
|
|
|
|
fresh = ((now - tableUpdateTime) <= gFreshnessGuarantee);
|
|
|
|
} else {
|
|
|
|
LOG(("No expiration time for this table.\n"));
|
2011-10-17 07:59:28 -07:00
|
|
|
fresh = false;
|
2011-09-08 13:15:18 -07:00
|
|
|
}
|
2011-09-08 13:17:14 -07:00
|
|
|
|
|
|
|
// This is a confirmed result if we match a complete fragment in
|
|
|
|
// an up-to-date table.
|
|
|
|
result->mConfirmed = entry.mHaveComplete && fresh;
|
|
|
|
|
|
|
|
LOG(("Found a result. complete=%d, fresh=%d",
|
|
|
|
entry.mHaveComplete, fresh));
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Looking up a key in the database is a two-step process:
|
|
|
|
*
|
|
|
|
* a) First we look for any Entries in the database that might apply to this
|
|
|
|
* url. For each URL there are one or two possible domain names to check:
|
|
|
|
* the two-part domain name (example.com) and the three-part name
|
|
|
|
* (www.example.com). We check the database for both of these.
|
|
|
|
* b) If we find any entries, we check the list of fragments for that entry
|
|
|
|
* against the possible subfragments of the URL as described in the
|
|
|
|
* "Simplified Regular Expression Lookup" section of the protocol doc.
|
|
|
|
*/
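// Illustrative example (hypothetical URL): a lookup for
// "www.mail.example.test/index.html" first reads the entries stored under
// "example.test/" and "mail.example.test/", then matches the URL's fragments
// against those entries.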
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::DoLookup(const nsACString& spec,
|
2008-01-29 12:57:18 -08:00
|
|
|
nsIUrlClassifierLookupCallback* c)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
|
|
|
if (gShuttingDownThread) {
|
2008-01-29 12:57:18 -08:00
|
|
|
c->LookupComplete(nsnull);
|
2007-07-25 20:11:43 -07:00
|
|
|
return NS_ERROR_NOT_INITIALIZED;
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
nsresult rv = OpenDb();
|
|
|
|
if (NS_FAILED(rv)) {
|
2008-01-29 12:57:18 -08:00
|
|
|
c->LookupComplete(nsnull);
|
2007-07-25 23:38:43 -07:00
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
#if defined(PR_LOGGING)
|
|
|
|
PRIntervalTime clockStart = 0;
|
|
|
|
if (LOG_ENABLED()) {
|
|
|
|
clockStart = PR_IntervalNow();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
nsAutoPtr<nsTArray<nsUrlClassifierLookupResult> > results;
|
|
|
|
results = new nsTArray<nsUrlClassifierLookupResult>();
|
|
|
|
if (!results) {
|
|
|
|
c->LookupComplete(nsnull);
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
}
|
|
|
|
|
2009-02-09 22:40:39 -08:00
|
|
|
// we ignore failures from Check because we'd rather return the
|
|
|
|
// results that were found than fail.
|
|
|
|
Check(spec, *results);
|
2008-01-28 17:58:15 -08:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
#if defined(PR_LOGGING)
|
|
|
|
if (LOG_ENABLED()) {
|
|
|
|
PRIntervalTime clockEnd = PR_IntervalNow();
|
|
|
|
LOG(("query took %dms\n",
|
|
|
|
PR_IntervalToMilliseconds(clockEnd - clockStart)));
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-02-26 21:51:28 -08:00
|
|
|
for (PRUint32 i = 0; i < results->Length(); i++) {
|
|
|
|
if (!results->ElementAt(i).mConfirmed) {
|
|
|
|
// We're going to be doing a gethash request, add some extra entries.
|
|
|
|
AddNoise(results->ElementAt(i).mEntry.mId, mGethashNoise, *results);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
// At this point ownership of 'results' is handed to the callback.
|
|
|
|
c->LookupComplete(results.forget());
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::HandlePendingLookups()
|
|
|
|
{
|
2011-03-31 21:29:02 -07:00
|
|
|
MutexAutoLock lock(mPendingLookupLock);
|
2007-07-25 23:38:43 -07:00
|
|
|
while (mPendingLookups.Length() > 0) {
|
|
|
|
PendingLookup lookup = mPendingLookups[0];
|
|
|
|
mPendingLookups.RemoveElementAt(0);
|
2011-03-31 21:29:02 -07:00
|
|
|
{
|
|
|
|
MutexAutoUnlock unlock(mPendingLookupLock);
|
|
|
|
DoLookup(lookup.mKey, lookup.mCallback);
|
|
|
|
}
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2008-02-26 21:51:28 -08:00
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::AddNoise(PRInt64 nearID,
|
|
|
|
PRInt32 count,
|
|
|
|
nsTArray<nsUrlClassifierLookupResult>& results)
|
|
|
|
{
|
|
|
|
if (count < 1) {
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2011-12-02 01:47:43 -08:00
|
|
|
PRInt64 randomNum;
|
|
|
|
nsresult rv = mMainStore.RandomNumber(&randomNum);
|
2008-02-26 21:51:28 -08:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
PRInt32 numBefore = randomNum % count;
|
|
|
|
|
|
|
|
nsTArray<nsUrlClassifierEntry> noiseEntries;
|
2011-10-17 07:59:28 -07:00
|
|
|
rv = mMainStore.ReadNoiseEntries(nearID, numBefore, true, noiseEntries);
|
2008-02-26 21:51:28 -08:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
rv = mMainStore.ReadNoiseEntries(nearID, count - numBefore, false, noiseEntries);
|
2008-02-26 21:51:28 -08:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
for (PRUint32 i = 0; i < noiseEntries.Length(); i++) {
|
|
|
|
nsUrlClassifierLookupResult *result = results.AppendElement();
|
|
|
|
if (!result)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
|
|
|
result->mEntry = noiseEntries[i];
|
2011-10-17 07:59:28 -07:00
|
|
|
result->mConfirmed = false;
|
|
|
|
result->mNoise = true;
|
2008-02-26 21:51:28 -08:00
|
|
|
|
|
|
|
// Fill in the table name.
|
|
|
|
GetTableName(noiseEntries[i].mTableId, result->mTableName);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
// Lookup a key in the db.
|
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBServiceWorker::Lookup(const nsACString& spec,
|
2008-01-29 12:57:18 -08:00
|
|
|
nsIUrlClassifierCallback* c)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
|
|
|
return HandlePendingLookups();
|
|
|
|
}
|
|
|
|
|
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBServiceWorker::GetTables(nsIUrlClassifierCallback* c)
|
|
|
|
{
|
|
|
|
if (gShuttingDownThread)
|
|
|
|
return NS_ERROR_NOT_INITIALIZED;
|
|
|
|
|
|
|
|
nsresult rv = OpenDb();
|
|
|
|
if (NS_FAILED(rv)) {
|
2011-07-06 11:08:52 -07:00
|
|
|
NS_ERROR("Unable to open database");
|
2007-07-25 23:38:43 -07:00
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
mozStorageStatementScoper scoper(mGetTablesStatement);
|
|
|
|
|
|
|
|
nsCAutoString response;
|
2011-09-28 23:19:26 -07:00
|
|
|
bool hasMore;
|
2007-07-25 23:38:43 -07:00
|
|
|
while (NS_SUCCEEDED(rv = mGetTablesStatement->ExecuteStep(&hasMore)) &&
|
|
|
|
hasMore) {
|
|
|
|
nsCAutoString val;
|
|
|
|
mGetTablesStatement->GetUTF8String(0, val);
|
|
|
|
|
|
|
|
if (val.IsEmpty()) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
response.Append(val);
|
|
|
|
response.Append(';');
|
|
|
|
|
|
|
|
mGetTablesStatement->GetUTF8String(1, val);
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool haveAdds = false;
|
2007-07-25 23:38:43 -07:00
|
|
|
if (!val.IsEmpty()) {
|
|
|
|
response.Append("a:");
|
|
|
|
response.Append(val);
|
2011-10-17 07:59:28 -07:00
|
|
|
haveAdds = true;
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
mGetTablesStatement->GetUTF8String(2, val);
|
|
|
|
if (!val.IsEmpty()) {
|
2007-10-11 16:25:10 -07:00
|
|
|
if (haveAdds)
|
|
|
|
response.Append(":");
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
response.Append("s:");
|
|
|
|
response.Append(val);
|
|
|
|
}
|
|
|
|
|
|
|
|
response.Append('\n');
|
|
|
|
}
|
|
|
|
|
|
|
|
if (NS_FAILED(rv)) {
|
|
|
|
response.Truncate();
|
|
|
|
}
|
|
|
|
|
|
|
|
c->HandleEvent(response);
|
|
|
|
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::GetTableId(const nsACString& table,
|
|
|
|
PRUint32* tableId)
|
|
|
|
{
|
|
|
|
mozStorageStatementScoper findScoper(mGetTableIdStatement);
|
|
|
|
|
2011-03-31 10:19:31 -07:00
|
|
|
nsresult rv = mGetTableIdStatement->BindUTF8StringByIndex(0, table);
|
2007-07-25 23:38:43 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool exists;
|
2007-07-25 23:38:43 -07:00
|
|
|
rv = mGetTableIdStatement->ExecuteStep(&exists);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
if (exists) {
|
|
|
|
*tableId = mGetTableIdStatement->AsInt32(0);
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
mozStorageStatementScoper insertScoper(mInsertTableIdStatement);
|
2011-03-31 10:19:31 -07:00
|
|
|
rv = mInsertTableIdStatement->BindUTF8StringByIndex(0, table);
|
2007-07-25 23:38:43 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
rv = mInsertTableIdStatement->Execute();
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
PRInt64 rowId;
|
|
|
|
rv = mConnection->GetLastInsertRowID(&rowId);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
if (rowId > PR_UINT32_MAX)
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
|
|
|
|
*tableId = rowId;
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::GetTableName(PRUint32 tableId,
|
|
|
|
nsACString& tableName)
|
|
|
|
{
|
|
|
|
mozStorageStatementScoper findScoper(mGetTableNameStatement);
|
2011-03-31 10:19:31 -07:00
|
|
|
nsresult rv = mGetTableNameStatement->BindInt32ByIndex(0, tableId);
|
2007-07-25 23:38:43 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
2011-09-28 23:19:26 -07:00
|
|
|
bool exists;
|
2007-07-25 23:38:43 -07:00
|
|
|
rv = mGetTableNameStatement->ExecuteStep(&exists);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
if (!exists) return NS_ERROR_FAILURE;
|
|
|
|
|
|
|
|
return mGetTableNameStatement->GetUTF8String(0, tableName);
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::InflateChunk(nsACString& chunk)
|
|
|
|
{
|
|
|
|
nsCAutoString inflated;
|
|
|
|
char buf[4096];
|
|
|
|
|
|
|
|
const nsPromiseFlatCString& flat = PromiseFlatCString(chunk);
|
|
|
|
|
|
|
|
z_stream stream;
|
|
|
|
memset(&stream, 0, sizeof(stream));
|
|
|
|
stream.next_in = (Bytef*)flat.get();
|
|
|
|
stream.avail_in = flat.Length();
|
|
|
|
|
|
|
|
if (inflateInit(&stream) != Z_OK) {
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
int code;
|
|
|
|
do {
|
|
|
|
stream.next_out = (Bytef*)buf;
|
|
|
|
stream.avail_out = sizeof(buf);
|
2008-01-28 17:58:15 -08:00
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
code = inflate(&stream, Z_NO_FLUSH);
|
|
|
|
PRUint32 numRead = sizeof(buf) - stream.avail_out;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
if (code == Z_OK || code == Z_STREAM_END) {
|
|
|
|
inflated.Append(buf, numRead);
|
|
|
|
}
|
|
|
|
} while (code == Z_OK);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
inflateEnd(&stream);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
if (code != Z_STREAM_END) {
|
|
|
|
return NS_ERROR_FAILURE;
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
chunk = inflated;
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
2008-01-12 13:32:01 -08:00
|
|
|
nsUrlClassifierStore::DeleteEntry(nsUrlClassifierEntry& entry)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
2008-02-26 21:51:28 -08:00
|
|
|
if (entry.mId == -1) {
|
2007-07-25 23:38:43 -07:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
mozStorageStatementScoper scoper(mDeleteStatement);
|
2011-03-31 10:19:31 -07:00
|
|
|
mDeleteStatement->BindInt64ByIndex(0, entry.mId);
|
2007-07-25 23:38:43 -07:00
|
|
|
nsresult rv = mDeleteStatement->Execute();
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2008-02-26 21:51:28 -08:00
|
|
|
entry.mId = -1;
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
2008-01-12 13:32:01 -08:00
|
|
|
nsUrlClassifierStore::WriteEntry(nsUrlClassifierEntry& entry)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
2009-01-16 11:19:28 -08:00
|
|
|
if (entry.mId != -1) {
|
|
|
|
// existing entry, just ignore it
|
|
|
|
return NS_OK;
|
|
|
|
}
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2009-01-16 11:19:28 -08:00
|
|
|
mozStorageStatementScoper scoper(mInsertStatement);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2009-01-16 11:19:28 -08:00
|
|
|
nsresult rv = BindStatement(entry, mInsertStatement);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2009-01-16 11:19:28 -08:00
|
|
|
rv = mInsertStatement->Execute();
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
PRInt64 rowId;
|
|
|
|
rv = mConnection->GetLastInsertRowID(&rowId);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2009-01-16 11:19:28 -08:00
|
|
|
if (rowId > PR_UINT32_MAX) {
|
|
|
|
return NS_ERROR_FAILURE;
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
2009-01-16 11:19:28 -08:00
|
|
|
entry.mId = rowId;
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2008-01-29 18:26:44 -08:00
|
|
|
nsresult
|
|
|
|
nsUrlClassifierStore::UpdateEntry(nsUrlClassifierEntry& entry)
|
|
|
|
{
|
|
|
|
mozStorageStatementScoper scoper(mUpdateStatement);
|
|
|
|
|
2008-02-26 21:51:28 -08:00
|
|
|
NS_ENSURE_ARG(entry.mId != -1);
|
2008-01-29 18:26:44 -08:00
|
|
|
|
|
|
|
nsresult rv = BindStatement(entry, mUpdateStatement);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
rv = mUpdateStatement->Execute();
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
static bool
|
2011-09-08 13:15:27 -07:00
|
|
|
IsCanonicalizedIP(const nsACString& host)
|
2007-10-01 17:21:45 -07:00
|
|
|
{
|
|
|
|
// The canonicalization process will have left IP addresses in dotted
|
|
|
|
// decimal with no surprises.
|
|
|
|
PRUint32 i1, i2, i3, i4;
|
|
|
|
char c;
|
|
|
|
if (PR_sscanf(PromiseFlatCString(host).get(), "%u.%u.%u.%u%c",
|
|
|
|
&i1, &i2, &i3, &i4, &c) == 4) {
|
2008-08-19 20:47:10 -07:00
|
|
|
return (i1 <= 0xFF && i2 <= 0xFF && i3 <= 0xFF && i4 <= 0xFF);
|
2007-10-01 17:21:45 -07:00
|
|
|
}
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
return false;
|
2007-10-01 17:21:45 -07:00
|
|
|
}
|
|
|
|
|
2011-09-08 13:17:25 -07:00
|
|
|
static nsresult
|
|
|
|
GetKey(const nsACString& spec,
|
|
|
|
nsUrlClassifierDomainHash& hash,
|
|
|
|
nsICryptoHash * aCryptoHash)
|
2007-07-25 23:38:43 -07:00
|
|
|
{
|
|
|
|
nsACString::const_iterator begin, end, iter;
|
|
|
|
spec.BeginReading(begin);
|
|
|
|
spec.EndReading(end);
|
|
|
|
|
|
|
|
iter = begin;
|
|
|
|
if (!FindCharInReadable('/', iter, end)) {
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2007-10-01 17:21:45 -07:00
|
|
|
const nsCSubstring& host = Substring(begin, iter);
|
|
|
|
|
|
|
|
if (IsCanonicalizedIP(host)) {
|
2008-01-29 12:57:18 -08:00
|
|
|
nsCAutoString key;
|
|
|
|
key.Assign(host);
|
|
|
|
key.Append("/");
|
2011-09-08 13:17:25 -07:00
|
|
|
return hash.FromPlaintext(key, aCryptoHash);
|
2007-10-01 17:21:45 -07:00
|
|
|
}
|
|
|
|
|
2009-01-21 20:15:34 -08:00
|
|
|
nsTArray<nsCString> hostComponents;
|
|
|
|
ParseString(PromiseFlatCString(host), '.', hostComponents);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2009-01-21 20:15:34 -08:00
|
|
|
if (hostComponents.Length() < 2)
|
2007-07-25 23:38:43 -07:00
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
|
2009-01-21 20:15:34 -08:00
|
|
|
PRInt32 last = PRInt32(hostComponents.Length()) - 1;
|
2007-07-25 23:38:43 -07:00
|
|
|
nsCAutoString lookupHost;
|
|
|
|
|
2009-01-21 20:15:34 -08:00
|
|
|
if (hostComponents.Length() > 2) {
|
|
|
|
lookupHost.Append(hostComponents[last - 2]);
|
2007-07-25 23:38:43 -07:00
|
|
|
lookupHost.Append(".");
|
|
|
|
}
|
|
|
|
|
2009-01-21 20:15:34 -08:00
|
|
|
lookupHost.Append(hostComponents[last - 1]);
|
2007-07-25 23:38:43 -07:00
|
|
|
lookupHost.Append(".");
|
2009-01-21 20:15:34 -08:00
|
|
|
lookupHost.Append(hostComponents[last]);
|
2007-07-25 23:38:43 -07:00
|
|
|
lookupHost.Append("/");
|
|
|
|
|
2011-09-08 13:17:25 -07:00
|
|
|
return hash.FromPlaintext(lookupHost, aCryptoHash);
|
2007-07-25 23:38:43 -07:00
|
|
|
}
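// GetShaEntries below parses the raw shavar chunk layout:
// [domain prefix (domainSize bytes)][1-byte entry count][entries ...],
// where each entry is a fragment hash of fragmentSize bytes, prefixed in
// sub chunks by a 4-byte network-order add chunk number. A count of zero
// means the domain prefix itself is the fragment (only valid when
// domainSize == fragmentSize).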
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::GetShaEntries(PRUint32 tableId,
|
|
|
|
PRUint32 chunkType,
|
|
|
|
PRUint32 chunkNum,
|
|
|
|
PRUint32 domainSize,
|
|
|
|
PRUint32 fragmentSize,
|
|
|
|
nsACString& chunk,
|
|
|
|
nsTArray<nsUrlClassifierEntry>& entries)
|
|
|
|
{
|
|
|
|
PRUint32 start = 0;
|
|
|
|
while (start + domainSize + 1 <= chunk.Length()) {
|
|
|
|
nsUrlClassifierDomainHash domain;
|
|
|
|
domain.Assign(Substring(chunk, start, DOMAIN_LENGTH));
|
|
|
|
start += domainSize;
|
|
|
|
|
|
|
|
// then there is a one-byte count of fragments
|
|
|
|
PRUint8 numEntries = static_cast<PRUint8>(chunk[start]);
|
|
|
|
start++;
|
|
|
|
|
|
|
|
if (numEntries == 0) {
|
|
|
|
// if there are no fragments, the domain itself is treated as a
|
|
|
|
// fragment. This will only work if domainHashSize == hashSize
|
|
|
|
if (domainSize != fragmentSize) {
|
|
|
|
NS_WARNING("Received 0-fragment entry where domainSize != fragmentSize");
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsUrlClassifierEntry* entry = entries.AppendElement();
|
|
|
|
if (!entry) return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
|
|
|
entry->mKey = domain;
|
|
|
|
entry->mTableId = tableId;
|
|
|
|
entry->mChunkId = chunkNum;
|
|
|
|
entry->SetHash(domain);
|
|
|
|
|
|
|
|
if (chunkType == CHUNK_SUB) {
|
|
|
|
if (start + 4 > chunk.Length()) {
|
|
|
|
// there isn't as much data as there should be.
|
|
|
|
NS_WARNING("Received a zero-entry sub chunk without an associated add.");
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
const nsCSubstring& str = Substring(chunk, start, 4);
|
2009-02-10 15:10:08 -08:00
|
|
|
PRUint32 p;
|
|
|
|
memcpy(&p, str.BeginReading(), 4);
|
|
|
|
entry->mAddChunkId = PR_ntohl(p);
|
2008-01-29 12:57:18 -08:00
|
|
|
if (entry->mAddChunkId == 0) {
|
|
|
|
NS_WARNING("Received invalid chunk number.");
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
start += 4;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
PRUint32 entrySize = fragmentSize;
|
|
|
|
if (chunkType == CHUNK_SUB) {
|
|
|
|
entrySize += 4;
|
|
|
|
}
|
|
|
|
if (start + (numEntries * entrySize) > chunk.Length()) {
|
|
|
|
// there isn't as much data as they said there would be.
|
|
|
|
NS_WARNING("Received a chunk without enough data");
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (PRUint8 i = 0; i < numEntries; i++) {
|
|
|
|
nsUrlClassifierEntry* entry = entries.AppendElement();
|
|
|
|
if (!entry) return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
|
|
|
entry->mKey = domain;
|
|
|
|
entry->mTableId = tableId;
|
|
|
|
entry->mChunkId = chunkNum;
|
|
|
|
|
|
|
|
if (chunkType == CHUNK_SUB) {
|
|
|
|
const nsCSubstring& str = Substring(chunk, start, 4);
|
2009-02-10 15:10:08 -08:00
|
|
|
PRUint32 p;
|
|
|
|
memcpy(&p, str.BeginReading(), 4);
|
|
|
|
entry->mAddChunkId = PR_ntohl(p);
|
2008-01-29 12:57:18 -08:00
|
|
|
if (entry->mAddChunkId == 0) {
|
|
|
|
NS_WARNING("Received invalid chunk number.");
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
start += 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fragmentSize == PARTIAL_LENGTH) {
|
|
|
|
nsUrlClassifierPartialHash hash;
|
|
|
|
hash.Assign(Substring(chunk, start, PARTIAL_LENGTH));
|
|
|
|
entry->SetHash(hash);
|
|
|
|
} else if (fragmentSize == COMPLETE_LENGTH) {
|
|
|
|
nsUrlClassifierCompleteHash hash;
|
|
|
|
hash.Assign(Substring(chunk, start, COMPLETE_LENGTH));
|
|
|
|
entry->SetHash(hash);
|
|
|
|
} else {
|
2011-10-17 07:59:28 -07:00
|
|
|
NS_ASSERTION(false, "Invalid fragment size!");
|
2008-01-29 12:57:18 -08:00
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
|
|
|
|
start += fragmentSize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::GetChunkEntries(const nsACString& table,
|
|
|
|
PRUint32 tableId,
|
2008-01-29 12:57:18 -08:00
|
|
|
PRUint32 chunkType,
|
2007-07-25 23:38:43 -07:00
|
|
|
PRUint32 chunkNum,
|
2008-01-29 12:57:18 -08:00
|
|
|
PRUint32 hashSize,
|
2007-07-25 23:38:43 -07:00
|
|
|
nsACString& chunk,
|
|
|
|
nsTArray<nsUrlClassifierEntry>& entries)
|
|
|
|
{
|
|
|
|
nsresult rv;
|
|
|
|
if (StringEndsWith(table, NS_LITERAL_CSTRING("-exp"))) {
|
|
|
|
// -exp (regexp) tables are zlib-compressed and need to be inflated
|
|
|
|
rv = InflateChunk(chunk);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
}
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
if (StringEndsWith(table, NS_LITERAL_CSTRING("-shavar"))) {
|
|
|
|
rv = GetShaEntries(tableId, chunkType, chunkNum, DOMAIN_LENGTH, hashSize,
|
|
|
|
chunk, entries);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
2007-07-25 23:38:43 -07:00
|
|
|
} else {
|
2009-01-21 20:15:34 -08:00
|
|
|
nsTArray<nsCString> lines;
|
|
|
|
ParseString(PromiseFlatCString(chunk), '\n', lines);
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2007-11-13 21:31:07 -08:00
|
|
|
// non-hashed tables need to be hashed
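// For sub chunks each line carries an add chunk id prefix; e.g.
// (illustrative) "123:evil.example.test/path" parses to mAddChunkId=123 and
// the entry string "evil.example.test/path".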
|
2009-01-21 20:15:34 -08:00
|
|
|
for (PRInt32 i = 0; i < PRInt32(lines.Length()); i++) {
|
2008-01-29 12:57:18 -08:00
|
|
|
nsUrlClassifierEntry *entry = entries.AppendElement();
|
|
|
|
if (!entry)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
|
|
|
nsCAutoString entryStr;
|
|
|
|
if (chunkType == CHUNK_SUB) {
|
|
|
|
nsCString::const_iterator begin, iter, end;
|
2009-01-21 20:15:34 -08:00
|
|
|
lines[i].BeginReading(begin);
|
|
|
|
lines[i].EndReading(end);
|
2008-01-29 12:57:18 -08:00
|
|
|
iter = begin;
|
|
|
|
if (!FindCharInReadable(':', iter, end) ||
|
2009-01-21 20:15:34 -08:00
|
|
|
PR_sscanf(lines[i].get(), "%d:", &entry->mAddChunkId) != 1) {
|
2008-01-29 12:57:18 -08:00
|
|
|
NS_WARNING("Received sub chunk without associated add chunk.");
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
|
|
|
iter++;
|
|
|
|
entryStr = Substring(iter, end);
|
|
|
|
} else {
|
2009-01-21 20:15:34 -08:00
|
|
|
entryStr = lines[i];
|
2008-01-28 15:04:43 -08:00
|
|
|
}
|
2008-01-28 17:58:15 -08:00
|
|
|
|
2011-09-08 13:17:25 -07:00
|
|
|
rv = GetKey(entryStr, entry->mKey, mCryptoHash);
|
2008-01-29 12:57:18 -08:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2008-01-28 17:58:15 -08:00
|
|
|
entry->mTableId = tableId;
|
2008-01-29 12:57:18 -08:00
|
|
|
entry->mChunkId = chunkNum;
|
|
|
|
if (hashSize == PARTIAL_LENGTH) {
|
|
|
|
nsUrlClassifierPartialHash hash;
|
|
|
|
hash.FromPlaintext(entryStr, mCryptoHash);
|
|
|
|
entry->SetHash(hash);
|
|
|
|
} else if (hashSize == COMPLETE_LENGTH) {
|
|
|
|
nsUrlClassifierCompleteHash hash;
|
|
|
|
hash.FromPlaintext(entryStr, mCryptoHash);
|
|
|
|
entry->SetHash(hash);
|
|
|
|
} else {
|
2011-10-17 07:59:28 -07:00
|
|
|
NS_ASSERTION(false, "Invalid fragment size!");
|
2008-01-29 12:57:18 -08:00
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
2007-11-13 21:31:07 -08:00
|
|
|
}
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool
|
2008-02-15 14:47:16 -08:00
|
|
|
nsUrlClassifierDBServiceWorker::ParseChunkRange(nsACString::const_iterator &begin,
|
|
|
|
const nsACString::const_iterator &end,
|
2007-12-04 16:16:41 -08:00
|
|
|
PRUint32 *first,
|
|
|
|
PRUint32 *last)
|
|
|
|
{
|
2008-02-15 14:47:16 -08:00
|
|
|
nsACString::const_iterator iter = begin;
|
|
|
|
FindCharInReadable(',', iter, end);
|
|
|
|
|
|
|
|
nsCAutoString element(Substring(begin, iter));
|
|
|
|
begin = iter;
|
|
|
|
if (begin != end)
|
|
|
|
begin++;
|
|
|
|
|
|
|
|
PRUint32 numRead = PR_sscanf(element.get(), "%u-%u", first, last);
|
2007-12-04 16:16:41 -08:00
|
|
|
if (numRead == 2) {
|
|
|
|
if (*first > *last) {
|
|
|
|
PRUint32 tmp = *first;
|
|
|
|
*first = *last;
|
|
|
|
*last = tmp;
|
|
|
|
}
|
2011-10-17 07:59:28 -07:00
|
|
|
return true;
|
2007-12-04 16:16:41 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (numRead == 1) {
|
|
|
|
*last = *first;
|
2011-10-17 07:59:28 -07:00
|
|
|
return true;
|
2007-12-04 16:16:41 -08:00
|
|
|
}
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
return false;
|
2007-12-04 16:16:41 -08:00
|
|
|
}
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::ParseChunkList(const nsACString& chunkStr,
|
|
|
|
nsTArray<PRUint32>& chunks)
|
|
|
|
{
|
|
|
|
LOG(("Parsing %s", PromiseFlatCString(chunkStr).get()));
|
|
|
|
|
2008-02-15 14:47:16 -08:00
|
|
|
nsACString::const_iterator begin, end;
|
|
|
|
chunkStr.BeginReading(begin);
|
|
|
|
chunkStr.EndReading(end);
|
|
|
|
while (begin != end) {
|
|
|
|
PRUint32 first, last;
|
|
|
|
if (ParseChunkRange(begin, end, &first, &last)) {
|
2007-07-25 23:38:43 -07:00
|
|
|
for (PRUint32 num = first; num <= last; num++) {
|
|
|
|
chunks.AppendElement(num);
|
|
|
|
}
|
|
|
|
}
|
2007-11-13 21:31:07 -08:00
|
|
|
}
|
2007-07-25 23:38:43 -07:00
|
|
|
|
|
|
|
LOG(("Got %d elements.", chunks.Length()));
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::JoinChunkList(nsTArray<PRUint32>& chunks,
|
|
|
|
nsCString& chunkStr)
|
|
|
|
{
|
|
|
|
chunkStr.Truncate();
|
|
|
|
chunks.Sort();
|
|
|
|
|
|
|
|
PRUint32 i = 0;
|
|
|
|
while (i < chunks.Length()) {
|
|
|
|
if (i != 0) {
|
|
|
|
chunkStr.Append(',');
|
|
|
|
}
|
|
|
|
chunkStr.AppendInt(chunks[i]);
|
|
|
|
|
|
|
|
PRUint32 first = i;
|
|
|
|
PRUint32 last = first;
|
|
|
|
i++;
|
2008-05-02 02:55:09 -07:00
|
|
|
while (i < chunks.Length() && (chunks[i] == chunks[i - 1] + 1 || chunks[i] == chunks[i - 1])) {
|
|
|
|
last = i++;
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (last != first) {
|
|
|
|
chunkStr.Append('-');
|
2008-05-02 02:55:09 -07:00
|
|
|
chunkStr.AppendInt(chunks[last]);
|
2007-07-25 23:38:43 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
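// Illustrative round trip: ParseChunkList("1-3,5-6,10") produces
// [1, 2, 3, 5, 6, 10], and JoinChunkList turns that array back into
// "1-3,5-6,10".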
|
|
|
|
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::GetChunkLists(PRUint32 tableId,
|
|
|
|
nsACString& addChunks,
|
|
|
|
nsACString& subChunks)
|
|
|
|
{
|
|
|
|
addChunks.Truncate();
|
|
|
|
subChunks.Truncate();
|
|
|
|
|
|
|
|
mozStorageStatementScoper scoper(mGetChunkListsStatement);
|
|
|
|
|
2011-03-31 10:19:31 -07:00
|
|
|
nsresult rv = mGetChunkListsStatement->BindInt32ByIndex(0, tableId);
|
2007-07-25 23:38:43 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool hasMore = false;
|
2007-07-25 23:38:43 -07:00
|
|
|
rv = mGetChunkListsStatement->ExecuteStep(&hasMore);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
if (!hasMore) {
|
|
|
|
LOG(("Getting chunks for %d, found nothing", tableId));
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
rv = mGetChunkListsStatement->GetUTF8String(0, addChunks);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
rv = mGetChunkListsStatement->GetUTF8String(1, subChunks);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2008-10-19 20:48:25 -07:00
|
|
|
LOG(("Getting chunks for %d, got %s/%s",
|
2007-07-25 23:38:43 -07:00
|
|
|
tableId,
|
|
|
|
PromiseFlatCString(addChunks).get(),
|
|
|
|
PromiseFlatCString(subChunks).get()));
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::SetChunkLists(PRUint32 tableId,
|
|
|
|
const nsACString& addChunks,
|
|
|
|
const nsACString& subChunks)
|
|
|
|
{
|
|
|
|
mozStorageStatementScoper scoper(mSetChunkListsStatement);
|
|
|
|
|
2011-03-31 10:19:31 -07:00
|
|
|
mSetChunkListsStatement->BindUTF8StringByIndex(0, addChunks);
|
|
|
|
mSetChunkListsStatement->BindUTF8StringByIndex(1, subChunks);
|
|
|
|
mSetChunkListsStatement->BindInt32ByIndex(2, tableId);
|
2007-07-25 23:38:43 -07:00
|
|
|
nsresult rv = mSetChunkListsStatement->Execute();
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2007-12-03 20:49:22 -08:00
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::CacheChunkLists(PRUint32 tableId,
|
2011-09-28 23:19:26 -07:00
|
|
|
bool parseAdds,
|
|
|
|
bool parseSubs)
|
2007-12-03 20:49:22 -08:00
|
|
|
{
|
|
|
|
nsresult rv;
|
|
|
|
|
|
|
|
if (mHaveCachedLists && mCachedListsTable != tableId) {
|
|
|
|
rv = FlushChunkLists();
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!mHaveCachedLists) {
|
|
|
|
rv = GetChunkLists(tableId, mCachedAddsStr, mCachedSubsStr);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
mHaveCachedLists = true;
|
2007-12-03 20:49:22 -08:00
|
|
|
mCachedListsTable = tableId;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (parseAdds && !mHaveCachedAddChunks) {
|
|
|
|
ParseChunkList(mCachedAddsStr, mCachedAddChunks);
|
2011-10-17 07:59:28 -07:00
|
|
|
mHaveCachedAddChunks = true;
|
2007-12-03 20:49:22 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (parseSubs && !mHaveCachedSubChunks) {
|
|
|
|
ParseChunkList(mCachedSubsStr, mCachedSubChunks);
|
2011-10-17 07:59:28 -07:00
|
|
|
mHaveCachedSubChunks = true;
|
2007-12-03 20:49:22 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBServiceWorker::FlushChunkLists()
|
|
|
|
{
|
|
|
|
if (!mHaveCachedLists) {
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mHaveCachedAddChunks) {
|
|
|
|
JoinChunkList(mCachedAddChunks, mCachedAddsStr);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mHaveCachedSubChunks) {
|
|
|
|
JoinChunkList(mCachedSubChunks, mCachedSubsStr);
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult rv = SetChunkLists(mCachedListsTable,
|
|
|
|
mCachedAddsStr, mCachedSubsStr);
|
2008-01-29 12:57:18 -08:00
|
|
|
|
2007-12-03 20:49:22 -08:00
|
|
|
// clear out the cache before checking/returning the error here.
|
2008-01-29 12:57:18 -08:00
|
|
|
ClearCachedChunkLists();
|
2007-12-03 20:49:22 -08:00
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nsUrlClassifierDBServiceWorker::ClearCachedChunkLists()
|
|
|
|
{
|
2007-12-03 20:49:22 -08:00
|
|
|
mCachedAddsStr.Truncate();
|
|
|
|
mCachedSubsStr.Truncate();
|
|
|
|
mCachedListsTable = PR_UINT32_MAX;
|
2011-10-17 07:59:28 -07:00
|
|
|
mHaveCachedLists = false;
|
2007-12-03 20:49:22 -08:00
|
|
|
|
|
|
|
mCachedAddChunks.Clear();
|
2011-10-17 07:59:28 -07:00
|
|
|
mHaveCachedAddChunks = false;
|
2007-12-03 20:49:22 -08:00
|
|
|
|
|
|
|
mCachedSubChunks.Clear();
|
2011-10-17 07:59:28 -07:00
|
|
|
mHaveCachedSubChunks = false;
|
2007-12-03 20:49:22 -08:00
|
|
|
}

bool
nsUrlClassifierDBServiceWorker::InsertChunkId(nsTArray<PRUint32> &chunks,
                                              PRUint32 chunkNum)
{
  PRUint32 low = 0, high = chunks.Length();
  while (high > low) {
    PRUint32 mid = (high + low) >> 1;
    if (chunks[mid] == chunkNum)
      return false;
    if (chunks[mid] < chunkNum)
      low = mid + 1;
    else
      high = mid;
  }

  PRUint32 *item = chunks.InsertElementAt(low, chunkNum);
  return (item != nsnull);
}
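
// Illustrative example (not part of the original source): InsertChunkId keeps
// the chunk array sorted and rejects duplicates via the binary search above.
// With chunks = {2, 5, 9}:
//
//   InsertChunkId(chunks, 7)  -> inserted at index 2, chunks = {2, 5, 7, 9}, returns true
//   InsertChunkId(chunks, 5)  -> duplicate found mid-search, returns false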

nsresult
nsUrlClassifierDBServiceWorker::AddChunk(PRUint32 tableId,
                                         PRUint32 chunkNum,
                                         nsTArray<nsUrlClassifierEntry>& entries)
{
#if defined(PR_LOGGING)
  PRIntervalTime clockStart = 0;
  if (LOG_ENABLED()) {
    clockStart = PR_IntervalNow();
  }
#endif

  nsresult rv = CacheChunkLists(tableId, true, false);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!InsertChunkId(mCachedAddChunks, chunkNum)) {
    LOG(("Ignoring duplicate add chunk %d in table %d", chunkNum, tableId));
    return NS_OK;
  }

  LOG(("Adding %d entries to chunk %d in table %d", entries.Length(), chunkNum, tableId));

  nsTArray<PRUint32> entryIDs;

  nsAutoTArray<nsUrlClassifierEntry, 5> subEntries;
  rv = mPendingSubStore.ReadSubEntries(tableId, chunkNum, subEntries);
  NS_ENSURE_SUCCESS(rv, rv);

  for (PRUint32 i = 0; i < entries.Length(); i++) {
    nsUrlClassifierEntry& thisEntry = entries[i];

    HandlePendingLookups();

    bool writeEntry = true;
    for (PRUint32 j = 0; j < subEntries.Length(); j++) {
      if (thisEntry.SubMatch(subEntries[j])) {
        subEntries.RemoveElementAt(j);
        writeEntry = false;
        break;
      }
    }

    HandlePendingLookups();

    if (writeEntry) {
      rv = mMainStore.WriteEntry(thisEntry);
      NS_ENSURE_SUCCESS(rv, rv);
    }
  }

  rv = mPendingSubStore.ExpireAddChunk(tableId, chunkNum);
  NS_ENSURE_SUCCESS(rv, rv);

#if defined(PR_LOGGING)
  if (LOG_ENABLED()) {
    PRIntervalTime clockEnd = PR_IntervalNow();
    LOG(("adding chunk %d took %dms\n", chunkNum,
         PR_IntervalToMilliseconds(clockEnd - clockStart)));
  }
#endif

  return rv;
}
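
// Illustrative note (not part of the original source): sub chunks can arrive
// before the add chunk they refer to.  Those subs wait in mPendingSubStore;
// when the add chunk finally shows up, any entry with a matching pending sub
// is simply skipped above instead of being written and deleted later.  For
// example, if a sub chunk already recorded "sub against add chunk 7, hash H",
// then while adding chunk 7 an entry with hash H is dropped, and the pending
// subs for chunk 7 are expired at the end of this function.  The chunk
// number and hash are made up for illustration.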

nsresult
nsUrlClassifierStore::Expire(PRUint32 tableId, PRUint32 chunkNum)
{
  LOG(("Expiring chunk %d\n", chunkNum));

  mozStorageStatementScoper expireScoper(mExpireStatement);

  nsresult rv = mExpireStatement->BindInt32ByIndex(0, tableId);
  NS_ENSURE_SUCCESS(rv, rv);
  rv = mExpireStatement->BindInt32ByIndex(1, chunkNum);
  NS_ENSURE_SUCCESS(rv, rv);

  mWorker->HandlePendingLookups();

  rv = mExpireStatement->Execute();
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}

nsresult
nsUrlClassifierDBServiceWorker::ExpireAdd(PRUint32 tableId,
                                          PRUint32 chunkNum)
{
  nsresult rv = CacheChunkLists(tableId, true, false);
  NS_ENSURE_SUCCESS(rv, rv);
  mCachedAddChunks.RemoveElement(chunkNum);

  return mMainStore.Expire(tableId, chunkNum);
}

nsresult
nsUrlClassifierDBServiceWorker::SubChunk(PRUint32 tableId,
                                         PRUint32 chunkNum,
                                         nsTArray<nsUrlClassifierEntry>& entries)
{
  nsresult rv = CacheChunkLists(tableId, true, true);

  if (!InsertChunkId(mCachedSubChunks, chunkNum)) {
    LOG(("Ignoring duplicate sub chunk %d in table %d", chunkNum, tableId));
    return NS_OK;
  }

  LOG(("Subbing %d entries in chunk %d in table %d", entries.Length(), chunkNum, tableId));

  for (PRUint32 i = 0; i < entries.Length(); i++) {
    nsAutoTArray<nsUrlClassifierEntry, 5> existingEntries;
    nsUrlClassifierEntry& thisEntry = entries[i];

    HandlePendingLookups();

    // Check if we have the add chunk associated with the sub.
    bool haveAdds = (mCachedAddChunks.BinaryIndexOf(thisEntry.mAddChunkId) !=
                     mCachedAddChunks.NoIndex);

    if (haveAdds) {
      rv = mMainStore.ReadAddEntries(thisEntry.mKey, thisEntry.mTableId,
                                     thisEntry.mAddChunkId, existingEntries);
      NS_ENSURE_SUCCESS(rv, rv);
    }

    for (PRUint32 j = 0; j < existingEntries.Length(); j++) {
      if (existingEntries[j].SubMatch(thisEntry)) {
        rv = mMainStore.DeleteEntry(existingEntries[j]);
        NS_ENSURE_SUCCESS(rv, rv);
        existingEntries.RemoveElementAt(j);
        break;
      }
    }

    if (!haveAdds) {
      // Save this entry in the pending subtraction store.
      rv = mPendingSubStore.WriteEntry(thisEntry);
      NS_ENSURE_SUCCESS(rv, rv);
    }
  }

  return NS_OK;
}

nsresult
nsUrlClassifierDBServiceWorker::ExpireSub(PRUint32 tableId, PRUint32 chunkNum)
{
  nsresult rv = CacheChunkLists(tableId, false, true);
  NS_ENSURE_SUCCESS(rv, rv);
  mCachedSubChunks.RemoveElement(chunkNum);

  return mPendingSubStore.Expire(tableId, chunkNum);
}

nsresult
nsUrlClassifierDBServiceWorker::ProcessChunk(bool* done)
{
  // wait until the chunk has been read
  if (mPendingStreamUpdate.Length() < static_cast<PRUint32>(mChunkLen)) {
    *done = true;
    return NS_OK;
  }

  nsCAutoString chunk;
  chunk.Assign(Substring(mPendingStreamUpdate, 0, mChunkLen));
  mPendingStreamUpdate = Substring(mPendingStreamUpdate, mChunkLen);

  LOG(("Handling a chunk sized %d", chunk.Length()));

  nsTArray<nsUrlClassifierEntry> entries;
  nsresult rv = GetChunkEntries(mUpdateTable, mUpdateTableId, mChunkType,
                                mChunkNum, mHashSize, chunk, entries);
  NS_ENSURE_SUCCESS(rv, rv);

  if (mChunkType == CHUNK_ADD) {
    rv = AddChunk(mUpdateTableId, mChunkNum, entries);
  } else {
    rv = SubChunk(mUpdateTableId, mChunkNum, entries);
  }

  mState = STATE_LINE;
  *done = false;

  return rv;
}

nsresult
nsUrlClassifierDBServiceWorker::ProcessResponseLines(bool* done)
{
  PRUint32 cur = 0;
  PRInt32 next;

  nsresult rv;
  // We will run to completion unless we find a chunk line
  *done = true;

  nsACString& updateString = mPendingStreamUpdate;

  while (cur < updateString.Length() &&
         (next = updateString.FindChar('\n', cur)) != kNotFound) {
    const nsCSubstring& line = Substring(updateString, cur, next - cur);
    cur = next + 1;

    LOG(("Processing %s\n", PromiseFlatCString(line).get()));

    if (mHMAC && mServerMAC.IsEmpty()) {
      // If we did not receive a server MAC during BeginStream(), we
      // require the first line of the update to be either a MAC or
      // a request to rekey.
      if (StringBeginsWith(line, NS_LITERAL_CSTRING("m:"))) {
        mServerMAC = Substring(line, 2);
        nsUrlClassifierUtils::UnUrlsafeBase64(mServerMAC);

        // The remainder of the pending update needs to be digested.
        const nsCSubstring &toDigest = Substring(updateString, cur);
        rv = mHMAC->Update(reinterpret_cast<const PRUint8*>(toDigest.BeginReading()),
                           toDigest.Length());
        NS_ENSURE_SUCCESS(rv, rv);
      } else if (line.EqualsLiteral("e:pleaserekey")) {
        mUpdateObserver->RekeyRequested();
      } else {
        LOG(("No MAC specified!"));
        return NS_ERROR_FAILURE;
      }
    } else if (StringBeginsWith(line, NS_LITERAL_CSTRING("n:"))) {
      if (PR_sscanf(PromiseFlatCString(line).get(), "n:%d",
                    &mUpdateWait) != 1) {
        LOG(("Error parsing n: field: %s", PromiseFlatCString(line).get()));
        mUpdateWait = 0;
      }
    } else if (line.EqualsLiteral("r:pleasereset")) {
      mResetRequested = true;
    } else if (line.EqualsLiteral("e:pleaserekey")) {
      mUpdateObserver->RekeyRequested();
    } else if (StringBeginsWith(line, NS_LITERAL_CSTRING("i:"))) {
      mUpdateTable.Assign(Substring(line, 2));
      GetTableId(mUpdateTable, &mUpdateTableId);
      LOG(("update table: '%s' (%d)", mUpdateTable.get(), mUpdateTableId));
    } else if (StringBeginsWith(line, NS_LITERAL_CSTRING("u:"))) {
      if (!mPrimaryStream) {
        LOG(("Forwarded update tried to add its own forwarded update."));
        return NS_ERROR_FAILURE;
      }

      const nsCSubstring& data = Substring(line, 2);
      if (mHMAC) {
        // We're expecting MACs alongside any url forwards.
        nsCSubstring::const_iterator begin, end, sepBegin, sepEnd;
        data.BeginReading(begin);
        sepBegin = begin;

        data.EndReading(end);
        sepEnd = end;

        if (!RFindInReadable(NS_LITERAL_CSTRING(","), sepBegin, sepEnd)) {
          NS_WARNING("No MAC specified for a redirect in a request that expects a MAC");
          return NS_ERROR_FAILURE;
        }

        nsCString serverMAC(Substring(sepEnd, end));
        nsUrlClassifierUtils::UnUrlsafeBase64(serverMAC);
        mUpdateObserver->UpdateUrlRequested(Substring(begin, sepBegin),
                                            mUpdateTable,
                                            serverMAC);
      } else {
        // We didn't ask for a MAC, none should have been specified.
        mUpdateObserver->UpdateUrlRequested(data, mUpdateTable,
                                            NS_LITERAL_CSTRING(""));
      }
    } else if (StringBeginsWith(line, NS_LITERAL_CSTRING("a:")) ||
               StringBeginsWith(line, NS_LITERAL_CSTRING("s:"))) {
      mState = STATE_CHUNK;
      char command;
      if (PR_sscanf(PromiseFlatCString(line).get(),
                    "%c:%d:%d:%d", &command, &mChunkNum, &mHashSize, &mChunkLen) != 4) {
        return NS_ERROR_FAILURE;
      }

      if (mChunkLen > MAX_CHUNK_SIZE) {
        return NS_ERROR_FAILURE;
      }

      if (!(mHashSize == PARTIAL_LENGTH || mHashSize == COMPLETE_LENGTH)) {
        NS_WARNING("Invalid hash size specified in update.");
        return NS_ERROR_FAILURE;
      }

      mChunkType = (command == 'a') ? CHUNK_ADD : CHUNK_SUB;

      // Done parsing lines, move to chunk state now
      *done = false;
      break;
    } else if (StringBeginsWith(line, NS_LITERAL_CSTRING("ad:"))) {
      const nsCSubstring &list = Substring(line, 3);
      nsACString::const_iterator begin, end;
      list.BeginReading(begin);
      list.EndReading(end);
      while (begin != end) {
        PRUint32 first, last;
        if (ParseChunkRange(begin, end, &first, &last)) {
          for (PRUint32 num = first; num <= last; num++) {
            rv = ExpireAdd(mUpdateTableId, num);
            NS_ENSURE_SUCCESS(rv, rv);
          }
        } else {
          return NS_ERROR_FAILURE;
        }
      }
    } else if (StringBeginsWith(line, NS_LITERAL_CSTRING("sd:"))) {
      const nsCSubstring &list = Substring(line, 3);
      nsACString::const_iterator begin, end;
      list.BeginReading(begin);
      list.EndReading(end);
      while (begin != end) {
        PRUint32 first, last;
        if (ParseChunkRange(begin, end, &first, &last)) {
          for (PRUint32 num = first; num <= last; num++) {
            rv = ExpireSub(mUpdateTableId, num);
            NS_ENSURE_SUCCESS(rv, rv);
          }
        } else {
          return NS_ERROR_FAILURE;
        }
      }
    } else {
      LOG(("ignoring unknown line: '%s'", PromiseFlatCString(line).get()));
    }
  }

  mPendingStreamUpdate = Substring(updateString, cur);

  return NS_OK;
}
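
// Illustrative example (not part of the original source): a forwarded-update
// line in a MACed request carries the redirect URL and its MAC separated by
// the last comma, e.g.
//
//   u:cache.example.com/safebrowsing/rd/chunk_123,bWFjdmFsdWU
//
// Everything before the final ',' is handed to UpdateUrlRequested() as the
// fetch URL; the remainder, converted back from its URL-safe base64 form,
// becomes the expected server MAC for that stream.  The host, path and MAC
// above are made up.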

void
nsUrlClassifierDBServiceWorker::ResetStream()
{
  mState = STATE_LINE;
  mChunkNum = 0;
  mHashSize = 0;
  mChunkLen = 0;
  mInStream = false;
  mPrimaryStream = false;
  mUpdateTable.Truncate();
  mPendingStreamUpdate.Truncate();
  mServerMAC.Truncate();
  mHMAC = nsnull;
}

void
nsUrlClassifierDBServiceWorker::ResetUpdate()
{
  mUpdateWait = 0;
  mUpdateStatus = NS_OK;
  mUpdateObserver = nsnull;
  mUpdateClientKey.Truncate();
  mResetRequested = false;
  mUpdateTables.Clear();
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::SetHashCompleter(const nsACString &tableName,
                                                 nsIUrlClassifierHashCompleter *completer)
{
  return NS_ERROR_NOT_IMPLEMENTED;
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::BeginUpdate(nsIUrlClassifierUpdateObserver *observer,
                                            const nsACString &tables,
                                            const nsACString &clientKey)
{
  if (gShuttingDownThread)
    return NS_ERROR_NOT_INITIALIZED;

  NS_ENSURE_STATE(!mUpdateObserver);

  nsresult rv = OpenDb();
  if (NS_FAILED(rv)) {
    NS_ERROR("Unable to open database");
    return NS_ERROR_FAILURE;
  }

  bool transaction;
  rv = mConnection->GetTransactionInProgress(&transaction);
  if (NS_FAILED(rv)) {
    mUpdateStatus = rv;
    return rv;
  }

  if (transaction) {
    NS_WARNING("Transaction already in progress in nsUrlClassifierDBServiceWorker::BeginUpdate. Cancelling update.");
    mUpdateStatus = NS_ERROR_FAILURE;
    return rv;
  }

  rv = SetupUpdate();
  if (NS_FAILED(rv)) {
    mUpdateStatus = rv;
    return rv;
  }

  mUpdateObserver = observer;

  if (!clientKey.IsEmpty()) {
    rv = nsUrlClassifierUtils::DecodeClientKey(clientKey, mUpdateClientKey);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  // The first stream in an update is the only stream that may request
  // forwarded updates.
  mPrimaryStream = true;

  SplitTables(tables, mUpdateTables);

  return NS_OK;
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::BeginStream(const nsACString &table,
                                            const nsACString &serverMAC)
{
  if (gShuttingDownThread)
    return NS_ERROR_NOT_INITIALIZED;

  NS_ENSURE_STATE(mUpdateObserver);
  NS_ENSURE_STATE(!mInStream);

  // We may have committed the update in FinishStream, if so set it up
  // again here.
  nsresult rv = SetupUpdate();
  if (NS_FAILED(rv)) {
    mUpdateStatus = rv;
    return rv;
  }

  mInStream = true;

  // If we're expecting a MAC, create the nsICryptoHMAC component now.
  if (!mUpdateClientKey.IsEmpty()) {
    nsCOMPtr<nsIKeyObjectFactory> keyObjectFactory(do_GetService(
        "@mozilla.org/security/keyobjectfactory;1", &rv));
    if (NS_FAILED(rv)) {
      NS_WARNING("Failed to get nsIKeyObjectFactory service");
      mUpdateStatus = rv;
      return mUpdateStatus;
    }

    nsCOMPtr<nsIKeyObject> keyObject;
    rv = keyObjectFactory->KeyFromString(nsIKeyObject::HMAC, mUpdateClientKey,
                                         getter_AddRefs(keyObject));
    if (NS_FAILED(rv)) {
      NS_WARNING("Failed to create key object, maybe not FIPS compliant?");
      mUpdateStatus = rv;
      return mUpdateStatus;
    }

    mHMAC = do_CreateInstance(NS_CRYPTO_HMAC_CONTRACTID, &rv);
    if (NS_FAILED(rv)) {
      NS_WARNING("Failed to create nsICryptoHMAC instance");
      mUpdateStatus = rv;
      return mUpdateStatus;
    }

    rv = mHMAC->Init(nsICryptoHMAC::SHA1, keyObject);
    if (NS_FAILED(rv)) {
      NS_WARNING("Failed to initialize nsICryptoHMAC instance");
      mUpdateStatus = rv;
      return mUpdateStatus;
    }
  }

  mServerMAC = serverMAC;

  if (!table.IsEmpty()) {
    mUpdateTable = table;
    GetTableId(mUpdateTable, &mUpdateTableId);
    LOG(("update table: '%s' (%d)", mUpdateTable.get(), mUpdateTableId));
  }

  return NS_OK;
}

/**
 * Updating the database:
 *
 * The Update() method takes a series of chunks separated with control data,
 * as described in
 * http://code.google.com/p/google-safe-browsing/wiki/Protocolv2Spec
 *
 * It will iterate through the control data until it reaches a chunk.  By
 * the time it reaches a chunk, it should have received
 * a) the table to which this chunk applies
 * b) the type of chunk (add, delete, expire add, expire delete)
 * c) the chunk ID
 * d) the length of the chunk.
 *
 * For add and subtract chunks, it needs to read the chunk data (expires
 * don't have any data).  Chunk data is a list of URI fragments whose
 * encoding depends on the type of table (which is indicated by the end
 * of the table name):
 * a) tables ending with -exp are a zlib-compressed list of URI fragments
 *    separated by newlines.
 * b) tables ending with -sha128 have the form
 *    [domain][N][frag0]...[fragN]
 *       16     1    16       16
 *    If N is 0, the domain is reused as a fragment.
 * c) any other tables are assumed to be a plaintext list of URI fragments
 *    separated by newlines.
 *
 * Update() can be fed partial data; it will accumulate data until there is
 * enough to act on.  Finish() should be called when there will be no more
 * data.
 */
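
// Illustrative example (not part of the original source): a small update body
// in the format described above might look like
//
//   n:1800
//   i:goog-malware-shavar
//   a:1234:32:123
//   [123 bytes of chunk data]
//   s:5678:32:98
//   [98 bytes of chunk data]
//   ad:1-3,7
//
// i.e. a wait time, the table the stream applies to, an add chunk and a sub
// chunk with their chunk number, hash size and byte length, and an "ad:"
// expiry for add chunks 1-3 and 7.  The table name and all numbers are made
// up for illustration.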

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::UpdateStream(const nsACString& chunk)
{
  if (gShuttingDownThread)
    return NS_ERROR_NOT_INITIALIZED;

  NS_ENSURE_STATE(mInStream);

  HandlePendingLookups();

  LOG(("Update from Stream."));
  nsresult rv = OpenDb();
  if (NS_FAILED(rv)) {
    NS_ERROR("Unable to open database");
    return NS_ERROR_FAILURE;
  }

  // if something has gone wrong during this update, just throw it away
  if (NS_FAILED(mUpdateStatus)) {
    return mUpdateStatus;
  }

  if (mHMAC && !mServerMAC.IsEmpty()) {
    rv = mHMAC->Update(reinterpret_cast<const PRUint8*>(chunk.BeginReading()),
                       chunk.Length());
    if (NS_FAILED(rv)) {
      mUpdateStatus = rv;
      return mUpdateStatus;
    }
  }

  LOG(("Got %s\n", PromiseFlatCString(chunk).get()));

  mPendingStreamUpdate.Append(chunk);

  bool done = false;
  while (!done) {
    if (mState == STATE_CHUNK) {
      rv = ProcessChunk(&done);
    } else {
      rv = ProcessResponseLines(&done);
    }
    if (NS_FAILED(rv)) {
      mUpdateStatus = rv;
      return rv;
    }
  }

  return NS_OK;
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::FinishStream()
{
  if (gShuttingDownThread)
    return NS_ERROR_NOT_INITIALIZED;

  NS_ENSURE_STATE(mInStream);
  NS_ENSURE_STATE(mUpdateObserver);

  PRInt32 nextStreamDelay = 0;

  if (NS_SUCCEEDED(mUpdateStatus) && mHMAC) {
    nsCAutoString clientMAC;
    mHMAC->Finish(true, clientMAC);

    if (clientMAC != mServerMAC) {
      NS_WARNING("Invalid update MAC!");
      LOG(("Invalid update MAC: expected %s, got %s",
           mServerMAC.get(), clientMAC.get()));
      mUpdateStatus = NS_ERROR_FAILURE;
    }
    PRIntervalTime updateTime = PR_IntervalNow() - mUpdateStartTime;
    if (PR_IntervalToSeconds(updateTime) >=
        static_cast<PRUint32>(gWorkingTimeThreshold)) {
      // We've spent long enough working that we should commit what we
      // have and hold off for a bit.
      nsresult rv = ApplyUpdate();
      if (NS_FAILED(rv)) {
        if (rv == NS_ERROR_FILE_CORRUPTED) {
          ResetDatabase();
        }
        return rv;
      }
      nextStreamDelay = gDelayTime * 1000;
    }
  }

  mUpdateObserver->StreamFinished(mUpdateStatus,
                                  static_cast<PRUint32>(nextStreamDelay));

  ResetStream();

  return NS_OK;
}
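
// Illustrative note (not part of the original source): the commit-and-delay
// logic above is driven by two prefs read elsewhere in this file.  For
// example, if gWorkingTimeThreshold were 60 (seconds) and gDelayTime were 5
// (seconds), a stream that finishes after 75 s of work would commit the
// transaction via ApplyUpdate() and report nextStreamDelay = 5000 ms, asking
// the update observer to wait five seconds before fetching the next
// forwarded stream.  The concrete values are only for illustration.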

nsresult
nsUrlClassifierDBServiceWorker::SetCacheSize(
  mozIStorageConnection * aConnection, PRInt32 aCacheSize)
{
  mozStorageStatementScoper scoper(mGetPageSizeStatement);
  bool hasResult;
  nsresult rv = mGetPageSizeStatement->ExecuteStep(&hasResult);
  NS_ENSURE_SUCCESS(rv, rv);

  NS_ASSERTION(hasResult, "Should always be able to get page size from sqlite");
  PRUint32 pageSize = mGetPageSizeStatement->AsInt32(0);
  PRUint32 cachePages = aCacheSize / pageSize;
  nsCAutoString cacheSizePragma("PRAGMA cache_size=");
  cacheSizePragma.AppendInt(cachePages);
  rv = aConnection->ExecuteSimpleSQL(cacheSizePragma);
  NS_ENSURE_SUCCESS(rv, rv);

  return NS_OK;
}
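
// Worked example (not part of the original source): with a SQLite page size
// of 4096 bytes, asking for a 30 MB cache (aCacheSize = 30 * 1024 * 1024)
// yields 31457280 / 4096 = 7680 pages, so the statement executed above is
// "PRAGMA cache_size=7680".  The 4096-byte page size is only an assumption
// for the example; the code always reads the real value via "PRAGMA page_size".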

nsresult
nsUrlClassifierDBServiceWorker::SetupUpdate()
{
  LOG(("nsUrlClassifierDBServiceWorker::SetupUpdate"));
  bool inProgress;
  nsresult rv = mConnection->GetTransactionInProgress(&inProgress);
  if (inProgress) {
    return NS_OK;
  }

  mUpdateStartTime = PR_IntervalNow();

  rv = mConnection->BeginTransaction();
  NS_ENSURE_SUCCESS(rv, rv);

  if (gUpdateCacheSize > 0) {
    rv = SetCacheSize(mConnection, gUpdateCacheSize);
    NS_ENSURE_SUCCESS(rv, rv);
    if (gUpdateCacheSize != gLookupCacheSize) {
      mGrewCache = true;
    }
  }

  return NS_OK;
}

nsresult
nsUrlClassifierDBServiceWorker::ApplyUpdate()
{
  LOG(("nsUrlClassifierDBServiceWorker::ApplyUpdate"));

  if (mConnection) {
    if (NS_FAILED(mUpdateStatus)) {
      mConnection->RollbackTransaction();
    } else {
      mUpdateStatus = FlushChunkLists();
      if (NS_SUCCEEDED(mUpdateStatus)) {
        mUpdateStatus = mConnection->CommitTransaction();
      }
    }
  }

  if (NS_SUCCEEDED(mUpdateStatus)) {
    // Reconstruct the prefix tree from the DB
    nsresult rv = ConstructPrefixSet();
    NS_ENSURE_SUCCESS(rv, rv);
  }

  if (mGrewCache) {
    // During the update we increased the page cache to bigger than we
    // want to keep around. At the moment, the only reliable way to make
    // sure that the page cache is freed is to reopen the connection.
    LOG(("GrewCache true, reopening DB"));
    mGrewCache = false;
    CloseDb();
    OpenDb();
  }

  mUpdateStartTime = 0;

  return NS_OK;
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::FinishUpdate()
{
  LOG(("nsUrlClassifierDBServiceWorker::FinishUpdate()"));
  if (gShuttingDownThread)
    return NS_ERROR_NOT_INITIALIZED;

  NS_ENSURE_STATE(!mInStream);
  NS_ENSURE_STATE(mUpdateObserver);

  // We need to get the error code before ApplyUpdate, because it might
  // close/open the connection.
  PRInt32 errcode = SQLITE_OK;
  if (mConnection)
    mConnection->GetLastError(&errcode);

  nsresult rv = ApplyUpdate();
  if (NS_FAILED(rv)) {
    if (rv == NS_ERROR_FILE_CORRUPTED) {
      ResetDatabase();
    }
    return rv;
  }

  if (NS_SUCCEEDED(mUpdateStatus)) {
    mUpdateObserver->UpdateSuccess(mUpdateWait);
  } else {
    mUpdateObserver->UpdateError(mUpdateStatus);
  }

  // It's important that we only reset the database on an update
  // command if the update was successful, otherwise unauthenticated
  // updates could cause a database reset.
  bool resetDB = (NS_SUCCEEDED(mUpdateStatus) && mResetRequested) ||
                 errcode == SQLITE_CORRUPT;

  if (!resetDB) {
    if (NS_SUCCEEDED(mUpdateStatus)) {
      PRInt64 now = (PR_Now() / PR_USEC_PER_SEC);
      for (PRUint32 i = 0; i < mUpdateTables.Length(); i++) {
        LOG(("Successfully updated %s", mUpdateTables[i].get()));
        mTableFreshness.Put(mUpdateTables[i], now);
      }
    } else {
      for (PRUint32 i = 0; i < mUpdateTables.Length(); i++) {
        LOG(("Failed updating %s", mUpdateTables[i].get()));
        mTableFreshness.Remove(mUpdateTables[i]);
      }
    }
  }

  ResetUpdate();

  if (resetDB) {
    ResetDatabase();
  }

  return NS_OK;
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::ResetDatabase()
{
  LOG(("nsUrlClassifierDBServiceWorker::ResetDatabase [%p]", this));
  ClearCachedChunkLists();

  mTableFreshness.Clear();

  nsresult rv = CloseDb();
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mPrefixSet->SetPrefixes(nsnull, 0);
  NS_ENSURE_SUCCESS(rv, rv);

  mDBFile->Remove(false);
  mPSFile->Remove(false);

  return NS_OK;
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::CancelUpdate()
{
  LOG(("CancelUpdate"));

  if (mUpdateObserver) {
    mUpdateStatus = NS_BINDING_ABORTED;

    ClearCachedChunkLists();
    mConnection->RollbackTransaction();
    mUpdateObserver->UpdateError(mUpdateStatus);

    for (PRUint32 i = 0; i < mUpdateTables.Length(); i++) {
      LOG(("Failed updating %s", mUpdateTables[i].get()));
      mTableFreshness.Remove(mUpdateTables[i]);
    }

    ResetStream();
    ResetUpdate();
  }

  return NS_OK;
}

// Allows the main thread to delete the connection which may be in
// a background thread.
// XXX This could be turned into a single shutdown event so the logic
// is simpler in nsUrlClassifierDBService::Shutdown.
NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::CloseDb()
{
  if (mConnection) {
    mMainStore.Close();
    mPendingSubStore.Close();

    mGetChunkListsStatement = nsnull;
    mSetChunkListsStatement = nsnull;

    mGetTablesStatement = nsnull;
    mGetTableIdStatement = nsnull;
    mGetTableNameStatement = nsnull;
    mInsertTableIdStatement = nsnull;
    mGetPageSizeStatement = nsnull;

    mConnection = nsnull;
    LOG(("urlclassifier db closed\n"));
  }

  mCryptoHash = nsnull;

  return NS_OK;
}

NS_IMETHODIMP
nsUrlClassifierDBServiceWorker::CacheCompletions(nsTArray<nsUrlClassifierLookupResult> *results)
{
  LOG(("nsUrlClassifierDBServiceWorker::CacheCompletions [%p]", this));

  nsAutoPtr<nsTArray<nsUrlClassifierLookupResult> > resultsPtr(results);

  // Start a new transaction. If a transaction is open for an update
  // this will be a noop, and this cache will be included in the
  // update's transaction.
  mozStorageTransaction trans(mConnection, true);

  for (PRUint32 i = 0; i < results->Length(); i++) {
    nsUrlClassifierLookupResult& result = results->ElementAt(i);
    // Failing to update here shouldn't be fatal (and might be common,
    // if we're updating entries that were removed since they were
    // returned after a lookup).
    mMainStore.UpdateEntry(result.mEntry);
  }

  return NS_OK;
}

nsresult
nsUrlClassifierDBServiceWorker::OpenDb()
{
  // Connection already open, don't do anything.
  if (mConnection) {
    return NS_OK;
  }

  LOG(("Opening db\n"));

  nsresult rv;
  // open the connection
  nsCOMPtr<mozIStorageService> storageService =
    do_GetService(MOZ_STORAGE_SERVICE_CONTRACTID, &rv);
  NS_ENSURE_SUCCESS(rv, rv);

  bool exists;
  rv = mDBFile->Exists(&exists);
  NS_ENSURE_SUCCESS(rv, rv);
  bool newDB = !exists;

  nsCOMPtr<mozIStorageConnection> connection;
  rv = storageService->OpenDatabase(mDBFile, getter_AddRefs(connection));
  if (rv == NS_ERROR_FILE_CORRUPTED) {
    // delete the db and try opening again
    rv = mDBFile->Remove(false);
    NS_ENSURE_SUCCESS(rv, rv);

    newDB = true;

    rv = storageService->OpenDatabase(mDBFile, getter_AddRefs(connection));
  }
  NS_ENSURE_SUCCESS(rv, rv);

  if (!newDB) {
    PRInt32 databaseVersion;
    rv = connection->GetSchemaVersion(&databaseVersion);
    NS_ENSURE_SUCCESS(rv, rv);

    if (databaseVersion != IMPLEMENTATION_VERSION) {
      LOG(("Incompatible database, removing."));

      rv = connection->Close();
      NS_ENSURE_SUCCESS(rv, rv);

      rv = mDBFile->Remove(false);
      NS_ENSURE_SUCCESS(rv, rv);

      newDB = true;

      rv = storageService->OpenDatabase(mDBFile, getter_AddRefs(connection));
      NS_ENSURE_SUCCESS(rv, rv);
    }
  }

  connection->SetGrowthIncrement(5 * 1024 * 1024, EmptyCString());
  rv = connection->ExecuteSimpleSQL(NS_LITERAL_CSTRING("PRAGMA synchronous=OFF"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->CreateStatement
    (NS_LITERAL_CSTRING("PRAGMA page_size"),
     getter_AddRefs(mGetPageSizeStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = SetCacheSize(connection, gLookupCacheSize);
  NS_ENSURE_SUCCESS(rv, rv);

  if (newDB) {
    rv = connection->SetSchemaVersion(IMPLEMENTATION_VERSION);
    NS_ENSURE_SUCCESS(rv, rv);
  }

  // Create the table
  rv = MaybeCreateTables(connection);
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mMainStore.Init(this, connection,
                       NS_LITERAL_CSTRING("moz_classifier"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = mPendingSubStore.Init(this, connection,
                             NS_LITERAL_CSTRING("moz_subs"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT add_chunks, sub_chunks FROM moz_tables"
                        " WHERE id=?1"),
     getter_AddRefs(mGetChunkListsStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->CreateStatement
    (NS_LITERAL_CSTRING("UPDATE moz_tables"
                        " SET add_chunks=?1, sub_chunks=?2"
                        " WHERE id=?3"),
     getter_AddRefs(mSetChunkListsStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT name, add_chunks, sub_chunks"
                        " FROM moz_tables"),
     getter_AddRefs(mGetTablesStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT id FROM moz_tables"
                        " WHERE name = ?1"),
     getter_AddRefs(mGetTableIdStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->CreateStatement
    (NS_LITERAL_CSTRING("SELECT name FROM moz_tables"
                        " WHERE id = ?1"),
     getter_AddRefs(mGetTableNameStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->CreateStatement
    (NS_LITERAL_CSTRING("INSERT INTO moz_tables(id, name, add_chunks, sub_chunks)"
                        " VALUES (null, ?1, null, null)"),
     getter_AddRefs(mInsertTableIdStatement));
  NS_ENSURE_SUCCESS(rv, rv);

  mConnection = connection;

  mCryptoHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
  NS_ENSURE_SUCCESS(rv, rv);

  LOG(("loading Prefix Set\n"));
  rv = LoadPrefixSet(mPSFile);
  if (NS_FAILED(rv)) {
    if (rv == NS_ERROR_FILE_CORRUPTED) {
      ResetDatabase();
    }
    return rv;
  }

  return NS_OK;
}

// We have both a prefix and a domain. Drop the domain, but
// hash the domain, the prefix and a random value together,
// ensuring any collisions happen at different points for
// different users.
// We need to calculate roughly 500k hashes for each update.
// The extensive initialization and finalization of normal
// cryptographic hashes, as well as their fairly low speed, make them
// prohibitively slow here, hence we can't use them.
// We use MurmurHash3 instead because it is reasonably well
// researched, trusted inside some other big projects, extremely
// fast, comes in a specific 32-bit output version, and is fairly
// compact. Upon testing with the actual prefix data, it does
// not appear to increase the number of collisions by any
// meaningful amount.
static nsresult KeyedHash(PRUint32 aPref, PRUint32 aDomain,
                          PRUint32 aKey, PRUint32 *aOut)
{
  // This is a reimplementation of MurmurHash3 32-bit
  // based on the public domain C++ sources.
  // http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp
  // for nblocks = 2
  PRUint32 c1 = 0xCC9E2D51;
  PRUint32 c2 = 0x1B873593;
  PRUint32 c3 = 0xE6546B64;
  PRUint32 c4 = 0x85EBCA6B;
  PRUint32 c5 = 0xC2B2AE35;
  PRUint32 h1 = aPref; // seed
  PRUint32 k1;
  PRUint32 karr[2];

  karr[0] = aDomain;
  karr[1] = aKey;

  for (PRUint32 i = 0; i < 2; i++) {
    k1 = karr[i];
    k1 *= c1;
    k1 = (k1 << 15) | (k1 >> (32-15));
    k1 *= c2;

    h1 ^= k1;
    h1 = (h1 << 13) | (h1 >> (32-13));
    h1 *= 5;
    h1 += c3;
  }

  h1 ^= 2; // len
  // fmix
  h1 ^= h1 >> 16;
  h1 *= c4;
  h1 ^= h1 >> 13;
  h1 *= c5;
  h1 ^= h1 >> 16;

  *aOut = h1;

  return NS_OK;
}
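
// Illustrative note (not part of the original source): the shift pairs above
// are 32-bit left rotations written out, e.g. (k1 << 15) | (k1 >> (32-15)) is
// ROTL32(k1, 15), and c1/c2/c4/c5 are the standard MurmurHash3 mixing
// constants.  A call such as KeyedHash(prefix, domain, key, &out) therefore
// produces a stable 32-bit value for a given (prefix, domain) pair that
// differs between profiles because each profile uses its own random key.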

nsresult nsUrlClassifierStore::ReadPrefixes(nsTArray<PRUint32>& array,
                                            PRUint32 aKey)
{
  mozStorageStatementScoper scoper(mAllPrefixStatement);
  bool hasMoreData;
  PRUint32 pcnt = 0;
  PRUint32 fcnt = 0;

#if defined(PR_LOGGING)
  PRIntervalTime clockStart = 0;
  if (LOG_ENABLED()) {
    clockStart = PR_IntervalNow();
  }
#endif

  while (NS_SUCCEEDED(mAllPrefixStatement->ExecuteStep(&hasMoreData)) && hasMoreData) {
    PRUint32 prefixval;
    PRUint32 domainval;
    PRUint32 size;

    const PRUint8 *blobdomain = mAllPrefixStatement->AsSharedBlob(0, &size);
    if (!blobdomain || (size != DOMAIN_LENGTH))
      return NS_ERROR_FAILURE;

    domainval = *(reinterpret_cast<const PRUint32*>(blobdomain));

    const PRUint8 *blobprefix = mAllPrefixStatement->AsSharedBlob(1, &size);
    if (!blobprefix || (size != PARTIAL_LENGTH)) {
      const PRUint8 *blobfull = mAllPrefixStatement->AsSharedBlob(2, &size);
      if (!blobfull || (size != COMPLETE_LENGTH)) {
        prefixval = domainval;
        fcnt++;
      } else {
        prefixval = *(reinterpret_cast<const PRUint32*>(blobfull));
      }
    } else {
      prefixval = *(reinterpret_cast<const PRUint32*>(blobprefix));
    }

    PRUint32 keyedVal;
    nsresult rv = KeyedHash(prefixval, domainval, aKey, &keyedVal);
    NS_ENSURE_SUCCESS(rv, rv);

    array.AppendElement(keyedVal);
    pcnt++;
    // Normal DB size is about 500k entries. If we are getting 10x
    // as much, the database must be corrupted.
    if (pcnt > 5000000) {
      return NS_ERROR_FILE_CORRUPTED;
    }
  }

  LOG(("SB prefixes: %d fulldomain: %d\n", pcnt, fcnt));

#if defined(PR_LOGGING)
  if (LOG_ENABLED()) {
    PRIntervalTime clockEnd = PR_IntervalNow();
    LOG(("Gathering took %dms\n",
         PR_IntervalToMilliseconds(clockEnd - clockStart)));
  }
#endif

  return NS_OK;
}

nsresult
nsUrlClassifierDBServiceWorker::ConstructPrefixSet()
{
  Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_PS_CONSTRUCT_TIME> timer;

  PRUint32 key;
  nsresult rv = mPrefixSet->GetKey(&key);
  NS_ENSURE_SUCCESS(rv, rv);

  nsTArray<PRUint32> array;
  rv = mMainStore.ReadPrefixes(array, key);
  NS_ENSURE_SUCCESS(rv, rv);

#ifdef HASHFUNCTION_COLLISION_TEST
  array.Sort();
  PRUint32 collisions = 0;
  for (int i = 1; i < array.Length(); i++) {
    if (array[i - 1] == array[i]) {
      collisions++;
    }
  }
  LOG(("%d collisions in the set", collisions));
#endif

  // clear old tree
  rv = mPrefixSet->SetPrefixes(nsnull, 0);
  NS_ENSURE_SUCCESS(rv, rv);
  if (array.IsEmpty()) {
    // DB is empty, but put a sentinel to show that we looked
    array.AppendElement(0);
  }
  // construct new one
  rv = mPrefixSet->SetPrefixes(array.Elements(), array.Length());
  NS_ENSURE_SUCCESS(rv, rv);

  // store the new tree to disk
  rv = mPrefixSet->StoreToFile(mPSFile);
  NS_WARN_IF_FALSE(NS_SUCCEEDED(rv), "failed to store the prefixset");

  return NS_OK;
}

nsresult
nsUrlClassifierDBServiceWorker::LoadPrefixSet(nsCOMPtr<nsIFile> & aFile)
{
  bool empty;
  nsresult rv = mPrefixSet->IsEmpty(&empty);
  NS_ENSURE_SUCCESS(rv, rv);

  if (!empty) {
    LOG(("PrefixSet already loaded, not loading again"));
    return NS_OK;
  }

  bool exists;
  rv = aFile->Exists(&exists);
  NS_ENSURE_SUCCESS(rv, rv);

#if defined(PR_LOGGING)
  PRIntervalTime clockStart = 0;
  if (LOG_ENABLED()) {
    clockStart = PR_IntervalNow();
  }
#endif

  if (exists) {
    Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_PS_FILELOAD_TIME> timer;
    LOG(("stored PrefixSet exists, loading from disk"));
    rv = mPrefixSet->LoadFromFile(aFile);
  }
  if (!exists || NS_FAILED(rv)) {
    LOG(("no (usable) stored PrefixSet found, constructing from store"));
    rv = ConstructPrefixSet();
    NS_ENSURE_SUCCESS(rv, rv);
  }

#ifdef DEBUG
  PRUint32 size = 0;
  rv = mPrefixSet->SizeOfIncludingThis(&size);
  LOG(("SB tree done, size = %d bytes\n", size));
  NS_ENSURE_SUCCESS(rv, rv);
#endif
#if defined(PR_LOGGING)
  if (LOG_ENABLED()) {
    PRIntervalTime clockEnd = PR_IntervalNow();
    LOG(("Loading took %dms\n",
         PR_IntervalToMilliseconds(clockEnd - clockStart)));
  }
#endif

  return NS_OK;
}

nsresult
nsUrlClassifierDBServiceWorker::MaybeCreateTables(mozIStorageConnection* connection)
{
  LOG(("MaybeCreateTables\n"));

  nsresult rv = connection->ExecuteSimpleSQL(
    NS_LITERAL_CSTRING("CREATE TABLE IF NOT EXISTS moz_classifier"
                       " (id INTEGER PRIMARY KEY,"
                       " domain BLOB,"
                       " partial_data BLOB,"
                       " complete_data BLOB,"
                       " chunk_id INTEGER,"
                       " table_id INTEGER)"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->ExecuteSimpleSQL(
    NS_LITERAL_CSTRING("CREATE INDEX IF NOT EXISTS"
                       " moz_classifier_domain_index"
                       " ON moz_classifier(domain)"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->ExecuteSimpleSQL(
    NS_LITERAL_CSTRING("CREATE INDEX IF NOT EXISTS"
                       " moz_classifier_chunk_index"
                       " ON moz_classifier(chunk_id)"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->ExecuteSimpleSQL(
    NS_LITERAL_CSTRING("CREATE TABLE IF NOT EXISTS moz_subs"
                       " (id INTEGER PRIMARY KEY,"
                       " domain BLOB,"
                       " partial_data BLOB,"
                       " complete_data BLOB,"
                       " chunk_id INTEGER,"
                       " table_id INTEGER,"
                       " add_chunk_id INTEGER)"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->ExecuteSimpleSQL(
    NS_LITERAL_CSTRING("CREATE INDEX IF NOT EXISTS"
                       " moz_subs_addchunk_index"
                       " ON moz_subs(add_chunk_id)"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->ExecuteSimpleSQL(
    NS_LITERAL_CSTRING("CREATE INDEX IF NOT EXISTS"
                       " moz_subs_chunk_index"
                       " ON moz_subs(chunk_id)"));
  NS_ENSURE_SUCCESS(rv, rv);

  rv = connection->ExecuteSimpleSQL(
    NS_LITERAL_CSTRING("CREATE TABLE IF NOT EXISTS moz_tables"
                       " (id INTEGER PRIMARY KEY,"
                       " name TEXT,"
                       " add_chunks TEXT,"
                       " sub_chunks TEXT);"));
  NS_ENSURE_SUCCESS(rv, rv);

  return rv;
}
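
// Summary of the schema created above (restating the SQL for readability,
// not part of the original source):
//   moz_tables     - one row per list: id, name, add_chunks, sub_chunks
//   moz_classifier - add entries: domain, partial_data, complete_data,
//                    chunk_id, table_id
//   moz_subs       - pending sub entries: the same columns plus
//                    add_chunk_id, the add chunk each sub is waiting for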

// -------------------------------------------------------------------------
// nsUrlClassifierLookupCallback
//
// This class takes the results of a lookup found on the worker thread
// and handles any necessary partial hash expansions before calling
// the client callback.

class nsUrlClassifierLookupCallback : public nsIUrlClassifierLookupCallback
                                    , public nsIUrlClassifierHashCompleterCallback
{
public:
  NS_DECL_ISUPPORTS
  NS_DECL_NSIURLCLASSIFIERLOOKUPCALLBACK
  NS_DECL_NSIURLCLASSIFIERHASHCOMPLETERCALLBACK

  nsUrlClassifierLookupCallback(nsUrlClassifierDBService *dbservice,
                                nsIUrlClassifierCallback *c)
    : mDBService(dbservice)
    , mResults(nsnull)
    , mPendingCompletions(0)
    , mCallback(c)
    {}

private:
  nsresult HandleResults();

  nsRefPtr<nsUrlClassifierDBService> mDBService;
  nsAutoPtr<nsTArray<nsUrlClassifierLookupResult> > mResults;

  // Completed results to send back to the worker for caching.
  nsAutoPtr<nsTArray<nsUrlClassifierLookupResult> > mCacheResults;

  PRUint32 mPendingCompletions;
  nsCOMPtr<nsIUrlClassifierCallback> mCallback;
};

NS_IMPL_THREADSAFE_ISUPPORTS2(nsUrlClassifierLookupCallback,
                              nsIUrlClassifierLookupCallback,
                              nsIUrlClassifierHashCompleterCallback)
|
|
|
|
|
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierLookupCallback::LookupComplete(nsTArray<nsUrlClassifierLookupResult>* results)
|
|
|
|
{
|
|
|
|
NS_ASSERTION(mResults == nsnull,
|
|
|
|
"Should only get one set of results per nsUrlClassifierLookupCallback!");
|
|
|
|
|
|
|
|
if (!results) {
|
|
|
|
HandleResults();
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
mResults = results;
|
2008-02-26 21:51:28 -08:00
|
|
|
mResults->Sort();
|
2008-01-29 12:57:18 -08:00
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
// Check the results entries that need to be completed.
|
2008-01-29 12:57:18 -08:00
|
|
|
for (PRUint32 i = 0; i < results->Length(); i++) {
|
|
|
|
nsUrlClassifierLookupResult& result = results->ElementAt(i);
|
2008-04-15 15:39:44 -07:00
|
|
|
|
|
|
|
// We will complete partial matches and matches that are stale.
|
2008-01-29 12:57:18 -08:00
|
|
|
if (!result.mConfirmed) {
|
|
|
|
nsCOMPtr<nsIUrlClassifierHashCompleter> completer;
|
|
|
|
if (mDBService->GetCompleter(result.mTableName,
|
|
|
|
getter_AddRefs(completer))) {
|
|
|
|
nsCAutoString partialHash;
|
2008-04-15 15:39:44 -07:00
|
|
|
PRUint8 *buf =
|
|
|
|
result.mEntry.mHavePartial ? result.mEntry.mPartialHash.buf
|
|
|
|
: result.mEntry.mCompleteHash.buf;
|
|
|
|
partialHash.Assign(reinterpret_cast<char*>(buf), PARTIAL_LENGTH);
|
2008-01-29 12:57:18 -08:00
|
|
|
|
|
|
|
nsresult rv = completer->Complete(partialHash, this);
|
|
|
|
if (NS_SUCCEEDED(rv)) {
|
|
|
|
mPendingCompletions++;
|
|
|
|
}
|
|
|
|
} else {
|
2008-04-15 15:39:44 -07:00
|
|
|
// For tables with no hash completer, a complete hash match is
|
|
|
|
// good enough, it doesn't need to be fresh. (we need the
|
|
|
|
// mLookupFragment comparison to weed out noise entries, which
|
|
|
|
// should never be confirmed).
|
|
|
|
if (result.mEntry.mHaveComplete
|
|
|
|
&& (result.mLookupFragment == result.mEntry.mCompleteHash)) {
|
2011-10-17 07:59:28 -07:00
|
|
|
result.mConfirmed = true;
|
2008-04-15 15:39:44 -07:00
|
|
|
} else {
|
|
|
|
NS_WARNING("Partial match in a table without a valid completer, ignoring partial match.");
|
|
|
|
}
|
2008-01-29 12:57:18 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (mPendingCompletions == 0) {
|
|
|
|
// All results were complete; we're ready!
|
|
|
|
HandleResults();
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
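// Added sketch (not part of the original file) of the completion flow driven
// above, assuming the usual Safe Browsing v2 sizes where PARTIAL_LENGTH is 4
// and a complete hash is 32 bytes; the table name is hypothetical:
//
//   worker thread reports a partial-hash match for "moz-phish-example"
//     -> LookupComplete() sees result.mConfirmed == false
//     -> mDBService->GetCompleter("moz-phish-example", ...) returns a completer
//     -> completer->Complete(partialHash, this) is issued, mPendingCompletions == 1
//     -> gethash answers arrive via Completion(); CompletionFinished()
//        decrements the counter and, once it reaches zero, HandleResults()
//        reports the confirmed tables to mCallback.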
|
|
|
|
|
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierLookupCallback::CompletionFinished(nsresult status)
|
|
|
|
{
|
|
|
|
LOG(("nsUrlClassifierLookupCallback::CompletionFinished [%p, %08x]",
|
|
|
|
this, status));
|
|
|
|
if (NS_FAILED(status)) {
|
|
|
|
NS_WARNING("gethash response failed.");
|
|
|
|
}
|
|
|
|
|
|
|
|
mPendingCompletions--;
|
|
|
|
if (mPendingCompletions == 0) {
|
|
|
|
HandleResults();
|
2008-01-29 18:26:44 -08:00
|
|
|
|
|
|
|
if (mCacheResults) {
|
|
|
|
// This hands ownership of the cache results array back to the worker
|
|
|
|
// thread.
|
|
|
|
mDBService->CacheCompletions(mCacheResults.forget());
|
|
|
|
}
|
2008-01-29 12:57:18 -08:00
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierLookupCallback::Completion(const nsACString& completeHash,
|
|
|
|
const nsACString& tableName,
|
2008-02-27 00:51:02 -08:00
|
|
|
PRUint32 chunkId,
|
2011-09-28 23:19:26 -07:00
|
|
|
bool verified)
|
2008-01-29 12:57:18 -08:00
|
|
|
{
|
2008-04-15 15:39:44 -07:00
|
|
|
LOG(("nsUrlClassifierLookupCallback::Completion [%p, %s, %d, %d]",
|
|
|
|
this, PromiseFlatCString(tableName).get(), chunkId, verified));
|
2008-01-29 12:57:18 -08:00
|
|
|
nsUrlClassifierCompleteHash hash;
|
|
|
|
hash.Assign(completeHash);
|
|
|
|
|
|
|
|
for (PRUint32 i = 0; i < mResults->Length(); i++) {
|
|
|
|
nsUrlClassifierLookupResult& result = mResults->ElementAt(i);
|
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
// First, see if this result can be used to update an entry.
|
|
|
|
if (verified &&
|
|
|
|
!result.mEntry.mHaveComplete &&
|
2008-01-29 12:57:18 -08:00
|
|
|
hash.StartsWith(result.mEntry.mPartialHash) &&
|
|
|
|
result.mTableName == tableName &&
|
|
|
|
result.mEntry.mChunkId == chunkId) {
|
|
|
|
// We have a completion for this entry. Fill it in...
|
|
|
|
result.mEntry.SetHash(hash);
|
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
if (!mCacheResults) {
|
|
|
|
mCacheResults = new nsTArray<nsUrlClassifierLookupResult>();
|
|
|
|
if (!mCacheResults)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
2008-02-27 00:51:02 -08:00
|
|
|
}
|
2008-04-15 15:39:44 -07:00
|
|
|
|
|
|
|
mCacheResults->AppendElement(result);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now, see if it verifies a lookup
|
|
|
|
if (result.mLookupFragment == hash) {
|
2011-10-17 07:59:28 -07:00
|
|
|
result.mConfirmed = true;
|
2008-03-04 15:39:37 -08:00
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
if (result.mTableName != tableName ||
|
|
|
|
result.mEntry.mChunkId != chunkId) {
|
|
|
|
// The hash we got for this completion matches the hash we
|
|
|
|
// looked up, but doesn't match the table/chunk id. This could
|
|
|
|
// happen in rare cases where a given URL was moved between
|
|
|
|
// lists or added/removed/re-added to the list in the time since
|
|
|
|
// we've updated.
|
|
|
|
//
|
|
|
|
// Update the lookup result, but don't update the entry or try
|
|
|
|
// to cache the results of this completion, as it might confuse
|
|
|
|
// things.
|
|
|
|
result.mTableName = tableName;
|
|
|
|
NS_WARNING("Accepting a gethash with an invalid table name or chunk id");
|
2011-09-08 13:17:25 -07:00
|
|
|
LOG(("Tablename: %s ?= %s, ChunkId %d ?= %d",
|
|
|
|
result.mTableName.get(), PromiseFlatCString(tableName).get(),
|
|
|
|
result.mEntry.mChunkId, chunkId));
|
2008-04-15 15:39:44 -07:00
|
|
|
}
|
2008-01-29 12:57:18 -08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
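// Illustrative trace of Completion() above (an added sketch, not original
// code; names and values are hypothetical):
//
//   Completion(<32-byte hash>, "moz-phish-example", chunkId = 7, verified = true)
//     - entry update: if a result's partial hash is a prefix of the complete
//       hash and its table/chunk match, the entry receives the full hash via
//       SetHash() and is appended to mCacheResults for the worker to cache.
//     - lookup confirmation: any result whose mLookupFragment equals the
//       complete hash is marked mConfirmed, even when the table/chunk info
//       disagrees; in that case only mTableName is corrected and nothing is
//       cached.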
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierLookupCallback::HandleResults()
|
|
|
|
{
|
|
|
|
if (!mResults) {
|
|
|
|
// No results, this URI is clean.
|
|
|
|
return mCallback->HandleEvent(NS_LITERAL_CSTRING(""));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build a stringified list of result tables.
|
|
|
|
mResults->Sort();
|
|
|
|
PRUint32 lastTableId = 0;
|
|
|
|
nsCAutoString tables;
|
|
|
|
for (PRUint32 i = 0; i < mResults->Length(); i++) {
|
|
|
|
nsUrlClassifierLookupResult& result = mResults->ElementAt(i);
|
|
|
|
// Leave out results that weren't confirmed, as their existence on
|
2008-02-26 21:51:28 -08:00
|
|
|
// the list can't be verified. Also leave out randomly-generated
|
|
|
|
// noise.
|
|
|
|
if (!result.mConfirmed || result.mNoise)
|
2008-01-29 12:57:18 -08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (tables.Length() > 0) {
|
|
|
|
if (lastTableId == result.mEntry.mTableId)
|
|
|
|
continue;
|
|
|
|
tables.Append(",");
|
|
|
|
}
|
|
|
|
|
|
|
|
tables.Append(result.mTableName);
|
|
|
|
lastTableId = result.mEntry.mTableId;
|
|
|
|
}
|
|
|
|
|
|
|
|
return mCallback->HandleEvent(tables);
|
|
|
|
}
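// Example of the string handed to mCallback->HandleEvent() above (added
// sketch; the table names are hypothetical): with confirmed results from two
// different table ids, sorted by table id, the callback receives
//
//   "moz-malware-example,moz-phish-example"
//
// Unconfirmed and noise entries are skipped, consecutive results from the
// same table id collapse to a single table name, and a clean URI is reported
// as the empty string.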
|
|
|
|
|
|
|
|
|
2007-08-31 16:18:46 -07:00
|
|
|
// -------------------------------------------------------------------------
|
|
|
|
// Helper class for nsIURIClassifier implementation, translates table names
|
|
|
|
// to nsIURIClassifier enums.
|
|
|
|
|
|
|
|
class nsUrlClassifierClassifyCallback : public nsIUrlClassifierCallback
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
NS_DECL_ISUPPORTS
|
|
|
|
NS_DECL_NSIURLCLASSIFIERCALLBACK
|
|
|
|
|
2007-12-03 21:10:22 -08:00
|
|
|
nsUrlClassifierClassifyCallback(nsIURIClassifierCallback *c,
|
2011-09-28 23:19:26 -07:00
|
|
|
bool checkMalware,
|
|
|
|
bool checkPhishing)
|
2007-08-31 16:18:46 -07:00
|
|
|
: mCallback(c)
|
2007-12-03 21:10:22 -08:00
|
|
|
, mCheckMalware(checkMalware)
|
|
|
|
, mCheckPhishing(checkPhishing)
|
2007-08-31 16:18:46 -07:00
|
|
|
{}
|
|
|
|
|
|
|
|
private:
|
|
|
|
nsCOMPtr<nsIURIClassifierCallback> mCallback;
|
2011-09-28 23:19:26 -07:00
|
|
|
bool mCheckMalware;
|
|
|
|
bool mCheckPhishing;
|
2007-08-31 16:18:46 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
NS_IMPL_THREADSAFE_ISUPPORTS1(nsUrlClassifierClassifyCallback,
|
2007-08-31 16:48:39 -07:00
|
|
|
nsIUrlClassifierCallback)
|
2007-08-31 16:18:46 -07:00
|
|
|
|
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierClassifyCallback::HandleEvent(const nsACString& tables)
|
|
|
|
{
|
|
|
|
// XXX: we should probably have the wardens tell the service which table
|
|
|
|
// names match with which classification. For now the table names give
|
|
|
|
// enough information.
|
|
|
|
nsresult response = NS_OK;
|
|
|
|
|
|
|
|
nsACString::const_iterator begin, end;
|
|
|
|
|
|
|
|
tables.BeginReading(begin);
|
|
|
|
tables.EndReading(end);
|
2007-12-03 21:10:22 -08:00
|
|
|
if (mCheckMalware &&
|
|
|
|
FindInReadable(NS_LITERAL_CSTRING("-malware-"), begin, end)) {
|
2007-08-31 16:18:46 -07:00
|
|
|
response = NS_ERROR_MALWARE_URI;
|
2007-10-29 11:29:14 -07:00
|
|
|
} else {
|
|
|
|
// Reset begin before checking phishing table
|
|
|
|
tables.BeginReading(begin);
|
2007-12-03 21:10:22 -08:00
|
|
|
|
|
|
|
if (mCheckPhishing &&
|
|
|
|
FindInReadable(NS_LITERAL_CSTRING("-phish-"), begin, end)) {
|
2007-10-29 11:29:14 -07:00
|
|
|
response = NS_ERROR_PHISHING_URI;
|
|
|
|
}
|
2007-08-31 16:18:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
mCallback->OnClassifyComplete(response);
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
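// Added sketch of the substring-based mapping above (table names are
// hypothetical examples, not a statement of which lists ship by default):
//
//   tables = "moz-malware-example"  -> NS_ERROR_MALWARE_URI  (if mCheckMalware)
//   tables = "moz-phish-example"    -> NS_ERROR_PHISHING_URI (if mCheckPhishing)
//   tables = ""                     -> NS_OK (clean)
//
// Malware takes precedence when both substrings are present, because the
// phishing check only runs in the else branch.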
|
|
|
|
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
// -------------------------------------------------------------------------
|
|
|
|
// Proxy class implementation
|
|
|
|
|
2007-08-31 16:18:46 -07:00
|
|
|
NS_IMPL_THREADSAFE_ISUPPORTS3(nsUrlClassifierDBService,
|
2007-03-22 10:30:00 -07:00
|
|
|
nsIUrlClassifierDBService,
|
2007-08-31 16:18:46 -07:00
|
|
|
nsIURIClassifier,
|
2007-03-22 10:30:00 -07:00
|
|
|
nsIObserver)
|
|
|
|
|
|
|
|
/* static */ nsUrlClassifierDBService*
|
2008-01-04 22:38:30 -08:00
|
|
|
nsUrlClassifierDBService::GetInstance(nsresult *result)
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
2008-01-04 22:38:30 -08:00
|
|
|
*result = NS_OK;
|
2007-03-22 10:30:00 -07:00
|
|
|
if (!sUrlClassifierDBService) {
|
|
|
|
sUrlClassifierDBService = new nsUrlClassifierDBService();
|
2008-01-04 22:38:30 -08:00
|
|
|
if (!sUrlClassifierDBService) {
|
|
|
|
*result = NS_ERROR_OUT_OF_MEMORY;
|
2007-03-22 10:30:00 -07:00
|
|
|
return nsnull;
|
2008-01-04 22:38:30 -08:00
|
|
|
}
|
2007-03-22 10:30:00 -07:00
|
|
|
|
|
|
|
NS_ADDREF(sUrlClassifierDBService); // addref the global
|
|
|
|
|
2008-01-04 22:38:30 -08:00
|
|
|
*result = sUrlClassifierDBService->Init();
|
|
|
|
if (NS_FAILED(*result)) {
|
2007-03-22 10:30:00 -07:00
|
|
|
NS_RELEASE(sUrlClassifierDBService);
|
|
|
|
return nsnull;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Already exists, just add a ref
|
|
|
|
NS_ADDREF(sUrlClassifierDBService); // addref the return result
|
|
|
|
}
|
|
|
|
return sUrlClassifierDBService;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
nsUrlClassifierDBService::nsUrlClassifierDBService()
|
2007-08-31 16:18:46 -07:00
|
|
|
: mCheckMalware(CHECK_MALWARE_DEFAULT)
|
2007-12-03 21:10:22 -08:00
|
|
|
, mCheckPhishing(CHECK_PHISHING_DEFAULT)
|
2011-10-17 07:59:28 -07:00
|
|
|
, mInUpdate(false)
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
|
|
|
}
|
|
|
|
|
|
|
|
nsUrlClassifierDBService::~nsUrlClassifierDBService()
|
|
|
|
{
|
|
|
|
sUrlClassifierDBService = nsnull;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBService::Init()
|
|
|
|
{
|
|
|
|
#if defined(PR_LOGGING)
|
|
|
|
if (!gUrlClassifierDbServiceLog)
|
|
|
|
gUrlClassifierDbServiceLog = PR_NewLogModule("UrlClassifierDbService");
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// Force the storage service to be created on the main thread.
|
|
|
|
nsresult rv;
|
|
|
|
nsCOMPtr<mozIStorageService> storageService =
|
|
|
|
do_GetService(MOZ_STORAGE_SERVICE_CONTRACTID, &rv);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2007-08-09 15:33:04 -07:00
|
|
|
// Force PSM to be loaded on the main thread.
|
2011-09-08 13:15:27 -07:00
|
|
|
mHash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv);
|
2007-08-09 15:33:04 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-09-08 13:15:27 -07:00
|
|
|
mPrefixSet = new nsUrlClassifierPrefixSet();
|
2011-09-08 13:15:18 -07:00
|
|
|
NS_ENSURE_TRUE(mPrefixSet, NS_ERROR_OUT_OF_MEMORY); // check the allocation, not the stale rv from above
|
|
|
|
|
2007-08-31 16:18:46 -07:00
|
|
|
// Should we check document loads for malware URIs?
|
|
|
|
nsCOMPtr<nsIPrefBranch2> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
|
|
|
|
|
2008-02-26 21:51:28 -08:00
|
|
|
PRInt32 gethashNoise = 0;
|
2007-08-31 16:18:46 -07:00
|
|
|
if (prefs) {
|
2011-09-28 23:19:26 -07:00
|
|
|
bool tmpbool;
|
2007-08-31 16:18:46 -07:00
|
|
|
rv = prefs->GetBoolPref(CHECK_MALWARE_PREF, &tmpbool);
|
|
|
|
mCheckMalware = NS_SUCCEEDED(rv) ? tmpbool : CHECK_MALWARE_DEFAULT;
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
prefs->AddObserver(CHECK_MALWARE_PREF, this, false);
|
2007-12-03 21:10:22 -08:00
|
|
|
|
|
|
|
rv = prefs->GetBoolPref(CHECK_PHISHING_PREF, &tmpbool);
|
|
|
|
mCheckPhishing = NS_SUCCEEDED(rv) ? tmpbool : CHECK_PHISHING_DEFAULT;
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
prefs->AddObserver(CHECK_PHISHING_PREF, this, false);
|
2008-02-26 21:51:28 -08:00
|
|
|
|
|
|
|
if (NS_FAILED(prefs->GetIntPref(GETHASH_NOISE_PREF, &gethashNoise))) {
|
|
|
|
gethashNoise = GETHASH_NOISE_DEFAULT;
|
|
|
|
}
|
2008-04-15 15:39:44 -07:00
|
|
|
|
|
|
|
nsXPIDLCString tmpstr;
|
|
|
|
if (NS_SUCCEEDED(prefs->GetCharPref(GETHASH_TABLES_PREF, getter_Copies(tmpstr)))) {
|
|
|
|
SplitTables(tmpstr, mGethashWhitelist);
|
|
|
|
}
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
prefs->AddObserver(GETHASH_TABLES_PREF, this, false);
|
2008-04-15 15:39:44 -07:00
|
|
|
|
|
|
|
PRInt32 tmpint;
|
|
|
|
rv = prefs->GetIntPref(CONFIRM_AGE_PREF, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gFreshnessGuarantee, NS_SUCCEEDED(rv) ? tmpint : CONFIRM_AGE_DEFAULT_SEC);
|
2008-04-15 15:39:44 -07:00
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
prefs->AddObserver(CONFIRM_AGE_PREF, this, false);
|
2008-04-15 15:39:44 -07:00
|
|
|
|
2008-05-02 02:55:09 -07:00
|
|
|
rv = prefs->GetIntPref(UPDATE_CACHE_SIZE_PREF, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gUpdateCacheSize, NS_SUCCEEDED(rv) ? tmpint : UPDATE_CACHE_SIZE_DEFAULT);
|
2008-05-07 13:33:45 -07:00
|
|
|
|
2011-09-08 13:15:37 -07:00
|
|
|
rv = prefs->GetIntPref(LOOKUP_CACHE_SIZE_PREF, &tmpint);
|
|
|
|
PR_ATOMIC_SET(&gLookupCacheSize, NS_SUCCEEDED(rv) ? tmpint : LOOKUP_CACHE_SIZE_DEFAULT);
|
|
|
|
|
2008-05-07 13:33:45 -07:00
|
|
|
rv = prefs->GetIntPref(UPDATE_WORKING_TIME, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gWorkingTimeThreshold,
|
|
|
|
NS_SUCCEEDED(rv) ? tmpint : UPDATE_WORKING_TIME_DEFAULT);
|
2008-05-07 13:33:45 -07:00
|
|
|
|
|
|
|
rv = prefs->GetIntPref(UPDATE_DELAY_TIME, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gDelayTime,
|
|
|
|
NS_SUCCEEDED(rv) ? tmpint : UPDATE_DELAY_TIME_DEFAULT);
|
2007-08-31 16:18:46 -07:00
|
|
|
}
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
// Start the background thread.
|
|
|
|
rv = NS_NewThread(&gDbBackgroundThread);
|
|
|
|
if (NS_FAILED(rv))
|
|
|
|
return rv;
|
|
|
|
|
|
|
|
mWorker = new nsUrlClassifierDBServiceWorker();
|
|
|
|
if (!mWorker)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
2011-09-08 13:15:18 -07:00
|
|
|
rv = mWorker->Init(gethashNoise, mPrefixSet);
|
2007-07-25 23:38:43 -07:00
|
|
|
if (NS_FAILED(rv)) {
|
|
|
|
mWorker = nsnull;
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
// Proxy for calling the worker on the background thread
|
2011-08-19 08:50:04 -07:00
|
|
|
mWorkerProxy = new UrlClassifierDBServiceWorkerProxy(mWorker);
|
2008-01-12 14:22:03 -08:00
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
mCompleters.Init();
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
// Add an observer for shutdown
|
|
|
|
nsCOMPtr<nsIObserverService> observerService =
|
2010-04-29 09:59:13 -07:00
|
|
|
mozilla::services::GetObserverService();
|
2007-03-22 10:30:00 -07:00
|
|
|
if (!observerService)
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
observerService->AddObserver(this, "profile-before-change", false);
|
|
|
|
observerService->AddObserver(this, "xpcom-shutdown-threads", false);
|
2007-03-22 10:30:00 -07:00
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2007-08-31 16:18:46 -07:00
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBService::Classify(nsIURI *uri,
|
|
|
|
nsIURIClassifierCallback* c,
|
2011-09-28 23:19:26 -07:00
|
|
|
bool* result)
|
2007-08-31 16:18:46 -07:00
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
2007-12-03 21:10:22 -08:00
|
|
|
if (!(mCheckMalware || mCheckPhishing)) {
|
2011-10-17 07:59:28 -07:00
|
|
|
*result = false;
|
2007-08-31 16:18:46 -07:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
nsRefPtr<nsUrlClassifierClassifyCallback> callback =
|
2007-12-03 21:10:22 -08:00
|
|
|
new nsUrlClassifierClassifyCallback(c, mCheckMalware, mCheckPhishing);
|
2007-08-31 16:18:46 -07:00
|
|
|
if (!callback) return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
nsresult rv = LookupURI(uri, callback, false, result);
|
2007-11-27 12:08:02 -08:00
|
|
|
if (rv == NS_ERROR_MALFORMED_URI) {
|
2011-10-17 07:59:28 -07:00
|
|
|
*result = false;
|
2008-10-19 20:07:46 -07:00
|
|
|
// The URI had no hostname, don't try to classify it.
|
2007-11-27 12:08:02 -08:00
|
|
|
return NS_OK;
|
|
|
|
}
|
2008-02-21 02:45:56 -08:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
2007-11-27 12:08:02 -08:00
|
|
|
|
|
|
|
return NS_OK;
|
2007-08-31 16:18:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
NS_IMETHODIMP
|
2007-07-25 23:38:43 -07:00
|
|
|
nsUrlClassifierDBService::Lookup(const nsACString& spec,
|
2008-01-29 12:57:18 -08:00
|
|
|
nsIUrlClassifierCallback* c)
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsCOMPtr<nsIURI> uri;
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsresult rv = NS_NewURI(getter_AddRefs(uri), spec);
|
2007-03-22 10:30:00 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
uri = NS_GetInnermostURI(uri);
|
|
|
|
if (!uri) {
|
|
|
|
return NS_ERROR_FAILURE;
|
|
|
|
}
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool didLookup;
|
2011-10-17 07:59:28 -07:00
|
|
|
return LookupURI(uri, c, true, &didLookup);
|
2007-08-31 16:18:46 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBService::LookupURI(nsIURI* uri,
|
2008-10-19 20:07:46 -07:00
|
|
|
nsIUrlClassifierCallback* c,
|
2011-09-28 23:19:26 -07:00
|
|
|
bool forceLookup,
|
|
|
|
bool *didLookup)
|
2007-08-31 16:18:46 -07:00
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
nsCAutoString key;
|
|
|
|
// Canonicalize the url
|
|
|
|
nsCOMPtr<nsIUrlClassifierUtils> utilsService =
|
|
|
|
do_GetService(NS_URLCLASSIFIERUTILS_CONTRACTID);
|
2007-08-31 16:18:46 -07:00
|
|
|
nsresult rv = utilsService->GetKeyForURI(uri, key);
|
2008-02-21 02:45:56 -08:00
|
|
|
if (NS_FAILED(rv))
|
|
|
|
return rv;
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-10-19 20:07:46 -07:00
|
|
|
if (forceLookup) {
|
2011-10-17 07:59:28 -07:00
|
|
|
*didLookup = true;
|
2008-10-19 20:07:46 -07:00
|
|
|
} else {
|
2011-09-08 13:17:14 -07:00
|
|
|
// Check if the URI is clean. If so, we don't need to
|
|
|
|
// bother queueing up a lookup; we can just return.
|
2011-09-28 23:19:26 -07:00
|
|
|
bool clean;
|
2011-09-08 13:17:14 -07:00
|
|
|
rv = CheckClean(key, &clean);
|
2008-10-19 20:07:46 -07:00
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2011-05-03 10:39:28 -07:00
|
|
|
if (!clean) {
|
|
|
|
nsCOMPtr<nsIPermissionManager> permissionManager =
|
|
|
|
do_GetService(NS_PERMISSIONMANAGER_CONTRACTID);
|
|
|
|
|
|
|
|
if (permissionManager) {
|
|
|
|
PRUint32 perm;
|
|
|
|
permissionManager->TestPermission(uri, "safe-browsing", &perm);
|
|
|
|
clean |= (perm == nsIPermissionManager::ALLOW_ACTION);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-10-19 20:07:46 -07:00
|
|
|
*didLookup = !clean;
|
|
|
|
if (clean) {
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
// Create an nsUrlClassifierLookupCallback object. This object will
|
|
|
|
// take care of confirming partial hash matches if necessary before
|
|
|
|
// calling the client's callback.
|
|
|
|
nsCOMPtr<nsIUrlClassifierLookupCallback> callback =
|
|
|
|
new nsUrlClassifierLookupCallback(this, c);
|
|
|
|
if (!callback)
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
|
2011-08-19 08:50:04 -07:00
|
|
|
nsCOMPtr<nsIUrlClassifierLookupCallback> proxyCallback =
|
|
|
|
new UrlClassifierLookupCallbackProxy(callback);
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2007-07-25 23:38:43 -07:00
|
|
|
// Queue this lookup and call the lookup function to flush the queue if
|
|
|
|
// necessary.
|
|
|
|
rv = mWorker->QueueLookup(key, proxyCallback);
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
return mWorkerProxy->Lookup(EmptyCString(), nsnull);
|
2007-03-22 10:30:00 -07:00
|
|
|
}
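// Added sketch of the main-thread half of a lookup (the URL is hypothetical;
// the exact canonical key format comes from nsIUrlClassifierUtils):
//
//   LookupURI(uri = http://example.com/page, callback, forceLookup = false, &didLookup)
//     1. GetKeyForURI() canonicalizes the URI into a lookup key.
//     2. CheckClean() consults the prefix set; a key is also treated as clean
//        when the site carries a "safe-browsing" ALLOW permission. For a
//        clean key, *didLookup stays false and nothing is queued.
//     3. Otherwise the key is queued on the worker via QueueLookup(), wrapped
//        in an nsUrlClassifierLookupCallback, and an empty
//        mWorkerProxy->Lookup() call is posted to flush the queue on the
//        background thread.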
|
|
|
|
|
|
|
|
NS_IMETHODIMP
|
2007-07-25 23:38:43 -07:00
|
|
|
nsUrlClassifierDBService::GetTables(nsIUrlClassifierCallback* c)
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
|
|
|
nsresult rv;
|
|
|
|
// The proxy callback uses the current thread.
|
2011-08-19 08:50:04 -07:00
|
|
|
nsCOMPtr<nsIUrlClassifierCallback> proxyCallback =
|
|
|
|
new UrlClassifierCallbackProxy(c);
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
return mWorkerProxy->GetTables(proxyCallback);
|
2007-03-22 10:30:00 -07:00
|
|
|
}
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBService::SetHashCompleter(const nsACString &tableName,
|
|
|
|
nsIUrlClassifierHashCompleter *completer)
|
|
|
|
{
|
|
|
|
if (completer) {
|
|
|
|
if (!mCompleters.Put(tableName, completer)) {
|
|
|
|
return NS_ERROR_OUT_OF_MEMORY;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
mCompleters.Remove(tableName);
|
|
|
|
}
|
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
NS_IMETHODIMP
|
2008-02-27 00:51:02 -08:00
|
|
|
nsUrlClassifierDBService::BeginUpdate(nsIUrlClassifierUpdateObserver *observer,
|
2008-04-15 15:39:44 -07:00
|
|
|
const nsACString &updateTables,
|
2008-02-27 00:51:02 -08:00
|
|
|
const nsACString &clientKey)
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
2008-01-23 11:30:54 -08:00
|
|
|
if (mInUpdate)
|
|
|
|
return NS_ERROR_NOT_AVAILABLE;
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
mInUpdate = true;
|
2008-01-23 11:30:54 -08:00
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
nsresult rv;
|
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
// The proxy observer uses the current thread
|
2011-08-19 08:50:04 -07:00
|
|
|
nsCOMPtr<nsIUrlClassifierUpdateObserver> proxyObserver =
|
|
|
|
new UrlClassifierUpdateObserverProxy(observer);
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
return mWorkerProxy->BeginUpdate(proxyObserver, updateTables, clientKey);
|
2007-03-22 10:30:00 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
NS_IMETHODIMP
|
2008-02-27 00:51:02 -08:00
|
|
|
nsUrlClassifierDBService::BeginStream(const nsACString &table,
|
|
|
|
const nsACString &serverMAC)
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
2008-02-27 00:51:02 -08:00
|
|
|
return mWorkerProxy->BeginStream(table, serverMAC);
|
2008-01-12 14:22:03 -08:00
|
|
|
}
|
2007-07-25 23:38:43 -07:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBService::UpdateStream(const nsACString& aUpdateChunk)
|
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
return mWorkerProxy->UpdateStream(aUpdateChunk);
|
|
|
|
}
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBService::FinishStream()
|
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
|
|
|
return mWorkerProxy->FinishStream();
|
2007-03-22 10:30:00 -07:00
|
|
|
}
|
|
|
|
|
2008-01-12 13:32:01 -08:00
|
|
|
NS_IMETHODIMP
|
2008-01-12 14:22:03 -08:00
|
|
|
nsUrlClassifierDBService::FinishUpdate()
|
2008-01-12 13:32:01 -08:00
|
|
|
{
|
2008-01-12 14:22:03 -08:00
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
2008-01-12 13:32:01 -08:00
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
mInUpdate = false;
|
2008-01-23 11:30:54 -08:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
return mWorkerProxy->FinishUpdate();
|
2008-01-12 13:32:01 -08:00
|
|
|
}
|
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
NS_IMETHODIMP
|
2008-01-12 14:22:03 -08:00
|
|
|
nsUrlClassifierDBService::CancelUpdate()
|
2007-03-22 10:30:00 -07:00
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
mInUpdate = false;
|
2008-01-23 11:30:54 -08:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
return mWorkerProxy->CancelUpdate();
|
|
|
|
}
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBService::ResetDatabase()
|
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
2007-03-22 10:30:00 -07:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
return mWorkerProxy->ResetDatabase();
|
2007-03-22 10:30:00 -07:00
|
|
|
}
|
|
|
|
|
2008-01-29 19:15:13 -08:00
|
|
|
nsresult
|
2008-01-29 18:26:44 -08:00
|
|
|
nsUrlClassifierDBService::CacheCompletions(nsTArray<nsUrlClassifierLookupResult> *results)
|
|
|
|
{
|
|
|
|
NS_ENSURE_TRUE(gDbBackgroundThread, NS_ERROR_NOT_INITIALIZED);
|
|
|
|
|
|
|
|
return mWorkerProxy->CacheCompletions(results);
|
|
|
|
}
|
|
|
|
|
2011-09-28 23:19:26 -07:00
|
|
|
bool
|
2008-03-04 14:05:05 -08:00
|
|
|
nsUrlClassifierDBService::GetCompleter(const nsACString &tableName,
|
|
|
|
nsIUrlClassifierHashCompleter **completer)
|
|
|
|
{
|
|
|
|
if (mCompleters.Get(tableName, completer)) {
|
2011-10-17 07:59:28 -07:00
|
|
|
return true;
|
2008-03-04 14:05:05 -08:00
|
|
|
}
|
|
|
|
|
2008-04-15 15:39:44 -07:00
|
|
|
if (!mGethashWhitelist.Contains(tableName)) {
|
2011-10-17 07:59:28 -07:00
|
|
|
return false;
|
2008-04-15 15:39:44 -07:00
|
|
|
}
|
|
|
|
|
2008-03-04 14:05:05 -08:00
|
|
|
return NS_SUCCEEDED(CallGetService(NS_URLCLASSIFIERHASHCOMPLETER_CONTRACTID,
|
|
|
|
completer));
|
|
|
|
}
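// Resolution order used above, summarized as an added note:
//   1. a completer explicitly registered for the table via SetHashCompleter()
//      (mCompleters) wins;
//   2. otherwise, only tables in the gethash whitelist pref may fall through
//      to the default NS_URLCLASSIFIERHASHCOMPLETER_CONTRACTID service;
//   3. any other table gets no completer, so its partial matches are never
//      confirmed (see nsUrlClassifierLookupCallback::LookupComplete).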
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
NS_IMETHODIMP
|
|
|
|
nsUrlClassifierDBService::Observe(nsISupports *aSubject, const char *aTopic,
|
|
|
|
const PRUnichar *aData)
|
|
|
|
{
|
2007-08-31 16:18:46 -07:00
|
|
|
if (!strcmp(aTopic, NS_PREFBRANCH_PREFCHANGE_TOPIC_ID)) {
|
|
|
|
nsresult rv;
|
|
|
|
nsCOMPtr<nsIPrefBranch> prefs(do_QueryInterface(aSubject, &rv));
|
|
|
|
NS_ENSURE_SUCCESS(rv, rv);
|
|
|
|
if (NS_LITERAL_STRING(CHECK_MALWARE_PREF).Equals(aData)) {
|
2011-09-28 23:19:26 -07:00
|
|
|
bool tmpbool;
|
2007-08-31 16:18:46 -07:00
|
|
|
rv = prefs->GetBoolPref(CHECK_MALWARE_PREF, &tmpbool);
|
|
|
|
mCheckMalware = NS_SUCCEEDED(rv) ? tmpbool : CHECK_MALWARE_DEFAULT;
|
2007-12-03 21:10:22 -08:00
|
|
|
} else if (NS_LITERAL_STRING(CHECK_PHISHING_PREF).Equals(aData)) {
|
2011-09-28 23:19:26 -07:00
|
|
|
bool tmpbool;
|
2007-12-03 21:10:22 -08:00
|
|
|
rv = prefs->GetBoolPref(CHECK_PHISHING_PREF, &tmpbool);
|
|
|
|
mCheckPhishing = NS_SUCCEEDED(rv) ? tmpbool : CHECK_PHISHING_DEFAULT;
|
2008-04-15 15:39:44 -07:00
|
|
|
} else if (NS_LITERAL_STRING(GETHASH_TABLES_PREF).Equals(aData)) {
|
|
|
|
mGethashWhitelist.Clear();
|
|
|
|
nsXPIDLCString val;
|
|
|
|
if (NS_SUCCEEDED(prefs->GetCharPref(GETHASH_TABLES_PREF, getter_Copies(val)))) {
|
|
|
|
SplitTables(val, mGethashWhitelist);
|
|
|
|
}
|
|
|
|
} else if (NS_LITERAL_STRING(CONFIRM_AGE_PREF).Equals(aData)) {
|
|
|
|
PRInt32 tmpint;
|
|
|
|
rv = prefs->GetIntPref(CONFIRM_AGE_PREF, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gFreshnessGuarantee, NS_SUCCEEDED(rv) ? tmpint : CONFIRM_AGE_DEFAULT_SEC);
|
2008-05-02 02:55:09 -07:00
|
|
|
} else if (NS_LITERAL_STRING(UPDATE_CACHE_SIZE_PREF).Equals(aData)) {
|
|
|
|
PRInt32 tmpint;
|
|
|
|
rv = prefs->GetIntPref(UPDATE_CACHE_SIZE_PREF, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gUpdateCacheSize, NS_SUCCEEDED(rv) ? tmpint : UPDATE_CACHE_SIZE_DEFAULT);
|
2011-09-08 13:15:37 -07:00
|
|
|
} else if (NS_LITERAL_STRING(LOOKUP_CACHE_SIZE_PREF).Equals(aData)) {
|
|
|
|
PRInt32 tmpint;
|
|
|
|
rv = prefs->GetIntPref(LOOKUP_CACHE_SIZE_PREF, &tmpint);
|
|
|
|
PR_ATOMIC_SET(&gLookupCacheSize, NS_SUCCEEDED(rv) ? tmpint : LOOKUP_CACHE_SIZE_DEFAULT);
|
2008-05-07 13:33:45 -07:00
|
|
|
} else if (NS_LITERAL_STRING(UPDATE_WORKING_TIME).Equals(aData)) {
|
|
|
|
PRInt32 tmpint;
|
|
|
|
rv = prefs->GetIntPref(UPDATE_WORKING_TIME, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gWorkingTimeThreshold,
|
|
|
|
NS_SUCCEEDED(rv) ? tmpint : UPDATE_WORKING_TIME_DEFAULT);
|
2008-05-07 13:33:45 -07:00
|
|
|
} else if (NS_LITERAL_STRING(UPDATE_DELAY_TIME).Equals(aData)) {
|
|
|
|
PRInt32 tmpint;
|
|
|
|
rv = prefs->GetIntPref(UPDATE_DELAY_TIME, &tmpint);
|
2011-03-28 12:58:49 -07:00
|
|
|
PR_ATOMIC_SET(&gDelayTime,
|
|
|
|
NS_SUCCEEDED(rv) ? tmpint : UPDATE_DELAY_TIME_DEFAULT);
|
2007-08-31 16:18:46 -07:00
|
|
|
}
|
|
|
|
} else if (!strcmp(aTopic, "profile-before-change") ||
|
|
|
|
!strcmp(aTopic, "xpcom-shutdown-threads")) {
|
|
|
|
Shutdown();
|
|
|
|
} else {
|
|
|
|
return NS_ERROR_UNEXPECTED;
|
|
|
|
}
|
2007-07-11 09:52:12 -07:00
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
return NS_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Join the background thread if it exists.
|
|
|
|
nsresult
|
|
|
|
nsUrlClassifierDBService::Shutdown()
|
|
|
|
{
|
2007-07-11 09:52:12 -07:00
|
|
|
LOG(("shutting down db service\n"));
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
if (!gDbBackgroundThread)
|
|
|
|
return NS_OK;
|
|
|
|
|
2008-01-29 12:57:18 -08:00
|
|
|
mCompleters.Clear();
|
|
|
|
|
2007-08-31 16:18:46 -07:00
|
|
|
nsCOMPtr<nsIPrefBranch2> prefs = do_GetService(NS_PREFSERVICE_CONTRACTID);
|
|
|
|
if (prefs) {
|
|
|
|
prefs->RemoveObserver(CHECK_MALWARE_PREF, this);
|
2007-12-03 21:10:22 -08:00
|
|
|
prefs->RemoveObserver(CHECK_PHISHING_PREF, this);
|
2008-04-15 15:39:44 -07:00
|
|
|
prefs->RemoveObserver(GETHASH_TABLES_PREF, this);
|
|
|
|
prefs->RemoveObserver(CONFIRM_AGE_PREF, this);
|
2007-08-31 16:18:46 -07:00
|
|
|
}
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
nsresult rv;
|
|
|
|
// First close the db connection.
|
|
|
|
if (mWorker) {
|
2008-05-07 13:33:45 -07:00
|
|
|
rv = mWorkerProxy->CancelUpdate();
|
2009-06-17 03:47:08 -07:00
|
|
|
NS_ASSERTION(NS_SUCCEEDED(rv), "failed to post cancel update event");
|
2011-09-08 13:16:59 -07:00
|
|
|
|
2008-01-12 14:22:03 -08:00
|
|
|
rv = mWorkerProxy->CloseDb();
|
|
|
|
NS_ASSERTION(NS_SUCCEEDED(rv), "failed to post close db event");
|
2007-03-22 10:30:00 -07:00
|
|
|
}
|
2008-01-12 14:22:03 -08:00
|
|
|
|
|
|
|
mWorkerProxy = nsnull;
|
|
|
|
|
2007-03-22 10:30:00 -07:00
|
|
|
LOG(("joining background thread"));
|
|
|
|
|
2011-10-17 07:59:28 -07:00
|
|
|
gShuttingDownThread = true;
|
2008-01-16 11:20:45 -08:00
|
|
|
|
|
|
|
nsIThread *backgroundThread = gDbBackgroundThread;
|
|
|
|
gDbBackgroundThread = nsnull;
|
|
|
|
backgroundThread->Shutdown();
|
|
|
|
NS_RELEASE(backgroundThread);
|
2007-03-22 10:30:00 -07:00
|
|
|
|
|
|
|
return NS_OK;
|
|
|
|
}
|
2011-08-19 08:50:04 -07:00
|
|
|
|
|
|
|
nsIThread*
|
|
|
|
nsUrlClassifierDBService::BackgroundThread()
|
|
|
|
{
|
|
|
|
return gDbBackgroundThread;
|
|
|
|
}
|
|
|
|
|