Bug 580892 - Checking 'clear history when minefield closes' is not clearing cache on shutdown. r=sdwilsh a=blocking

Marco Bonardo 2010-08-27 21:24:54 -04:00
parent d7342d297f
commit 0054893ef4
10 changed files with 208 additions and 136 deletions
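This commit replaces the single profile-before-change hook with an ordered chain of shutdown notifications, which the new test below verifies. As a rough sketch (assuming an xpcshell-style environment where Services and print are available; the topic strings are the ones this commit introduces), an observer can trace the new sequence:

Components.utils.import("resource://gre/modules/Services.jsm");

// The distinct shutdown topics touched by this commit, in firing order.
const SHUTDOWN_SEQUENCE = [
  "places-shutdown",               // last chance for external code to use Places
  "places-will-close-connection",  // internal: cleanup only (expiration runs here)
  "places-connection-closing",     // internal: final flush (nsPlacesDBFlush)
  "places-connection-closed"       // the connection is gone
];

let traceObserver = {
  observe: function(aSubject, aTopic, aData) {
    print("Places shutdown step: " + aTopic);
    if (aTopic == "places-connection-closed") {
      SHUTDOWN_SEQUENCE.forEach(function(topic) {
        Services.obs.removeObserver(traceObserver, topic);
      });
    }
  }
};
SHUTDOWN_SEQUENCE.forEach(function(topic) {
  Services.obs.addObserver(traceObserver, topic, false);
});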

View File

@ -225,8 +225,7 @@ BrowserGlue.prototype = {
Services.obs.removeObserver(this, "places-shutdown");
this._isPlacesShutdownObserver = false;
}
// places-shutdown is fired when the profile is about to disappear.
this._onProfileShutdown();
break;
case "idle":

View File

@ -47,20 +47,47 @@ const URIS = [
, "http://c.example3.com/"
];
const TOPIC_CONNECTION_CLOSED = "places-connection-closed";
let EXPECTED_NOTIFICATIONS = [
"places-shutdown"
, "places-will-close-connection"
, "places-connection-closing"
, "places-sync-finished"
, "places-expiration-finished"
, "places-sync-finished"
, "places-connection-closed"
];
const UNEXPECTED_NOTIFICATIONS = [
"xpcom-shutdown"
];
const URL = "ftp://localhost/clearHistoryOnShutdown/";
let notificationIndex = 0;
let notificationsObserver = {
observe: function observe(aSubject, aTopic, aData) {
print("Received notification: " + aTopic);
// Note that some of these notifications could arrive multiple times, for
// example in the case of sync; we allow that.
if (EXPECTED_NOTIFICATIONS[notificationIndex] != aTopic)
notificationIndex++;
do_check_eq(EXPECTED_NOTIFICATIONS[notificationIndex], aTopic);
if (aTopic != TOPIC_CONNECTION_CLOSED)
return;
getDistinctNotifications().forEach(
function (topic) Services.obs.removeObserver(notificationsObserver, topic)
);
print("Looking for uncleared stuff.");
let stmt = DBConn().createStatement(
"SELECT id FROM moz_places WHERE url = :page_url "
);
try {
@ -73,6 +100,9 @@ let expirationObserver = {
stmt.finalize();
}
// Check cache.
do_check_false(cacheExists(URL));
do_test_finished();
}
}
@ -104,11 +134,60 @@ function run_test() {
PlacesUtils.history.TRANSITION_TYPED,
false, 0);
});
print("Add cache.");
storeCache(URL, "testData");
print("Wait expiration.");
Services.obs.addObserver(expirationObserver,
PlacesUtils.TOPIC_EXPIRATION_FINISHED, false);
print("Simulate shutdown.");
PlacesUtils.history.QueryInterface(Ci.nsIObserver)
.observe(null, TOPIC_GLOBAL_SHUTDOWN, null);
print("Simulate and wait shutdown.");
getDistinctNotifications().forEach(
function (topic)
Services.obs.addObserver(notificationsObserver, topic, false)
);
shutdownPlaces();
}
function getDistinctNotifications() {
let ar = EXPECTED_NOTIFICATIONS.concat(UNEXPECTED_NOTIFICATIONS);
return [ar[i] for (i in ar) if (ar.slice(0, i).indexOf(ar[i]) == -1)];
}
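getDistinctNotifications() relies on a JavaScript 1.8 array comprehension to keep only the first occurrence of each topic. A comprehension-free equivalent, sketched against the same globals:

// Keep the first occurrence of each topic from both arrays.
function getDistinctNotificationsPlain() {
  let ar = EXPECTED_NOTIFICATIONS.concat(UNEXPECTED_NOTIFICATIONS);
  return ar.filter(function(topic, index) {
    return ar.indexOf(topic) == index;
  });
}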
function storeCache(aURL, aContent) {
let cache = Cc["@mozilla.org/network/cache-service;1"].
getService(Ci.nsICacheService);
let session = cache.createSession("FTP", Ci.nsICache.STORE_ANYWHERE,
Ci.nsICache.STREAM_BASED);
let cacheEntry =
session.openCacheEntry(aURL, Ci.nsICache.ACCESS_READ_WRITE, false);
cacheEntry.setMetaDataElement("servertype", "0");
var oStream = cacheEntry.openOutputStream(0);
var written = oStream.write(aContent, aContent.length);
if (written != aContent.length) {
do_throw("oStream.write has not written all data!\n" +
" Expected: " + written + "\n" +
" Actual: " + aContent.length + "\n");
}
oStream.close();
cacheEntry.close();
}
function cacheExists(aURL) {
let cache = Cc["@mozilla.org/network/cache-service;1"].
getService(Ci.nsICacheService);
let session = cache.createSession("FTP", Ci.nsICache.STORE_ANYWHERE,
Ci.nsICache.STREAM_BASED);
  // Declare outside the try block so the close() call below can see it.
  let cacheEntry;
  try {
    cacheEntry =
      session.openCacheEntry(aURL, Ci.nsICache.ACCESS_READ, true);
} catch (e) {
if (e.result == Cr.NS_ERROR_CACHE_KEY_NOT_FOUND ||
e.result == Cr.NS_ERROR_FAILURE)
return false;
// Throw the textual error description.
do_throw(e);
}
cacheEntry.close();
return true;
}
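Together these helpers let the test seed the cache before shutdown and probe it afterwards. A hedged usage sketch (the URL is arbitrary; do_check_* are the xpcshell assertions used above):

// Seed an entry and confirm it is present; after shutdown runs with
// "clear history on shutdown" enabled, the same probe must fail.
storeCache("ftp://localhost/example/", "some data");
do_check_true(cacheExists("ftp://localhost/example/"));
// ... later, once shutdownPlaces() has completed:
// do_check_false(cacheExists("ftp://localhost/example/"));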

View File

@ -65,6 +65,7 @@
#include "nsThreadUtils.h"
#include "nsAppDirectoryServiceDefs.h"
#include "nsMathUtils.h"
#include "mozIStorageCompletionCallback.h"
#include "nsNavBookmarks.h"
#include "nsAnnotationService.h"
@ -182,7 +183,8 @@ static const PRInt64 USECS_PER_DAY = LL_INIT(20, 500654080);
#endif
#define TOPIC_IDLE_DAILY "idle-daily"
#define TOPIC_PREF_CHANGED "nsPref:changed"
#define TOPIC_PROFILE_TEARDOWN "profile-change-teardown"
#define TOPIC_PROFILE_CHANGE "profile-before-change"
NS_IMPL_THREADSAFE_ADDREF(nsNavHistory)
NS_IMPL_THREADSAFE_RELEASE(nsNavHistory)
@ -312,8 +314,11 @@ protected:
class PlacesEvent : public nsRunnable
, public mozIStorageCompletionCallback
{
public:
NS_DECL_ISUPPORTS
PlacesEvent(const char* aTopic)
: mTopic(aTopic)
, mDoubleEnqueue(false)
@ -333,6 +338,12 @@ public:
return NS_OK;
}
NS_IMETHODIMP Complete()
{
Notify();
return NS_OK;
}
protected:
void Notify()
{
@ -351,6 +362,12 @@ protected:
bool mDoubleEnqueue;
};
NS_IMPL_ISUPPORTS2(
PlacesEvent
, mozIStorageCompletionCallback
, nsIRunnable
)
} // anonymous namespace
@ -475,7 +492,8 @@ nsNavHistory::Init()
nsCOMPtr<nsIObserverService> obsSvc =
do_GetService(NS_OBSERVERSERVICE_CONTRACTID);
if (obsSvc) {
(void)obsSvc->AddObserver(this, TOPIC_GLOBAL_SHUTDOWN, PR_FALSE);
(void)obsSvc->AddObserver(this, TOPIC_PROFILE_TEARDOWN, PR_FALSE);
(void)obsSvc->AddObserver(this, TOPIC_PROFILE_CHANGE, PR_FALSE);
(void)obsSvc->AddObserver(this, TOPIC_IDLE_DAILY, PR_FALSE);
(void)obsSvc->AddObserver(this, NS_PRIVATE_BROWSING_SWITCH_TOPIC, PR_FALSE);
#ifdef MOZ_XUL
@ -5668,69 +5686,73 @@ nsNavHistory::Observe(nsISupports *aSubject, const char *aTopic,
{
NS_ASSERTION(NS_IsMainThread(), "This can only be called on the main thread");
if (strcmp(aTopic, TOPIC_PROFILE_TEARDOWN) == 0) {
nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
if (!os) {
NS_WARNING("Unable to shutdown Places: Observer Service unavailable.");
return NS_OK;
}
(void)os->RemoveObserver(this, TOPIC_GLOBAL_SHUTDOWN);
(void)os->RemoveObserver(this, TOPIC_PROFILE_TEARDOWN);
(void)os->RemoveObserver(this, NS_PRIVATE_BROWSING_SWITCH_TOPIC);
(void)os->RemoveObserver(this, TOPIC_IDLE_DAILY);
#ifdef MOZ_XUL
(void)os->RemoveObserver(this, TOPIC_AUTOCOMPLETE_FEEDBACK_INCOMING);
#endif
// If shutdown happens in the same scope as the service init, we must
// notify the places-init-complete topic immediately. Otherwise, since the
// notification is enqueued and the event loop won't spin, it could fire
// after xpcom-shutdown, when the connection no longer exists.
nsCOMPtr<nsISimpleEnumerator> e;
nsresult rv = os->EnumerateObservers(TOPIC_PLACES_INIT_COMPLETE,
getter_AddRefs(e));
if (NS_SUCCEEDED(rv) && e) {
nsCOMPtr<nsIObserver> observer;
PRBool loop = PR_TRUE;
while(NS_SUCCEEDED(e->HasMoreElements(&loop)) && loop) {
e->GetNext(getter_AddRefs(observer));
(void)observer->Observe(observer, TOPIC_PLACES_INIT_COMPLETE, nsnull);
}
}
// Notify all Places users that we are about to shutdown.
(void)os->NotifyObservers(nsnull, TOPIC_PLACES_SHUTDOWN, nsnull);
}
else if (strcmp(aTopic, TOPIC_PROFILE_CHANGE) == 0) {
// Fire internal shutdown notifications.
nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
if (os) {
(void)os->RemoveObserver(this, TOPIC_PROFILE_CHANGE);
// Firing both notifications back to back lets observers enqueue their
// cleanup tasks without dispatching events to the main thread: there is
// no guarantee that the event loop will spin again before xpcom-shutdown,
// see bug 580892.
(void)os->NotifyObservers(nsnull, TOPIC_PLACES_WILL_CLOSE_CONNECTION, nsnull);
(void)os->NotifyObservers(nsnull, TOPIC_PLACES_CONNECTION_CLOSING, nsnull);
}
// Operations that are unlikely to create issues for implementers should go
// in profile teardown. Anything that must run really late should be
// here instead.
nsCOMPtr<nsIObserverService> os = mozilla::services::GetObserverService();
if (os)
(void)os->RemoveObserver(this, TOPIC_PLACES_TEARDOWN);
// Don't even try to notify observers from this point on, the category
// cache would init services that could try to use our APIs.
mCanNotify = false;
// Stop observing preferences changes.
if (mPrefBranch)
mPrefBranch->RemoveObserver("", this);
// Force a preferences save.
nsCOMPtr<nsIPrefService> prefService = do_QueryInterface(mPrefBranch);
if (prefService)
prefService->SavePrefFile(nsnull);
// Finalize all statements.
nsresult rv = FinalizeInternalStatements();
NS_ENSURE_SUCCESS(rv, rv);
// Finally, close the connection.
nsRefPtr<PlacesEvent> closeListener =
new PlacesEvent(TOPIC_PLACES_CONNECTION_CLOSED);
(void)mDBConn->AsyncClose(closeListener);
}
#ifdef MOZ_XUL

View File

@ -93,12 +93,22 @@
// Fired after autocomplete feedback has been updated.
#define TOPIC_AUTOCOMPLETE_FEEDBACK_UPDATED "places-autocomplete-feedback-updated"
#endif
// Fired when Places is shutting down. Any code should stop accessing Places
// APIs after this notification. If you need to listen for Places shutdown,
// use only this notification; the following ones are intended for internal
// Places use only.
#define TOPIC_PLACES_SHUTDOWN "places-shutdown"
// For internal use only. Fired when the connection is about to be closed;
// only cleanup tasks should run at this stage. Nothing should be added to
// the database, and no APIs should be called.
#define TOPIC_PLACES_WILL_CLOSE_CONNECTION "places-will-close-connection"
// For internal use only. Fired as the last notification before the
// connection is gone.
#define TOPIC_PLACES_CONNECTION_CLOSING "places-connection-closing"
// Fired when the connection is gone; nothing will work from this point on.
#define TOPIC_PLACES_CONNECTION_CLOSED "places-connection-closed"
// Fired when Places finds a locked database while initializing.
#define TOPIC_DATABASE_LOCKED "places-database-locked"
// Fired after Places has initialized.
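For code outside Places, the comments above reduce to a single rule: observe places-shutdown and nothing later. A minimal sketch of a well-behaved consumer (the _canUsePlaces flag is illustrative):

Components.utils.import("resource://gre/modules/Services.jsm");

let placesConsumer = {
  _canUsePlaces: true,
  observe: function(aSubject, aTopic, aData) {
    if (aTopic == "places-shutdown") {
      // Stop all Places API access; the remaining topics are internal-only.
      this._canUsePlaces = false;
      Services.obs.removeObserver(this, "places-shutdown");
    }
  }
};
Services.obs.addObserver(placesConsumer, "places-shutdown", false);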

View File

@ -46,10 +46,8 @@ const Ci = Components.interfaces;
const Cr = Components.results;
const Cu = Components.utils;
// A database flush should be the last operation during the shutdown process.
const kTopicShutdown = "places-connection-closing";
const kSyncFinished = "places-sync-finished";
const kDebugStopSync = "places-debug-stop-sync";
const kDebugStartSync = "places-debug-start-sync";
@ -125,22 +123,9 @@ nsPlacesDBFlush.prototype = {
this._timer = null;
}
// Flush any remaining changes to the disk tables.
this._flushWithQueries([kQuerySyncPlacesId, kQuerySyncHistoryVisitsId]);
this._finalizeInternalStatements();
}
else if (aTopic == "nsPref:changed" && aData == kSyncPrefName) {
// Get the new pref value, and then update our timer

View File

@ -80,7 +80,8 @@ const nsPlacesExpirationFactory = {
////////////////////////////////////////////////////////////////////////////////
//// Constants
const TOPIC_SHUTDOWN = "places-shutdown";
// Last expiration step should run before the final sync.
const TOPIC_SHUTDOWN = "places-will-close-connection";
const TOPIC_PREF_CHANGED = "nsPref:changed";
const TOPIC_DEBUG_START_EXPIRATION = "places-debug-start-expiration";
const TOPIC_EXPIRATION_FINISHED = "places-expiration-finished";
@ -522,24 +523,16 @@ nsPlacesExpiration.prototype = {
this._timer = null;
}
// If we ran a clearHistory recently, or the database is not dirty, we don't
// want to spend time expiring on shutdown. In such a case just expire
// session annotations.
let hasRecentClearHistory =
Date.now() - this._lastClearHistoryTime <
SHUTDOWN_WITH_RECENT_CLEARHISTORY_TIMEOUT_SECONDS * 1000;
let action = hasRecentClearHistory ||
this.status != STATUS.DIRTY ? ACTION.CLEAN_SHUTDOWN
: ACTION.SHUTDOWN;
this._expireWithActionAndLimit(action, LIMIT.LARGE);
this._finalizeInternalStatements();
}
else if (aTopic == TOPIC_PREF_CHANGED) {
this._loadPrefs();

View File

@ -57,7 +57,7 @@ let (commonFile = do_get_file("../head_common.js", false)) {
function shutdownExpiration()
{
let expire = Cc["@mozilla.org/places/expiration;1"].getService(Ci.nsIObserver);
expire.observe(null, "places-will-close-connection", null);
}
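The flush component can be driven by hand the same way, since it now performs its final flush at places-connection-closing. A sketch, assuming nsPlacesDBFlush keeps its "@mozilla.org/places/sync;1" contract ID and the usual Cc/Ci shorthands:

function shutdownDBFlush()
{
  let flusher = Cc["@mozilla.org/places/sync;1"].getService(Ci.nsIObserver);
  flusher.observe(null, "places-connection-closing", null);
}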

View File

@ -40,9 +40,6 @@ const NS_APP_PROFILE_DIR_STARTUP = "ProfDS";
const NS_APP_HISTORY_50_FILE = "UHist";
const NS_APP_BOOKMARKS_50_FILE = "BMarks";
// Shortcuts to transactions type.
const TRANSITION_LINK = Ci.nsINavHistoryService.TRANSITION_LINK;
const TRANSITION_TYPED = Ci.nsINavHistoryService.TRANSITION_TYPED;
@ -347,11 +344,11 @@ function waitForClearHistory(aCallback) {
/**
* Simulates a Places shutdown.
*/
function shutdownPlaces(aKeepAliveConnection)
{
let hs = Cc["@mozilla.org/browser/nav-history-service;1"].
getService(Ci.nsIObserver);
hs.observe(null, TOPIC_GLOBAL_SHUTDOWN, null);
let hs = PlacesUtils.history.QueryInterface(Ci.nsIObserver);
hs.observe(null, "profile-change-teardown", null);
hs.observe(null, "profile-before-change", null);
}
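A usage sketch: a test registers its observers first, then calls the helper, which fires both profile notifications synchronously (myShutdownObserver is a hypothetical observer):

// Listen for the final topic, then simulate the two-phase shutdown.
Services.obs.addObserver(myShutdownObserver, "places-connection-closed", false);
shutdownPlaces();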

View File

@ -50,7 +50,7 @@ const TEST_URI = "http://test.com/";
const kSyncPrefName = "syncDBTableIntervalInSecs";
const SYNC_INTERVAL = 600; // ten minutes
const TOPIC_CONNECTION_CLOSED = "places-connection-closed";
var historyObserver = {
onVisit: function(aURI, aVisitId, aTime, aSessionId, aReferringId,
@ -64,9 +64,8 @@ hs.addObserver(historyObserver, false);
var observer = {
visitId: -1,
observe: function(aSubject, aTopic, aData) {
if (aTopic == TOPIC_CONNECTION_CLOSED) {
os.removeObserver(this, aTopic);
// visit id must be valid
do_check_neq(this.visitId, -1);
@ -75,7 +74,7 @@ var observer = {
}
}
}
os.addObserver(observer, TOPIC_CONNECTION_CLOSED, false);
function run_test()
{

View File

@ -53,10 +53,7 @@ const TEST_URI = "http://test.com/";
const PREF_SYNC_INTERVAL = "syncDBTableIntervalInSecs";
const SYNC_INTERVAL = 600; // ten minutes
const TOPIC_SYNC_FINISHED = "places-sync-finished";
const TOPIC_CONNECTION_CLOSED = "places-connection-closed";
var historyObserver = {
visitId: -1,
@ -82,6 +79,7 @@ var observer = {
// The first sync is due to the insert bookmark.
// Simulate clearing private data just before shutdown.
bh.removeAllPages();
os.addObserver(shutdownObserver, TOPIC_CONNECTION_CLOSED, false);
// Immediately notify shutdown.
shutdownPlaces();
return;
@ -94,27 +92,16 @@ var observer = {
do_check_neq(historyObserver.visitId, -1);
// History must have been cleared.
do_check_true(historyObserver.cleared);
}
}
}
os.addObserver(observer, TOPIC_SYNC_FINISHED, false);
let shutdownObserver = {
observe: function(aSubject, aTopic, aData) {
os.removeObserver(this, aTopic);
do_check_false(PlacesUtils.history.QueryInterface(Ci.nsPIPlacesDatabase)
.DBConnection.connectionReady);
let dbConn = DBConn();
do_check_true(dbConn.connectionReady);
@ -144,6 +131,7 @@ function check_results() {
do_test_finished();
});
}
}
function run_test()