2010-06-16 14:30:08 -07:00
|
|
|
Cu.import("resource://services-sync/base_records/crypto.js");
|
|
|
|
Cu.import("resource://services-sync/base_records/wbo.js");
|
|
|
|
Cu.import("resource://services-sync/constants.js");
|
|
|
|
Cu.import("resource://services-sync/engines.js");
|
|
|
|
Cu.import("resource://services-sync/identity.js");
|
|
|
|
Cu.import("resource://services-sync/resource.js");
|
|
|
|
Cu.import("resource://services-sync/stores.js");
|
|
|
|
Cu.import("resource://services-sync/trackers.js");
|
|
|
|
Cu.import("resource://services-sync/util.js");
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
/*
 * A fake engine implementation.
 *
 * Complete with record, store, and tracker implementations.
 */
2010-11-29 16:41:17 -08:00
|
|
|
function SteamRecord(collection, id) {
|
|
|
|
CryptoWrapper.call(this, collection, id);
|
2010-06-01 15:07:50 -07:00
|
|
|
}
|
|
|
|
SteamRecord.prototype = {
|
|
|
|
__proto__: CryptoWrapper.prototype
|
|
|
|
};
|
|
|
|
Utils.deferGetSet(SteamRecord, "cleartext", ["denomination"]);
|
|
|
|
|
|
|
|
/**
 * In-memory store for the fake Steam engine.
 * Items live in a plain object mapping record id -> denomination string.
 */
function SteamStore() {
  Store.call(this, "Steam");
  this.items = {};
}
SteamStore.prototype = {
  __proto__: Store.prototype,

  // Insert a new item keyed by the record's id.
  create: function(record) {
    this.items[record.id] = record.denomination;
  },

  // Drop the item for this record, if present.
  remove: function(record) {
    delete this.items[record.id];
  },

  // Overwrite the item with the record's denomination.
  update: function(record) {
    this.items[record.id] = record.denomination;
  },

  // True if we have an item under this id.
  itemExists: function(id) {
    return (id in this.items);
  },

  // Build a SteamRecord for the given id; unknown ids get a
  // placeholder denomination.
  createRecord: function(id, collection) {
    let record = new SteamRecord(collection, id);
    record.denomination = this.items[id] || "Data for new record: " + id;
    return record;
  },

  // Move an item from one id to another.
  changeItemID: function(oldID, newID) {
    this.items[newID] = this.items[oldID];
    delete this.items[oldID];
  },

  // Return all known ids as an object of {id: true} entries.
  getAllIDs: function() {
    let ids = {};
    for (let guid in this.items)
      ids[guid] = true;
    return ids;
  },

  // Throw away every item.
  wipe: function() {
    this.items = {};
  }
};
|
|
|
|
|
|
|
|
function SteamTracker() {
|
|
|
|
Tracker.call(this, "Steam");
|
|
|
|
}
|
|
|
|
SteamTracker.prototype = {
|
|
|
|
__proto__: Tracker.prototype
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * The fake Steam engine, tying together record, store, and tracker.
 */
function SteamEngine() {
  SyncEngine.call(this, "Steam");
}
SteamEngine.prototype = {
  __proto__: SyncEngine.prototype,
  _storeObj: SteamStore,
  _trackerObj: SteamTracker,
  _recordObj: SteamRecord,

  /**
   * A record is a dupe of an existing item if it carries the same
   * denomination. Returns the existing item's id, or undefined if no
   * dupe is found.
   */
  _findDupe: function(item) {
    // Use a plain for-in loop instead of the non-standard (and since
    // removed) Iterator() helper with destructuring; behavior is the
    // same: return the first id whose value matches.
    for (let id in this._store.items) {
      if (item.denomination == this._store.items[id]) {
        return id;
      }
    }
  }
};
|
|
|
|
|
|
|
|
|
|
|
|
function makeSteamEngine() {
|
|
|
|
return new SteamEngine();
|
|
|
|
}
|
|
|
|
|
|
|
|
var syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
|
Backed out changesets 4d16d58becaf, 680d3557cafe, 13ea9fc20ed2, 1b2d4ba850a9, 81d8bf53ab01, 3e74c1c15b63, a29a2a0ae764, d2fd62ffd88e, 183be003b312, faa6ce5a0a20, 3ee27049cd1a. a=permaorange
2010-12-07 03:50:55 -08:00
|
|
|
|
|
|
|
/*
 * Test setup helpers
 */

// Start an HTTP server with the given path handlers, always adding a
// handler for an (initially empty) meta/global WBO, which every sync
// startup fetches. Mutates and forwards `handlers` to httpd_setup.
function sync_httpd_setup(handlers) {
  let metaGlobal = new ServerWBO('global', {});
  handlers["/1.0/foo/storage/meta/global"] = metaGlobal.handler();
  return httpd_setup(handlers);
}
|
|
|
|
|
|
|
|
// Turn WBO cleartext into "encrypted" payload as it goes over the wire.
// With this fake crypto the ciphertext IS the cleartext (JSON-stringified
// first if an object was passed in).
function encryptPayload(cleartext) {
  let data = cleartext;
  if (typeof data == "object")
    data = JSON.stringify(data);

  return {
    encryption: "../crypto/steam",
    ciphertext: data, // ciphertext == cleartext with fake crypto
    IV: "irrelevant",
    hmac: Utils.sha256HMAC(data, null)
  };
}
|
|
|
|
|
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
/*
 * Tests
 *
 * SyncEngine._sync() is divided into four rather independent steps:
 *
 * - _syncStartup()
 * - _processIncoming()
 * - _uploadOutgoing()
 * - _syncFinish()
 *
 * In the spirit of unit testing, these are tested individually for
 * different scenarios below.
 */
|
|
|
|
|
|
|
|
function test_syncStartup_emptyOrOutdatedGlobalsResetsSync() {
|
2010-11-29 16:41:17 -08:00
|
|
|
_("SyncEngine._syncStartup resets sync and wipes server data if there's no or an outdated global record");
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
Svc.Prefs.set("clusterURL", "http://localhost:8080/");
|
2010-07-14 09:36:13 -07:00
|
|
|
Svc.Prefs.set("username", "foo");
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
// Some server side data that's going to be wiped
|
|
|
|
let collection = new ServerCollection();
|
|
|
|
collection.wbos.flying = new ServerWBO(
|
|
|
|
'flying', encryptPayload({id: 'flying',
|
|
|
|
denomination: "LNER Class A3 4472"}));
|
|
|
|
collection.wbos.scotsman = new ServerWBO(
|
|
|
|
'scotsman', encryptPayload({id: 'scotsman',
|
|
|
|
denomination: "Flying Scotsman"}));
|
|
|
|
|
|
|
|
let server = sync_httpd_setup({
|
|
|
|
"/1.0/foo/storage/steam": collection.handler()
|
|
|
|
});
|
2010-08-02 22:37:13 -07:00
|
|
|
do_test_pending();
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
let engine = makeSteamEngine();
|
|
|
|
engine._store.items = {rekolok: "Rekonstruktionslokomotive"};
|
|
|
|
try {
|
|
|
|
|
|
|
|
// Confirm initial environment
|
|
|
|
do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
|
|
|
|
let metaGlobal = Records.get(engine.metaURL);
|
|
|
|
do_check_eq(metaGlobal.payload.engines, undefined);
|
|
|
|
do_check_true(!!collection.wbos.flying.payload);
|
|
|
|
do_check_true(!!collection.wbos.scotsman.payload);
|
|
|
|
|
|
|
|
engine.lastSync = Date.now() / 1000;
|
2010-11-11 11:00:35 -08:00
|
|
|
engine.lastSyncLocal = Date.now();
|
2010-11-29 16:41:17 -08:00
|
|
|
|
|
|
|
// Trying to prompt a wipe -- we no longer track CryptoMeta per engine,
|
|
|
|
// so it has nothing to check.
|
2010-06-01 15:07:50 -07:00
|
|
|
engine._syncStartup();
|
|
|
|
|
|
|
|
// The meta/global WBO has been filled with data about the engine
|
|
|
|
let engineData = metaGlobal.payload.engines["steam"];
|
|
|
|
do_check_eq(engineData.version, engine.version);
|
|
|
|
do_check_eq(engineData.syncID, engine.syncID);
|
|
|
|
|
|
|
|
// Sync was reset and server data was wiped
|
|
|
|
do_check_eq(engine.lastSync, 0);
|
|
|
|
do_check_eq(collection.wbos.flying.payload, undefined);
|
|
|
|
do_check_eq(collection.wbos.scotsman.payload, undefined);
|
|
|
|
|
2010-06-14 15:16:53 -07:00
|
|
|
} finally {
|
2010-08-02 22:37:13 -07:00
|
|
|
server.stop(do_test_finished);
|
2010-06-14 15:16:53 -07:00
|
|
|
Svc.Prefs.resetBranch("");
|
|
|
|
Records.clearCache();
|
|
|
|
syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
}
|
|
|
|
}
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
// Scenario: meta/global says the steam engine data on the server was
// written by a much newer engine version (23456). _syncStartup() must
// bail out with a VERSION_OUT_OF_DATE failure rather than touch it.
function test_syncStartup_serverHasNewerVersion() {
  // Fixed truncated log message: describe what this test verifies.
  _("SyncEngine._syncStartup fails with VERSION_OUT_OF_DATE if the server has a newer engine version");

  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");
  let global = new ServerWBO('global', {engines: {steam: {version: 23456}}});
  let server = httpd_setup({
      "/1.0/foo/storage/meta/global": global.handler()
  });
  do_test_pending();

  let engine = makeSteamEngine();
  try {

    // The server has a newer version of the data than our engine can
    // handle. That should give us an exception.
    let error;
    try {
      engine._syncStartup();
    } catch (ex) {
      error = ex;
    }
    do_check_eq(error.failureCode, VERSION_OUT_OF_DATE);

  } finally {
    // Standard teardown (see test_syncStartup_emptyOrOutdatedGlobalsResetsSync).
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
function test_syncStartup_syncIDMismatchResetsClient() {
|
|
|
|
_("SyncEngine._syncStartup resets sync if syncIDs don't match");
|
|
|
|
|
|
|
|
Svc.Prefs.set("clusterURL", "http://localhost:8080/");
|
2010-07-14 09:36:13 -07:00
|
|
|
Svc.Prefs.set("username", "foo");
|
2010-11-29 16:41:17 -08:00
|
|
|
let server = sync_httpd_setup({});
|
2010-08-02 22:37:13 -07:00
|
|
|
do_test_pending();
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
// global record with a different syncID than our engine has
|
|
|
|
let engine = makeSteamEngine();
|
|
|
|
let global = new ServerWBO('global',
|
|
|
|
{engines: {steam: {version: engine.version,
|
|
|
|
syncID: 'foobar'}}});
|
|
|
|
server.registerPathHandler("/1.0/foo/storage/meta/global", global.handler());
|
|
|
|
|
|
|
|
try {
|
|
|
|
|
|
|
|
// Confirm initial environment
|
|
|
|
do_check_eq(engine.syncID, 'fake-guid-0');
|
|
|
|
do_check_eq(engine._tracker.changedIDs["rekolok"], undefined);
|
|
|
|
|
|
|
|
engine.lastSync = Date.now() / 1000;
|
2010-11-11 11:00:35 -08:00
|
|
|
engine.lastSyncLocal = Date.now();
|
2010-06-01 15:07:50 -07:00
|
|
|
engine._syncStartup();
|
|
|
|
|
|
|
|
// The engine has assumed the server's syncID
|
|
|
|
do_check_eq(engine.syncID, 'foobar');
|
|
|
|
|
|
|
|
// Sync was reset
|
|
|
|
do_check_eq(engine.lastSync, 0);
|
|
|
|
|
|
|
|
} finally {
|
2010-08-02 22:37:13 -07:00
|
|
|
server.stop(do_test_finished);
|
2010-06-04 08:38:38 -07:00
|
|
|
Svc.Prefs.resetBranch("");
|
2010-06-01 15:07:50 -07:00
|
|
|
Records.clearCache();
|
|
|
|
syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
function test_processIncoming_emptyServer() {
|
|
|
|
_("SyncEngine._processIncoming working with an empty server backend");
|
|
|
|
|
|
|
|
Svc.Prefs.set("clusterURL", "http://localhost:8080/");
|
2010-07-14 09:36:13 -07:00
|
|
|
Svc.Prefs.set("username", "foo");
|
2010-06-01 15:07:50 -07:00
|
|
|
let collection = new ServerCollection();
|
|
|
|
|
|
|
|
let server = sync_httpd_setup({
|
|
|
|
"/1.0/foo/storage/steam": collection.handler()
|
|
|
|
});
|
2010-08-02 22:37:13 -07:00
|
|
|
do_test_pending();
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
let engine = makeSteamEngine();
|
|
|
|
try {
|
|
|
|
|
|
|
|
// Merely ensure that this code path is run without any errors
|
|
|
|
engine._processIncoming();
|
|
|
|
do_check_eq(engine.lastSync, 0);
|
|
|
|
|
|
|
|
} finally {
|
2010-08-02 22:37:13 -07:00
|
|
|
server.stop(do_test_finished);
|
2010-06-04 08:38:38 -07:00
|
|
|
Svc.Prefs.resetBranch("");
|
2010-06-01 15:07:50 -07:00
|
|
|
Records.clearCache();
|
|
|
|
syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
function test_processIncoming_createFromServer() {
|
|
|
|
_("SyncEngine._processIncoming creates new records from server data");
|
|
|
|
|
|
|
|
Svc.Prefs.set("clusterURL", "http://localhost:8080/");
|
2010-07-14 09:36:13 -07:00
|
|
|
Svc.Prefs.set("username", "foo");
|
2010-11-29 16:41:17 -08:00
|
|
|
|
|
|
|
CollectionKeys.generateNewKeys();
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
// Some server records that will be downloaded
|
|
|
|
let collection = new ServerCollection();
|
|
|
|
collection.wbos.flying = new ServerWBO(
|
|
|
|
'flying', encryptPayload({id: 'flying',
|
|
|
|
denomination: "LNER Class A3 4472"}));
|
|
|
|
collection.wbos.scotsman = new ServerWBO(
|
|
|
|
'scotsman', encryptPayload({id: 'scotsman',
|
|
|
|
denomination: "Flying Scotsman"}));
|
|
|
|
|
2010-10-04 13:39:08 -07:00
|
|
|
// Two pathological cases involving relative URIs gone wrong.
|
|
|
|
collection.wbos['../pathological'] = new ServerWBO(
|
|
|
|
'../pathological', encryptPayload({id: '../pathological',
|
|
|
|
denomination: "Pathological Case"}));
|
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
let server = sync_httpd_setup({
|
|
|
|
"/1.0/foo/storage/steam": collection.handler(),
|
|
|
|
"/1.0/foo/storage/steam/flying": collection.wbos.flying.handler(),
|
|
|
|
"/1.0/foo/storage/steam/scotsman": collection.wbos.scotsman.handler()
|
|
|
|
});
|
2010-08-02 22:37:13 -07:00
|
|
|
do_test_pending();
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
let engine = makeSteamEngine();
|
2010-11-11 11:00:35 -08:00
|
|
|
let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
|
|
|
|
meta_global.payload.engines = {steam: {version: engine.version,
|
|
|
|
syncID: engine.syncID}};
|
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
try {
|
|
|
|
|
|
|
|
// Confirm initial environment
|
|
|
|
do_check_eq(engine.lastSync, 0);
|
|
|
|
do_check_eq(engine.lastModified, null);
|
|
|
|
do_check_eq(engine._store.items.flying, undefined);
|
|
|
|
do_check_eq(engine._store.items.scotsman, undefined);
|
2010-10-04 13:39:08 -07:00
|
|
|
do_check_eq(engine._store.items['../pathological'], undefined);
|
2010-06-01 15:07:50 -07:00
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
engine._syncStartup();
|
2010-06-01 15:07:50 -07:00
|
|
|
engine._processIncoming();
|
|
|
|
|
|
|
|
// Timestamps of last sync and last server modification are set.
|
|
|
|
do_check_true(engine.lastSync > 0);
|
|
|
|
do_check_true(engine.lastModified > 0);
|
|
|
|
|
|
|
|
// Local records have been created from the server data.
|
|
|
|
do_check_eq(engine._store.items.flying, "LNER Class A3 4472");
|
|
|
|
do_check_eq(engine._store.items.scotsman, "Flying Scotsman");
|
2010-10-04 13:39:08 -07:00
|
|
|
do_check_eq(engine._store.items['../pathological'], "Pathological Case");
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
} finally {
|
2010-08-02 22:37:13 -07:00
|
|
|
server.stop(do_test_finished);
|
2010-06-04 08:38:38 -07:00
|
|
|
Svc.Prefs.resetBranch("");
|
2010-06-01 15:07:50 -07:00
|
|
|
Records.clearCache();
|
|
|
|
syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
function test_processIncoming_reconcile() {
|
|
|
|
_("SyncEngine._processIncoming updates local records");
|
|
|
|
|
|
|
|
Svc.Prefs.set("clusterURL", "http://localhost:8080/");
|
2010-07-14 09:36:13 -07:00
|
|
|
Svc.Prefs.set("username", "foo");
|
2010-06-01 15:07:50 -07:00
|
|
|
let collection = new ServerCollection();
|
|
|
|
|
|
|
|
// This server record is newer than the corresponding client one,
|
|
|
|
// so it'll update its data.
|
|
|
|
collection.wbos.newrecord = new ServerWBO(
|
|
|
|
'newrecord', encryptPayload({id: 'newrecord',
|
|
|
|
denomination: "New stuff..."}));
|
|
|
|
|
|
|
|
// This server record is newer than the corresponding client one,
|
|
|
|
// so it'll update its data.
|
|
|
|
collection.wbos.newerserver = new ServerWBO(
|
|
|
|
'newerserver', encryptPayload({id: 'newerserver',
|
|
|
|
denomination: "New data!"}));
|
|
|
|
|
|
|
|
// This server record is 2 mins older than the client counterpart
|
|
|
|
// but identical to it, so we're expecting the client record's
|
|
|
|
// changedID to be reset.
|
|
|
|
collection.wbos.olderidentical = new ServerWBO(
|
|
|
|
'olderidentical', encryptPayload({id: 'olderidentical',
|
|
|
|
denomination: "Older but identical"}));
|
|
|
|
collection.wbos.olderidentical.modified -= 120;
|
|
|
|
|
|
|
|
// This item simply has different data than the corresponding client
|
|
|
|
// record (which is unmodified), so it will update the client as well
|
|
|
|
collection.wbos.updateclient = new ServerWBO(
|
|
|
|
'updateclient', encryptPayload({id: 'updateclient',
|
|
|
|
denomination: "Get this!"}));
|
|
|
|
|
|
|
|
// This is a dupe of 'original' but with a longer GUID, so we're
|
|
|
|
// expecting it to be marked for deletion from the server
|
|
|
|
collection.wbos.duplication = new ServerWBO(
|
|
|
|
'duplication', encryptPayload({id: 'duplication',
|
|
|
|
denomination: "Original Entry"}));
|
|
|
|
|
|
|
|
// This is a dupe of 'long_original' but with a shorter GUID, so we're
|
|
|
|
// expecting it to replace 'long_original'.
|
|
|
|
collection.wbos.dupe = new ServerWBO(
|
|
|
|
'dupe', encryptPayload({id: 'dupe',
|
|
|
|
denomination: "Long Original Entry"}));
|
|
|
|
|
|
|
|
// This record is marked as deleted, so we're expecting the client
|
|
|
|
// record to be removed.
|
|
|
|
collection.wbos.nukeme = new ServerWBO(
|
|
|
|
'nukeme', encryptPayload({id: 'nukeme',
|
|
|
|
denomination: "Nuke me!",
|
|
|
|
deleted: true}));
|
|
|
|
|
|
|
|
let server = sync_httpd_setup({
|
|
|
|
"/1.0/foo/storage/steam": collection.handler()
|
|
|
|
});
|
2010-08-02 22:37:13 -07:00
|
|
|
do_test_pending();
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
let engine = makeSteamEngine();
|
|
|
|
engine._store.items = {newerserver: "New data, but not as new as server!",
|
|
|
|
olderidentical: "Older but identical",
|
|
|
|
updateclient: "Got data?",
|
|
|
|
original: "Original Entry",
|
|
|
|
long_original: "Long Original Entry",
|
|
|
|
nukeme: "Nuke me!"};
|
|
|
|
// Make this record 1 min old, thus older than the one on the server
|
|
|
|
engine._tracker.addChangedID('newerserver', Date.now()/1000 - 60);
|
|
|
|
// This record has been changed 2 mins later than the one on the server
|
|
|
|
engine._tracker.addChangedID('olderidentical', Date.now()/1000);
|
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
|
|
|
|
meta_global.payload.engines = {steam: {version: engine.version,
|
|
|
|
syncID: engine.syncID}};
|
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
try {
|
|
|
|
|
|
|
|
// Confirm initial environment
|
|
|
|
do_check_eq(engine._store.items.newrecord, undefined);
|
|
|
|
do_check_eq(engine._store.items.newerserver, "New data, but not as new as server!");
|
|
|
|
do_check_eq(engine._store.items.olderidentical, "Older but identical");
|
|
|
|
do_check_eq(engine._store.items.updateclient, "Got data?");
|
|
|
|
do_check_eq(engine._store.items.nukeme, "Nuke me!");
|
|
|
|
do_check_true(engine._tracker.changedIDs['olderidentical'] > 0);
|
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
engine._syncStartup();
|
2010-06-01 15:07:50 -07:00
|
|
|
engine._processIncoming();
|
|
|
|
|
|
|
|
// Timestamps of last sync and last server modification are set.
|
|
|
|
do_check_true(engine.lastSync > 0);
|
|
|
|
do_check_true(engine.lastModified > 0);
|
|
|
|
|
|
|
|
// The new record is created.
|
|
|
|
do_check_eq(engine._store.items.newrecord, "New stuff...");
|
|
|
|
|
|
|
|
// The 'newerserver' record is updated since the server data is newer.
|
|
|
|
do_check_eq(engine._store.items.newerserver, "New data!");
|
|
|
|
|
|
|
|
// The data for 'olderidentical' is identical on the server, so
|
|
|
|
// it's no longer marked as changed anymore.
|
|
|
|
do_check_eq(engine._store.items.olderidentical, "Older but identical");
|
|
|
|
do_check_eq(engine._tracker.changedIDs['olderidentical'], undefined);
|
|
|
|
|
|
|
|
// Updated with server data.
|
|
|
|
do_check_eq(engine._store.items.updateclient, "Get this!");
|
|
|
|
|
|
|
|
// The dupe with the shorter ID is kept, the longer one is slated
|
|
|
|
// for deletion.
|
|
|
|
do_check_eq(engine._store.items.long_original, undefined);
|
|
|
|
do_check_eq(engine._store.items.dupe, "Long Original Entry");
|
|
|
|
do_check_neq(engine._delete.ids.indexOf('duplication'), -1);
|
|
|
|
|
|
|
|
// The 'nukeme' record marked as deleted is removed.
|
|
|
|
do_check_eq(engine._store.items.nukeme, undefined);
|
|
|
|
|
|
|
|
} finally {
|
2010-08-02 22:37:13 -07:00
|
|
|
server.stop(do_test_finished);
|
2010-06-04 08:38:38 -07:00
|
|
|
Svc.Prefs.resetBranch("");
|
2010-06-01 15:07:50 -07:00
|
|
|
Records.clearCache();
|
|
|
|
syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-11-09 13:51:19 -08:00
|
|
|
// Scenario: on a mobile client, incoming records are fetched in
// batches: one limited "full" GET, one ids-only GET, then full GETs of
// MOBILE_BATCH_SIZE ids each. Verified by logging every collection GET.
function test_processIncoming_mobile_batchSize() {
  _("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");

  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");
  Svc.Prefs.set("client.type", "mobile");
  // (Removed unused local `crypto_steam`; it was never registered with
  // the server nor referenced again.)

  // A collection that logs each GET
  let collection = new ServerCollection();
  collection.get_log = [];
  collection._get = collection.get;
  collection.get = function (options) {
    this.get_log.push(options);
    return this._get(options);
  };

  // Let's create some 234 server side records. They're all at least
  // 10 minutes old.
  for (var i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = Date.now()/1000 - 60*(i+10);
    collection.wbos[id] = wbo;
  }

  let server = sync_httpd_setup({
      "/1.0/foo/storage/steam": collection.handler()
  });
  do_test_pending();

  let engine = makeSteamEngine();
  // Register the engine in meta/global so _syncStartup() doesn't reset.
  let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
  meta_global.payload.engines = {steam: {version: engine.version,
                                         syncID: engine.syncID}};

  try {

    // On a mobile client, we get new records from the server in batches of 50.
    engine._syncStartup();
    engine._processIncoming();

    // All 234 records made it into the store. (Counting loop replaces
    // the removed-from-the-language array comprehension syntax.)
    let itemCount = 0;
    for (let id in engine._store.items) {
      itemCount++;
    }
    do_check_eq(itemCount, 234);
    do_check_true('record-no-0' in engine._store.items);
    do_check_true('record-no-49' in engine._store.items);
    do_check_true('record-no-50' in engine._store.items);
    do_check_true('record-no-233' in engine._store.items);

    // Verify that the right number of GET requests with the right
    // kind of parameters were made.
    do_check_eq(collection.get_log.length,
                Math.ceil(234 / MOBILE_BATCH_SIZE) + 1);
    do_check_eq(collection.get_log[0].full, 1);
    do_check_eq(collection.get_log[0].limit, MOBILE_BATCH_SIZE);
    do_check_eq(collection.get_log[1].full, undefined);
    do_check_eq(collection.get_log[1].limit, undefined);
    for (let i = 1; i <= Math.floor(234 / MOBILE_BATCH_SIZE); i++) {
      do_check_eq(collection.get_log[i+1].full, 1);
      do_check_eq(collection.get_log[i+1].limit, undefined);
      if (i < Math.floor(234 / MOBILE_BATCH_SIZE))
        do_check_eq(collection.get_log[i+1].ids.length, MOBILE_BATCH_SIZE);
      else
        do_check_eq(collection.get_log[i+1].ids.length, 234 % MOBILE_BATCH_SIZE);
    }

  } finally {
    // Standard teardown.
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
|
|
|
|
Backed out changesets 4d16d58becaf, 680d3557cafe, 13ea9fc20ed2, 1b2d4ba850a9, 81d8bf53ab01, 3e74c1c15b63, a29a2a0ae764, d2fd62ffd88e, 183be003b312, faa6ce5a0a20, 3ee27049cd1a. a=permaorange
2010-12-07 03:50:55 -08:00
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
function test_uploadOutgoing_toEmptyServer() {
|
|
|
|
_("SyncEngine._uploadOutgoing uploads new records to server");
|
|
|
|
|
|
|
|
Svc.Prefs.set("clusterURL", "http://localhost:8080/");
|
2010-07-14 09:36:13 -07:00
|
|
|
Svc.Prefs.set("username", "foo");
|
2010-06-01 15:07:50 -07:00
|
|
|
let collection = new ServerCollection();
|
|
|
|
collection.wbos.flying = new ServerWBO('flying');
|
|
|
|
collection.wbos.scotsman = new ServerWBO('scotsman');
|
|
|
|
|
|
|
|
let server = sync_httpd_setup({
|
|
|
|
"/1.0/foo/storage/steam": collection.handler(),
|
|
|
|
"/1.0/foo/storage/steam/flying": collection.wbos.flying.handler(),
|
|
|
|
"/1.0/foo/storage/steam/scotsman": collection.wbos.scotsman.handler()
|
|
|
|
});
|
2010-08-02 22:37:13 -07:00
|
|
|
do_test_pending();
|
2010-11-29 16:41:17 -08:00
|
|
|
CollectionKeys.generateNewKeys();
|
2010-06-01 15:07:50 -07:00
|
|
|
|
|
|
|
let engine = makeSteamEngine();
|
2010-11-11 11:00:35 -08:00
|
|
|
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
|
2010-06-01 15:07:50 -07:00
|
|
|
engine._store.items = {flying: "LNER Class A3 4472",
|
|
|
|
scotsman: "Flying Scotsman"};
|
|
|
|
// Mark one of these records as changed
|
|
|
|
engine._tracker.addChangedID('scotsman', 0);
|
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
|
|
|
|
meta_global.payload.engines = {steam: {version: engine.version,
|
|
|
|
syncID: engine.syncID}};
|
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
try {
|
|
|
|
|
|
|
|
// Confirm initial environment
|
2010-11-11 11:00:35 -08:00
|
|
|
do_check_eq(engine.lastSyncLocal, 0);
|
2010-06-01 15:07:50 -07:00
|
|
|
do_check_eq(collection.wbos.flying.payload, undefined);
|
|
|
|
do_check_eq(collection.wbos.scotsman.payload, undefined);
|
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
engine._syncStartup();
|
2010-06-01 15:07:50 -07:00
|
|
|
engine._uploadOutgoing();
|
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
// Local timestamp has been set.
|
|
|
|
do_check_true(engine.lastSyncLocal > 0);
|
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
// Ensure the marked record ('scotsman') has been uploaded and is
|
|
|
|
// no longer marked.
|
|
|
|
do_check_eq(collection.wbos.flying.payload, undefined);
|
|
|
|
do_check_true(!!collection.wbos.scotsman.payload);
|
|
|
|
do_check_eq(JSON.parse(collection.wbos.scotsman.data.ciphertext).id,
|
|
|
|
'scotsman');
|
|
|
|
do_check_eq(engine._tracker.changedIDs['scotsman'], undefined);
|
|
|
|
|
|
|
|
// The 'flying' record wasn't marked so it wasn't uploaded
|
|
|
|
do_check_eq(collection.wbos.flying.payload, undefined);
|
|
|
|
|
|
|
|
} finally {
|
2010-08-02 22:37:13 -07:00
|
|
|
server.stop(do_test_finished);
|
2010-06-04 08:38:38 -07:00
|
|
|
Svc.Prefs.resetBranch("");
|
2010-06-01 15:07:50 -07:00
|
|
|
Records.clearCache();
|
|
|
|
syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-07-31 04:28:00 -07:00
|
|
|
function test_uploadOutgoing_failed() {
|
|
|
|
_("SyncEngine._uploadOutgoing doesn't clear the tracker of objects that failed to upload.");
|
|
|
|
|
|
|
|
Svc.Prefs.set("clusterURL", "http://localhost:8080/");
|
|
|
|
Svc.Prefs.set("username", "foo");
|
|
|
|
let collection = new ServerCollection();
|
|
|
|
// We only define the "flying" WBO on the server, not the "scotsman"
|
|
|
|
// and "peppercorn" ones.
|
|
|
|
collection.wbos.flying = new ServerWBO('flying');
|
|
|
|
|
|
|
|
let server = sync_httpd_setup({
|
|
|
|
"/1.0/foo/storage/steam": collection.handler()
|
|
|
|
});
|
2010-08-06 07:55:06 -07:00
|
|
|
do_test_pending();
|
2010-07-31 04:28:00 -07:00
|
|
|
|
|
|
|
let engine = makeSteamEngine();
|
2010-11-11 11:00:35 -08:00
|
|
|
engine.lastSync = 123; // needs to be non-zero so that tracker is queried
|
2010-07-31 04:28:00 -07:00
|
|
|
engine._store.items = {flying: "LNER Class A3 4472",
|
|
|
|
scotsman: "Flying Scotsman",
|
|
|
|
peppercorn: "Peppercorn Class"};
|
2010-11-11 11:00:35 -08:00
|
|
|
// Mark these records as changed
|
2010-07-31 04:28:00 -07:00
|
|
|
const FLYING_CHANGED = 12345;
|
|
|
|
const SCOTSMAN_CHANGED = 23456;
|
|
|
|
const PEPPERCORN_CHANGED = 34567;
|
|
|
|
engine._tracker.addChangedID('flying', FLYING_CHANGED);
|
|
|
|
engine._tracker.addChangedID('scotsman', SCOTSMAN_CHANGED);
|
|
|
|
engine._tracker.addChangedID('peppercorn', PEPPERCORN_CHANGED);
|
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
|
|
|
|
meta_global.payload.engines = {steam: {version: engine.version,
|
|
|
|
syncID: engine.syncID}};
|
|
|
|
|
2010-07-31 04:28:00 -07:00
|
|
|
try {
|
|
|
|
|
|
|
|
// Confirm initial environment
|
2010-11-11 11:00:35 -08:00
|
|
|
do_check_eq(engine.lastSyncLocal, 0);
|
2010-07-31 04:28:00 -07:00
|
|
|
do_check_eq(collection.wbos.flying.payload, undefined);
|
|
|
|
do_check_eq(engine._tracker.changedIDs['flying'], FLYING_CHANGED);
|
|
|
|
do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
|
|
|
|
do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
|
|
|
|
|
2010-11-23 21:21:31 -08:00
|
|
|
engine.enabled = true;
|
|
|
|
engine.sync();
|
2010-07-31 04:28:00 -07:00
|
|
|
|
2010-11-11 11:00:35 -08:00
|
|
|
// Local timestamp has been set.
|
|
|
|
do_check_true(engine.lastSyncLocal > 0);
|
|
|
|
|
2010-07-31 04:28:00 -07:00
|
|
|
// Ensure the 'flying' record has been uploaded and is no longer marked.
|
|
|
|
do_check_true(!!collection.wbos.flying.payload);
|
|
|
|
do_check_eq(engine._tracker.changedIDs['flying'], undefined);
|
|
|
|
|
|
|
|
// The 'scotsman' and 'peppercorn' records couldn't be uploaded so
|
|
|
|
// they weren't cleared from the tracker.
|
|
|
|
do_check_eq(engine._tracker.changedIDs['scotsman'], SCOTSMAN_CHANGED);
|
|
|
|
do_check_eq(engine._tracker.changedIDs['peppercorn'], PEPPERCORN_CHANGED);
|
|
|
|
|
|
|
|
} finally {
|
2010-08-06 07:55:06 -07:00
|
|
|
server.stop(do_test_finished);
|
2010-07-31 04:28:00 -07:00
|
|
|
Svc.Prefs.resetBranch("");
|
|
|
|
Records.clearCache();
|
|
|
|
syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2010-06-01 15:07:50 -07:00
|
|
|
function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
  _("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");

  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");
  let collection = new ServerCollection();

  // Tally every POST the client makes to the server.
  let uploadCount = 0;
  let origPost = collection.post;
  collection.post = function() {
    uploadCount++;
    return origPost.apply(this, arguments);
  };

  // Create a bunch of outgoing records (and matching server side WBOs).
  let engine = makeSteamEngine();
  for (let i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    engine._store.items[id] = "Record No. " + i;
    engine._tracker.addChangedID(id, 0);
    collection.wbos[id] = new ServerWBO(id);
  }

  let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
  meta_global.payload.engines = {steam: {version: engine.version,
                                         syncID: engine.syncID}};

  let server = sync_httpd_setup({
      "/1.0/foo/storage/steam": collection.handler()
  });
  do_test_pending();

  try {
    // Confirm initial environment: nothing uploaded yet.
    do_check_eq(uploadCount, 0);

    engine._syncStartup();
    engine._uploadOutgoing();

    // Ensure all records have been uploaded.
    for (let i = 0; i < 234; i++) {
      do_check_true(!!collection.wbos['record-no-' + i].payload);
    }

    // Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS.
    do_check_eq(uploadCount, Math.ceil(234 / MAX_UPLOAD_RECORDS));

  } finally {
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
function test_syncFinish_noDelete() {
  _("SyncEngine._syncFinish resets tracker's score");

  let engine = makeSteamEngine();
  engine._tracker.score = 100;
  engine._delete = {}; // Nothing to delete

  // Finishing the sync clears the accumulated tracker score.
  engine._syncFinish();
  do_check_eq(engine.score, 0);
}
|
|
|
|
|
|
|
|
|
|
|
|
function test_syncFinish_deleteByIds() {
  _("SyncEngine._syncFinish deletes server records slated for deletion (list of record IDs).");

  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");

  // Put three records on the server; two of them will be slated for deletion.
  let collection = new ServerCollection();
  let trains = {flying: "LNER Class A3 4472",
                scotsman: "Flying Scotsman",
                rekolok: "Rekonstruktionslokomotive"};
  for (let id in trains) {
    collection.wbos[id] = new ServerWBO(
        id, encryptPayload({id: id, denomination: trains[id]}));
  }

  let server = httpd_setup({
      "/1.0/foo/storage/steam": collection.handler()
  });
  do_test_pending();

  let engine = makeSteamEngine();
  try {
    engine._delete = {ids: ['flying', 'rekolok']};
    engine._syncFinish();

    // The 'flying' and 'rekolok' records were deleted while the
    // 'scotsman' one wasn't.
    do_check_eq(collection.wbos.flying.payload, undefined);
    do_check_true(!!collection.wbos.scotsman.payload);
    do_check_eq(collection.wbos.rekolok.payload, undefined);

    // The deletion todo list has been reset.
    do_check_eq(engine._delete.ids, undefined);

  } finally {
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
|
|
|
|
|
|
|
|
|
|
|
function test_syncFinish_deleteLotsInBatches() {
  _("SyncEngine._syncFinish deletes server records in batches of 100 (list of record IDs).");

  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");
  let collection = new ServerCollection();

  // Tally every DELETE request the client makes to the server.
  let deleteCount = 0;
  let origDelete = collection.delete;
  collection.delete = function() {
    deleteCount++;
    return origDelete.apply(this, arguments);
  };

  // Create a bunch of records on the server. Record no. i is
  // (i + 110) minutes old.
  let now = Date.now();
  for (let i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    let payload = encryptPayload({id: id, denomination: "Record No. " + i});
    let wbo = new ServerWBO(id, payload);
    wbo.modified = now / 1000 - 60 * (i + 110);
    collection.wbos[id] = wbo;
  }

  let server = httpd_setup({
      "/1.0/foo/storage/steam": collection.handler()
  });
  do_test_pending();

  let engine = makeSteamEngine();
  try {
    // Confirm initial environment: no DELETEs issued yet.
    do_check_eq(deleteCount, 0);

    // Declare what we want to have deleted: all records no. 100 and
    // up, and all records that are less than 200.5 mins old (which
    // are records 0 thru 90).
    engine._delete = {ids: [],
                      newer: now / 1000 - 60 * 200.5};
    for (let i = 100; i < 234; i++) {
      engine._delete.ids.push('record-no-' + i);
    }

    engine._syncFinish();

    // Ensure that the appropriate server data has been wiped while
    // records 91 thru 99 survive.
    for (let i = 0; i < 234; i++) {
      let id = 'record-no-' + i;
      if (i <= 90 || i >= 100) {
        do_check_eq(collection.wbos[id].payload, undefined);
      } else {
        do_check_true(!!collection.wbos[id].payload);
      }
    }

    // The 134 ID-based deletions were done in batches of 100, plus
    // one timestamp-based deletion.
    do_check_eq(deleteCount, 2 + 1);

    // The deletion todo list has been reset.
    do_check_eq(engine._delete.ids, undefined);

  } finally {
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
2010-06-22 13:18:10 -07:00
|
|
|
|
2010-11-23 21:21:31 -08:00
|
|
|
|
|
|
|
function test_sync_partialUpload() {
  _("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");

  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");

  let crypto_steam = new ServerWBO('steam');
  let collection = new ServerCollection();
  let server = sync_httpd_setup({
      "/1.0/foo/storage/crypto/steam": crypto_steam.handler(),
      "/1.0/foo/storage/steam": collection.handler()
  });
  do_test_pending();
  CollectionKeys.generateNewKeys();

  let engine = makeSteamEngine();
  engine.lastSync = 123; // needs to be non-zero so that tracker is queried
  engine.lastSyncLocal = 456;

  // Let the third upload fail completely.
  let uploadCount = 0;
  let origPost = collection.post;
  collection.post = function() {
    if (uploadCount == 2)
      throw "FAIL!";
    uploadCount++;
    return origPost.apply(this, arguments);
  };

  // Create a bunch of records (and server side handlers).
  for (let i = 0; i < 234; i++) {
    let id = 'record-no-' + i;
    engine._store.items[id] = "Record No. " + i;
    engine._tracker.addChangedID(id, i);
    // Let two items in the first upload batch fail.
    if ((i != 23) && (i != 42))
      collection.wbos[id] = new ServerWBO(id);
  }

  let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
  meta_global.payload.engines = {steam: {version: engine.version,
                                         syncID: engine.syncID}};

  try {
    engine.enabled = true;

    // sync() must propagate the hard failure from the third POST.
    let error;
    try {
      engine.sync();
    } catch (ex) {
      error = ex;
    }
    do_check_true(!!error);

    // The timestamp has been updated.
    do_check_true(engine.lastSyncLocal > 456);

    for (let i = 0; i < 234; i++) {
      let id = 'record-no-' + i;
      // Ensure failed records are back in the tracker:
      // * records no. 23 and 42 were rejected by the server,
      // * records no. 200 and higher couldn't be uploaded because we failed
      //   hard on the 3rd upload.
      if ((i == 23) || (i == 42) || (i >= 200))
        do_check_eq(engine._tracker.changedIDs[id], i);
      else
        do_check_false(id in engine._tracker.changedIDs);
    }

  } finally {
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
|
|
|
|
2010-11-29 16:41:17 -08:00
|
|
|
function test_canDecrypt_noCryptoKeys() {
  _("SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection.");
  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");

  // Wipe CollectionKeys so we can test the desired scenario.
  CollectionKeys.setContents({"collections": {}, "default": null});

  // One encrypted record on the server that we won't be able to decrypt.
  let coll = new ServerCollection();
  coll.wbos.flying = new ServerWBO(
      'flying', encryptPayload({id: 'flying',
                                denomination: "LNER Class A3 4472"}));

  let server = sync_httpd_setup({
      "/1.0/foo/storage/steam": coll.handler()
  });
  do_test_pending();

  let engine = makeSteamEngine();
  try {
    // Without keys, decryption must fail.
    do_check_false(engine.canDecrypt());
  } finally {
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
|
|
|
|
|
|
|
function test_canDecrypt_true() {
  _("SyncEngine.canDecrypt returns true if the engine can decrypt the items on the server.");
  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
  Svc.Prefs.set("username", "foo");

  // Set up CollectionKeys, as service.js does.
  CollectionKeys.generateNewKeys();

  // One encrypted record on the server, encrypted with the keys above.
  let coll = new ServerCollection();
  coll.wbos.flying = new ServerWBO(
      'flying', encryptPayload({id: 'flying',
                                denomination: "LNER Class A3 4472"}));

  let server = sync_httpd_setup({
      "/1.0/foo/storage/steam": coll.handler()
  });
  do_test_pending();

  let engine = makeSteamEngine();
  try {
    // With matching keys, decryption must succeed.
    do_check_true(engine.canDecrypt());
  } finally {
    server.stop(do_test_finished);
    Svc.Prefs.resetBranch("");
    Records.clearCache();
    syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
  }
}
|
|
|
|
|
|
|
|
|
2010-06-22 13:18:10 -07:00
|
|
|
function run_test() {
  if (DISABLE_TESTS_BUG_604565)
    return;

  // Run every test in order; each one tears down its own state.
  let tests = [
    test_syncStartup_emptyOrOutdatedGlobalsResetsSync,
    test_syncStartup_serverHasNewerVersion,
    test_syncStartup_syncIDMismatchResetsClient,
    test_processIncoming_emptyServer,
    test_processIncoming_createFromServer,
    test_processIncoming_reconcile,
    test_processIncoming_mobile_batchSize,
    test_uploadOutgoing_toEmptyServer,
    test_uploadOutgoing_failed,
    test_uploadOutgoing_MAX_UPLOAD_RECORDS,
    test_syncFinish_noDelete,
    test_syncFinish_deleteByIds,
    test_syncFinish_deleteLotsInBatches,
    test_sync_partialUpload,
    test_canDecrypt_noCryptoKeys,
    test_canDecrypt_true
  ];
  for (let i = 0; i < tests.length; i++) {
    tests[i]();
  }
}
|