Bug 710448 - Rewritten Sync record reconciling to handle edge cases; r=rnewman, r=philikon

This patch rewrites Sync's record reconciling logic to properly handle
edge cases. Previously, there were a few cases where data loss could
occur because timestamps were improperly handled or ignored.
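The decisive change is that every reconcile decision is now driven by explicit
timestamps: each side of a conflict is reduced to an age, and the younger (more
recently modified) side wins. A minimal standalone sketch of that comparison,
using the same names the patch introduces below (the free-standing function
form is illustrative only; in the patch these values live inside
SyncEngine._reconcile):

// Illustrative sketch, not part of the patch. Ages are in seconds;
// a smaller age means more recently modified.
function isRemoteNewer(serverTime, item, modified) {
  let remoteAge = serverTime - item.modified;   // age of the incoming record
  let localAge = (item.id in modified) ?
    (Date.now() / 1000 - modified[item.id]) : null;  // age of the local change
  // The patch only consults this result when a local change exists.
  return remoteAge < localAge;
}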
Gregory Szorc 2012-01-25 13:32:05 -08:00
parent b6b933a99d
commit 11e6746349
7 changed files with 387 additions and 179 deletions

View File

@@ -747,11 +747,8 @@ SyncEngine.prototype = {
     // Clear the tracker now. If the sync fails we'll add the ones we failed
     // to upload back.
     this._tracker.clearChangedIDs();
-    // Array of just the IDs from this._modified. This is what we iterate over
-    // so we can modify this._modified during the iteration.
-    this._modifiedIDs = Object.keys(this._modified);
-    this._log.info(this._modifiedIDs.length +
+    this._log.info(Object.keys(this._modified).length +
                    " outgoing items pre-reconciliation");
     // Keep track of what to delete at the end of sync
@@ -1013,19 +1010,6 @@ SyncEngine.prototype = {
     // By default, assume there's no dupe items for the engine
   },
-  _isEqual: function SyncEngine__isEqual(item) {
-    let local = this._createRecord(item.id);
-    if (this._log.level <= Log4Moz.Level.Trace)
-      this._log.trace("Local record: " + local);
-    if (Utils.deepEquals(item.cleartext, local.cleartext)) {
-      this._log.trace("Local record is the same");
-      return true;
-    } else {
-      this._log.trace("Local record is different");
-      return false;
-    }
-  },
   _deleteId: function _deleteId(id) {
     this._tracker.removeChangedID(id);
@@ -1036,85 +1020,195 @@ SyncEngine.prototype = {
     this._delete.ids.push(id);
   },
-  _handleDupe: function _handleDupe(item, dupeId) {
-    // Prefer shorter guids; for ties, just do an ASCII compare
-    let preferLocal = dupeId.length < item.id.length ||
-                      (dupeId.length == item.id.length && dupeId < item.id);
-    if (preferLocal) {
-      this._log.trace("Preferring local id: " + [dupeId, item.id]);
-      this._deleteId(item.id);
-      item.id = dupeId;
-      this._tracker.addChangedID(dupeId, 0);
-    }
-    else {
-      this._log.trace("Switching local id to incoming: " + [item.id, dupeId]);
-      this._store.changeItemID(dupeId, item.id);
-      this._deleteId(dupeId);
-    }
-  },
-  // Reconcile incoming and existing records. Return true if server
-  // data should be applied.
-  _reconcile: function SyncEngine__reconcile(item, dupePerformed) {
-    if (this._log.level <= Log4Moz.Level.Trace)
+  /**
+   * Reconcile incoming record with local state.
+   *
+   * This function essentially determines whether to apply an incoming record.
+   *
+   * @param  item
+   *         Record from server to be tested for application.
+   * @return boolean
+   *         Truthy if incoming record should be applied. False if not.
+   */
+  _reconcile: function _reconcile(item) {
+    if (this._log.level <= Log4Moz.Level.Trace) {
       this._log.trace("Incoming: " + item);
+    }
-    this._log.trace("Reconcile step 1: Check for conflicts");
-    if (item.id in this._modified) {
-      // If the incoming and local changes are the same, skip
-      if (this._isEqual(item)) {
-        delete this._modified[item.id];
+    // We start reconciling by collecting a bunch of state. We do this here
+    // because some state may change during the course of this function and we
+    // need to operate on the original values.
+    let existsLocally = this._store.itemExists(item.id);
+    let locallyModified = item.id in this._modified;
+    // TODO Handle clock drift better. Tracked in bug 721181.
+    let remoteAge = AsyncResource.serverTime - item.modified;
+    let localAge = locallyModified ?
+      (Date.now() / 1000 - this._modified[item.id]) : null;
+    let remoteIsNewer = remoteAge < localAge;
+    this._log.trace("Reconciling " + item.id + ". exists=" +
+                    existsLocally + "; modified=" + locallyModified +
+                    "; local age=" + localAge + "; incoming age=" +
+                    remoteAge);
+    // We handle deletions first so subsequent logic doesn't have to check
+    // deleted flags.
+    if (item.deleted) {
+      // If the item doesn't exist locally, there is nothing for us to do. We
+      // can't check for duplicates because the incoming record has no data
+      // which can be used for duplicate detection.
+      if (!existsLocally) {
+        this._log.trace("Ignoring incoming item because it was deleted and " +
+                        "the item does not exist locally.");
         return false;
       }
-      // Records differ so figure out which to take
-      let recordAge = AsyncResource.serverTime - item.modified;
-      let localAge = Date.now() / 1000 - this._modified[item.id];
-      this._log.trace("Record age vs local age: " + [recordAge, localAge]);
+      // We decide whether to process the deletion by comparing the record
+      // ages. If the item is not modified locally, the remote side wins and
+      // the deletion is processed. If it is modified locally, we take the
+      // newer record.
+      if (!locallyModified) {
+        this._log.trace("Applying incoming delete because the local item " +
+                        "exists and isn't modified.");
+        return true;
+      }
-      // Apply the record if the record is newer (server wins)
-      return recordAge < localAge;
+      // TODO As part of bug 720592, determine whether we should do more here.
+      // In the case where the local changes are newer, it is quite possible
+      // that the local client will restore data a remote client had tried to
+      // delete. There might be a good reason for that delete and it might be
+      // unexpected for this client to restore that data.
+      this._log.trace("Incoming record is deleted but we had local changes. " +
+                      "Applying the youngest record.");
+      return remoteIsNewer;
    }
-    this._log.trace("Reconcile step 2: Check for updates");
-    if (this._store.itemExists(item.id))
-      return !this._isEqual(item);
+    // At this point the incoming record is not for a deletion and must have
+    // data. If the incoming record does not exist locally, we check for a local
+    // duplicate existing under a different ID. The default implementation of
+    // _findDupe() is empty, so engines have to opt in to this functionality.
+    //
+    // If we find a duplicate, we change the local ID to the incoming ID and we
+    // refresh the metadata collected above. See bug 710448 for the history
+    // of this logic.
+    if (!existsLocally) {
+      let dupeID = this._findDupe(item);
+      if (dupeID) {
+        this._log.trace("Local item " + dupeID + " is a duplicate for " +
+                        "incoming item " + item.id);
-    this._log.trace("Reconcile step 2.5: Don't dupe deletes");
-    if (item.deleted)
-      return true;
+        // The local, duplicate ID is always deleted on the server.
+        this._deleteId(dupeID);
-      // This shouldn't happen, but we protect ourself from infinite recursion.
-      if (dupePerformed) {
-        this._log.warn("Duplicate record not reconciled on second pass: " +
-                       item);
-        // We go ahead and apply it.
+        // The current API contract does not mandate that the ID returned by
+        // _findDupe() actually exists. Therefore, we have to perform this
+        // check.
+        existsLocally = this._store.itemExists(dupeID);
+        // We unconditionally change the item's ID in case the engine knows of
+        // an item but doesn't expose it through itemExists. If the API
+        // contract were stronger, this could be changed.
+        this._log.debug("Switching local ID to incoming: " + dupeID + " -> " +
+                        item.id);
+        this._store.changeItemID(dupeID, item.id);
+        // If the local item was modified, we carry its metadata forward so
+        // appropriate reconciling can be performed.
+        if (dupeID in this._modified) {
+          locallyModified = true;
+          localAge = Date.now() / 1000 - this._modified[dupeID];
+          remoteIsNewer = remoteAge < localAge;
+          this._modified[item.id] = this._modified[dupeID];
+          delete this._modified[dupeID];
+        } else {
+          locallyModified = false;
+          localAge = null;
+        }
+        this._log.debug("Local item after duplication: age=" + localAge +
+                        "; modified=" + locallyModified + "; exists=" +
+                        existsLocally);
+      } else {
+        this._log.trace("No duplicate found for incoming item: " + item.id);
+      }
+    }
+    // At this point we've performed duplicate detection. But, nothing here
+    // should depend on duplicate detection as the above should have updated
+    // state seamlessly.
+    if (!existsLocally) {
+      // If the item doesn't exist locally and we have no local modifications
+      // to the item (implying that it was not deleted), always apply the remote
+      // item.
+      if (!locallyModified) {
+        this._log.trace("Applying incoming because local item does not exist " +
+                        "and was not deleted.");
+        return true;
+      }
+      // If the item was modified locally but isn't present, it must have
+      // been deleted. If the incoming record is younger, we restore from
+      // that record.
+      if (remoteIsNewer) {
+        this._log.trace("Applying incoming because local item was deleted " +
+                        "before the incoming item was changed.");
+        delete this._modified[item.id];
+        return true;
+      }
+      this._log.trace("Ignoring incoming item because the local item's " +
+                      "deletion is newer.");
+      return false;
+    }
+    // If the remote and local records are the same, there is nothing to be
+    // done, so we don't do anything. In the ideal world, this logic wouldn't
+    // be here and the engine would take a record and apply it. The reason we
+    // want to defer this logic is that it would avoid a redundant and
+    // possibly expensive dip into the storage layer to query item state.
+    // This should get addressed in the async rewrite, so we ignore it for now.
+    let localRecord = this._createRecord(item.id);
+    let recordsEqual = Utils.deepEquals(item.cleartext,
+                                        localRecord.cleartext);
+    // If the records are the same, we don't need to do anything. This does
+    // potentially throw away a local modification time. But, if the records
+    // are the same, does it matter?
+    if (recordsEqual) {
+      this._log.trace("Ignoring incoming item because the local item is " +
+                      "identical.");
+      delete this._modified[item.id];
+      return false;
+    }
+    // At this point the records are different.
+    // If we have no local modifications, always take the server record.
+    if (!locallyModified) {
+      this._log.trace("Applying incoming record because no local conflicts.");
+      return true;
+    }
-    // When a dupe is detected, we feed the record (with a possibly changed ID)
-    // back through reconciling. If there are changes in both the local and
-    // incoming records, this should ensure that the proper record is used.
-    this._log.trace("Reconcile step 3: Find dupes");
-    let dupeId = this._findDupe(item);
-    if (dupeId) {
-      // _handleDupe() doesn't really handle anything. Instead, it just
-      // determines which GUID to use.
-      this._handleDupe(item, dupeId);
-      this._log.debug("Reconciling de-duped record: " + item.id);
-      return this._reconcile(item, true);
-    }
-    // Apply the incoming item.
-    return true;
+    // At this point, records are different and the local record is modified.
+    // We resolve conflicts by record age, where the newest one wins. This does
+    // result in data loss and should be handled by giving the engine an
+    // opportunity to merge the records. Bug 720592 tracks this feature.
+    this._log.warn("DATA LOSS: Both local and remote changes to record: " +
+                   item.id);
+    return remoteIsNewer;
  },
  // Upload outgoing records
  _uploadOutgoing: function SyncEngine__uploadOutgoing() {
    this._log.trace("Uploading local changes to server.");
-    if (this._modifiedIDs.length) {
-      this._log.trace("Preparing " + this._modifiedIDs.length +
+    let modifiedIDs = Object.keys(this._modified);
+    if (modifiedIDs.length) {
+      this._log.trace("Preparing " + modifiedIDs.length +
                       " outgoing records");
      // collection we'll upload
@@ -1123,8 +1217,8 @@ SyncEngine.prototype = {
       // Upload what we've got so far in the collection
       let doUpload = Utils.bind2(this, function(desc) {
-        this._log.info("Uploading " + desc + " of " +
-                       this._modifiedIDs.length + " records");
+        this._log.info("Uploading " + desc + " of " + modifiedIDs.length +
+                       " records");
         let resp = up.post();
         if (!resp.success) {
           this._log.debug("Uploading records failed: " + resp);
@@ -1151,7 +1245,7 @@ SyncEngine.prototype = {
         up.clearRecords();
       });
-      for each (let id in this._modifiedIDs) {
+      for each (let id in modifiedIDs) {
         try {
           let out = this._createRecord(id);
           if (this._log.level <= Log4Moz.Level.Trace)
@@ -1214,8 +1308,7 @@ SyncEngine.prototype = {
     for (let [id, when] in Iterator(this._modified)) {
       this._tracker.addChangedID(id, when);
     }
-    this._modified = {};
-    this._modifiedIDs = [];
+    this._modified = {};
   },
   _sync: function SyncEngine__sync() {
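
Stripped of duplicate handling and logging, the new _reconcile above reduces to
a small decision table. The following condensed restatement is illustrative
only: the decide() helper is hypothetical, and both the equal-records
short-circuit (which returns false without conflict) and the side effects on
this._modified are omitted.

// Condensed restatement of _reconcile's outcome, for reading convenience.
// deleted: the incoming record is a tombstone. existsLocally/locallyModified:
// local state. remoteIsNewer: the age comparison computed up front.
function decide(deleted, existsLocally, locallyModified, remoteIsNewer) {
  if (deleted) {
    if (!existsLocally)   return false; // nothing to delete locally
    if (!locallyModified) return true;  // unopposed delete is applied
    return remoteIsNewer;               // delete vs. change: youngest wins
  }
  if (!existsLocally) {
    if (!locallyModified) return true;  // brand new record
    return remoteIsNewer;               // local deletion vs. remote change
  }
  if (!locallyModified)   return true;  // no conflict: server wins
  return remoteIsNewer;                 // both changed: youngest wins
}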

View File

@@ -446,16 +446,6 @@ BookmarksEngine.prototype = {
     if (item.hasDupe)
       return;
     return this._mapDupe(item);
   },
-  _handleDupe: function _handleDupe(item, dupeId) {
-    // Always change the local GUID to the incoming one.
-    this._store.changeItemID(dupeId, item.id);
-    this._deleteId(dupeId);
-    this._tracker.addChangedID(item.id, 0);
-    if (item.parentid) {
-      this._tracker.addChangedID(item.parentid, 0);
-    }
-  }
 };

View File

@@ -76,11 +76,7 @@ HistoryEngine.prototype = {
   _storeObj: HistoryStore,
   _trackerObj: HistoryTracker,
   downloadLimit: MAX_HISTORY_DOWNLOAD,
-  applyIncomingBatchSize: HISTORY_STORE_BATCH_SIZE,
-  _findDupe: function _findDupe(item) {
-    return this._store.GUIDForUri(item.histUri);
-  }
+  applyIncomingBatchSize: HISTORY_STORE_BATCH_SIZE
 };
 function HistoryStore(name) {

View File

@@ -400,7 +400,10 @@ RotaryStore.prototype = {
   },
   changeItemID: function(oldID, newID) {
-    this.items[newID] = this.items[oldID];
+    if (oldID in this.items) {
+      this.items[newID] = this.items[oldID];
+    }
     delete this.items[oldID];
   },
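
The guard added above matters because the old body, when asked to rename a
nonexistent ID, materialized a phantom entry: copying this.items[oldID] with a
missing oldID stores undefined under newID, so a later membership check
succeeds spuriously. A standalone illustration, not part of the patch:

// Why the `in` guard matters when oldID is absent.
let items = {};
items["new"] = items["missing"]; // old behavior: copies undefined
let phantom = "new" in items;    // true -- a phantom entry now exists
let items2 = {};
if ("missing" in items2) {       // new behavior: the copy is skipped
  items2["new"] = items2["missing"];
}
let created = "new" in items2;   // false -- store left untouched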

View File

@@ -1,5 +1,8 @@
 const Cm = Components.manager;
+const TEST_CLUSTER_URL = "http://localhost:8080/";
+const TEST_SERVER_URL = "http://localhost:8080/";
 // Shared logging for all HTTP server functions.
 Cu.import("resource://services-sync/log4moz.js");
 const SYNC_HTTP_LOGGER = "Sync.Test.Server";

View File

@@ -63,7 +63,7 @@ add_test(function test_bad_hmac() {
   _("First sync, client record is uploaded");
   do_check_eq(Clients.lastRecordUpload, 0);
   check_clients_count(0);
-  Clients.sync();
+  Clients._sync();
   check_clients_count(1);
   do_check_true(Clients.lastRecordUpload > 0);
@@ -81,7 +81,7 @@ add_test(function test_bad_hmac() {
   do_check_true(serverKeys.upload(Weave.Service.cryptoKeysURL).success);
   _("Sync.");
-  Clients.sync();
+  Clients._sync();
   _("Old record " + oldLocalID + " was deleted, new one uploaded.");
   check_clients_count(1);
@@ -96,7 +96,7 @@ add_test(function test_bad_hmac() {
   deletedCollections = [];
   deletedItems = [];
   check_clients_count(1);
-  Clients.sync();
+  Clients._sync();
   _("Old record was not deleted, new one uploaded.");
   do_check_eq(deletedCollections.length, 0);
@@ -116,7 +116,7 @@ add_test(function test_bad_hmac() {
   uploadNewKeys();
   // Sync once to upload a record.
-  Clients.sync();
+  Clients._sync();
   check_clients_count(1);
   // Generate and upload new keys, so the old client record is wrong.
@@ -133,7 +133,7 @@ add_test(function test_bad_hmac() {
   do_check_eq(deletedCollections.length, 0);
   do_check_eq(deletedItems.length, 0);
-  Clients.sync();
+  Clients._sync();
   do_check_eq(deletedItems.length, 1);
   check_client_deleted(oldLocalID);
   check_clients_count(1);
@@ -186,7 +186,7 @@ add_test(function test_sync() {
   _("First sync. Client record is uploaded.");
   do_check_eq(clientWBO(), undefined);
   do_check_eq(Clients.lastRecordUpload, 0);
-  Clients.sync();
+  Clients._sync();
   do_check_true(!!clientWBO().payload);
   do_check_true(Clients.lastRecordUpload > 0);
@@ -194,7 +194,7 @@ add_test(function test_sync() {
   Clients.lastRecordUpload -= MORE_THAN_CLIENTS_TTL_REFRESH;
   let lastweek = Clients.lastRecordUpload;
   clientWBO().payload = undefined;
-  Clients.sync();
+  Clients._sync();
   do_check_true(!!clientWBO().payload);
   do_check_true(Clients.lastRecordUpload > lastweek);
@@ -205,7 +205,7 @@ add_test(function test_sync() {
   _("Time travel one day back, no record uploaded.");
   Clients.lastRecordUpload -= LESS_THAN_CLIENTS_TTL_REFRESH;
   let yesterday = Clients.lastRecordUpload;
-  Clients.sync();
+  Clients._sync();
   do_check_eq(clientWBO().payload, undefined);
   do_check_eq(Clients.lastRecordUpload, yesterday);
@@ -404,7 +404,10 @@ add_test(function test_command_sync() {
   _("Ensure that commands are synced across clients.");
   Svc.Prefs.set("clusterURL", "http://localhost:8080/");
   Svc.Prefs.set("username", "foo");
+  Clients._store.wipe();
+  generateNewKeys();
   let contents = {
     meta: {global: {engines: {clients: {version: Clients.version,
                                         syncID: Clients.syncID}}}},
@@ -431,7 +434,7 @@ add_test(function test_command_sync() {
   try {
     _("Syncing.");
-    Clients.sync();
+    Clients._sync();
     _("Checking record was uploaded.");
     do_check_neq(clientWBO(Clients.localID).payload, undefined);
     do_check_true(Clients.lastRecordUpload > 0);
@@ -441,7 +444,8 @@ add_test(function test_command_sync() {
     Svc.Prefs.set("client.GUID", remoteId);
     Clients._resetClient();
     do_check_eq(Clients.localID, remoteId);
-    Clients.sync();
+    _("Performing sync on reset client.");
+    Clients._sync();
     do_check_neq(Clients.localCommands, undefined);
     do_check_eq(Clients.localCommands.length, 1);

View File

@@ -18,6 +18,29 @@ function cleanAndGo(server) {
   server.stop(run_next_test);
 }
+function createServerAndConfigureClient() {
+  let engine = new RotaryEngine();
+  let contents = {
+    meta: {global: {engines: {rotary: {version: engine.version,
+                                       syncID: engine.syncID}}}},
+    crypto: {},
+    rotary: {}
+  };
+  const USER = "foo";
+  Svc.Prefs.set("clusterURL", TEST_CLUSTER_URL);
+  Svc.Prefs.set("serverURL", TEST_SERVER_URL);
+  Svc.Prefs.set("username", USER);
+  let server = new SyncServer();
+  server.registerUser(USER, "password");
+  server.createContents(USER, contents);
+  server.start();
+  return [engine, server, USER];
+}
 function run_test() {
   generateNewKeys();
   Svc.Prefs.set("log.logger.engine.rotary", "Trace");
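
The new createServerAndConfigureClient() helper factors out the server and
pref boilerplate that each reconcile test below previously repeated inline;
callers destructure its return value. A typical prologue, assembled from the
tests that follow for illustration:

let [engine, server, user] = createServerAndConfigureClient();
let now = Date.now() / 1000 - 10;
engine.lastSync = now;
engine.lastModified = now + 1;
// ... insert WBOs, run engine._sync(), then cleanAndGo(server).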
@@ -278,18 +301,11 @@ add_test(function test_processIncoming_reconcile() {
                     encryptPayload({id: 'updateclient',
                                     denomination: "Get this!"}));
-  // This is a dupe of 'original' but with a longer GUID, so we're
-  // expecting it to be marked for deletion from the server
+  // This is a dupe of 'original'.
   collection.insert('duplication',
                     encryptPayload({id: 'duplication',
                                     denomination: "Original Entry"}));
-  // This is a dupe of 'long_original' but with a shorter GUID, so we're
-  // expecting it to replace 'long_original'.
-  collection.insert('dupe',
-                    encryptPayload({id: 'dupe',
-                                    denomination: "Long Original Entry"}));
   // This record is marked as deleted, so we're expecting the client
   // record to be removed.
   collection.insert('nukeme',
@@ -348,49 +364,92 @@ add_test(function test_processIncoming_reconcile() {
     // Updated with server data.
     do_check_eq(engine._store.items.updateclient, "Get this!");
-    // The dupe with the shorter ID is kept, the longer one is slated
-    // for deletion.
-    do_check_eq(engine._store.items.long_original, undefined);
-    do_check_eq(engine._store.items.dupe, "Long Original Entry");
-    do_check_neq(engine._delete.ids.indexOf('duplication'), -1);
+    // The incoming ID is preferred.
+    do_check_eq(engine._store.items.original, undefined);
+    do_check_eq(engine._store.items.duplication, "Original Entry");
+    do_check_neq(engine._delete.ids.indexOf("original"), -1);
     // The 'nukeme' record marked as deleted is removed.
     do_check_eq(engine._store.items.nukeme, undefined);
   } finally {
     cleanAndGo(server);
   }
-})
+});
-add_test(function test_processIncoming_reconcile_deleted_dupe() {
-  _("Ensure that locally deleted duplicate record is handled properly.");
+add_test(function test_processIncoming_reconcile_local_deleted() {
+  _("Ensure local, duplicate ID is deleted on server.");
-  let engine = new RotaryEngine();
-  let contents = {
-    meta: {global: {engines: {rotary: {version: engine.version,
-                                       syncID: engine.syncID}}}},
-    crypto: {},
-    rotary: {}
-  };
-  const USER = "foo";
-  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
-  Svc.Prefs.set("username", USER);
+  // When a duplicate is resolved, the local ID (which is never taken) should
+  // be deleted on the server.
+  let [engine, server, user] = createServerAndConfigureClient();
   let now = Date.now() / 1000 - 10;
   engine.lastSync = now;
   engine.lastModified = now + 1;
-  let server = new SyncServer();
-  server.registerUser(USER, "password");
-  server.createContents(USER, contents);
-  server.start();
-  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "value"});
+  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
   let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
-  server.insertWBO(USER, "rotary", wbo);
+  server.insertWBO(user, "rotary", wbo);
+  let record = encryptPayload({id: "DUPE_LOCAL", denomination: "local"});
+  let wbo = new ServerWBO("DUPE_LOCAL", record, now - 1);
+  server.insertWBO(user, "rotary", wbo);
+  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
+  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
+  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+  engine._sync();
+  do_check_attribute_count(engine._store.items, 1);
+  do_check_true("DUPE_INCOMING" in engine._store.items);
+  let collection = server.getCollection(user, "rotary");
+  do_check_eq(1, collection.count());
+  do_check_neq(undefined, collection.wbo("DUPE_INCOMING"));
+  cleanAndGo(server);
+});
+add_test(function test_processIncoming_reconcile_equivalent() {
+  _("Ensure proper handling of incoming records that match local.");
+  let [engine, server, user] = createServerAndConfigureClient();
+  let now = Date.now() / 1000 - 10;
+  engine.lastSync = now;
+  engine.lastModified = now + 1;
+  let record = encryptPayload({id: "entry", denomination: "denomination"});
+  let wbo = new ServerWBO("entry", record, now + 2);
+  server.insertWBO(user, "rotary", wbo);
+  engine._store.items = {entry: "denomination"};
+  do_check_true(engine._store.itemExists("entry"));
+  engine._sync();
+  do_check_attribute_count(engine._store.items, 1);
+  cleanAndGo(server);
+});
+add_test(function test_processIncoming_reconcile_locally_deleted_dupe_new() {
+  _("Ensure locally deleted duplicate record newer than incoming is handled.");
+  // This is a somewhat complicated test. It ensures that if a client receives
+  // a modified record for an item that was deleted locally under a different
+  // ID, the incoming record is ignored. This is a corner case for record
+  // handling, but it needs to be supported.
+  let [engine, server, user] = createServerAndConfigureClient();
+  let now = Date.now() / 1000 - 10;
+  engine.lastSync = now;
+  engine.lastModified = now + 1;
+  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
+  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+  server.insertWBO(user, "rotary", wbo);
   // Simulate a locally-deleted item.
   engine._store.items = {};
@@ -401,16 +460,54 @@ add_test(function test_processIncoming_reconcile_deleted_dupe() {
   engine._sync();
-  // After the sync, nothing should exist since the local record had been
-  // deleted after the incoming record was updated. The server should also have
-  // deleted the incoming record. Since the local record only existed on the
-  // client at the beginning of the sync, it shouldn't exist on the server
-  // after.
+  // After the sync, the server's payload for the original ID should be marked
+  // as deleted.
   do_check_empty(engine._store.items);
-  let collection = server.getCollection(USER, "rotary");
+  let collection = server.getCollection(user, "rotary");
   do_check_eq(1, collection.count());
-  do_check_eq(undefined, collection.payload("DUPE_INCOMING"));
+  let wbo = collection.wbo("DUPE_INCOMING");
+  do_check_neq(null, wbo);
+  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
+  do_check_true(payload.deleted);
   cleanAndGo(server);
 });
+add_test(function test_processIncoming_reconcile_locally_deleted_dupe_old() {
+  _("Ensure locally deleted duplicate record older than incoming is restored.");
+  // This is similar to the above test except it tests the condition where the
+  // incoming record is newer than the local deletion, therefore overriding it.
+  let [engine, server, user] = createServerAndConfigureClient();
+  let now = Date.now() / 1000 - 10;
+  engine.lastSync = now;
+  engine.lastModified = now + 1;
+  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
+  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+  server.insertWBO(user, "rotary", wbo);
+  // Simulate a locally-deleted item.
+  engine._store.items = {};
+  engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
+  do_check_false(engine._store.itemExists("DUPE_LOCAL"));
+  do_check_false(engine._store.itemExists("DUPE_INCOMING"));
+  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+  engine._sync();
+  // Since the remote change is newer, the incoming item should exist locally.
+  do_check_attribute_count(engine._store.items, 1);
+  do_check_true("DUPE_INCOMING" in engine._store.items);
+  do_check_eq("incoming", engine._store.items.DUPE_INCOMING);
+  let collection = server.getCollection(user, "rotary");
+  do_check_eq(1, collection.count());
+  let wbo = collection.wbo("DUPE_INCOMING");
+  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
+  do_check_eq("incoming", payload.denomination);
+  cleanAndGo(server);
+});
});
@@ -418,30 +515,16 @@ add_test(function test_processIncoming_reconcile_deleted_dupe() {
 add_test(function test_processIncoming_reconcile_changed_dupe() {
   _("Ensure that locally changed duplicate record is handled properly.");
-  let engine = new RotaryEngine();
-  let contents = {
-    meta: {global: {engines: {rotary: {version: engine.version,
-                                       syncID: engine.syncID}}}},
-    crypto: {},
-    rotary: {}
-  };
-  const USER = "foo";
-  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
-  Svc.Prefs.set("username", USER);
+  let [engine, server, user] = createServerAndConfigureClient();
   let now = Date.now() / 1000 - 10;
   engine.lastSync = now;
   engine.lastModified = now + 1;
-  let server = new SyncServer();
-  server.registerUser(USER, "password");
-  server.createContents(USER, contents);
-  server.start();
+  // The local record is newer than the incoming one, so it should be retained.
   let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
   let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
-  server.insertWBO(USER, "rotary", wbo);
+  server.insertWBO(user, "rotary", wbo);
   engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
   engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
@@ -450,23 +533,59 @@ add_test(function test_processIncoming_reconcile_changed_dupe() {
   engine._sync();
+  // The ID should have been changed to incoming.
   do_check_attribute_count(engine._store.items, 1);
-  do_check_true("DUPE_LOCAL" in engine._store.items);
+  do_check_true("DUPE_INCOMING" in engine._store.items);
-  let collection = server.getCollection(USER, "rotary");
+  // On the server, the local ID should be deleted and the incoming ID should
+  // have its payload set to what was in the local record.
+  let collection = server.getCollection(user, "rotary");
   do_check_eq(1, collection.count());
   let wbo = collection.wbo("DUPE_INCOMING");
   do_check_neq(undefined, wbo);
-  do_check_eq(undefined, wbo.payload);
-  let wbo = collection.wbo("DUPE_LOCAL");
-  do_check_neq(undefined, wbo);
   do_check_neq(undefined, wbo.payload);
   let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
   do_check_eq("local", payload.denomination);
   cleanAndGo(server);
 });
+add_test(function test_processIncoming_reconcile_changed_dupe_new() {
+  _("Ensure locally changed duplicate record older than incoming is ignored.");
+  // This test is similar to the above except the incoming record is younger
+  // than the local record. The incoming record should be authoritative.
+  let [engine, server, user] = createServerAndConfigureClient();
+  let now = Date.now() / 1000 - 10;
+  engine.lastSync = now;
+  engine.lastModified = now + 1;
+  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
+  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+  server.insertWBO(user, "rotary", wbo);
+  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
+  engine._tracker.addChangedID("DUPE_LOCAL", now + 1);
+  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
+  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+  engine._sync();
+  // The ID should have been changed to incoming.
+  do_check_attribute_count(engine._store.items, 1);
+  do_check_true("DUPE_INCOMING" in engine._store.items);
+  // On the server, the local ID should be deleted and the incoming ID should
+  // have its payload retained.
+  let collection = server.getCollection(user, "rotary");
+  do_check_eq(1, collection.count());
+  let wbo = collection.wbo("DUPE_INCOMING");
+  do_check_neq(undefined, wbo);
+  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
+  do_check_eq("incoming", payload.denomination);
+  cleanAndGo(server);
+});
 add_test(function test_processIncoming_mobile_batchSize() {
   _("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");