Merge m-c to fx-team. a=merge

This commit is contained in:
Ryan VanderMeulen 2014-07-18 16:08:36 -04:00
commit 00c2204f2b
279 changed files with 4462 additions and 1707 deletions

View File

@ -6,9 +6,9 @@ support-files =
filepicker_path_handler_chrome.js
systemapp_helper.js
[test_sandbox_permission.html]
[test_filepicker_path.html]
[test_permission_deny.html]
[test_permission_gum_remember.html]
skip-if = (toolkit == 'gonk' && debug) # Bug 1019572 - debug-only timeout
skip-if = true # Bug 1019572 - frequent timeouts
[test_sandbox_permission.html]
[test_systemapp.html]

View File

@ -19,7 +19,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cd88d860656c31c7da7bb310d6a160d0011b0961"/>

View File

@ -17,7 +17,7 @@
</project>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="8b04b5aca4b0a894de40f4d53ae9750222d349a8"/>

View File

@ -15,7 +15,7 @@
<project name="platform_build" path="build" remote="b2g" revision="276ce45e78b09c4a4ee643646f691d22804754c1">
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="gaia" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>

View File

@ -19,7 +19,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cd88d860656c31c7da7bb310d6a160d0011b0961"/>

View File

@ -17,7 +17,7 @@
</project>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="8b04b5aca4b0a894de40f4d53ae9750222d349a8"/>

View File

@ -4,6 +4,6 @@
"remote": "",
"branch": ""
},
"revision": "72a7f5cea568a34d5aa9e221a4cc546de856cace",
"revision": "ced3062ac363c95da1cd3a55f8c6e172490e42d5",
"repo_path": "/integration/gaia-central"
}

View File

@ -17,7 +17,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>

View File

@ -15,7 +15,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>

View File

@ -17,7 +17,7 @@
</project>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="moztt" path="external/moztt" remote="b2g" revision="dc5ca96695cab87b4c2fcd7c9f046ae3415a70a5"/>
<project name="apitrace" path="external/apitrace" remote="apitrace" revision="8b04b5aca4b0a894de40f4d53ae9750222d349a8"/>

View File

@ -17,7 +17,7 @@
<copyfile dest="Makefile" src="core/root.mk"/>
</project>
<project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="48fe31ffcd3b9eca4eeb13cd0a73c1a28b45b295"/>
<project name="gaia.git" path="gaia" remote="mozillaorg" revision="7104d2996b9ad65d9fff57ada454699d3348f0a5"/>
<project name="gonk-misc" path="gonk-misc" remote="b2g" revision="7f792d756385bb894fba7645da59c67fe2c804bf"/>
<project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
<project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>

View File

@ -1526,6 +1526,8 @@ pref("loop.server", "https://loop.services.mozilla.com");
pref("loop.seenToS", "unseen");
pref("loop.do_not_disturb", false);
pref("loop.ringtone", "chrome://browser/content/loop/shared/sounds/Firefox-Long.ogg");
pref("loop.retry_delay.start", 60000);
pref("loop.retry_delay.limit", 300000);
// serverURL to be assigned by services team
pref("services.push.serverURL", "wss://push.services.mozilla.com/");

View File

@ -59,7 +59,12 @@ let WebrtcIndicator = {
streamData.browser.focus();
}
browserWindow.focus();
PopupNotifications.getNotification("webRTC-sharingDevices",
streamData.browser).reshow();
let notif = PopupNotifications.getNotification("webRTC-sharingDevices",
streamData.browser);
if (!notif) {
notif = PopupNotifications.getNotification("webRTC-sharingScreen",
streamData.browser);
}
notif.reshow();
}
}

View File

@ -729,6 +729,8 @@
<image id="webRTC-sharingDevices-notification-icon" class="notification-anchor-icon" role="button"/>
<image id="webRTC-shareMicrophone-notification-icon" class="notification-anchor-icon" role="button"/>
<image id="webRTC-sharingMicrophone-notification-icon" class="notification-anchor-icon" role="button"/>
<image id="webRTC-shareScreen-notification-icon" class="notification-anchor-icon" role="button"/>
<image id="webRTC-sharingScreen-notification-icon" class="notification-anchor-icon" role="button"/>
<image id="pointerLock-notification-icon" class="notification-anchor-icon" role="button"/>
<image id="servicesInstall-notification-icon" class="notification-anchor-icon" role="button"/>
<image id="translate-notification-icon" class="notification-anchor-icon" role="button"/>

View File

@ -18,6 +18,19 @@
<menupopup id="webRTC-selectCamera-menupopup"/>
</menulist>
</popupnotificationcontent>
<popupnotificationcontent id="webRTC-selectWindowOrScreen" orient="vertical">
<separator class="thin"/>
<label value="&getUserMedia.selectWindowOrScreen.label;"
accesskey="&getUserMedia.selectWindowOrScreen.accesskey;"
control="webRTC-selectWindow-menulist"/>
<menulist id="webRTC-selectWindow-menulist"
oncommand="WebrtcIndicator.UIModule.updateMainActionLabel(this);">
<menupopup id="webRTC-selectWindow-menupopup"/>
</menulist>
<description id="webRTC-all-windows-shared" hidden="true">&getUserMedia.allWindowsShared.message;</description>
</popupnotificationcontent>
<popupnotificationcontent id="webRTC-selectMicrophone" orient="vertical">
<separator class="thin"/>
<label value="&getUserMedia.selectMicrophone.label;"

View File

@ -8,6 +8,7 @@ const { classes: Cc, interfaces: Ci, utils: Cu } = Components;
Cu.import("resource://gre/modules/Services.jsm");
Cu.import("resource://gre/modules/XPCOMUtils.jsm");
Cu.import("resource://gre/modules/Timer.jsm");
this.EXPORTED_SYMBOLS = ["MozLoopPushHandler"];
@ -17,17 +18,36 @@ XPCOMUtils.defineLazyModuleGetter(this, "console",
/**
* We don't have push notifications on desktop currently, so this is a
* workaround to get them going for us.
*
* XXX Handle auto-reconnections if connection fails for whatever reason
* (bug 1013248).
*/
let MozLoopPushHandler = {
// This is the uri of the push server.
pushServerUri: Services.prefs.getCharPref("services.push.serverURL"),
// This is the channel id we're using for notifications
channelID: "8b1081ce-9b35-42b5-b8f5-3ff8cb813a50",
// This is the UserAgent UUID assigned by the PushServer
uaID: undefined,
// Stores the push url if we're registered and we have one.
pushUrl: undefined,
// Set to true once the channelID has been registered with the PushServer.
registered: false,
_minRetryDelay_ms: (() => {
try {
return Services.prefs.getIntPref("loop.retry_delay.start")
}
catch (e) {
return 60000 // 1 minute
}
})(),
_maxRetryDelay_ms: (() => {
try {
return Services.prefs.getIntPref("loop.retry_delay.limit")
}
catch (e) {
return 300000 // 5 minutes
}
})(),
/**
* Starts a connection to the push socket server. On
@ -51,25 +71,13 @@ let MozLoopPushHandler = {
* the websocket to be mocked for tests.
*/
initialize: function(registerCallback, notificationCallback, mockPushHandler) {
if (Services.io.offline) {
registerCallback("offline");
return;
if (mockPushHandler) {
this._mockPushHandler = mockPushHandler;
}
this._registerCallback = registerCallback;
this._notificationCallback = notificationCallback;
if (mockPushHandler) {
// For tests, use the mock instance.
this._websocket = mockPushHandler;
} else {
this._websocket = Cc["@mozilla.org/network/protocol;1?name=wss"]
.createInstance(Ci.nsIWebSocketChannel);
}
this._websocket.protocol = "push-notification";
let pushURI = Services.io.newURI(this.pushServerUri, null, null);
this._websocket.asyncOpen(pushURI, this.pushServerUri, this, null);
this._openSocket();
},
/**
@ -79,8 +87,18 @@ let MozLoopPushHandler = {
* @param {nsISupports} aContext Not used
*/
onStart: function() {
let helloMsg = { messageType: "hello", uaid: "", channelIDs: [] };
this._websocket.sendMsg(JSON.stringify(helloMsg));
this._retryEnd();
// If a uaID has already been assigned, assume this is a re-connect
// and send the uaID in order to re-synch with the
// PushServer. If a registration has been completed, send the channelID.
let helloMsg = { messageType: "hello",
uaid: this.uaID,
channelIDs: this.registered ? [this.channelID] :[] };
this._retryOperation(() => this.onStart(), this._maxRetryDelay_ms);
try { // in case websocket has closed before this handler is run
this._websocket.sendMsg(JSON.stringify(helloMsg));
}
catch (e) {console.warn("MozLoopPushHandler::onStart websocket.sendMsg() failure");}
},
/**
@ -90,11 +108,8 @@ let MozLoopPushHandler = {
* @param {nsresult} aStatusCode Reason for stopping (NS_OK = successful)
*/
onStop: function(aContext, aStatusCode) {
// XXX We really should be handling auto-reconnect here, this will be
// implemented in bug 994151. For now, just log a warning, so that a
// developer can find out it has happened and not get too confused.
Cu.reportError("Loop Push server web socket closed! Code: " + aStatusCode);
this.pushUrl = undefined;
this._retryOperation(() => this._openSocket());
},
/**
@ -107,11 +122,8 @@ let MozLoopPushHandler = {
* @param {String} aReason the websocket closing handshake close reason
*/
onServerClose: function(aContext, aCode) {
// XXX We really should be handling auto-reconnect here, this will be
// implemented in bug 994151. For now, just log a warning, so that a
// developer can find out it has happened and not get too confused.
Cu.reportError("Loop Push server web socket closed (server)! Code: " + aCode);
this.pushUrl = undefined;
this._retryOperation(() => this._openSocket());
},
/**
@ -125,30 +137,125 @@ let MozLoopPushHandler = {
switch(msg.messageType) {
case "hello":
this._registerChannel();
this._retryEnd();
if (this.uaID !== msg.uaid) {
this.uaID = msg.uaid;
this._registerChannel();
}
break;
case "register":
this.pushUrl = msg.pushEndpoint;
this._registerCallback(null, this.pushUrl);
this._onRegister(msg);
break;
case "notification":
msg.updates.forEach(function(update) {
msg.updates.forEach((update) => {
if (update.channelID === this.channelID) {
this._notificationCallback(update.version);
}
}.bind(this));
});
break;
}
},
/**
* Handles the PushServer registration response.
*
* @param {} msg PushServer to UserAgent registration response (parsed from JSON).
*/
_onRegister: function(msg) {
switch (msg.status) {
case 200:
this._retryEnd(); // reset retry mechanism
this.registered = true;
if (this.pushUrl !== msg.pushEndpoint) {
this.pushUrl = msg.pushEndpoint;
this._registerCallback(null, this.pushUrl);
}
break;
case 500:
// retry the registration request after a suitable delay
this._retryOperation(() => this._registerChannel());
break;
case 409:
this._registerCallback("error: PushServer ChannelID already in use");
break;
default:
this._registerCallback("error: PushServer registration failure, status = " + msg.status);
break;
}
},
/**
* Attempts to open a websocket.
*
* A new websocket interface is used each time. If an onStop callback
* was received, calling asyncOpen() on the same interface will
* trigger a "alreay open socket" exception even though the channel
* is logically closed.
*/
_openSocket: function() {
if (this._mockPushHandler) {
// For tests, use the mock instance.
this._websocket = this._mockPushHandler;
} else if (!Services.io.offline) {
this._websocket = Cc["@mozilla.org/network/protocol;1?name=wss"]
.createInstance(Ci.nsIWebSocketChannel);
} else {
this._registerCallback("offline");
console.warn("MozLoopPushHandler - IO offline");
return;
}
this._websocket.protocol = "push-notification";
let uri = Services.io.newURI(this.pushServerUri, null, null);
this._websocket.asyncOpen(uri, this.pushServerUri, this, null);
},
/**
* Handles registering a service
*/
_registerChannel: function() {
this._websocket.sendMsg(JSON.stringify({
messageType: "register",
channelID: this.channelID
}));
this.registered = false;
try { // in case websocket has closed
this._websocket.sendMsg(JSON.stringify({messageType: "register",
channelID: this.channelID}));
}
catch (e) {console.warn("MozLoopPushHandler::_registerChannel websocket.sendMsg() failure");}
},
/**
* Method to handle retrying UserAgent to PushServer request following
* a retry back-off scheme managed by this function.
*
* @param {function} delayedOp Function to call after current delay is satisfied
*
* @param {number} [optional] retryDelay This parameter will be used as the initial delay
*/
_retryOperation: function(delayedOp, retryDelay) {
if (!this._retryCount) {
this._retryDelay = retryDelay || this._minRetryDelay_ms;
this._retryCount = 1;
} else {
let nextDelay = this._retryDelay * 2;
this._retryDelay = nextDelay > this._maxRetryDelay_ms ? this._maxRetryDelay_ms : nextDelay;
this._retryCount += 1;
}
this._timeoutID = setTimeout(delayedOp, this._retryDelay);
},
/**
* Method used to reset the retry delay back-off logic.
*
*/
_retryEnd: function() {
if (this._retryCount) {
clearTimeout(this._timeoutID);
this._retryCount = 0;
}
}
};

View File

@ -18,6 +18,8 @@ const kMockWebSocketChannelName = "Mock WebSocket Channel";
const kWebSocketChannelContractID = "@mozilla.org/network/protocol;1?name=wss";
const kServerPushUrl = "http://localhost:3456";
const kEndPointUrl = "http://example.com/fake";
const kUAID = "f47ac11b-58ca-4372-9567-0e02b2c3d479";
// Fake loop server
var loopServer;
@ -67,7 +69,8 @@ let mockPushHandler = {
* enables us to check parameters and return messages similar to the push
* server.
*/
let MockWebSocketChannel = function() {
let MockWebSocketChannel = function(initRegStatus) {
this.initRegStatus = initRegStatus;
};
MockWebSocketChannel.prototype = {
@ -86,28 +89,46 @@ MockWebSocketChannel.prototype = {
this.listener.onStart(this.context);
},
notify: function(version) {
this.listener.onMessageAvailable(this.context,
JSON.stringify({
messageType: "notification", updates: [{
channelID: "8b1081ce-9b35-42b5-b8f5-3ff8cb813a50",
version: version
}]
}));
},
sendMsg: function(aMsg) {
var message = JSON.parse(aMsg);
switch(message.messageType) {
case "hello":
this.listener.onMessageAvailable(this.context,
JSON.stringify({messageType: "hello"}));
JSON.stringify({messageType: "hello",
uaid: kUAID}));
break;
case "register":
this.channelID = message.channelID;
let statusCode = 200;
if (this.initRegStatus) {
statusCode = this.initRegStatus;
this.initRegStatus = 0;
}
this.listener.onMessageAvailable(this.context,
JSON.stringify({messageType: "register", pushEndpoint: "http://example.com/fake"}));
JSON.stringify({messageType: "register",
status: statusCode,
channelID: this.channelID,
pushEndpoint: kEndPointUrl}));
break;
}
}
},
notify: function(version) {
this.listener.onMessageAvailable(this.context,
JSON.stringify({
messageType: "notification", updates: [{
channelID: this.channelID,
version: version
}]
}));
},
stop: function (err) {
this.listener.onStop(this.context, err || -1);
},
serverClose: function (err) {
this.listener.onServerClose(this.context, err || -1);
},
};

View File

@ -1,36 +1,63 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
{
add_test(function test_initalize_offline() {
Services.io.offline = true;
add_test(function test_initalize_offline() {
Services.io.offline = true;
MozLoopPushHandler.initialize(function(err) {
Assert.equal(err, "offline", "Should error with 'offline' when offline");
MozLoopPushHandler.initialize(function(err) {
Assert.equal(err, "offline", "Should error with 'offline' when offline");
Services.io.offline = false;
run_next_test();
Services.io.offline = false;
run_next_test();
});
});
});
add_test(function test_initalize_websocket() {
let mockWebSocket = new MockWebSocketChannel();
MozLoopPushHandler.initialize(function(err) {
Assert.equal(err, null, "Should return null for success");
add_test(function test_initalize_websocket() {
MozLoopPushHandler.initialize(
function(err, url) {
Assert.equal(err, null, "Should return null for success");
Assert.equal(url, kEndPointUrl, "Should return push server application URL");
Assert.equal(mockWebSocket.uri.prePath, kServerPushUrl,
"Should have the url from preferences");
Assert.equal(mockWebSocket.origin, kServerPushUrl,
"Should have the origin url from preferences");
Assert.equal(mockWebSocket.protocol, "push-notification",
"Should have the protocol set to push-notifications");
mockWebSocket.notify(15);
},
function(version) {
Assert.equal(version, 15, "Should have version number 15");
run_next_test();
},
mockWebSocket);
});
Assert.equal(mockWebSocket.uri.prePath, kServerPushUrl,
"Should have the url from preferences");
Assert.equal(mockWebSocket.origin, kServerPushUrl,
"Should have the origin url from preferences");
Assert.equal(mockWebSocket.protocol, "push-notification",
"Should have the protocol set to push-notifications");
add_test(function test_reconnect_websocket() {
MozLoopPushHandler.uaID = undefined;
MozLoopPushHandler.pushUrl = undefined; //Do this to force a new registration callback.
mockWebSocket.stop();
});
add_test(function test_reopen_websocket() {
MozLoopPushHandler.uaID = undefined;
MozLoopPushHandler.pushUrl = undefined; //Do this to force a new registration callback.
mockWebSocket.serverClose();
});
add_test(function test_retry_registration() {
MozLoopPushHandler.uaID = undefined;
MozLoopPushHandler.pushUrl = undefined; //Do this to force a new registration callback.
mockWebSocket.initRegStatus = 500;
mockWebSocket.stop();
});
function run_test() {
Services.prefs.setCharPref("services.push.serverURL", kServerPushUrl);
Services.prefs.setIntPref("loop.retry_delay.start", 10); // 10 ms
Services.prefs.setIntPref("loop.retry_delay.limit", 20); // 20 ms
run_next_test();
}, function() {}, mockWebSocket);
});
function run_test() {
Services.prefs.setCharPref("services.push.serverURL", kServerPushUrl);
run_next_test();
};
};
}

View File

@ -16,9 +16,10 @@
class nsWindowsShellService : public nsIWindowsShellService
{
virtual ~nsWindowsShellService();
public:
nsWindowsShellService();
virtual ~nsWindowsShellService();
NS_DECL_ISUPPORTS
NS_DECL_NSISHELLSERVICE

View File

@ -709,8 +709,11 @@ just addresses the organization to follow, e.g. "This site is run by " -->
<!ENTITY getUserMedia.selectCamera.label "Camera to share:">
<!ENTITY getUserMedia.selectCamera.accesskey "C">
<!ENTITY getUserMedia.selectWindowOrScreen.label "Window or screen to share:">
<!ENTITY getUserMedia.selectWindowOrScreen.accesskey "W">
<!ENTITY getUserMedia.selectMicrophone.label "Microphone to share:">
<!ENTITY getUserMedia.selectMicrophone.accesskey "M">
<!ENTITY getUserMedia.allWindowsShared.message "All visible windows on your screen will be shared.">
<!ENTITY webrtcIndicatorButton.label "Camera / Microphone Access">
<!ENTITY webrtcIndicatorButton.tooltip "Display sites you are currently sharing your camera or microphone with">

View File

@ -495,18 +495,28 @@ identity.loggedIn.description = Signed in as: %S
identity.loggedIn.signOut.label = Sign Out
identity.loggedIn.signOut.accessKey = O
# LOCALIZATION NOTE (getUserMedia.shareCamera.message, getUserMedia.shareMicrophone.message, getUserMedia.shareCameraAndMicrophone.message): %S is the website origin (e.g. www.mozilla.org)
# LOCALIZATION NOTE (getUserMedia.shareCamera.message, getUserMedia.shareMicrophone.message,
# getUserMedia.shareScreen.message, getUserMedia.shareCameraAndMicrophone.message,
# getUserMedia.shareScreenAndMicrophone.message):
# %S is the website origin (e.g. www.mozilla.org)
getUserMedia.shareCamera.message = Would you like to share your camera with %S?
getUserMedia.shareMicrophone.message = Would you like to share your microphone with %S?
getUserMedia.shareScreen.message = Would you like to share your screen with %S?
getUserMedia.shareCameraAndMicrophone.message = Would you like to share your camera and microphone with %S?
getUserMedia.shareScreenAndMicrophone.message = Would you like to share your microphone and screen with %S?
getUserMedia.noVideo.label = No Video
getUserMedia.noWindowOrScreen.label = No Window or Screen
getUserMedia.noAudio.label = No Audio
getUserMedia.shareEntireScreen.label = Entire screen
# LOCALIZATION NOTE (getUserMedia.shareSelectedDevices.label):
# Semicolon-separated list of plural forms. See:
# http://developer.mozilla.org/en/docs/Localization_and_Plurals
# The number of devices can be either one or two.
getUserMedia.shareCamera.message = Would you like to share your camera with %S?
getUserMedia.shareMicrophone.message = Would you like to share your microphone with %S?
getUserMedia.shareCameraAndMicrophone.message = Would you like to share your camera and microphone with %S?
getUserMedia.noVideo.label = No Video
getUserMedia.noAudio.label = No Audio
getUserMedia.shareSelectedDevices.label = Share Selected Device;Share Selected Devices
getUserMedia.shareSelectedDevices.accesskey = S
getUserMedia.shareScreen.label = Share Screen
getUserMedia.shareWindow.label = Share Selected Window
getUserMedia.shareSelectedItems.label = Share Selected Items
getUserMedia.always.label = Always Share
getUserMedia.always.accesskey = A
getUserMedia.denyRequest.label = Don't Share
@ -516,6 +526,8 @@ getUserMedia.never.accesskey = N
getUserMedia.sharingCamera.message2 = You are currently sharing your camera with this page.
getUserMedia.sharingMicrophone.message2 = You are currently sharing your microphone with this page.
getUserMedia.sharingCameraAndMicrophone.message2 = You are currently sharing your camera and microphone with this page.
getUserMedia.sharingScreen.message = You are currently sharing your screen with this page.
getUserMedia.sharingWindow.message = You are currently sharing a window with this page.
getUserMedia.continueSharing.label = Continue Sharing
getUserMedia.continueSharing.accesskey = C
getUserMedia.stopSharing.label = Stop Sharing

View File

@ -52,6 +52,22 @@ this.webrtcUI = {
});
}
return activeStreams;
},
updateMainActionLabel: function(aMenuList) {
let type = aMenuList.selectedItem.getAttribute("devicetype");
let document = aMenuList.ownerDocument;
document.getElementById("webRTC-all-windows-shared").hidden = type != "Screen";
// If we are also requesting audio in addition to screen sharing,
// always use a generic label.
if (!document.getElementById("webRTC-selectMicrophone").hidden)
type = "";
let bundle = document.defaultView.gNavigatorBundle;
let stringId = "getUserMedia.share" + (type || "SelectedItems") + ".label";
let popupnotification = aMenuList.parentNode.parentNode;
popupnotification.setAttribute("buttonlabel", bundle.getString(stringId));
}
}
@ -94,31 +110,36 @@ function denyRequest(aCallID, aError) {
Services.obs.notifyObservers(msg, "getUserMedia:response:deny", aCallID);
}
function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevices, aSecure) {
function prompt(aContentWindow, aCallID, aAudio, aVideo, aDevices, aSecure) {
let audioDevices = [];
let videoDevices = [];
// MediaStreamConstraints defines video as 'boolean or MediaTrackConstraints'.
let sharingScreen = aVideo && typeof(aVideo) != "boolean" &&
aVideo.mediaSource != "camera";
for (let device of aDevices) {
device = device.QueryInterface(Ci.nsIMediaDevice);
switch (device.type) {
case "audio":
if (aAudioRequested)
if (aAudio)
audioDevices.push(device);
break;
case "video":
if (aVideoRequested)
// Verify that if we got a camera, we haven't requested a screen share,
// or that if we requested a screen share we aren't getting a camera.
if (aVideo && (device.mediaSource == "camera") != sharingScreen)
videoDevices.push(device);
break;
}
}
let requestType;
if (audioDevices.length && videoDevices.length)
requestType = "CameraAndMicrophone";
else if (audioDevices.length)
requestType = "Microphone";
else if (videoDevices.length)
requestType = "Camera";
else {
let requestTypes = [];
if (videoDevices.length)
requestTypes.push(sharingScreen ? "Screen" : "Camera");
if (audioDevices.length)
requestTypes.push("Microphone");
if (!requestTypes.length) {
denyRequest(aCallID, "NO_DEVICES_FOUND");
return;
}
@ -128,12 +149,19 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
let chromeDoc = browser.ownerDocument;
let chromeWin = chromeDoc.defaultView;
let stringBundle = chromeWin.gNavigatorBundle;
let message = stringBundle.getFormattedString("getUserMedia.share" + requestType + ".message",
[ uri.host ]);
let stringId = "getUserMedia.share" + requestTypes.join("And") + ".message";
let message = stringBundle.getFormattedString(stringId, [uri.host]);
let mainLabel;
if (sharingScreen) {
mainLabel = stringBundle.getString("getUserMedia.shareSelectedItems.label");
}
else {
let string = stringBundle.getString("getUserMedia.shareSelectedDevices.label");
mainLabel = PluralForm.get(requestTypes.length, string);
}
let mainAction = {
label: PluralForm.get(requestType == "CameraAndMicrophone" ? 2 : 1,
stringBundle.getString("getUserMedia.shareSelectedDevices.label")),
label: mainLabel,
accessKey: stringBundle.getString("getUserMedia.shareSelectedDevices.accesskey"),
// The real callback will be set during the "showing" event. The
// empty function here is so that PopupNotifications.show doesn't
@ -148,8 +176,11 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
callback: function () {
denyRequest(aCallID);
}
},
{
}
];
if (!sharingScreen) { // Bug 1037438: implement 'never' for screen sharing.
secondaryActions.push({
label: stringBundle.getString("getUserMedia.never.label"),
accessKey: stringBundle.getString("getUserMedia.never.accesskey"),
callback: function () {
@ -162,11 +193,13 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
if (videoDevices.length)
perms.add(uri, "camera", perms.DENY_ACTION);
}
}
];
});
}
if (aSecure) {
// Don't show the 'Always' action if the connection isn't secure.
if (aSecure && !sharingScreen) {
// Don't show the 'Always' action if the connection isn't secure, or for
// screen sharing (because we can't guess which window the user wants to
// share without prompting).
secondaryActions.unshift({
label: stringBundle.getString("getUserMedia.always.label"),
accessKey: stringBundle.getString("getUserMedia.always.accesskey"),
@ -185,7 +218,11 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
if (aTopic == "shown") {
let PopupNotifications = chromeDoc.defaultView.PopupNotifications;
let popupId = requestType == "Microphone" ? "Microphone" : "Devices";
let popupId = "Devices";
if (requestTypes.length == 1 && requestTypes[0] == "Microphone")
popupId = "Microphone";
if (requestTypes.indexOf("Screen") != -1)
popupId = "Screen";
PopupNotifications.panel.firstChild.setAttribute("popupid", "webRTC-share" + popupId);
}
@ -206,6 +243,10 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
if (camPerm == perms.PROMPT_ACTION)
camPerm = perms.UNKNOWN_ACTION;
// Screen sharing shouldn't follow the camera permissions.
if (videoDevices.length && sharingScreen)
camPerm = perms.UNKNOWN_ACTION;
// We don't check that permissions are set to ALLOW_ACTION in this
// test; only that they are set. This is because if audio is allowed
// and video is denied persistently, we don't want to show the prompt,
@ -235,24 +276,70 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
}
}
function addDeviceToList(menupopup, deviceName, deviceIndex) {
function listScreenShareDevices(menupopup, devices) {
while (menupopup.lastChild)
menupopup.removeChild(menupopup.lastChild);
// "No Window or Screen" is the default because we can't pick a
// 'default' window to share.
addDeviceToList(menupopup,
stringBundle.getString("getUserMedia.noWindowOrScreen.label"),
"-1");
// Then add the 'Entire screen' item if mozGetUserMediaDevices returned it.
for (let i = 0; i < devices.length; ++i) {
if (devices[i].mediaSource == "screen") {
menupopup.appendChild(chromeDoc.createElement("menuseparator"));
addDeviceToList(menupopup,
stringBundle.getString("getUserMedia.shareEntireScreen.label"),
i, "Screen");
break;
}
}
// Finally add all the window names.
let separatorNeeded = true;
for (let i = 0; i < devices.length; ++i) {
if (devices[i].mediaSource == "window") {
if (separatorNeeded) {
menupopup.appendChild(chromeDoc.createElement("menuseparator"));
separatorNeeded = false;
}
addDeviceToList(menupopup, devices[i].name, i, "Window");
}
}
// Always re-select the "No Window or Screen" item.
chromeDoc.getElementById("webRTC-selectWindow-menulist").removeAttribute("value");
chromeDoc.getElementById("webRTC-all-windows-shared").hidden = true;
}
function addDeviceToList(menupopup, deviceName, deviceIndex, type) {
let menuitem = chromeDoc.createElement("menuitem");
menuitem.setAttribute("value", deviceIndex);
menuitem.setAttribute("label", deviceName);
menuitem.setAttribute("tooltiptext", deviceName);
if (type)
menuitem.setAttribute("devicetype", type);
menupopup.appendChild(menuitem);
}
chromeDoc.getElementById("webRTC-selectCamera").hidden = !videoDevices.length;
chromeDoc.getElementById("webRTC-selectCamera").hidden = !videoDevices.length || sharingScreen;
chromeDoc.getElementById("webRTC-selectWindowOrScreen").hidden = !sharingScreen || !videoDevices.length;
chromeDoc.getElementById("webRTC-selectMicrophone").hidden = !audioDevices.length;
let camMenupopup = chromeDoc.getElementById("webRTC-selectCamera-menupopup");
let windowMenupopup = chromeDoc.getElementById("webRTC-selectWindow-menupopup");
let micMenupopup = chromeDoc.getElementById("webRTC-selectMicrophone-menupopup");
listDevices(camMenupopup, videoDevices);
if (sharingScreen)
listScreenShareDevices(windowMenupopup, videoDevices);
else
listDevices(camMenupopup, videoDevices);
listDevices(micMenupopup, audioDevices);
if (requestType == "CameraAndMicrophone") {
if (requestTypes.length == 2) {
let stringBundle = chromeDoc.defaultView.gNavigatorBundle;
addDeviceToList(camMenupopup, stringBundle.getString("getUserMedia.noVideo.label"), "-1");
if (!sharingScreen)
addDeviceToList(camMenupopup, stringBundle.getString("getUserMedia.noVideo.label"), "-1");
addDeviceToList(micMenupopup, stringBundle.getString("getUserMedia.noAudio.label"), "-1");
}
@ -261,7 +348,8 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
.createInstance(Ci.nsISupportsArray);
let perms = Services.perms;
if (videoDevices.length) {
let videoDeviceIndex = chromeDoc.getElementById("webRTC-selectCamera-menulist").value;
let listId = "webRTC-select" + (sharingScreen ? "Window" : "Camera") + "-menulist";
let videoDeviceIndex = chromeDoc.getElementById(listId).value;
let allowCamera = videoDeviceIndex != "-1";
if (allowCamera)
allowedDevices.AppendElement(videoDevices[videoDeviceIndex]);
@ -292,8 +380,11 @@ function prompt(aContentWindow, aCallID, aAudioRequested, aVideoRequested, aDevi
}
};
let anchorId = requestType == "Microphone" ? "webRTC-shareMicrophone-notification-icon"
: "webRTC-shareDevices-notification-icon";
let anchorId = "webRTC-shareDevices-notification-icon";
if (requestTypes.length == 1 && requestTypes[0] == "Microphone")
anchorId = "webRTC-shareMicrophone-notification-icon";
if (requestTypes.indexOf("Screen") != -1)
anchorId = "webRTC-shareScreen-notification-icon";
chromeWin.PopupNotifications.show(browser, "webRTC-shareDevices", message,
anchorId, mainAction, secondaryActions, options);
}
@ -311,18 +402,17 @@ function updateIndicators() {
}
function showBrowserSpecificIndicator(aBrowser) {
let hasVideo = {};
let hasAudio = {};
let camera = {}, microphone = {}, screen = {}, window = {};
MediaManagerService.mediaCaptureWindowState(aBrowser.contentWindow,
hasVideo, hasAudio);
camera, microphone, screen, window);
let captureState;
if (hasVideo.value && hasAudio.value) {
if (camera.value && microphone.value) {
captureState = "CameraAndMicrophone";
} else if (hasVideo.value) {
} else if (camera.value) {
captureState = "Camera";
} else if (hasAudio.value) {
} else if (microphone.value) {
captureState = "Microphone";
} else {
} else if (!screen.value && !window.value) {
Cu.reportError("showBrowserSpecificIndicator: got neither video nor audio access");
return;
}
@ -330,8 +420,6 @@ function showBrowserSpecificIndicator(aBrowser) {
let chromeWin = aBrowser.ownerDocument.defaultView;
let stringBundle = chromeWin.gNavigatorBundle;
let message = stringBundle.getString("getUserMedia.sharing" + captureState + ".message2");
let uri = aBrowser.contentWindow.document.documentURIObject;
let windowId = aBrowser.contentWindow
.QueryInterface(Ci.nsIInterfaceRequestor)
@ -348,14 +436,22 @@ function showBrowserSpecificIndicator(aBrowser) {
accessKey: stringBundle.getString("getUserMedia.stopSharing.accesskey"),
callback: function () {
let perms = Services.perms;
if (hasVideo.value &&
if (camera.value &&
perms.testExactPermission(uri, "camera") == perms.ALLOW_ACTION)
perms.remove(uri.host, "camera");
if (hasAudio.value &&
if (microphone.value &&
perms.testExactPermission(uri, "microphone") == perms.ALLOW_ACTION)
perms.remove(uri.host, "microphone");
Services.obs.notifyObservers(null, "getUserMedia:revoke", windowId);
// Performing an action from a notification removes it, but if the page
// uses screensharing and a device, we may have another notification to remove.
let outerWindowID = Services.wm.getCurrentInnerWindowWithId(windowId)
.QueryInterface(Ci.nsIInterfaceRequestor)
.getInterface(Ci.nsIDOMWindowUtils)
.outerWindowID;
removeBrowserSpecificIndicator(null, null, outerWindowID);
}
}];
let options = {
@ -370,18 +466,46 @@ function showBrowserSpecificIndicator(aBrowser) {
return aTopic == "swapping";
}
};
let anchorId = captureState == "Microphone" ? "webRTC-sharingMicrophone-notification-icon"
: "webRTC-sharingDevices-notification-icon";
chromeWin.PopupNotifications.show(aBrowser, "webRTC-sharingDevices", message,
anchorId, mainAction, secondaryActions, options);
if (captureState) {
let anchorId = captureState == "Microphone" ? "webRTC-sharingMicrophone-notification-icon"
: "webRTC-sharingDevices-notification-icon";
let message = stringBundle.getString("getUserMedia.sharing" + captureState + ".message2");
chromeWin.PopupNotifications.show(aBrowser, "webRTC-sharingDevices", message,
anchorId, mainAction, secondaryActions, options);
}
// Now handle the screen sharing indicator.
if (!screen.value && !window.value)
return;
options = {
hideNotNow: true,
dismissed: true,
eventCallback: function(aTopic) {
if (aTopic == "shown") {
let PopupNotifications = this.browser.ownerDocument.defaultView.PopupNotifications;
PopupNotifications.panel.firstChild.setAttribute("popupid", "webRTC-sharingScreen");
}
return aTopic == "swapping";
}
};
// If we are sharing both a window and the screen, show 'Screen'.
let stringId = "getUserMedia.sharing" + (screen.value ? "Screen" : "Window") + ".message";
chromeWin.PopupNotifications.show(aBrowser, "webRTC-sharingScreen",
stringBundle.getString(stringId),
"webRTC-sharingScreen-notification-icon",
mainAction, secondaryActions, options);
}
function removeBrowserSpecificIndicator(aSubject, aTopic, aData) {
let browser = getBrowserForWindowId(aData);
let PopupNotifications = browser.ownerDocument.defaultView.PopupNotifications;
let notification = PopupNotifications &&
PopupNotifications.getNotification("webRTC-sharingDevices",
browser);
if (notification)
PopupNotifications.remove(notification);
if (!PopupNotifications)
return;
for (let notifId of ["webRTC-sharingDevices", "webRTC-sharingScreen"]) {
let notification = PopupNotifications.getNotification(notifId, browser);
if (notification)
PopupNotifications.remove(notification);
}
}

View File

@ -1231,6 +1231,11 @@ toolbarbutton[sdk-button="true"][cui-areatype="toolbar"] > .toolbarbutton-icon {
list-style-image: url(chrome://browser/skin/webRTC-shareMicrophone-64.png);
}
.popup-notification-icon[popupid="webRTC-sharingScreen"],
.popup-notification-icon[popupid="webRTC-shareScreen"] {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-64.png);
}
.popup-notification-icon[popupid="pointerLock"] {
list-style-image: url(chrome://browser/skin/pointerLock-64.png);
}
@ -1377,6 +1382,16 @@ toolbarbutton[sdk-button="true"][cui-areatype="toolbar"] > .toolbarbutton-icon {
list-style-image: url(chrome://browser/skin/webRTC-sharingMicrophone-16.png);
}
.webRTC-shareScreen-notification-icon,
#webRTC-shareScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-16.png);
}
.webRTC-sharingScreen-notification-icon,
#webRTC-sharingScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-sharingScreen-16.png);
}
.web-notifications-notification-icon,
#web-notifications-notification-icon {
list-style-image: url(chrome://browser/skin/notification-16.png);

View File

@ -74,6 +74,9 @@ browser.jar:
skin/classic/browser/webRTC-shareMicrophone-16.png
skin/classic/browser/webRTC-shareMicrophone-64.png
skin/classic/browser/webRTC-sharingMicrophone-16.png
skin/classic/browser/webRTC-shareScreen-16.png (../shared/webrtc/webRTC-shareScreen-16.png)
skin/classic/browser/webRTC-shareScreen-64.png (../shared/webrtc/webRTC-shareScreen-64.png)
skin/classic/browser/webRTC-sharingScreen-16.png (../shared/webrtc/webRTC-sharingScreen-16.png)
skin/classic/browser/customizableui/background-noise-toolbar.png (customizableui/background-noise-toolbar.png)
skin/classic/browser/customizableui/customize-illustration.png (../shared/customizableui/customize-illustration.png)
skin/classic/browser/customizableui/customize-illustration-rtl.png (../shared/customizableui/customize-illustration-rtl.png)

View File

@ -3570,6 +3570,28 @@ toolbarbutton.chevron > .toolbarbutton-menu-dropmarker {
}
}
.webRTC-shareScreen-notification-icon,
#webRTC-shareScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-16.png);
}
@media (min-resolution: 2dppx) {
.webRTC-shareScreen-notification-icon,
#webRTC-shareScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-16@2x.png);
}
}
.webRTC-sharingScreen-notification-icon,
#webRTC-sharingScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-sharingScreen-16.png);
}
@media (min-resolution: 2dppx) {
.webRTC-sharingScreen-notification-icon,
#webRTC-sharingScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-sharingScreen-16@2x.png);
}
}
.web-notifications-notification-icon,
#web-notifications-notification-icon {
list-style-image: url(chrome://browser/skin/notification-16.png);
@ -3892,6 +3914,17 @@ menulist.translate-infobar-element > .menulist-dropmarker {
}
}
.popup-notification-icon[popupid="webRTC-sharingScreen"],
.popup-notification-icon[popupid="webRTC-shareScreen"] {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-64.png);
}
@media (min-resolution: 2dppx) {
.popup-notification-icon[popupid="webRTC-sharingScreen"],
.popup-notification-icon[popupid="webRTC-shareScreen"] {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-64@2x.png);
}
}
/* Popup Buttons */
#identity-popup-more-info-button {
@hudButton@

View File

@ -125,6 +125,12 @@ browser.jar:
skin/classic/browser/webRTC-shareMicrophone-64@2x.png
skin/classic/browser/webRTC-sharingMicrophone-16.png
skin/classic/browser/webRTC-sharingMicrophone-16@2x.png
skin/classic/browser/webRTC-shareScreen-16.png (../shared/webrtc/webRTC-shareScreen-16.png)
skin/classic/browser/webRTC-shareScreen-16@2x.png (../shared/webrtc/webRTC-shareScreen-16@2x.png)
skin/classic/browser/webRTC-shareScreen-64.png (../shared/webrtc/webRTC-shareScreen-64.png)
skin/classic/browser/webRTC-shareScreen-64@2x.png (../shared/webrtc/webRTC-shareScreen-64@2x.png)
skin/classic/browser/webRTC-sharingScreen-16.png (../shared/webrtc/webRTC-sharingScreen-16.png)
skin/classic/browser/webRTC-sharingScreen-16@2x.png (../shared/webrtc/webRTC-sharingScreen-16@2x.png)
skin/classic/browser/customizableui/background-noise-toolbar.png (customizableui/background-noise-toolbar.png)
skin/classic/browser/customizableui/customize-titleBar-toggle.png (customizableui/customize-titleBar-toggle.png)
skin/classic/browser/customizableui/customize-titleBar-toggle@2x.png (customizableui/customize-titleBar-toggle@2x.png)

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 16 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

View File

@ -2213,6 +2213,11 @@ toolbarbutton.bookmark-item[dragover="true"][open="true"] {
list-style-image: url(chrome://browser/skin/webRTC-shareMicrophone-64.png);
}
.popup-notification-icon[popupid="webRTC-sharingScreen"],
.popup-notification-icon[popupid="webRTC-shareScreen"] {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-64.png);
}
.popup-notification-icon[popupid="pointerLock"] {
list-style-image: url(chrome://browser/skin/pointerLock-64.png);
}
@ -2353,6 +2358,16 @@ toolbarbutton.bookmark-item[dragover="true"][open="true"] {
list-style-image: url(chrome://browser/skin/webRTC-sharingMicrophone-16.png);
}
.webRTC-shareScreen-notification-icon,
#webRTC-shareScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-shareScreen-16.png);
}
.webRTC-sharingScreen-notification-icon,
#webRTC-sharingScreen-notification-icon {
list-style-image: url(chrome://browser/skin/webRTC-sharingScreen-16.png);
}
.web-notifications-notification-icon,
#web-notifications-notification-icon {
list-style-image: url(chrome://browser/skin/notification-16.png);

View File

@ -95,6 +95,9 @@ browser.jar:
skin/classic/browser/webRTC-shareMicrophone-16.png
skin/classic/browser/webRTC-shareMicrophone-64.png
skin/classic/browser/webRTC-sharingMicrophone-16.png
skin/classic/browser/webRTC-shareScreen-16.png (../shared/webrtc/webRTC-shareScreen-16.png)
skin/classic/browser/webRTC-shareScreen-64.png (../shared/webrtc/webRTC-shareScreen-64.png)
skin/classic/browser/webRTC-sharingScreen-16.png (../shared/webrtc/webRTC-sharingScreen-16.png)
skin/classic/browser/customizableui/background-noise-toolbar.png (customizableui/background-noise-toolbar.png)
skin/classic/browser/customizableui/customizeFavicon.ico (../shared/customizableui/customizeFavicon.ico)
skin/classic/browser/customizableui/customize-illustration.png (../shared/customizableui/customize-illustration.png)
@ -504,6 +507,9 @@ browser.jar:
skin/classic/aero/browser/webRTC-shareMicrophone-16.png
skin/classic/aero/browser/webRTC-shareMicrophone-64.png
skin/classic/aero/browser/webRTC-sharingMicrophone-16.png
skin/classic/aero/browser/webRTC-shareScreen-16.png (../shared/webrtc/webRTC-shareScreen-16.png)
skin/classic/aero/browser/webRTC-shareScreen-64.png (../shared/webrtc/webRTC-shareScreen-64.png)
skin/classic/aero/browser/webRTC-sharingScreen-16.png (../shared/webrtc/webRTC-sharingScreen-16.png)
skin/classic/aero/browser/customizableui/background-noise-toolbar.png (customizableui/background-noise-toolbar.png)
skin/classic/aero/browser/customizableui/customize-illustration.png (../shared/customizableui/customize-illustration.png)
skin/classic/aero/browser/customizableui/customize-illustration-rtl.png (../shared/customizableui/customize-illustration-rtl.png)

View File

@ -78,7 +78,7 @@ DEBUGGER_INFO = {
# semi-deliberately leaked, so we set '--show-possibly-lost=no' to avoid
# uninteresting output from those objects. We set '--smc-check==all-non-file'
# and '--vex-iropt-register-updates=allregs-at-mem-access' so that valgrind
# deals properly with JIT'd JavaScript code.
# deals properly with JIT'd JavaScript code.
"valgrind": {
"interactive": False,
"args": " ".join(["--leak-check=full",
@ -602,29 +602,31 @@ class ShutdownLeaks(object):
self.currentTest = None
self.seenShutdown = False
def log(self, line):
if line[2:11] == "DOMWINDOW":
self._logWindow(line)
elif line[2:10] == "DOCSHELL":
self._logDocShell(line)
elif line.startswith("TEST-START"):
fileName = line.split(" ")[-1].strip().replace("chrome://mochitests/content/browser/", "")
def log(self, message):
if message['action'] == 'log':
line = message['message']
if line[2:11] == "DOMWINDOW":
self._logWindow(line)
elif line[2:10] == "DOCSHELL":
self._logDocShell(line)
elif message['action'] == 'test_start':
fileName = message['test'].replace("chrome://mochitests/content/browser/", "")
self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
elif line.startswith("INFO TEST-END"):
elif message['action'] == 'test_end':
# don't track a test if no windows or docShells leaked
if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
self.tests.append(self.currentTest)
self.currentTest = None
elif line.startswith("INFO TEST-START | Shutdown"):
elif message['action'] == 'suite_end':
self.seenShutdown = True
def process(self):
for test in self._parseLeakingTests():
for url, count in self._zipLeakedWindows(test["leakedWindows"]):
self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]", test["fileName"], count, url)
self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]" % (test["fileName"], count, url))
if test["leakedDocShells"]:
self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown", test["fileName"], len(test["leakedDocShells"]))
self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown" % (test["fileName"], len(test["leakedDocShells"])))
def _logWindow(self, line):
created = line[:2] == "++"
@ -633,7 +635,7 @@ class ShutdownLeaks(object):
# log line has invalid format
if not pid or not serial:
self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
return
key = pid + "." + serial
@ -654,7 +656,7 @@ class ShutdownLeaks(object):
# log line has invalid format
if not pid or not id:
self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
return
key = pid + "." + id

View File

@ -21,11 +21,13 @@ fennecLogcatFilters = [ "The character encoding of the HTML document was not dec
class RemoteAutomation(Automation):
_devicemanager = None
def __init__(self, deviceManager, appName = '', remoteLog = None):
def __init__(self, deviceManager, appName = '', remoteLog = None,
processArgs=None):
self._devicemanager = deviceManager
self._appName = appName
self._remoteProfile = None
self._remoteLog = remoteLog
self._processArgs = processArgs or {};
# Default our product to fennec
self._product = "fennec"
@ -184,17 +186,21 @@ class RemoteAutomation(Automation):
if stdout == None or stdout == -1 or stdout == subprocess.PIPE:
stdout = self._remoteLog
return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName)
return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName,
**self._processArgs)
# be careful here as this inner class doesn't have access to outer class members
class RProcess(object):
# device manager process
dm = None
def __init__(self, dm, cmd, stdout = None, stderr = None, env = None, cwd = None, app = None):
def __init__(self, dm, cmd, stdout=None, stderr=None, env=None, cwd=None, app=None,
messageLogger=None):
self.dm = dm
self.stdoutlen = 0
self.lastTestSeen = "remoteautomation.py"
self.proc = dm.launchProcess(cmd, stdout, cwd, env, True)
self.messageLogger = messageLogger
if (self.proc is None):
if cmd[0] == 'am':
self.proc = stdout
@ -210,6 +216,9 @@ class RemoteAutomation(Automation):
# The benefit of the following sleep is unclear; it was formerly 15 seconds
time.sleep(1)
# Used to buffer log messages until we meet a line break
self.logBuffer = ""
@property
def pid(self):
pid = self.dm.processExist(self.procName)
@ -221,29 +230,49 @@ class RemoteAutomation(Automation):
return 0
return pid
@property
def stdout(self):
def read_stdout(self):
""" Fetch the full remote log file using devicemanager and return just
the new log entries since the last call (as a multi-line string).
the new log entries since the last call (as a list of messages or lines).
"""
if self.dm.fileExists(self.proc):
try:
newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
except DMError:
# we currently don't retry properly in the pullFile
# function in dmSUT, so an error here is not necessarily
# the end of the world
return ''
self.stdoutlen += len(newLogContent)
# Match the test filepath from the last TEST-START line found in the new
# log content. These lines are in the form:
# 1234 INFO TEST-START | /filepath/we/wish/to/capture.html\n
if not self.dm.fileExists(self.proc):
return []
try:
newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
except DMError:
# we currently don't retry properly in the pullFile
# function in dmSUT, so an error here is not necessarily
# the end of the world
return []
if not newLogContent:
return []
self.stdoutlen += len(newLogContent)
if self.messageLogger is None:
testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent)
if testStartFilenames:
self.lastTestSeen = testStartFilenames[-1]
return newLogContent.strip('\n').strip()
else:
return ''
print newLogContent
return [newLogContent]
self.logBuffer += newLogContent
lines = self.logBuffer.split('\n')
if not lines:
return
# We only keep the last (unfinished) line in the buffer
self.logBuffer = lines[-1]
del lines[-1]
messages = []
for line in lines:
# This passes the line to the logger (to be logged or buffered)
# and returns a list of structured messages (dict) or None, depending on the log
parsed_messages = self.messageLogger.write(line)
for message in parsed_messages:
if message['action'] == 'test_start':
self.lastTestSeen = message['test']
messages += parsed_messages
return messages
@property
def getLastTestSeen(self):
@ -258,7 +287,7 @@ class RemoteAutomation(Automation):
def wait(self, timeout = None, noOutputTimeout = None):
timer = 0
noOutputTimer = 0
interval = 20
interval = 20
if timeout == None:
timeout = self.timeout
@ -266,10 +295,9 @@ class RemoteAutomation(Automation):
status = 0
while (self.dm.getTopActivity() == self.procName):
# retrieve log updates every 60 seconds
if timer % 60 == 0:
t = self.stdout
if t != '':
print t
if timer % 60 == 0:
messages = self.read_stdout()
if messages:
noOutputTimer = 0
time.sleep(interval)
@ -283,7 +311,7 @@ class RemoteAutomation(Automation):
break
# Flush anything added to stdout during the sleep
print self.stdout
self.read_stdout()
return status

View File

@ -12,7 +12,6 @@ public class FennecMochitestAssert implements Assert {
private LinkedList<testInfo> mTestList = new LinkedList<testInfo>();
// Internal state variables to make logging match up with existing mochitests
private int mLineNumber = 0;
private int mPassed = 0;
private int mFailed = 0;
private int mTodo = 0;
@ -45,13 +44,13 @@ public class FennecMochitestAssert implements Assert {
String message;
if (!mLogStarted) {
dumpLog(Integer.toString(mLineNumber++) + " INFO SimpleTest START");
dumpLog("SimpleTest START");
mLogStarted = true;
}
if (mLogTestName != "") {
long diff = SystemClock.uptimeMillis() - mStartTime;
message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
message = "TEST-END | " + mLogTestName;
message += " | finished in " + diff + "ms";
dumpLog(message);
mLogTestName = "";
@ -63,7 +62,7 @@ public class FennecMochitestAssert implements Assert {
mLogTestName = nameParts[nameParts.length - 1];
mStartTime = SystemClock.uptimeMillis();
dumpLog(Integer.toString(mLineNumber++) + " INFO TEST-START | " + mLogTestName);
dumpLog("TEST-START | " + mLogTestName);
}
class testInfo {
@ -95,7 +94,7 @@ public class FennecMochitestAssert implements Assert {
String diag = test.mName;
if (test.mDiag != null) diag += " - " + test.mDiag;
String message = Integer.toString(mLineNumber++) + " INFO " + resultString + " | " + mLogTestName + " | " + diag;
String message = resultString + " | " + mLogTestName + " | " + diag;
dumpLog(message);
if (test.mInfo) {
@ -117,21 +116,21 @@ public class FennecMochitestAssert implements Assert {
if (mLogTestName != "") {
long diff = SystemClock.uptimeMillis() - mStartTime;
message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
message = "TEST-END | " + mLogTestName;
message += " | finished in " + diff + "ms";
dumpLog(message);
mLogTestName = "";
}
message = Integer.toString(mLineNumber++) + " INFO TEST-START | Shutdown";
message = "TEST-START | Shutdown";
dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO Passed: " + Integer.toString(mPassed);
message = "Passed: " + Integer.toString(mPassed);
dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO Failed: " + Integer.toString(mFailed);
message = "Failed: " + Integer.toString(mFailed);
dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO Todo: " + Integer.toString(mTodo);
message = "Todo: " + Integer.toString(mTodo);
dumpLog(message);
message = Integer.toString(mLineNumber++) + " INFO SimpleTest FINISHED";
message = "SimpleTest FINISHED";
dumpLog(message);
}

View File

@ -504,6 +504,8 @@ case "$target" in
# Disable SEH on clang-cl because it doesn't implement them yet.
if test -z "$CLANG_CL"; then
AC_DEFINE(HAVE_SEH_EXCEPTIONS)
else
AC_DEFINE_UNQUOTED(GTEST_HAS_SEH, 0)
fi
if test -n "$WIN32_REDIST_DIR"; then
@ -3876,7 +3878,6 @@ MOZ_PAY=
MOZ_AUDIO_CHANNEL_MANAGER=
NSS_NO_LIBPKIX=
MOZ_CONTENT_SANDBOX=
MOZ_CONTENT_SANDBOX_REPORTER=1
JSGC_USE_EXACT_ROOTING=
JSGC_GENERATIONAL=
@ -6442,17 +6443,6 @@ fi
AC_SUBST(MOZ_CONTENT_SANDBOX)
MOZ_ARG_ENABLE_BOOL(content-sandbox-reporter,
[ --enable-content-sandbox-reporter Enable syscall reporter to troubleshoot syscalls denied by the content-processes sandbox],
MOZ_CONTENT_SANDBOX_REPORTER=1,
MOZ_CONTENT_SANDBOX_REPORTER=)
if test -n "$MOZ_CONTENT_SANDBOX_REPORTER"; then
AC_DEFINE(MOZ_CONTENT_SANDBOX_REPORTER)
fi
AC_SUBST(MOZ_CONTENT_SANDBOX_REPORTER)
dnl ========================================================
dnl =
dnl = Module specific options

View File

@ -2991,6 +2991,14 @@ nsXMLHttpRequest::Send(nsIVariant* aVariant, const Nullable<RequestBody>& aBody)
if (scheme.LowerCaseEqualsLiteral("app") ||
scheme.LowerCaseEqualsLiteral("jar")) {
mIsMappedArrayBuffer = true;
if (XRE_GetProcessType() != GeckoProcessType_Default) {
nsCOMPtr<nsIJARChannel> jarChannel = do_QueryInterface(mChannel);
// For memory mapping from child process, we need to get file
// descriptor of the JAR file opened remotely on the parent proess.
// Set this to make sure that file descriptor can be obtained by
// child process.
jarChannel->EnsureChildFd();
}
}
}
}

View File

@ -149,7 +149,6 @@ is(r.result, null,
// Test loading an empty file works (and doesn't crash!)
var emptyFile = createFileWithData("");
dump("hello nurse");
r = new FileReader();
r.onload = getLoadHandler("", 0, "empty no encoding reading");
r.readAsText(emptyFile, "");
@ -427,8 +426,8 @@ function testHasRun() {
//alert(testRanCounter);
++testRanCounter;
if (testRanCounter == expectedTestCount) {
is(onloadHasRunText, true, "onload text should have fired by now");
is(onloadHasRunBinary, true, "onload binary should have fired by now");
is(onloadHasRunText, true, "onload text should have fired by now");
is(onloadHasRunBinary, true, "onload binary should have fired by now");
SimpleTest.finish();
}
}

View File

@ -312,19 +312,6 @@ AudioNodeStream::ObtainInputBlock(AudioChunk& aTmpChunk, uint32_t aPortIndex)
continue;
}
// It is possible for mLastChunks to be empty here, because `a` might be a
// AudioNodeStream that has not been scheduled yet, because it is further
// down the graph _but_ as a connection to this node. Because we enforce the
// presence of at least one DelayNode, with at least one block of delay, and
// because the output of a DelayNode when it has been fed less that
// `delayTime` amount of audio is silence, we can simply continue here,
// because this input would not influence the output of this node. Next
// iteration, a->mLastChunks.IsEmpty() will be false, and everthing will
// work as usual.
if (a->mLastChunks.IsEmpty()) {
continue;
}
AudioChunk* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
MOZ_ASSERT(chunk);
if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
@ -453,7 +440,7 @@ AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
// appear to extend slightly beyond aFrom, so we might not be blocked yet.
bool blocked = mFinished || mBlocked.GetAt(aFrom);
// If the stream has finished at this time, it will be blocked.
if (mMuted || blocked) {
if (blocked || InMutedCycle()) {
for (uint16_t i = 0; i < outputCount; ++i) {
mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE);
}
@ -498,6 +485,32 @@ AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags)
}
}
void
AudioNodeStream::ProduceOutputBeforeInput(GraphTime aFrom)
{
MOZ_ASSERT(mEngine->AsDelayNodeEngine());
MOZ_ASSERT(mEngine->OutputCount() == 1,
"DelayNodeEngine output count should be 1");
MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles");
mLastChunks.SetLength(1);
// Consider this stream blocked if it has already finished output. Normally
// mBlocked would reflect this, but due to rounding errors our audio track may
// appear to extend slightly beyond aFrom, so we might not be blocked yet.
bool blocked = mFinished || mBlocked.GetAt(aFrom);
// If the stream has finished at this time, it will be blocked.
if (blocked) {
mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
} else {
mEngine->ProduceBlockBeforeInput(&mLastChunks[0]);
NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE,
"Invalid WebAudio chunk size");
if (mDisabledTrackIDs.Contains(static_cast<TrackID>(AUDIO_TRACK))) {
mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
}
}
}
void
AudioNodeStream::AdvanceOutputSegment()
{

View File

@ -15,7 +15,6 @@ namespace mozilla {
namespace dom {
struct ThreeDPoint;
class AudioParamTimeline;
class DelayNodeEngine;
class AudioContext;
}
@ -55,8 +54,7 @@ public:
mKind(aKind),
mNumberOfInputChannels(2),
mMarkAsFinishedAfterThisBlock(false),
mAudioParamStream(false),
mMuted(false)
mAudioParamStream(false)
{
MOZ_ASSERT(NS_IsMainThread());
mChannelCountMode = ChannelCountMode::Max;
@ -107,18 +105,17 @@ public:
ChannelCountMode aChannelCountMoe,
ChannelInterpretation aChannelInterpretation);
virtual void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) MOZ_OVERRIDE;
/**
* Produce the next block of output, before input is provided.
* ProcessInput() will be called later, and it then should not change
* the output. This is used only for DelayNodeEngine in a feedback loop.
*/
void ProduceOutputBeforeInput(GraphTime aFrom);
TrackTicks GetCurrentPosition();
bool IsAudioParamStream() const
{
return mAudioParamStream;
}
void Mute() {
mMuted = true;
}
void Unmute() {
mMuted = false;
}
const OutputChunks& LastChunks() const
{
@ -195,8 +192,6 @@ protected:
bool mMarkAsFinishedAfterThisBlock;
// Whether the stream is an AudioParamHelper stream.
bool mAudioParamStream;
// Whether the stream is muted. Access only on the MediaStreamGraph thread.
bool mMuted;
};
}

View File

@ -258,7 +258,8 @@ AudioStream::AudioStream()
AudioStream::~AudioStream()
{
LOG(("AudioStream: delete %p, state %d", this, mState));
Shutdown();
MOZ_ASSERT(mState == SHUTDOWN && !mCubebStream,
"Should've called Shutdown() before deleting an AudioStream");
if (mDumpFile) {
fclose(mDumpFile);
}
@ -317,6 +318,10 @@ nsresult AudioStream::EnsureTimeStretcherInitializedUnlocked()
nsresult AudioStream::SetPlaybackRate(double aPlaybackRate)
{
// MUST lock since the rate transposer is used from the cubeb callback,
// and rate changes can cause the buffer to be reallocated
MonitorAutoLock mon(mMonitor);
NS_ASSERTION(aPlaybackRate > 0.0,
"Can't handle negative or null playbackrate in the AudioStream.");
// Avoid instantiating the resampler if we are not changing the playback rate.
@ -325,9 +330,6 @@ nsresult AudioStream::SetPlaybackRate(double aPlaybackRate)
return NS_OK;
}
// MUST lock since the rate transposer is used from the cubeb callback,
// and rate changes can cause the buffer to be reallocated
MonitorAutoLock mon(mMonitor);
if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
return NS_ERROR_FAILURE;
}
@ -347,14 +349,15 @@ nsresult AudioStream::SetPlaybackRate(double aPlaybackRate)
nsresult AudioStream::SetPreservesPitch(bool aPreservesPitch)
{
// MUST lock since the rate transposer is used from the cubeb callback,
// and rate changes can cause the buffer to be reallocated
MonitorAutoLock mon(mMonitor);
// Avoid instantiating the timestretcher instance if not needed.
if (aPreservesPitch == mAudioClock.GetPreservesPitch()) {
return NS_OK;
}
// MUST lock since the rate transposer is used from the cubeb callback,
// and rate changes can cause the buffer to be reallocated
MonitorAutoLock mon(mMonitor);
if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
return NS_ERROR_FAILURE;
}
@ -374,6 +377,7 @@ nsresult AudioStream::SetPreservesPitch(bool aPreservesPitch)
int64_t AudioStream::GetWritten()
{
MonitorAutoLock mon(mMonitor);
return mWritten;
}
@ -533,7 +537,10 @@ AudioStream::Init(int32_t aNumChannels, int32_t aRate,
nsresult rv = OpenCubeb(params, aLatencyRequest);
// See if we need to start() the stream, since we must do that from this
// thread for now (cubeb API issue)
CheckForStart();
{
MonitorAutoLock mon(mMonitor);
CheckForStart();
}
return rv;
}
@ -600,6 +607,7 @@ AudioStream::OpenCubeb(cubeb_stream_params &aParams,
void
AudioStream::CheckForStart()
{
mMonitor.AssertCurrentThreadOwns();
if (mState == INITIALIZED) {
// Start the stream right away when low latency has been requested. This means
// that the DataCallback will feed silence to cubeb, until the first frames
@ -779,9 +787,12 @@ AudioStream::StartUnlocked()
mNeedsStart = true;
return;
}
MonitorAutoUnlock mon(mMonitor);
if (mState == INITIALIZED) {
int r = cubeb_stream_start(mCubebStream);
int r;
{
MonitorAutoUnlock mon(mMonitor);
r = cubeb_stream_start(mCubebStream);
}
mState = r == CUBEB_OK ? STARTED : ERRORED;
LOG(("AudioStream: started %p, state %s", this, mState == STARTED ? "STARTED" : "ERRORED"));
}
@ -828,21 +839,19 @@ AudioStream::Resume()
void
AudioStream::Shutdown()
{
MonitorAutoLock mon(mMonitor);
LOG(("AudioStream: Shutdown %p, state %d", this, mState));
{
MonitorAutoLock mon(mMonitor);
if (mState == STARTED || mState == RUNNING) {
MonitorAutoUnlock mon(mMonitor);
Pause();
}
MOZ_ASSERT(mState != STARTED && mState != RUNNING); // paranoia
mState = SHUTDOWN;
}
// Must not try to shut down cubeb from within the lock! wasapi may still
// call our callback after Pause()/stop()!?! Bug 996162
if (mCubebStream) {
MonitorAutoUnlock mon(mMonitor);
// Force stop to put the cubeb stream in a stable state before deletion.
cubeb_stream_stop(mCubebStream);
// Must not try to shut down cubeb from within the lock! wasapi may still
// call our callback after Pause()/stop()!?! Bug 996162
mCubebStream.reset();
}
mState = SHUTDOWN;
}
int64_t
@ -907,6 +916,7 @@ AudioStream::IsPaused()
void
AudioStream::GetBufferInsertTime(int64_t &aTimeMs)
{
mMonitor.AssertCurrentThreadOwns();
if (mInserts.Length() > 0) {
// Find the right block, but don't leave the array empty
while (mInserts.Length() > 1 && mReadPoint >= mInserts[0].mFrames) {
@ -924,6 +934,7 @@ AudioStream::GetBufferInsertTime(int64_t &aTimeMs)
long
AudioStream::GetUnprocessed(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
mMonitor.AssertCurrentThreadOwns();
uint8_t* wpos = reinterpret_cast<uint8_t*>(aBuffer);
// Flush the timestretcher pipeline, if we were playing using a playback rate
@ -955,6 +966,7 @@ AudioStream::GetUnprocessed(void* aBuffer, long aFrames, int64_t &aTimeMs)
long
AudioStream::GetUnprocessedWithSilencePadding(void* aBuffer, long aFrames, int64_t& aTimeMs)
{
mMonitor.AssertCurrentThreadOwns();
uint32_t toPopBytes = FramesToBytes(aFrames);
uint32_t available = std::min(toPopBytes, mBuffer.Length());
uint32_t silenceOffset = toPopBytes - available;
@ -979,6 +991,7 @@ AudioStream::GetUnprocessedWithSilencePadding(void* aBuffer, long aFrames, int64
long
AudioStream::GetTimeStretched(void* aBuffer, long aFrames, int64_t &aTimeMs)
{
mMonitor.AssertCurrentThreadOwns();
long processedFrames = 0;
// We need to call the non-locking version, because we already have the lock.
@ -1021,6 +1034,7 @@ long
AudioStream::DataCallback(void* aBuffer, long aFrames)
{
MonitorAutoLock mon(mMonitor);
MOZ_ASSERT(mState != SHUTDOWN, "No data callback after shutdown");
uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
@ -1131,6 +1145,8 @@ void
AudioStream::StateCallback(cubeb_state aState)
{
MonitorAutoLock mon(mMonitor);
MOZ_ASSERT(mState != SHUTDOWN, "No state callback after shutdown");
LOG(("AudioStream: StateCallback %p, mState=%d cubeb_state=%d", this, mState, aState));
if (aState == CUBEB_STATE_DRAINED) {
mState = DRAINED;
} else if (aState == CUBEB_STATE_ERROR) {

View File

@ -17,6 +17,7 @@
#include "mozilla/dom/BlobEvent.h"
#include "nsIPrincipal.h"
#include "nsMimeTypes.h"
#include "nsProxyRelease.h"
#include "mozilla/dom/AudioStreamTrack.h"
#include "mozilla/dom/VideoStreamTrack.h"
@ -149,9 +150,19 @@ class MediaRecorder::Session: public nsIObserver
class ExtractRunnable : public nsRunnable
{
public:
ExtractRunnable(Session *aSession)
ExtractRunnable(already_AddRefed<Session>&& aSession)
: mSession(aSession) {}
~ExtractRunnable()
{
if (mSession) {
NS_WARNING("~ExtractRunnable something wrong the mSession should null");
nsCOMPtr<nsIThread> mainThread = do_GetMainThread();
NS_WARN_IF_FALSE(mainThread, "Couldn't get the main thread!");
NS_ProxyRelease(mainThread, mSession);
}
}
NS_IMETHODIMP Run()
{
MOZ_ASSERT(NS_GetCurrentThread() == mSession->mReadThread);
@ -159,12 +170,18 @@ class MediaRecorder::Session: public nsIObserver
LOG(PR_LOG_DEBUG, ("Session.ExtractRunnable shutdown = %d", mSession->mEncoder->IsShutdown()));
if (!mSession->mEncoder->IsShutdown()) {
mSession->Extract(false);
NS_DispatchToCurrentThread(new ExtractRunnable(mSession));
nsRefPtr<nsIRunnable> event = new ExtractRunnable(mSession.forget());
if (NS_FAILED(NS_DispatchToCurrentThread(event))) {
NS_WARNING("Failed to dispatch ExtractRunnable to encoder thread");
}
} else {
// Flush out remaining encoded data.
mSession->Extract(true);
// Destroy this Session object in main thread.
NS_DispatchToMainThread(new DestroyRunnable(already_AddRefed<Session>(mSession)));
if (NS_FAILED(NS_DispatchToMainThread(
new DestroyRunnable(mSession.forget())))) {
MOZ_ASSERT(false, "NS_DispatchToMainThread DestroyRunnable failed");
}
}
return NS_OK;
}
@ -229,7 +246,10 @@ class MediaRecorder::Session: public nsIObserver
ErrorResult result;
mSession->mStopIssued = true;
recorder->Stop(result);
NS_DispatchToMainThread(new DestroyRunnable(mSession.forget()));
if (NS_FAILED(NS_DispatchToMainThread(
new DestroyRunnable(mSession.forget())))) {
MOZ_ASSERT(false, "NS_DispatchToMainThread failed");
}
return NS_OK;
}
@ -261,7 +281,6 @@ public:
{
MOZ_ASSERT(NS_IsMainThread());
AddRef();
mEncodedBufferCache = new EncodedBufferCache(MAX_ALLOW_MEMORY_BUFFER);
mLastBlobTimeStamp = TimeStamp::Now();
}
@ -360,8 +379,11 @@ private:
pushBlob = true;
}
if (pushBlob || aForceFlush) {
NS_DispatchToMainThread(new PushBlobRunnable(this));
mLastBlobTimeStamp = TimeStamp::Now();
if (NS_FAILED(NS_DispatchToMainThread(new PushBlobRunnable(this)))) {
MOZ_ASSERT(false, "NS_DispatchToMainThread PushBlobRunnable failed");
} else {
mLastBlobTimeStamp = TimeStamp::Now();
}
}
}
@ -432,7 +454,10 @@ private:
// shutdown notification and stop Read Thread.
nsContentUtils::RegisterShutdownObserver(this);
mReadThread->Dispatch(new ExtractRunnable(this), NS_DISPATCH_NORMAL);
nsRefPtr<nsIRunnable> event = new ExtractRunnable(this);
if (NS_FAILED(mReadThread->Dispatch(event, NS_DISPATCH_NORMAL))) {
NS_WARNING("Failed to dispatch ExtractRunnable at beginning");
}
}
// application should get blob and onstop event
void DoSessionEndTask(nsresult rv)
@ -443,9 +468,13 @@ private:
}
CleanupStreams();
if (NS_FAILED(NS_DispatchToMainThread(new PushBlobRunnable(this)))) {
MOZ_ASSERT(false, "NS_DispatchToMainThread PushBlobRunnable failed");
}
// Destroy this session object in main thread.
NS_DispatchToMainThread(new PushBlobRunnable(this));
NS_DispatchToMainThread(new DestroyRunnable(already_AddRefed<Session>(this)));
if (NS_FAILED(NS_DispatchToMainThread(new DestroyRunnable(this)))) {
MOZ_ASSERT(false, "NS_DispatchToMainThread DestroyRunnable failed");
}
}
void CleanupStreams()
{
@ -608,10 +637,17 @@ MediaRecorder::Start(const Optional<int32_t>& aTimeSlice, ErrorResult& aResult)
}
mState = RecordingState::Recording;
// Start a session
// Start a session.
// Add a reference to the Session here and pass it to ExtractRunnable, since
// MediaRecorder doesn't hold any reference to the Session. DoSessionEndTask
// also needs this reference for DestroyRunnable.
// Note that the reference count is intentionally not balanced here, because
// DestroyRunnable will release the last reference.
nsRefPtr<Session> session = new Session(this, timeSlice);
Session* rawPtr;
session.forget(&rawPtr);
mSessions.AppendElement();
mSessions.LastElement() = new Session(this, timeSlice);
mSessions.LastElement() = rawPtr;
mSessions.LastElement()->Start();
}
@ -691,9 +727,11 @@ MediaRecorder::RequestData(ErrorResult& aResult)
return;
}
MOZ_ASSERT(mSessions.Length() > 0);
NS_DispatchToMainThread(
new CreateAndDispatchBlobEventRunnable(mSessions.LastElement()->GetEncodedData(),
this));
if (NS_FAILED(NS_DispatchToMainThread(
new CreateAndDispatchBlobEventRunnable(
mSessions.LastElement()->GetEncodedData(), this)))) {
MOZ_ASSERT(false, "NS_DispatchToMainThread CreateAndDispatchBlobEventRunnable failed");
}
}
JSObject*

View File

@ -4,7 +4,6 @@
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaStreamGraphImpl.h"
#include "mozilla/LinkedList.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/unused.h"
@ -90,7 +89,7 @@ void
MediaStreamGraphImpl::AddStream(MediaStream* aStream)
{
aStream->mBufferStartTime = mCurrentTime;
*mStreams.AppendElement() = already_AddRefed<MediaStream>(aStream);
mStreams.AppendElement(aStream);
STREAM_LOG(PR_LOG_DEBUG, ("Adding media stream %p to the graph", aStream));
SetStreamOrderDirty();
@ -113,8 +112,8 @@ MediaStreamGraphImpl::RemoveStream(MediaStream* aStream)
SetStreamOrderDirty();
// This unrefs the stream, probably destroying it
mStreams.RemoveElement(aStream);
NS_RELEASE(aStream); // probably destroying it
STREAM_LOG(PR_LOG_DEBUG, ("Removing media stream %p from the graph", aStream));
}
@ -530,70 +529,6 @@ MediaStreamGraphImpl::MarkConsumed(MediaStream* aStream)
}
}
// Depth-first ordering of one stream and (recursively) its inputs.
// aStack holds the chain of ProcessedMediaStreams currently being ordered;
// if we re-enter a stream that is already on that stack we have found a
// cycle. Cycles containing a DelayNodeEngine are allowed (the delay breaks
// the feedback loop); cycles without one are muted via AudioNodeStream::Mute().
// Takes ownership of aStream and, once ordered, appends it to mStreams.
void
MediaStreamGraphImpl::UpdateStreamOrderForStream(mozilla::LinkedList<MediaStream>* aStack,
already_AddRefed<MediaStream> aStream)
{
nsRefPtr<MediaStream> stream = aStream;
NS_ASSERTION(!stream->mHasBeenOrdered, "stream should not have already been ordered");
if (stream->mIsOnOrderingStack) {
// |stream| is already on the ordering stack: a cycle has been reached.
// Walk the stack backwards from its tail to find the extent of the cycle
// and check whether any node in it is a DelayNode.
MediaStream* iter = aStack->getLast();
AudioNodeStream* ns = stream->AsAudioNodeStream();
bool delayNodePresent = ns ? ns->Engine()->AsDelayNodeEngine() != nullptr : false;
bool cycleFound = false;
if (iter) {
do {
cycleFound = true;
// Every stream on the stack between |stream| and the tail is in the cycle.
iter->AsProcessedStream()->mInCycle = true;
AudioNodeStream* ns = iter->AsAudioNodeStream();
if (ns && ns->Engine()->AsDelayNodeEngine()) {
delayNodePresent = true;
}
iter = iter->getPrevious();
} while (iter && iter != stream);
}
if (cycleFound && !delayNodePresent) {
// A cycle with no DelayNode in it must be silenced. The loop above
// exits either with iter == stream (a multi-node cycle) or with
// iter == nullptr (the node is connected to itself). Walk forward
// through the cycle and mute every AudioNodeStream found, or just
// mute the self-connected node itself.
if (!iter) {
// The node is connected to itself.
// There can't be a non-AudioNodeStream here, because only AudioNodes
// can be self-connected.
iter = aStack->getLast();
MOZ_ASSERT(iter->AsAudioNodeStream());
iter->AsAudioNodeStream()->Mute();
} else {
MOZ_ASSERT(iter);
do {
AudioNodeStream* nodeStream = iter->AsAudioNodeStream();
if (nodeStream) {
nodeStream->Mute();
}
} while((iter = iter->getNext()));
}
}
// Do not order or recurse further from a stream already on the stack.
return;
}
ProcessedMediaStream* ps = stream->AsProcessedStream();
if (ps) {
// Only ProcessedMediaStreams have inputs; push onto the ordering stack
// and recurse into each not-yet-ordered input so sources are ordered
// before their consumers.
aStack->insertBack(stream);
stream->mIsOnOrderingStack = true;
for (uint32_t i = 0; i < ps->mInputs.Length(); ++i) {
MediaStream* source = ps->mInputs[i]->mSource;
if (!source->mHasBeenOrdered) {
nsRefPtr<MediaStream> s = source;
UpdateStreamOrderForStream(aStack, s.forget());
}
}
aStack->popLast();
stream->mIsOnOrderingStack = false;
}
stream->mHasBeenOrdered = true;
// Transfer ownership of the ordered stream into mStreams.
*mStreams.AppendElement() = stream.forget();
}
static void AudioMixerCallback(AudioDataValue* aMixedBuffer,
AudioSampleFormat aFormat,
uint32_t aChannels,
@ -615,27 +550,20 @@ static void AudioMixerCallback(AudioDataValue* aMixedBuffer,
void
MediaStreamGraphImpl::UpdateStreamOrder()
{
mOldStreams.SwapElements(mStreams);
mStreams.ClearAndRetainStorage();
bool shouldMix = false;
for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
MediaStream* stream = mOldStreams[i];
stream->mHasBeenOrdered = false;
// Value of mCycleMarker for unvisited streams in cycle detection.
const uint32_t NOT_VISITED = UINT32_MAX;
// Value of mCycleMarker for ordered streams in muted cycles.
const uint32_t IN_MUTED_CYCLE = 1;
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* stream = mStreams[i];
stream->mIsConsumed = false;
stream->mIsOnOrderingStack = false;
stream->mInBlockingSet = false;
if (stream->AsSourceStream() &&
stream->AsSourceStream()->NeedsMixing()) {
shouldMix = true;
}
ProcessedMediaStream* ps = stream->AsProcessedStream();
if (ps) {
ps->mInCycle = false;
AudioNodeStream* ns = ps->AsAudioNodeStream();
if (ns) {
ns->Unmute();
}
}
}
if (!mMixer && shouldMix) {
@ -644,16 +572,174 @@ MediaStreamGraphImpl::UpdateStreamOrder()
mMixer = nullptr;
}
mozilla::LinkedList<MediaStream> stack;
for (uint32_t i = 0; i < mOldStreams.Length(); ++i) {
nsRefPtr<MediaStream>& s = mOldStreams[i];
// The algorithm for finding cycles is based on Tim Leslie's iterative
// implementation [1][2] of Pearce's variant [3] of Tarjan's strongly
// connected components (SCC) algorithm. There are variations (a) to
// distinguish whether streams in SCCs of size 1 are in a cycle and (b) to
// re-run the algorithm over SCCs with breaks at DelayNodes.
//
// [1] http://www.timl.id.au/?p=327
// [2] https://github.com/scipy/scipy/blob/e2c502fca/scipy/sparse/csgraph/_traversal.pyx#L582
// [3] http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.1707
//
// There are two stacks. One for the depth-first search (DFS),
mozilla::LinkedList<MediaStream> dfsStack;
// and another for streams popped from the DFS stack, but still being
// considered as part of SCCs involving streams on the stack.
mozilla::LinkedList<MediaStream> sccStack;
// An index into mStreams for the next stream found with no unsatisfied
// upstream dependencies.
uint32_t orderedStreamCount = 0;
for (uint32_t i = 0; i < mStreams.Length(); ++i) {
MediaStream* s = mStreams[i];
if (s->IsIntrinsicallyConsumed()) {
MarkConsumed(s);
}
if (!s->mHasBeenOrdered) {
UpdateStreamOrderForStream(&stack, s.forget());
ProcessedMediaStream* ps = s->AsProcessedStream();
if (ps) {
// The dfsStack initially contains a list of all processed streams in
// unchanged order.
dfsStack.insertBack(s);
ps->mCycleMarker = NOT_VISITED;
} else {
// SourceMediaStreams have no inputs and so can be ordered now.
mStreams[orderedStreamCount] = s;
++orderedStreamCount;
}
}
// mNextStackMarker corresponds to "index" in Tarjan's algorithm. It is a
// counter to label mCycleMarker on the next visited stream in the DFS
// uniquely in the set of visited streams that are still being considered.
//
// In this implementation, the counter descends so that the values are
// strictly greater than the values that mCycleMarker takes when the stream
// has been ordered (0 or IN_MUTED_CYCLE).
//
// Each new stream labelled, as the DFS searches upstream, receives a value
// less than those used for all other streams being considered.
uint32_t nextStackMarker = NOT_VISITED - 1;
// Reset list of DelayNodes in cycles stored at the tail of mStreams.
mFirstCycleBreaker = mStreams.Length();
// Rearrange dfsStack order as required to DFS upstream and pop streams
// in processing order to place in mStreams.
while (auto ps = static_cast<ProcessedMediaStream*>(dfsStack.getFirst())) {
const auto& inputs = ps->mInputs;
MOZ_ASSERT(ps->AsProcessedStream());
if (ps->mCycleMarker == NOT_VISITED) {
// Record the position on the visited stack, so that any searches
// finding this stream again know how much of the stack is in the cycle.
ps->mCycleMarker = nextStackMarker;
--nextStackMarker;
// Not-visited input streams should be processed first.
// SourceMediaStreams have already been ordered.
for (uint32_t i = inputs.Length(); i--; ) {
auto input = inputs[i]->mSource->AsProcessedStream();
if (input && input->mCycleMarker == NOT_VISITED) {
input->remove();
dfsStack.insertFront(input);
}
}
continue;
}
// Returning from DFS. Pop from dfsStack.
ps->remove();
// cycleStackMarker keeps track of the highest marker value on any
// upstream stream, if any, found receiving input, directly or indirectly,
// from the visited stack (and so from |ps|, making a cycle). In a
// variation from Tarjan's SCC algorithm, this does not include |ps|
// unless it is part of the cycle.
uint32_t cycleStackMarker = 0;
for (uint32_t i = inputs.Length(); i--; ) {
auto input = inputs[i]->mSource->AsProcessedStream();
if (input) {
cycleStackMarker = std::max(cycleStackMarker, input->mCycleMarker);
}
}
if (cycleStackMarker <= IN_MUTED_CYCLE) {
// All inputs have been ordered and their stack markers have been removed.
// This stream is not part of a cycle. It can be processed next.
ps->mCycleMarker = 0;
mStreams[orderedStreamCount] = ps;
++orderedStreamCount;
continue;
}
// A cycle has been found. Record this stream for ordering when all
// streams in this SCC have been popped from the DFS stack.
sccStack.insertFront(ps);
if (cycleStackMarker > ps->mCycleMarker) {
// Cycles have been found that involve streams that remain on the stack.
// Leave mCycleMarker indicating the most downstream (last) stream on
// the stack known to be part of this SCC. In this way, any searches on
// other paths that find |ps| will know (without having to traverse from
// this stream again) that they are part of this SCC (i.e. part of an
// intersecting cycle).
ps->mCycleMarker = cycleStackMarker;
continue;
}
// |ps| is the root of an SCC involving no other streams on dfsStack, the
// complete SCC has been recorded, and streams in this SCC are part of at
// least one cycle.
MOZ_ASSERT(cycleStackMarker == ps->mCycleMarker);
// If there are DelayNodes in this SCC, then they may break the cycles.
bool haveDelayNode = false;
auto next = static_cast<ProcessedMediaStream*>(sccStack.getFirst());
// Streams in this SCC are identified by mCycleMarker <= cycleStackMarker.
// (There may be other streams later in sccStack from other incompletely
// searched SCCs, involving streams still on dfsStack.)
//
// DelayNodes in cycles must behave differently from those not in cycles,
// so all DelayNodes in the SCC must be identified.
while (next && next->mCycleMarker <= cycleStackMarker) {
auto ns = next->AsAudioNodeStream();
// Get next before perhaps removing from list below.
next = static_cast<ProcessedMediaStream*>(next->getNext());
if (ns && ns->Engine()->AsDelayNodeEngine()) {
haveDelayNode = true;
// DelayNodes break cycles by producing their output in a
// preprocessing phase; they do not need to be ordered before their
// consumers. Order them at the tail of mStreams so that they can be
// handled specially. Do so now, so that DFS ignores them.
ns->remove();
ns->mCycleMarker = 0;
--mFirstCycleBreaker;
mStreams[mFirstCycleBreaker] = ns;
}
}
auto after_scc = next;
while ((next = static_cast<ProcessedMediaStream*>(sccStack.popFirst()))
!= after_scc) {
if (haveDelayNode) {
// Return streams to the DFS stack again (to order and detect cycles
// without delayNodes). Any of these streams that are still inputs
// for streams on the visited stack must be returned to the front of
// the stack to be ordered before their dependents. We know that none
// of these streams need input from streams on the visited stack, so
// they can all be searched and ordered before the current stack head
// is popped.
next->mCycleMarker = NOT_VISITED;
dfsStack.insertFront(next);
} else {
// Streams in cycles without any DelayNodes must be muted, and so do
// not need input and can be ordered now. They must be ordered before
// their consumers so that their muted output is available.
next->mCycleMarker = IN_MUTED_CYCLE;
mStreams[orderedStreamCount] = next;
++orderedStreamCount;
}
}
}
MOZ_ASSERT(orderedStreamCount == mFirstCycleBreaker);
}
void
@ -1167,9 +1253,16 @@ MediaStreamGraphImpl::ProduceDataForStreamsBlockByBlock(uint32_t aStreamIndex,
GraphTime aFrom,
GraphTime aTo)
{
MOZ_ASSERT(aStreamIndex <= mFirstCycleBreaker,
"Cycle breaker is not AudioNodeStream?");
GraphTime t = aFrom;
while (t < aTo) {
GraphTime next = RoundUpToNextAudioBlock(aSampleRate, t);
for (uint32_t i = mFirstCycleBreaker; i < mStreams.Length(); ++i) {
auto ns = static_cast<AudioNodeStream*>(mStreams[i]);
MOZ_ASSERT(ns->AsAudioNodeStream());
ns->ProduceOutputBeforeInput(t);
}
for (uint32_t i = aStreamIndex; i < mStreams.Length(); ++i) {
ProcessedMediaStream* ps = mStreams[i]->AsProcessedStream();
if (ps) {

View File

@ -660,9 +660,6 @@ protected:
*/
bool mNotifiedHasCurrentData;
// Temporary data for ordering streams by dependency graph
bool mHasBeenOrdered;
bool mIsOnOrderingStack;
// True if the stream is being consumed (i.e. has track data being played,
// or is feeding into some stream that is being consumed).
bool mIsConsumed;
@ -1019,7 +1016,7 @@ private:
class ProcessedMediaStream : public MediaStream {
public:
ProcessedMediaStream(DOMMediaStream* aWrapper)
: MediaStream(aWrapper), mAutofinish(false), mInCycle(false)
: MediaStream(aWrapper), mAutofinish(false)
{}
// Control API.
@ -1088,7 +1085,10 @@ public:
*/
virtual void ForwardTrackEnabled(TrackID aOutputID, bool aEnabled) {};
bool InCycle() const { return mInCycle; }
// Only valid after MediaStreamGraphImpl::UpdateStreamOrder() has run.
// A DelayNode is considered to break a cycle and so this will not return
// true for echo loops, only for muted cycles.
bool InMutedCycle() const { return mCycleMarker; }
virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
{
@ -1110,9 +1110,10 @@ protected:
// The list of all inputs that are currently enabled or waiting to be enabled.
nsTArray<MediaInputPort*> mInputs;
bool mAutofinish;
// True if and only if this stream is in a cycle.
// Updated by MediaStreamGraphImpl::UpdateStreamOrder.
bool mInCycle;
// After UpdateStreamOrder(), mCycleMarker is either 0 or 1 to indicate
// whether this stream is in a muted cycle. During ordering it can contain
// other marker values - see MediaStreamGraphImpl::UpdateStreamOrder().
uint32_t mCycleMarker;
};
/**

View File

@ -232,12 +232,6 @@ public:
* Update "have enough data" flags in aStream.
*/
void UpdateBufferSufficiencyState(SourceMediaStream* aStream);
/*
* If aStream hasn't already been ordered, push it onto aStack and order
* its children.
*/
void UpdateStreamOrderForStream(mozilla::LinkedList<MediaStream>* aStack,
already_AddRefed<MediaStream> aStream);
/**
* Mark aStream and all its inputs (recursively) as consumed.
*/
@ -426,12 +420,18 @@ public:
// mLifecycleState > LIFECYCLE_RUNNING in which case the graph thread
// is not running and this state can be used from the main thread.
nsTArray<nsRefPtr<MediaStream> > mStreams;
/**
* mOldStreams is used as temporary storage for streams when computing the
* order in which we compute them.
* The graph keeps a reference to each stream.
* References are maintained manually to simplify reordering without
* unnecessary thread-safe refcount changes.
*/
nsTArray<nsRefPtr<MediaStream> > mOldStreams;
nsTArray<MediaStream*> mStreams;
/**
* Streams from mFirstCycleBreaker to the end of mStreams produce output
* before they receive input. They correspond to DelayNodes that are in
* cycles.
*/
uint32_t mFirstCycleBreaker;
/**
* The current graph time for the current iteration of the RunThread control
* loop.

View File

@ -68,7 +68,7 @@ public:
void RequestAudioData() MOZ_OVERRIDE
{
if (!GetAudioReader()) {
MSE_DEBUG("%p DecodeAudioFrame called with no audio reader", this);
MSE_DEBUG("%p MSR::RequestAudioData called with no audio reader", this);
MOZ_ASSERT(mPendingDecoders.IsEmpty());
GetCallback()->OnDecodeError();
return;
@ -83,18 +83,21 @@ public:
void OnAudioEOS()
{
MSE_DEBUG("%p OnAudioEOS %d (%p) EOS (readers=%u)",
this, mActiveAudioDecoder, mDecoders[mActiveAudioDecoder].get(), mDecoders.Length());
GetCallback()->OnAudioEOS();
}
void RequestVideoData(bool aSkipToNextKeyframe, int64_t aTimeThreshold) MOZ_OVERRIDE
{
if (!GetVideoReader()) {
MSE_DEBUG("%p DecodeVideoFrame called with no video reader", this);
MSE_DEBUG("%p MSR::RequestVideoData called with no video reader", this);
MOZ_ASSERT(mPendingDecoders.IsEmpty());
GetCallback()->OnDecodeError();
return;
}
mTimeThreshold = aTimeThreshold;
SwitchVideoReaders(SWITCH_OPTIONAL);
GetVideoReader()->RequestVideoData(aSkipToNextKeyframe, aTimeThreshold);
}
@ -102,28 +105,28 @@ public:
{
if (mDropVideoBeforeThreshold) {
if (aSample->mTime < mTimeThreshold) {
MSE_DEBUG("%p MSR::OnVideoDecoded VideoData mTime %lld below mTimeThreshold %lld",
this, aSample->mTime, mTimeThreshold);
delete aSample;
GetVideoReader()->RequestVideoData(false, mTimeThreshold);
} else {
mDropVideoBeforeThreshold = false;
GetCallback()->OnVideoDecoded(aSample);
return;
}
} else {
GetCallback()->OnVideoDecoded(aSample);
mDropVideoBeforeThreshold = false;
}
GetCallback()->OnVideoDecoded(aSample);
}
void OnVideoEOS()
{
// End of stream. See if we can switch to another video decoder.
MSE_DEBUG("%p MSR::DecodeVF %d (%p) returned false (readers=%u)",
MSE_DEBUG("%p MSR::OnVideoEOS %d (%p) (readers=%u)",
this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
if (MaybeSwitchVideoReaders()) {
if (SwitchVideoReaders(SWITCH_FORCED)) {
// Success! Resume decoding with next video decoder.
RequestVideoData(false, mTimeThreshold);
} else {
// End of stream.
MSE_DEBUG("%p MSR::DecodeVF %d (%p) EOS (readers=%u)",
MSE_DEBUG("%p MSR::OnVideoEOS %d (%p) EOS (readers=%u)",
this, mActiveVideoDecoder, mDecoders[mActiveVideoDecoder].get(), mDecoders.Length());
GetCallback()->OnVideoEOS();
}
@ -175,17 +178,16 @@ public:
}
private:
// These are read and written on the decode task queue threads.
int64_t mTimeThreshold;
bool mDropVideoBeforeThreshold;
enum SwitchState {
SWITCHSTATE_SEEKING,
SWITCHSTATE_PLAYING
enum SwitchType {
SWITCH_OPTIONAL,
SWITCH_FORCED
};
bool MaybeSwitchVideoReaders(SwitchState aState = SWITCHSTATE_PLAYING) {
bool SwitchVideoReaders(SwitchType aType) {
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
MOZ_ASSERT(mActiveVideoDecoder != -1);
@ -197,11 +199,12 @@ private:
continue;
}
if (aState == SWITCHSTATE_SEEKING || mTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
if (aType == SWITCH_FORCED || mTimeThreshold >= mDecoders[i]->GetMediaStartTime()) {
GetVideoReader()->SetIdle();
mActiveVideoDecoder = i;
MSE_DEBUG("%p MSR::DecodeVF switching to %d", this, mActiveVideoDecoder);
mDropVideoBeforeThreshold = true;
MSE_DEBUG("%p MSR::SwitchVideoReaders(%d) switching to %d", this, aType, mActiveVideoDecoder);
return true;
}
}
@ -484,7 +487,7 @@ MediaSourceReader::Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime,
while (!mMediaSource->ActiveSourceBuffers()->AllContainsTime(target)
&& !IsShutdown()) {
mMediaSource->WaitForData();
MaybeSwitchVideoReaders(SWITCHSTATE_SEEKING);
SwitchVideoReaders(SWITCH_FORCED);
}
if (IsShutdown()) {

View File

@ -107,10 +107,6 @@ public:
return nullptr;
}
virtual const DelayNode* AsDelayNode() const {
return nullptr;
}
AudioContext* GetParentObject() const
{
return mContext;

View File

@ -42,7 +42,7 @@ public:
WebAudioUtils::ComputeSmoothingRate(0.02,
mDestination->SampleRate()))
, mMaxDelay(aMaxDelayTicks)
, mLastOutputPosition(-1)
, mHaveProducedBeforeInput(false)
, mLeftOverData(INT32_MIN)
{
}
@ -110,20 +110,17 @@ public:
mBuffer.Write(aInput);
UpdateOutputBlock(aOutput);
// Skip output update if mLastChunks has already been set by
// ProduceBlockBeforeInput() when in a cycle.
if (!mHaveProducedBeforeInput) {
UpdateOutputBlock(aOutput, 0.0);
}
mHaveProducedBeforeInput = false;
mBuffer.NextBlock();
}
void UpdateOutputBlock(AudioChunk* aOutput)
void UpdateOutputBlock(AudioChunk* aOutput, double minDelay)
{
TrackTicks tick = mSource->GetCurrentPosition();
if (tick == mLastOutputPosition) {
return; // mLastChunks is already set on the stream
}
mLastOutputPosition = tick;
bool inCycle = mSource->AsProcessedStream()->InCycle();
double minDelay = inCycle ? static_cast<double>(WEBAUDIO_BLOCK_SIZE) : 0.0;
double maxDelay = mMaxDelay;
double sampleRate = mSource->SampleRate();
ChannelInterpretation channelInterpretation =
@ -139,6 +136,7 @@ public:
// Compute the delay values for the duration of the input AudioChunk
// If this DelayNode is in a cycle, make sure the delay value is at least
// one block.
TrackTicks tick = mSource->GetCurrentPosition();
double computedDelay[WEBAUDIO_BLOCK_SIZE];
for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) {
double delayAtTick = mDelay.GetValueAtTime(tick, counter) * sampleRate;
@ -155,8 +153,9 @@ public:
if (mLeftOverData <= 0) {
aOutput->SetNull(WEBAUDIO_BLOCK_SIZE);
} else {
UpdateOutputBlock(aOutput);
UpdateOutputBlock(aOutput, WEBAUDIO_BLOCK_SIZE);
}
mHaveProducedBeforeInput = true;
}
virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const MOZ_OVERRIDE
@ -180,7 +179,7 @@ public:
AudioParamTimeline mDelay;
DelayBuffer mBuffer;
double mMaxDelay;
TrackTicks mLastOutputPosition;
bool mHaveProducedBeforeInput;
// How much data we have in our buffer which needs to be flushed out when our inputs
// finish.
int32_t mLeftOverData;

View File

@ -30,11 +30,6 @@ public:
return mDelay;
}
virtual const DelayNode* AsDelayNode() const MOZ_OVERRIDE
{
return this;
}
virtual const char* NodeType() const
{
return "DelayNode";

View File

@ -40,6 +40,16 @@ enum {
kAudioTrack = 2
};
// includes everything from dom::MediaSourceEnum (really video sources), plus audio sources
// The first three entries are pinned to the corresponding dom::MediaSourceEnum
// numeric values so the two enums can be cast between each other for those
// members. From Window onward the values are NOT guaranteed to line up
// (see bug 1038926), and Microphone has no dom::MediaSourceEnum counterpart.
enum MediaSourceType {
Camera = (int) dom::MediaSourceEnum::Camera,
Screen = (int) dom::MediaSourceEnum::Screen,
Application = (int) dom::MediaSourceEnum::Application,
Window, // = (int) dom::MediaSourceEnum::Window, // XXX bug 1038926
//Browser = (int) dom::MediaSourceEnum::Browser, // proposed in WG, unclear if it's useful
Microphone // audio-only source; not present in dom::MediaSourceEnum
};
class MediaEngine
{
public:
@ -55,12 +65,12 @@ public:
/* Populate an array of video sources in the nsTArray. Also include devices
* that are currently unavailable. */
virtual void EnumerateVideoDevices(dom::MediaSourceEnum,
virtual void EnumerateVideoDevices(MediaSourceType,
nsTArray<nsRefPtr<MediaEngineVideoSource> >*) = 0;
/* Populate an array of audio sources in the nsTArray. Also include devices
* that are currently unavailable. */
virtual void EnumerateAudioDevices(dom::MediaSourceEnum,
virtual void EnumerateAudioDevices(MediaSourceType,
nsTArray<nsRefPtr<MediaEngineAudioSource> >*) = 0;
protected:
@ -119,6 +129,9 @@ public:
*/
virtual bool IsFake() = 0;
/* Returns the type of media source (camera, microphone, screen, window, etc) */
virtual const MediaSourceType GetMediaSource() = 0;
/* Return false if device is currently allocated or started */
bool IsAvailable() {
if (mState == kAllocated || mState == kStarted) {
@ -185,8 +198,8 @@ class MediaEngineVideoSource : public MediaEngineSource
public:
virtual ~MediaEngineVideoSource() {}
virtual const dom::MediaSourceEnum GetMediaSource() {
return dom::MediaSourceEnum::Camera;
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Camera;
}
/* This call reserves but does not start the device. */
virtual nsresult Allocate(const VideoTrackConstraintsN &aConstraints,

View File

@ -478,12 +478,12 @@ MediaEngineDefaultAudioSource::Notify(nsITimer* aTimer)
}
void
MediaEngineDefault::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
MediaEngineDefault::EnumerateVideoDevices(MediaSourceType aMediaSource,
nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources) {
MutexAutoLock lock(mMutex);
// only supports camera sources (for now). See Bug 1038241
if (aMediaSource != dom::MediaSourceEnum::Camera) {
if (aMediaSource != MediaSourceType::Camera) {
return;
}
@ -498,7 +498,7 @@ MediaEngineDefault::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
}
void
MediaEngineDefault::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
MediaEngineDefault::EnumerateAudioDevices(MediaSourceType aMediaSource,
nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources) {
MutexAutoLock lock(mMutex);
int32_t len = mASources.Length();

View File

@ -61,6 +61,10 @@ public:
return true;
}
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Camera;
}
NS_DECL_THREADSAFE_ISUPPORTS
NS_DECL_NSITIMERCALLBACK
@ -117,6 +121,10 @@ public:
return true;
}
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Microphone;
}
NS_DECL_THREADSAFE_ISUPPORTS
NS_DECL_NSITIMERCALLBACK
@ -138,9 +146,9 @@ public:
: mMutex("mozilla::MediaEngineDefault")
{}
virtual void EnumerateVideoDevices(dom::MediaSourceEnum,
virtual void EnumerateVideoDevices(MediaSourceType,
nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
virtual void EnumerateAudioDevices(dom::MediaSourceEnum,
virtual void EnumerateAudioDevices(MediaSourceType,
nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
private:

View File

@ -47,6 +47,7 @@ namespace mozilla {
MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
: mMutex("mozilla::MediaEngineWebRTC")
, mScreenEngine(nullptr)
, mWinEngine(nullptr)
, mAppEngine(nullptr)
, mVideoEngine(nullptr)
, mVoiceEngine(nullptr)
@ -72,14 +73,14 @@ MediaEngineWebRTC::MediaEngineWebRTC(MediaEnginePrefs &aPrefs)
}
void
MediaEngineWebRTC::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
MediaEngineWebRTC::EnumerateVideoDevices(MediaSourceType aMediaSource,
nsTArray<nsRefPtr<MediaEngineVideoSource> >* aVSources)
{
// We spawn threads to handle gUM runnables, so we must protect the member vars
MutexAutoLock lock(mMutex);
#ifdef MOZ_B2G_CAMERA
if (aMediaSource != dom::MediaSourceEnum::Camera) {
if (aMediaSource != MediaSourceType::Camera) {
// only supports camera sources
return;
}
@ -137,7 +138,18 @@ MediaEngineWebRTC::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
#endif
switch (aMediaSource) {
case dom::MediaSourceEnum::Application:
case MediaSourceType::Window:
mWinEngineConfig.Set<webrtc::CaptureDeviceInfo>(
new webrtc::CaptureDeviceInfo(webrtc::CaptureDeviceType::Window));
if (!mWinEngine) {
if (!(mWinEngine = webrtc::VideoEngine::Create(mWinEngineConfig))) {
return;
}
}
videoEngine = mWinEngine;
videoEngineInit = &mWinEngineInit;
break;
case MediaSourceType::Application:
mAppEngineConfig.Set<webrtc::CaptureDeviceInfo>(
new webrtc::CaptureDeviceInfo(webrtc::CaptureDeviceType::Application));
if (!mAppEngine) {
@ -148,7 +160,7 @@ MediaEngineWebRTC::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
videoEngine = mAppEngine;
videoEngineInit = &mAppEngineInit;
break;
case dom::MediaSourceEnum::Screen:
case MediaSourceType::Screen:
mScreenEngineConfig.Set<webrtc::CaptureDeviceInfo>(
new webrtc::CaptureDeviceInfo(webrtc::CaptureDeviceType::Screen));
if (!mScreenEngine) {
@ -159,7 +171,7 @@ MediaEngineWebRTC::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
videoEngine = mScreenEngine;
videoEngineInit = &mScreenEngineInit;
break;
case dom::MediaSourceEnum::Camera:
case MediaSourceType::Camera:
// fall through
default:
if (!mVideoEngine) {
@ -259,7 +271,7 @@ MediaEngineWebRTC::EnumerateVideoDevices(dom::MediaSourceEnum aMediaSource,
}
void
MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
MediaEngineWebRTC::EnumerateAudioDevices(MediaSourceType aMediaSource,
nsTArray<nsRefPtr<MediaEngineAudioSource> >* aASources)
{
ScopedCustomReleasePtr<webrtc::VoEBase> ptrVoEBase;

View File

@ -96,7 +96,7 @@ class MediaEngineWebRTCVideoSource : public MediaEngineVideoSource
public:
#ifdef MOZ_B2G_CAMERA
MediaEngineWebRTCVideoSource(int aIndex,
dom::MediaSourceEnum aMediaSource = dom::MediaSourceEnum::Camera)
MediaSourceType aMediaSource = MediaSourceType::Camera)
: mCameraControl(nullptr)
, mCallbackMonitor("WebRTCCamera.CallbackMonitor")
, mRotation(0)
@ -127,7 +127,7 @@ public:
virtual bool IsTextureSupported() { return false; }
MediaEngineWebRTCVideoSource(webrtc::VideoEngine* aVideoEnginePtr, int aIndex,
dom::MediaSourceEnum aMediaSource = dom::MediaSourceEnum::Camera)
MediaSourceType aMediaSource = MediaSourceType::Camera)
: mVideoEngine(aVideoEnginePtr)
, mCaptureIndex(aIndex)
, mFps(-1)
@ -169,7 +169,7 @@ public:
return false;
}
virtual const dom::MediaSourceEnum GetMediaSource() {
virtual const MediaSourceType GetMediaSource() {
return mMediaSource;
}
@ -247,7 +247,7 @@ private:
int mCaptureIndex;
int mFps; // Track rate (30 fps by default)
int mMinFps; // Min rate we want to accept
dom::MediaSourceEnum mMediaSource; // source of media (camera | application | screen)
MediaSourceType mMediaSource; // source of media (camera | application | screen)
// mMonitor protects mImage access/changes, and transitions of mState
// from kStarted to kStopped (which are combined with EndTrack() and
@ -327,6 +327,10 @@ public:
return false;
}
virtual const MediaSourceType GetMediaSource() {
return MediaSourceType::Microphone;
}
// VoEMediaProcess.
void Process(int channel, webrtc::ProcessingTypes type,
int16_t audio10ms[], int length,
@ -390,9 +394,9 @@ public:
// before invoking Shutdown on this class.
void Shutdown();
virtual void EnumerateVideoDevices(dom::MediaSourceEnum,
virtual void EnumerateVideoDevices(MediaSourceType,
nsTArray<nsRefPtr<MediaEngineVideoSource> >*);
virtual void EnumerateAudioDevices(dom::MediaSourceEnum,
virtual void EnumerateAudioDevices(MediaSourceType,
nsTArray<nsRefPtr<MediaEngineAudioSource> >*);
private:
~MediaEngineWebRTC() {
@ -410,18 +414,21 @@ private:
// protected with mMutex:
webrtc::VideoEngine* mScreenEngine;
webrtc::VideoEngine* mWinEngine;
webrtc::VideoEngine* mAppEngine;
webrtc::VideoEngine* mVideoEngine;
webrtc::VoiceEngine* mVoiceEngine;
// specialized configurations
webrtc::Config mAppEngineConfig;
webrtc::Config mWinEngineConfig;
webrtc::Config mScreenEngineConfig;
// Need this to avoid unnecessary WebRTC calls while enumerating.
bool mVideoEngineInit;
bool mAudioEngineInit;
bool mScreenEngineInit;
bool mWinEngineInit;
bool mAppEngineInit;
bool mHasTabVideoSource;

View File

@ -46,6 +46,8 @@ public:
// treat MediaSource special because it's always required
mRequired.mMediaSource = mMediaSource;
// we guarantee (int) equivalence from MediaSourceEnum ->MediaSourceType
// (but not the other way)
if (mMediaSource != dom::MediaSourceEnum::Camera && mAdvanced.WasPassed()) {
// iterate through advanced, forcing mediaSource to match "root"
auto& array = mAdvanced.Value();

View File

@ -37,34 +37,30 @@
oldComplete();
}
};
let oldLog = TestRunner.log;
TestRunner.log = function(msg) {
TestRunner.structuredLogger._dumpMessage = function(msg) {
sendAsyncMessage("test:SpeechSynthesis:ipcTestMessage", { msg: msg });
}
}
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
// Whitelist of structured-log actions this test harness understands.
let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
// A structured message is valid when it declares one of the known actions.
function validStructuredMessage(message) {
  if (message.action === undefined) {
    return false;
  }
  return VALID_ACTIONS.indexOf(message.action) !== -1;
}
function onTestMessage(data) {
let message = SpecialPowers.wrap(data).json.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
let message = SpecialPowers.wrap(data).data.msg;
switch (state) {
case "TEST-PASS":
case "TEST-KNOWN-FAIL":
ok(true, details);
if (validStructuredMessage(message)) {
if (message.test === undefined || message.message === undefined) {
return;
}
let details = message.test + " | " + message.message;
switch(message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-DEBUG-INFO":
default:
info(details);
}

View File

@ -23,6 +23,13 @@
sendMessage("KO: " + msg);
}
// Report an equality check to the parent: "OK" when the two values match
// under loose (==) comparison, "KO" otherwise. msg labels the check.
function is(a, b, msg) {
  let matched = (a == b);
  let verdict = matched ? "OK: " : "KO: ";
  let relation = matched ? " == " : " != ";
  sendMessage(verdict + a + relation + b + " - " + msg);
}
function testXHR(file, data_head, mapped, cb) {
var xhr = new XMLHttpRequest();
xhr.open('GET', file);
@ -41,8 +48,8 @@
var data = xhr.response;
ok(data, "Data is non-null");
var str = String.fromCharCode.apply(null, Uint8Array(data));
ok(str.length == data_head.length + gPaddingSize, "Data size is correct");
ok(str.slice(0, data_head.length) == data_head, "Data head is correct");
is(str.length, data_head.length + gPaddingSize, "Data size is correct");
is(str.slice(0, data_head.length), data_head, "Data head is correct");
ok(str.slice(data_head.length) == gPadding, "Data padding is correct");
cb();
} else {

View File

@ -91,8 +91,14 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=945152
};
yield undefined;
// Launch app.
launchApp(app, continueTest);
// Launch app non-OOP.
info("Launch app with non-OOP");
launchApp(app, continueTest, false);
yield undefined;
// Launch app OOP.
info("Launch app with OOP");
launchApp(app, continueTest, true);
yield undefined;
// Uninstall app.
@ -116,9 +122,10 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=945152
xhr.send();
}
function launchApp(app, cb) {
function launchApp(app, cb, oop) {
// Set up the app.
var ifr = document.createElement('iframe');
ifr.setAttribute('remote', oop ? 'true' : 'false');
ifr.setAttribute('mozbrowser', 'true');
ifr.setAttribute('mozapp', app.manifestURL);
ifr.setAttribute('src', app.origin + app.manifest.launch_path);

View File

@ -36,34 +36,30 @@
}
};
let oldLog = TestRunner.log;
TestRunner.log = function(msg) {
TestRunner.structuredLogger._dumpMessage = function(msg) {
sendAsyncMessage("test:DeviceStorage:ipcTestMessage", { msg: msg });
}
}
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
// Whitelist of structured-log actions this test harness understands.
let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
// A structured message is valid when it declares one of the known actions.
function validStructuredMessage(message) {
  if (message.action === undefined) {
    return false;
  }
  return VALID_ACTIONS.indexOf(message.action) !== -1;
}
function onTestMessage(data) {
let message = SpecialPowers.wrap(data).json.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
let message = SpecialPowers.wrap(data).data.msg;
switch (state) {
case "TEST-PASS":
case "TEST-KNOWN-FAIL":
ok(true, details);
if (validStructuredMessage(message)) {
if (message.test === undefined || message.message === undefined) {
return;
}
switch (message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-DEBUG-INFO":
default:
let details = message.test + " | " + message.message;
info(details);
}
}

View File

@ -25,6 +25,7 @@
#include "mozilla/EventDispatcher.h"
#include "mozilla/EventListenerManager.h"
#include "mozilla/InternalMutationEvent.h"
#include "mozilla/ipc/MessageChannel.h"
#include "mozilla/MiscEvents.h"
#include "mozilla/MouseEvents.h"
#include "mozilla/TextEvents.h"
@ -400,6 +401,10 @@ EventDispatcher::Dispatch(nsISupports* aTarget,
NS_ERROR_DOM_INVALID_STATE_ERR);
NS_ASSERTION(!aTargets || !aEvent->message, "Wrong parameters!");
#ifdef NIGHTLY_BUILD
MOZ_RELEASE_ASSERT(!mozilla::ipc::ProcessingUrgentMessages());
#endif
// If we're dispatching an already created DOMEvent object, make
// sure it is initialized!
// If aTargets is non-null, the event isn't going to be dispatched.

View File

@ -427,7 +427,7 @@ function testPreventDefault() {
[{ name: "touchstart", prevent: false },
{ name: "touchmove", prevent: false },
{ name: "touchmove", prevent: false },
{ name: "touchend", prevent: false, doPrevent: true }]
{ name: "touchend", prevent: true, doPrevent: true }]
];
var dotest = function(aTest) {

View File

@ -42,11 +42,17 @@ var W3CTest = {
/**
* Prefixes for the error logging. Indexed first by int(todo) and second by
* int(result).
* int(result). Also contains the test's status and expected status.
*/
"prefixes": [
["TEST-UNEXPECTED-FAIL", "TEST-PASS"],
["TEST-KNOWN-FAIL", "TEST-UNEXPECTED-PASS"]
[
{status: 'FAIL', expected: 'PASS', message: "TEST-UNEXPECTED-FAIL"},
{status: 'PASS', expected: 'PASS', message: "TEST-PASS"}
],
[
{status: 'FAIL', expected: 'FAIL', message: "TEST-KNOWN-FAIL"},
{status: 'PASS', expected: 'FAIL', message: "TEST-UNEXPECTED-PASS"}
]
],
/**
@ -133,14 +139,21 @@ var W3CTest = {
*/
"_log": function(test) {
var url = this.getURL();
var msg = this.prefixes[+test.todo][+test.result] + " | ";
if (url) {
msg += url;
}
msg += " | " + this.formatTestMessage(test);
var message = this.formatTestMessage(test);
var result = this.prefixes[+test.todo][+test.result];
if (this.runner) {
this.runner[(test.result === !test.todo) ? "log" : "error"](msg);
this.runner.structuredLogger.testStatus(url,
test.name,
result.status,
result.expected,
message);
} else {
var msg = result.message + " | ";
if (url) {
msg += url;
}
msg += " | " + this.formatTestMessage(test);
dump(msg + "\n");
}
},

View File

@ -49,44 +49,33 @@
}
};
function sendTestMessage(msg) {
TestRunner.structuredLogger._dumpMessage = function(msg) {
sendAsyncMessage("test:indexedDB:ipcTestMessage", { msg: msg });
}
TestRunner.log = sendTestMessage;
TestRunner.error = sendTestMessage;
}
let regexString =
"^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL" +
"|TEST-DEBUG-INFO|TEST-INFO) \\| ([^\\|]+) \\|(.*)";
let regex = new RegExp(regexString);
let seenTestMessage = false;
// Whitelist of structured-log actions this test harness understands.
let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
// A structured message is valid when it declares one of the known actions.
function validStructuredMessage(message) {
  if (message.action === undefined) {
    return false;
  }
  return VALID_ACTIONS.indexOf(message.action) !== -1;
}
function onTestMessage(data) {
seenTestMessage = true;
let message = SpecialPowers.wrap(data).data.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
switch (state) {
case "TEST-PASS":
case "TEST-KNOWN-FAIL":
ok(true, details);
if (validStructuredMessage(message)) {
if (message.test === undefined || message.message === undefined) {
return;
}
switch (message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-INFO":
case "TEST-DEBUG-INFO":
default:
let details = message.test + " | " + message.message;
info(details);
}
}

View File

@ -30,7 +30,7 @@ inputmethod_setup(function() {
function appFrameScript() {
let input = content.document.getElementById('test-input');
input.onkeydown = input.onkeypress = input.onkeyup = function(event) {
dump('key event was fired in file_test_backspace_event.html.');
dump('key event was fired in file_test_backspace_event.html.\n');
sendAsyncMessage('test:KeyBoard:keyEvent', {'type':event.type});
};
}

View File

@ -729,6 +729,7 @@ TabChild::TabChild(nsIContentChild* aManager, const TabContext& aContext, uint32
, mOrientation(eScreenOrientation_PortraitPrimary)
, mUpdateHitRegion(false)
, mPendingTouchPreventedResponse(false)
, mTouchEndIsClick(Unknown)
, mIgnoreKeyPressEvent(false)
, mActiveElementManager(new ActiveElementManager())
, mHasValidInnerSize(false)
@ -1804,6 +1805,10 @@ TabChild::RecvHandleSingleTap(const CSSPoint& aPoint, const ScrollableLayerGuid&
return true;
}
if (mTouchEndIsClick == IsNotClick) {
return true;
}
LayoutDevicePoint currentPoint = APZCCallbackHelper::ApplyCallbackTransform(aPoint, aGuid) * mWidget->GetDefaultScale();;
MessageLoop::current()->PostDelayedTask(
@ -1894,7 +1899,7 @@ TabChild::RecvNotifyAPZStateChange(const ViewID& aViewId,
}
case APZStateChange::EndTouch:
{
mActiveElementManager->HandleTouchEnd(aArg);
mTouchEndIsClick = (aArg ? IsClick : IsNotClick);
break;
}
default:
@ -2113,6 +2118,7 @@ TabChild::RecvRealTouchEvent(const WidgetTouchEvent& aEvent,
localEvent.mFlags.mMultipleActionsPrevented;
switch (aEvent.message) {
case NS_TOUCH_START: {
mTouchEndIsClick = Unknown;
if (mPendingTouchPreventedResponse) {
// We can enter here if we get two TOUCH_STARTs in a row and didn't
// respond to the first one. Respond to it now.
@ -2128,9 +2134,17 @@ TabChild::RecvRealTouchEvent(const WidgetTouchEvent& aEvent,
break;
}
case NS_TOUCH_MOVE:
case NS_TOUCH_END:
case NS_TOUCH_CANCEL: {
if (isTouchPrevented && mTouchEndIsClick == IsClick) {
mTouchEndIsClick = IsNotClick;
}
// fall through
case NS_TOUCH_CANCEL:
if (mTouchEndIsClick != Unknown) {
mActiveElementManager->HandleTouchEnd(mTouchEndIsClick == IsClick);
}
// fall through
case NS_TOUCH_MOVE: {
if (mPendingTouchPreventedResponse) {
MOZ_ASSERT(aGuid == mPendingTouchPreventedGuid);
SendContentReceivedTouch(mPendingTouchPreventedGuid, isTouchPrevented);

View File

@ -591,6 +591,13 @@ private:
ScrollableLayerGuid mPendingTouchPreventedGuid;
void FireSingleTapEvent(LayoutDevicePoint aPoint);
enum ClickState {
Unknown,
IsClick,
IsNotClick
};
ClickState mTouchEndIsClick;
bool mIgnoreKeyPressEvent;
nsRefPtr<ActiveElementManager> mActiveElementManager;
bool mHasValidInnerSize;

View File

@ -311,7 +311,6 @@ VideoDevice::VideoDevice(MediaEngineVideoSource* aSource)
mFacingMode = dom::VideoFacingModeEnum::User;
}
// dom::MediaSourceEnum::Camera;
mMediaSource = aSource->GetMediaSource();
}
@ -367,9 +366,14 @@ MediaDevice::GetFacingMode(nsAString& aFacingMode)
NS_IMETHODIMP
MediaDevice::GetMediaSource(nsAString& aMediaSource)
{
aMediaSource.Assign(NS_ConvertUTF8toUTF16(
dom::MediaSourceEnumValues::strings[uint32_t(mMediaSource)].value));
if (mMediaSource == MediaSourceType::Microphone) {
aMediaSource.Assign(NS_LITERAL_STRING("microphone"));
} else if (mMediaSource == MediaSourceType::Window) { // this will go away
aMediaSource.Assign(NS_LITERAL_STRING("window"));
} else { // all the rest are shared
aMediaSource.Assign(NS_ConvertUTF8toUTF16(
dom::MediaSourceEnumValues::strings[uint32_t(mMediaSource)].value));
}
return NS_OK;
}
@ -759,7 +763,7 @@ template<class SourceType, class ConstraintsType>
static SourceSet *
GetSources(MediaEngine *engine,
ConstraintsType &aConstraints,
void (MediaEngine::* aEnumerate)(dom::MediaSourceEnum, nsTArray<nsRefPtr<SourceType> >*),
void (MediaEngine::* aEnumerate)(MediaSourceType, nsTArray<nsRefPtr<SourceType> >*),
const char* media_device_name = nullptr)
{
ScopedDeletePtr<SourceSet> result(new SourceSet);
@ -770,7 +774,8 @@ static SourceSet *
SourceSet candidateSet;
{
nsTArray<nsRefPtr<SourceType> > sources;
(engine->*aEnumerate)(aConstraints.mMediaSource, &sources);
// all MediaSourceEnums are contained in MediaSourceType
(engine->*aEnumerate)((MediaSourceType)((int)aConstraints.mMediaSource), &sources);
/**
* We're allowing multiple tabs to access the same camera for parity
* with Chrome. See bug 811757 for some of the issues surrounding
@ -1914,7 +1919,8 @@ WindowsHashToArrayFunc (const uint64_t& aId,
for (uint32_t i = 0; i < length; ++i) {
nsRefPtr<GetUserMediaCallbackMediaStreamListener> listener =
aData->ElementAt(i);
if (listener->CapturingVideo() || listener->CapturingAudio()) {
if (listener->CapturingVideo() || listener->CapturingAudio() ||
listener->CapturingScreen() || listener->CapturingWindow()) {
capturing = true;
break;
}
@ -1945,24 +1951,29 @@ MediaManager::GetActiveMediaCaptureWindows(nsISupportsArray **aArray)
NS_IMETHODIMP
MediaManager::MediaCaptureWindowState(nsIDOMWindow* aWindow, bool* aVideo,
bool* aAudio)
bool* aAudio, bool *aScreenShare,
bool* aWindowShare)
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
*aVideo = false;
*aAudio = false;
*aScreenShare = false;
*aWindowShare = false;
nsresult rv = MediaCaptureWindowStateInternal(aWindow, aVideo, aAudio);
nsresult rv = MediaCaptureWindowStateInternal(aWindow, aVideo, aAudio, aScreenShare, aWindowShare);
#ifdef DEBUG
nsCOMPtr<nsPIDOMWindow> piWin = do_QueryInterface(aWindow);
LOG(("%s: window %lld capturing %s %s", __FUNCTION__, piWin ? piWin->WindowID() : -1,
*aVideo ? "video" : "", *aAudio ? "audio" : ""));
LOG(("%s: window %lld capturing %s %s %s %s", __FUNCTION__, piWin ? piWin->WindowID() : -1,
*aVideo ? "video" : "", *aAudio ? "audio" : "",
*aScreenShare ? "screenshare" : "", *aWindowShare ? "windowshare" : ""));
#endif
return rv;
}
nsresult
MediaManager::MediaCaptureWindowStateInternal(nsIDOMWindow* aWindow, bool* aVideo,
bool* aAudio)
bool* aAudio, bool *aScreenShare,
bool* aWindowShare)
{
// We need to return the union of all streams in all innerwindows that
// correspond to that outerwindow.
@ -1991,8 +2002,11 @@ MediaManager::MediaCaptureWindowStateInternal(nsIDOMWindow* aWindow, bool* aVide
if (listener->CapturingAudio()) {
*aAudio = true;
}
if (*aAudio && *aVideo) {
return NS_OK; // no need to continue iterating
if (listener->CapturingScreen()) {
*aScreenShare = true;
}
if (listener->CapturingWindow()) {
*aWindowShare = true;
}
}
}
@ -2008,10 +2022,7 @@ MediaManager::MediaCaptureWindowStateInternal(nsIDOMWindow* aWindow, bool* aVide
docShell->GetChildAt(i, getter_AddRefs(item));
nsCOMPtr<nsPIDOMWindow> win = item ? item->GetWindow() : nullptr;
MediaCaptureWindowStateInternal(win, aVideo, aAudio);
if (*aAudio && *aVideo) {
return NS_OK; // no need to continue iterating
}
MediaCaptureWindowStateInternal(win, aVideo, aAudio, aScreenShare, aWindowShare);
}
}
}

View File

@ -105,6 +105,7 @@ public:
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
return mVideoSource && !mStopped &&
mVideoSource->GetMediaSource() == MediaSourceType::Camera &&
(!mVideoSource->IsFake() ||
Preferences::GetBool("media.navigator.permission.fake"));
}
@ -115,6 +116,18 @@ public:
(!mAudioSource->IsFake() ||
Preferences::GetBool("media.navigator.permission.fake"));
}
bool CapturingScreen()
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
return mVideoSource && !mStopped &&
mVideoSource->GetMediaSource() == MediaSourceType::Screen;
}
bool CapturingWindow()
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
return mVideoSource && !mStopped &&
mVideoSource->GetMediaSource() == MediaSourceType::Window;
}
void SetStopped()
{
@ -486,7 +499,7 @@ protected:
nsString mID;
bool mHasFacingMode;
dom::VideoFacingModeEnum mFacingMode;
dom::MediaSourceEnum mMediaSource;
MediaSourceType mMediaSource;
nsRefPtr<MediaEngineSource> mSource;
};
@ -579,7 +592,8 @@ private:
~MediaManager() {}
nsresult MediaCaptureWindowStateInternal(nsIDOMWindow* aWindow, bool* aVideo,
bool* aAudio);
bool* aAudio, bool *aScreenShare,
bool* aWindowShare);
void StopMediaStreams();

View File

@ -12,12 +12,13 @@ interface nsIDOMWindow;
#define MEDIAMANAGERSERVICE_CONTRACTID "@mozilla.org/mediaManagerService;1"
%}
[scriptable, builtinclass, uuid(2efff6ab-0e3e-4cc4-8f9b-4aaca59a1140)]
[scriptable, builtinclass, uuid(f431b523-4536-4ba7-a2c1-7e1bf670d32a)]
interface nsIMediaManagerService : nsISupports
{
/* return a array of inner windows that have active captures */
readonly attribute nsISupportsArray activeMediaCaptureWindows;
/* Get the capture state for the given window and all descendant windows (iframes, etc) */
void mediaCaptureWindowState(in nsIDOMWindow aWindow, out boolean aVideo, out boolean aAudio);
void mediaCaptureWindowState(in nsIDOMWindow aWindow, out boolean aVideo, out boolean aAudio,
[optional] out boolean aScreenShare, [optional] out boolean aWindowShare);
};

View File

@ -24,8 +24,8 @@
SpecialPowers.prototype.unregisterProcessCrashObservers = function() { };
content.wrappedJSObject.RunSet.reloadAndRunAll({
preventDefault: function() { },
__exposedProps__: { preventDefault: 'r' }
preventDefault: function() { },
__exposedProps__: { preventDefault: 'r' }
});
}
@ -44,37 +44,31 @@
oldComplete();
}
};
let oldLog = TestRunner.log;
TestRunner.log = function(msg) {
TestRunner.structuredLogger._dumpMessage = function(msg) {
sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
};
TestRunner.error = function(msg) {
sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
};
}
}
let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
// Whitelist of structured-log actions this test harness understands.
let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
// A structured message is valid when it declares one of the known actions.
function validStructuredMessage(message) {
  if (message.action === undefined) {
    return false;
  }
  return VALID_ACTIONS.indexOf(message.action) !== -1;
}
function onTestMessage(data) {
let message = SpecialPowers.wrap(data).json.msg;
let match = regex.exec(message);
if (match) {
let state = match[1];
let details = match[2] + " | " + match[3];
let message = SpecialPowers.wrap(data).data.msg;
switch (state) {
case "TEST-PASS":
case "TEST-KNOWN-FAIL":
ok(true, details);
if (validStructuredMessage(message)) {
if (message.test === undefined || message.message === undefined) {
return;
}
switch (message.action) {
case "test_status":
case "test_end":
ok(message.expected === undefined, message.test, message.message);
break;
case "TEST-UNEXPECTED-FAIL":
case "TEST-UNEXPECTED-PASS":
ok(false, details);
break;
case "TEST-DEBUG-INFO":
default:
let details = message.test + " | " + message.message;
info(details);
}
}

View File

@ -25,6 +25,10 @@ skip-if = (toolkit == 'gonk' && debug) # b2g emulator seems to be too slow (Bug
skip-if = (toolkit == 'gonk' && debug) # debug-only failure
[test_getUserMedia_basicVideo.html]
skip-if = (toolkit == 'gonk' && debug) # debug-only failure
[test_getUserMedia_basicScreenshare.html]
skip-if = toolkit == 'gonk' || toolkit == 'android' # no screenshare on b2g/android
[test_getUserMedia_basicWindowshare.html]
skip-if = toolkit == 'gonk' || toolkit == 'android' # no windowshare on b2g/android
[test_getUserMedia_basicVideoAudio.html]
skip-if = (toolkit == 'gonk' && debug) # debug-only failure, turned an intermittent (bug 962579) into a permanant orange
[test_getUserMedia_constraints.html]
@ -55,6 +59,10 @@ skip-if = toolkit == 'gonk' # b2g(Bug 960442, video support for WebRTC is disabl
skip-if = toolkit == 'gonk' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
[test_peerConnection_basicVideo.html]
skip-if = toolkit == 'gonk' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
[test_peerConnection_basicScreenshare.html]
skip-if = toolkit == 'gonk' || toolkit == 'android' # no screenshare on b2g/android
[test_peerConnection_basicWindowshare.html]
skip-if = toolkit == 'gonk' || toolkit == 'android' # no windowshare on b2g/android
[test_peerConnection_bug822674.html]
[test_peerConnection_bug825703.html]
[test_peerConnection_bug827843.html]

View File

@ -0,0 +1,55 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=983504
-->
<head>
<meta charset="utf-8">
<title>mozGetUserMedia Basic Screenshare Test</title>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="application/javascript" src="head.js"></script>
<script type="application/javascript" src="mediaStreamPlayback.js"></script>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=983504">mozGetUserMedia Basic Screenshare Test</a>
<p id="display"></p>
<div id="content" style="display: none">
<video id="testVideo"></video>
</div>
<pre id="test">
<script type="application/javascript">
/**
* Run a test to verify that we can complete a start and stop media playback
* cycle for a screenshare LocalMediaStream on a video HTMLMediaElement.
*/
runTest(function () {
var testVideo = document.getElementById('testVideo');
var constraints = {
video: {
mandatory:{
chromeMediaSource:'screen',
maxWidth:screen.availWidth,
maxHeight:screen.availHeight
},
optional:[]
}
};
getUserMedia(constraints, function (aStream) {
checkMediaStreamTracks(constraints, aStream);
var playback = new LocalMediaStreamPlayback(testVideo, aStream);
playback.playMediaWithStreamStop(false, function () {
aStream.stop();
SimpleTest.finish();
}, generateErrorCallback());
}, generateErrorCallback());
});
</script>
</pre>
</body>
</html>

View File

@ -0,0 +1,55 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=983504
-->
<head>
<meta charset="utf-8">
<title>mozGetUserMedia Basic Windowshare Test</title>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="application/javascript" src="head.js"></script>
<script type="application/javascript" src="mediaStreamPlayback.js"></script>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1038926">mozGetUserMedia Basic Windowshare Test</a>
<p id="display"></p>
<div id="content" style="display: none">
<video id="testVideo"></video>
</div>
<pre id="test">
<script type="application/javascript">
/**
* Run a test to verify that we can complete a start and stop media playback
* cycle for a windowshare LocalMediaStream on a video HTMLMediaElement.
*/
runTest(function () {
var testVideo = document.getElementById('testVideo');
var constraints = {
video: {
mandatory:{
chromeMediaSource:'window',
maxWidth:screen.availWidth,
maxHeight:screen.availHeight
},
optional:[]
}
};
getUserMedia(constraints, function (aStream) {
checkMediaStreamTracks(constraints, aStream);
var playback = new LocalMediaStreamPlayback(testVideo, aStream);
playback.playMediaWithStreamStop(false, function () {
aStream.stop();
SimpleTest.finish();
}, generateErrorCallback());
}, generateErrorCallback());
});
</script>
</pre>
</body>
</html>

View File

@ -0,0 +1,39 @@
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="application/javascript" src="head.js"></script>
<script type="application/javascript" src="mediaStreamPlayback.js"></script>
<script type="application/javascript" src="pc.js"></script>
<script type="application/javascript" src="templates.js"></script>
<script type="application/javascript" src="turnConfig.js"></script>
</head>
<body>
<pre id="test">
<script type="application/javascript">
createHTML({
bug: "1039666",
title: "Basic screenshare-only peer connection"
});
var test;
runNetworkTest(function (options) {
test = new PeerConnectionTest(options);
var constraints = {
video: {
mandatory:{
chromeMediaSource:'screen',
maxWidth:screen.availWidth,
maxHeight:screen.availHeight
},
optional:[]
}
};
test.setMediaConstraints([constraints], [constraints]);
test.run();
});
</script>
</pre>
</body>
</html>

View File

@ -0,0 +1,39 @@
<!DOCTYPE HTML>
<html>
<head>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="application/javascript" src="head.js"></script>
<script type="application/javascript" src="mediaStreamPlayback.js"></script>
<script type="application/javascript" src="pc.js"></script>
<script type="application/javascript" src="templates.js"></script>
<script type="application/javascript" src="turnConfig.js"></script>
</head>
<body>
<pre id="test">
<script type="application/javascript">
createHTML({
bug: "1038926",
title: "Basic windowshare-only peer connection"
});
var test;
runNetworkTest(function (options) {
test = new PeerConnectionTest(options);
var constraints = {
video: {
mandatory:{
chromeMediaSource:'window',
maxWidth:screen.availWidth,
maxHeight:screen.availHeight
},
optional:[]
}
};
test.setMediaConstraints([constraints], [constraints]);
test.run();
});
</script>
</pre>
</body>
</html>

View File

@ -5,7 +5,7 @@
<body onload="runTests()">
<script class="testbody" type="application/javascript">
dump('lastScript');
dump('lastScript\n');
SimpleTest.waitForExplicitFinish();
setTestPluginEnabledState(SpecialPowers.Ci.nsIPluginTag.STATE_ENABLED);

View File

@ -17,7 +17,8 @@ enum VideoFacingModeEnum {
enum MediaSourceEnum {
"camera",
"screen",
"application"
"application",
"window"
};
dictionary ConstrainLongRange {

View File

@ -7,7 +7,7 @@ onmessage = function(evt) {
id = setTimeout(function() {}, 200);
postMessage(clearTimeout(id) == undefined);
postMessage(dump(42) == undefined);
postMessage(dump(42 + '\n') == undefined);
postMessage('finished');
}

View File

@ -155,6 +155,12 @@ LayerManager::CreateAsynchronousImageContainer()
return container.forget();
}
bool
LayerManager::AreComponentAlphaLayersEnabled()
{
return gfxPrefs::ComponentAlphaEnabled();
}
//--------------------------------------------------
// Layer
@ -1023,32 +1029,39 @@ ContainerLayer::DefaultComputeEffectiveTransforms(const Matrix4x4& aTransformToS
void
ContainerLayer::DefaultComputeSupportsComponentAlphaChildren(bool* aNeedsSurfaceCopy)
{
bool supportsComponentAlphaChildren = false;
if (!(GetContentFlags() & Layer::CONTENT_COMPONENT_ALPHA_DESCENDANT) ||
!Manager()->AreComponentAlphaLayersEnabled()) {
mSupportsComponentAlphaChildren = false;
if (aNeedsSurfaceCopy) {
*aNeedsSurfaceCopy = false;
}
return;
}
mSupportsComponentAlphaChildren = false;
bool needsSurfaceCopy = false;
CompositionOp blendMode = GetEffectiveMixBlendMode();
if (UseIntermediateSurface()) {
if (GetEffectiveVisibleRegion().GetNumRects() == 1 &&
(GetContentFlags() & Layer::CONTENT_OPAQUE))
{
supportsComponentAlphaChildren = true;
mSupportsComponentAlphaChildren = true;
} else {
gfx::Matrix transform;
if (HasOpaqueAncestorLayer(this) &&
GetEffectiveTransform().Is2D(&transform) &&
!gfx::ThebesMatrix(transform).HasNonIntegerTranslation() &&
blendMode == gfx::CompositionOp::OP_OVER) {
supportsComponentAlphaChildren = true;
mSupportsComponentAlphaChildren = true;
needsSurfaceCopy = true;
}
}
} else if (blendMode == gfx::CompositionOp::OP_OVER) {
supportsComponentAlphaChildren =
mSupportsComponentAlphaChildren =
(GetContentFlags() & Layer::CONTENT_OPAQUE) ||
(GetParent() && GetParent()->SupportsComponentAlphaChildren());
}
mSupportsComponentAlphaChildren = supportsComponentAlphaChildren &&
gfxPrefs::ComponentAlphaEnabled();
if (aNeedsSurfaceCopy) {
*aNeedsSurfaceCopy = mSupportsComponentAlphaChildren && needsSurfaceCopy;
}

View File

@ -308,12 +308,20 @@ public:
bool IsSnappingEffectiveTransforms() { return mSnapEffectiveTransforms; }
/**
* Returns true if the layer manager can't render component alpha
* layers, and layer building should do it's best to avoid
* creating them.
*/
virtual bool ShouldAvoidComponentAlphaLayers() { return false; }
/**
* Returns true if this LayerManager can properly support layers with
* SurfaceMode::SURFACE_COMPONENT_ALPHA. This can include disabling component
* alpha if required.
* SurfaceMode::SURFACE_COMPONENT_ALPHA. LayerManagers that can't will use
* transparent surfaces (and lose subpixel-AA for text).
*/
virtual bool AreComponentAlphaLayersEnabled() { return true; }
virtual bool AreComponentAlphaLayersEnabled();
/**
* CONSTRUCTION PHASE ONLY
@ -747,23 +755,29 @@ public:
*/
CONTENT_COMPONENT_ALPHA = 0x02,
/**
* If this is set then one of the descendant layers of this one has
* CONTENT_COMPONENT_ALPHA set.
*/
CONTENT_COMPONENT_ALPHA_DESCENDANT = 0x04,
/**
* If this is set then this layer is part of a preserve-3d group, and should
* be sorted with sibling layers that are also part of the same group.
*/
CONTENT_PRESERVE_3D = 0x04,
CONTENT_PRESERVE_3D = 0x08,
/**
* This indicates that the transform may be changed on during an empty
* transaction where there is no possibility of redrawing the content, so the
* implementation should be ready for that.
*/
CONTENT_MAY_CHANGE_TRANSFORM = 0x08,
CONTENT_MAY_CHANGE_TRANSFORM = 0x10,
/**
* Disable subpixel AA for this layer. This is used if the display isn't suited
* for subpixel AA like hidpi or rotated content.
*/
CONTENT_DISABLE_SUBPIXEL_AA = 0x10
CONTENT_DISABLE_SUBPIXEL_AA = 0x20
};
/**
* CONSTRUCTION PHASE ONLY

View File

@ -465,10 +465,8 @@ RotatedContentBuffer::BeginPaint(ThebesLayer* aLayer,
#else
if (!aLayer->GetParent() ||
!aLayer->GetParent()->SupportsComponentAlphaChildren() ||
!aLayer->Manager()->IsCompositingCheap() ||
!aLayer->AsShadowableLayer() ||
!aLayer->AsShadowableLayer()->HasShadow() ||
!gfxPrefs::ComponentAlphaEnabled()) {
!aLayer->AsShadowableLayer()->HasShadow()) {
mode = SurfaceMode::SURFACE_SINGLE_CHANNEL_ALPHA;
} else {
result.mContentType = gfxContentType::COLOR;

View File

@ -888,16 +888,26 @@ bool
APZCTreeManager::FlushRepaintsForOverscrollHandoffChain()
{
MonitorAutoLock lock(mTreeLock); // to access mOverscrollHandoffChain
if (mOverscrollHandoffChain.length() == 0) {
return false;
}
for (uint32_t i = 0; i < mOverscrollHandoffChain.length(); i++) {
nsRefPtr<AsyncPanZoomController> item = mOverscrollHandoffChain[i];
if (item) {
item->FlushRepaintForOverscrollHandoff();
}
}
return true;
return mOverscrollHandoffChain.length() > 0;
}
bool
APZCTreeManager::CancelAnimationsForOverscrollHandoffChain()
{
MonitorAutoLock lock(mTreeLock); // to access mOverscrollHandoffChain
for (uint32_t i = 0; i < mOverscrollHandoffChain.length(); i++) {
nsRefPtr<AsyncPanZoomController> item = mOverscrollHandoffChain[i];
if (item) {
item->CancelAnimation();
}
}
return mOverscrollHandoffChain.length() > 0;
}
bool

View File

@ -293,6 +293,7 @@ public:
bool HandOffFling(AsyncPanZoomController* aApzc, ScreenPoint aVelocity);
bool FlushRepaintsForOverscrollHandoffChain();
bool CancelAnimationsForOverscrollHandoffChain();
/**
* Determine whether |aApzc|, or any APZC along its overscroll handoff chain,

View File

@ -813,6 +813,18 @@ AsyncPanZoomController::GetTouchStartTolerance()
return static_cast<AxisLockMode>(gfxPrefs::APZAxisLockMode());
}
void
AsyncPanZoomController::CancelAnimationForHandoffChain()
{
APZCTreeManager* treeManagerLocal = mTreeManager;
if (treeManagerLocal && treeManagerLocal->CancelAnimationsForOverscrollHandoffChain()) {
return;
}
NS_WARNING("Overscroll handoff chain was empty in CancelAnimationForHandoffChain! This should not be the case.");
// Graceful handling of error condition
CancelAnimation();
}
nsEventStatus AsyncPanZoomController::ReceiveInputEvent(const InputData& aEvent) {
AssertOnControllerThread();
@ -824,6 +836,22 @@ nsEventStatus AsyncPanZoomController::ReceiveInputEvent(const InputData& aEvent)
if (aEvent.AsMultiTouchInput().mType == MultiTouchInput::MULTITOUCH_START) {
block = StartNewTouchBlock(false);
APZC_LOG("%p started new touch block %p\n", this, block);
// We want to cancel animations here as soon as possible (i.e. without waiting for
// content responses) because a finger has gone down and we don't want to keep moving
// the content under the finger. However, to prevent "future" touchstart events from
// interfering with "past" animations (i.e. from a previous touch block that is still
// being processed) we only do this animation-cancellation if there are no older
// touch blocks still in the queue.
if (block == CurrentTouchBlock()) {
if (GetVelocityVector().Length() > gfxPrefs::APZFlingStopOnTapThreshold()) {
// If we're already in a fast fling, then we want the touch event to stop the fling
// and to disallow the touch event from being used as part of a fling.
block->DisallowSingleTap();
}
CancelAnimationForHandoffChain();
}
if (mFrameMetrics.mMayHaveTouchListeners || mFrameMetrics.mMayHaveTouchCaret) {
// Content may intercept the touch events and prevent-default them. So we schedule
// a timeout to give content time to do that.
@ -952,19 +980,8 @@ nsEventStatus AsyncPanZoomController::OnTouchStart(const MultiTouchInput& aEvent
switch (mState) {
case FLING:
if (GetVelocityVector().Length() > gfxPrefs::APZFlingStopOnTapThreshold()) {
// This is ugly. Hopefully bug 1009733 can reorganize how events
// flow through APZC and change it so that events are handed to the
// gesture listener *after* we deal with them here. This should allow
// removal of this ugly code.
nsRefPtr<GestureEventListener> listener = GetGestureEventListener();
if (listener) {
listener->CancelSingleTouchDown();
}
}
// Fall through.
case ANIMATING_ZOOM:
CancelAnimation();
CancelAnimationForHandoffChain();
// Fall through.
case NOTHING: {
mX.StartTouch(point.x, aEvent.mTime);
@ -1290,7 +1307,7 @@ nsEventStatus AsyncPanZoomController::OnPanMayBegin(const PanGestureInput& aEven
mX.StartTouch(aEvent.mPanStartPoint.x, aEvent.mTime);
mY.StartTouch(aEvent.mPanStartPoint.y, aEvent.mTime);
CancelAnimation();
CancelAnimationForHandoffChain();
return nsEventStatus_eConsumeNoDefault;
}
@ -1407,7 +1424,9 @@ nsEventStatus AsyncPanZoomController::GenerateSingleTap(const ScreenIntPoint& aP
if (controller) {
CSSPoint geckoScreenPoint;
if (ConvertToGecko(aPoint, &geckoScreenPoint)) {
int32_t modifiers = WidgetModifiersToDOMModifiers(aModifiers);
if (!CurrentTouchBlock()->SetSingleTapOccurred()) {
return nsEventStatus_eIgnore;
}
// Because this may be being running as part of APZCTreeManager::ReceiveInputEvent,
// calling controller->HandleSingleTap directly might mean that content receives
// the single tap message before the corresponding touch-up. To avoid that we
@ -1415,9 +1434,9 @@ nsEventStatus AsyncPanZoomController::GenerateSingleTap(const ScreenIntPoint& aP
// See bug 965381 for the issue this was causing.
controller->PostDelayedTask(
NewRunnableMethod(controller.get(), &GeckoContentController::HandleSingleTap,
geckoScreenPoint, modifiers, GetGuid()),
geckoScreenPoint, WidgetModifiersToDOMModifiers(aModifiers),
GetGuid()),
0);
CurrentTouchBlock()->SetSingleTapOccurred();
return nsEventStatus_eConsumeNoDefault;
}
}
@ -1780,6 +1799,10 @@ void AsyncPanZoomController::CancelAnimation() {
APZC_LOG("%p running CancelAnimation in state %d\n", this, mState);
SetState(NOTHING);
mAnimation = nullptr;
// Since there is no animation in progress now the axes should
// have no velocity either.
mX.SetVelocity(0);
mY.SetVelocity(0);
// Setting the state to nothing and cancelling the animation can
// preempt normal mechanisms for relieving overscroll, so we need to clear
// overscroll here.

View File

@ -536,6 +536,12 @@ protected:
void FireAsyncScrollOnTimeout();
private:
/**
* Cancel animations all the way up the overscroll handoff chain if possible,
* or just the local APZC if not.
*/
void CancelAnimationForHandoffChain();
/**
* Helper to set the current state. Holds the monitor before actually setting
* it and fires content controller events based on state changes. Always set

View File

@ -109,23 +109,6 @@ nsEventStatus GestureEventListener::HandleInputEvent(const MultiTouchInput& aEve
return rv;
}
void GestureEventListener::CancelSingleTouchDown()
{
GEL_LOG("Cancelling touch-down while in state %d\n", mState);
switch (mState) {
case GESTURE_FIRST_SINGLE_TOUCH_DOWN:
CancelLongTapTimeoutTask();
CancelMaxTapTimeoutTask();
SetState(GESTURE_NONE);
break;
default:
NS_WARNING("IgnoreLastTouchStart() called while in unexpected state");
SetState(GESTURE_NONE);
break;
}
}
int32_t GestureEventListener::GetLastTouchIdentifier() const
{
if (mTouches.Length() != 1) {

View File

@ -53,12 +53,6 @@ public:
*/
nsEventStatus HandleInputEvent(const MultiTouchInput& aEvent);
/**
* Cancels any tap-related timeouts and clears any state that was set because
* we recently processed a touch-start.
*/
void CancelSingleTouchDown();
/**
* Returns the identifier of the touch in the last touch event processed by
* this GestureEventListener. This should only be called when the last touch

View File

@ -28,6 +28,7 @@ TouchBlockState::TouchBlockState()
, mPreventDefault(false)
, mContentResponded(false)
, mContentResponseTimerExpired(false)
, mSingleTapDisallowed(false)
, mSingleTapOccurred(false)
{
TBS_LOG("Creating %p\n", this);
@ -104,10 +105,21 @@ TouchBlockState::IsDefaultPrevented() const
}
void
TouchBlockState::DisallowSingleTap()
{
TBS_LOG("%p disallowing single-tap\n", this);
mSingleTapDisallowed = true;
}
bool
TouchBlockState::SetSingleTapOccurred()
{
TBS_LOG("%p setting single-tap occurred\n", this);
mSingleTapOccurred = true;
TBS_LOG("%p attempting to set single-tap occurred; disallowed=%d\n", this, mSingleTapDisallowed);
if (!mSingleTapDisallowed) {
mSingleTapOccurred = true;
return true;
}
return false;
}
bool

View File

@ -85,9 +85,14 @@ public:
bool IsDefaultPrevented() const;
/**
* Set a flag that indicates that this touch block triggered a single tap event.
* Set a flag that disables setting the single-tap flag on this block.
*/
void SetSingleTapOccurred();
void DisallowSingleTap();
/**
* Set a flag that indicates that this touch block triggered a single tap event.
* @return true iff DisallowSingleTap was not previously called.
*/
bool SetSingleTapOccurred();
/**
* @return true iff SetSingleTapOccurred was previously called on this block.
*/
@ -135,6 +140,7 @@ private:
bool mPreventDefault;
bool mContentResponded;
bool mContentResponseTimerExpired;
bool mSingleTapDisallowed;
bool mSingleTapOccurred;
nsTArray<MultiTouchInput> mEvents;
};

View File

@ -97,23 +97,8 @@ BasicCompositor::CreateRenderTargetFromSource(const IntRect &aRect,
const CompositingRenderTarget *aSource,
const IntPoint &aSourcePoint)
{
RefPtr<DrawTarget> target = mDrawTarget->CreateSimilarDrawTarget(aRect.Size(), SurfaceFormat::B8G8R8A8);
RefPtr<BasicCompositingRenderTarget> rt = new BasicCompositingRenderTarget(target, aRect);
DrawTarget *source;
if (aSource) {
const BasicCompositingRenderTarget* sourceSurface =
static_cast<const BasicCompositingRenderTarget*>(aSource);
source = sourceSurface->mDrawTarget;
} else {
source = mDrawTarget;
}
RefPtr<SourceSurface> snapshot = source->Snapshot();
IntRect sourceRect(aSourcePoint, aRect.Size());
rt->mDrawTarget->CopySurface(snapshot, sourceRect, IntPoint(0, 0));
return rt.forget();
MOZ_CRASH("Shouldn't be called!");
return nullptr;
}
TemporaryRef<DataTextureSource>

View File

@ -105,7 +105,7 @@ public:
virtual void EndTransaction(DrawThebesLayerCallback aCallback,
void* aCallbackData,
EndTransactionFlags aFlags = END_DEFAULT);
virtual bool AreComponentAlphaLayersEnabled() { return !IsWidgetLayerManager(); }
virtual bool ShouldAvoidComponentAlphaLayers() { return IsWidgetLayerManager(); }
void AbortTransaction();

View File

@ -537,6 +537,13 @@ ClientLayerManager::IsCompositingCheap()
LayerManager::IsCompositingCheap(mForwarder->GetCompositorBackendType());
}
bool
ClientLayerManager::AreComponentAlphaLayersEnabled()
{
return GetCompositorBackendType() != LayersBackend::LAYERS_BASIC &&
LayerManager::AreComponentAlphaLayersEnabled();
}
void
ClientLayerManager::SetIsFirstPaint()
{

View File

@ -145,6 +145,9 @@ public:
CompositorChild* GetCompositorChild();
// Disable component alpha layers with the software compositor.
virtual bool ShouldAvoidComponentAlphaLayers() { return !IsCompositingCheap(); }
/**
* Called for each iteration of a progressive tile update. Updates
* aMetrics with the current scroll offset and scale being used to composite
@ -185,6 +188,8 @@ public:
return (GetTextureFactoryIdentifier().mSupportedBlendModes & aMixBlendModes) == aMixBlendModes;
}
virtual bool AreComponentAlphaLayersEnabled() MOZ_OVERRIDE;
// Log APZ test data for the current paint. We supply the paint sequence
// number ourselves, and take care of calling APZTestData::StartNewPaint()
// when a new paint is started.

View File

@ -144,6 +144,13 @@ LayerManagerComposite::UpdateRenderBounds(const nsIntRect& aRect)
mRenderBounds = aRect;
}
bool
LayerManagerComposite::AreComponentAlphaLayersEnabled()
{
return Compositor::GetBackend() != LayersBackend::LAYERS_BASIC &&
LayerManager::AreComponentAlphaLayersEnabled();
}
void
LayerManagerComposite::BeginTransaction()
{

View File

@ -152,6 +152,8 @@ public:
MOZ_CRASH("Shouldn't be called for composited layer manager");
}
virtual bool AreComponentAlphaLayersEnabled() MOZ_OVERRIDE;
virtual TemporaryRef<DrawTarget>
CreateOptimalMaskDrawTarget(const IntSize &aSize) MOZ_OVERRIDE;

View File

@ -198,10 +198,16 @@ protected:
tm = new TestAPZCTreeManager();
apzc = new TestAsyncPanZoomController(0, mcc, tm, mGestureBehavior);
apzc->SetFrameMetrics(TestFrameMetrics());
// Since most tests pass inputs directly to the APZC instead of going through
// the tree manager, we need to build the overscroll handoff chain explicitly
// for panning and animation-cancelling to work correctly.
tm->BuildOverscrollHandoffChain(apzc);
}
virtual void TearDown()
{
tm->ClearOverscrollHandoffChain();
apzc->Destroy();
}
@ -251,27 +257,32 @@ ApzcUp(AsyncPanZoomController* apzc, int aX, int aY, int& aTime)
return apzc->ReceiveInputEvent(mti);
}
static nsEventStatus
ApzcTap(AsyncPanZoomController* apzc, int aX, int aY, int& aTime,
int aTapLength, MockContentControllerDelayed* mcc = nullptr)
static void
ApzcTap(AsyncPanZoomController* aApzc, int aX, int aY, int& aTime, int aTapLength,
nsEventStatus (*aOutEventStatuses)[2] = nullptr)
{
nsEventStatus status = ApzcDown(apzc, aX, aY, aTime);
if (mcc != nullptr) {
// There will be delayed tasks posted for the long-tap and MAX_TAP timeouts, but
// if we were provided a non-null mcc we want to clear them.
mcc->CheckHasDelayedTask();
mcc->ClearDelayedTask();
mcc->CheckHasDelayedTask();
mcc->ClearDelayedTask();
nsEventStatus status = ApzcDown(aApzc, aX, aY, aTime);
if (aOutEventStatuses) {
(*aOutEventStatuses)[0] = status;
}
EXPECT_EQ(nsEventStatus_eConsumeNoDefault, status);
aTime += aTapLength;
return ApzcUp(apzc, aX, aY, aTime);
status = ApzcUp(aApzc, aX, aY, aTime);
if (aOutEventStatuses) {
(*aOutEventStatuses)[1] = status;
}
}
static void
ApzcTapAndCheckStatus(AsyncPanZoomController* aApzc, int aX, int aY, int& aTime, int aTapLength)
{
nsEventStatus statuses[2];
ApzcTap(aApzc, aX, aY, aTime, aTapLength, &statuses);
EXPECT_EQ(nsEventStatus_eConsumeNoDefault, statuses[0]);
EXPECT_EQ(nsEventStatus_eIgnore, statuses[1]);
}
static void
ApzcPan(AsyncPanZoomController* aApzc,
TestAPZCTreeManager* aTreeManager,
int& aTime,
int aTouchStartY,
int aTouchEndY,
@ -282,11 +293,6 @@ ApzcPan(AsyncPanZoomController* aApzc,
const int TIME_BETWEEN_TOUCH_EVENT = 100;
const int OVERCOME_TOUCH_TOLERANCE = 100;
// Since we're passing inputs directly to the APZC instead of going through
// the tree manager, we need to build the overscroll handoff chain explicitly
// for panning to work correctly.
aTreeManager->BuildOverscrollHandoffChain(aApzc);
// Make sure the move is large enough to not be handled as a tap
nsEventStatus status = ApzcDown(aApzc, 10, aTouchStartY + OVERCOME_TOUCH_TOLERANCE, aTime);
if (aOutEventStatuses) {
@ -328,10 +334,6 @@ ApzcPan(AsyncPanZoomController* aApzc,
}
aTime += TIME_BETWEEN_TOUCH_EVENT;
// Since we've explicitly built the overscroll handoff chain before
// touch-start, we need to explicitly clear it after touch-end.
aTreeManager->ClearOverscrollHandoffChain();
}
/*
@ -340,7 +342,6 @@ ApzcPan(AsyncPanZoomController* aApzc,
*/
static void
ApzcPanAndCheckStatus(AsyncPanZoomController* aApzc,
TestAPZCTreeManager* aTreeManager,
int& aTime,
int aTouchStartY,
int aTouchEndY,
@ -349,7 +350,7 @@ ApzcPanAndCheckStatus(AsyncPanZoomController* aApzc,
nsTArray<uint32_t>* aAllowedTouchBehaviors)
{
nsEventStatus statuses[4]; // down, move, move, up
ApzcPan(aApzc, aTreeManager, aTime, aTouchStartY, aTouchEndY, false, aAllowedTouchBehaviors, &statuses);
ApzcPan(aApzc, aTime, aTouchStartY, aTouchEndY, false, aAllowedTouchBehaviors, &statuses);
nsEventStatus touchStartStatus;
if (hasTouchListeners || gfxPrefs::TouchActionEnabled()) {
@ -378,6 +379,16 @@ ApzcPanAndCheckStatus(AsyncPanZoomController* aApzc,
EXPECT_EQ(touchMoveStatus, statuses[2]);
}
static void
ApzcPanNoFling(AsyncPanZoomController* aApzc,
int& aTime,
int aTouchStartY,
int aTouchEndY)
{
ApzcPan(aApzc, aTime, aTouchStartY, aTouchEndY);
aApzc->CancelAnimation();
}
static void
ApzcPinchWithPinchInput(AsyncPanZoomController* aApzc,
int aFocusX, int aFocusY, float aScale,
@ -796,7 +807,7 @@ protected:
allowedTouchBehaviors.AppendElement(aBehavior);
// Pan down
ApzcPanAndCheckStatus(apzc, tm, time, touchStart, touchEnd, !aShouldTriggerScroll, false, &allowedTouchBehaviors);
ApzcPanAndCheckStatus(apzc, time, touchStart, touchEnd, !aShouldTriggerScroll, false, &allowedTouchBehaviors);
apzc->SampleContentTransformForFrame(testStartTime, &viewTransformOut, pointOut);
if (aShouldTriggerScroll) {
@ -807,8 +818,12 @@ protected:
EXPECT_EQ(ViewTransform(), viewTransformOut);
}
// Clear the fling from the previous pan, or stopping it will
// consume the next touchstart
apzc->CancelAnimation();
// Pan back
ApzcPanAndCheckStatus(apzc, tm, time, touchEnd, touchStart, !aShouldTriggerScroll, false, &allowedTouchBehaviors);
ApzcPanAndCheckStatus(apzc, time, touchEnd, touchStart, !aShouldTriggerScroll, false, &allowedTouchBehaviors);
apzc->SampleContentTransformForFrame(testStartTime, &viewTransformOut, pointOut);
EXPECT_EQ(ScreenPoint(), pointOut);
@ -828,7 +843,7 @@ protected:
// Pan down
nsTArray<uint32_t> allowedTouchBehaviors;
allowedTouchBehaviors.AppendElement(mozilla::layers::AllowedTouchBehavior::VERTICAL_PAN);
ApzcPanAndCheckStatus(apzc, tm, time, touchStart, touchEnd, true, true, &allowedTouchBehaviors);
ApzcPanAndCheckStatus(apzc, time, touchStart, touchEnd, true, true, &allowedTouchBehaviors);
// Send the signal that content has handled and preventDefaulted the touch
// events. This flushes the event queue.
@ -896,7 +911,7 @@ TEST_F(APZCBasicTester, Fling) {
ViewTransform viewTransformOut;
// Fling down. Each step scroll further down
ApzcPan(apzc, tm, time, touchStart, touchEnd);
ApzcPan(apzc, time, touchStart, touchEnd);
ScreenPoint lastPoint;
for (int i = 1; i < 50; i+=1) {
apzc->SampleContentTransformForFrame(testStartTime+TimeDuration::FromMilliseconds(i), &viewTransformOut, pointOut);
@ -919,16 +934,15 @@ protected:
int touchEnd = 10;
// Start the fling down.
ApzcPan(apzc, tm, time, touchStart, touchEnd);
ApzcPan(apzc, time, touchStart, touchEnd);
// The touchstart from the pan will leave some cancelled tasks in the queue, clear them out
EXPECT_EQ(2, mcc->RunThroughDelayedTasks());
while (mcc->RunThroughDelayedTasks());
// If we want to tap while the fling is fast, let the fling advance for 10ms only. If we want
// the fling to slow down more, advance to 2000ms. These numbers may need adjusting if our
// friction and threshold values change, but they should be deterministic at least.
int timeDelta = aSlow ? 2000 : 10;
int tapCallsExpected = aSlow ? 1 : 0;
int delayedTasksExpected = aSlow ? 3 : 2;
// Advance the fling animation by timeDelta milliseconds.
ScreenPoint pointOut;
@ -938,8 +952,8 @@ protected:
// Deliver a tap to abort the fling. Ensure that we get a HandleSingleTap
// call out of it if and only if the fling is slow.
EXPECT_CALL(*mcc, HandleSingleTap(_, 0, apzc->GetGuid())).Times(tapCallsExpected);
ApzcTap(apzc, 10, 10, time, 0, nullptr);
EXPECT_EQ(delayedTasksExpected, mcc->RunThroughDelayedTasks());
ApzcTap(apzc, 10, 10, time, 0);
while (mcc->RunThroughDelayedTasks());
// Verify that we didn't advance any further after the fling was aborted, in either case.
ScreenPoint finalPointOut;
@ -949,6 +963,50 @@ protected:
apzc->AssertStateIsReset();
}
void DoFlingStopWithSlowListener(bool aPreventDefault) {
SetMayHaveTouchListeners();
int time = 0;
int touchStart = 50;
int touchEnd = 10;
// Start the fling down.
ApzcPan(apzc, time, touchStart, touchEnd);
apzc->ContentReceivedTouch(false);
while (mcc->RunThroughDelayedTasks());
// Sample the fling a couple of times to ensure it's going.
ScreenPoint point, finalPoint;
ViewTransform viewTransform;
apzc->SampleContentTransformForFrame(testStartTime + TimeDuration::FromMilliseconds(10), &viewTransform, point);
apzc->SampleContentTransformForFrame(testStartTime + TimeDuration::FromMilliseconds(20), &viewTransform, finalPoint);
EXPECT_GT(finalPoint.y, point.y);
// Now we put our finger down to stop the fling
ApzcDown(apzc, 10, 10, time);
// Re-sample to make sure it hasn't moved
apzc->SampleContentTransformForFrame(testStartTime + TimeDuration::FromMilliseconds(30), &viewTransform, point);
EXPECT_EQ(finalPoint.x, point.x);
EXPECT_EQ(finalPoint.y, point.y);
// respond to the touchdown that stopped the fling.
// even if we do a prevent-default on it, the animation should remain stopped.
apzc->ContentReceivedTouch(aPreventDefault);
while (mcc->RunThroughDelayedTasks());
// Verify the page hasn't moved
apzc->SampleContentTransformForFrame(testStartTime + TimeDuration::FromMilliseconds(100), &viewTransform, point);
EXPECT_EQ(finalPoint.x, point.x);
EXPECT_EQ(finalPoint.y, point.y);
// clean up
ApzcUp(apzc, 10, 10, time);
while (mcc->RunThroughDelayedTasks());
apzc->AssertStateIsReset();
}
};
TEST_F(APZCFlingStopTester, FlingStop) {
@ -959,6 +1017,14 @@ TEST_F(APZCFlingStopTester, FlingStopTap) {
DoFlingStopTest(true);
}
TEST_F(APZCFlingStopTester, FlingStopSlowListener) {
DoFlingStopWithSlowListener(false);
}
TEST_F(APZCFlingStopTester, FlingStopPreventDefault) {
DoFlingStopWithSlowListener(true);
}
TEST_F(APZCBasicTester, OverScrollPanning) {
SCOPED_GFX_PREF(APZOverscrollEnabled, bool, true);
@ -966,7 +1032,7 @@ TEST_F(APZCBasicTester, OverScrollPanning) {
int time = 0;
int touchStart = 500;
int touchEnd = 10;
ApzcPan(apzc, tm, time, touchStart, touchEnd);
ApzcPan(apzc, time, touchStart, touchEnd);
EXPECT_TRUE(apzc->IsOverscrolled());
// Note that in the calls to SampleContentTransformForFrame below, the time
@ -1006,7 +1072,7 @@ TEST_F(APZCBasicTester, OverScrollAbort) {
int time = 0;
int touchStart = 500;
int touchEnd = 10;
ApzcPan(apzc, tm, time, touchStart, touchEnd);
ApzcPan(apzc, time, touchStart, touchEnd);
EXPECT_TRUE(apzc->IsOverscrolled());
ScreenPoint pointOut;
@ -1033,7 +1099,7 @@ TEST_F(APZCBasicTester, OverScrollPanningAbort) {
int time = 0;
int touchStart = 500;
int touchEnd = 10;
ApzcPan(apzc, tm, time, touchStart, touchEnd,
ApzcPan(apzc, time, touchStart, touchEnd,
true); // keep finger down
EXPECT_TRUE(apzc->IsOverscrolled());
@ -1049,8 +1115,11 @@ TEST_F(APZCGestureDetectorTester, ShortPress) {
MakeApzcUnzoomable();
int time = 0;
nsEventStatus status = ApzcTap(apzc, 10, 10, time, 100, mcc.get());
EXPECT_EQ(nsEventStatus_eIgnore, status);
ApzcTapAndCheckStatus(apzc, 10, 10, time, 100);
// There will be delayed tasks posted for the long-tap and MAX_TAP timeouts, but
// we want to clear those.
mcc->ClearDelayedTask();
mcc->ClearDelayedTask();
// This verifies that the single tap notification is sent after the
// touchdown is fully processed. The ordering here is important.
@ -1066,8 +1135,11 @@ TEST_F(APZCGestureDetectorTester, MediumPress) {
MakeApzcUnzoomable();
int time = 0;
nsEventStatus status = ApzcTap(apzc, 10, 10, time, 400, mcc.get());
EXPECT_EQ(nsEventStatus_eIgnore, status);
ApzcTapAndCheckStatus(apzc, 10, 10, time, 400);
// There will be delayed tasks posted for the long-tap and MAX_TAP timeouts, but
// we want to clear those.
mcc->ClearDelayedTask();
mcc->ClearDelayedTask();
// This verifies that the single tap notification is sent after the
// touchdown is fully processed. The ordering here is important.
@ -1601,7 +1673,9 @@ TEST_F(APZCTreeManagerTester, HitTesting2) {
// This first pan will move the APZC by 50 pixels, and dispatch a paint request.
// Since this paint request is in the queue to Gecko, transformToGecko will
// take it into account.
ApzcPan(apzcroot, manager, time, 100, 50);
manager->BuildOverscrollHandoffChain(apzcroot);
ApzcPanNoFling(apzcroot, time, 100, 50);
manager->ClearOverscrollHandoffChain();
// Hit where layers[3] used to be. It should now hit the root.
hit = GetTargetAPZC(manager, ScreenPoint(75, 75), transformToApzc, transformToGecko);
@ -1627,7 +1701,9 @@ TEST_F(APZCTreeManagerTester, HitTesting2) {
// request dispatched above has not "completed", we will not dispatch another
// one yet. Now we have an async transform on top of the pending paint request
// transform.
ApzcPan(apzcroot, manager, time, 100, 50);
manager->BuildOverscrollHandoffChain(apzcroot);
ApzcPanNoFling(apzcroot, time, 100, 50);
manager->ClearOverscrollHandoffChain();
// Hit where layers[3] used to be. It should now hit the root.
hit = GetTargetAPZC(manager, ScreenPoint(75, 75), transformToApzc, transformToGecko);

View File

@ -607,17 +607,7 @@ gfxUtils::DrawPixelSnapped(gfxContext* aContext,
// On Mobile, we don't ever want to do this; it has the potential for
// allocating very large temporary surfaces, especially since we'll
// do full-page snapshots often (see bug 749426).
#ifdef MOZ_GFX_OPTIMIZE_MOBILE
// If the pattern translation is large we can get into trouble with pixman's
// 16 bit coordinate limits. For now, we only do this on platforms where
// we know we have the pixman limits. 16384.0 is a somewhat arbitrary
// large number to make sure we avoid the expensive fmod when we can, but
// still maintain a safe margin from the actual limit
if (doTile && (userSpaceToImageSpace._32 > 16384.0 || userSpaceToImageSpace._31 > 16384.0)) {
userSpaceToImageSpace._31 = fmod(userSpaceToImageSpace._31, aImageRect.width);
userSpaceToImageSpace._32 = fmod(userSpaceToImageSpace._32, aImageRect.height);
}
#else
#ifndef MOZ_GFX_OPTIMIZE_MOBILE
// OK now, the hard part left is to account for the subimage sampling
// restriction. If all the transforms involved are just integer
// translations, then we assume no resampling will occur so there's

View File

@ -83,6 +83,8 @@ static const int kSupportedFeatureLevels[] =
class GfxD2DSurfaceReporter MOZ_FINAL : public nsIMemoryReporter
{
~GfxD2DSurfaceReporter() {}
public:
NS_DECL_ISUPPORTS

Some files were not shown because too many files have changed in this diff Show More