Merge inbound to m-c. a=merge

This commit is contained in:
Ryan VanderMeulen 2015-07-27 15:34:42 -04:00
commit d7986de05e
127 changed files with 3292 additions and 662 deletions

View File

@ -1100,11 +1100,14 @@ pref("dom.wakelock.enabled", true);
// Enable webapps add-ons
pref("dom.apps.customization.enabled", true);
// Enable touch caret by default
pref("touchcaret.enabled", true);
// Original caret implementation on collapsed selection.
pref("touchcaret.enabled", false);
// Enable selection caret by default
pref("selectioncaret.enabled", true);
// Original caret implementation on non-collapsed selection.
pref("selectioncaret.enabled", false);
// New implementation to unify touch-caret and selection-carets.
pref("layout.accessiblecaret.enabled", true);
// Enable sync and mozId with Firefox Accounts.
pref("services.sync.fxaccounts.enabled", true);

View File

@ -6,7 +6,7 @@
// to implement the dropdown list.
const PAGECONTENT =
"<html><body onload='document.body.firstChild.focus()'><select>" +
"<html><body onload='gChangeEvents = 0; document.body.firstChild.focus()'><select onchange='gChangeEvents++'>" +
" <optgroup label='First Group'>" +
" <option value=One>One" +
" <option value=Two>Two" +
@ -20,28 +20,40 @@ const PAGECONTENT =
" <optgroup label='Third Group'>" +
" <option value=Seven>Seven" +
" <option value=Eight>Eight" +
" </optgroup>" +
" </optgroup></select><input>" +
"</body></html>";
function openSelectPopup(selectPopup)
function openSelectPopup(selectPopup, withMouse)
{
return new Promise((resolve, reject) => {
selectPopup.addEventListener("popupshown", function popupListener(event) {
selectPopup.removeEventListener("popupshown", popupListener, false)
resolve();
}, false);
setTimeout(() => EventUtils.synthesizeKey("KEY_ArrowDown", { altKey: true, code: "ArrowDown" }), 1500);
});
let popupShownPromise = BrowserTestUtils.waitForEvent(selectPopup, "popupshown");
if (withMouse) {
return Promise.all([popupShownPromise,
BrowserTestUtils.synthesizeMouseAtCenter("select", { }, gBrowser.selectedBrowser)]);
}
setTimeout(() => EventUtils.synthesizeKey("KEY_ArrowDown", { altKey: true, code: "ArrowDown" }), 1500);
return popupShownPromise;
}
function hideSelectPopup(selectPopup)
function hideSelectPopup(selectPopup, withEscape)
{
return new Promise((resolve, reject) => {
selectPopup.addEventListener("popuphidden", function popupListener(event) {
selectPopup.removeEventListener("popuphidden", popupListener, false)
resolve();
}, false);
let popupShownPromise = BrowserTestUtils.waitForEvent(selectPopup, "popuphidden");
if (withEscape) {
EventUtils.synthesizeKey("KEY_Escape", { code: "Escape" });
}
else {
EventUtils.synthesizeKey("KEY_Enter", { code: "Enter" });
}
return popupShownPromise;
}
function getChangeEvents()
{
return ContentTask.spawn(gBrowser.selectedBrowser, {}, function() {
return content.wrappedJSObject.gChangeEvents;
});
}
@ -90,9 +102,28 @@ add_task(function*() {
is(menulist.menuBoxObject.activeChild, menulist.getItemAtIndex(3), "Select item 3 again");
is(menulist.selectedIndex, isWindows ? 3 : 1, "Select item 3 selectedIndex");
is((yield getChangeEvents()), 0, "Before closed - number of change events");
yield hideSelectPopup(selectPopup);
is(menulist.selectedIndex, 3, "Item 3 still selected");
is((yield getChangeEvents()), 1, "After closed - number of change events");
// Opening and closing the popup without changing the value should not fire a change event.
yield openSelectPopup(selectPopup, true);
yield hideSelectPopup(selectPopup, true);
is((yield getChangeEvents()), 1, "Open and close with no change - number of change events");
EventUtils.synthesizeKey("VK_TAB", { });
EventUtils.synthesizeKey("VK_TAB", { shiftKey: true });
is((yield getChangeEvents()), 1, "Tab away from select with no change - number of change events");
yield openSelectPopup(selectPopup, true);
EventUtils.synthesizeKey("KEY_ArrowDown", { code: "ArrowDown" });
yield hideSelectPopup(selectPopup, true);
is((yield getChangeEvents()), isWindows ? 2 : 1, "Open and close with change - number of change events");
EventUtils.synthesizeKey("VK_TAB", { });
EventUtils.synthesizeKey("VK_TAB", { shiftKey: true });
is((yield getChangeEvents()), isWindows ? 2 : 1, "Tab away from select with change - number of change events");
gBrowser.removeCurrentTab();
});

View File

@ -90,7 +90,7 @@ function test() {
},
{
name: "Search with Amazon.com from about:home",
searchURL: replaceUrl("http://www.amazon.com/exec/obidos/external-search/?field-keywords=foo&mode=blended&tag=mozilla-20&sourceid=Mozilla-search"),
searchURL: replaceUrl("https://www.amazon.com/exec/obidos/external-search/?field-keywords=foo&mode=blended&tag=mozilla-20&sourceid=Mozilla-search"),
run: function () {
verify_about_home_search("Amazon.com");
}

View File

@ -13,7 +13,7 @@ function test() {
let engine = Services.search.getEngineByName("Amazon.com");
ok(engine, "Amazon.com");
let base = "http://www.amazon.com/exec/obidos/external-search/?field-keywords=foo&mode=blended&tag=mozilla-20&sourceid=Mozilla-search";
let base = "https://www.amazon.com/exec/obidos/external-search/?field-keywords=foo&mode=blended&tag=mozilla-20&sourceid=Mozilla-search";
let url;
// Test search URLs (including purposes).
@ -29,7 +29,7 @@ function test() {
name: "Amazon.com",
alias: null,
description: "Amazon.com Search",
searchForm: "http://www.amazon.com/exec/obidos/external-search/?field-keywords=&mode=blended&tag=mozilla-20&sourceid=Mozilla-search",
searchForm: "https://www.amazon.com/exec/obidos/external-search/?field-keywords=&mode=blended&tag=mozilla-20&sourceid=Mozilla-search",
type: Ci.nsISearchEngine.TYPE_MOZSEARCH,
hidden: false,
wrappedJSObject: {
@ -45,7 +45,7 @@ function test() {
{
type: "text/html",
method: "GET",
template: "http://www.amazon.com/exec/obidos/external-search/",
template: "https://www.amazon.com/exec/obidos/external-search/",
params: [
{
name: "field-keywords",

View File

@ -18,7 +18,7 @@ function test() {
Services.search.currentEngine = engine;
engine.alias = "a";
let base = "http://www.amazon.com/exec/obidos/external-search/?field-keywords=foo&mode=blended&tag=mozilla-20&sourceid=Mozilla-search";
let base = "https://www.amazon.com/exec/obidos/external-search/?field-keywords=foo&mode=blended&tag=mozilla-20&sourceid=Mozilla-search";
let url;
// Test search URLs (including purposes).

View File

@ -28,8 +28,10 @@ function* ifTestingSupported() {
isnot($(".call-item-stack", callItem.target), null,
"There should be a stack container available now for the draw call.");
is($all(".call-item-stack-fn", callItem.target).length, 4,
"There should be 4 functions on the stack for the draw call.");
// We may have more than 4 functions, depending on whether async
// stacks are available.
ok($all(".call-item-stack-fn", callItem.target).length >= 4,
"There should be at least 4 functions on the stack for the draw call.");
ok($all(".call-item-stack-fn-name", callItem.target)[0].getAttribute("value")
.includes("C()"),

View File

@ -29,8 +29,10 @@ function* ifTestingSupported() {
isnot($(".call-item-stack", callItem.target), null,
"There should be a stack container available now for the draw call.");
is($all(".call-item-stack-fn", callItem.target).length, 4,
"There should be 4 functions on the stack for the draw call.");
// We may have more than 4 functions, depending on whether async
// stacks are available.
ok($all(".call-item-stack-fn", callItem.target).length >= 4,
"There should be at least 4 functions on the stack for the draw call.");
let jumpedToSource = once(window, EVENTS.SOURCE_SHOWN_IN_JS_DEBUGGER);
EventUtils.sendMouseEvent({ type: "mousedown" }, $(".call-item-location", callItem.target));

View File

@ -40,8 +40,10 @@ function* ifTestingSupported() {
"There should be a stack container available now for the draw call.");
is($(".call-item-stack", callItem.target).hidden, false,
"The stack container should now be visible.");
is($all(".call-item-stack-fn", callItem.target).length, 4,
"There should be 4 functions on the stack for the draw call.");
// We may have more than 4 functions, depending on whether async
// stacks are available.
ok($all(".call-item-stack-fn", callItem.target).length >= 4,
"There should be at least 4 functions on the stack for the draw call.");
EventUtils.sendMouseEvent({ type: "dblclick" }, contents, window);
@ -53,8 +55,10 @@ function* ifTestingSupported() {
"There should still be a stack container available for the draw call.");
is($(".call-item-stack", callItem.target).hidden, true,
"The stack container should now be hidden.");
is($all(".call-item-stack-fn", callItem.target).length, 4,
"There should still be 4 functions on the stack for the draw call.");
// We may have more than 4 functions, depending on whether async
// stacks are available.
ok($all(".call-item-stack-fn", callItem.target).length >= 4,
"There should still be at least 4 functions on the stack for the draw call.");
yield teardown(panel);
finish();

View File

@ -758,6 +758,7 @@ you can use these alternative items. Otherwise, their values should be empty. -
<!ENTITY getUserMedia.selectCamera.accesskey "C">
<!ENTITY getUserMedia.selectMicrophone.label "Microphone to share:">
<!ENTITY getUserMedia.selectMicrophone.accesskey "M">
<!ENTITY getUserMedia.audioCapture.label "Audio from the tab will be shared.">
<!ENTITY getUserMedia.allWindowsShared.message "All visible windows on your screen will be shared.">
<!-- Bad Content Blocker Doorhanger Notification -->

View File

@ -556,13 +556,17 @@ identity.loggedIn.signOut.accessKey = O
# LOCALIZATION NOTE (getUserMedia.shareCamera.message, getUserMedia.shareMicrophone.message,
# getUserMedia.shareScreen.message, getUserMedia.shareCameraAndMicrophone.message,
# getUserMedia.shareScreenAndMicrophone.message):
# getUserMedia.shareScreenAndMicrophone.message, getUserMedia.shareCameraAndAudioCapture.message,
# getUserMedia.shareAudioCapture.message, getUserMedia.shareScreenAndAudioCapture.message):
# %S is the website origin (e.g. www.mozilla.org)
getUserMedia.shareCamera.message = Would you like to share your camera with %S?
getUserMedia.shareMicrophone.message = Would you like to share your microphone with %S?
getUserMedia.shareScreen.message = Would you like to share your screen with %S?
getUserMedia.shareCameraAndMicrophone.message = Would you like to share your camera and microphone with %S?
getUserMedia.shareCameraAndAudioCapture.message = Would you like to share your camera and this tab's audio with %S?
getUserMedia.shareScreenAndMicrophone.message = Would you like to share your microphone and screen with %S?
getUserMedia.shareScreenAndAudioCapture.message = Would you like to share this tab's audio and your screen with %S?
getUserMedia.shareAudioCapture.message = Would you like to share this tab's audio with %S?
getUserMedia.selectWindow.label=Window to share:
getUserMedia.selectWindow.accesskey=W
getUserMedia.selectScreen.label=Screen to share:
@ -604,6 +608,7 @@ getUserMedia.sharingApplication.message = You are currently sharing an applicati
getUserMedia.sharingScreen.message = You are currently sharing your screen with this page.
getUserMedia.sharingWindow.message = You are currently sharing a window with this page.
getUserMedia.sharingBrowser.message = You are currently sharing a tab with this page.
getUserMedia.sharingAudioCapture.message = You are currently sharing a tab's audio with this page.
getUserMedia.continueSharing.label = Continue Sharing
getUserMedia.continueSharing.accesskey = C
getUserMedia.stopSharing.label = Stop Sharing
@ -613,6 +618,7 @@ getUserMedia.sharingMenu.label = Tabs sharing devices
getUserMedia.sharingMenu.accesskey = d
# LOCALIZATION NOTE (getUserMedia.sharingMenuCamera
# getUserMedia.sharingMenuMicrophone,
# getUserMedia.sharingMenuAudioCapture,
# getUserMedia.sharingMenuApplication,
# getUserMedia.sharingMenuScreen,
# getUserMedia.sharingMenuWindow,
@ -622,6 +628,11 @@ getUserMedia.sharingMenu.accesskey = d
# getUserMedia.sharingMenuCameraMicrophoneScreen,
# getUserMedia.sharingMenuCameraMicrophoneWindow,
# getUserMedia.sharingMenuCameraMicrophoneBrowser,
# getUserMedia.sharingMenuCameraAudioCapture,
# getUserMedia.sharingMenuCameraAudioCaptureApplication,
# getUserMedia.sharingMenuCameraAudioCaptureScreen,
# getUserMedia.sharingMenuCameraAudioCaptureWindow,
# getUserMedia.sharingMenuCameraAudioCaptureBrowser,
# getUserMedia.sharingMenuCameraApplication,
# getUserMedia.sharingMenuCameraScreen,
# getUserMedia.sharingMenuCameraWindow,
@ -633,6 +644,7 @@ getUserMedia.sharingMenu.accesskey = d
# %S is the website origin (e.g. www.mozilla.org)
getUserMedia.sharingMenuCamera = %S (camera)
getUserMedia.sharingMenuMicrophone = %S (microphone)
getUserMedia.sharingMenuAudioCapture = %S (tab audio)
getUserMedia.sharingMenuApplication = %S (application)
getUserMedia.sharingMenuScreen = %S (screen)
getUserMedia.sharingMenuWindow = %S (window)
@ -642,6 +654,11 @@ getUserMedia.sharingMenuCameraMicrophoneApplication = %S (camera, microphone and
getUserMedia.sharingMenuCameraMicrophoneScreen = %S (camera, microphone and screen)
getUserMedia.sharingMenuCameraMicrophoneWindow = %S (camera, microphone and window)
getUserMedia.sharingMenuCameraMicrophoneBrowser = %S (camera, microphone and tab)
getUserMedia.sharingMenuCameraAudioCapture = %S (camera and tab audio)
getUserMedia.sharingMenuCameraAudioCaptureApplication = %S (camera, tab audio and application)
getUserMedia.sharingMenuCameraAudioCaptureScreen = %S (camera, tab audio and screen)
getUserMedia.sharingMenuCameraAudioCaptureWindow = %S (camera, tab audio and window)
getUserMedia.sharingMenuCameraAudioCaptureBrowser = %S (camera, tab audio and tab)
getUserMedia.sharingMenuCameraApplication = %S (camera and application)
getUserMedia.sharingMenuCameraScreen = %S (camera and screen)
getUserMedia.sharingMenuCameraWindow = %S (camera and window)
@ -650,6 +667,10 @@ getUserMedia.sharingMenuMicrophoneApplication = %S (microphone and application)
getUserMedia.sharingMenuMicrophoneScreen = %S (microphone and screen)
getUserMedia.sharingMenuMicrophoneWindow = %S (microphone and window)
getUserMedia.sharingMenuMicrophoneBrowser = %S (microphone and tab)
getUserMedia.sharingMenuMicrophoneApplication = %S (tab audio and application)
getUserMedia.sharingMenuMicrophoneScreen = %S (tab audio and screen)
getUserMedia.sharingMenuMicrophoneWindow = %S (tab audio and window)
getUserMedia.sharingMenuMicrophoneBrowser = %S (tab audio and tab)
# LOCALIZATION NOTE(getUserMedia.sharingMenuUnknownHost): this is used for the website
# origin for the sharing menu if no readable origin could be deduced from the URL.
getUserMedia.sharingMenuUnknownHost = Unknown origin

File diff suppressed because one or more lines are too long

View File

@ -86,14 +86,21 @@ function prompt(aContentWindow, aWindowID, aCallID, aConstraints, aDevices, aSec
// MediaStreamConstraints defines video as 'boolean or MediaTrackConstraints'.
let video = aConstraints.video || aConstraints.picture;
let audio = aConstraints.audio;
let sharingScreen = video && typeof(video) != "boolean" &&
video.mediaSource != "camera";
let sharingAudio = audio && typeof(audio) != "boolean" &&
audio.mediaSource != "microphone";
for (let device of aDevices) {
device = device.QueryInterface(Ci.nsIMediaDevice);
switch (device.type) {
case "audio":
if (aConstraints.audio) {
audioDevices.push({name: device.name, deviceIndex: devices.length});
// Check that if we got a microphone, we have not requested an audio
// capture, and if we have requested an audio capture, we are not
// getting a microphone instead.
if (audio && (device.mediaSource == "microphone") != sharingAudio) {
audioDevices.push({name: device.name, deviceIndex: devices.length,
mediaSource: device.mediaSource});
devices.push(device);
}
break;
@ -113,7 +120,7 @@ function prompt(aContentWindow, aWindowID, aCallID, aConstraints, aDevices, aSec
if (videoDevices.length)
requestTypes.push(sharingScreen ? "Screen" : "Camera");
if (audioDevices.length)
requestTypes.push("Microphone");
requestTypes.push(sharingAudio ? "AudioCapture" : "Microphone");
if (!requestTypes.length) {
denyRequest({callID: aCallID}, "NotFoundError");
@ -133,6 +140,7 @@ function prompt(aContentWindow, aWindowID, aCallID, aConstraints, aDevices, aSec
secure: aSecure,
requestTypes: requestTypes,
sharingScreen: sharingScreen,
sharingAudio: sharingAudio,
audioDevices: audioDevices,
videoDevices: videoDevices
};

View File

@ -188,7 +188,8 @@ function getHost(uri, href) {
function prompt(aBrowser, aRequest) {
let {audioDevices: audioDevices, videoDevices: videoDevices,
sharingScreen: sharingScreen, requestTypes: requestTypes} = aRequest;
sharingScreen: sharingScreen, sharingAudio: sharingAudio,
requestTypes: requestTypes} = aRequest;
let uri = Services.io.newURI(aRequest.documentURI, null, null);
let host = getHost(uri);
let chromeDoc = aBrowser.ownerDocument;
@ -198,10 +199,9 @@ function prompt(aBrowser, aRequest) {
let message = stringBundle.getFormattedString(stringId, [host]);
let mainLabel;
if (sharingScreen) {
if (sharingScreen || sharingAudio) {
mainLabel = stringBundle.getString("getUserMedia.shareSelectedItems.label");
}
else {
} else {
let string = stringBundle.getString("getUserMedia.shareSelectedDevices.label");
mainLabel = PluralForm.get(requestTypes.length, string);
}
@ -225,8 +225,8 @@ function prompt(aBrowser, aRequest) {
}
}
];
if (!sharingScreen) { // Bug 1037438: implement 'never' for screen sharing.
// Bug 1037438: implement 'never' for screen sharing.
if (!sharingScreen && !sharingAudio) {
secondaryActions.push({
label: stringBundle.getString("getUserMedia.never.label"),
accessKey: stringBundle.getString("getUserMedia.never.accesskey"),
@ -243,10 +243,10 @@ function prompt(aBrowser, aRequest) {
});
}
if (aRequest.secure && !sharingScreen) {
if (aRequest.secure && !sharingScreen && !sharingAudio) {
// Don't show the 'Always' action if the connection isn't secure, or for
// screen sharing (because we can't guess which window the user wants to
// share without prompting).
// screen/audio sharing (because we can't guess which window the user wants
// to share without prompting).
secondaryActions.unshift({
label: stringBundle.getString("getUserMedia.always.label"),
accessKey: stringBundle.getString("getUserMedia.always.accesskey"),
@ -266,7 +266,8 @@ function prompt(aBrowser, aRequest) {
if (aTopic == "shown") {
let PopupNotifications = chromeDoc.defaultView.PopupNotifications;
let popupId = "Devices";
if (requestTypes.length == 1 && requestTypes[0] == "Microphone")
if (requestTypes.length == 1 && (requestTypes[0] == "Microphone" ||
requestTypes[0] == "AudioCapture"))
popupId = "Microphone";
if (requestTypes.indexOf("Screen") != -1)
popupId = "Screen";
@ -384,7 +385,7 @@ function prompt(aBrowser, aRequest) {
chromeDoc.getElementById("webRTC-selectCamera").hidden = !videoDevices.length || sharingScreen;
chromeDoc.getElementById("webRTC-selectWindowOrScreen").hidden = !sharingScreen || !videoDevices.length;
chromeDoc.getElementById("webRTC-selectMicrophone").hidden = !audioDevices.length;
chromeDoc.getElementById("webRTC-selectMicrophone").hidden = !audioDevices.length || sharingAudio;
let camMenupopup = chromeDoc.getElementById("webRTC-selectCamera-menupopup");
let windowMenupopup = chromeDoc.getElementById("webRTC-selectWindow-menupopup");
@ -393,12 +394,16 @@ function prompt(aBrowser, aRequest) {
listScreenShareDevices(windowMenupopup, videoDevices);
else
listDevices(camMenupopup, videoDevices);
listDevices(micMenupopup, audioDevices);
if (!sharingAudio)
listDevices(micMenupopup, audioDevices);
if (requestTypes.length == 2) {
let stringBundle = chromeDoc.defaultView.gNavigatorBundle;
if (!sharingScreen)
addDeviceToList(camMenupopup, stringBundle.getString("getUserMedia.noVideo.label"), "-1");
addDeviceToList(micMenupopup, stringBundle.getString("getUserMedia.noAudio.label"), "-1");
if (!sharingAudio)
addDeviceToList(micMenupopup, stringBundle.getString("getUserMedia.noAudio.label"), "-1");
}
this.mainAction.callback = function(aRemember) {
@ -416,13 +421,18 @@ function prompt(aBrowser, aRequest) {
}
}
if (audioDevices.length) {
let audioDeviceIndex = chromeDoc.getElementById("webRTC-selectMicrophone-menulist").value;
let allowMic = audioDeviceIndex != "-1";
if (allowMic)
allowedDevices.push(audioDeviceIndex);
if (aRemember) {
perms.add(uri, "microphone",
allowMic ? perms.ALLOW_ACTION : perms.DENY_ACTION);
if (!sharingAudio) {
let audioDeviceIndex = chromeDoc.getElementById("webRTC-selectMicrophone-menulist").value;
let allowMic = audioDeviceIndex != "-1";
if (allowMic)
allowedDevices.push(audioDeviceIndex);
if (aRemember) {
perms.add(uri, "microphone",
allowMic ? perms.ALLOW_ACTION : perms.DENY_ACTION);
}
} else {
// Only one device possible for audio capture.
allowedDevices.push(0);
}
}

View File

@ -1860,18 +1860,6 @@ if test -n "$MOZ_ENABLE_PROFILER_SPS"; then
AC_DEFINE(MOZ_ENABLE_PROFILER_SPS)
fi
dnl ========================================================
dnl shark
dnl ========================================================
MOZ_ARG_ENABLE_BOOL(shark,
[ --enable-shark Enable shark remote profiling. Implies --enable-profiling.],
MOZ_SHARK=1,
MOZ_SHARK= )
if test -n "$MOZ_SHARK"; then
MOZ_PROFILING=1
AC_DEFINE(MOZ_SHARK)
fi
dnl ========================================================
dnl instruments
dnl ========================================================
@ -8642,7 +8630,6 @@ AC_SUBST(WARNINGS_AS_ERRORS)
AC_SUBST(MOZ_EXTENSIONS)
AC_SUBST(MOZ_ENABLE_PROFILER_SPS)
AC_SUBST(MOZ_JPROF)
AC_SUBST(MOZ_SHARK)
AC_SUBST(MOZ_INSTRUMENTS)
AC_SUBST(MOZ_CALLGRIND)
AC_SUBST(MOZ_VTUNE)

View File

@ -94,7 +94,7 @@ if (Services.prefs.getBoolPref("javascript.options.asyncstack")) {
let frame = markers[0].endStack;
ok(frame.parent.asyncParent !== null, "Parent frame has async parent");
is(frame.parent.asyncParent.asyncCause, "Promise",
is(frame.parent.asyncParent.asyncCause, "promise callback",
"Async parent has correct cause");
is(frame.parent.asyncParent.functionDisplayName, "makePromise",
"Async parent has correct function name");

View File

@ -35,6 +35,7 @@ NS_IMPL_CYCLE_COLLECTING_RELEASE(AudioChannelAgent)
AudioChannelAgent::AudioChannelAgent()
: mAudioChannelType(AUDIO_AGENT_CHANNEL_ERROR)
, mInnerWindowID(0)
, mIsRegToService(false)
{
}
@ -104,6 +105,10 @@ AudioChannelAgent::InitInternal(nsIDOMWindow* aWindow, int32_t aChannelType,
}
if (aWindow) {
nsCOMPtr<nsPIDOMWindow> pInnerWindow = do_QueryInterface(aWindow);
MOZ_ASSERT(pInnerWindow->IsInnerWindow());
mInnerWindowID = pInnerWindow->WindowID();
nsCOMPtr<nsIDOMWindow> topWindow;
aWindow->GetScriptableTop(getter_AddRefs(topWindow));
mWindow = do_QueryInterface(topWindow);
@ -191,3 +196,18 @@ AudioChannelAgent::WindowID() const
{
return mWindow ? mWindow->WindowID() : 0;
}
void
AudioChannelAgent::WindowAudioCaptureChanged(uint64_t aInnerWindowID)
{
if (aInnerWindowID != mInnerWindowID) {
return;
}
nsCOMPtr<nsIAudioChannelAgentCallback> callback = GetCallback();
if (!callback) {
return;
}
callback->WindowAudioCaptureChanged();
}

View File

@ -34,6 +34,7 @@ public:
AudioChannelAgent();
void WindowVolumeChanged();
void WindowAudioCaptureChanged(uint64_t aInnerWindowID);
nsPIDOMWindow* Window() const
{
@ -61,6 +62,7 @@ private:
nsWeakPtr mWeakCallback;
int32_t mAudioChannelType;
uint64_t mInnerWindowID;
bool mIsRegToService;
};

View File

@ -546,6 +546,38 @@ AudioChannelService::RefreshAgentsVolume(nsPIDOMWindow* aWindow)
}
}
void
AudioChannelService::RefreshAgentsCapture(nsPIDOMWindow* aWindow,
uint64_t aInnerWindowID)
{
MOZ_ASSERT(aWindow);
MOZ_ASSERT(aWindow->IsOuterWindow());
nsCOMPtr<nsIDOMWindow> topWindow;
aWindow->GetScriptableTop(getter_AddRefs(topWindow));
nsCOMPtr<nsPIDOMWindow> pTopWindow = do_QueryInterface(topWindow);
if (!pTopWindow) {
return;
}
AudioChannelWindow* winData = GetWindowData(pTopWindow->WindowID());
// This can happen, but only during shutdown, because the the outer window
// changes ScriptableTop, so that its ID is different.
// In this case either we are capturing, and it's too late because the window
// has been closed anyways, or we are un-capturing, and everything has already
// been cleaned up by the HTMLMediaElements or the AudioContexts.
if (!winData) {
return;
}
nsTObserverArray<AudioChannelAgent*>::ForwardIterator
iter(winData->mAgents);
while (iter.HasMore()) {
iter.GetNext()->WindowAudioCaptureChanged(aInnerWindowID);
}
}
/* static */ const nsAttrValue::EnumTable*
AudioChannelService::GetAudioChannelTable()
{

View File

@ -102,6 +102,14 @@ public:
void RefreshAgentsVolume(nsPIDOMWindow* aWindow);
// This method needs to know the inner window that wants to capture audio. We
// group agents per top outer window, but we can have multiple innerWindow per
// top outerWindow (subiframes, etc.) and we have to identify all the agents
// just for a particular innerWindow.
void RefreshAgentsCapture(nsPIDOMWindow* aWindow,
uint64_t aInnerWindowID);
#ifdef MOZ_WIDGET_GONK
void RegisterSpeakerManager(SpeakerManagerService* aSpeakerManager)
{

View File

@ -6,13 +6,18 @@
interface nsIDOMWindow;
[uuid(4f537c88-3722-4946-9a09-ce559fa0591d)]
[uuid(5fe83b24-38b9-4901-a4a1-d1bd57d3fe18)]
interface nsIAudioChannelAgentCallback : nsISupports
{
/**
* Notified when the window volume/mute is changed
*/
void windowVolumeChanged(in float aVolume, in bool aMuted);
/**
* Notified when the capture state is changed.
*/
void windowAudioCaptureChanged();
};
/**

View File

@ -10,4 +10,4 @@
* designed to be used as input to the C preprocessor *only*.
*/
DOCUMENT_WARNING(WillChangeBudget)
DOCUMENT_WARNING(WillChangeOverBudgetIgnored)

View File

@ -564,7 +564,7 @@ nsPIDOMWindow::nsPIDOMWindow(nsPIDOMWindow *aOuterWindow)
mMayHavePointerEnterLeaveEventListener(false),
mIsModalContentWindow(false),
mIsActive(false), mIsBackground(false),
mAudioMuted(false), mAudioVolume(1.0),
mAudioMuted(false), mAudioVolume(1.0), mAudioCaptured(false),
mDesktopModeViewport(false), mInnerWindow(nullptr),
mOuterWindow(aOuterWindow),
// Make sure no actual window ends up with mWindowID == 0
@ -3745,6 +3745,26 @@ nsPIDOMWindow::RefreshMediaElements()
service->RefreshAgentsVolume(GetOuterWindow());
}
bool
nsPIDOMWindow::GetAudioCaptured() const
{
MOZ_ASSERT(IsInnerWindow());
return mAudioCaptured;
}
nsresult
nsPIDOMWindow::SetAudioCapture(bool aCapture)
{
MOZ_ASSERT(IsInnerWindow());
mAudioCaptured = aCapture;
nsRefPtr<AudioChannelService> service = AudioChannelService::GetOrCreate();
service->RefreshAgentsCapture(GetOuterWindow(), mWindowID);
return NS_OK;
}
// nsISpeechSynthesisGetter
#ifdef MOZ_WEBSPEECH
@ -5533,7 +5553,7 @@ nsGlobalWindow::RequestAnimationFrame(JS::Handle<JS::Value> aCallback,
JS::Rooted<JSObject*> callbackObj(cx, &aCallback.toObject());
nsRefPtr<FrameRequestCallback> callback =
new FrameRequestCallback(callbackObj, GetIncumbentGlobal());
new FrameRequestCallback(cx, callbackObj, GetIncumbentGlobal());
ErrorResult rv;
*aHandle = RequestAnimationFrame(*callback, rv);

View File

@ -185,6 +185,9 @@ public:
float GetAudioVolume() const;
nsresult SetAudioVolume(float aVolume);
bool GetAudioCaptured() const;
nsresult SetAudioCapture(bool aCapture);
virtual void SetServiceWorkersTestingEnabled(bool aEnabled)
{
MOZ_ASSERT(IsOuterWindow());
@ -822,6 +825,8 @@ protected:
bool mAudioMuted;
float mAudioVolume;
bool mAudioCaptured;
// current desktop mode flag.
bool mDesktopModeViewport;

View File

@ -248,6 +248,8 @@ support-files =
[test_anonymousContent_insert.html]
[test_anonymousContent_manipulate_content.html]
[test_appname_override.html]
[test_async_setTimeout_stack.html]
[test_async_setTimeout_stack_across_globals.html]
[test_audioWindowUtils.html]
[test_audioNotification.html]
skip-if = buildapp == 'mulet'

View File

@ -0,0 +1,60 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1142577
-->
<head>
<meta charset="utf-8">
<title>Test for Bug 1142577 - Async stacks for setTimeout</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1142577">Mozilla Bug 1142577</a>
<pre id="stack"></pre>
<script type="application/javascript">
SimpleTest.waitForExplicitFinish();
SimpleTest.requestFlakyTimeout("Testing async stacks across setTimeout");
function getFunctionName(frame) {
return frame.slice(0, frame.indexOf("@"));
}
function a() { b() }
function b() { c() }
function c() { setTimeout(d, 1) }
function d() { e() }
function e() { f() }
function f() { setTimeout(g, 1) }
function g() { h() }
function h() { i() }
function i() {
var stackString = Error().stack;
document.getElementById("stack").textContent = stackString;
var frames = stackString
.split("\n")
.map(getFunctionName)
.filter(function (name) { return !!name; });
is(frames[0], "i");
is(frames[1], "h");
is(frames[2], "g");
is(frames[3], "setTimeout handler*SimpleTest_setTimeoutShim");
is(frames[4], "f");
is(frames[5], "e");
is(frames[6], "d");
is(frames[7], "setTimeout handler*SimpleTest_setTimeoutShim");
is(frames[8], "c");
is(frames[9], "b");
is(frames[10], "a");
SimpleTest.finish();
}
SpecialPowers.pushPrefEnv(
{"set": [['javascript.options.asyncstack', true]]},
a);
</script>
</body>
</html>

View File

@ -0,0 +1,60 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1142577
-->
<head>
<meta charset="utf-8">
<title>Test for Bug 1142577 - Async stacks for setTimeout</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1142577">Mozilla Bug 1142577</a>
<pre id="stack"></pre>
<iframe id="iframe"></iframe>
<script type="application/javascript">
SimpleTest.waitForExplicitFinish();
var otherGlobal = document.getElementById("iframe").contentWindow;
function getFunctionName(frame) {
return frame.slice(0, frame.indexOf("@"));
}
function a() { b() }
function b() { c() }
function c() { otherGlobal.setTimeout(d, 1) }
function d() { e() }
function e() { f() }
function f() { otherGlobal.setTimeout(g, 1) }
function g() { h() }
function h() { i() }
function i() {
var stackString = Error().stack;
document.getElementById("stack").textContent = stackString;
var frames = stackString
.split("\n")
.map(getFunctionName)
.filter(function (name) { return !!name; });
is(frames[0], "i");
is(frames[1], "h");
is(frames[2], "g");
is(frames[3], "setTimeout handler*f");
is(frames[4], "e");
is(frames[5], "d");
is(frames[6], "setTimeout handler*c");
is(frames[7], "b");
is(frames[8], "a");
SimpleTest.finish();
}
SpecialPowers.pushPrefEnv(
{"set": [['javascript.options.asyncstack', true]]},
a);
</script>
</body>
</html>

View File

@ -25,9 +25,10 @@ namespace dom {
class CallbackFunction : public CallbackObject
{
public:
explicit CallbackFunction(JS::Handle<JSObject*> aCallable,
// See CallbackObject for an explanation of the arguments.
explicit CallbackFunction(JSContext* aCx, JS::Handle<JSObject*> aCallable,
nsIGlobalObject* aIncumbentGlobal)
: CallbackObject(aCallable, aIncumbentGlobal)
: CallbackObject(aCx, aCallable, aIncumbentGlobal)
{
}

View File

@ -24,9 +24,10 @@ namespace dom {
class CallbackInterface : public CallbackObject
{
public:
explicit CallbackInterface(JS::Handle<JSObject*> aCallback,
// See CallbackObject for an explanation of the arguments.
explicit CallbackInterface(JSContext* aCx, JS::Handle<JSObject*> aCallback,
nsIGlobalObject *aIncumbentGlobal)
: CallbackObject(aCallback, aIncumbentGlobal)
: CallbackObject(aCx, aCallback, aIncumbentGlobal)
{
}

View File

@ -43,6 +43,7 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(CallbackObject)
NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(CallbackObject)
NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mCallback)
NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mCreationStack)
NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mIncumbentJSGlobal)
NS_IMPL_CYCLE_COLLECTION_TRACE_END
@ -169,6 +170,16 @@ CallbackObject::CallSetup::CallSetup(CallbackObject* aCallback,
}
}
mAsyncStack.emplace(cx, aCallback->GetCreationStack());
if (*mAsyncStack) {
mAsyncCause.emplace(cx, JS_NewStringCopyZ(cx, aExecutionReason));
if (*mAsyncCause) {
mAsyncStackSetter.emplace(cx, *mAsyncStack, *mAsyncCause);
} else {
JS_ClearPendingException(cx);
}
}
// Enter the compartment of our callback, so we can actually work with it.
//
// Note that if the callback is a wrapper, this will not be the same

View File

@ -30,6 +30,7 @@
#include "nsWrapperCache.h"
#include "nsJSEnvironment.h"
#include "xpcpublic.h"
#include "jsapi.h"
namespace mozilla {
namespace dom {
@ -49,10 +50,22 @@ public:
// The caller may pass a global object which will act as an override for the
// incumbent script settings object when the callback is invoked (overriding
// the entry point computed from aCallback). If no override is required, the
// caller should pass null.
explicit CallbackObject(JS::Handle<JSObject*> aCallback, nsIGlobalObject *aIncumbentGlobal)
// caller should pass null. |aCx| is used to capture the current
// stack, which is later used as an async parent when the callback
// is invoked. aCx can be nullptr, in which case no stack is
// captured.
explicit CallbackObject(JSContext* aCx, JS::Handle<JSObject*> aCallback,
nsIGlobalObject *aIncumbentGlobal)
{
Init(aCallback, aIncumbentGlobal);
if (aCx && JS::RuntimeOptionsRef(aCx).asyncStack()) {
JS::RootedObject stack(aCx);
if (!JS::CaptureCurrentStack(aCx, &stack)) {
JS_ClearPendingException(aCx);
}
Init(aCallback, stack, aIncumbentGlobal);
} else {
Init(aCallback, nullptr, aIncumbentGlobal);
}
}
JS::Handle<JSObject*> Callback() const
@ -61,6 +74,15 @@ public:
return CallbackPreserveColor();
}
JSObject* GetCreationStack() const
{
JSObject* result = mCreationStack;
if (result) {
JS::ExposeObjectToActiveJS(result);
}
return result;
}
/*
* This getter does not change the color of the JSObject meaning that the
* object returned is not guaranteed to be kept alive past the next CC.
@ -108,7 +130,8 @@ protected:
explicit CallbackObject(CallbackObject* aCallbackObject)
{
Init(aCallbackObject->mCallback, aCallbackObject->mIncumbentGlobal);
Init(aCallbackObject->mCallback, aCallbackObject->mCreationStack,
aCallbackObject->mIncumbentGlobal);
}
bool operator==(const CallbackObject& aOther) const
@ -121,12 +144,14 @@ protected:
}
private:
inline void Init(JSObject* aCallback, nsIGlobalObject* aIncumbentGlobal)
inline void Init(JSObject* aCallback, JSObject* aCreationStack,
nsIGlobalObject* aIncumbentGlobal)
{
MOZ_ASSERT(aCallback && !mCallback);
// Set script objects before we hold, on the off chance that a GC could
// somehow happen in there... (which would be pretty odd, granted).
mCallback = aCallback;
mCreationStack = aCreationStack;
if (aIncumbentGlobal) {
mIncumbentGlobal = aIncumbentGlobal;
mIncumbentJSGlobal = aIncumbentGlobal->GetGlobalJSObject();
@ -143,12 +168,14 @@ protected:
MOZ_ASSERT_IF(mIncumbentJSGlobal, mCallback);
if (mCallback) {
mCallback = nullptr;
mCreationStack = nullptr;
mIncumbentJSGlobal = nullptr;
mozilla::DropJSObjects(this);
}
}
JS::Heap<JSObject*> mCallback;
JS::Heap<JSObject*> mCreationStack;
// Ideally, we'd just hold a reference to the nsIGlobalObject, since that's
// what we need to pass to AutoIncumbentScript. Unfortunately, that doesn't
// hold the actual JS global alive. So we maintain an additional pointer to
@ -209,6 +236,11 @@ protected:
// always within a request during its lifetime.
Maybe<JS::Rooted<JSObject*> > mRootedCallable;
// Members which are used to set the async stack.
Maybe<JS::Rooted<JSObject*>> mAsyncStack;
Maybe<JS::Rooted<JSString*>> mAsyncCause;
Maybe<JS::AutoSetAsyncStackForNewCalls> mAsyncStackSetter;
// Can't construct a JSAutoCompartment without a JSContext either. Also,
// Put mAc after mAutoEntryScript so that we exit the compartment before
// we pop the JSContext. Though in practice we'll often manually order

View File

@ -3976,7 +3976,7 @@ class CGCallbackTempRoot(CGGeneric):
define = dedent("""
{ // Scope for tempRoot
JS::Rooted<JSObject*> tempRoot(cx, &${val}.toObject());
${declName} = new %s(tempRoot, mozilla::dom::GetIncumbentGlobal());
${declName} = new %s(cx, tempRoot, mozilla::dom::GetIncumbentGlobal());
}
""") % name
CGGeneric.__init__(self, define=define)
@ -13908,7 +13908,7 @@ class CGJSImplClass(CGBindingImplClass):
destructor = ClassDestructor(virtual=False, visibility="private")
baseConstructors = [
("mImpl(new %s(aJSImplObject, /* aIncumbentGlobal = */ nullptr))" %
("mImpl(new %s(nullptr, aJSImplObject, /* aIncumbentGlobal = */ nullptr))" %
jsImplName(descriptor.name)),
"mParent(aParent)"]
parentInterface = descriptor.interface.parent
@ -14053,13 +14053,14 @@ class CGCallback(CGClass):
# CallbackObject does that already.
body = ""
return [ClassConstructor(
[Argument("JS::Handle<JSObject*>", "aCallback"),
[Argument("JSContext*", "aCx"),
Argument("JS::Handle<JSObject*>", "aCallback"),
Argument("nsIGlobalObject*", "aIncumbentGlobal")],
bodyInHeader=True,
visibility="public",
explicit=True,
baseConstructors=[
"%s(aCallback, aIncumbentGlobal)" % self.baseName,
"%s(aCx, aCallback, aIncumbentGlobal)" % self.baseName,
],
body=body)]

View File

@ -8,6 +8,7 @@ support-files =
file_proxies_via_xray.html
forOf_iframe.html
[test_async_stacks.html]
[test_ByteString.html]
[test_InstanceOf.html]
[test_bug560072.html]

View File

@ -0,0 +1,108 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1148593
-->
<head>
<meta charset="utf-8">
<title>Test for Bug 1148593</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript">
/** Test for Bug 1148593 **/
SimpleTest.waitForExplicitFinish();
var TESTS;
// Pop and run the next queued test; finish the suite when none remain.
function nextTest() {
  var test = TESTS.pop();
  if (!test) {
    SimpleTest.finish();
    return;
  }
  test();
}
// Trigger a ReferenceError (noSuchFunction is deliberately undefined) and
// verify that the resulting exception's stack mentions |functionName|,
// then advance to the next queued test.
function checkStack(functionName) {
  try {
    noSuchFunction();
  } catch (err) {
    var found = err.stack.indexOf(functionName) >= 0;
    ok(found, "stack includes " + functionName);
  }
  nextTest();
}
// Listener invoked when the <link> stylesheet finishes loading; the async
// stack captured at handler-assignment time should include
// registerEventListener.
function eventListener() {
  checkStack("registerEventListener");
}
// Assigns the onload handler — this is the frame name checkStack looks for.
function registerEventListener(link) {
  link.onload = eventListener;
}
// Async-stack test for event listeners: load an empty data: stylesheet and
// check the stack recorded where the handler was registered.
function eventTest() {
  var link = document.createElement("link");
  link.rel = "stylesheet";
  link.href = "data:text/css,";
  registerEventListener(link);
  document.body.appendChild(link);
}
// XHR load handler: the async stack should include xhrTest, where send()
// was called.
function xhrListener() {
  checkStack("xhrTest");
}
// Async-stack test for XMLHttpRequest: GET our own document and check the
// stack captured in the load handler.
function xhrTest() {
  var ourFile = location.href;
  var x = new XMLHttpRequest();
  x.onload = xhrListener;
  x.open("get", ourFile, true);
  x.send();
}
// requestAnimationFrame callback: the async stack should include rafTest.
function rafListener() {
  checkStack("rafTest");
}
// Async-stack test for requestAnimationFrame.
function rafTest() {
  requestAnimationFrame(rafListener);
}
// Id of the interval started by intervalTest; cleared on first fire.
var intervalId;
// setInterval handler: stop the interval, then check that the async stack
// includes intervalTest, where the interval was created.
function intervalHandler() {
  clearInterval(intervalId);
  checkStack("intervalTest");
}
// Async-stack test for setInterval.
function intervalTest() {
  intervalId = setInterval(intervalHandler, 5);
}
// message-event handler: the async stack should include postMessageTest,
// where postMessage was called.
function postMessageHandler(ev) {
  ev.stopPropagation();
  checkStack("postMessageTest");
}
// Async-stack test for window.postMessage.
function postMessageTest() {
  window.addEventListener("message", postMessageHandler, true);
  window.postMessage("whatever", "*");
}
// Entry point (called once the asyncstack pref is set): queue every
// sub-test and start the first one. Note nextTest() pops from the end, so
// the tests run in reverse array order.
function runTests() {
  TESTS = [postMessageTest, intervalTest, rafTest, xhrTest, eventTest];
  nextTest();
}
addLoadEvent(function() {
SpecialPowers.pushPrefEnv(
{"set": [['javascript.options.asyncstack', true]]},
runTests);
});
</script>
</head>
<body>
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1148593">Mozilla Bug 1148593</a>
<p id="display"></p>
<div id="content" style="display: none">
</div>
<pre id="test">
</pre>
</body>
</html>

View File

@ -15,6 +15,17 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
SimpleTest.waitForExplicitFinish();
function doTest() {
var file = location.href;
var asyncFrame;
/* Async parent frames from pushPrefEnv don't show up in e10s. */
var isE10S = !SpecialPowers.Services.wm.getMostRecentWindow("navigator:browser");
if (!isE10S && SpecialPowers.getBoolPref("javascript.options.asyncstack")) {
asyncFrame = `Async*@${file}:153:1
`;
} else {
asyncFrame = "";
}
var t = new TestInterfaceJS();
try {
t.testThrowError();
@ -25,12 +36,13 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
is(e.name, "Error", "Should not have an interesting name here");
is(e.message, "We are an Error", "Should have the right message");
is(e.stack,
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html:20:7\n",
`doTest@${file}:31:7
${asyncFrame}`,
"Exception stack should still only show our code");
is(e.fileName,
"http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html",
file,
"Should have the right file name");
is(e.lineNumber, 20, "Should have the right line number");
is(e.lineNumber, 31, "Should have the right line number");
is(e.columnNumber, 7, "Should have the right column number");
}
@ -45,12 +57,13 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
is(e.code, DOMException.NOT_SUPPORTED_ERR,
"Should have the right 'code'");
is(e.stack,
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html:38:7\n",
`doTest@${file}:50:7
${asyncFrame}`,
"Exception stack should still only show our code");
is(e.filename,
"http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html",
file,
"Should still have the right file name");
is(e.lineNumber, 38, "Should still have the right line number");
is(e.lineNumber, 50, "Should still have the right line number");
todo_isnot(e.columnNumber, 0,
"No column number support for DOMException yet");
}
@ -65,12 +78,13 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
is(e.message, "We are a TypeError",
"Should also have the right message (2)");
is(e.stack,
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html:59:7\n",
`doTest@${file}:72:7
${asyncFrame}`,
"Exception stack for TypeError should only show our code");
is(e.fileName,
"http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html",
file,
"Should still have the right file name for TypeError");
is(e.lineNumber, 59, "Should still have the right line number for TypeError");
is(e.lineNumber, 72, "Should still have the right line number for TypeError");
is(e.columnNumber, 7, "Should have the right column number for TypeError");
}
@ -84,14 +98,14 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
is(e.message, "missing argument 0 when calling function Array.indexOf",
"Should also have the right message (3)");
is(e.stack,
"doTest/<@http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html:78:45\n" +
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html:78:7\n"
,
`doTest/<@${file}:92:45
doTest@${file}:92:7
${asyncFrame}`,
"Exception stack for TypeError should only show our code (3)");
is(e.fileName,
"http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html",
file,
"Should still have the right file name for TypeError (3)");
is(e.lineNumber, 78, "Should still have the right line number for TypeError (3)");
is(e.lineNumber, 92, "Should still have the right line number for TypeError (3)");
is(e.columnNumber, 45, "Should have the right column number for TypeError (3)");
}
@ -104,12 +118,13 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
is(e.name, "NS_ERROR_UNEXPECTED", "Name should be sanitized (4)");
is(e.message, "", "Message should be sanitized (5)");
is(e.stack,
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html:99:7\n",
`doTest@${file}:113:7
${asyncFrame}`,
"Exception stack for sanitized exception should only show our code (4)");
is(e.filename,
"http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html",
file,
"Should still have the right file name for sanitized exception (4)");
is(e.lineNumber, 99, "Should still have the right line number for sanitized exception (4)");
is(e.lineNumber, 113, "Should still have the right line number for sanitized exception (4)");
todo_isnot(e.columnNumber, 0, "Should have the right column number for sanitized exception (4)");
}
@ -122,12 +137,13 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
is(e.name, "NS_ERROR_UNEXPECTED", "Name should be sanitized (5)");
is(e.message, "", "Message should be sanitized (5)");
is(e.stack,
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html:117:7\n",
`doTest@${file}:132:7
${asyncFrame}`,
"Exception stack for sanitized exception should only show our code (5)");
is(e.filename,
"http://mochi.test:8888/tests/dom/bindings/test/test_exception_options_from_jsimplemented.html",
file,
"Should still have the right file name for sanitized exception (5)");
is(e.lineNumber, 117, "Should still have the right line number for sanitized exception (5)");
is(e.lineNumber, 132, "Should still have the right line number for sanitized exception (5)");
todo_isnot(e.columnNumber, 0, "Should have the right column number for sanitized exception (5)");
}

View File

@ -37,23 +37,32 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
function doTest() {
var t = new TestInterfaceJS();
/* Async parent frames from pushPrefEnv don't show up in e10s. */
var isE10S = !SpecialPowers.Services.wm.getMostRecentWindow("navigator:browser");
var asyncStack = SpecialPowers.getBoolPref("javascript.options.asyncstack");
var ourFile = "http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html";
var ourFile = location.href;
var parentFrame = (asyncStack && !isE10S) ? `Async*@${ourFile}:121:1
` : "";
Promise.all([
t.testPromiseWithThrowingChromePromiseInit().then(
ensurePromiseFail.bind(null, 1),
checkExn.bind(null, 44, "NS_ERROR_UNEXPECTED", "", undefined,
checkExn.bind(null, 48, "NS_ERROR_UNEXPECTED", "", undefined,
ourFile, 1,
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:44:7\n")),
`doTest@${ourFile}:48:7
` +
parentFrame)),
t.testPromiseWithThrowingContentPromiseInit(function() {
thereIsNoSuchContentFunction1();
}).then(
ensurePromiseFail.bind(null, 2),
checkExn.bind(null, 50, "ReferenceError",
checkExn.bind(null, 56, "ReferenceError",
"thereIsNoSuchContentFunction1 is not defined",
undefined, ourFile, 2,
"doTest/<@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:50:11\ndoTest@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:49:7\n")),
`doTest/<@${ourFile}:56:11
doTest@${ourFile}:55:7
` +
parentFrame)),
t.testPromiseWithThrowingChromeThenFunction().then(
ensurePromiseFail.bind(null, 3),
checkExn.bind(null, 0, "NS_ERROR_UNEXPECTED", "", undefined, "", 3, "")),
@ -61,10 +70,14 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
thereIsNoSuchContentFunction2();
}).then(
ensurePromiseFail.bind(null, 4),
checkExn.bind(null, 61, "ReferenceError",
checkExn.bind(null, 70, "ReferenceError",
"thereIsNoSuchContentFunction2 is not defined",
undefined, ourFile, 4,
"doTest/<@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:61:11\n" + (asyncStack ? "Async*doTest@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:60:7\n" : ""))),
`doTest/<@${ourFile}:70:11
` +
(asyncStack ? `Async*doTest@${ourFile}:69:7
` : "") +
parentFrame)),
t.testPromiseWithThrowingChromeThenable().then(
ensurePromiseFail.bind(null, 5),
checkExn.bind(null, 0, "NS_ERROR_UNEXPECTED", "", undefined, "", 5, "")),
@ -72,22 +85,27 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=1107592
then: function() { thereIsNoSuchContentFunction3(); }
}).then(
ensurePromiseFail.bind(null, 6),
checkExn.bind(null, 72, "ReferenceError",
checkExn.bind(null, 85, "ReferenceError",
"thereIsNoSuchContentFunction3 is not defined",
undefined, ourFile, 6,
"doTest/<.then@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:72:32\n")),
`doTest/<.then@${ourFile}:85:32
`)),
t.testPromiseWithDOMExceptionThrowingPromiseInit().then(
ensurePromiseFail.bind(null, 7),
checkExn.bind(null, 79, "NotFoundError",
checkExn.bind(null, 93, "NotFoundError",
"We are a second DOMException",
DOMException.NOT_FOUND_ERR, ourFile, 7,
"doTest@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:79:7\n")),
`doTest@${ourFile}:93:7
` +
parentFrame)),
t.testPromiseWithDOMExceptionThrowingThenFunction().then(
ensurePromiseFail.bind(null, 8),
checkExn.bind(null, asyncStack ? 85 : 0, "NetworkError",
checkExn.bind(null, asyncStack ? 101 : 0, "NetworkError",
"We are a third DOMException",
DOMException.NETWORK_ERR, asyncStack ? ourFile : "", 8,
asyncStack ? "Async*doTest@http://mochi.test:8888/tests/dom/bindings/test/test_promise_rejections_from_jsimplemented.html:85:7\n" : "")),
(asyncStack ? `Async*doTest@${ourFile}:101:7
` +
parentFrame : ""))),
t.testPromiseWithDOMExceptionThrowingThenable().then(
ensurePromiseFail.bind(null, 9),
checkExn.bind(null, 0, "TypeMismatchError",

View File

@ -287,7 +287,7 @@ DOMEventTargetHelper::SetEventHandler(nsIAtom* aType,
nsRefPtr<EventHandlerNonNull> handler;
JS::Rooted<JSObject*> callable(aCx);
if (aValue.isObject() && JS::IsCallable(callable = &aValue.toObject())) {
handler = new EventHandlerNonNull(callable, dom::GetIncumbentGlobal());
handler = new EventHandlerNonNull(aCx, callable, dom::GetIncumbentGlobal());
}
SetEventHandler(aType, EmptyString(), handler);
return NS_OK;

View File

@ -953,15 +953,15 @@ EventListenerManager::CompileEventHandlerInternal(Listener* aListener,
if (jsEventHandler->EventName() == nsGkAtoms::onerror && win) {
nsRefPtr<OnErrorEventHandlerNonNull> handlerCallback =
new OnErrorEventHandlerNonNull(handler, /* aIncumbentGlobal = */ nullptr);
new OnErrorEventHandlerNonNull(nullptr, handler, /* aIncumbentGlobal = */ nullptr);
jsEventHandler->SetHandler(handlerCallback);
} else if (jsEventHandler->EventName() == nsGkAtoms::onbeforeunload && win) {
nsRefPtr<OnBeforeUnloadEventHandlerNonNull> handlerCallback =
new OnBeforeUnloadEventHandlerNonNull(handler, /* aIncumbentGlobal = */ nullptr);
new OnBeforeUnloadEventHandlerNonNull(nullptr, handler, /* aIncumbentGlobal = */ nullptr);
jsEventHandler->SetHandler(handlerCallback);
} else {
nsRefPtr<EventHandlerNonNull> handlerCallback =
new EventHandlerNonNull(handler, /* aIncumbentGlobal = */ nullptr);
new EventHandlerNonNull(nullptr, handler, /* aIncumbentGlobal = */ nullptr);
jsEventHandler->SetHandler(handlerCallback);
}

View File

@ -52,7 +52,8 @@ function isEnabledMiddleClickPaste()
function isEnabledTouchCaret()
{
try {
return SpecialPowers.getBoolPref("touchcaret.enabled");
return SpecialPowers.getBoolPref("touchcaret.enabled") ||
SpecialPowers.getBoolPref("layout.accessiblecaret.enabled");
} catch (e) {
return false;
}

View File

@ -471,6 +471,12 @@ FMRadio::WindowVolumeChanged(float aVolume, bool aMuted)
return NS_OK;
}
NS_IMETHODIMP
FMRadio::WindowAudioCaptureChanged()
{
return NS_OK;
}
NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(FMRadio)
NS_INTERFACE_MAP_ENTRY(nsISupportsWeakReference)
NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)

View File

@ -2030,6 +2030,7 @@ HTMLMediaElement::HTMLMediaElement(already_AddRefed<mozilla::dom::NodeInfo>& aNo
mAllowCasting(false),
mIsCasting(false),
mAudioCaptured(false),
mAudioCapturedByWindow(false),
mPlayingBeforeSeek(false),
mPlayingThroughTheAudioChannelBeforeSeek(false),
mPausedForInactiveDocumentOrChannel(false),
@ -2097,6 +2098,11 @@ HTMLMediaElement::~HTMLMediaElement()
EndSrcMediaStreamPlayback();
}
if (mCaptureStreamPort) {
mCaptureStreamPort->Destroy();
mCaptureStreamPort = nullptr;
}
NS_ASSERTION(MediaElementTableCount(this, mLoadingSrc) == 0,
"Destroyed media element should no longer be in element table");
@ -4475,8 +4481,7 @@ void HTMLMediaElement::UpdateAudioChannelPlayingState()
(!mPaused &&
(HasAttr(kNameSpaceID_None, nsGkAtoms::loop) ||
(mReadyState >= nsIDOMHTMLMediaElement::HAVE_CURRENT_DATA &&
!IsPlaybackEnded() &&
(!mSrcStream || HasAudio())) ||
!IsPlaybackEnded()) ||
mPlayingThroughTheAudioChannelBeforeSeek));
if (playingThroughTheAudioChannel != mPlayingThroughTheAudioChannel) {
mPlayingThroughTheAudioChannel = playingThroughTheAudioChannel;
@ -4492,7 +4497,7 @@ void HTMLMediaElement::UpdateAudioChannelPlayingState()
if (!mAudioChannelAgent) {
return;
}
mAudioChannelAgent->InitWithWeakCallback(OwnerDoc()->GetWindow(),
mAudioChannelAgent->InitWithWeakCallback(OwnerDoc()->GetInnerWindow(),
static_cast<int32_t>(mAudioChannel),
this);
}
@ -4504,6 +4509,10 @@ void HTMLMediaElement::UpdateAudioChannelPlayingState()
void
HTMLMediaElement::NotifyAudioChannelAgent(bool aPlaying)
{
// Immediately check if this should go to the MSG instead of the normal
// media playback route.
WindowAudioCaptureChanged();
// This is needed to pass nsContentUtils::IsCallerChrome().
// AudioChannel API should not called from content but it can happen that
// this method has some content JS in its stack.
@ -4675,6 +4684,53 @@ HTMLMediaElement::GetTopLevelPrincipal()
}
#endif // MOZ_EME
NS_IMETHODIMP HTMLMediaElement::WindowAudioCaptureChanged()
{
MOZ_ASSERT(mAudioChannelAgent);
if (!OwnerDoc()->GetInnerWindow()) {
return NS_OK;
}
bool captured = OwnerDoc()->GetInnerWindow()->GetAudioCaptured();
if (captured != mAudioCapturedByWindow) {
if (captured) {
mAudioCapturedByWindow = true;
nsCOMPtr<nsPIDOMWindow> window =
do_QueryInterface(OwnerDoc()->GetParentObject());
uint64_t id = window->WindowID();
MediaStreamGraph* msg = MediaStreamGraph::GetInstance();
if (!mPlaybackStream) {
nsRefPtr<DOMMediaStream> stream = CaptureStreamInternal(false, msg);
mCaptureStreamPort = msg->ConnectToCaptureStream(id, stream->GetStream());
} else {
mCaptureStreamPort = msg->ConnectToCaptureStream(id, mPlaybackStream->GetStream());
}
} else {
mAudioCapturedByWindow = false;
if (mDecoder) {
ProcessedMediaStream* ps =
mCaptureStreamPort->GetSource()->AsProcessedStream();
MOZ_ASSERT(ps);
for (uint32_t i = 0; i < mOutputStreams.Length(); i++) {
if (mOutputStreams[i].mStream->GetStream() == ps) {
mOutputStreams.RemoveElementAt(i);
break;
}
}
mDecoder->RemoveOutputStream(ps);
}
mCaptureStreamPort->Destroy();
mCaptureStreamPort = nullptr;
}
}
return NS_OK;
}
AudioTrackList*
HTMLMediaElement::AudioTracks()
{

View File

@ -1074,6 +1074,9 @@ protected:
// Holds a reference to a MediaInputPort connecting mSrcStream to mPlaybackStream.
nsRefPtr<MediaInputPort> mPlaybackStreamInputPort;
// Holds a reference to the stream connecting this stream to the capture sink.
nsRefPtr<MediaInputPort> mCaptureStreamPort;
// Holds a reference to a stream with mSrcStream as input but intended for
// playback. Used so we don't block playback of other video elements
// playing the same mSrcStream.
@ -1283,6 +1286,9 @@ protected:
// True if the sound is being captured.
bool mAudioCaptured;
// True if the sound is being captured by the window.
bool mAudioCapturedByWindow;
// If TRUE then the media element was actively playing before the currently
// in progress seeking. If FALSE then the media element is either not seeking
// or was not actively playing before the current seek. Used to decide whether

View File

@ -155,8 +155,8 @@ ImportXULIntoContentWarning=Importing XUL nodes into a content document is depre
XMLDocumentLoadPrincipalMismatch=Use of document.load forbidden on Documents that come from other Windows. Only the Window in which a Document was created is allowed to call .load on that Document. Preferably, use XMLHttpRequest instead.
# LOCALIZATION NOTE: Do not translate "IndexedDB".
IndexedDBTransactionAbortNavigation=An IndexedDB transaction that was not yet complete has been aborted due to page navigation.
# LOCALIZATION NOTE (WillChangeBudgetWarning): Do not translate Will-change, %1$S,%2$S,%3$S are numbers.
WillChangeBudgetWarning=Will-change memory consumption is too high. Surface area covers %1$S pixels, budget is the document surface area multiplied by %2$S (%3$S pixels). Occurences of will-change over the budget will be ignored.
# LOCALIZATION NOTE: Do not translate Will-change, %1$S,%2$S,%3$S are numbers.
WillChangeOverBudgetIgnoredWarning=Will-change memory consumption is too high. Surface area covers %1$S px, budget is the document surface area multiplied by %2$S (%3$S px). Occurrences of will-change over the budget will be ignored.
# LOCALIZATION NOTE: Do not translate "ServiceWorker".
HittingMaxWorkersPerDomain=A ServiceWorker could not be started immediately because other documents in the same origin are already using the maximum number of workers. The ServiceWorker is now queued and will be started after some of the other workers have completed.
# LOCALIZATION NOTE: Do no translate "setVelocity", "PannerNode", "AudioListener", "speedOfSound" and "dopplerFactor"

View File

@ -0,0 +1,133 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaStreamGraphImpl.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/unused.h"
#include "AudioSegment.h"
#include "mozilla/Logging.h"
#include "mozilla/Attributes.h"
#include "AudioCaptureStream.h"
#include "ImageContainer.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
#include "AudioNodeExternalInputStream.h"
#include "webaudio/MediaStreamAudioDestinationNode.h"
#include <algorithm>
#include "DOMMediaStream.h"
using namespace mozilla::layers;
using namespace mozilla::dom;
using namespace mozilla::gfx;
namespace mozilla
{
// We are mixing to mono until PeerConnection can accept stereo
static const uint32_t MONO = 1;
/**
 * Construct the capture stream and register ourselves as the receiver of
 * mMixer's mixed output (delivered via MixerCallback). Main thread only.
 */
AudioCaptureStream::AudioCaptureStream(DOMMediaStream* aWrapper)
  : ProcessedMediaStream(aWrapper), mTrackCreated(false)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_COUNT_CTOR(AudioCaptureStream);
  mMixer.AddCallback(this);
}
AudioCaptureStream::~AudioCaptureStream()
{
  MOZ_COUNT_DTOR(AudioCaptureStream);
  // Unregister so the mixer does not call back into a destroyed object.
  mMixer.RemoveCallback(this);
}
// Appends the [aFrom, aTo) interval of mixed input audio to our single
// output track (AUDIO_TRACK).
//
// On the first call, every stream listener is notified that the output
// track exists. After that, each tick either appends silence (when we are
// finished, blocked, in a muted cycle, or have no inputs) or mixes the
// corresponding slice of every audio track of every input stream through
// mMixer, which hands the mixed buffer to MixerCallback() below.
void
AudioCaptureStream::ProcessInput(GraphTime aFrom, GraphTime aTo,
                                 uint32_t aFlags)
{
  uint32_t inputCount = mInputs.Length();
  StreamBuffer::Track* track = EnsureTrack(AUDIO_TRACK);
  // Notify the DOM everything is in order.
  if (!mTrackCreated) {
    for (uint32_t i = 0; i < mListeners.Length(); i++) {
      MediaStreamListener* l = mListeners[i];
      AudioSegment tmp;
      l->NotifyQueuedTrackChanges(
        Graph(), AUDIO_TRACK, 0, MediaStreamListener::TRACK_EVENT_CREATED, tmp);
      l->NotifyFinishedTrackCreation(Graph());
    }
    mTrackCreated = true;
  }

  // If the captured stream is connected back to an object on the page (be it
  // an HTMLMediaElement with a stream as source, or an AudioContext), a
  // cycle situation can occur. This can work if it's an AudioContext with at
  // least one DelayNode, but the MSG will mute the whole cycle otherwise.
  bool blocked = mFinished || mBlocked.GetAt(aFrom);
  if (blocked || InMutedCycle() || inputCount == 0) {
    track->Get<AudioSegment>()->AppendNullData(aTo - aFrom);
  } else {
    // We mix down all the tracks of all inputs to a single mono track
    // (everything is {up,down}-mixed to MONO — see the note at the top of
    // this file; stereo is pending PeerConnection support).
    mMixer.StartMixing();
    AudioSegment output;
    for (uint32_t i = 0; i < inputCount; i++) {
      MediaStream* s = mInputs[i]->GetSource();
      StreamBuffer::TrackIter tracks(s->GetStreamBuffer(), MediaSegment::AUDIO);
      while (!tracks.IsEnded()) {
        AudioSegment* inputSegment = tracks->Get<AudioSegment>();
        StreamTime inputStart = s->GraphTimeToStreamTime(aFrom);
        StreamTime inputEnd = s->GraphTimeToStreamTime(aTo);
        AudioSegment toMix;
        toMix.AppendSlice(*inputSegment, inputStart, inputEnd);
        // Care for streams blocked in the [aTo, aFrom] range: pad the
        // missing tail with silence so every track contributes the same
        // number of frames to the mix.
        if (inputEnd - inputStart < aTo - aFrom) {
          toMix.AppendNullData((aTo - aFrom) - (inputEnd - inputStart));
        }
        toMix.Mix(mMixer, MONO, Graph()->GraphRate());
        tracks.Next();
      }
    }

    // This calls MixerCallback below
    mMixer.FinishMixing();
  }

  // Regardless of the status of the input tracks, we go forward.
  mBuffer.AdvanceKnownTracksTime(GraphTimeToStreamTime((aTo)));
}
// MixerCallbackReceiver implementation: receives the planar mixed buffer
// from mMixer (invoked from FinishMixing() in ProcessInput above), copies
// it out of the mixer-owned storage, wraps it in an AudioChunk, and appends
// it to the output track.
void
AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
                                  AudioSampleFormat aFormat, uint32_t aChannels,
                                  uint32_t aFrames, uint32_t aSampleRate)
{
  nsAutoTArray<nsTArray<AudioDataValue>, MONO> output;
  nsAutoTArray<const AudioDataValue*, MONO> bufferPtrs;
  // NOTE(review): output/bufferPtrs are sized to MONO (1) but the loops
  // below iterate aChannels times. This relies on the mixer always being
  // driven with MONO (see the Mix() call in ProcessInput) — confirm
  // aChannels == MONO always holds, otherwise these writes go out of range.
  output.SetLength(MONO);
  bufferPtrs.SetLength(MONO);
  uint32_t written = 0;
  // We need to copy here, because the mixer will reuse the storage, we should
  // not hold onto it. Buffers are in planar format.
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    AudioDataValue* out = output[channel].AppendElements(aFrames);
    PodCopy(out, aMixedBuffer + written, aFrames);
    bufferPtrs[channel] = out;
    written += aFrames;
  }

  AudioChunk chunk;
  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
  chunk.mDuration = aFrames;
  chunk.mBufferFormat = aFormat;
  chunk.mVolume = 1.0f;
  chunk.mChannelData.SetLength(MONO);
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    chunk.mChannelData[channel] = bufferPtrs[channel];
  }

  // Now we have mixed data, simply append it to our track.
  EnsureTrack(AUDIO_TRACK)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
}
}

View File

@ -0,0 +1,40 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef MOZILLA_AUDIOCAPTURESTREAM_H_
#define MOZILLA_AUDIOCAPTURESTREAM_H_
#include "MediaStreamGraph.h"
#include "AudioMixer.h"
#include <algorithm>
namespace mozilla
{
class DOMMediaStream;
/**
 * See MediaStreamGraph::CreateAudioCaptureStream.
 *
 * A ProcessedMediaStream that mixes the audio tracks of all of its input
 * streams into one output audio track (AUDIO_TRACK). The mixing is done by
 * mMixer, which feeds the result back through MixerCallback().
 */
class AudioCaptureStream : public ProcessedMediaStream,
                           public MixerCallbackReceiver
{
public:
  // Main thread only (asserted in the definition).
  explicit AudioCaptureStream(DOMMediaStream* aWrapper);
  virtual ~AudioCaptureStream();

  // ProcessedMediaStream override: appends the [aFrom, aTo) slice of mixed
  // input audio (or silence when blocked/finished/no inputs) to the output.
  void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;

protected:
  // Id of the single output track this stream produces.
  enum { AUDIO_TRACK = 1 };

  // MixerCallbackReceiver override: receives the mixed planar buffer and
  // appends it to the output track.
  void MixerCallback(AudioDataValue* aMixedBuffer, AudioSampleFormat aFormat,
                     uint32_t aChannels, uint32_t aFrames,
                     uint32_t aSampleRate) override;

  AudioMixer mMixer;
  // Whether listeners have been told that AUDIO_TRACK was created.
  bool mTrackCreated;
};
}
#endif /* MOZILLA_AUDIOCAPTURESTREAM_H_ */

View File

@ -4,26 +4,11 @@
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioChannelFormat.h"
#include "nsTArray.h"
#include <algorithm>
namespace mozilla {
enum {
SURROUND_L,
SURROUND_R,
SURROUND_C,
SURROUND_LFE,
SURROUND_SL,
SURROUND_SR
};
static const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6;
static const int IGNORE = CUSTOM_CHANNEL_LAYOUTS;
static const float IGNORE_F = 0.0f;
uint32_t
GetAudioChannelsSuperset(uint32_t aChannels1, uint32_t aChannels2)
{
@ -63,9 +48,6 @@ gUpMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{ { 0, 1, 2, 3, 4, IGNORE } }
};
static const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
{ 0, 5, 9, 12, 14 };
void
AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
uint32_t aOutputChannelCount,
@ -76,8 +58,8 @@ AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
GetAudioChannelsSuperset(aOutputChannelCount, inputChannelCount);
NS_ASSERTION(outputChannelCount > inputChannelCount,
"No up-mix needed");
NS_ASSERTION(inputChannelCount > 0, "Bad number of channels");
NS_ASSERTION(outputChannelCount > 0, "Bad number of channels");
MOZ_ASSERT(inputChannelCount > 0, "Bad number of channels");
MOZ_ASSERT(outputChannelCount > 0, "Bad number of channels");
aChannelArray->SetLength(outputChannelCount);
@ -108,94 +90,4 @@ AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
}
}
/**
* DownMixMatrix represents a conversion matrix efficiently by exploiting the
* fact that each input channel contributes to at most one output channel,
* except possibly for the C input channel in layouts that have one. Also,
* every input channel is multiplied by the same coefficient for every output
* channel it contributes to.
*/
struct DownMixMatrix {
// Every input channel c is copied to output channel mInputDestination[c]
// after multiplying by mInputCoefficient[c].
uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
// If not IGNORE, then the C channel is copied to this output channel after
// multiplying by its coefficient.
uint8_t mCExtraDestination;
float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS];
};
static const DownMixMatrix
gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{
// Downmixes to mono
{ { 0, 0 }, IGNORE, { 0.5f, 0.5f } },
{ { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } },
{ { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { 0.7071f, 0.7071f, 1.0f, IGNORE_F, 0.5f, 0.5f } },
// Downmixes to stereo
{ { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } },
{ { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 0.7071f, 0.7071f } },
// Downmixes to 3-channel
{ { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
// Downmixes to quad
{ { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 1.0f, 1.0f } },
// Downmixes to 5-channel
{ { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }
};
// Down-mix the float input channels in aChannelArray to aOutputChannelCount
// channels, writing the results into aOutputChannels. Inputs with more than
// 6 channels simply have the extra channels dropped; otherwise a
// DownMixMatrix selected by the (input, output) channel-count pair is applied
// sample by sample. Don't call this with input count <= output count.
void
AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
                     float** aOutputChannels,
                     uint32_t aOutputChannelCount,
                     uint32_t aDuration)
{
  uint32_t inputChannelCount = aChannelArray.Length();
  const void* const* inputChannels = aChannelArray.Elements();
  NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");

  if (inputChannelCount > 6) {
    // Just drop the unknown channels.
    for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
      memcpy(aOutputChannels[o], inputChannels[o], aDuration*sizeof(float));
    }
    return;
  }

  // Ignore unknown channels, they're just dropped.
  inputChannelCount = std::min<uint32_t>(6, inputChannelCount);

  // gMixingMatrixIndexByChannels[out - 1] is the first matrix downmixing to
  // `out` channels; the remaining offset selects the input-count variant.
  const DownMixMatrix& m = gDownMixMatrices[
    gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
    inputChannelCount - aOutputChannelCount - 1];

  // This is slow, but general. We can define custom code for special
  // cases later.
  for (uint32_t s = 0; s < aDuration; ++s) {
    // Reserve an extra junk channel at the end for the cases where we
    // want an input channel to contribute to nothing
    float outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1];
    // NOTE: only the first CUSTOM_CHANNEL_LAYOUTS entries are zeroed; the
    // trailing junk slot is write-only and never read back.
    memset(outputChannels, 0, sizeof(float)*(CUSTOM_CHANNEL_LAYOUTS));
    for (uint32_t c = 0; c < inputChannelCount; ++c) {
      outputChannels[m.mInputDestination[c]] +=
        m.mInputCoefficient[c]*(static_cast<const float*>(inputChannels[c]))[s];
    }
    // Utilize the fact that in every layout, C is the third channel.
    if (m.mCExtraDestination != IGNORE) {
      outputChannels[m.mCExtraDestination] +=
        m.mInputCoefficient[SURROUND_C]*(static_cast<const float*>(inputChannels[SURROUND_C]))[s];
    }
    for (uint32_t c = 0; c < aOutputChannelCount; ++c) {
      aOutputChannels[c][s] = outputChannels[c];
    }
  }
}
} // namespace mozilla

View File

@ -9,6 +9,8 @@
#include <stdint.h>
#include "nsTArrayForwardDeclare.h"
#include "AudioSampleFormat.h"
#include "nsTArray.h"
namespace mozilla {
@ -29,6 +31,26 @@ namespace mozilla {
* Only 1, 2, 4 and 6 are currently defined in Web Audio.
*/
enum {
SURROUND_L,
SURROUND_R,
SURROUND_C,
SURROUND_LFE,
SURROUND_SL,
SURROUND_SR
};
const uint32_t CUSTOM_CHANNEL_LAYOUTS = 6;
// This is defined by some Windows SDK header.
#undef IGNORE
const int IGNORE = CUSTOM_CHANNEL_LAYOUTS;
const float IGNORE_F = 0.0f;
const int gMixingMatrixIndexByChannels[CUSTOM_CHANNEL_LAYOUTS - 1] =
{ 0, 5, 9, 12, 14 };
/**
* Return a channel count whose channel layout includes all the channels from
* aChannels1 and aChannels2.
@ -53,19 +75,102 @@ AudioChannelsUpMix(nsTArray<const void*>* aChannelArray,
uint32_t aOutputChannelCount,
const void* aZeroChannel);
/**
* Given an array of input channels (which must be float format!),
* downmix to aOutputChannelCount, and copy the results to the
* channel buffers in aOutputChannels.
* Don't call this with input count <= output count.
*/
void
AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
float** aOutputChannels,
uint32_t aOutputChannelCount,
uint32_t aDuration);
// A version of AudioChannelsDownMix that downmixes int16_ts may be required.
/**
* DownMixMatrix represents a conversion matrix efficiently by exploiting the
* fact that each input channel contributes to at most one output channel,
* except possibly for the C input channel in layouts that have one. Also,
* every input channel is multiplied by the same coefficient for every output
* channel it contributes to.
*/
struct DownMixMatrix {
// Every input channel c is copied to output channel mInputDestination[c]
// after multiplying by mInputCoefficient[c].
uint8_t mInputDestination[CUSTOM_CHANNEL_LAYOUTS];
// If not IGNORE, then the C channel is copied to this output channel after
// multiplying by its coefficient.
uint8_t mCExtraDestination;
float mInputCoefficient[CUSTOM_CHANNEL_LAYOUTS];
};
static const DownMixMatrix
gDownMixMatrices[CUSTOM_CHANNEL_LAYOUTS*(CUSTOM_CHANNEL_LAYOUTS - 1)/2] =
{
// Downmixes to mono
{ { 0, 0 }, IGNORE, { 0.5f, 0.5f } },
{ { 0, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, 0 }, IGNORE, { 0.25f, 0.25f, 0.25f, 0.25f } },
{ { 0, IGNORE, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, IGNORE_F, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 0, 0, IGNORE, 0, 0 }, IGNORE, { 0.7071f, 0.7071f, 1.0f, IGNORE_F, 0.5f, 0.5f } },
// Downmixes to stereo
{ { 0, 1, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, 1 }, IGNORE, { 0.5f, 0.5f, 0.5f, 0.5f } },
{ { 0, 1, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 0, 1 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 0.7071f, 0.7071f } },
// Downmixes to 3-channel
{ { 0, 1, 2, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F } },
{ { 0, 1, 2, IGNORE, IGNORE, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, IGNORE_F, IGNORE_F, IGNORE_F } },
// Downmixes to quad
{ { 0, 1, 2, 3, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } },
{ { 0, 1, 0, IGNORE, 2, 3 }, 1, { 1.0f, 1.0f, 0.7071f, IGNORE_F, 1.0f, 1.0f } },
// Downmixes to 5-channel
{ { 0, 1, 2, 3, 4, IGNORE }, IGNORE, { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, IGNORE_F } }
};
/**
 * Given an array of input channels, downmix to aOutputChannelCount, and copy
 * the results to the channel buffers in aOutputChannels. Don't call this with
 * input count <= output count.
 *
 * T is the sample type; every buffer in aChannelArray must hold aDuration
 * samples of T. Inputs with more than 6 channels have the extra channels
 * dropped; otherwise the DownMixMatrix selected by the (input, output)
 * channel-count pair is applied sample by sample.
 */
template<typename T>
void AudioChannelsDownMix(const nsTArray<const void*>& aChannelArray,
                          T** aOutputChannels,
                          uint32_t aOutputChannelCount,
                          uint32_t aDuration)
{
  uint32_t inputChannelCount = aChannelArray.Length();
  const void* const* inputChannels = aChannelArray.Elements();
  NS_ASSERTION(inputChannelCount > aOutputChannelCount, "Nothing to do");

  if (inputChannelCount > 6) {
    // Just drop the unknown channels.
    for (uint32_t o = 0; o < aOutputChannelCount; ++o) {
      memcpy(aOutputChannels[o], inputChannels[o], aDuration*sizeof(T));
    }
    return;
  }

  // Ignore unknown channels, they're just dropped.
  inputChannelCount = std::min<uint32_t>(6, inputChannelCount);

  // gMixingMatrixIndexByChannels[out - 1] is the first matrix downmixing to
  // `out` channels; the remaining offset selects the input-count variant.
  const DownMixMatrix& m = gDownMixMatrices[
    gMixingMatrixIndexByChannels[aOutputChannelCount - 1] +
    inputChannelCount - aOutputChannelCount - 1];

  // This is slow, but general. We can define custom code for special
  // cases later.
  for (uint32_t s = 0; s < aDuration; ++s) {
    // Reserve an extra junk channel at the end for the cases where we
    // want an input channel to contribute to nothing
    T outputChannels[CUSTOM_CHANNEL_LAYOUTS + 1];
    // NOTE: only the first CUSTOM_CHANNEL_LAYOUTS entries are zeroed; the
    // trailing junk slot is write-only and never read back.
    memset(outputChannels, 0, sizeof(T)*(CUSTOM_CHANNEL_LAYOUTS));
    for (uint32_t c = 0; c < inputChannelCount; ++c) {
      outputChannels[m.mInputDestination[c]] +=
        m.mInputCoefficient[c]*(static_cast<const T*>(inputChannels[c]))[s];
    }
    // Utilize the fact that in every layout, C is the third channel.
    if (m.mCExtraDestination != IGNORE) {
      outputChannels[m.mCExtraDestination] +=
        m.mInputCoefficient[SURROUND_C]*(static_cast<const T*>(inputChannels[SURROUND_C]))[s];
    }
    for (uint32_t c = 0; c < aOutputChannelCount; ++c) {
      aOutputChannels[c][s] = outputChannels[c];
    }
  }
}
} // namespace mozilla

View File

@ -26,7 +26,9 @@ struct MixerCallbackReceiver {
* stream.
*
* AudioMixer::Mix is to be called repeatedly with buffers that have the same
* length, sample rate, sample format and channel count.
* length, sample rate, sample format and channel count. This class works with
* interleaved and plannar buffers, but the buffer mixed must be of the same
* type during a mixing cycle.
*
* When all the tracks have been mixed, calling FinishMixing will call back with
* a buffer containing the mixed audio data.
@ -71,7 +73,7 @@ public:
mSampleRate = mChannels = mFrames = 0;
}
/* Add a buffer to the mix. aSamples is interleaved. */
/* Add a buffer to the mix. */
void Mix(AudioDataValue* aSamples,
uint32_t aChannels,
uint32_t aFrames,

View File

@ -146,6 +146,103 @@ void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler, uint32_t aInR
}
}
// Return a pointer into a planar audio buffer at the position where writing
// should start, given the channel index and a per-channel sample offset.
// aLengthSamples is the total buffer length (all channels).
static AudioDataValue*
PointerForOffsetInChannel(AudioDataValue* aData, size_t aLengthSamples,
                          uint32_t aChannelCount, uint32_t aChannel,
                          uint32_t aOffsetSamples)
{
  const size_t channelLength = aLengthSamples / aChannelCount;
  const size_t channelStart = channelLength * aChannel;
  MOZ_ASSERT(channelStart + aOffsetSamples < aLengthSamples,
             "Offset request out of bounds.");
  return aData + channelStart + aOffsetSamples;
}
// Mix this segment into aMixer as planar (non-interleaved) audio, up- or
// down-mixing each chunk to aOutputChannels channels. Silent chunks are
// written as zeros. Nothing is mixed when the segment has zero duration.
void
AudioSegment::Mix(AudioMixer& aMixer, uint32_t aOutputChannels,
                  uint32_t aSampleRate)
{
  nsAutoTArray<AudioDataValue, AUDIO_PROCESSING_FRAMES* GUESS_AUDIO_CHANNELS>
    buf;
  nsAutoTArray<const void*, GUESS_AUDIO_CHANNELS> channelData;
  // Write cursor into the planar output buffer, in samples per channel.
  uint32_t offsetSamples = 0;
  uint32_t duration = GetDuration();

  if (duration <= 0) {
    MOZ_ASSERT(duration == 0);
    return;
  }

  uint32_t outBufferLength = duration * aOutputChannels;
  buf.SetLength(outBufferLength);

  for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) {
    AudioChunk& c = *ci;
    uint32_t frames = c.mDuration;

    // If the chunk is silent, simply write the right number of silence in the
    // buffers.
    if (c.mBufferFormat == AUDIO_FORMAT_SILENCE) {
      for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
        AudioDataValue* ptr =
          PointerForOffsetInChannel(buf.Elements(), outBufferLength,
                                    aOutputChannels, channel, offsetSamples);
        PodZero(ptr, frames);
      }
    } else {
      // Otherwise, we need to upmix or downmix appropriately, depending on the
      // desired input and output channels.
      channelData.SetLength(c.mChannelData.Length());
      for (uint32_t i = 0; i < channelData.Length(); ++i) {
        channelData[i] = c.mChannelData[i];
      }
      if (channelData.Length() < aOutputChannels) {
        // Up-mix.
        AudioChannelsUpMix(&channelData, aOutputChannels, gZeroChannel);
        for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
          AudioDataValue* ptr =
            PointerForOffsetInChannel(buf.Elements(), outBufferLength,
                                      aOutputChannels, channel, offsetSamples);
          PodCopy(ptr, reinterpret_cast<const AudioDataValue*>(channelData[channel]),
                  frames);
        }
        MOZ_ASSERT(channelData.Length() == aOutputChannels);
      } else if (channelData.Length() > aOutputChannels) {
        // Down mix.
        // Bug fix: a second `uint32_t offsetSamples = 0;` used to be declared
        // here, shadowing the outer write cursor. That made every down-mixed
        // chunk after the first one overwrite the beginning of the buffer
        // instead of being appended at the current offset.
        nsAutoTArray<AudioDataValue*, GUESS_AUDIO_CHANNELS> outChannelPtrs;
        outChannelPtrs.SetLength(aOutputChannels);
        for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
          outChannelPtrs[channel] =
            PointerForOffsetInChannel(buf.Elements(), outBufferLength,
                                      aOutputChannels, channel, offsetSamples);
        }
        AudioChannelsDownMix(channelData, outChannelPtrs.Elements(),
                             aOutputChannels, frames);
      } else {
        // The channel count is already what we want, just copy it over.
        for (uint32_t channel = 0; channel < aOutputChannels; channel++) {
          AudioDataValue* ptr =
            PointerForOffsetInChannel(buf.Elements(), outBufferLength,
                                      aOutputChannels, channel, offsetSamples);
          PodCopy(ptr, reinterpret_cast<const AudioDataValue*>(channelData[channel]),
                  frames);
        }
      }
    }
    offsetSamples += frames;
  }

  if (offsetSamples) {
    MOZ_ASSERT(offsetSamples == outBufferLength / aOutputChannels,
               "We forgot to write some samples?");
    aMixer.Mix(buf.Elements(), aOutputChannels, offsetSamples, aSampleRate);
  }
}
void
AudioSegment::WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aOutputChannels, uint32_t aSampleRate)
{

View File

@ -299,7 +299,14 @@ public:
return chunk;
}
void ApplyVolume(float aVolume);
void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate);
// Mix the segment into a mixer, interleaved. This is useful to output a
// segment to a system audio callback. It up or down mixes to aChannelCount
// channels.
void WriteTo(uint64_t aID, AudioMixer& aMixer, uint32_t aChannelCount,
uint32_t aSampleRate);
// Mix the segment into a mixer, keeping it planar, up or down mixing to
// aChannelCount channels.
void Mix(AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate);
int ChannelCount() {
NS_WARN_IF_FALSE(!mChunks.IsEmpty(),

View File

@ -301,6 +301,18 @@ DOMMediaStream::InitTrackUnionStream(nsIDOMWindow* aWindow,
InitStreamCommon(aGraph->CreateTrackUnionStream(this));
}
// Initialize this DOMMediaStream around a newly created AudioCaptureStream,
// using aGraph, or the global MediaStreamGraph instance when aGraph is null.
void
DOMMediaStream::InitAudioCaptureStream(nsIDOMWindow* aWindow,
                                       MediaStreamGraph* aGraph)
{
  mWindow = aWindow;
  MediaStreamGraph* graph = aGraph ? aGraph : MediaStreamGraph::GetInstance();
  InitStreamCommon(graph->CreateAudioCaptureStream(this));
}
void
DOMMediaStream::InitStreamCommon(MediaStream* aStream)
{
@ -329,6 +341,15 @@ DOMMediaStream::CreateTrackUnionStream(nsIDOMWindow* aWindow,
return stream.forget();
}
// Factory: create a DOMMediaStream whose underlying stream is an
// AudioCaptureStream, and hand ownership to the caller.
already_AddRefed<DOMMediaStream>
DOMMediaStream::CreateAudioCaptureStream(nsIDOMWindow* aWindow,
                                         MediaStreamGraph* aGraph)
{
  nsRefPtr<DOMMediaStream> result = new DOMMediaStream();
  result->InitAudioCaptureStream(aWindow, aGraph);
  return result.forget();
}
void
DOMMediaStream::SetTrackEnabled(TrackID aTrackID, bool aEnabled)
{
@ -653,6 +674,15 @@ DOMLocalMediaStream::CreateTrackUnionStream(nsIDOMWindow* aWindow,
return stream.forget();
}
// Factory: create a DOMLocalMediaStream whose underlying stream is an
// AudioCaptureStream, and hand ownership to the caller.
already_AddRefed<DOMLocalMediaStream>
DOMLocalMediaStream::CreateAudioCaptureStream(nsIDOMWindow* aWindow,
                                              MediaStreamGraph* aGraph)
{
  nsRefPtr<DOMLocalMediaStream> result = new DOMLocalMediaStream();
  result->InitAudioCaptureStream(aWindow, aGraph);
  return result.forget();
}
DOMAudioNodeMediaStream::DOMAudioNodeMediaStream(AudioNode* aNode)
: mStreamNode(aNode)
{

View File

@ -198,6 +198,13 @@ public:
static already_AddRefed<DOMMediaStream> CreateTrackUnionStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
/**
* Create an nsDOMMediaStream whose underlying stream is an
* AudioCaptureStream
*/
static already_AddRefed<DOMMediaStream> CreateAudioCaptureStream(
nsIDOMWindow* aWindow, MediaStreamGraph* aGraph = nullptr);
void SetLogicalStreamStartTime(StreamTime aTime)
{
mLogicalStreamStartTime = aTime;
@ -261,6 +268,8 @@ protected:
MediaStreamGraph* aGraph = nullptr);
void InitTrackUnionStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
void InitAudioCaptureStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
void InitStreamCommon(MediaStream* aStream);
already_AddRefed<AudioTrack> CreateAudioTrack(AudioStreamTrack* aStreamTrack);
already_AddRefed<VideoTrack> CreateVideoTrack(VideoStreamTrack* aStreamTrack);
@ -351,6 +360,12 @@ public:
CreateTrackUnionStream(nsIDOMWindow* aWindow,
MediaStreamGraph* aGraph = nullptr);
/**
* Create an nsDOMLocalMediaStream whose underlying stream is an
* AudioCaptureStream. */
static already_AddRefed<DOMLocalMediaStream> CreateAudioCaptureStream(
nsIDOMWindow* aWindow, MediaStreamGraph* aGraph = nullptr);
protected:
virtual ~DOMLocalMediaStream();
};

View File

@ -289,6 +289,14 @@ DecodedStream::OutputStreams()
return mOutputStreams;
}
// Return true when at least one output stream is still consuming decoded
// data. Main-thread only; takes the monitor to read mOutputStreams safely.
bool
DecodedStream::HasConsumers() const
{
  MOZ_ASSERT(NS_IsMainThread());
  ReentrantMonitorAutoEnter mon(GetReentrantMonitor());
  // Bug fix: the predicate was inverted -- it returned IsEmpty(), i.e. true
  // exactly when there were NO consumers, which made the caller uncapture
  // audio while capturing streams were still attached.
  return !mOutputStreams.IsEmpty();
}
ReentrantMonitor&
DecodedStream::GetReentrantMonitor() const
{

View File

@ -114,6 +114,7 @@ public:
int64_t AudioEndTime() const;
int64_t GetPosition() const;
bool IsFinished() const;
bool HasConsumers() const;
// Return true if stream is finished.
bool SendData(double aVolume, bool aIsSameOrigin);

View File

@ -326,6 +326,13 @@ void MediaDecoder::AddOutputStream(ProcessedMediaStream* aStream,
mDecoderStateMachine->AddOutputStream(aStream, aFinishWhenEnded);
}
// Detach aStream from this decoder's output. Main-thread only; forwards to
// the state machine, which owns the output-stream bookkeeping.
void MediaDecoder::RemoveOutputStream(MediaStream* aStream)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(mDecoderStateMachine, "Must be called after Load().");
  mDecoderStateMachine->RemoveOutputStream(aStream);
}
double MediaDecoder::GetDuration()
{
MOZ_ASSERT(NS_IsMainThread());

View File

@ -399,6 +399,8 @@ public:
// The stream is initially blocked. The decoder is responsible for unblocking
// it while it is playing back.
virtual void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
// Remove an output stream added with AddOutputStream.
virtual void RemoveOutputStream(MediaStream* aStream);
// Return the duration of the video in seconds.
virtual double GetDuration();

View File

@ -3140,6 +3140,25 @@ void MediaDecoderStateMachine::DispatchAudioCaptured()
OwnerThread()->Dispatch(r.forget());
}
// Post a task to the state machine's task queue that switches audio output
// back from capture to the regular audio sink: it clears mAudioCaptured,
// restarts the audio thread if playback is in progress, and reschedules the
// state machine. The task is a no-op if audio is not currently captured.
void MediaDecoderStateMachine::DispatchAudioUncaptured()
{
  // Keep |this| alive until the lambda has run on the task queue.
  nsRefPtr<MediaDecoderStateMachine> self = this;
  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([self] () -> void
  {
    MOZ_ASSERT(self->OnTaskQueue());
    // mAudioCaptured is read/written under the decoder's monitor.
    ReentrantMonitorAutoEnter mon(self->mDecoder->GetReentrantMonitor());
    if (self->mAudioCaptured) {
      // Start again the audio sink
      self->mAudioCaptured = false;
      if (self->IsPlaying()) {
        self->StartAudioThread();
      }
      self->ScheduleStateMachine();
    }
  });
  OwnerThread()->Dispatch(r.forget());
}
void MediaDecoderStateMachine::AddOutputStream(ProcessedMediaStream* aStream,
bool aFinishWhenEnded)
{
@ -3149,6 +3168,16 @@ void MediaDecoderStateMachine::AddOutputStream(ProcessedMediaStream* aStream,
DispatchAudioCaptured();
}
// Remove an output stream previously added with AddOutputStream. Main-thread
// only. Once the decoded stream reports no remaining consumers, dispatch a
// task that routes audio back to the regular audio sink.
void MediaDecoderStateMachine::RemoveOutputStream(MediaStream* aStream)
{
  MOZ_ASSERT(NS_IsMainThread());
  DECODER_LOG("RemoveOutputStream=%p!", aStream);
  mDecodedStream->Remove(aStream);
  if (!mDecodedStream->HasConsumers()) {
    DispatchAudioUncaptured();
  }
}
} // namespace mozilla
// avoid redefined macro in unified build

View File

@ -148,6 +148,8 @@ public:
};
void AddOutputStream(ProcessedMediaStream* aStream, bool aFinishWhenEnded);
// Remove an output stream added with AddOutputStream.
void RemoveOutputStream(MediaStream* aStream);
// Set/Unset dormant state.
void SetDormant(bool aDormant);
@ -159,6 +161,7 @@ private:
void InitializationTask();
void DispatchAudioCaptured();
void DispatchAudioUncaptured();
void Shutdown();
public:

View File

@ -300,7 +300,8 @@ protected:
NS_IMPL_ISUPPORTS(MediaDevice, nsIMediaDevice)
MediaDevice::MediaDevice(MediaEngineSource* aSource, bool aIsVideo)
: mSource(aSource)
: mMediaSource(aSource->GetMediaSource())
, mSource(aSource)
, mIsVideo(aIsVideo)
{
mSource->GetName(mName);
@ -311,9 +312,7 @@ MediaDevice::MediaDevice(MediaEngineSource* aSource, bool aIsVideo)
VideoDevice::VideoDevice(MediaEngineVideoSource* aSource)
: MediaDevice(aSource, true)
{
mMediaSource = aSource->GetMediaSource();
}
{}
/**
* Helper functions that implement the constraints algorithm from
@ -439,6 +438,8 @@ MediaDevice::GetMediaSource(nsAString& aMediaSource)
{
if (mMediaSource == dom::MediaSourceEnum::Microphone) {
aMediaSource.Assign(NS_LITERAL_STRING("microphone"));
} else if (mMediaSource == dom::MediaSourceEnum::AudioCapture) {
aMediaSource.Assign(NS_LITERAL_STRING("audioCapture"));
} else if (mMediaSource == dom::MediaSourceEnum::Window) { // this will go away
aMediaSource.Assign(NS_LITERAL_STRING("window"));
} else { // all the rest are shared
@ -784,11 +785,55 @@ public:
}
}
#endif
// Create a media stream.
nsRefPtr<nsDOMUserMediaStream> trackunion =
nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
mAudioSource, mVideoSource);
if (!trackunion || sInShutdown) {
MediaStreamGraph* msg = MediaStreamGraph::GetInstance();
nsRefPtr<SourceMediaStream> stream = msg->CreateSourceStream(nullptr);
nsRefPtr<DOMLocalMediaStream> domStream;
// AudioCapture is a special case, here, in the sense that we're not really
// using the audio source and the SourceMediaStream, which acts as
// placeholders. We re-route a number of stream internaly in the MSG and mix
// them down instead.
if (mAudioSource &&
mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
domStream = DOMLocalMediaStream::CreateAudioCaptureStream(window);
// It should be possible to pipe the capture stream to anything. CORS is
// not a problem here, we got explicit user content.
domStream->SetPrincipal(window->GetExtantDoc()->NodePrincipal());
msg->RegisterCaptureStreamForWindow(
mWindowID, domStream->GetStream()->AsProcessedStream());
window->SetAudioCapture(true);
} else {
// Normal case, connect the source stream to the track union stream to
// avoid us blocking
nsRefPtr<nsDOMUserMediaStream> trackunion =
nsDOMUserMediaStream::CreateTrackUnionStream(window, mListener,
mAudioSource, mVideoSource);
trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
trackunion->mSourceStream = stream;
trackunion->mPort = port.forget();
// Log the relationship between SourceMediaStream and TrackUnion stream
// Make sure logger starts before capture
AsyncLatencyLogger::Get(true);
LogLatency(AsyncLatencyLogger::MediaStreamCreate,
reinterpret_cast<uint64_t>(stream.get()),
reinterpret_cast<int64_t>(trackunion->GetStream()));
nsCOMPtr<nsIPrincipal> principal;
if (mPeerIdentity) {
principal = nsNullPrincipal::Create();
trackunion->SetPeerIdentity(mPeerIdentity.forget());
} else {
principal = window->GetExtantDoc()->NodePrincipal();
}
trackunion->CombineWithPrincipal(principal);
domStream = trackunion.forget();
}
if (!domStream || sInShutdown) {
nsCOMPtr<nsIDOMGetUserMediaErrorCallback> onFailure = mOnFailure.forget();
LOG(("Returning error for getUserMedia() - no stream"));
@ -802,36 +847,6 @@ public:
}
return NS_OK;
}
trackunion->AudioConfig(aec_on, (uint32_t) aec,
agc_on, (uint32_t) agc,
noise_on, (uint32_t) noise,
playout_delay);
MediaStreamGraph* gm = MediaStreamGraph::GetInstance();
nsRefPtr<SourceMediaStream> stream = gm->CreateSourceStream(nullptr);
// connect the source stream to the track union stream to avoid us blocking
trackunion->GetStream()->AsProcessedStream()->SetAutofinish(true);
nsRefPtr<MediaInputPort> port = trackunion->GetStream()->AsProcessedStream()->
AllocateInputPort(stream, MediaInputPort::FLAG_BLOCK_OUTPUT);
trackunion->mSourceStream = stream;
trackunion->mPort = port.forget();
// Log the relationship between SourceMediaStream and TrackUnion stream
// Make sure logger starts before capture
AsyncLatencyLogger::Get(true);
LogLatency(AsyncLatencyLogger::MediaStreamCreate,
reinterpret_cast<uint64_t>(stream.get()),
reinterpret_cast<int64_t>(trackunion->GetStream()));
nsCOMPtr<nsIPrincipal> principal;
if (mPeerIdentity) {
principal = nsNullPrincipal::Create();
trackunion->SetPeerIdentity(mPeerIdentity.forget());
} else {
principal = window->GetExtantDoc()->NodePrincipal();
}
trackunion->CombineWithPrincipal(principal);
// The listener was added at the beginning in an inactive state.
// Activate our listener. We'll call Start() on the source when get a callback
@ -841,7 +856,7 @@ public:
// Note: includes JS callbacks; must be released on MainThread
TracksAvailableCallback* tracksAvailableCallback =
new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, trackunion);
new TracksAvailableCallback(mManager, mOnSuccess, mWindowID, domStream);
mListener->AudioConfig(aec_on, (uint32_t) aec,
agc_on, (uint32_t) agc,
@ -852,11 +867,11 @@ public:
// because that can take a while.
// Pass ownership of trackunion to the MediaOperationTask
// to ensure it's kept alive until the MediaOperationTask runs (at least).
MediaManager::PostTask(FROM_HERE,
new MediaOperationTask(MEDIA_START, mListener, trackunion,
tracksAvailableCallback,
mAudioSource, mVideoSource, false, mWindowID,
mOnFailure.forget()));
MediaManager::PostTask(
FROM_HERE, new MediaOperationTask(MEDIA_START, mListener, domStream,
tracksAvailableCallback, mAudioSource,
mVideoSource, false, mWindowID,
mOnFailure.forget()));
// We won't need mOnFailure now.
mOnFailure = nullptr;
@ -1245,7 +1260,9 @@ static auto& MediaManager_AnonymizeDevices = MediaManager::AnonymizeDevices;
*/
already_AddRefed<MediaManager::PledgeSourceSet>
MediaManager::EnumerateRawDevices(uint64_t aWindowId, MediaSourceEnum aVideoType,
MediaManager::EnumerateRawDevices(uint64_t aWindowId,
MediaSourceEnum aVideoType,
MediaSourceEnum aAudioType,
bool aFake, bool aFakeTracks)
{
MOZ_ASSERT(NS_IsMainThread());
@ -1275,7 +1292,8 @@ MediaManager::EnumerateRawDevices(uint64_t aWindowId, MediaSourceEnum aVideoType
MediaManager::PostTask(FROM_HERE, NewTaskFrom([id, aWindowId, audioLoopDev,
videoLoopDev, aVideoType,
aFake, aFakeTracks]() mutable {
aAudioType, aFake,
aFakeTracks]() mutable {
nsRefPtr<MediaEngine> backend;
if (aFake) {
backend = new MediaEngineDefault(aFakeTracks);
@ -1294,7 +1312,7 @@ MediaManager::EnumerateRawDevices(uint64_t aWindowId, MediaSourceEnum aVideoType
}
nsTArray<nsRefPtr<AudioDevice>> audios;
GetSources(backend, dom::MediaSourceEnum::Microphone,
GetSources(backend, aAudioType,
&MediaEngine::EnumerateAudioDevices, audios, audioLoopDev);
for (auto& source : audios) {
result->AppendElement(source);
@ -1616,6 +1634,7 @@ MediaManager::GetUserMedia(nsPIDOMWindow* aWindow,
}
MediaSourceEnum videoType = dom::MediaSourceEnum::Camera;
MediaSourceEnum audioType = dom::MediaSourceEnum::Microphone;
if (c.mVideo.IsMediaTrackConstraints()) {
auto& vc = c.mVideo.GetAsMediaTrackConstraints();
@ -1704,6 +1723,23 @@ MediaManager::GetUserMedia(nsPIDOMWindow* aWindow,
privileged = false;
}
}
if (c.mAudio.IsMediaTrackConstraints()) {
auto& ac = c.mAudio.GetAsMediaTrackConstraints();
audioType = StringToEnum(dom::MediaSourceEnumValues::strings,
ac.mMediaSource,
audioType);
// Only enable AudioCapture if the pref is enabled. If it's not, we can deny
// right away.
if (audioType == dom::MediaSourceEnum::AudioCapture &&
!Preferences::GetBool("media.getusermedia.audiocapture.enabled")) {
nsRefPtr<MediaStreamError> error =
new MediaStreamError(aWindow,
NS_LITERAL_STRING("PermissionDeniedError"));
onFailure->OnError(error);
return NS_OK;
}
}
StreamListeners* listeners = AddWindowID(windowID);
// Create a disabled listener to act as a placeholder
@ -1766,7 +1802,8 @@ MediaManager::GetUserMedia(nsPIDOMWindow* aWindow,
(!fake || Preferences::GetBool("media.navigator.permission.fake"));
nsRefPtr<PledgeSourceSet> p = EnumerateDevicesImpl(windowID, videoType,
fake, fakeTracks);
audioType, fake,
fakeTracks);
p->Then([this, onSuccess, onFailure, windowID, c, listener, askPermission,
prefs, isHTTPS, callID, origin](SourceSet*& aDevices) mutable {
ScopedDeletePtr<SourceSet> devices(aDevices); // grab result
@ -1922,7 +1959,9 @@ MediaManager::ToJSArray(SourceSet& aDevices)
}
already_AddRefed<MediaManager::PledgeSourceSet>
MediaManager::EnumerateDevicesImpl(uint64_t aWindowId, MediaSourceEnum aVideoType,
MediaManager::EnumerateDevicesImpl(uint64_t aWindowId,
MediaSourceEnum aVideoType,
MediaSourceEnum aAudioType,
bool aFake, bool aFakeTracks)
{
MOZ_ASSERT(NS_IsMainThread());
@ -1951,12 +1990,13 @@ MediaManager::EnumerateDevicesImpl(uint64_t aWindowId, MediaSourceEnum aVideoTyp
nsRefPtr<Pledge<nsCString>> p = media::GetOriginKey(origin, privateBrowsing,
persist);
p->Then([id, aWindowId, aVideoType,
p->Then([id, aWindowId, aVideoType, aAudioType,
aFake, aFakeTracks](const nsCString& aOriginKey) mutable {
MOZ_ASSERT(NS_IsMainThread());
nsRefPtr<MediaManager> mgr = MediaManager_GetInstance();
nsRefPtr<PledgeSourceSet> p = mgr->EnumerateRawDevices(aWindowId, aVideoType,
nsRefPtr<PledgeSourceSet> p = mgr->EnumerateRawDevices(aWindowId,
aVideoType, aAudioType,
aFake, aFakeTracks);
p->Then([id, aWindowId, aOriginKey](SourceSet*& aDevices) mutable {
ScopedDeletePtr<SourceSet> devices(aDevices); // secondary result
@ -1995,6 +2035,7 @@ MediaManager::EnumerateDevices(nsPIDOMWindow* aWindow,
nsRefPtr<PledgeSourceSet> p = EnumerateDevicesImpl(windowId,
dom::MediaSourceEnum::Camera,
dom::MediaSourceEnum::Microphone,
fake);
p->Then([onSuccess](SourceSet*& aDevices) mutable {
ScopedDeletePtr<SourceSet> devices(aDevices); // grab result
@ -2075,7 +2116,7 @@ StopSharingCallback(MediaManager *aThis,
listener->Invalidate();
}
listener->Remove();
listener->StopScreenWindowSharing();
listener->StopSharing();
}
aListeners->Clear();
aThis->RemoveWindowID(aWindowID);
@ -2398,7 +2439,7 @@ MediaManager::Observe(nsISupports* aSubject, const char* aTopic,
uint64_t windowID = PromiseFlatString(Substring(data, strlen("screen:"))).ToInteger64(&rv);
MOZ_ASSERT(NS_SUCCEEDED(rv));
if (NS_SUCCEEDED(rv)) {
LOG(("Revoking Screeen/windowCapture access for window %llu", windowID));
LOG(("Revoking Screen/windowCapture access for window %llu", windowID));
StopScreensharing(windowID);
}
} else {
@ -2579,7 +2620,7 @@ StopScreensharingCallback(MediaManager *aThis,
if (aListeners) {
auto length = aListeners->Length();
for (size_t i = 0; i < length; ++i) {
aListeners->ElementAt(i)->StopScreenWindowSharing();
aListeners->ElementAt(i)->StopSharing();
}
}
}
@ -2741,7 +2782,7 @@ GetUserMediaCallbackMediaStreamListener::Invalidate()
// Doesn't kill audio
// XXX refactor to combine with Invalidate()?
void
GetUserMediaCallbackMediaStreamListener::StopScreenWindowSharing()
GetUserMediaCallbackMediaStreamListener::StopSharing()
{
NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
if (mVideoSource && !mStopped &&
@ -2754,6 +2795,13 @@ GetUserMediaCallbackMediaStreamListener::StopScreenWindowSharing()
this, nullptr, nullptr,
nullptr, mVideoSource,
mFinished, mWindowID, nullptr));
} else if (mAudioSource &&
mAudioSource->GetMediaSource() == dom::MediaSourceEnum::AudioCapture) {
nsCOMPtr<nsPIDOMWindow> window = nsGlobalWindow::GetInnerWindowWithId(mWindowID);
MOZ_ASSERT(window);
window->SetAudioCapture(false);
MediaStreamGraph::GetInstance()->UnregisterCaptureStreamForWindow(mWindowID);
mStream->Destroy();
}
}

View File

@ -103,7 +103,7 @@ public:
return mStream->AsSourceStream();
}
void StopScreenWindowSharing();
void StopSharing();
void StopTrack(TrackID aID, bool aIsAudio);
@ -597,10 +597,14 @@ public: // TODO: make private once we upgrade to GCC 4.8+ on linux.
static already_AddRefed<nsIWritableVariant> ToJSArray(SourceSet& aDevices);
private:
already_AddRefed<PledgeSourceSet>
EnumerateRawDevices(uint64_t aWindowId, dom::MediaSourceEnum aSrcType,
EnumerateRawDevices(uint64_t aWindowId,
dom::MediaSourceEnum aVideoType,
dom::MediaSourceEnum aAudioType,
bool aFake, bool aFakeTracks);
already_AddRefed<PledgeSourceSet>
EnumerateDevicesImpl(uint64_t aWindowId, dom::MediaSourceEnum aSrcType,
EnumerateDevicesImpl(uint64_t aWindowId,
dom::MediaSourceEnum aVideoSrcType,
dom::MediaSourceEnum aAudioSrcType,
bool aFake = false, bool aFakeTracks = false);
StreamListeners* AddWindowID(uint64_t aWindowId);

View File

@ -18,6 +18,7 @@
#include "mozilla/Attributes.h"
#include "TrackUnionStream.h"
#include "ImageContainer.h"
#include "AudioCaptureStream.h"
#include "AudioChannelService.h"
#include "AudioNodeEngine.h"
#include "AudioNodeStream.h"
@ -3192,6 +3193,17 @@ MediaStreamGraph::CreateTrackUnionStream(DOMMediaStream* aWrapper)
return stream;
}
ProcessedMediaStream*
MediaStreamGraph::CreateAudioCaptureStream(DOMMediaStream* aWrapper)
{
AudioCaptureStream* stream = new AudioCaptureStream(aWrapper);
NS_ADDREF(stream);
MediaStreamGraphImpl* graph = static_cast<MediaStreamGraphImpl*>(this);
stream->SetGraphImpl(graph);
graph->AppendMessage(new CreateMessage(stream));
return stream;
}
AudioNodeExternalInputStream*
MediaStreamGraph::CreateAudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate)
{
@ -3556,4 +3568,65 @@ ProcessedMediaStream::AddInput(MediaInputPort* aPort)
GraphImpl()->SetStreamOrderDirty();
}
void
MediaStreamGraph::RegisterCaptureStreamForWindow(
uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
{
MOZ_ASSERT(NS_IsMainThread());
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
graphImpl->RegisterCaptureStreamForWindow(aWindowId, aCaptureStream);
}
void
MediaStreamGraphImpl::RegisterCaptureStreamForWindow(
uint64_t aWindowId, ProcessedMediaStream* aCaptureStream)
{
MOZ_ASSERT(NS_IsMainThread());
WindowAndStream winAndStream;
winAndStream.mWindowId = aWindowId;
winAndStream.mCaptureStreamSink = aCaptureStream;
mWindowCaptureStreams.AppendElement(winAndStream);
}
void
MediaStreamGraph::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
{
MOZ_ASSERT(NS_IsMainThread());
MediaStreamGraphImpl* graphImpl = static_cast<MediaStreamGraphImpl*>(this);
graphImpl->UnregisterCaptureStreamForWindow(aWindowId);
}
void
MediaStreamGraphImpl::UnregisterCaptureStreamForWindow(uint64_t aWindowId)
{
MOZ_ASSERT(NS_IsMainThread());
for (uint32_t i = 0; i < mWindowCaptureStreams.Length(); i++) {
if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
mWindowCaptureStreams.RemoveElementAt(i);
}
}
}
already_AddRefed<MediaInputPort>
MediaStreamGraph::ConnectToCaptureStream(uint64_t aWindowId,
MediaStream* aMediaStream)
{
return aMediaStream->GraphImpl()->ConnectToCaptureStream(aWindowId,
aMediaStream);
}
already_AddRefed<MediaInputPort>
MediaStreamGraphImpl::ConnectToCaptureStream(uint64_t aWindowId,
MediaStream* aMediaStream)
{
MOZ_ASSERT(NS_IsMainThread());
for (uint32_t i = 0; i < mWindowCaptureStreams.Length(); i++) {
if (mWindowCaptureStreams[i].mWindowId == aWindowId) {
ProcessedMediaStream* sink = mWindowCaptureStreams[i].mCaptureStreamSink;
return sink->AllocateInputPort(aMediaStream, 0);
}
}
return nullptr;
}
} // namespace mozilla

View File

@ -1262,6 +1262,10 @@ public:
* particular tracks of each input stream.
*/
ProcessedMediaStream* CreateTrackUnionStream(DOMMediaStream* aWrapper);
/**
* Create a stream that will mix all its audio input.
*/
ProcessedMediaStream* CreateAudioCaptureStream(DOMMediaStream* aWrapper);
// Internal AudioNodeStreams can only pass their output to another
// AudioNode, whereas external AudioNodeStreams can pass their output
// to an nsAudioStream for playback.
@ -1318,6 +1322,12 @@ public:
*/
TrackRate GraphRate() const { return mSampleRate; }
void RegisterCaptureStreamForWindow(uint64_t aWindowId,
ProcessedMediaStream* aCaptureStream);
void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
already_AddRefed<MediaInputPort> ConnectToCaptureStream(
uint64_t aWindowId, MediaStream* aMediaStream);
protected:
explicit MediaStreamGraph(TrackRate aSampleRate)
: mNextGraphUpdateIndex(1)

View File

@ -532,6 +532,13 @@ public:
}
}
// Capture Stream API. This allows to get a mixed-down output for a window.
void RegisterCaptureStreamForWindow(uint64_t aWindowId,
ProcessedMediaStream* aCaptureStream);
void UnregisterCaptureStreamForWindow(uint64_t aWindowId);
already_AddRefed<MediaInputPort>
ConnectToCaptureStream(uint64_t aWindowId, MediaStream* aMediaStream);
// Data members
//
/**
@ -755,6 +762,16 @@ private:
* Used to pass memory report information across threads.
*/
nsTArray<AudioNodeSizes> mAudioStreamSizes;
struct WindowAndStream
{
uint64_t mWindowId;
nsRefPtr<ProcessedMediaStream> mCaptureStreamSink;
};
/**
* Stream for window audio capture.
*/
nsTArray<WindowAndStream> mWindowCaptureStreams;
/**
* Indicates that the MSG thread should gather data for a memory report.
*/

View File

@ -196,6 +196,7 @@ EXPORTS.mozilla.dom += [
UNIFIED_SOURCES += [
'AbstractThread.cpp',
'AudioCaptureStream.cpp',
'AudioChannelFormat.cpp',
'AudioCompactor.cpp',
'AudioSegment.cpp',

View File

@ -20,6 +20,114 @@ try {
FAKE_ENABLED = true;
}
/**
* This class provides helpers around analysing the audio content in a stream
* using WebAudio AnalyserNodes.
*
* @constructor
* @param {object} stream
* A MediaStream object whose audio track we shall analyse.
*/
function AudioStreamAnalyser(ac, stream) {
if (stream.getAudioTracks().length === 0) {
throw new Error("No audio track in stream");
}
this.audioContext = ac;
this.stream = stream;
this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
this.analyser = this.audioContext.createAnalyser();
this.sourceNode.connect(this.analyser);
this.data = new Uint8Array(this.analyser.frequencyBinCount);
}
AudioStreamAnalyser.prototype = {
/**
* Get an array of frequency domain data for our stream's audio track.
*
* @returns {array} A Uint8Array containing the frequency domain data.
*/
getByteFrequencyData: function() {
this.analyser.getByteFrequencyData(this.data);
return this.data;
},
/**
* Append a canvas to the DOM where the frequency data are drawn.
* Useful to debug tests.
*/
enableDebugCanvas: function() {
var cvs = document.createElement("canvas");
document.getElementById("content").appendChild(cvs);
// Easy: 1px per bin
cvs.width = this.analyser.frequencyBinCount;
cvs.height = 256;
cvs.style.border = "1px solid red";
var c = cvs.getContext('2d');
var self = this;
function render() {
c.clearRect(0, 0, cvs.width, cvs.height);
var array = self.getByteFrequencyData();
for (var i = 0; i < array.length; i++) {
c.fillRect(i, (256 - (array[i])), 1, 256);
}
requestAnimationFrame(render);
}
requestAnimationFrame(render);
},
/**
* Return a Promise, that will be resolved when the function passed as
* argument, when called, returns true (meaning the analysis was a
* success).
*
* @param {function} analysisFunction
* A fonction that performs an analysis, and returns true if the
* analysis was a success (i.e. it found what it was looking for)
*/
waitForAnalysisSuccess: function(analysisFunction) {
var self = this;
return new Promise((resolve, reject) => {
function analysisLoop() {
var success = analysisFunction(self.getByteFrequencyData());
if (success) {
resolve();
return;
}
// else, we need more time
requestAnimationFrame(analysisLoop);
}
analysisLoop();
});
},
/**
* Return the FFT bin index for a given frequency.
*
* @param {double} frequency
* The frequency for whicht to return the bin number.
* @returns {integer} the index of the bin in the FFT array.
*/
binIndexForFrequency: function(frequency) {
return 1 + Math.round(frequency *
this.analyser.fftSize /
this.audioContext.sampleRate);
},
/**
* Reverse operation, get the frequency for a bin index.
*
* @param {integer} index an index in an FFT array
* @returns {double} the frequency for this bin
*/
frequencyForBinIndex: function(index) {
return (index - 1) *
this.audioContext.sampleRate /
this.analyser.fftSize;
}
};
/**
* Create the necessary HTML elements for head and body as used by Mochitests
@ -136,7 +244,10 @@ function setupEnvironment() {
['media.navigator.permission.disabled', true],
['media.navigator.streams.fake', FAKE_ENABLED],
['media.getusermedia.screensharing.enabled', true],
['media.getusermedia.screensharing.allowed_domains', "mochi.test"]
['media.getusermedia.screensharing.allowed_domains', "mochi.test"],
['media.getusermedia.audiocapture.enabled', true],
['media.useAudioChannelService', true],
['media.recorder.audio_node.enabled', true]
]
}, setTestOptions);

View File

@ -30,6 +30,8 @@ skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g emulator seems to be to
[test_dataChannel_noOffer.html]
[test_enumerateDevices.html]
skip-if = buildapp == 'mulet'
[test_getUserMedia_audioCapture.html]
skip-if = toolkit == 'gonk' || buildapp == 'mulet' # b2g emulator seems to be too slow (Bug 1016498 and 1008080)
[test_getUserMedia_basicAudio.html]
skip-if = (toolkit == 'gonk' || buildapp == 'mulet' && debug) # debug-only failure
[test_getUserMedia_basicVideo.html]

View File

@ -642,39 +642,6 @@ DataChannelWrapper.prototype = {
};
/**
* This class provides helpers around analysing the audio content in a stream
* using WebAudio AnalyserNodes.
*
* @constructor
* @param {object} stream
* A MediaStream object whose audio track we shall analyse.
*/
function AudioStreamAnalyser(stream) {
if (stream.getAudioTracks().length === 0) {
throw new Error("No audio track in stream");
}
this.stream = stream;
this.audioContext = new AudioContext();
this.sourceNode = this.audioContext.createMediaStreamSource(this.stream);
this.analyser = this.audioContext.createAnalyser();
this.sourceNode.connect(this.analyser);
this.data = new Uint8Array(this.analyser.frequencyBinCount);
}
AudioStreamAnalyser.prototype = {
/**
* Get an array of frequency domain data for our stream's audio track.
*
* @returns {array} A Uint8Array containing the frequency domain data.
*/
getByteFrequencyData: function() {
this.analyser.getByteFrequencyData(this.data);
return this.data;
}
};
/**
* This class acts as a wrapper around a PeerConnection instance.
*
@ -1559,20 +1526,20 @@ PeerConnectionWrapper.prototype = {
* @returns {Promise}
* A promise that resolves when we're receiving the tone from |from|.
*/
checkReceivingToneFrom : function(from) {
checkReceivingToneFrom : function(audiocontext, from) {
var inputElem = from.localMediaElements[0];
// As input we use the stream of |from|'s first available audio sender.
var inputSenderTracks = from._pc.getSenders().map(sn => sn.track);
var inputAudioStream = from._pc.getLocalStreams()
.find(s => s.getAudioTracks().some(t => inputSenderTracks.some(t2 => t == t2)));
var inputAnalyser = new AudioStreamAnalyser(inputAudioStream);
var inputAnalyser = new AudioStreamAnalyser(audiocontext, inputAudioStream);
// It would have been nice to have a working getReceivers() here, but until
// we do, let's use what remote streams we have.
var outputAudioStream = this._pc.getRemoteStreams()
.find(s => s.getAudioTracks().length > 0);
var outputAnalyser = new AudioStreamAnalyser(outputAudioStream);
var outputAnalyser = new AudioStreamAnalyser(audiocontext, outputAudioStream);
var maxWithIndex = (a, b, i) => (b >= a.value) ? { value: b, index: i } : a;
var initial = { value: -1, index: -1 };

View File

@ -0,0 +1,110 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test AudioCapture </title>
<script type="application/javascript" src="mediaStreamPlayback.js"></script>
</head>
<body>
<pre id="test">
<script>
createHTML({
bug: "1156472",
title: "Test AudioCapture with regular HTMLMediaElement, AudioContext, and HTMLMediaElement playing a MediaStream",
visible: true
});
scriptsReady
.then(() => FAKE_ENABLED = false)
.then(() => {
runTestWhenReady(function() {
// Get an opus file containing a sine wave at maximum amplitude, of duration
// `lengthSeconds`, and of frequency `frequency`.
function getSineWaveFile(frequency, lengthSeconds, callback) {
var chunks = [];
var off = new OfflineAudioContext(1, lengthSeconds * 48000, 48000);
var osc = off.createOscillator();
var rec = new MediaRecorder(osc);
rec.ondataavailable = function(e) {
chunks.push(e.data);
};
rec.onstop = function(e) {
var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs=opus' });
callback(blob);
}
osc.frequency.value = frequency;
osc.start();
rec.start();
off.startRendering().then(function(buffer) {
rec.stop();
});
}
/**
* Get two HTMLMediaElements:
* - One playing a sine tone from a blob (of an opus file created on the fly)
* - One being the output for an AudioContext's OscillatorNode, connected to
* a MediaSourceDestinationNode.
*
* Also, use the AudioContext playing through its AudioDestinationNode another
* tone, using another OscillatorNode.
*
* Capture the output of the document, feed that back into the AudioContext,
* with an AnalyserNode, and check the frequency content to make sure we
* have recorded the three sources.
*
* The three sine tones have frequencies far apart from each other, so that we
* can check that the spectrum of the capture stream contains three
* components with a high magnitude.
*/
var wavtone = createMediaElement("audio", "WaveTone");
var acTone = createMediaElement("audio", "audioContextTone");
var ac = new AudioContext();
var oscThroughMediaElement = ac.createOscillator();
oscThroughMediaElement.frequency.value = 1000;
var oscThroughAudioDestinationNode = ac.createOscillator();
oscThroughAudioDestinationNode.frequency.value = 5000;
var msDest = ac.createMediaStreamDestination();
oscThroughMediaElement.connect(msDest);
oscThroughAudioDestinationNode.connect(ac.destination);
acTone.mozSrcObject = msDest.stream;
getSineWaveFile(10000, 10, function(blob) {
wavtone.src = URL.createObjectURL(blob);
oscThroughMediaElement.start();
oscThroughAudioDestinationNode.start();
wavtone.loop = true;
wavtone.play();
acTone.play();
});
var constraints = {audio: {mediaSource: "audioCapture"}};
return getUserMedia(constraints).then((stream) => {
checkMediaStreamTracks(constraints, stream);
window.grip = stream;
var analyser = new AudioStreamAnalyser(ac, stream);
analyser.enableDebugCanvas();
return analyser.waitForAnalysisSuccess(function(array) {
// We want to find three frequency components here, around 1000, 5000
// and 10000Hz. Frequency are logarithmic. Also make sure we have low
// energy in between, not just a flat white noise.
return (array[analyser.binIndexForFrequency(50)] < 50 &&
array[analyser.binIndexForFrequency(1000)] > 200 &&
array[analyser.binIndexForFrequency(2500)] < 50 &&
array[analyser.binIndexForFrequency(5000)] > 200 &&
array[analyser.binIndexForFrequency(7500)] < 50 &&
array[analyser.binIndexForFrequency(10000)] > 200);
}).then(finish);
}).catch(finish);
});
});
</script>
</pre>
</body>
</html>

View File

@ -136,7 +136,7 @@
]);
test.chain.append([
function PC_LOCAL_CHECK_WEBAUDIO_FLOW_PRESENT(test) {
return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
return test.pcRemote.checkReceivingToneFrom(test.audioCtx, test.pcLocal);
}
]);
test.chain.append([

View File

@ -32,7 +32,7 @@ runNetworkTest(function() {
]);
test.chain.append([
function CHECK_AUDIO_FLOW(test) {
return test.pcRemote.checkReceivingToneFrom(test.pcLocal);
return test.pcRemote.checkReceivingToneFrom(test.audioContext, test.pcLocal);
}
]);
test.run();

View File

@ -313,12 +313,9 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
bool aIsOffline,
AudioChannel aChannel,
uint32_t aNumberOfChannels,
uint32_t aLength,
float aSampleRate)
: AudioNode(aContext,
aIsOffline ? aNumberOfChannels : 2,
ChannelCountMode::Explicit,
ChannelInterpretation::Speakers)
uint32_t aLength, float aSampleRate)
: AudioNode(aContext, aIsOffline ? aNumberOfChannels : 2,
ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
, mFramesToProduce(aLength)
, mAudioChannel(AudioChannel::Normal)
, mIsOffline(aIsOffline)
@ -326,6 +323,7 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
, mExtraCurrentTime(0)
, mExtraCurrentTimeSinceLastStartedBlocking(0)
, mExtraCurrentTimeUpdatedSinceLastStableState(false)
, mCaptured(false)
{
bool startWithAudioDriver = true;
MediaStreamGraph* graph = aIsOffline ?
@ -505,6 +503,33 @@ AudioDestinationNode::WindowVolumeChanged(float aVolume, bool aMuted)
return NS_OK;
}
NS_IMETHODIMP
AudioDestinationNode::WindowAudioCaptureChanged()
{
MOZ_ASSERT(mAudioChannelAgent);
if (!mStream || Context()->IsOffline()) {
return NS_OK;
}
bool captured = GetOwner()->GetAudioCaptured();
if (captured != mCaptured) {
if (captured) {
nsCOMPtr<nsPIDOMWindow> window = Context()->GetParentObject();
uint64_t id = window->WindowID();
mCaptureStreamPort =
mStream->Graph()->ConnectToCaptureStream(id, mStream);
} else {
mCaptureStreamPort->Disconnect();
mCaptureStreamPort->Destroy();
}
mCaptured = captured;
}
return NS_OK;
}
AudioChannel
AudioDestinationNode::MozAudioChannelType() const
{
@ -591,6 +616,8 @@ AudioDestinationNode::CreateAudioChannelAgent()
// The AudioChannelAgent must start playing immediately in order to avoid
// race conditions with mozinterruptbegin/end events.
InputMuted(false);
WindowAudioCaptureChanged();
}
void
@ -682,6 +709,7 @@ AudioDestinationNode::InputMuted(bool aMuted)
return;
}
WindowAudioCaptureChanged();
WindowVolumeChanged(volume, muted);
}

View File

@ -99,6 +99,7 @@ private:
uint32_t mFramesToProduce;
nsCOMPtr<nsIAudioChannelAgent> mAudioChannelAgent;
nsRefPtr<MediaInputPort> mCaptureStreamPort;
nsRefPtr<Promise> mOfflineRenderingPromise;
@ -111,6 +112,7 @@ private:
double mExtraCurrentTime;
double mExtraCurrentTimeSinceLastStartedBlocking;
bool mExtraCurrentTimeUpdatedSinceLastStableState;
bool mCaptured;
};
} // namespace dom

View File

@ -291,6 +291,13 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
// We spawn threads to handle gUM runnables, so we must protect the member vars
MutexAutoLock lock(mMutex);
if (aMediaSource == dom::MediaSourceEnum::AudioCapture) {
nsRefPtr<MediaEngineWebRTCAudioCaptureSource> audioCaptureSource =
new MediaEngineWebRTCAudioCaptureSource(nullptr);
aASources->AppendElement(audioCaptureSource);
return;
}
#ifdef MOZ_WIDGET_ANDROID
jobject context = mozilla::AndroidBridge::Bridge()->GetGlobalContextRef();
@ -358,15 +365,14 @@ MediaEngineWebRTC::EnumerateAudioDevices(dom::MediaSourceEnum aMediaSource,
strcpy(uniqueId,deviceName); // safe given assert and initialization/error-check
}
nsRefPtr<MediaEngineWebRTCAudioSource> aSource;
nsRefPtr<MediaEngineAudioSource> aSource;
NS_ConvertUTF8toUTF16 uuid(uniqueId);
if (mAudioSources.Get(uuid, getter_AddRefs(aSource))) {
// We've already seen this device, just append.
aASources->AppendElement(aSource.get());
} else {
aSource = new MediaEngineWebRTCAudioSource(
mThread, mVoiceEngine, i, deviceName, uniqueId
);
aSource = new MediaEngineWebRTCMicrophoneSource(mThread, mVoiceEngine, i,
deviceName, uniqueId);
mAudioSources.Put(uuid, aSource); // Hashtable takes ownership.
aASources->AppendElement(aSource);
}
@ -385,9 +391,8 @@ ClearVideoSource (const nsAString&, // unused
}
static PLDHashOperator
ClearAudioSource (const nsAString&, // unused
MediaEngineWebRTCAudioSource* aData,
void *userArg)
ClearAudioSource(const nsAString &, // unused
MediaEngineAudioSource *aData, void *userArg)
{
if (aData) {
aData->Shutdown();

View File

@ -133,13 +133,77 @@ private:
void GetCapability(size_t aIndex, webrtc::CaptureCapability& aOut) override;
};
class MediaEngineWebRTCAudioSource : public MediaEngineAudioSource,
public webrtc::VoEMediaProcess,
private MediaConstraintsHelper
class MediaEngineWebRTCAudioCaptureSource : public MediaEngineAudioSource
{
public:
MediaEngineWebRTCAudioSource(nsIThread* aThread, webrtc::VoiceEngine* aVoiceEnginePtr,
int aIndex, const char* name, const char* uuid)
NS_DECL_THREADSAFE_ISUPPORTS
explicit MediaEngineWebRTCAudioCaptureSource(const char* aUuid)
: MediaEngineAudioSource(kReleased)
{
}
void GetName(nsAString& aName) override;
void GetUUID(nsACString& aUUID) override;
nsresult Allocate(const dom::MediaTrackConstraints& aConstraints,
const MediaEnginePrefs& aPrefs,
const nsString& aDeviceId) override
{
// Nothing to do here, everything is managed in MediaManager.cpp
return NS_OK;
}
nsresult Deallocate() override
{
// Nothing to do here, everything is managed in MediaManager.cpp
return NS_OK;
}
void Shutdown() override
{
// Nothing to do here, everything is managed in MediaManager.cpp
}
nsresult Start(SourceMediaStream* aMediaStream, TrackID aId) override;
nsresult Stop(SourceMediaStream* aMediaStream, TrackID aId) override;
void SetDirectListeners(bool aDirect) override
{}
nsresult Config(bool aEchoOn, uint32_t aEcho, bool aAgcOn,
uint32_t aAGC, bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay) override
{
return NS_OK;
}
void NotifyPull(MediaStreamGraph* aGraph, SourceMediaStream* aSource,
TrackID aID, StreamTime aDesiredTime) override
{}
const dom::MediaSourceEnum GetMediaSource() override
{
return dom::MediaSourceEnum::AudioCapture;
}
bool IsFake() override
{
return false;
}
nsresult TakePhoto(PhotoCallback* aCallback) override
{
return NS_ERROR_NOT_IMPLEMENTED;
}
uint32_t GetBestFitnessDistance(
const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
const nsString& aDeviceId) override;
protected:
virtual ~MediaEngineWebRTCAudioCaptureSource() { Shutdown(); }
nsCString mUUID;
};
class MediaEngineWebRTCMicrophoneSource : public MediaEngineAudioSource,
public webrtc::VoEMediaProcess,
private MediaConstraintsHelper
{
public:
MediaEngineWebRTCMicrophoneSource(nsIThread* aThread,
webrtc::VoiceEngine* aVoiceEnginePtr,
int aIndex,
const char* name,
const char* uuid)
: MediaEngineAudioSource(kReleased)
, mVoiceEngine(aVoiceEnginePtr)
, mMonitor("WebRTCMic.Monitor")
@ -207,7 +271,7 @@ public:
virtual void Shutdown() override;
protected:
~MediaEngineWebRTCAudioSource() { Shutdown(); }
~MediaEngineWebRTCMicrophoneSource() { Shutdown(); }
private:
void Init();
@ -294,7 +358,7 @@ private:
// Store devices we've already seen in a hashtable for quick return.
// Maps UUID to MediaEngineSource (one set for audio, one for video).
nsRefPtrHashtable<nsStringHashKey, MediaEngineVideoSource> mVideoSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineWebRTCAudioSource> mAudioSources;
nsRefPtrHashtable<nsStringHashKey, MediaEngineAudioSource> mAudioSources;
};
}

View File

@ -41,9 +41,10 @@ extern PRLogModuleInfo* GetMediaManagerLog();
#define LOG_FRAMES(msg) MOZ_LOG(GetMediaManagerLog(), mozilla::LogLevel::Verbose, msg)
/**
* Webrtc audio source.
* Webrtc microphone source source.
*/
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioSource)
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCMicrophoneSource)
NS_IMPL_ISUPPORTS0(MediaEngineWebRTCAudioCaptureSource)
// XXX temp until MSG supports registration
StaticRefPtr<AudioOutputObserver> gFarendObserver;
@ -177,7 +178,7 @@ AudioOutputObserver::InsertFarEnd(const AudioDataValue *aBuffer, uint32_t aFrame
}
void
MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
MediaEngineWebRTCMicrophoneSource::GetName(nsAString& aName)
{
if (mInitDone) {
aName.Assign(mDeviceName);
@ -187,7 +188,7 @@ MediaEngineWebRTCAudioSource::GetName(nsAString& aName)
}
void
MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
MediaEngineWebRTCMicrophoneSource::GetUUID(nsACString& aUUID)
{
if (mInitDone) {
aUUID.Assign(mDeviceUUID);
@ -197,10 +198,10 @@ MediaEngineWebRTCAudioSource::GetUUID(nsACString& aUUID)
}
nsresult
MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay)
MediaEngineWebRTCMicrophoneSource::Config(bool aEchoOn, uint32_t aEcho,
bool aAgcOn, uint32_t aAGC,
bool aNoiseOn, uint32_t aNoise,
int32_t aPlayoutDelay)
{
LOG(("Audio config: aec: %d, agc: %d, noise: %d",
aEchoOn ? aEcho : -1,
@ -267,7 +268,7 @@ MediaEngineWebRTCAudioSource::Config(bool aEchoOn, uint32_t aEcho,
// Infinity = UINT32_MAX e.g. device cannot satisfy accumulated ConstraintSets.
// A finite result may be used to calculate this device's ranking as a choice.
uint32_t MediaEngineWebRTCAudioSource::GetBestFitnessDistance(
uint32_t MediaEngineWebRTCMicrophoneSource::GetBestFitnessDistance(
const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
const nsString& aDeviceId)
{
@ -281,9 +282,9 @@ uint32_t MediaEngineWebRTCAudioSource::GetBestFitnessDistance(
}
nsresult
MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId)
MediaEngineWebRTCMicrophoneSource::Allocate(const dom::MediaTrackConstraints &aConstraints,
const MediaEnginePrefs &aPrefs,
const nsString& aDeviceId)
{
if (mState == kReleased) {
if (mInitDone) {
@ -309,7 +310,7 @@ MediaEngineWebRTCAudioSource::Allocate(const dom::MediaTrackConstraints &aConstr
}
nsresult
MediaEngineWebRTCAudioSource::Deallocate()
MediaEngineWebRTCMicrophoneSource::Deallocate()
{
bool empty;
{
@ -331,7 +332,8 @@ MediaEngineWebRTCAudioSource::Deallocate()
}
nsresult
MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
MediaEngineWebRTCMicrophoneSource::Start(SourceMediaStream *aStream,
TrackID aID)
{
if (!mInitDone || !aStream) {
return NS_ERROR_FAILURE;
@ -384,7 +386,7 @@ MediaEngineWebRTCAudioSource::Start(SourceMediaStream* aStream, TrackID aID)
}
nsresult
MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
MediaEngineWebRTCMicrophoneSource::Stop(SourceMediaStream *aSource, TrackID aID)
{
{
MonitorAutoLock lock(mMonitor);
@ -421,17 +423,17 @@ MediaEngineWebRTCAudioSource::Stop(SourceMediaStream *aSource, TrackID aID)
}
void
MediaEngineWebRTCAudioSource::NotifyPull(MediaStreamGraph* aGraph,
SourceMediaStream *aSource,
TrackID aID,
StreamTime aDesiredTime)
MediaEngineWebRTCMicrophoneSource::NotifyPull(MediaStreamGraph *aGraph,
SourceMediaStream *aSource,
TrackID aID,
StreamTime aDesiredTime)
{
// Ignore - we push audio data
LOG_FRAMES(("NotifyPull, desired = %ld", (int64_t) aDesiredTime));
}
void
MediaEngineWebRTCAudioSource::Init()
MediaEngineWebRTCMicrophoneSource::Init()
{
mVoEBase = webrtc::VoEBase::GetInterface(mVoiceEngine);
@ -496,7 +498,7 @@ MediaEngineWebRTCAudioSource::Init()
}
void
MediaEngineWebRTCAudioSource::Shutdown()
MediaEngineWebRTCMicrophoneSource::Shutdown()
{
if (!mInitDone) {
// duplicate these here in case we failed during Init()
@ -551,9 +553,10 @@ MediaEngineWebRTCAudioSource::Shutdown()
typedef int16_t sample;
void
MediaEngineWebRTCAudioSource::Process(int channel,
webrtc::ProcessingTypes type, sample* audio10ms,
int length, int samplingFreq, bool isStereo)
MediaEngineWebRTCMicrophoneSource::Process(int channel,
webrtc::ProcessingTypes type,
sample *audio10ms, int length,
int samplingFreq, bool isStereo)
{
// On initial capture, throw away all far-end data except the most recent sample
// since it's already irrelevant and we want to keep avoid confusing the AEC far-end
@ -618,4 +621,55 @@ MediaEngineWebRTCAudioSource::Process(int channel,
return;
}
void
MediaEngineWebRTCAudioCaptureSource::GetName(nsAString &aName)
{
aName.AssignLiteral("AudioCapture");
}
void
MediaEngineWebRTCAudioCaptureSource::GetUUID(nsACString &aUUID)
{
nsID uuid;
char uuidBuffer[NSID_LENGTH];
nsCString asciiString;
ErrorResult rv;
rv = nsContentUtils::GenerateUUIDInPlace(uuid);
if (rv.Failed()) {
aUUID.AssignLiteral("");
return;
}
uuid.ToProvidedString(uuidBuffer);
asciiString.AssignASCII(uuidBuffer);
// Remove {} and the null terminator
aUUID.Assign(Substring(asciiString, 1, NSID_LENGTH - 3));
}
nsresult
MediaEngineWebRTCAudioCaptureSource::Start(SourceMediaStream *aMediaStream,
TrackID aId)
{
aMediaStream->AddTrack(aId, 0, new AudioSegment());
return NS_OK;
}
nsresult
MediaEngineWebRTCAudioCaptureSource::Stop(SourceMediaStream *aMediaStream,
TrackID aId)
{
aMediaStream->EndAllTrackAndFinish();
return NS_OK;
}
uint32_t
MediaEngineWebRTCAudioCaptureSource::GetBestFitnessDistance(
const nsTArray<const dom::MediaTrackConstraintSet*>& aConstraintSets,
const nsString& aDeviceId)
{
// There is only one way of capturing audio for now, and it's always adequate.
return 0;
}
}

View File

@ -1312,7 +1312,7 @@ Promise::ResolveInternal(JSContext* aCx,
}
nsRefPtr<PromiseInit> thenCallback =
new PromiseInit(thenObj, mozilla::dom::GetIncumbentGlobal());
new PromiseInit(nullptr, thenObj, mozilla::dom::GetIncumbentGlobal());
nsRefPtr<ThenableResolverTask> task =
new ThenableResolverTask(this, valueObj, thenCallback);
DispatchToMicroTask(task);

View File

@ -25,6 +25,7 @@ enum MediaSourceEnum {
"window",
"browser",
"microphone",
"audioCapture",
"other"
};

View File

@ -386,7 +386,11 @@ FetchEvent::GetClient()
return nullptr;
}
mClient = new ServiceWorkerClient(GetParentObject(), *mClientInfo);
WorkerPrivate* worker = GetCurrentThreadWorkerPrivate();
MOZ_ASSERT(worker);
nsRefPtr<nsIGlobalObject> global = worker->GlobalScope();
mClient = new ServiceWorkerClient(global, *mClientInfo);
}
nsRefPtr<ServiceWorkerClient> client = mClient;
return client.forget();

View File

@ -0,0 +1,6 @@
var CLIENT_URL =
"http://mochi.test:8888/tests/dom/workers/test/serviceworkers/sw_clients/dummy.html"
self.addEventListener("fetch", function(event) {
event.client.postMessage({status: event.client.url === CLIENT_URL});
});

View File

@ -154,6 +154,8 @@ support-files =
opaque_intercept_worker.js
notify_loaded.js
test_request_context.js
fetch_event_client.js
sw_clients/dummy.html
[test_app_protocol.html]
skip-if = release_build
@ -240,3 +242,4 @@ skip-if = release_build
[test_workerUpdate.html]
[test_workerupdatefoundevent.html]
[test_opaque_intercept.html]
[test_fetch_event_client_postmessage.html]

View File

@ -0,0 +1,19 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE HTML>
<html>
<head>
<title>Bug 1158735 - Dummy page</title>
</head>
<body>
<script type="text/javascript" >
window.onload = function() {
navigator.serviceWorker.ready.then(function(swr) {
fetch("foo.txt");
});
}
</script>
</body>
</html>

View File

@ -0,0 +1,71 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE HTML>
<html>
<head>
<title>Bug 1158735 - FetchEvent.client asserting in onFetch when there's no document.</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<p id="display"></p>
<div id="content" style="display: none"></div>
<pre id="test"></pre>
<script class="testbody" type="text/javascript">
SimpleTest.requestCompleteLog();
var registration;
function start() {
return navigator.serviceWorker.register("fetch_event_client.js", { scope: "./"})
.then((swr) => registration = swr);
}
function unregister() {
return registration.unregister().then(function(result) {
ok(result, "Unregister should return true.");
}, function(e) {
dump("Unregistering the SW failed with " + e + "\n");
});
}
function testFetchEvent() {
var p = new Promise(function(resolve, reject) {
var content = document.getElementById("content");
ok(content, "parent exists.");
var iframe = document.createElement('iframe');
iframe.setAttribute('src', "sw_clients/dummy.html");
content.appendChild(iframe);
var w = iframe.contentWindow;
w.navigator.serviceWorker.onmessage = function(msg) {
ok(msg.data.status, "Receive message posted by client successfully");
resolve();
}
});
return p;
}
function runTest() {
start()
.then(testFetchEvent)
.then(unregister)
.then(function() {
}).catch(function(e) {
ok(false, "Some test failed with error " + e);
}).then(SimpleTest.finish);
}
SimpleTest.waitForExplicitFinish();
SpecialPowers.pushPrefEnv({"set": [
["dom.serviceWorkers.exemptFromPerDomainMax", true],
["dom.serviceWorkers.enabled", true],
["dom.serviceWorkers.testing.enabled", true],
]}, runTest);
</script>
</body>
</html>

View File

@ -316,7 +316,7 @@ nsXBLPrototypeHandler::ExecuteHandler(EventTarget* aTarget,
NS_ENSURE_TRUE(bound, NS_ERROR_FAILURE);
nsRefPtr<EventHandlerNonNull> handlerCallback =
new EventHandlerNonNull(bound, /* aIncumbentGlobal = */ nullptr);
new EventHandlerNonNull(nullptr, bound, /* aIncumbentGlobal = */ nullptr);
TypedEventHandler typedHandler(handlerCallback);

View File

@ -104,15 +104,15 @@ skip-if(Android||B2G||Mulet) needs-focus == 462758-grabbers-resizers.html 462758
== 388980-1.html 388980-1-ref.html
needs-focus == spellcheck-superscript-1.html spellcheck-superscript-1-ref.html
skip-if(B2G||Mulet) fails-if(Android) needs-focus != spellcheck-superscript-2.html spellcheck-superscript-2-ref.html # bug 783658 # Initial mulet triage: parity with B2G/B2G Desktop
needs-focus pref(selectioncaret.enabled,false) == 824080-1.html 824080-1-ref.html
needs-focus pref(selectioncaret.enabled,false) == 824080-2.html 824080-2-ref.html
needs-focus pref(selectioncaret.enabled,false) == 824080-3.html 824080-3-ref.html
needs-focus pref(selectioncaret.enabled,false) pref(layout.accessiblecaret.enabled,false) == 824080-1.html 824080-1-ref.html
needs-focus pref(selectioncaret.enabled,false) pref(layout.accessiblecaret.enabled,false) == 824080-2.html 824080-2-ref.html
needs-focus pref(selectioncaret.enabled,false) pref(layout.accessiblecaret.enabled,false) == 824080-3.html 824080-3-ref.html
needs-focus != 824080-2.html 824080-3.html
needs-focus pref(selectioncaret.enabled,false) == 824080-4.html 824080-4-ref.html
needs-focus pref(selectioncaret.enabled,false) == 824080-5.html 824080-5-ref.html
needs-focus pref(selectioncaret.enabled,false) pref(layout.accessiblecaret.enabled,false) == 824080-4.html 824080-4-ref.html
needs-focus pref(selectioncaret.enabled,false) pref(layout.accessiblecaret.enabled,false) == 824080-5.html 824080-5-ref.html
needs-focus != 824080-4.html 824080-5.html
needs-focus == 824080-6.html 824080-6-ref.html
needs-focus pref(selectioncaret.enabled,false) == 824080-7.html 824080-7-ref.html
needs-focus pref(selectioncaret.enabled,false) pref(layout.accessiblecaret.enabled,false) == 824080-7.html 824080-7-ref.html
needs-focus != 824080-6.html 824080-7.html
# Bug 674927: copy spellcheck-textarea tests to contenteditable
== spellcheck-contenteditable-attr.html spellcheck-contenteditable-nofocus-ref.html

View File

@ -853,6 +853,7 @@ MessageChannel::Send(Message* aMsg, Message* aReply)
#ifdef OS_WIN
SyncStackFrame frame(this, false);
NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION);
#endif
CxxStackFrame f(*this, OUT_MESSAGE, msg);
@ -994,6 +995,7 @@ MessageChannel::Call(Message* aMsg, Message* aReply)
#ifdef OS_WIN
SyncStackFrame frame(this, true);
NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION);
#endif
// This must come before MonitorAutoLock, as its destructor acquires the
@ -1032,6 +1034,12 @@ MessageChannel::Call(Message* aMsg, Message* aReply)
return false;
}
#ifdef OS_WIN
/* We should pump messages at this point to ensure that the IPC peer
does not become deadlocked on a pending inter-thread SendMessage() */
neuteredRgn.PumpOnce();
#endif
// Now might be the time to process a message deferred because of race
// resolution.
MaybeUndeferIncall();
@ -1148,6 +1156,7 @@ MessageChannel::WaitForIncomingMessage()
{
#ifdef OS_WIN
SyncStackFrame frame(this, true);
NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION);
#endif
{ // Scope for lock

View File

@ -15,6 +15,9 @@
#include "mozilla/Monitor.h"
#include "mozilla/Vector.h"
#include "mozilla/WeakPtr.h"
#if defined(OS_WIN)
#include "mozilla/ipc/Neutering.h"
#endif // defined(OS_WIN)
#include "mozilla/ipc/Transport.h"
#include "MessageLink.h"
#include "nsAutoPtr.h"

64
ipc/glue/Neutering.h Normal file
View File

@ -0,0 +1,64 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef mozilla_ipc_Neutering_h
#define mozilla_ipc_Neutering_h
#include "mozilla/GuardObjects.h"
/**
* This header declares RAII wrappers for Window neutering. See
* WindowsMessageLoop.cpp for more details.
*/
namespace mozilla {
namespace ipc {
/**
* This class is a RAII wrapper around Window neutering. As long as a
* NeuteredWindowRegion object is instantiated, Win32 windows belonging to the
* current thread will be neutered. It is safe to nest multiple instances of
* this class.
*/
class MOZ_STACK_CLASS NeuteredWindowRegion
{
public:
  explicit NeuteredWindowRegion(bool aDoNeuter MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
  ~NeuteredWindowRegion();

  /**
   * This function clears any backlog of nonqueued messages that are pending for
   * the current thread.
   */
  void PumpOnce();

private:
  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
  // True if this region found no neutering active at construction time, i.e.
  // it is the outermost region and is the one responsible for un-neutering.
  bool mNeuteredByThis;
};
/**
 * This class is analogous to MutexAutoUnlock for Mutex; it is an RAII class
* that is to be instantiated within a NeuteredWindowRegion, thus temporarily
* disabling neutering for the remainder of its enclosing block.
* @see NeuteredWindowRegion
*/
class MOZ_STACK_CLASS DeneuteredWindowRegion
{
public:
  DeneuteredWindowRegion(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM);
  ~DeneuteredWindowRegion();

private:
  MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER
  // True if neutering was active when this object was constructed and must
  // therefore be re-enabled when this object goes out of scope.
  bool mReneuter;
};
} // namespace ipc
} // namespace mozilla
#endif // mozilla_ipc_Neutering_h

View File

@ -8,6 +8,7 @@
#include "mozilla/DebugOnly.h"
#include "WindowsMessageLoop.h"
#include "Neutering.h"
#include "MessageChannel.h"
#include "nsAutoPtr.h"
@ -862,6 +863,85 @@ IsTimeoutExpired(PRIntervalTime aStart, PRIntervalTime aTimeout)
(aTimeout <= (PR_IntervalNow() - aStart));
}
static HHOOK gWindowHook;
// Begin neutering: install the WH_CALLWNDPROC hook on the UI thread so that
// Win32 windows belonging to this thread stop processing messages while we
// wait on IPC. Callers must ensure no hook is already installed.
static inline void
StartNeutering()
{
  MOZ_ASSERT(gUIThreadId);
  MOZ_ASSERT(!gWindowHook);
  NS_ASSERTION(!MessageChannel::IsPumpingMessages(),
               "Shouldn't be pumping already!");
  // Flag the channel as pumping before the hook goes in, matching the
  // (!!gWindowHook) == IsPumpingMessages() invariant asserted elsewhere.
  MessageChannel::SetIsPumpingMessages(true);
  gWindowHook = ::SetWindowsHookEx(WH_CALLWNDPROC, CallWindowProcedureHook,
                                   nullptr, gUIThreadId);
  NS_ASSERTION(gWindowHook, "Failed to set hook!");
}
// End neutering: remove the WH_CALLWNDPROC hook, restore the window
// procedures of any windows that were neutered, and arrange for deferred
// messages to be delivered. Must only be called while neutering is active.
static void
StopNeutering()
{
  MOZ_ASSERT(MessageChannel::IsPumpingMessages());
  ::UnhookWindowsHookEx(gWindowHook);
  gWindowHook = NULL;
  ::UnhookNeuteredWindows();
  // Before returning we need to set a hook to run any deferred messages that
  // we received during the IPC call. The hook will unset itself as soon as
  // someone else calls GetMessage, PeekMessage, or runs code that generates
  // a "nonqueued" message.
  ::ScheduleDeferredMessageRun();
  MessageChannel::SetIsPumpingMessages(false);
}
// Enter a neutering region. Only the outermost region on the thread — the one
// constructed while no hook is installed — actually starts neutering; nested
// regions record that fact in mNeuteredByThis and become no-ops, which is what
// makes nesting these objects safe.
NeuteredWindowRegion::NeuteredWindowRegion(bool aDoNeuter MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
  : mNeuteredByThis(!gWindowHook)
{
  MOZ_GUARD_OBJECT_NOTIFIER_INIT;
  if (aDoNeuter && mNeuteredByThis) {
    StartNeutering();
  }
}
NeuteredWindowRegion::~NeuteredWindowRegion()
{
  // Only the region that started neutering stops it. The gWindowHook check
  // also covers the case where a DeneuteredWindowRegion has already removed
  // the hook within this region's lifetime.
  if (gWindowHook && mNeuteredByThis) {
    StopNeutering();
  }
}
// Drain one round of pending messages so that an IPC peer blocked on an
// inter-thread SendMessage() to one of our windows cannot deadlock against us.
void
NeuteredWindowRegion::PumpOnce()
{
  MSG msg = {0};
  if (gCOMWindow) {
    // Pump any COM messages so that we don't hang due to STA marshaling.
    // This call will also expunge any nonqueued messages on the current thread.
    if (::PeekMessageW(&msg, gCOMWindow, 0, 0, PM_REMOVE)) {
      ::TranslateMessage(&msg);
      ::DispatchMessageW(&msg);
    }
  } else {
    // Expunge any nonqueued messages on the current thread; PM_NOREMOVE means
    // nothing is actually dispatched here.
    ::PeekMessageW(&msg, nullptr, 0, 0, PM_NOREMOVE);
  }
}
// Temporarily suspend neutering for the enclosing block. If neutering is not
// currently active this is a no-op, and the destructor will not re-enable it.
DeneuteredWindowRegion::DeneuteredWindowRegion(MOZ_GUARD_OBJECT_NOTIFIER_ONLY_PARAM_IN_IMPL)
  : mReneuter(gWindowHook != NULL)
{
  MOZ_GUARD_OBJECT_NOTIFIER_INIT;
  if (mReneuter) {
    StopNeutering();
  }
}
DeneuteredWindowRegion::~DeneuteredWindowRegion()
{
  // Re-enable neutering only if this object suspended it in its constructor.
  if (mReneuter) {
    StartNeutering();
  }
}
bool
MessageChannel::WaitForSyncNotify()
{
@ -916,15 +996,6 @@ MessageChannel::WaitForSyncNotify()
NS_ASSERTION(timerId, "SetTimer failed!");
}
// Setup deferred processing of native events while we wait for a response.
NS_ASSERTION(!MessageChannel::IsPumpingMessages(),
"Shouldn't be pumping already!");
MessageChannel::SetIsPumpingMessages(true);
HHOOK windowHook = SetWindowsHookEx(WH_CALLWNDPROC, CallWindowProcedureHook,
nullptr, gUIThreadId);
NS_ASSERTION(windowHook, "Failed to set hook!");
{
while (1) {
MSG msg = { 0 };
@ -998,25 +1069,11 @@ MessageChannel::WaitForSyncNotify()
}
}
// Unhook the neutered window procedure hook.
UnhookWindowsHookEx(windowHook);
// Unhook any neutered windows procedures so messages can be delivered
// normally.
UnhookNeuteredWindows();
// Before returning we need to set a hook to run any deferred messages that
// we received during the IPC call. The hook will unset itself as soon as
// someone else calls GetMessage, PeekMessage, or runs code that generates
// a "nonqueued" message.
ScheduleDeferredMessageRun();
if (timerId) {
KillTimer(nullptr, timerId);
timerId = 0;
}
MessageChannel::SetIsPumpingMessages(false);
return WaitResponse(timedout);
}
@ -1050,56 +1107,28 @@ MessageChannel::WaitForInterruptNotify()
UINT_PTR timerId = 0;
TimeoutData timeoutData = { 0 };
// windowHook is used as a flag variable for the loop below: if it is set
// gWindowHook is used as a flag variable for the loop below: if it is set
// and we start to spin a nested event loop, we need to clear the hook and
// process deferred/pending messages.
// If windowHook is nullptr, MessageChannel::IsPumpingMessages should be false.
HHOOK windowHook = nullptr;
while (1) {
NS_ASSERTION((!!windowHook) == MessageChannel::IsPumpingMessages(),
"windowHook out of sync with reality");
NS_ASSERTION((!!gWindowHook) == MessageChannel::IsPumpingMessages(),
"gWindowHook out of sync with reality");
if (mTopFrame->mSpinNestedEvents) {
if (windowHook) {
UnhookWindowsHookEx(windowHook);
windowHook = nullptr;
if (timerId) {
KillTimer(nullptr, timerId);
timerId = 0;
}
// Used by widget to assert on incoming native events
MessageChannel::SetIsPumpingMessages(false);
// Unhook any neutered windows procedures so messages can be delivered
// normally.
UnhookNeuteredWindows();
// Send all deferred "nonqueued" message to the intended receiver.
// We're dropping into SpinInternalEventLoop so we should be fairly
// certain these will get delivered soon.
ScheduleDeferredMessageRun();
if (gWindowHook && timerId) {
KillTimer(nullptr, timerId);
timerId = 0;
}
DeneuteredWindowRegion deneuteredRgn;
SpinInternalEventLoop();
ResetEvent(mEvent);
return true;
}
if (!windowHook) {
MessageChannel::SetIsPumpingMessages(true);
windowHook = SetWindowsHookEx(WH_CALLWNDPROC, CallWindowProcedureHook,
nullptr, gUIThreadId);
NS_ASSERTION(windowHook, "Failed to set hook!");
NS_ASSERTION(!timerId, "Timer already initialized?");
if (mTimeoutMs != kNoTimeout) {
InitTimeoutData(&timeoutData, mTimeoutMs);
timerId = SetTimer(nullptr, 0, mTimeoutMs, nullptr);
NS_ASSERTION(timerId, "SetTimer failed!");
}
if (mTimeoutMs != kNoTimeout && !timerId) {
InitTimeoutData(&timeoutData, mTimeoutMs);
timerId = SetTimer(nullptr, 0, mTimeoutMs, nullptr);
NS_ASSERTION(timerId, "SetTimer failed!");
}
MSG msg = { 0 };
@ -1151,27 +1180,11 @@ MessageChannel::WaitForInterruptNotify()
}
}
if (windowHook) {
// Unhook the neutered window procedure hook.
UnhookWindowsHookEx(windowHook);
// Unhook any neutered windows procedures so messages can be delivered
// normally.
UnhookNeuteredWindows();
// Before returning we need to set a hook to run any deferred messages that
// we received during the IPC call. The hook will unset itself as soon as
// someone else calls GetMessage, PeekMessage, or runs code that generates
// a "nonqueued" message.
ScheduleDeferredMessageRun();
if (timerId) {
KillTimer(nullptr, timerId);
}
if (timerId) {
KillTimer(nullptr, timerId);
timerId = 0;
}
MessageChannel::SetIsPumpingMessages(false);
return WaitResponse(timedout);
}

View File

@ -25,6 +25,7 @@ EXPORTS.mozilla.ipc += [
'IOThreadChild.h',
'MessageChannel.h',
'MessageLink.h',
'Neutering.h',
'ProcessChild.h',
'ProtocolUtils.h',
'ScopedXREEmbed.h',

View File

@ -17,6 +17,7 @@
#include "mozilla/ReentrancyGuard.h"
#include "mozilla/TemplateLib.h"
#include "mozilla/TypeTraits.h"
#include "mozilla/UniquePtr.h"
#include "js/Utility.h"
@ -586,6 +587,25 @@ template <class T>
struct DefaultHasher<T*> : PointerHasher<T*, mozilla::tl::FloorLog2<sizeof(void*)>::value>
{};
// Specialize hashing policy for mozilla::UniquePtr<T> to proxy the UniquePtr's
// raw pointer to PointerHasher.
template <class T>
struct DefaultHasher<mozilla::UniquePtr<T>>
{
    using Lookup = mozilla::UniquePtr<T>;
    using PtrHasher = PointerHasher<T*, mozilla::tl::FloorLog2<sizeof(void*)>::value>;

    // Hash the owned raw pointer; the UniquePtr's ownership is untouched.
    static HashNumber hash(const Lookup& l) {
        return PtrHasher::hash(l.get());
    }
    // Keys match iff they refer to the same raw pointer.
    static bool match(const mozilla::UniquePtr<T>& k, const Lookup& l) {
        return PtrHasher::match(k.get(), l.get());
    }
    // Rekeying transfers ownership of the new key into the table's slot.
    static void rekey(mozilla::UniquePtr<T>& k, mozilla::UniquePtr<T>&& newKey) {
        k = mozilla::Move(newKey);
    }
};
// For doubles, we can xor the two uint32s.
template <>
struct DefaultHasher<double>

View File

@ -18,9 +18,6 @@
#ifdef MOZ_INSTRUMENTS
# include "devtools/Instruments.h"
#endif
#ifdef MOZ_SHARK
# include "devtools/sharkctl.h"
#endif
#endif
#ifdef XP_WIN
@ -68,10 +65,6 @@ StartOSXProfiling(const char* profileName, pid_t pid)
{
bool ok = true;
const char* profiler = nullptr;
#ifdef MOZ_SHARK
ok = Shark::Start();
profiler = "Shark";
#endif
#ifdef MOZ_INSTRUMENTS
ok = Instruments::Start(pid);
profiler = "Instruments";
@ -106,9 +99,6 @@ JS_StopProfiling(const char* profileName)
{
bool ok = true;
#ifdef __APPLE__
#ifdef MOZ_SHARK
Shark::Stop();
#endif
#ifdef MOZ_INSTRUMENTS
Instruments::Stop(profileName);
#endif
@ -131,12 +121,8 @@ ControlProfilers(bool toState)
if (! probes::ProfilingActive && toState) {
#ifdef __APPLE__
#if defined(MOZ_SHARK) || defined(MOZ_INSTRUMENTS)
#if defined(MOZ_INSTRUMENTS)
const char* profiler;
#ifdef MOZ_SHARK
ok = Shark::Start();
profiler = "Shark";
#endif
#ifdef MOZ_INSTRUMENTS
ok = Instruments::Resume();
profiler = "Instruments";
@ -154,9 +140,6 @@ ControlProfilers(bool toState)
#endif
} else if (probes::ProfilingActive && ! toState) {
#ifdef __APPLE__
#ifdef MOZ_SHARK
Shark::Stop();
#endif
#ifdef MOZ_INSTRUMENTS
Instruments::Pause();
#endif
@ -347,7 +330,7 @@ ClearMaxGCPauseAccumulator(JSContext* cx, unsigned argc, Value* vp)
return true;
}
#if defined(MOZ_SHARK) || defined(MOZ_INSTRUMENTS)
#if defined(MOZ_INSTRUMENTS)
static bool
IgnoreAndReturnTrue(JSContext* cx, unsigned argc, Value* vp)
@ -402,7 +385,7 @@ static const JSFunctionSpec profiling_functions[] = {
JS_FN("dumpProfile", DumpProfile, 2,0),
JS_FN("getMaxGCPauseSinceClear", GetMaxGCPauseSinceClear, 0, 0),
JS_FN("clearMaxGCPauseAccumulator", ClearMaxGCPauseAccumulator, 0, 0),
#if defined(MOZ_SHARK) || defined(MOZ_INSTRUMENTS)
#if defined(MOZ_INSTRUMENTS)
/* Keep users of the old shark API happy. */
JS_FN("connectShark", IgnoreAndReturnTrue, 0,0),
JS_FN("disconnectShark", IgnoreAndReturnTrue, 0,0),

View File

@ -943,7 +943,8 @@ CallFunctionWithAsyncStack(JSContext* cx, unsigned argc, Value* vp)
RootedObject stack(cx, &args[1].toObject());
RootedString asyncCause(cx, args[2].toString());
JS::AutoSetAsyncStackForNewCalls sas(cx, stack, asyncCause);
JS::AutoSetAsyncStackForNewCalls sas(cx, stack, asyncCause,
JS::AutoSetAsyncStackForNewCalls::AsyncCallKind::EXPLICIT);
return Call(cx, UndefinedHandleValue, function,
JS::HandleValueArray::empty(), args.rval());
}

View File

@ -3220,18 +3220,6 @@ if test -n "$MOZ_JPROF"; then
AC_DEFINE(MOZ_JPROF)
fi
dnl ========================================================
dnl shark
dnl ========================================================
MOZ_ARG_ENABLE_BOOL(shark,
[ --enable-shark Enable shark remote profiling. Implies --enable-profiling.],
MOZ_SHARK=1,
MOZ_SHARK= )
if test -n "$MOZ_SHARK"; then
MOZ_PROFILING=1
AC_DEFINE(MOZ_SHARK)
fi
dnl ========================================================
dnl instruments
dnl ========================================================
@ -3699,7 +3687,6 @@ AC_SUBST(MOZ_DEBUG_DISABLE_DEFS)
AC_SUBST(MOZ_DEBUG_LDFLAGS)
AC_SUBST(WARNINGS_AS_ERRORS)
AC_SUBST(MOZ_JPROF)
AC_SUBST(MOZ_SHARK)
AC_SUBST(MOZ_INSTRUMENTS)
AC_SUBST(MOZ_CALLGRIND)
AC_SUBST(MOZ_VTUNE)

View File

@ -550,39 +550,45 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason, ObjectGroupList
TraceMinorGCEnd();
if (enableProfiling_ && totalTime >= profileThreshold_) {
static bool printedHeader = false;
if (!printedHeader) {
fprintf(stderr,
"MinorGC: Reason PRate Size Time mkVals mkClls mkSlts mkWCll mkGnrc ckTbls mkRntm mkDbgr clrNOC collct swpABO updtIn runFin frSlts clrSB sweep resize pretnr logPtT\n");
printedHeader = true;
struct {
const char* name;
int64_t time;
} PrintList[] = {
{"canIon", TIME_TOTAL(cancelIonCompilations)},
{"mkVals", TIME_TOTAL(traceValues)},
{"mkClls", TIME_TOTAL(traceCells)},
{"mkSlts", TIME_TOTAL(traceSlots)},
{"mcWCll", TIME_TOTAL(traceWholeCells)},
{"mkGnrc", TIME_TOTAL(traceGenericEntries)},
{"ckTbls", TIME_TOTAL(checkHashTables)},
{"mkRntm", TIME_TOTAL(markRuntime)},
{"mkDbgr", TIME_TOTAL(markDebugger)},
{"clrNOC", TIME_TOTAL(clearNewObjectCache)},
{"collct", TIME_TOTAL(collectToFP)},
{"swpABO", TIME_TOTAL(sweepArrayBufferViewList)},
{"updtIn", TIME_TOTAL(updateJitActivations)},
{"frSlts", TIME_TOTAL(freeMallocedBuffers)},
{" clrSB", TIME_TOTAL(clearStoreBuffer)},
{" sweep", TIME_TOTAL(sweep)},
{"resize", TIME_TOTAL(resize)},
{"pretnr", TIME_TOTAL(pretenure)},
{"logPtT", TIME_TOTAL(logPromotionsToTenured)}
};
static int printedHeader = 0;
if ((printedHeader++ % 200) == 0) {
fprintf(stderr, "MinorGC: Reason PRate Size Time");
for (auto &entry : PrintList)
fprintf(stderr, " %s", entry.name);
fprintf(stderr, "\n");
}
#define FMT " %6" PRIu64
fprintf(stderr,
"MinorGC: %20s %5.1f%% %4d" FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT FMT "\n",
js::gcstats::ExplainReason(reason),
promotionRate * 100,
numActiveChunks_,
totalTime,
TIME_TOTAL(cancelIonCompilations),
TIME_TOTAL(traceValues),
TIME_TOTAL(traceCells),
TIME_TOTAL(traceSlots),
TIME_TOTAL(traceWholeCells),
TIME_TOTAL(traceGenericEntries),
TIME_TOTAL(checkHashTables),
TIME_TOTAL(markRuntime),
TIME_TOTAL(markDebugger),
TIME_TOTAL(clearNewObjectCache),
TIME_TOTAL(collectToFP),
TIME_TOTAL(sweepArrayBufferViewList),
TIME_TOTAL(updateJitActivations),
TIME_TOTAL(freeMallocedBuffers),
TIME_TOTAL(clearStoreBuffer),
TIME_TOTAL(sweep),
TIME_TOTAL(resize),
TIME_TOTAL(pretenure),
TIME_TOTAL(logPromotionsToTenured));
fprintf(stderr, "MinorGC: %20s %5.1f%% %4d " FMT, js::gcstats::ExplainReason(reason),
promotionRate * 100, numActiveChunks_, totalTime);
for (auto &entry : PrintList) {
fprintf(stderr, FMT, entry.time);
}
fprintf(stderr, "\n");
#undef FMT
}
}

View File

@ -4766,10 +4766,12 @@ JS_RestoreFrameChain(JSContext* cx)
}
JS::AutoSetAsyncStackForNewCalls::AutoSetAsyncStackForNewCalls(
JSContext* cx, HandleObject stack, HandleString asyncCause)
JSContext* cx, HandleObject stack, HandleString asyncCause,
JS::AutoSetAsyncStackForNewCalls::AsyncCallKind kind)
: cx(cx),
oldAsyncStack(cx, cx->runtime()->asyncStackForNewActivations),
oldAsyncCause(cx, cx->runtime()->asyncCauseForNewActivations)
oldAsyncCause(cx, cx->runtime()->asyncCauseForNewActivations),
oldAsyncCallIsExplicit(cx->runtime()->asyncCallIsExplicit)
{
CHECK_REQUEST(cx);
@ -4784,6 +4786,7 @@ JS::AutoSetAsyncStackForNewCalls::AutoSetAsyncStackForNewCalls(
cx->runtime()->asyncStackForNewActivations = asyncStack;
cx->runtime()->asyncCauseForNewActivations = asyncCause;
cx->runtime()->asyncCallIsExplicit = kind == AsyncCallKind::EXPLICIT;
}
JS::AutoSetAsyncStackForNewCalls::~AutoSetAsyncStackForNewCalls()
@ -4791,6 +4794,7 @@ JS::AutoSetAsyncStackForNewCalls::~AutoSetAsyncStackForNewCalls()
cx->runtime()->asyncCauseForNewActivations = oldAsyncCause;
cx->runtime()->asyncStackForNewActivations =
oldAsyncStack ? &oldAsyncStack->as<SavedFrame>() : nullptr;
cx->runtime()->asyncCallIsExplicit = oldAsyncCallIsExplicit;
}
/************************************************************************/

View File

@ -4027,14 +4027,25 @@ class MOZ_STACK_CLASS JS_PUBLIC_API(AutoSetAsyncStackForNewCalls)
JSContext* cx;
RootedObject oldAsyncStack;
RootedString oldAsyncCause;
bool oldAsyncCallIsExplicit;
public:
enum class AsyncCallKind {
// The ordinary kind of call, where we may apply an async
// parent if there is no ordinary parent.
IMPLICIT,
// An explicit async parent, e.g., callFunctionWithAsyncStack,
// where we always want to override any ordinary parent.
EXPLICIT
};
// The stack parameter cannot be null by design, because it would be
// ambiguous whether that would clear any scheduled async stack and make the
// normal stack reappear in the new call, or just keep the async stack
// already scheduled for the new call, if any.
AutoSetAsyncStackForNewCalls(JSContext* cx, HandleObject stack,
HandleString asyncCause);
HandleString asyncCause,
AsyncCallKind kind = AsyncCallKind::IMPLICIT);
~AutoSetAsyncStackForNewCalls();
};

View File

@ -128,6 +128,7 @@ JSRuntime::JSRuntime(JSRuntime* parentRuntime)
asmJSActivationStack_(nullptr),
asyncStackForNewActivations(nullptr),
asyncCauseForNewActivations(nullptr),
asyncCallIsExplicit(false),
entryMonitor(nullptr),
parentRuntime(parentRuntime),
interrupt_(false),

View File

@ -697,6 +697,12 @@ struct JSRuntime : public JS::shadow::Runtime,
*/
JSString* asyncCauseForNewActivations;
/*
* True if the async call was explicitly requested, e.g. via
* callFunctionWithAsyncStack.
*/
bool asyncCallIsExplicit;
/* If non-null, report JavaScript entry points to this monitor. */
JS::dbg::AutoEntryMonitor* entryMonitor;

View File

@ -912,6 +912,17 @@ SavedStacks::insertFrames(JSContext* cx, FrameIter& iter, MutableHandleSavedFram
while (!iter.done()) {
Activation& activation = *iter.activation();
if (asyncActivation && asyncActivation != &activation) {
// We found an async stack in the previous activation, and we
// walked past the oldest frame of that activation, we're done.
// However, we only want to use the async parent if it was
// explicitly requested; if we got here otherwise, we have
// a direct parent, which we prefer.
if (asyncActivation->asyncCallIsExplicit())
break;
asyncActivation = nullptr;
}
if (!asyncActivation) {
asyncStack = activation.asyncStack();
if (asyncStack) {
@ -923,10 +934,6 @@ SavedStacks::insertFrames(JSContext* cx, FrameIter& iter, MutableHandleSavedFram
asyncCause = activation.asyncCause();
asyncActivation = &activation;
}
} else if (asyncActivation != &activation) {
// We found an async stack in the previous activation, and we
// walked past the oldest frame of that activation, we're done.
break;
}
AutoLocationValueRooter location(cx);

View File

@ -868,11 +868,13 @@ Activation::Activation(JSContext* cx, Kind kind)
hideScriptedCallerCount_(0),
asyncStack_(cx, cx->runtime_->asyncStackForNewActivations),
asyncCause_(cx, cx->runtime_->asyncCauseForNewActivations),
asyncCallIsExplicit_(cx->runtime_->asyncCallIsExplicit),
entryMonitor_(cx->runtime_->entryMonitor),
kind_(kind)
{
cx->runtime_->asyncStackForNewActivations = nullptr;
cx->runtime_->asyncCauseForNewActivations = nullptr;
cx->runtime_->asyncCallIsExplicit = false;
cx->runtime_->entryMonitor = nullptr;
cx->runtime_->activation_ = this;
}
@ -886,6 +888,7 @@ Activation::~Activation()
cx_->runtime_->entryMonitor = entryMonitor_;
cx_->runtime_->asyncCauseForNewActivations = asyncCause_;
cx_->runtime_->asyncStackForNewActivations = asyncStack_;
cx_->runtime_->asyncCallIsExplicit = asyncCallIsExplicit_;
}
bool

View File

@ -1130,6 +1130,10 @@ class Activation
// Value of asyncCause to be attached to asyncStack_.
RootedString asyncCause_;
// True if the async call was explicitly requested, e.g. via
// callFunctionWithAsyncStack.
bool asyncCallIsExplicit_;
// The entry point monitor that was set on cx_->runtime() when this
// Activation was created. Subclasses should report their entry frame's
// function or script here.
@ -1215,6 +1219,10 @@ class Activation
return asyncCause_;
}
bool asyncCallIsExplicit() const {
return asyncCallIsExplicit_;
}
private:
Activation(const Activation& other) = delete;
void operator=(const Activation& other) = delete;

Some files were not shown because too many files have changed in this diff Show More