Mirror of https://gitlab.winehq.org/wine-gecko.git (synced 2024-09-13 09:24:08 -07:00)

commit b86ded9ee9
Merge inbound to m-c a=merge
@@ -1560,9 +1560,9 @@ nsAccessibilityService::CreateAccessibleByFrameType(nsIFrame* aFrame,
    case eHTMLTableRowType: {
      // Accessible HTML table row may be a child of tbody/tfoot/thead of
      // accessible HTML table or a direct child of accessible of HTML table.
      Accessible* table = aContext->IsTable() ?
        aContext :
        (aContext->Parent()->IsTable() ? aContext->Parent() : nullptr);
      Accessible* table = aContext->IsTable() ? aContext : nullptr;
      if (!table && aContext->Parent() && aContext->Parent()->IsTable())
        table = aContext->Parent();

      if (table) {
        nsIContent* parentContent = aContent->GetParent();
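The new form above avoids the unchecked Parent() dereference in the old ternary: when the row's context is neither a table itself nor has a parent accessible, the lookup now simply yields null. A minimal standalone sketch of that guard, using a hypothetical simplified Accessible type rather than the real accessibility classes:

#include <cstdio>

// Hypothetical stand-in for the real Accessible class (assumption: only the
// Parent()/IsTable() shape of the API matters for this pattern).
struct Accessible {
  Accessible* mParent = nullptr;
  bool mIsTable = false;
  Accessible* Parent() const { return mParent; }
  bool IsTable() const { return mIsTable; }
};

// Resolve the table owning a row: the context itself, or its parent, but only
// when that parent exists and is a table. Returns null otherwise.
Accessible* TableForRow(Accessible* aContext) {
  Accessible* table = aContext->IsTable() ? aContext : nullptr;
  if (!table && aContext->Parent() && aContext->Parent()->IsTable())
    table = aContext->Parent();
  return table;
}

int main() {
  Accessible orphanRowGroup;  // e.g. a row group with no parent accessible
  std::printf("%s\n", TableForRow(&orphanRowGroup) ? "table" : "no table");  // prints "no table"
}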
@@ -27,8 +27,6 @@
  "magazineluiza.com.br": "\\(Mobile#(Android; Mobile",
  // bug 827628, groupon.com.br
  "groupon.com.br": "\\(Mobile#(Android; Mobile",
  // bug 827632, tecmundo.com.br
  "tecmundo.com.br": "\\(Mobile#(Android; Mobile",
  // bug 827633, hao123.com
  "hao123.com": "\\(Mobile#(Android; Mobile",
  // bug 827573, webmotors.com.br
@@ -91,8 +89,6 @@
  "livescore.com": "\\(Mobile#(Android; Mobile",
  // bug 878277, naslovi.net
  "naslovi.net": "\\(Mobile#(Android; Mobile",
  // bug 878632, banorte.com
  "banorte.com": "\\(Mobile#(Android; Mobile",
  // bug 878649, univision.com
  "univision.com": "\\(Mobile#(Android; Mobile",
  // bug 878653, redstarbelgrade.info
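Each override pairs a domain with a value of the form search#replace that is applied to the UA string for that site (a reading implied by the entries themselves; the escaped parenthesis suggests the search half may be treated as a regular expression, and the real parsing lives in the UA-override service, which is not part of this diff). A rough sketch of what such a rule does, using a plain substring match for simplicity:

#include <iostream>
#include <string>

// Apply one "search#replace" UA override rule. This is an illustrative
// assumption about the format, not the shipping parser.
std::string ApplyOverride(std::string ua, const std::string& rule) {
  std::size_t hash = rule.find('#');
  if (hash == std::string::npos)
    return rule;  // no '#': treat the rule as a full replacement UA
  std::string search = rule.substr(0, hash);
  std::string replacement = rule.substr(hash + 1);
  std::size_t pos = ua.find(search);
  if (pos != std::string::npos)
    ua.replace(pos, search.size(), replacement);
  return ua;
}

int main() {
  std::string ua = "Mozilla/5.0 (Mobile; rv:32.0) Gecko/32.0 Firefox/32.0";
  // "(Mobile#(Android; Mobile" rewrites "(Mobile" into "(Android; Mobile".
  std::cout << ApplyOverride(ua, "(Mobile#(Android; Mobile") << "\n";
}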
@@ -68,3 +68,5 @@ MOZ_BUNDLED_FONTS=1

# Enable exact rooting on b2g.
JSGC_USE_EXACT_ROOTING=1

JS_GC_SMALL_CHUNK_SIZE=1
@@ -651,6 +651,9 @@
; [Layout Engine Resources]
; Style Sheets, Graphics and other Resources used by the layout engine.
@BINPATH@/res/EditorOverride.css
@BINPATH@/res/caret_left.svg
@BINPATH@/res/caret_middle.svg
@BINPATH@/res/caret_right.svg
@BINPATH@/res/contenteditable.css
@BINPATH@/res/designmode.css
@BINPATH@/res/ImageDocument.css
@@ -674,21 +677,6 @@
@BINPATH@/res/table-remove-row-active.gif
@BINPATH@/res/table-remove-row-hover.gif
@BINPATH@/res/table-remove-row.gif
@BINPATH@/res/text_caret.png
@BINPATH@/res/text_caret@1.5x.png
@BINPATH@/res/text_caret@2.25x.png
@BINPATH@/res/text_caret@2x.png
@BINPATH@/res/text_caret_tilt_left.png
@BINPATH@/res/text_caret_tilt_left@1.5x.png
@BINPATH@/res/text_caret_tilt_left@2.25x.png
@BINPATH@/res/text_caret_tilt_left@2x.png
@BINPATH@/res/text_caret_tilt_right.png
@BINPATH@/res/text_caret_tilt_right@1.5x.png
@BINPATH@/res/text_caret_tilt_right@2.25x.png
@BINPATH@/res/text_caret_tilt_right@2x.png
@BINPATH@/res/text_selection_handle.png
@BINPATH@/res/text_selection_handle@1.5.png
@BINPATH@/res/text_selection_handle@2.png
@BINPATH@/res/grabber.gif
#ifdef XP_MACOSX
@BINPATH@/res/cursors/*
@@ -136,6 +136,9 @@ def main(platform):
    add_dir_to_zip(xpi_path, os.path.join(distdir, "b2g"), "b2g", ("gaia", "B2G.app/Contents/MacOS/gaia"))
    # Then ship our own gaia profile
    add_dir_to_zip(xpi_path, os.path.join(gaia_path, "profile"), "profile")
    # Add "defaults" directory (required by add-on runner in Firefox 31 and
    # earlier)
    add_dir_to_zip(xpi_path, os.path.join(srcdir, "defaults"), "defaults")

if __name__ == '__main__':
    if 2 != len(sys.argv):
b2g/simulator/defaults/preferences/prefs.js (new empty file)
@@ -12,6 +12,9 @@ Cu.import("resource://gre/modules/FxAccounts.jsm");
let fxAccountsCommon = {};
Cu.import("resource://gre/modules/FxAccountsCommon.js", fxAccountsCommon);

// for master-password utilities
Cu.import("resource://services-sync/util.js");

const PREF_LAST_FXA_USER = "identity.fxaccounts.lastSignedInUserHash";
const PREF_SYNC_SHOW_CUSTOMIZATION = "services.sync.ui.showCustomizationDialog";
@@ -104,6 +107,12 @@ let wrapper = {
      return;
    }

    // If a master-password is enabled, we want to encourage the user to
    // unlock it. Things still work if not, but the user will probably need
    // to re-auth next startup (in which case we will get here again and
    // re-prompt)
    Utils.ensureMPUnlocked();

    let iframe = document.getElementById("remote");
    this.iframe = iframe;
    iframe.addEventListener("load", this);
@@ -4,20 +4,6 @@

"use strict";

const {classes: Cc, interfaces: Ci, utils: Cu} = Components;

Cu.import("resource://gre/modules/Services.jsm");

let service = Cc["@mozilla.org/weave/service;1"]
              .getService(Ci.nsISupports)
              .wrappedJSObject;

if (!service.allowPasswordsEngine) {
  let checkbox = document.getElementById("fxa-pweng-chk");
  checkbox.checked = false;
  checkbox.disabled = true;
}

addEventListener("dialogaccept", function () {
  let pane = document.getElementById("sync-customize-pane");
  pane.writePreferences(true);
@@ -45,8 +45,7 @@
          <checkbox label="&engine.bookmarks.label;"
                    accesskey="&engine.bookmarks.accesskey;"
                    preference="engine.bookmarks"/>
          <checkbox id="fxa-pweng-chk"
                    label="&engine.passwords.label;"
          <checkbox label="&engine.passwords.label;"
                    accesskey="&engine.passwords.accesskey;"
                    preference="engine.passwords"/>
          <checkbox label="&engine.history.label;"
@@ -87,12 +87,6 @@ let gSyncUtils = {
    this._openLink(Weave.Svc.Prefs.get(root + "privacyURL"));
  },

  openMPInfoPage: function (event) {
    event.stopPropagation();
    let baseURL = Services.urlFormatter.formatURLPref("app.support.baseURL");
    this._openLink(baseURL + "sync-master-password");
  },

  openFirstSyncProgressPage: function () {
    this._openLink("about:sync-progress");
  },
@@ -10,6 +10,10 @@ JS_MODULES_PATH = 'modules/loop'

XPCSHELL_TESTS_MANIFESTS += ['test/xpcshell/xpcshell.ini']

BROWSER_CHROME_MANIFESTS += [
    'test/mochitest/browser.ini',
]

EXTRA_JS_MODULES += [
    'MozLoopAPI.jsm',
    'MozLoopPushHandler.jsm',
browser/components/loop/test/mochitest/browser.ini (new file, 6 lines)
@@ -0,0 +1,6 @@
[DEFAULT]
support-files =
    head.js

[browser_mozLoop_charPref.js]
[browser_mozLoop_doNotDisturb.js]
@@ -0,0 +1,26 @@
/* Any copyright is dedicated to the Public Domain.
   http://creativecommons.org/publicdomain/zero/1.0/ */

/**
 * This is an integration test from navigator.mozLoop through to the end
 * effects - rather than just testing MozLoopAPI alone.
 */

add_task(loadLoopPanel);

add_task(function* test_mozLoop_charPref() {
  registerCleanupFunction(function () {
    Services.prefs.clearUserPref("loop.test");
  });

  Assert.ok(gMozLoopAPI, "mozLoop should exist");

  // Test setLoopCharPref
  gMozLoopAPI.setLoopCharPref("test", "foo");
  Assert.equal(Services.prefs.getCharPref("loop.test"), "foo",
               "should set loop pref value correctly");

  // Test getLoopCharPref
  Assert.equal(gMozLoopAPI.getLoopCharPref("test"), "foo",
               "should get loop pref value correctly");
});
@@ -0,0 +1,27 @@
/* Any copyright is dedicated to the Public Domain.
   http://creativecommons.org/publicdomain/zero/1.0/ */

/**
 * This is an integration test from navigator.mozLoop through to the end
 * effects - rather than just testing MozLoopAPI alone.
 */

add_task(loadLoopPanel);

add_task(function* test_mozLoop_doNotDisturb() {
  registerCleanupFunction(function () {
    Services.prefs.clearUserPref("loop.do_not_disturb");
  });

  Assert.ok(gMozLoopAPI, "mozLoop should exist");

  // Test doNotDisturb (getter)
  Services.prefs.setBoolPref("loop.do_not_disturb", true);
  Assert.equal(gMozLoopAPI.doNotDisturb, true,
               "Do not disturb should be true");

  // Test doNotDisturb (setter)
  gMozLoopAPI.doNotDisturb = false;
  Assert.equal(Services.prefs.getBoolPref("loop.do_not_disturb"), false,
               "Do not disturb should be false");
});
browser/components/loop/test/mochitest/head.js (new file, 71 lines)
@@ -0,0 +1,71 @@
/* Any copyright is dedicated to the Public Domain.
   http://creativecommons.org/publicdomain/zero/1.0/ */

var gMozLoopAPI;

function promiseGetMozLoopAPI() {
  let deferred = Promise.defer();
  let loopPanel = document.getElementById("loop-notification-panel");
  let btn = document.getElementById("loop-call-button");

  // Wait for the popup to be shown, then we can get the iframe and
  // wait for the iframe's load to be completed.
  loopPanel.addEventListener("popupshown", function onpopupshown() {
    loopPanel.removeEventListener("popupshown", onpopupshown, true);
    let iframe = document.getElementById(btn.getAttribute("notificationFrameId"));

    if (iframe.contentDocument &&
        iframe.contentDocument.readyState == "complete") {
      gMozLoopAPI = iframe.contentWindow.navigator.wrappedJSObject.mozLoop;

      deferred.resolve();
    } else {
      iframe.addEventListener("load", function panelOnLoad(e) {
        iframe.removeEventListener("load", panelOnLoad, true);

        gMozLoopAPI = iframe.contentWindow.navigator.wrappedJSObject.mozLoop;

        // We do this in an execute soon to allow any other event listeners to
        // be handled, just in case.
        deferred.resolve();
      }, true);
    }
  }, true);

  // Now we're setup, click the button.
  btn.click();

  // Remove the iframe after each test. This also avoids mochitest complaining
  // about leaks on shutdown as we intentionally hold the iframe open for the
  // life of the application.
  registerCleanupFunction(function() {
    loopPanel.hidePopup();
    loopPanel.removeChild(document.getElementById(btn.getAttribute("notificationFrameId")));
  });

  return deferred.promise;
}

/**
 * Loads the loop panel by clicking the button and waits for its open to complete.
 * It also registers
 *
 * This assumes that the tests are running in a generatorTest.
 */
function loadLoopPanel() {
  // Set prefs to ensure we don't access the network externally.
  Services.prefs.setCharPref("services.push.serverURL", "ws://localhost/");
  Services.prefs.setCharPref("loop.server", "http://localhost/");

  registerCleanupFunction(function() {
    Services.prefs.clearUserPref("services.push.serverURL");
    Services.prefs.clearUserPref("loop.server");
  });

  // Turn off animations to make tests quicker.
  let loopPanel = document.getElementById("loop-notification-panel");
  loopPanel.setAttribute("animate", "false");

  // Now get the actual API.
  yield promiseGetMozLoopAPI();
}
@ -154,17 +154,6 @@ let gSyncPane = {
|
||||
for (let checkbox of engines.querySelectorAll("checkbox")) {
|
||||
checkbox.disabled = enginesListDisabled;
|
||||
}
|
||||
|
||||
let checkbox = document.getElementById("fxa-pweng-chk");
|
||||
let help = document.getElementById("fxa-pweng-help");
|
||||
let allowPasswordsEngine = service.allowPasswordsEngine;
|
||||
|
||||
if (!allowPasswordsEngine) {
|
||||
checkbox.checked = false;
|
||||
}
|
||||
|
||||
checkbox.disabled = !allowPasswordsEngine || enginesListDisabled;
|
||||
help.hidden = allowPasswordsEngine || enginesListDisabled;
|
||||
});
|
||||
// If fxAccountEnabled is false and we are in a "not configured" state,
|
||||
// then fxAccounts is probably fully disabled rather than just unconfigured,
|
||||
|
@ -283,20 +283,9 @@
|
||||
<checkbox label="&engine.bookmarks.label;"
|
||||
accesskey="&engine.bookmarks.accesskey;"
|
||||
preference="engine.bookmarks"/>
|
||||
<hbox>
|
||||
<checkbox id="fxa-pweng-chk"
|
||||
label="&engine.passwords.label;"
|
||||
accesskey="&engine.passwords.accesskey;"
|
||||
preference="engine.passwords"/>
|
||||
|
||||
<vbox id="fxa-pweng-help">
|
||||
<spacer flex="1"/>
|
||||
<hbox id="fxa-pweng-help-link">
|
||||
<image onclick="gSyncUtils.openMPInfoPage(event);" />
|
||||
</hbox>
|
||||
<spacer flex="1"/>
|
||||
</vbox>
|
||||
</hbox>
|
||||
<checkbox label="&engine.passwords.label;"
|
||||
accesskey="&engine.passwords.accesskey;"
|
||||
preference="engine.passwords"/>
|
||||
<checkbox label="&engine.history.label;"
|
||||
accesskey="&engine.history.accesskey;"
|
||||
preference="engine.history"/>
|
||||
|
@ -154,17 +154,6 @@ let gSyncPane = {
|
||||
for (let checkbox of engines.querySelectorAll("checkbox")) {
|
||||
checkbox.disabled = enginesListDisabled;
|
||||
}
|
||||
|
||||
let checkbox = document.getElementById("fxa-pweng-chk");
|
||||
let help = document.getElementById("fxa-pweng-help");
|
||||
let allowPasswordsEngine = service.allowPasswordsEngine;
|
||||
|
||||
if (!allowPasswordsEngine) {
|
||||
checkbox.checked = false;
|
||||
}
|
||||
|
||||
checkbox.disabled = !allowPasswordsEngine || enginesListDisabled;
|
||||
help.hidden = allowPasswordsEngine || enginesListDisabled;
|
||||
});
|
||||
// If fxAccountEnabled is false and we are in a "not configured" state,
|
||||
// then fxAccounts is probably fully disabled rather than just unconfigured,
|
||||
|
@ -265,20 +265,9 @@
|
||||
<checkbox label="&engine.bookmarks.label;"
|
||||
accesskey="&engine.bookmarks.accesskey;"
|
||||
preference="engine.bookmarks"/>
|
||||
<hbox>
|
||||
<checkbox id="fxa-pweng-chk"
|
||||
label="&engine.passwords.label;"
|
||||
accesskey="&engine.passwords.accesskey;"
|
||||
preference="engine.passwords"/>
|
||||
|
||||
<vbox id="fxa-pweng-help">
|
||||
<spacer flex="1"/>
|
||||
<hbox id="fxa-pweng-help-link">
|
||||
<image onclick="gSyncUtils.openMPInfoPage(event);" />
|
||||
</hbox>
|
||||
<spacer flex="1"/>
|
||||
</vbox>
|
||||
</hbox>
|
||||
<checkbox label="&engine.passwords.label;"
|
||||
accesskey="&engine.passwords.accesskey;"
|
||||
preference="engine.passwords"/>
|
||||
<checkbox label="&engine.history.label;"
|
||||
accesskey="&engine.history.accesskey;"
|
||||
preference="engine.history"/>
|
||||
|
@ -171,12 +171,4 @@ label.small {
|
||||
margin-bottom: 0.6em;
|
||||
}
|
||||
|
||||
#fxa-pweng-help-link > label {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
#fxa-pweng-help-link > image {
|
||||
list-style-image: url("chrome://global/skin/icons/question-16.png");
|
||||
}
|
||||
|
||||
%endif
|
||||
|
@ -233,20 +233,4 @@ html|a.inline-link:-moz-focusring {
|
||||
margin-bottom: 0.6em;
|
||||
}
|
||||
|
||||
#fxa-pweng-help-link > label {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
#fxa-pweng-help-link > image {
|
||||
width: 16px;
|
||||
height: 16px;
|
||||
list-style-image: url("chrome://global/skin/icons/question-16.png");
|
||||
}
|
||||
|
||||
@media (min-resolution: 2dppx) {
|
||||
#fxa-pweng-help-link > image {
|
||||
list-style-image: url("chrome://global/skin/icons/question-32.png");
|
||||
}
|
||||
}
|
||||
|
||||
%endif
|
||||
|
@ -161,12 +161,4 @@ label.small {
|
||||
margin-bottom: 0.6em;
|
||||
}
|
||||
|
||||
#fxa-pweng-help-link > label {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
#fxa-pweng-help-link > image {
|
||||
list-style-image: url("chrome://global/skin/icons/question-16.png");
|
||||
}
|
||||
|
||||
%endif
|
||||
|
configure.in (14 lines changed)
@@ -7229,6 +7229,17 @@ if test -n "$JSGC_GENERATIONAL"; then
    AC_DEFINE(JSGC_GENERATIONAL)
fi

dnl ========================================================
dnl = Use a smaller chunk size for GC chunks
dnl ========================================================
MOZ_ARG_ENABLE_BOOL(small-chunk-size,
[  --enable-small-chunk-size  Allocate memory for JS GC things in smaller chunks],
    JS_GC_SMALL_CHUNK_SIZE=1,
    JS_GC_SMALL_CHUNK_SIZE= )
if test -n "$JS_GC_SMALL_CHUNK_SIZE"; then
    AC_DEFINE(JS_GC_SMALL_CHUNK_SIZE)
fi

dnl ========================================================
dnl Zealous JavaScript GC
dnl ========================================================
@@ -9252,6 +9263,9 @@ fi
if test -z "$JSGC_GENERATIONAL" ; then
    ac_configure_args="$ac_configure_args --disable-gcgenerational"
fi
if test -n "$JS_GC_SMALL_CHUNK_SIZE" ; then
    ac_configure_args="$ac_configure_args --enable-small-chunk-size"
fi
if test -z "$MOZ_NATIVE_NSPR"; then
    ac_configure_args="$ac_configure_args --with-nspr-cflags='$NSPR_CFLAGS'"
    ac_configure_args="$ac_configure_args --with-nspr-libs='$NSPR_LIBS'"
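Elsewhere in this commit (js/public/HeapAPI.h and js/src/gc/Heap.h) the JS_GC_SMALL_CHUNK_SIZE define selects 2^18-byte GC chunks instead of the default 2^20-byte ones, which js/src/configure.in describes as the smaller 256K chunks used for B2G. A compile-time sketch of that arithmetic, mirroring the names used in those headers:

#include <cstddef>

// Mirrors the constants this commit adds; only the numbers matter here.
#ifdef JS_GC_SMALL_CHUNK_SIZE
constexpr std::size_t ChunkShift = 18;  // --enable-small-chunk-size (B2G)
#else
constexpr std::size_t ChunkShift = 20;  // default configuration
#endif
constexpr std::size_t ChunkSize = std::size_t(1) << ChunkShift;

static_assert((std::size_t(1) << 18) == 256 * 1024, "small chunks are 256 KiB");
static_assert((std::size_t(1) << 20) == 1024 * 1024, "default chunks are 1 MiB");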
@ -397,6 +397,12 @@ public:
|
||||
|
||||
virtual JSObject* WrapObject(JSContext *aCx) MOZ_OVERRIDE;
|
||||
|
||||
/**
|
||||
* returns true if we are in priviliged code or
|
||||
* layout.css.getBoxQuads.enabled == true.
|
||||
*/
|
||||
static bool HasBoxQuadsSupport(JSContext* aCx, JSObject* /* unused */);
|
||||
|
||||
protected:
|
||||
/**
|
||||
* WrapNode is called from WrapObject to actually wrap this node, WrapObject
|
||||
|
@ -91,6 +91,7 @@
|
||||
#include "nsUnicharUtils.h"
|
||||
#include "nsXBLBinding.h"
|
||||
#include "nsXBLPrototypeBinding.h"
|
||||
#include "mozilla/Preferences.h"
|
||||
#include "prprf.h"
|
||||
#include "xpcpublic.h"
|
||||
#include "nsCSSRuleProcessor.h"
|
||||
@ -2758,3 +2759,11 @@ nsINode::GetParentElementCrossingShadowRoot() const
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool
|
||||
nsINode::HasBoxQuadsSupport(JSContext* aCx, JSObject* /* unused */)
|
||||
{
|
||||
return xpc::AccessCheck::isChrome(js::GetContextCompartment(aCx)) ||
|
||||
Preferences::GetBool("layout.css.getBoxQuads.enabled");
|
||||
}
|
||||
|
||||
|
@ -1430,13 +1430,13 @@ HTMLMediaElement::Seek(double aTime,
|
||||
}
|
||||
|
||||
// Clamp the seek target to inside the seekable ranges.
|
||||
dom::TimeRanges seekable;
|
||||
if (NS_FAILED(mDecoder->GetSeekable(&seekable))) {
|
||||
nsRefPtr<dom::TimeRanges> seekable = new dom::TimeRanges();
|
||||
if (NS_FAILED(mDecoder->GetSeekable(seekable))) {
|
||||
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
||||
return;
|
||||
}
|
||||
uint32_t length = 0;
|
||||
seekable.GetLength(&length);
|
||||
seekable->GetLength(&length);
|
||||
if (!length) {
|
||||
return;
|
||||
}
|
||||
@ -1448,7 +1448,7 @@ HTMLMediaElement::Seek(double aTime,
|
||||
// http://www.whatwg.org/specs/web-apps/current-work/multipage/the-video-element.html#seeking
|
||||
int32_t range = 0;
|
||||
bool isInRange = false;
|
||||
if (NS_FAILED(IsInRanges(seekable, aTime, isInRange, range))) {
|
||||
if (NS_FAILED(IsInRanges(*seekable, aTime, isInRange, range))) {
|
||||
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
||||
return;
|
||||
}
|
||||
@ -1458,11 +1458,11 @@ HTMLMediaElement::Seek(double aTime,
|
||||
// for |range| is -1.
|
||||
if (uint32_t(range + 1) < length) {
|
||||
double leftBound, rightBound;
|
||||
if (NS_FAILED(seekable.End(range, &leftBound))) {
|
||||
if (NS_FAILED(seekable->End(range, &leftBound))) {
|
||||
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
||||
return;
|
||||
}
|
||||
if (NS_FAILED(seekable.Start(range + 1, &rightBound))) {
|
||||
if (NS_FAILED(seekable->Start(range + 1, &rightBound))) {
|
||||
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
||||
return;
|
||||
}
|
||||
@ -1477,7 +1477,7 @@ HTMLMediaElement::Seek(double aTime,
|
||||
} else {
|
||||
// Seek target is after the end last range in seekable data.
|
||||
// Clamp the seek target to the end of the last seekable range.
|
||||
if (NS_FAILED(seekable.End(length - 1, &aTime))) {
|
||||
if (NS_FAILED(seekable->End(length - 1, &aTime))) {
|
||||
aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
|
||||
return;
|
||||
}
|
||||
@ -1485,7 +1485,7 @@ HTMLMediaElement::Seek(double aTime,
|
||||
} else {
|
||||
// aTime is before the first range in |seekable|, the closest point we can
|
||||
// seek to is the start of the first range.
|
||||
seekable.Start(0, &aTime);
|
||||
seekable->Start(0, &aTime);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -21,12 +21,6 @@ class TimeRanges;
|
||||
|
||||
}
|
||||
|
||||
template<>
|
||||
struct HasDangerousPublicDestructor<dom::TimeRanges>
|
||||
{
|
||||
static const bool value = true;
|
||||
};
|
||||
|
||||
namespace dom {
|
||||
|
||||
// Implements media TimeRanges:
|
||||
@ -38,7 +32,6 @@ public:
|
||||
NS_DECL_NSIDOMTIMERANGES
|
||||
|
||||
TimeRanges();
|
||||
~TimeRanges();
|
||||
|
||||
void Add(double aStart, double aEnd);
|
||||
|
||||
@ -63,6 +56,7 @@ public:
|
||||
virtual double End(uint32_t aIndex, ErrorResult& aRv);
|
||||
|
||||
private:
|
||||
~TimeRanges();
|
||||
|
||||
// Comparator which orders TimeRanges by start time. Used by Normalize().
|
||||
struct TimeRange
|
||||
|
@ -1429,15 +1429,15 @@ void MediaDecoderStateMachine::NotifyDataArrived(const char* aBuffer,
|
||||
// faster than played, mEndTime won't reflect the end of playable data
|
||||
// since we haven't played the frame at the end of buffered data. So update
|
||||
// mEndTime here as new data is downloaded to prevent such a lag.
|
||||
dom::TimeRanges buffered;
|
||||
nsRefPtr<dom::TimeRanges> buffered = new dom::TimeRanges();
|
||||
if (mDecoder->IsInfinite() &&
|
||||
NS_SUCCEEDED(mDecoder->GetBuffered(&buffered)))
|
||||
NS_SUCCEEDED(mDecoder->GetBuffered(buffered)))
|
||||
{
|
||||
uint32_t length = 0;
|
||||
buffered.GetLength(&length);
|
||||
buffered->GetLength(&length);
|
||||
if (length) {
|
||||
double end = 0;
|
||||
buffered.End(length - 1, &end);
|
||||
buffered->End(length - 1, &end);
|
||||
ReentrantMonitorAutoEnter mon(mDecoder->GetReentrantMonitor());
|
||||
mEndTime = std::max<int64_t>(mEndTime, end * USECS_PER_S);
|
||||
}
|
||||
|
@ -54,8 +54,7 @@ JSErrorFormatString ErrorFormatString[] = {
|
||||
};
|
||||
|
||||
const JSErrorFormatString*
|
||||
GetErrorMessage(void* aUserRef, const char* aLocale,
|
||||
const unsigned aErrorNumber)
|
||||
GetErrorMessage(void* aUserRef, const unsigned aErrorNumber)
|
||||
{
|
||||
MOZ_ASSERT(aErrorNumber < ArrayLength(ErrorFormatString));
|
||||
return &ErrorFormatString[aErrorNumber];
|
||||
@ -139,8 +138,7 @@ ErrorResult::ThrowTypeError(const dom::ErrNum errorNumber, ...)
|
||||
mResult = NS_ERROR_TYPE_ERR;
|
||||
Message* message = new Message();
|
||||
message->mErrorNumber = errorNumber;
|
||||
uint16_t argCount =
|
||||
dom::GetErrorMessage(nullptr, nullptr, errorNumber)->argCount;
|
||||
uint16_t argCount = dom::GetErrorMessage(nullptr, errorNumber)->argCount;
|
||||
MOZ_ASSERT(argCount <= 10);
|
||||
argCount = std::min<uint16_t>(argCount, 10);
|
||||
while (argCount--) {
|
||||
|
@ -23,7 +23,7 @@ dictionary ConvertCoordinateOptions {
|
||||
|
||||
[NoInterfaceObject]
|
||||
interface GeometryUtils {
|
||||
[Throws, Pref="layout.css.getBoxQuads.enabled"]
|
||||
[Throws, Func="nsINode::HasBoxQuadsSupport"]
|
||||
sequence<DOMQuad> getBoxQuads(optional BoxQuadOptions options);
|
||||
[Throws, Pref="layout.css.convertFromNode.enabled"]
|
||||
DOMQuad convertQuadFromNode(DOMQuad quad, GeometryNode from, optional ConvertCoordinateOptions options);
|
||||
@ -38,4 +38,4 @@ Element implements GeometryUtils;
|
||||
// PseudoElement implements GeometryUtils;
|
||||
Document implements GeometryUtils;
|
||||
|
||||
typedef (Text or Element /* or PseudoElement */ or Document) GeometryNode;
|
||||
typedef (Text or Element /* or PseudoElement */ or Document) GeometryNode;
|
||||
|
@ -27,6 +27,9 @@ FAIL_ON_WARNINGS = True
|
||||
|
||||
FINAL_LIBRARY = 'xul'
|
||||
RESOURCE_FILES += [
|
||||
'res/caret_left.svg',
|
||||
'res/caret_middle.svg',
|
||||
'res/caret_right.svg',
|
||||
'res/EditorOverride.css',
|
||||
'res/grabber.gif',
|
||||
'res/table-add-column-after-active.gif',
|
||||
@ -47,19 +50,4 @@ RESOURCE_FILES += [
|
||||
'res/table-remove-row-active.gif',
|
||||
'res/table-remove-row-hover.gif',
|
||||
'res/table-remove-row.gif',
|
||||
'res/text_caret.png',
|
||||
'res/text_caret@1.5x.png',
|
||||
'res/text_caret@2.25x.png',
|
||||
'res/text_caret@2x.png',
|
||||
'res/text_caret_tilt_left.png',
|
||||
'res/text_caret_tilt_left@1.5x.png',
|
||||
'res/text_caret_tilt_left@2.25x.png',
|
||||
'res/text_caret_tilt_left@2x.png',
|
||||
'res/text_caret_tilt_right.png',
|
||||
'res/text_caret_tilt_right@1.5x.png',
|
||||
'res/text_caret_tilt_right@2.25x.png',
|
||||
'res/text_caret_tilt_right@2x.png',
|
||||
'res/text_selection_handle.png',
|
||||
'res/text_selection_handle@1.5.png',
|
||||
'res/text_selection_handle@2.png',
|
||||
]
|
||||
|
editor/composer/res/caret_left.svg (new file, 24 lines)
@ -0,0 +1,24 @@
|
||||
<?xml version="1.0" encoding="iso-8859-1"?>
|
||||
<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||
width="29px" height="31px" viewBox="0 0 29 31" style="enable-background:new 0 0 29 31;" xml:space="preserve">
|
||||
<!-- TODO: Enable shadow after bug 1015575 is resolved.
|
||||
<defs>
|
||||
<filter id="caretFilter">
|
||||
<feOffset result="offsetOut" in="SourceAlpha" dx="1" dy="1" />
|
||||
<feGaussianBlur result="blurOut" in="offsetOut" stdDeviation="0.5" />
|
||||
<feBlend in="SourceGraphic" in2="blurOut" mode="normal" />
|
||||
</filter>
|
||||
</defs>
|
||||
<g fill="#2da9e3" filter="url(#caretFilter)">
|
||||
-->
|
||||
<g fill="#2da9e3">
|
||||
<path d="M25.368,2.674c-0.049,0.104-0.09,0.209-0.134,0.314C25.304,2.893,25.347,2.786,25.368,2.674z"/>
|
||||
<path d="M24.27,1.734c0.003-0.001,0.008-0.003,0.013-0.004C24.277,1.73,24.272,1.733,24.27,1.734z"/>
|
||||
<path d="M24.583,8.574C24.25,6.7,24.478,4.755,25.234,2.989c0.044-0.105,0.085-0.21,0.134-0.314
|
||||
c0.053-0.254-0.016-0.528-0.204-0.73c-0.232-0.249-0.581-0.322-0.882-0.215c-0.005,0.001-0.01,0.003-0.013,0.004
|
||||
c-1.915,0.71-4.001,0.798-5.954,0.277C15.015,0.898,11.222,1.587,8.5,4.134c-3.947,3.691-4.155,9.882-0.464,13.828
|
||||
c3.691,3.947,9.881,4.154,13.828,0.462C24.64,15.828,25.562,11.994,24.583,8.574z"/>
|
||||
</g>
|
||||
</svg>
|
editor/composer/res/caret_middle.svg (new file, 24 lines)
@ -0,0 +1,24 @@
|
||||
<?xml version="1.0" encoding="iso-8859-1"?>
|
||||
<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||
width="29px" height="31px" style="enable-background:new 0 0 29 31;" xml:space="preserve">
|
||||
<!-- TODO: Enable shadow after bug 1015575 is resolved.
|
||||
<defs>
|
||||
<filter id="caretFilter">
|
||||
<feOffset result="offsetOut" in="SourceAlpha" dx="1" dy="1" />
|
||||
<feGaussianBlur result="blurOut" in="offsetOut" stdDeviation="0.5" />
|
||||
<feBlend in="SourceGraphic" in2="blurOut" mode="normal" />
|
||||
</filter>
|
||||
</defs>
|
||||
<g fill="#2da9e3" filter="url(#caretFilter)">
|
||||
-->
|
||||
<g fill="#2da9e3">
|
||||
<path d="M15.174,1.374c0.042,0.106,0.091,0.208,0.138,0.312C15.288,1.57,15.239,1.466,15.174,1.374z"/>
|
||||
<path d="M13.735,1.534c0.002-0.003,0.004-0.009,0.006-0.013C13.739,1.525,13.737,1.531,13.735,1.534z"/>
|
||||
<path d="M18.945,5.978c-1.596-1.038-2.861-2.532-3.634-4.292c-0.047-0.104-0.096-0.206-0.138-0.312
|
||||
c-0.15-0.212-0.396-0.349-0.674-0.349c-0.34,0-0.631,0.204-0.759,0.497c-0.002,0.004-0.004,0.009-0.006,0.013
|
||||
c-0.789,1.883-2.149,3.467-3.864,4.538c-3.068,1.651-5.155,4.892-5.155,8.62c0,5.404,4.379,9.784,9.783,9.784
|
||||
c5.403,0,9.783-4.38,9.783-9.784C24.283,10.891,22.113,7.598,18.945,5.978z"/>
|
||||
</g>
|
||||
</svg>
|
editor/composer/res/caret_right.svg (new file, 24 lines)
@ -0,0 +1,24 @@
|
||||
<?xml version="1.0" encoding="iso-8859-1"?>
|
||||
<!-- Generator: Adobe Illustrator 16.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
|
||||
width="29px" height="31px" viewBox="0 0 29 31" style="enable-background:new 0 0 29 31;" xml:space="preserve">
|
||||
<!-- TODO: Enable shadow after bug 1015575 is resolved.
|
||||
<defs>
|
||||
<filter id="caretFilter">
|
||||
<feOffset result="offsetOut" in="SourceAlpha" dx="1" dy="1" />
|
||||
<feGaussianBlur result="blurOut" in="offsetOut" stdDeviation="0.5" />
|
||||
<feBlend in="SourceGraphic" in2="blurOut" mode="normal" />
|
||||
</filter>
|
||||
</defs>
|
||||
<g fill="#2da9e3" filter="url(#caretFilter)">
|
||||
-->
|
||||
<g fill="#2da9e3">
|
||||
<path fill="#2da9e3" d="M27.296,2.674c-0.049,0.104-0.09,0.209-0.134,0.314C27.231,2.893,27.274,2.786,27.296,2.674z"/>
|
||||
<path fill="#2da9e3" d="M26.197,1.734C26.2,1.733,26.205,1.73,26.21,1.729C26.205,1.73,26.2,1.733,26.197,1.734z"/>
|
||||
<path fill="#2da9e3" d="M4.299,8.574C4.632,6.7,4.404,4.755,3.647,2.989c-0.044-0.105-0.085-0.21-0.134-0.314C3.461,2.42,3.529,2.146,3.718,1.944
|
||||
C3.95,1.696,4.299,1.623,4.6,1.729c0.005,0.001,0.01,0.003,0.013,0.004c1.915,0.71,4.001,0.798,5.954,0.277
|
||||
c3.301-1.113,7.094-0.423,9.815,2.123c3.947,3.691,4.156,9.882,0.465,13.828c-3.691,3.947-9.881,4.154-13.828,0.462
|
||||
C4.242,15.828,3.319,11.994,4.299,8.574z"/>
|
||||
</g>
|
||||
</svg>
|
@@ -247,6 +247,16 @@ GLBlitHelper::InitTexQuadProgram(BlitType target)
    }                                                       \n\
";
#endif
/* From Rec601:
[R]   [1.1643835616438356,  0.0,                 1.5960267857142858]      [ Y -  16]
[G] = [1.1643835616438358, -0.3917622900949137, -0.8129676472377708]    x [Cb - 128]
[B]   [1.1643835616438356,  2.017232142857143,   8.862867620416422e-17]   [Cr - 128]

For [0,1] instead of [0,255], and to 5 places:
[R]   [1.16438,  0.00000,  1.59603]   [ Y - 0.06275]
[G] = [1.16438, -0.39176, -0.81297] x [Cb - 0.50196]
[B]   [1.16438,  2.01723,  0.00000]   [Cr - 0.50196]
*/
const char kTexYUVPlanarBlit_FragShaderSource[] = "\
    varying vec2 vTexCoord;                                 \n\
    uniform sampler2D uYTexture;                            \n\
@@ -259,12 +269,12 @@ GLBlitHelper::InitTexQuadProgram(BlitType target)
        float y = texture2D(uYTexture, vTexCoord * uYTexScale).r;       \n\
        float cb = texture2D(uCbTexture, vTexCoord * uCbCrTexScale).r;  \n\
        float cr = texture2D(uCrTexture, vTexCoord * uCbCrTexScale).r;  \n\
        y = (y - 0.0625) * 1.164;                           \n\
        cb = cb - 0.504;                                    \n\
        cr = cr - 0.5;                                      \n\
        gl_FragColor.r = floor((y + cr * 1.596) * 256.0)/256.0;               \n\
        gl_FragColor.g = floor((y - 0.813 * cr - 0.391 * cb) * 256.0)/256.0;  \n\
        gl_FragColor.b = floor((y + cb * 2.018) * 256.0) /256.0;              \n\
        y = (y - 0.06275) * 1.16438;                        \n\
        cb = cb - 0.50196;                                  \n\
        cr = cr - 0.50196;                                  \n\
        gl_FragColor.r = y + cr * 1.59603;                  \n\
        gl_FragColor.g = y - 0.81297 * cr - 0.39176 * cb;   \n\
        gl_FragColor.b = y + cb * 2.01723;                  \n\
        gl_FragColor.a = 1.0;                               \n\
    }                                                       \n\
";
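The replacement shader applies the five-decimal Rec601 coefficients from the comment directly, instead of the coarser constants and floor()-based quantization it used before. As a cross-check of those numbers on the CPU (a sketch, not part of the patch), converting one 8-bit video-range YCbCr sample with the same constants:

#include <algorithm>
#include <cstdio>

// Convert a single Rec601 video-range YCbCr sample (0-255 components) to RGB
// in [0,1], using the normalized coefficients quoted in the comment above.
void YCbCrToRGB(int Y, int Cb, int Cr, double rgb[3]) {
  double y  = (Y / 255.0 - 0.06275) * 1.16438;  // remove the 16/255 black offset
  double cb =  Cb / 255.0 - 0.50196;            // center chroma around zero
  double cr =  Cr / 255.0 - 0.50196;
  rgb[0] = y + 1.59603 * cr;
  rgb[1] = y - 0.81297 * cr - 0.39176 * cb;
  rgb[2] = y + 2.01723 * cb;
  for (int i = 0; i < 3; ++i)
    rgb[i] = std::min(1.0, std::max(0.0, rgb[i]));  // clamp as a framebuffer write would
}

int main() {
  double rgb[3];
  YCbCrToRGB(235, 128, 128, rgb);  // nominal white in video range
  std::printf("%.3f %.3f %.3f\n", rgb[0], rgb[1], rgb[2]);  // prints ~1.000 1.000 1.000
}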
@ -778,6 +778,7 @@ static GrGLInterface* CreateGrGLInterfaceFromGLContext(GLContext* context)
|
||||
|
||||
GrGLExtensions extensions;
|
||||
if (!extensions.init(i->fStandard, glGetString_mozilla, NULL, glGetIntegerv_mozilla)) {
|
||||
delete i;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
@ -58,11 +58,16 @@ public:
|
||||
void
|
||||
RemoveTextureFromCompositableTracker::ReleaseTextureClient()
|
||||
{
|
||||
if (mTextureClient) {
|
||||
if (mTextureClient &&
|
||||
mTextureClient->GetAllocator() &&
|
||||
!mTextureClient->GetAllocator()->IsImageBridgeChild())
|
||||
{
|
||||
TextureClientReleaseTask* task = new TextureClientReleaseTask(mTextureClient);
|
||||
RefPtr<ISurfaceAllocator> allocator = mTextureClient->GetAllocator();
|
||||
mTextureClient = nullptr;
|
||||
allocator->GetMessageLoop()->PostTask(FROM_HERE, task);
|
||||
} else {
|
||||
mTextureClient = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -7,22 +7,15 @@
|
||||
#include <stdint.h> // for uint8_t, uint32_t, etc
|
||||
#include "Layers.h" // for Layer, etc
|
||||
#include "gfx2DGlue.h"
|
||||
#include "gfxContext.h" // for gfxContext, etc
|
||||
#include "gfxPlatform.h" // for gfxPlatform
|
||||
#include "gfxPoint.h" // for gfxIntSize, gfxSize
|
||||
#include "gfxReusableSurfaceWrapper.h" // for gfxReusableSurfaceWrapper
|
||||
#include "mozilla/gfx/BaseSize.h" // for BaseSize
|
||||
#include "mozilla/ipc/SharedMemory.h" // for SharedMemory, etc
|
||||
#include "mozilla/layers/CompositableClient.h" // for CompositableClient
|
||||
#include "mozilla/layers/CompositableForwarder.h"
|
||||
#include "mozilla/layers/ISurfaceAllocator.h"
|
||||
#include "mozilla/layers/ImageDataSerializer.h"
|
||||
#include "mozilla/layers/ShadowLayers.h" // for ShadowLayerForwarder
|
||||
#include "mozilla/layers/SharedPlanarYCbCrImage.h"
|
||||
#include "mozilla/layers/YCbCrImageDataSerializer.h"
|
||||
#include "nsDebug.h" // for NS_ASSERTION, NS_WARNING, etc
|
||||
#include "nsISupportsImpl.h" // for MOZ_COUNT_CTOR, etc
|
||||
#include "ImageContainer.h" // for PlanarYCbCrImage, etc
|
||||
#include "ImageContainer.h" // for PlanarYCbCrData, etc
|
||||
#include "mozilla/gfx/2D.h"
|
||||
#include "mozilla/layers/TextureClientOGL.h"
|
||||
#include "mozilla/layers/PTextureChild.h"
|
||||
|
@ -198,8 +198,6 @@ public:
|
||||
|
||||
bool IsOnCompositorSide() const MOZ_OVERRIDE { return false; }
|
||||
|
||||
virtual bool IsImageBridgeChild() const { return false; }
|
||||
|
||||
/**
|
||||
* Returns the type of backend that is used off the main thread.
|
||||
* We only don't allow changing the backend type at runtime so this value can
|
||||
|
@ -165,6 +165,8 @@ public:
|
||||
virtual bool IPCOpen() const { return true; }
|
||||
virtual bool IsSameProcess() const = 0;
|
||||
|
||||
virtual bool IsImageBridgeChild() const { return false; }
|
||||
|
||||
virtual MessageLoop * GetMessageLoop() const
|
||||
{
|
||||
return mDefaultMessageLoop;
|
||||
|
@@ -32,7 +32,11 @@ const size_t ArenaShift = 12;
const size_t ArenaSize = size_t(1) << ArenaShift;
const size_t ArenaMask = ArenaSize - 1;

#ifdef JS_GC_SMALL_CHUNK_SIZE
const size_t ChunkShift = 18;
#else
const size_t ChunkShift = 20;
#endif
const size_t ChunkSize = size_t(1) << ChunkShift;
const size_t ChunkMask = ChunkSize - 1;

@@ -41,8 +45,13 @@ const size_t CellSize = size_t(1) << CellShift;
const size_t CellMask = CellSize - 1;

/* These are magic constants derived from actual offsets in gc/Heap.h. */
#ifdef JS_GC_SMALL_CHUNK_SIZE
const size_t ChunkMarkBitmapOffset = 258104;
const size_t ChunkMarkBitmapBits = 31744;
#else
const size_t ChunkMarkBitmapOffset = 1032352;
const size_t ChunkMarkBitmapBits = 129024;
#endif
const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
const size_t ChunkLocationOffset = ChunkSize - 2 * sizeof(void*) - sizeof(uint64_t);
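Those magic constants can be cross-checked against the ArenasPerChunk values asserted in js/src/gc/Heap.h by this same commit (62 arenas per small chunk, 252 per default chunk): with 4 KiB arenas and 8-byte cells, each arena needs 512 mark bits, which is where 31744 and 129024 come from. A sketch of the arithmetic (the arena size comes from the hunk above; the 8-byte cell size is an assumption consistent with these numbers):

#include <cstddef>

constexpr std::size_t ArenaSize = std::size_t(1) << 12;     // 4 KiB arenas (ArenaShift = 12)
constexpr std::size_t CellSize = std::size_t(1) << 3;       // 8-byte GC cells
constexpr std::size_t BitsPerArena = ArenaSize / CellSize;  // one mark bit per cell

static_assert(BitsPerArena == 512, "512 mark bits cover one arena");
static_assert(62 * BitsPerArena == 31744, "small chunk (256 KiB): 62 arenas");
static_assert(252 * BitsPerArena == 129024, "default chunk (1 MiB): 252 arenas");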
@ -3214,6 +3214,19 @@ if test -n "$JSGC_USE_EXACT_ROOTING"; then
|
||||
AC_DEFINE(JSGC_USE_EXACT_ROOTING)
|
||||
fi
|
||||
|
||||
dnl ========================================================
|
||||
dnl = Use a smaller chunk size for GC chunks
|
||||
dnl ========================================================
|
||||
dnl Use large (1MB) chunks by default. For B2G this option is used to give
|
||||
dnl smaller (currently 256K) chunks.
|
||||
MOZ_ARG_ENABLE_BOOL(small-chunk-size,
|
||||
[ --enable-small-chunk-size Allocate memory for JS GC things in smaller chunks],
|
||||
JS_GC_SMALL_CHUNK_SIZE=1,
|
||||
JS_GC_SMALL_CHUNK_SIZE= )
|
||||
if test -n "$JS_GC_SMALL_CHUNK_SIZE"; then
|
||||
AC_DEFINE(JS_GC_SMALL_CHUNK_SIZE)
|
||||
fi
|
||||
|
||||
dnl ========================================================
|
||||
dnl = Use GC tracing
|
||||
dnl ========================================================
|
||||
|
@ -877,7 +877,7 @@ static const JSErrorFormatString ErrorFormatString[CTYPESERR_LIMIT] = {
|
||||
};
|
||||
|
||||
static const JSErrorFormatString*
|
||||
GetErrorMessage(void* userRef, const char* locale, const unsigned errorNumber)
|
||||
GetErrorMessage(void* userRef, const unsigned errorNumber)
|
||||
{
|
||||
if (0 < errorNumber && errorNumber < CTYPESERR_LIMIT)
|
||||
return &ErrorFormatString[errorNumber];
|
||||
|
@ -419,8 +419,6 @@ class GCRuntime
|
||||
/* List of compartments and zones (protected by the GC lock). */
|
||||
js::gc::ZoneVector zones;
|
||||
|
||||
js::gc::SystemPageAllocator pageAllocator;
|
||||
|
||||
#ifdef JSGC_GENERATIONAL
|
||||
js::Nursery nursery;
|
||||
js::gc::StoreBuffer storeBuffer;
|
||||
|
@ -707,7 +707,12 @@ const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
|
||||
const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
|
||||
const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
|
||||
const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
|
||||
|
||||
#ifdef JS_GC_SMALL_CHUNK_SIZE
|
||||
static_assert(ArenasPerChunk == 62, "Do not accidentally change our heap's density.");
|
||||
#else
|
||||
static_assert(ArenasPerChunk == 252, "Do not accidentally change our heap's density.");
|
||||
#endif
|
||||
|
||||
/* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
|
||||
struct ChunkBitmap
|
||||
|
@ -11,11 +11,63 @@
|
||||
#include "js/HeapAPI.h"
|
||||
#include "vm/Runtime.h"
|
||||
|
||||
using namespace js;
|
||||
using namespace js::gc;
|
||||
#if defined(XP_WIN)
|
||||
|
||||
bool
|
||||
SystemPageAllocator::decommitEnabled()
|
||||
#include "jswin.h"
|
||||
#include <psapi.h>
|
||||
|
||||
#elif defined(SOLARIS)
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#elif defined(XP_UNIX)
|
||||
|
||||
#include <algorithm>
|
||||
#include <errno.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#endif
|
||||
|
||||
namespace js {
|
||||
namespace gc {
|
||||
|
||||
// The GC can only safely decommit memory when the page size of the
|
||||
// running process matches the compiled arena size.
|
||||
static size_t pageSize = 0;
|
||||
|
||||
// The OS allocation granularity may not match the page size.
|
||||
static size_t allocGranularity = 0;
|
||||
|
||||
#if defined(XP_UNIX)
|
||||
// The addresses handed out by mmap may grow up or down.
|
||||
static int growthDirection = 0;
|
||||
#endif
|
||||
|
||||
// The maximum number of unalignable chunks to temporarily keep alive in
|
||||
// the last ditch allocation pass. OOM crash reports generally show <= 7
|
||||
// unaligned chunks available (bug 1005844 comment #16).
|
||||
static const int MaxLastDitchAttempts = 8;
|
||||
|
||||
static void GetNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
|
||||
size_t alignment);
|
||||
static bool GetNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
|
||||
size_t size, size_t alignment, bool addrsGrowDown);
|
||||
static void *MapAlignedPagesSlow(size_t size, size_t alignment);
|
||||
static void *MapAlignedPagesLastDitch(size_t size, size_t alignment);
|
||||
|
||||
size_t
|
||||
SystemPageSize()
|
||||
{
|
||||
return pageSize;
|
||||
}
|
||||
|
||||
static bool
|
||||
DecommitEnabled()
|
||||
{
|
||||
return pageSize == ArenaSize;
|
||||
}
|
||||
@ -32,16 +84,24 @@ OffsetFromAligned(void *p, size_t alignment)
|
||||
return uintptr_t(p) % alignment;
|
||||
}
|
||||
|
||||
#if defined(XP_WIN)
|
||||
#include "jswin.h"
|
||||
#include <psapi.h>
|
||||
|
||||
SystemPageAllocator::SystemPageAllocator()
|
||||
void *
|
||||
TestMapAlignedPagesLastDitch(size_t size, size_t alignment)
|
||||
{
|
||||
SYSTEM_INFO sysinfo;
|
||||
GetSystemInfo(&sysinfo);
|
||||
pageSize = sysinfo.dwPageSize;
|
||||
allocGranularity = sysinfo.dwAllocationGranularity;
|
||||
return MapAlignedPagesLastDitch(size, alignment);
|
||||
}
|
||||
|
||||
|
||||
#if defined(XP_WIN)
|
||||
|
||||
void
|
||||
InitMemorySubsystem()
|
||||
{
|
||||
if (pageSize == 0) {
|
||||
SYSTEM_INFO sysinfo;
|
||||
GetSystemInfo(&sysinfo);
|
||||
pageSize = sysinfo.dwPageSize;
|
||||
allocGranularity = sysinfo.dwAllocationGranularity;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void *
|
||||
@ -57,7 +117,7 @@ MapMemory(size_t length, int flags, int prot = PAGE_READWRITE)
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
|
||||
MapAlignedPages(size_t size, size_t alignment)
|
||||
{
|
||||
MOZ_ASSERT(size >= alignment);
|
||||
MOZ_ASSERT(size % alignment == 0);
|
||||
@ -75,25 +135,25 @@ SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
|
||||
|
||||
void *retainedAddr;
|
||||
size_t retainedSize;
|
||||
getNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
|
||||
GetNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
|
||||
if (retainedAddr)
|
||||
unmapPages(retainedAddr, retainedSize);
|
||||
UnmapPages(retainedAddr, retainedSize);
|
||||
if (p) {
|
||||
if (OffsetFromAligned(p, alignment) == 0)
|
||||
return p;
|
||||
unmapPages(p, size);
|
||||
UnmapPages(p, size);
|
||||
}
|
||||
|
||||
p = mapAlignedPagesSlow(size, alignment);
|
||||
p = MapAlignedPagesSlow(size, alignment);
|
||||
if (!p)
|
||||
return mapAlignedPagesLastDitch(size, alignment);
|
||||
return MapAlignedPagesLastDitch(size, alignment);
|
||||
|
||||
MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
|
||||
return p;
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
static void *
|
||||
MapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
{
|
||||
/*
|
||||
* Windows requires that there be a 1:1 mapping between VM allocation
|
||||
@ -116,7 +176,7 @@ SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
if (!p)
|
||||
return nullptr;
|
||||
void *chunkStart = (void *)AlignBytes(uintptr_t(p), alignment);
|
||||
unmapPages(p, reserveSize);
|
||||
UnmapPages(p, reserveSize);
|
||||
p = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);
|
||||
|
||||
/* Failure here indicates a race with another thread, so try again. */
|
||||
@ -133,32 +193,32 @@ SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
* address each time, we temporarily hold onto the unaligned part of each chunk
|
||||
* until the allocator gives us a chunk that either is, or can be aligned.
|
||||
*/
|
||||
void *
|
||||
SystemPageAllocator::mapAlignedPagesLastDitch(size_t size, size_t alignment)
|
||||
static void *
|
||||
MapAlignedPagesLastDitch(size_t size, size_t alignment)
|
||||
{
|
||||
void *p = nullptr;
|
||||
void *tempMaps[MaxLastDitchAttempts];
|
||||
int attempt = 0;
|
||||
for (; attempt < MaxLastDitchAttempts; ++attempt) {
|
||||
size_t retainedSize;
|
||||
getNewChunk(&p, tempMaps + attempt, &retainedSize, size, alignment);
|
||||
GetNewChunk(&p, tempMaps + attempt, &retainedSize, size, alignment);
|
||||
if (OffsetFromAligned(p, alignment) == 0) {
|
||||
if (tempMaps[attempt])
|
||||
unmapPages(tempMaps[attempt], retainedSize);
|
||||
UnmapPages(tempMaps[attempt], retainedSize);
|
||||
break;
|
||||
}
|
||||
if (!tempMaps[attempt]) {
|
||||
/* getNewChunk failed, but we can still try the simpler method. */
|
||||
/* GetNewChunk failed, but we can still try the simpler method. */
|
||||
tempMaps[attempt] = p;
|
||||
p = nullptr;
|
||||
}
|
||||
}
|
||||
if (OffsetFromAligned(p, alignment)) {
|
||||
unmapPages(p, size);
|
||||
UnmapPages(p, size);
|
||||
p = nullptr;
|
||||
}
|
||||
while (--attempt >= 0)
|
||||
unmapPages(tempMaps[attempt], 0);
|
||||
UnmapPages(tempMaps[attempt], 0);
|
||||
return p;
|
||||
}
|
||||
|
||||
@ -167,9 +227,9 @@ SystemPageAllocator::mapAlignedPagesLastDitch(size_t size, size_t alignment)
|
||||
* unaligned chunk, then reallocate the unaligned part to block off the
|
||||
* old address and force the allocator to give us a new one.
|
||||
*/
|
||||
void
|
||||
SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
|
||||
size_t size, size_t alignment)
|
||||
static void
|
||||
GetNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
|
||||
size_t alignment)
|
||||
{
|
||||
void *address = *aAddress;
|
||||
void *retainedAddr = nullptr;
|
||||
@ -180,7 +240,7 @@ SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *
|
||||
size_t offset = OffsetFromAligned(address, alignment);
|
||||
if (!offset)
|
||||
break;
|
||||
unmapPages(address, size);
|
||||
UnmapPages(address, size);
|
||||
retainedSize = alignment - offset;
|
||||
retainedAddr = MapMemoryAt(address, retainedSize, MEM_RESERVE);
|
||||
address = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
|
||||
@ -192,15 +252,15 @@ SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *
|
||||
}
|
||||
|
||||
void
|
||||
SystemPageAllocator::unmapPages(void *p, size_t size)
|
||||
UnmapPages(void *p, size_t size)
|
||||
{
|
||||
MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
|
||||
}
|
||||
|
||||
bool
|
||||
SystemPageAllocator::markPagesUnused(void *p, size_t size)
|
||||
MarkPagesUnused(void *p, size_t size)
|
||||
{
|
||||
if (!decommitEnabled())
|
||||
if (!DecommitEnabled())
|
||||
return true;
|
||||
|
||||
MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
|
||||
@ -209,14 +269,14 @@ SystemPageAllocator::markPagesUnused(void *p, size_t size)
|
||||
}
|
||||
|
||||
bool
|
||||
SystemPageAllocator::markPagesInUse(void *p, size_t size)
|
||||
MarkPagesInUse(void *p, size_t size)
|
||||
{
|
||||
MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t
|
||||
SystemPageAllocator::GetPageFaultCount()
|
||||
GetPageFaultCount()
|
||||
{
|
||||
PROCESS_MEMORY_COUNTERS pmc;
|
||||
if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
|
||||
@ -225,7 +285,7 @@ SystemPageAllocator::GetPageFaultCount()
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
|
||||
AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
|
||||
{
|
||||
// TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
|
||||
return nullptr;
|
||||
@ -233,27 +293,26 @@ SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length,
|
||||
|
||||
// Deallocate mapped memory for object.
|
||||
void
|
||||
SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
|
||||
DeallocateMappedContent(void *p, size_t length)
|
||||
{
|
||||
// TODO: Bug 988813 - Support memory mapped array buffer for Windows platform.
|
||||
}
|
||||
|
||||
#elif defined(SOLARIS)
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#ifndef MAP_NOSYNC
|
||||
# define MAP_NOSYNC 0
|
||||
#endif
|
||||
|
||||
SystemPageAllocator::SystemPageAllocator()
|
||||
void
|
||||
InitMemorySubsystem()
|
||||
{
|
||||
pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
|
||||
if (pageSize == 0)
|
||||
pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
|
||||
MapAlignedPages(size_t size, size_t alignment)
|
||||
{
|
||||
MOZ_ASSERT(size >= alignment);
|
||||
MOZ_ASSERT(size % alignment == 0);
|
||||
@ -270,33 +329,33 @@ SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
|
||||
}
|
||||
|
||||
void
|
||||
SystemPageAllocator::unmapPages(void *p, size_t size)
|
||||
UnmapPages(void *p, size_t size)
|
||||
{
|
||||
MOZ_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
|
||||
}
|
||||
|
||||
bool
|
||||
SystemPageAllocator::markPagesUnused(void *p, size_t size)
|
||||
MarkPagesUnused(void *p, size_t size)
|
||||
{
|
||||
MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
SystemPageAllocator::markPagesInUse(void *p, size_t size)
|
||||
MarkPagesInUse(void *p, size_t size)
|
||||
{
|
||||
MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t
|
||||
SystemPageAllocator::GetPageFaultCount()
|
||||
GetPageFaultCount()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
|
||||
AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
|
||||
{
|
||||
// Not implemented.
|
||||
return nullptr;
|
||||
@ -304,25 +363,20 @@ SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length,
|
||||
|
||||
// Deallocate mapped memory for object.
|
||||
void
|
||||
SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
|
||||
DeallocateMappedContent(void *p, size_t length)
|
||||
{
|
||||
// Not implemented.
|
||||
}
|
||||
|
||||
#elif defined(XP_UNIX)
|
||||
|
||||
#include <algorithm>
|
||||
#include <errno.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/resource.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
SystemPageAllocator::SystemPageAllocator()
|
||||
void
|
||||
InitMemorySubsystem()
|
||||
{
|
||||
pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
|
||||
growthDirection = 0;
|
||||
if (pageSize == 0) {
|
||||
pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
|
||||
growthDirection = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void *
|
||||
@ -388,7 +442,7 @@ MapMemory(size_t length, int prot = PROT_READ | PROT_WRITE,
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
|
||||
MapAlignedPages(size_t size, size_t alignment)
|
||||
{
|
||||
MOZ_ASSERT(size >= alignment);
|
||||
MOZ_ASSERT(size % alignment == 0);
|
||||
@ -406,25 +460,25 @@ SystemPageAllocator::mapAlignedPages(size_t size, size_t alignment)
|
||||
|
||||
void *retainedAddr;
|
||||
size_t retainedSize;
|
||||
getNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
|
||||
GetNewChunk(&p, &retainedAddr, &retainedSize, size, alignment);
|
||||
if (retainedAddr)
|
||||
unmapPages(retainedAddr, retainedSize);
|
||||
UnmapPages(retainedAddr, retainedSize);
|
||||
if (p) {
|
||||
if (OffsetFromAligned(p, alignment) == 0)
|
||||
return p;
|
||||
unmapPages(p, size);
|
||||
UnmapPages(p, size);
|
||||
}
|
||||
|
||||
p = mapAlignedPagesSlow(size, alignment);
|
||||
p = MapAlignedPagesSlow(size, alignment);
|
||||
if (!p)
|
||||
return mapAlignedPagesLastDitch(size, alignment);
|
||||
return MapAlignedPagesLastDitch(size, alignment);
|
||||
|
||||
MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
|
||||
return p;
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
static void *
|
||||
MapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
{
|
||||
/* Overallocate and unmap the region's edges. */
|
||||
size_t reqSize = size + alignment - pageSize;
|
||||
@ -446,9 +500,9 @@ SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
}
|
||||
|
||||
if (front != region)
|
||||
unmapPages(region, uintptr_t(front) - uintptr_t(region));
|
||||
UnmapPages(region, uintptr_t(front) - uintptr_t(region));
|
||||
if (end != regionEnd)
|
||||
unmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
|
||||
UnmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
|
||||
|
||||
return front;
|
||||
}
|
||||
@ -461,33 +515,33 @@ SystemPageAllocator::mapAlignedPagesSlow(size_t size, size_t alignment)
|
||||
* address each time, we temporarily hold onto the unaligned part of each chunk
|
||||
* until the allocator gives us a chunk that either is, or can be aligned.
|
||||
*/
|
||||
void *
|
||||
SystemPageAllocator::mapAlignedPagesLastDitch(size_t size, size_t alignment)
|
||||
static void *
|
||||
MapAlignedPagesLastDitch(size_t size, size_t alignment)
|
||||
{
|
||||
void *p = nullptr;
|
||||
void *tempMaps[MaxLastDitchAttempts];
|
||||
size_t tempSizes[MaxLastDitchAttempts];
|
||||
int attempt = 0;
|
||||
for (; attempt < MaxLastDitchAttempts; ++attempt) {
|
||||
getNewChunk(&p, tempMaps + attempt, tempSizes + attempt, size, alignment);
|
||||
GetNewChunk(&p, tempMaps + attempt, tempSizes + attempt, size, alignment);
|
||||
if (OffsetFromAligned(p, alignment) == 0) {
|
||||
if (tempMaps[attempt])
|
||||
unmapPages(tempMaps[attempt], tempSizes[attempt]);
|
||||
UnmapPages(tempMaps[attempt], tempSizes[attempt]);
|
||||
break;
|
||||
}
|
||||
if (!tempMaps[attempt]) {
|
||||
/* getNewChunk failed, but we can still try the simpler method. */
|
||||
/* GetNewChunk failed, but we can still try the simpler method. */
|
||||
tempMaps[attempt] = p;
|
||||
tempSizes[attempt] = size;
|
||||
p = nullptr;
|
||||
}
|
||||
}
|
||||
if (OffsetFromAligned(p, alignment)) {
|
||||
unmapPages(p, size);
|
||||
UnmapPages(p, size);
|
||||
p = nullptr;
|
||||
}
|
||||
while (--attempt >= 0)
|
||||
unmapPages(tempMaps[attempt], tempSizes[attempt]);
|
||||
UnmapPages(tempMaps[attempt], tempSizes[attempt]);
|
||||
return p;
|
||||
}
|
||||
|
||||
@ -497,9 +551,9 @@ SystemPageAllocator::mapAlignedPagesLastDitch(size_t size, size_t alignment)
|
||||
* are handed out in increasing or decreasing order, we have to try both
|
||||
* directions (depending on the environment, one will always fail).
|
||||
*/
|
||||
void
|
||||
SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
|
||||
size_t size, size_t alignment)
|
||||
static void
|
||||
GetNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
|
||||
size_t alignment)
|
||||
{
|
||||
void *address = *aAddress;
|
||||
void *retainedAddr = nullptr;
|
||||
@ -507,12 +561,12 @@ SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *
|
||||
do {
|
||||
bool addrsGrowDown = growthDirection <= 0;
|
||||
/* Try the direction indicated by growthDirection. */
|
||||
if (getNewChunkInner(&address, &retainedAddr, &retainedSize, size,
|
||||
if (GetNewChunkInner(&address, &retainedAddr, &retainedSize, size,
|
||||
alignment, addrsGrowDown)) {
|
||||
break;
|
||||
}
|
||||
/* If that failed, try the opposite direction. */
|
||||
if (getNewChunkInner(&address, &retainedAddr, &retainedSize, size,
|
||||
if (GetNewChunkInner(&address, &retainedAddr, &retainedSize, size,
|
||||
alignment, !addrsGrowDown)) {
|
||||
break;
|
||||
}
|
||||
@ -529,9 +583,9 @@ SystemPageAllocator::getNewChunk(void **aAddress, void **aRetainedAddr, size_t *
|
||||
*aRetainedSize = retainedSize_; return toReturn_; \
|
||||
} while(false)
|
||||
|
||||
bool
|
||||
SystemPageAllocator::getNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
|
||||
size_t size, size_t alignment, bool addrsGrowDown)
|
||||
static bool
|
||||
GetNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize, size_t size,
|
||||
size_t alignment, bool addrsGrowDown)
|
||||
{
|
||||
void *initial = *aAddress;
|
||||
if (!initial)
|
||||
@ -555,13 +609,13 @@ SystemPageAllocator::getNewChunkInner(void **aAddress, void **aRetainedAddr, siz
|
||||
delta = 1;
|
||||
}
|
||||
/* Keep only the |offset| unaligned bytes. */
|
||||
unmapPages(discardedAddr, size - offset);
|
||||
UnmapPages(discardedAddr, size - offset);
|
||||
void *address = MapMemory(size);
|
||||
if (!address) {
|
||||
/* Map the rest of the original chunk again in case we can recover. */
|
||||
address = MapMemoryAt(initial, size - offset);
|
||||
if (!address)
|
||||
unmapPages(retainedAddr, offset);
|
||||
UnmapPages(retainedAddr, offset);
|
||||
SET_OUT_PARAMS_AND_RETURN(address, nullptr, 0, false);
|
||||
}
|
||||
if ((addrsGrowDown && address < retainedAddr) || (!addrsGrowDown && address > retainedAddr)) {
|
||||
@ -573,12 +627,12 @@ SystemPageAllocator::getNewChunkInner(void **aAddress, void **aRetainedAddr, siz
|
||||
/* Accept an aligned address if growthDirection didn't just flip. */
|
||||
if (OffsetFromAligned(address, alignment) == 0 && growthDirection + delta != 0)
|
||||
SET_OUT_PARAMS_AND_RETURN(address, retainedAddr, offset, true);
|
||||
unmapPages(address, size);
|
||||
UnmapPages(address, size);
|
||||
/* Map the original chunk again since we chose the wrong direction. */
|
||||
address = MapMemoryAt(initial, size - offset);
|
||||
if (!address) {
|
||||
/* Return non-null retainedAddr to indicate thread-related failure. */
|
||||
unmapPages(retainedAddr, offset);
|
||||
UnmapPages(retainedAddr, offset);
|
||||
SET_OUT_PARAMS_AND_RETURN(nullptr, retainedAddr, 0, false);
|
||||
}
|
||||
SET_OUT_PARAMS_AND_RETURN(address, nullptr, 0, false);
|
||||
@ -587,16 +641,16 @@ SystemPageAllocator::getNewChunkInner(void **aAddress, void **aRetainedAddr, siz
|
||||
#undef SET_OUT_PARAMS_AND_RETURN
|
||||
|
||||
void
|
||||
SystemPageAllocator::unmapPages(void *p, size_t size)
|
||||
UnmapPages(void *p, size_t size)
|
||||
{
|
||||
if (munmap(p, size))
|
||||
MOZ_ASSERT(errno == ENOMEM);
|
||||
}
|
||||
|
||||
bool
|
||||
SystemPageAllocator::markPagesUnused(void *p, size_t size)
|
||||
MarkPagesUnused(void *p, size_t size)
|
||||
{
|
||||
if (!decommitEnabled())
|
||||
if (!DecommitEnabled())
|
||||
return false;
|
||||
|
||||
MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
|
||||
@ -605,14 +659,14 @@ SystemPageAllocator::markPagesUnused(void *p, size_t size)
|
||||
}
|
||||
|
||||
bool
|
||||
SystemPageAllocator::markPagesInUse(void *p, size_t size)
|
||||
MarkPagesInUse(void *p, size_t size)
|
||||
{
|
||||
MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t
|
||||
SystemPageAllocator::GetPageFaultCount()
|
||||
GetPageFaultCount()
|
||||
{
|
||||
struct rusage usage;
|
||||
int err = getrusage(RUSAGE_SELF, &usage);
|
||||
@ -622,13 +676,12 @@ SystemPageAllocator::GetPageFaultCount()
|
||||
}
|
||||
|
||||
void *
|
||||
SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
|
||||
AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
|
||||
{
|
||||
#define NEED_PAGE_ALIGNED 0
|
||||
size_t pa_start; // Page aligned starting
|
||||
size_t pa_end; // Page aligned ending
|
||||
size_t pa_size; // Total page aligned size
|
||||
size_t page_size = sysconf(_SC_PAGESIZE); // Page size
|
||||
struct stat st;
|
||||
uint8_t *buf;
|
||||
|
||||
@ -639,16 +692,16 @@ SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length,
|
||||
|
||||
// Check for minimal alignment requirement.
|
||||
#if NEED_PAGE_ALIGNED
|
||||
alignment = std::max(alignment, page_size);
|
||||
alignment = std::max(alignment, pageSize);
|
||||
#endif
|
||||
if (offset & (alignment - 1))
|
||||
return nullptr;
|
||||
|
||||
// Page aligned starting of the offset.
|
||||
pa_start = offset & ~(page_size - 1);
|
||||
pa_start = offset & ~(pageSize - 1);
|
||||
// Calculate page aligned ending by adding one page to the page aligned
|
||||
// starting of data end position(offset + length - 1).
|
||||
pa_end = ((offset + length - 1) & ~(page_size - 1)) + page_size;
|
||||
pa_end = ((offset + length - 1) & ~(pageSize - 1)) + pageSize;
|
||||
pa_size = pa_end - pa_start;
|
||||
|
||||
// Ask for a continuous memory location.
|
||||
@ -671,14 +724,13 @@ SystemPageAllocator::AllocateMappedContent(int fd, size_t offset, size_t length,
|
||||
}
|
||||
|
||||
void
|
||||
SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
|
||||
DeallocateMappedContent(void *p, size_t length)
|
||||
{
|
||||
void *pa_start; // Page aligned starting
|
||||
size_t page_size = sysconf(_SC_PAGESIZE); // Page size
|
||||
size_t total_size; // Total allocated size
|
||||
|
||||
pa_start = (void *)(uintptr_t(p) & ~(page_size - 1));
|
||||
total_size = ((uintptr_t(p) + length) & ~(page_size - 1)) + page_size - uintptr_t(pa_start);
|
||||
pa_start = (void *)(uintptr_t(p) & ~(pageSize - 1));
|
||||
total_size = ((uintptr_t(p) + length) & ~(pageSize - 1)) + pageSize - uintptr_t(pa_start);
|
||||
if (munmap(pa_start, total_size))
|
||||
MOZ_ASSERT(errno == ENOMEM);
|
||||
}
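To make the page rounding in AllocateMappedContent and DeallocateMappedContent above concrete, here is a small worked example with illustrative numbers (4096-byte pages, offset 5000, length 4000); the asserts merely restate the arithmetic:

    // pa_start = 5000 & ~4095                       = 4096
    // pa_end   = ((5000 + 4000 - 1) & ~4095) + 4096 = 12288
    // pa_size  = 12288 - 4096                       = 8192  (two pages covering bytes [5000, 9000))
    static_assert((5000u & ~4095u) == 4096u, "pa_start example");
    static_assert(((5000u + 4000u - 1u) & ~4095u) + 4096u == 12288u, "pa_end example");
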
|
||||
@ -686,3 +738,6 @@ SystemPageAllocator::DeallocateMappedContent(void *p, size_t length)
|
||||
#else
|
||||
#error "Memory mapping functions are not defined for your OS."
|
||||
#endif
|
||||
|
||||
} // namespace gc
|
||||
} // namespace js
|
||||
|
@ -14,70 +14,36 @@ struct JSRuntime;
|
||||
namespace js {
|
||||
namespace gc {
|
||||
|
||||
class SystemPageAllocator
|
||||
{
|
||||
public:
|
||||
// Sanity check that our compiled configuration matches the currently
|
||||
// running instance and initialize any runtime data needed for allocation.
|
||||
SystemPageAllocator();
|
||||
// Sanity check that our compiled configuration matches the currently
|
||||
// running instance and initialize any runtime data needed for allocation.
|
||||
void InitMemorySubsystem();
|
||||
|
||||
size_t systemPageSize() { return pageSize; }
|
||||
size_t systemAllocGranularity() { return allocGranularity; }
|
||||
size_t SystemPageSize();
|
||||
|
||||
// Allocate or deallocate pages from the system with the given alignment.
|
||||
void *mapAlignedPages(size_t size, size_t alignment);
|
||||
void unmapPages(void *p, size_t size);
|
||||
// Allocate or deallocate pages from the system with the given alignment.
|
||||
void *MapAlignedPages(size_t size, size_t alignment);
|
||||
void UnmapPages(void *p, size_t size);
|
||||
|
||||
// Tell the OS that the given pages are not in use, so they should not be
|
||||
// written to a paging file. This may be a no-op on some platforms.
|
||||
bool markPagesUnused(void *p, size_t size);
|
||||
// Tell the OS that the given pages are not in use, so they should not be
|
||||
// written to a paging file. This may be a no-op on some platforms.
|
||||
bool MarkPagesUnused(void *p, size_t size);
|
||||
|
||||
// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
|
||||
// and should be paged in and out normally. This may be a no-op on some
|
||||
// platforms.
|
||||
bool markPagesInUse(void *p, size_t size);
|
||||
// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
|
||||
// and should be paged in and out normally. This may be a no-op on some
|
||||
// platforms.
|
||||
bool MarkPagesInUse(void *p, size_t size);
|
||||
|
||||
// Returns #(hard faults) + #(soft faults)
|
||||
static size_t GetPageFaultCount();
|
||||
// Returns #(hard faults) + #(soft faults)
|
||||
size_t GetPageFaultCount();
|
||||
|
||||
// Allocate memory mapped content.
|
||||
// The offset must be aligned according to alignment requirement.
|
||||
static void *AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
|
||||
// Allocate memory mapped content.
|
||||
// The offset must be aligned according to alignment requirement.
|
||||
void *AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
|
||||
|
||||
// Deallocate memory mapped content.
|
||||
static void DeallocateMappedContent(void *p, size_t length);
|
||||
// Deallocate memory mapped content.
|
||||
void DeallocateMappedContent(void *p, size_t length);
|
||||
|
||||
private:
|
||||
bool decommitEnabled();
|
||||
void *mapAlignedPagesSlow(size_t size, size_t alignment);
|
||||
void *mapAlignedPagesLastDitch(size_t size, size_t alignment);
|
||||
void getNewChunk(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
|
||||
size_t size, size_t alignment);
|
||||
bool getNewChunkInner(void **aAddress, void **aRetainedAddr, size_t *aRetainedSize,
|
||||
size_t size, size_t alignment, bool addrsGrowDown);
|
||||
|
||||
// The GC can only safely decommit memory when the page size of the
|
||||
// running process matches the compiled arena size.
|
||||
size_t pageSize;
|
||||
|
||||
// The OS allocation granularity may not match the page size.
|
||||
size_t allocGranularity;
|
||||
|
||||
#if defined(XP_UNIX)
|
||||
// The addresses handed out by mmap may grow up or down.
|
||||
int growthDirection;
|
||||
#endif
|
||||
|
||||
// The maximum number of unalignable chunks to temporarily keep alive in
|
||||
// the last ditch allocation pass. OOM crash reports generally show <= 7
|
||||
// unaligned chunks available (bug 1005844 comment #16).
|
||||
static const int MaxLastDitchAttempts = 8;
|
||||
|
||||
public:
|
||||
void *testMapAlignedPagesLastDitch(size_t size, size_t alignment) {
|
||||
return mapAlignedPagesLastDitch(size, alignment);
|
||||
}
|
||||
};
|
||||
void *TestMapAlignedPagesLastDitch(size_t size, size_t alignment);
|
||||
|
||||
} // namespace gc
|
||||
} // namespace js
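A minimal caller-side sketch of the free functions declared above; the include path and the 1 MiB request are illustrative assumptions, and InitMemorySubsystem() is expected to have run first (GCRuntime::init below does exactly that):

    #include "gc/Memory.h"   // assumed location of the declarations above

    static void *
    GrabAlignedBlock()
    {
        const size_t size = size_t(1) << 20;   // illustrative: 1 MiB, chunk-aligned
        void *p = js::gc::MapAlignedPages(size, size);
        if (!p)
            return nullptr;
        js::gc::MarkPagesUnused(p, size);      // hint: contents not needed right now
        js::gc::MarkPagesInUse(p, size);       // undo the hint before touching the pages
        return p;                              // release later with js::gc::UnmapPages(p, size)
    }
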
|
||||
|
@ -61,7 +61,7 @@ js::Nursery::init(uint32_t maxNurseryBytes)
|
||||
if (!hugeSlots.init())
|
||||
return false;
|
||||
|
||||
void *heap = runtime()->gc.pageAllocator.mapAlignedPages(nurserySize(), Alignment);
|
||||
void *heap = MapAlignedPages(nurserySize(), Alignment);
|
||||
if (!heap)
|
||||
return false;
|
||||
|
||||
@ -86,7 +86,7 @@ js::Nursery::init(uint32_t maxNurseryBytes)
|
||||
js::Nursery::~Nursery()
|
||||
{
|
||||
if (start())
|
||||
runtime()->gc.pageAllocator.unmapPages((void *)start(), nurserySize());
|
||||
UnmapPages((void *)start(), nurserySize());
|
||||
}
|
||||
|
||||
void
|
||||
@ -101,7 +101,7 @@ js::Nursery::updateDecommittedRegion()
|
||||
uintptr_t decommitSize = heapEnd() - decommitStart;
|
||||
JS_ASSERT(decommitStart == AlignBytes(decommitStart, Alignment));
|
||||
JS_ASSERT(decommitSize == AlignBytes(decommitStart, Alignment));
|
||||
runtime()->gc.pageAllocator.markPagesUnused((void *)decommitStart, decommitSize);
|
||||
MarkPagesUnused((void *)decommitStart, decommitSize);
|
||||
# endif
|
||||
}
|
||||
#endif
|
||||
|
@ -581,7 +581,7 @@ Statistics::beginSlice(int collectedCount, int zoneCount, int compartmentCount,
|
||||
if (first)
|
||||
beginGC();
|
||||
|
||||
SliceData data(reason, PRMJ_Now(), SystemPageAllocator::GetPageFaultCount());
|
||||
SliceData data(reason, PRMJ_Now(), GetPageFaultCount());
|
||||
(void) slices.append(data); /* Ignore any OOMs here. */
|
||||
|
||||
if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback)
|
||||
@ -600,7 +600,7 @@ void
|
||||
Statistics::endSlice()
|
||||
{
|
||||
slices.back().end = PRMJ_Now();
|
||||
slices.back().endFaults = SystemPageAllocator::GetPageFaultCount();
|
||||
slices.back().endFaults = GetPageFaultCount();
|
||||
|
||||
if (JSAccumulateTelemetryDataCallback cb = runtime->telemetryCallback) {
|
||||
(*cb)(JS_TELEMETRY_GC_SLICE_MS, t(slices.back().end - slices.back().start));
|
||||
|
@ -1239,7 +1239,7 @@ static const VMFunction DeepCloneObjectLiteralInfo =
|
||||
bool
|
||||
BaselineCompiler::emit_JSOP_OBJECT()
|
||||
{
|
||||
if (JS::CompartmentOptionsRef(cx).cloneSingletons(cx)) {
|
||||
if (JS::CompartmentOptionsRef(cx).cloneSingletons()) {
|
||||
RootedObject obj(cx, script->getObject(GET_UINT32_INDEX(pc)));
|
||||
if (!obj)
|
||||
return false;
|
||||
|
@ -280,7 +280,7 @@ JitCompileOptions::JitCompileOptions()
|
||||
JitCompileOptions::JitCompileOptions(JSContext *cx)
|
||||
{
|
||||
JS::CompartmentOptions &options = cx->compartment()->options();
|
||||
cloneSingletons_ = options.cloneSingletons(cx);
|
||||
cloneSingletons_ = options.cloneSingletons();
|
||||
spsSlowAssertionsEnabled_ = cx->runtime()->spsProfiler.enabled() &&
|
||||
cx->runtime()->spsProfiler.slowAssertionsEnabled();
|
||||
}
|
||||
|
@ -204,8 +204,8 @@ StupidAllocator::evictRegister(LInstruction *ins, RegisterIndex index)
|
||||
void
|
||||
StupidAllocator::evictAliasedRegister(LInstruction *ins, RegisterIndex index)
|
||||
{
|
||||
for (int i = 0; i < registers[index].reg.numAliased(); i++) {
|
||||
int aindex = registerIndex(registers[index].reg.aliased(i));
|
||||
for (size_t i = 0; i < registers[index].reg.numAliased(); i++) {
|
||||
uint32_t aindex = registerIndex(registers[index].reg.aliased(i));
|
||||
syncRegister(ins, aindex);
|
||||
registers[aindex].set(MISSING_ALLOCATION);
|
||||
}
|
||||
|
@ -92,20 +92,18 @@ testGCAllocatorUp(const size_t PageSize)
|
||||
return false;
|
||||
// Unmap the staging area so we can set it up for testing.
|
||||
unmapPages(stagingArea, StagingSize);
|
||||
// Reuse the same allocator so it learns the address growth direction.
|
||||
js::gc::SystemPageAllocator GCAlloc;
|
||||
// Check that the first chunk is used if it is aligned.
|
||||
CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("xxooxxx---------", stagingArea, chunkPool, tempChunks));
|
||||
// Check that the first chunk is used if it can be aligned.
|
||||
CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("x-ooxxx---------", stagingArea, chunkPool, tempChunks));
|
||||
// Check that an aligned chunk after a single unalignable chunk is used.
|
||||
CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("x--xooxxx-------", stagingArea, chunkPool, tempChunks));
|
||||
// Check that we fall back to the slow path after two unalignable chunks.
|
||||
CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("x--xx--xoo--xxx-", stagingArea, chunkPool, tempChunks));
|
||||
// Check that we also fall back after an unalignable and an alignable chunk.
|
||||
CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("x--xx---x-oo--x-", stagingArea, chunkPool, tempChunks));
|
||||
// Check that the last ditch allocator works as expected.
|
||||
CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool, tempChunks, GCAlloc,
|
||||
CHECK(positionIsCorrect("x--xx--xx-oox---", stagingArea, chunkPool, tempChunks,
|
||||
UseLastDitchAllocator));
|
||||
|
||||
// Clean up.
|
||||
@ -138,20 +136,18 @@ testGCAllocatorDown(const size_t PageSize)
|
||||
return false;
|
||||
// Unmap the staging area so we can set it up for testing.
|
||||
unmapPages(stagingArea, StagingSize);
|
||||
// Reuse the same allocator so it learns the address growth direction.
|
||||
js::gc::SystemPageAllocator GCAlloc;
|
||||
// Check that the first chunk is used if it is aligned.
|
||||
CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("---------xxxooxx", stagingArea, chunkPool, tempChunks));
|
||||
// Check that the first chunk is used if it can be aligned.
|
||||
CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("---------xxxoo-x", stagingArea, chunkPool, tempChunks));
|
||||
// Check that an aligned chunk after a single unalignable chunk is used.
|
||||
CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("-------xxxoox--x", stagingArea, chunkPool, tempChunks));
|
||||
// Check that we fall back to the slow path after two unalignable chunks.
|
||||
CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("-xxx--oox--xx--x", stagingArea, chunkPool, tempChunks));
|
||||
// Check that we also fall back after an unalignable and an alignable chunk.
|
||||
CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool, tempChunks, GCAlloc));
|
||||
CHECK(positionIsCorrect("-x--oo-x---xx--x", stagingArea, chunkPool, tempChunks));
|
||||
// Check that the last ditch allocator works as expected.
|
||||
CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool, tempChunks, GCAlloc,
|
||||
CHECK(positionIsCorrect("---xoo-xx--xx--x", stagingArea, chunkPool, tempChunks,
|
||||
UseLastDitchAllocator));
|
||||
|
||||
// Clean up.
|
||||
@ -194,7 +190,7 @@ fillSpaceBeforeStagingArea(int &tempChunks, void *stagingArea,
|
||||
|
||||
bool
|
||||
positionIsCorrect(const char *str, void *base, void **chunkPool, int tempChunks,
|
||||
js::gc::SystemPageAllocator& GCAlloc, AllocType allocator = UseNormalAllocator)
|
||||
AllocType allocator = UseNormalAllocator)
|
||||
{
|
||||
// str represents a region of memory, with each character representing a
|
||||
// region of Chunk bytes. str should contain only x, o and -, where
|
||||
@ -216,20 +212,20 @@ positionIsCorrect(const char *str, void *base, void **chunkPool, int tempChunks,
|
||||
// Allocate using the GC's allocator.
|
||||
void *result;
|
||||
if (allocator == UseNormalAllocator)
|
||||
result = GCAlloc.mapAlignedPages(2 * Chunk, Alignment);
|
||||
result = js::gc::MapAlignedPages(2 * Chunk, Alignment);
|
||||
else
|
||||
result = GCAlloc.testMapAlignedPagesLastDitch(2 * Chunk, Alignment);
|
||||
result = js::gc::TestMapAlignedPagesLastDitch(2 * Chunk, Alignment);
|
||||
// Clean up the mapped regions.
|
||||
if (result)
|
||||
GCAlloc.unmapPages(result, 2 * Chunk);
|
||||
js::gc::UnmapPages(result, 2 * Chunk);
|
||||
for (--i; i >= 0; --i) {
|
||||
if (str[i] == 'x')
|
||||
unmapPages((void *)(uintptr_t(base) + i * Chunk), Chunk);
|
||||
js::gc::UnmapPages((void *)(uintptr_t(base) + i * Chunk), Chunk);
|
||||
}
|
||||
// CHECK returns, so clean up on failure.
|
||||
if (result != desired) {
|
||||
while (--tempChunks >= 0)
|
||||
unmapPages(chunkPool[tempChunks], 2 * Chunk);
|
||||
js::gc::UnmapPages(chunkPool[tempChunks], 2 * Chunk);
|
||||
}
|
||||
return result == desired;
|
||||
}
|
||||
|
@ -9,7 +9,7 @@ BEGIN_TEST(testJSEvaluateScript)
|
||||
JS::RootedObject obj(cx, JS_NewObject(cx, nullptr, JS::NullPtr(), global));
|
||||
CHECK(obj);
|
||||
|
||||
CHECK(JS::ContextOptionsRef(cx).varObjFix());
|
||||
CHECK(JS::RuntimeOptionsRef(cx).varObjFix());
|
||||
|
||||
static const char src[] = "var x = 5;";
|
||||
|
||||
@ -25,7 +25,7 @@ BEGIN_TEST(testJSEvaluateScript)
|
||||
CHECK(hasProp);
|
||||
|
||||
// Now do the same thing, but without JSOPTION_VAROBJFIX
|
||||
JS::ContextOptionsRef(cx).setVarObjFix(false);
|
||||
JS::RuntimeOptionsRef(cx).setVarObjFix(false);
|
||||
|
||||
static const char src2[] = "var y = 5;";
|
||||
|
||||
|
@ -298,6 +298,7 @@ class JSAPITest
|
||||
if (!rt)
|
||||
return nullptr;
|
||||
setNativeStackQuota(rt);
|
||||
JS::RuntimeOptionsRef(rt).setVarObjFix(true);
|
||||
return rt;
|
||||
}
|
||||
|
||||
@ -318,7 +319,6 @@ class JSAPITest
|
||||
JSContext *cx = JS_NewContext(rt, 8192);
|
||||
if (!cx)
|
||||
return nullptr;
|
||||
JS::ContextOptionsRef(cx).setVarObjFix(true);
|
||||
JS_SetErrorReporter(cx, &reportError);
|
||||
return cx;
|
||||
}
|
||||
|
@ -2367,12 +2367,6 @@ class AutoCompartmentRooter : private JS::CustomAutoRooter
|
||||
|
||||
} /* anonymous namespace */
|
||||
|
||||
bool
|
||||
JS::CompartmentOptions::cloneSingletons(JSContext *cx) const
|
||||
{
|
||||
return cloneSingletonsOverride_.get(cx->options().cloneSingletons());
|
||||
}
|
||||
|
||||
JS::CompartmentOptions &
|
||||
JS::CompartmentOptions::setZone(ZoneSpecifier spec)
|
||||
{
|
||||
@ -4478,7 +4472,7 @@ JS::CompileOptions::CompileOptions(JSContext *cx, JSVersion version)
|
||||
|
||||
compileAndGo = false;
|
||||
noScriptRval = cx->options().noScriptRval();
|
||||
strictOption = cx->options().strictMode();
|
||||
strictOption = cx->runtime()->options().strictMode();
|
||||
extraWarningsOption = cx->options().extraWarnings();
|
||||
werrorOption = cx->runtime()->options().werror();
|
||||
asmJSOption = cx->runtime()->options().asmJS();
|
||||
|
@ -733,8 +733,7 @@ typedef struct JSErrorFormatString {
|
||||
} JSErrorFormatString;
|
||||
|
||||
typedef const JSErrorFormatString *
|
||||
(* JSErrorCallback)(void *userRef, const char *locale,
|
||||
const unsigned errorNumber);
|
||||
(* JSErrorCallback)(void *userRef, const unsigned errorNumber);
|
||||
|
||||
typedef bool
|
||||
(* JSLocaleToUpperCase)(JSContext *cx, JS::HandleString src, JS::MutableHandleValue rval);
|
||||
@ -1423,7 +1422,9 @@ class JS_PUBLIC_API(RuntimeOptions) {
|
||||
ion_(false),
|
||||
asmJS_(false),
|
||||
nativeRegExp_(false),
|
||||
werror_(false)
|
||||
werror_(false),
|
||||
strictMode_(false),
|
||||
varObjFix_(false)
|
||||
{
|
||||
}
|
||||
|
||||
@ -1473,12 +1474,34 @@ class JS_PUBLIC_API(RuntimeOptions) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool strictMode() const { return strictMode_; }
|
||||
RuntimeOptions &setStrictMode(bool flag) {
|
||||
strictMode_ = flag;
|
||||
return *this;
|
||||
}
|
||||
RuntimeOptions &toggleStrictMode() {
|
||||
strictMode_ = !strictMode_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool varObjFix() const { return varObjFix_; }
|
||||
RuntimeOptions &setVarObjFix(bool flag) {
|
||||
varObjFix_ = flag;
|
||||
return *this;
|
||||
}
|
||||
RuntimeOptions &toggleVarObjFix() {
|
||||
varObjFix_ = !varObjFix_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
bool baseline_ : 1;
|
||||
bool ion_ : 1;
|
||||
bool asmJS_ : 1;
|
||||
bool nativeRegExp_ : 1;
|
||||
bool werror_ : 1;
|
||||
bool strictMode_ : 1;
|
||||
bool varObjFix_ : 1;
|
||||
};
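Because each setter above returns *this, the flags that migrated here from ContextOptions can be configured per runtime in one chained call; a small sketch (the wrapper function itself is hypothetical):

    #include "jsapi.h"

    static void
    ConfigureRuntime(JSRuntime *rt)
    {
        // Same calls the updated tests and shell make one at a time.
        JS::RuntimeOptionsRef(rt).setStrictMode(true)
                                 .setVarObjFix(true);
    }
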
|
||||
|
||||
JS_PUBLIC_API(RuntimeOptions &)
|
||||
@ -1491,13 +1514,10 @@ class JS_PUBLIC_API(ContextOptions) {
|
||||
public:
|
||||
ContextOptions()
|
||||
: extraWarnings_(false),
|
||||
varObjFix_(false),
|
||||
privateIsNSISupports_(false),
|
||||
dontReportUncaught_(false),
|
||||
noDefaultCompartmentObject_(false),
|
||||
noScriptRval_(false),
|
||||
strictMode_(false),
|
||||
cloneSingletons_(false)
|
||||
noScriptRval_(false)
|
||||
{
|
||||
}
|
||||
|
||||
@ -1511,16 +1531,6 @@ class JS_PUBLIC_API(ContextOptions) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool varObjFix() const { return varObjFix_; }
|
||||
ContextOptions &setVarObjFix(bool flag) {
|
||||
varObjFix_ = flag;
|
||||
return *this;
|
||||
}
|
||||
ContextOptions &toggleVarObjFix() {
|
||||
varObjFix_ = !varObjFix_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool privateIsNSISupports() const { return privateIsNSISupports_; }
|
||||
ContextOptions &setPrivateIsNSISupports(bool flag) {
|
||||
privateIsNSISupports_ = flag;
|
||||
@ -1561,35 +1571,12 @@ class JS_PUBLIC_API(ContextOptions) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool strictMode() const { return strictMode_; }
|
||||
ContextOptions &setStrictMode(bool flag) {
|
||||
strictMode_ = flag;
|
||||
return *this;
|
||||
}
|
||||
ContextOptions &toggleStrictMode() {
|
||||
strictMode_ = !strictMode_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
bool cloneSingletons() const { return cloneSingletons_; }
|
||||
ContextOptions &setCloneSingletons(bool flag) {
|
||||
cloneSingletons_ = flag;
|
||||
return *this;
|
||||
}
|
||||
ContextOptions &toggleCloneSingletons() {
|
||||
cloneSingletons_ = !cloneSingletons_;
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
bool extraWarnings_ : 1;
|
||||
bool varObjFix_ : 1;
|
||||
bool privateIsNSISupports_ : 1;
|
||||
bool dontReportUncaught_ : 1;
|
||||
bool noDefaultCompartmentObject_ : 1;
|
||||
bool noScriptRval_ : 1;
|
||||
bool strictMode_ : 1;
|
||||
bool cloneSingletons_ : 1;
|
||||
};
|
||||
|
||||
JS_PUBLIC_API(ContextOptions &)
|
||||
@ -2572,6 +2559,7 @@ class JS_PUBLIC_API(CompartmentOptions)
|
||||
, invisibleToDebugger_(false)
|
||||
, mergeable_(false)
|
||||
, discardSource_(false)
|
||||
, cloneSingletons_(false)
|
||||
, traceGlobal_(nullptr)
|
||||
, singletonsAsTemplates_(true)
|
||||
, addonId_(nullptr)
|
||||
@ -2615,8 +2603,11 @@ class JS_PUBLIC_API(CompartmentOptions)
|
||||
}
|
||||
|
||||
|
||||
bool cloneSingletons(JSContext *cx) const;
|
||||
Override &cloneSingletonsOverride() { return cloneSingletonsOverride_; }
|
||||
bool cloneSingletons() const { return cloneSingletons_; }
|
||||
CompartmentOptions &setCloneSingletons(bool flag) {
|
||||
cloneSingletons_ = flag;
|
||||
return *this;
|
||||
}
|
||||
|
||||
void *zonePointer() const {
|
||||
JS_ASSERT(uintptr_t(zone_.pointer) > uintptr_t(JS::SystemZone));
|
||||
@ -2654,7 +2645,7 @@ class JS_PUBLIC_API(CompartmentOptions)
|
||||
bool invisibleToDebugger_;
|
||||
bool mergeable_;
|
||||
bool discardSource_;
|
||||
Override cloneSingletonsOverride_;
|
||||
bool cloneSingletons_;
|
||||
union {
|
||||
ZoneSpecifier spec;
|
||||
void *pointer; // js::Zone* is not exposed in the API.
|
||||
@ -4536,7 +4527,6 @@ struct JSLocaleCallbacks {
|
||||
JSLocaleToLowerCase localeToLowerCase;
|
||||
JSLocaleCompare localeCompare; // not used #if EXPOSE_INTL_API
|
||||
JSLocaleToUnicode localeToUnicode;
|
||||
JSErrorCallback localeGetErrorMessage;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -391,8 +391,7 @@ js_ReportOutOfMemory(ThreadSafeContext *cxArg)
|
||||
}
|
||||
|
||||
/* Get the message for this error, but we don't expand any arguments. */
|
||||
const JSErrorFormatString *efs =
|
||||
js_GetLocalizedErrorMessage(cx, nullptr, nullptr, JSMSG_OUT_OF_MEMORY);
|
||||
const JSErrorFormatString *efs = js_GetErrorMessage(nullptr, JSMSG_OUT_OF_MEMORY);
|
||||
const char *msg = efs ? efs->format : "Out of memory";
|
||||
|
||||
/* Fill out the report, but don't do anything that requires allocation. */
|
||||
@ -671,13 +670,14 @@ js_ExpandErrorArguments(ExclusiveContext *cx, JSErrorCallback callback,
|
||||
|
||||
*messagep = nullptr;
|
||||
|
||||
/* Most calls supply js_GetErrorMessage; if this is so, assume nullptr. */
|
||||
if (!callback || callback == js_GetErrorMessage) {
|
||||
efs = js_GetLocalizedErrorMessage(cx, userRef, nullptr, errorNumber);
|
||||
} else {
|
||||
if (!callback)
|
||||
callback = js_GetErrorMessage;
|
||||
|
||||
{
|
||||
AutoSuppressGC suppressGC(cx);
|
||||
efs = callback(userRef, nullptr, errorNumber);
|
||||
efs = callback(userRef, errorNumber);
|
||||
}
|
||||
|
||||
if (efs) {
|
||||
reportp->exnType = efs->exnType;
|
||||
|
||||
@ -1004,9 +1004,9 @@ const JSErrorFormatString js_ErrorFormatString[JSErr_Limit] = {
|
||||
};
|
||||
|
||||
JS_FRIEND_API(const JSErrorFormatString *)
|
||||
js_GetErrorMessage(void *userRef, const char *locale, const unsigned errorNumber)
|
||||
js_GetErrorMessage(void *userRef, const unsigned errorNumber)
|
||||
{
|
||||
if ((errorNumber > 0) && (errorNumber < JSErr_Limit))
|
||||
if (errorNumber > 0 && errorNumber < JSErr_Limit)
|
||||
return &js_ErrorFormatString[errorNumber];
|
||||
return nullptr;
|
||||
}
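For comparison, a caller-supplied callback written against the narrowed JSErrorCallback signature; the table, limit, and message below are hypothetical, and the guard mirrors the one in js_GetErrorMessage above:

    static const unsigned MyErr_Limit = 2;   // hypothetical: one message plus reserved slot 0

    static const JSErrorFormatString MyErrorFormatString[MyErr_Limit] = {
        { nullptr, 0, JSEXN_NONE },                     // slot 0 reserved in this hypothetical table
        { "something went wrong: {0}", 1, JSEXN_ERR },
    };

    static const JSErrorFormatString *
    My_GetErrorMessage(void *userRef, const unsigned errorNumber)
    {
        if (errorNumber > 0 && errorNumber < MyErr_Limit)
            return &MyErrorFormatString[errorNumber];
        return nullptr;
    }
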
|
||||
|
@ -276,7 +276,7 @@ struct ThreadSafeContext : ContextFriendFields,
|
||||
void *runtimeAddressForJit() { return runtime_; }
|
||||
void *stackLimitAddress(StackKind kind) { return &runtime_->mainThread.nativeStackLimit[kind]; }
|
||||
void *stackLimitAddressForJitCode(StackKind kind);
|
||||
size_t gcSystemPageSize() { return runtime_->gc.pageAllocator.systemPageSize(); }
|
||||
size_t gcSystemPageSize() { return gc::SystemPageSize(); }
|
||||
bool signalHandlersInstalled() const { return runtime_->signalHandlersInstalled(); }
|
||||
bool jitSupportsFloatingPoint() const { return runtime_->jitSupportsFloatingPoint; }
|
||||
|
||||
|
@ -579,28 +579,6 @@ ErrorObject::createConstructor(JSContext *cx, JSProtoKey key)
|
||||
return ctor;
|
||||
}
|
||||
|
||||
const JSErrorFormatString*
|
||||
js_GetLocalizedErrorMessage(ExclusiveContext *cx, void *userRef, const char *locale,
|
||||
const unsigned errorNumber)
|
||||
{
|
||||
const JSErrorFormatString *errorString = nullptr;
|
||||
|
||||
// The locale callbacks might not be thread safe, so don't call them if
|
||||
// we're not on the main thread. When used with XPConnect,
|
||||
// |localeGetErrorMessage| will be nullptr anyways.
|
||||
if (cx->isJSContext() &&
|
||||
cx->asJSContext()->runtime()->localeCallbacks &&
|
||||
cx->asJSContext()->runtime()->localeCallbacks->localeGetErrorMessage)
|
||||
{
|
||||
JSLocaleCallbacks *callbacks = cx->asJSContext()->runtime()->localeCallbacks;
|
||||
errorString = callbacks->localeGetErrorMessage(userRef, locale, errorNumber);
|
||||
}
|
||||
|
||||
if (!errorString)
|
||||
errorString = js_GetErrorMessage(userRef, locale, errorNumber);
|
||||
return errorString;
|
||||
}
|
||||
|
||||
JS_FRIEND_API(JSFlatString *)
|
||||
js::GetErrorTypeName(JSRuntime *rt, int16_t exnType)
|
||||
{
|
||||
@ -628,11 +606,9 @@ js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp,
|
||||
|
||||
// Find the exception index associated with this error.
|
||||
JSErrNum errorNumber = static_cast<JSErrNum>(reportp->errorNumber);
|
||||
const JSErrorFormatString *errorString;
|
||||
if (!callback || callback == js_GetErrorMessage)
|
||||
errorString = js_GetLocalizedErrorMessage(cx, nullptr, nullptr, errorNumber);
|
||||
else
|
||||
errorString = callback(userRef, nullptr, errorNumber);
|
||||
if (!callback)
|
||||
callback = js_GetErrorMessage;
|
||||
const JSErrorFormatString *errorString = callback(userRef, errorNumber);
|
||||
JSExnType exnType = errorString ? static_cast<JSExnType>(errorString->exnType) : JSEXN_NONE;
|
||||
MOZ_ASSERT(exnType < JSEXN_LIMIT);
|
||||
|
||||
|
@ -76,10 +76,6 @@ js_ReportUncaughtException(JSContext *cx);
|
||||
extern JSErrorReport *
|
||||
js_ErrorFromException(JSContext *cx, js::HandleObject obj);
|
||||
|
||||
extern const JSErrorFormatString *
|
||||
js_GetLocalizedErrorMessage(js::ExclusiveContext *cx, void *userRef, const char *locale,
|
||||
const unsigned errorNumber);
|
||||
|
||||
/*
|
||||
* Make a copy of errobj parented to cx's compartment's global.
|
||||
*
|
||||
|
@ -1242,7 +1242,7 @@ typedef enum JSErrNum {
|
||||
} JSErrNum;
|
||||
|
||||
extern JS_FRIEND_API(const JSErrorFormatString *)
|
||||
js_GetErrorMessage(void *userRef, const char *locale, const unsigned errorNumber);
|
||||
js_GetErrorMessage(void *userRef, const unsigned errorNumber);
|
||||
|
||||
namespace js {
|
||||
|
||||
|
@ -627,13 +627,13 @@ FinalizeArenas(FreeOp *fop,
|
||||
static inline Chunk *
|
||||
AllocChunk(JSRuntime *rt)
|
||||
{
|
||||
return static_cast<Chunk *>(rt->gc.pageAllocator.mapAlignedPages(ChunkSize, ChunkSize));
|
||||
return static_cast<Chunk *>(MapAlignedPages(ChunkSize, ChunkSize));
|
||||
}
|
||||
|
||||
static inline void
|
||||
FreeChunk(JSRuntime *rt, Chunk *p)
|
||||
{
|
||||
rt->gc.pageAllocator.unmapPages(static_cast<void *>(p), ChunkSize);
|
||||
UnmapPages(static_cast<void *>(p), ChunkSize);
|
||||
}
|
||||
|
||||
/* Must be called with the GC lock taken. */
|
||||
@ -775,7 +775,7 @@ GCRuntime::prepareToFreeChunk(ChunkInfo &info)
|
||||
void Chunk::decommitAllArenas(JSRuntime *rt)
|
||||
{
|
||||
decommittedArenas.clear(true);
|
||||
rt->gc.pageAllocator.markPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
|
||||
MarkPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
|
||||
|
||||
info.freeArenasHead = nullptr;
|
||||
info.lastDecommittedArenaOffset = 0;
|
||||
@ -884,7 +884,7 @@ Chunk::fetchNextDecommittedArena()
|
||||
decommittedArenas.unset(offset);
|
||||
|
||||
Arena *arena = &arenas[offset];
|
||||
info.trailer.runtime->gc.pageAllocator.markPagesInUse(arena, ArenaSize);
|
||||
MarkPagesInUse(arena, ArenaSize);
|
||||
arena->aheader.setAsNotAllocated();
|
||||
|
||||
return &arena->aheader;
|
||||
@ -912,7 +912,7 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
|
||||
return aheader;
|
||||
}
|
||||
|
||||
void
|
||||
inline void
|
||||
GCRuntime::updateBytesAllocated(ptrdiff_t size)
|
||||
{
|
||||
JS_ASSERT_IF(size < 0, bytes >= size_t(-size));
|
||||
@ -1276,6 +1276,8 @@ static const int64_t JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
|
||||
bool
|
||||
GCRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
|
||||
{
|
||||
InitMemorySubsystem();
|
||||
|
||||
#ifdef JS_THREADSAFE
|
||||
lock = PR_NewLock();
|
||||
if (!lock)
|
||||
@ -2506,7 +2508,7 @@ GCRuntime::decommitArenasFromAvailableList(Chunk **availableListHeadp)
|
||||
Maybe<AutoUnlockGC> maybeUnlock;
|
||||
if (!isHeapBusy())
|
||||
maybeUnlock.construct(rt);
|
||||
ok = pageAllocator.markPagesUnused(aheader->getArena(), ArenaSize);
|
||||
ok = MarkPagesUnused(aheader->getArena(), ArenaSize);
|
||||
}
|
||||
|
||||
if (ok) {
|
||||
|
@ -198,7 +198,7 @@ NewGlobalObject(JSContext *cx, JS::CompartmentOptions &options,
|
||||
JSPrincipals *principals);
|
||||
|
||||
static const JSErrorFormatString *
|
||||
my_GetErrorMessage(void *userRef, const char *locale, const unsigned errorNumber);
|
||||
my_GetErrorMessage(void *userRef, const unsigned errorNumber);
|
||||
|
||||
|
||||
/*
|
||||
@ -790,7 +790,7 @@ Options(JSContext *cx, unsigned argc, jsval *vp)
|
||||
else if (strcmp(opt.ptr(), "werror") == 0)
|
||||
JS::RuntimeOptionsRef(cx).toggleWerror();
|
||||
else if (strcmp(opt.ptr(), "strict_mode") == 0)
|
||||
JS::ContextOptionsRef(cx).toggleStrictMode();
|
||||
JS::RuntimeOptionsRef(cx).toggleStrictMode();
|
||||
else {
|
||||
JS_ReportError(cx,
|
||||
"unknown option name '%s'."
|
||||
@ -811,7 +811,7 @@ Options(JSContext *cx, unsigned argc, jsval *vp)
|
||||
names = JS_sprintf_append(names, "%s%s", found ? "," : "", "werror");
|
||||
found = true;
|
||||
}
|
||||
if (names && oldContextOptions.strictMode()) {
|
||||
if (names && oldRuntimeOptions.strictMode()) {
|
||||
names = JS_sprintf_append(names, "%s%s", found ? "," : "", "strict_mode");
|
||||
found = true;
|
||||
}
|
||||
@ -1261,7 +1261,7 @@ Evaluate(JSContext *cx, unsigned argc, jsval *vp)
|
||||
JSSMSG_CACHE_SINGLETON_FAILED);
|
||||
return false;
|
||||
}
|
||||
JS::CompartmentOptionsRef(cx).cloneSingletonsOverride().set(true);
|
||||
JS::CompartmentOptionsRef(cx).setCloneSingletons(true);
|
||||
}
|
||||
|
||||
if (loadBytecode) {
|
||||
@ -5115,7 +5115,7 @@ static const JSErrorFormatString jsShell_ErrorFormatString[JSShellErr_Limit] = {
|
||||
};
|
||||
|
||||
static const JSErrorFormatString *
|
||||
my_GetErrorMessage(void *userRef, const char *locale, const unsigned errorNumber)
|
||||
my_GetErrorMessage(void *userRef, const unsigned errorNumber)
|
||||
{
|
||||
if (errorNumber == 0 || errorNumber >= JSShellErr_Limit)
|
||||
return nullptr;
|
||||
|
@ -536,7 +536,7 @@ ArrayBufferObject::canNeuterAsmJSArrayBuffer(JSContext *cx, ArrayBufferObject &b
|
||||
void *
|
||||
ArrayBufferObject::createMappedContents(int fd, size_t offset, size_t length)
|
||||
{
|
||||
return SystemPageAllocator::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
|
||||
return AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT);
|
||||
}
|
||||
|
||||
void
|
||||
@ -545,7 +545,7 @@ ArrayBufferObject::releaseMappedArray()
|
||||
if(!isMappedArrayBuffer() || isNeutered())
|
||||
return;
|
||||
|
||||
SystemPageAllocator::DeallocateMappedContent(dataPointer(), byteLength());
|
||||
DeallocateMappedContent(dataPointer(), byteLength());
|
||||
}
|
||||
|
||||
void
|
||||
@ -1147,7 +1147,7 @@ JS_CreateMappedArrayBufferContents(int fd, size_t offset, size_t length)
|
||||
JS_PUBLIC_API(void)
|
||||
JS_ReleaseMappedArrayBufferContents(void *contents, size_t length)
|
||||
{
|
||||
SystemPageAllocator::DeallocateMappedContent(contents, length);
|
||||
DeallocateMappedContent(contents, length);
|
||||
}
|
||||
|
||||
JS_FRIEND_API(bool)
|
||||
|
@ -638,7 +638,7 @@ js::Execute(JSContext *cx, HandleScript script, JSObject &scopeChainArg, Value *
|
||||
#endif
|
||||
|
||||
/* The VAROBJFIX option makes varObj == globalObj in global code. */
|
||||
if (!cx->options().varObjFix()) {
|
||||
if (!cx->runtime()->options().varObjFix()) {
|
||||
if (!scopeChain->setVarObj(cx))
|
||||
return false;
|
||||
}
|
||||
@ -2733,7 +2733,7 @@ CASE(JSOP_OBJECT)
|
||||
{
|
||||
RootedObject &ref = rootObject0;
|
||||
ref = script->getObject(REGS.pc);
|
||||
if (JS::CompartmentOptionsRef(cx).cloneSingletons(cx)) {
|
||||
if (JS::CompartmentOptionsRef(cx).cloneSingletons()) {
|
||||
JSObject *obj = js::DeepCloneObjectLiteral(cx, ref, js::MaybeSingletonObject);
|
||||
if (!obj)
|
||||
goto error;
|
||||
|
@ -120,8 +120,7 @@ js::intrinsic_ThrowError(JSContext *cx, unsigned argc, Value *vp)
|
||||
uint32_t errorNumber = args[0].toInt32();
|
||||
|
||||
#ifdef DEBUG
|
||||
const JSErrorFormatString *efs =
|
||||
js_GetLocalizedErrorMessage(cx, nullptr, nullptr, errorNumber);
|
||||
const JSErrorFormatString *efs = js_GetErrorMessage(nullptr, errorNumber);
|
||||
JS_ASSERT(efs->argCount == args.length() - 1);
|
||||
#endif
|
||||
|
||||
|
@ -502,10 +502,10 @@ ThreadPool::abortJob()
|
||||
// that a small number of chunks will be used intensively for a short
|
||||
// while and then be abandoned at the next GC.
|
||||
//
|
||||
// It's an open question whether it's best to go directly to the
|
||||
// pageAllocator, as now, or go via the GC's chunk pool. Either way
|
||||
// there's a need to manage a predictable chunk cache here as we don't
|
||||
// want chunks to be deallocated during a parallel section.
|
||||
// It's an open question whether it's best to map the chunk directly,
|
||||
// as now, or go via the GC's chunk pool. Either way there's a need
|
||||
// to manage a predictable chunk cache here as we don't want chunks to
|
||||
// be deallocated during a parallel section.
|
||||
|
||||
gc::ForkJoinNurseryChunk *
|
||||
ThreadPool::getChunk()
|
||||
@ -524,7 +524,7 @@ ThreadPool::getChunk()
|
||||
}
|
||||
gc::ForkJoinNurseryChunk *c =
|
||||
reinterpret_cast<gc::ForkJoinNurseryChunk *>(
|
||||
runtime_->gc.pageAllocator.mapAlignedPages(gc::ChunkSize, gc::ChunkSize));
|
||||
gc::MapAlignedPages(gc::ChunkSize, gc::ChunkSize));
|
||||
if (!c)
|
||||
return c;
|
||||
poisonChunk(c);
|
||||
@ -580,7 +580,7 @@ ThreadPool::clearChunkCache()
|
||||
while (p) {
|
||||
ChunkFreeList *victim = p;
|
||||
p = p->next;
|
||||
runtime_->gc.pageAllocator.unmapPages(victim, gc::ChunkSize);
|
||||
gc::UnmapPages(victim, gc::ChunkSize);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
@ -56,24 +56,18 @@ XPCCallContext::XPCCallContext(XPCContext::LangType callerLanguage,
|
||||
|
||||
mTearOff = nullptr;
|
||||
|
||||
// If the object is a security wrapper, GetWrappedNativeOfJSObject can't
|
||||
// handle it. Do special handling here to make cross-origin Xrays work.
|
||||
JSObject *unwrapped = js::CheckedUnwrap(obj, /* stopAtOuter = */ false);
|
||||
if (!unwrapped) {
|
||||
mWrapper = UnwrapThisIfAllowed(obj, funobj, argc);
|
||||
if (!mWrapper) {
|
||||
JS_ReportError(mJSContext, "Permission denied to call method on |this|");
|
||||
mState = INIT_FAILED;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
const js::Class *clasp = js::GetObjectClass(unwrapped);
|
||||
if (IS_WN_CLASS(clasp)) {
|
||||
mWrapper = XPCWrappedNative::Get(unwrapped);
|
||||
} else if (IS_TEAROFF_CLASS(clasp)) {
|
||||
mTearOff = (XPCWrappedNativeTearOff*)js::GetObjectPrivate(unwrapped);
|
||||
mWrapper = XPCWrappedNative::Get(js::GetObjectParent(unwrapped));
|
||||
}
|
||||
JS_ReportError(mJSContext, "Permission denied to call method on |this|");
|
||||
mState = INIT_FAILED;
|
||||
return;
|
||||
}
|
||||
const js::Class *clasp = js::GetObjectClass(unwrapped);
|
||||
if (IS_WN_CLASS(clasp)) {
|
||||
mWrapper = XPCWrappedNative::Get(unwrapped);
|
||||
} else if (IS_TEAROFF_CLASS(clasp)) {
|
||||
mTearOff = (XPCWrappedNativeTearOff*)js::GetObjectPrivate(unwrapped);
|
||||
mWrapper = XPCWrappedNative::Get(js::GetObjectParent(unwrapped));
|
||||
}
|
||||
if (mWrapper) {
|
||||
if (mTearOff)
|
||||
@ -311,65 +305,3 @@ XPCCallContext::GetLanguage(uint16_t *aResult)
|
||||
*aResult = GetCallerLanguage();
|
||||
return NS_OK;
|
||||
}
|
||||
|
||||
XPCWrappedNative*
|
||||
XPCCallContext::UnwrapThisIfAllowed(HandleObject obj, HandleObject fun, unsigned argc)
|
||||
{
|
||||
// We should only get here for objects that aren't safe to unwrap.
|
||||
MOZ_ASSERT(!js::CheckedUnwrap(obj));
|
||||
MOZ_ASSERT(js::IsObjectInContextCompartment(obj, mJSContext));
|
||||
|
||||
// We can't do anything here without a function.
|
||||
if (!fun)
|
||||
return nullptr;
|
||||
|
||||
// Determine if we're allowed to unwrap the security wrapper to invoke the
|
||||
// method.
|
||||
//
|
||||
// We have the Interface and Member that this corresponds to, but
|
||||
// unfortunately our access checks are based on the object class name and
|
||||
// property name. So we cheat a little bit here - we verify that the object
|
||||
// does indeed implement the method's Interface, and then just check that we
|
||||
// can successfully access property with method's name from the object.
|
||||
|
||||
// First, get the XPCWN out of the underlying object. We should have a wrapper
|
||||
// here, potentially an outer window proxy, and then an XPCWN.
|
||||
MOZ_ASSERT(js::IsWrapper(obj));
|
||||
RootedObject unwrapped(mJSContext, js::UncheckedUnwrap(obj, /* stopAtOuter = */ false));
|
||||
#ifdef DEBUG
|
||||
JS::Rooted<JSObject*> wrappedObj(mJSContext, js::Wrapper::wrappedObject(obj));
|
||||
MOZ_ASSERT(unwrapped == JS_ObjectToInnerObject(mJSContext, wrappedObj));
|
||||
#endif
|
||||
|
||||
// Make sure we have an XPCWN, and grab it.
|
||||
if (!IS_WN_REFLECTOR(unwrapped))
|
||||
return nullptr;
|
||||
XPCWrappedNative *wn = XPCWrappedNative::Get(unwrapped);
|
||||
|
||||
// Next, get the call info off the function object.
|
||||
XPCNativeInterface *interface;
|
||||
XPCNativeMember *member;
|
||||
XPCNativeMember::GetCallInfo(fun, &interface, &member);
|
||||
|
||||
// To be extra safe, make sure that the underlying native implements the
|
||||
// interface before unwrapping. Even if we didn't check this, we'd still
|
||||
// theoretically fail during tearoff lookup for mismatched methods.
|
||||
if (!wn->HasInterfaceNoQI(*interface->GetIID()))
|
||||
return nullptr;
|
||||
|
||||
// See if the access is permitted.
|
||||
//
|
||||
// NB: This calculation of SET vs GET is a bit wonky, but that's what
|
||||
// XPC_WN_GetterSetter does.
|
||||
bool set = argc && argc != NO_ARGS && member->IsWritableAttribute();
|
||||
js::Wrapper::Action act = set ? js::Wrapper::SET : js::Wrapper::GET;
|
||||
const js::Wrapper *handler = js::Wrapper::wrapperHandler(obj);
|
||||
bool ignored;
|
||||
JS::Rooted<jsid> id(mJSContext, member->GetName());
|
||||
if (!handler->enter(mJSContext, obj, id, act, &ignored))
|
||||
return nullptr;
|
||||
|
||||
// Ok, this call is safe.
|
||||
return wn;
|
||||
}
|
||||
|
||||
|
@ -3244,7 +3244,7 @@ nsXPCComponents_Utils::Dispatch(HandleValue runnableArg, HandleValue scope,
|
||||
|
||||
GENERATE_JSCONTEXTOPTION_GETTER_SETTER(Strict, extraWarnings, setExtraWarnings)
|
||||
GENERATE_JSRUNTIMEOPTION_GETTER_SETTER(Werror, werror, setWerror)
|
||||
GENERATE_JSCONTEXTOPTION_GETTER_SETTER(Strict_mode, strictMode, setStrictMode)
|
||||
GENERATE_JSRUNTIMEOPTION_GETTER_SETTER(Strict_mode, strictMode, setStrictMode)
|
||||
GENERATE_JSRUNTIMEOPTION_GETTER_SETTER(Ion, ion, setIon)
|
||||
|
||||
#undef GENERATE_JSCONTEXTOPTION_GETTER_SETTER
|
||||
|
@ -43,7 +43,6 @@ struct XPCLocaleCallbacks : public JSLocaleCallbacks
|
||||
localeToLowerCase = LocaleToLowerCase;
|
||||
localeCompare = LocaleCompare;
|
||||
localeToUnicode = LocaleToUnicode;
|
||||
localeGetErrorMessage = nullptr;
|
||||
}
|
||||
|
||||
~XPCLocaleCallbacks()
|
||||
|
@ -495,7 +495,7 @@ Options(JSContext *cx, unsigned argc, jsval *vp)
|
||||
else if (strcmp(opt.ptr(), "werror") == 0)
|
||||
RuntimeOptionsRef(cx).toggleWerror();
|
||||
else if (strcmp(opt.ptr(), "strict_mode") == 0)
|
||||
ContextOptionsRef(cx).toggleStrictMode();
|
||||
RuntimeOptionsRef(cx).toggleStrictMode();
|
||||
else {
|
||||
JS_ReportError(cx, "unknown option name '%s'. The valid names are "
|
||||
"strict, werror, and strict_mode.", opt.ptr());
|
||||
@ -518,7 +518,7 @@ Options(JSContext *cx, unsigned argc, jsval *vp)
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (names && oldContextOptions.strictMode()) {
|
||||
if (names && oldRuntimeOptions.strictMode()) {
|
||||
names = JS_sprintf_append(names, "%s%s", names ? "," : "", "strict_mode");
|
||||
if (!names) {
|
||||
JS_ReportOutOfMemory(cx);
|
||||
@ -881,7 +881,7 @@ static const JSErrorFormatString jsShell_ErrorFormatString[JSShellErr_Limit] = {
|
||||
};
|
||||
|
||||
static const JSErrorFormatString *
|
||||
my_GetErrorMessage(void *userRef, const char *locale, const unsigned errorNumber)
|
||||
my_GetErrorMessage(void *userRef, const unsigned errorNumber)
|
||||
{
|
||||
if (errorNumber == 0 || errorNumber >= JSShellErr_Limit)
|
||||
return nullptr;
|
||||
|
@ -811,9 +811,6 @@ private:
|
||||
XPCCallContext(const XPCCallContext& r); // not implemented
|
||||
XPCCallContext& operator= (const XPCCallContext& r); // not implemented
|
||||
|
||||
XPCWrappedNative* UnwrapThisIfAllowed(JS::HandleObject obj, JS::HandleObject fun,
|
||||
unsigned argc);
|
||||
|
||||
private:
|
||||
// possible values for mState
|
||||
enum State {
|
||||
|
@ -91,9 +91,7 @@ AccessCheck::getPrincipal(JSCompartment *compartment)
|
||||
return GetCompartmentPrincipal(compartment);
|
||||
}
|
||||
|
||||
// Hardcoded policy for cross origin property access. This was culled from the
|
||||
// preferences file (all.js). We don't want users to overwrite highly sensitive
|
||||
// security policies.
|
||||
// Hardcoded policy for cross origin property access. See the HTML5 Spec.
|
||||
static bool
|
||||
IsPermitted(const char *name, JSFlatString *prop, bool set)
|
||||
{
|
||||
@ -102,15 +100,11 @@ IsPermitted(const char *name, JSFlatString *prop, bool set)
|
||||
return false;
|
||||
|
||||
jschar propChar0 = JS_GetFlatStringCharAt(prop, 0);
|
||||
switch (name[0]) {
|
||||
case 'L':
|
||||
if (!strcmp(name, "Location"))
|
||||
return dom::LocationBinding::IsPermitted(prop, propChar0, set);
|
||||
case 'W':
|
||||
if (!strcmp(name, "Window"))
|
||||
return dom::WindowBinding::IsPermitted(prop, propChar0, set);
|
||||
break;
|
||||
}
|
||||
if (name[0] == 'L' && !strcmp(name, "Location"))
|
||||
return dom::LocationBinding::IsPermitted(prop, propChar0, set);
|
||||
if (name[0] == 'W' && !strcmp(name, "Window"))
|
||||
return dom::WindowBinding::IsPermitted(prop, propChar0, set);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -31,8 +31,16 @@
|
||||
|
||||
using namespace mozilla;
|
||||
|
||||
#define TOUCHCARET_LOG(...)
|
||||
// #define TOUCHCARET_LOG(...) printf_stderr("TouchCaret: " __VA_ARGS__)
|
||||
// To enable all the TOUCHCARET_LOG print statements, change the 0 to 1 in the
|
||||
// following #define.
|
||||
#define ENABLE_TOUCHCARET_LOG 0
|
||||
|
||||
#if ENABLE_TOUCHCARET_LOG
|
||||
#define TOUCHCARET_LOG(message, ...) \
|
||||
printf_stderr("TouchCaret (%p): %s:%d : " message "\n", this, __func__, __LINE__, ##__VA_ARGS__);
|
||||
#else
|
||||
#define TOUCHCARET_LOG(message, ...)
|
||||
#endif
|
||||
|
||||
// Clicking on the boundary of input/textarea will place the caret at the
|
||||
// front/end of the content. To avoid this, we need to deflate the content
|
||||
@ -50,6 +58,7 @@ TouchCaret::TouchCaret(nsIPresShell* aPresShell)
|
||||
mCaretCenterToDownPointOffsetY(0),
|
||||
mVisible(false)
|
||||
{
|
||||
TOUCHCARET_LOG("Constructor");
|
||||
MOZ_ASSERT(NS_IsMainThread());
|
||||
|
||||
static bool addedTouchCaretPref = false;
|
||||
@ -68,6 +77,7 @@ TouchCaret::TouchCaret(nsIPresShell* aPresShell)
|
||||
|
||||
TouchCaret::~TouchCaret()
|
||||
{
|
||||
TOUCHCARET_LOG("Destructor");
|
||||
MOZ_ASSERT(NS_IsMainThread());
|
||||
|
||||
if (mTouchCaretExpirationTimer) {
|
||||
@ -90,8 +100,10 @@ void
|
||||
TouchCaret::SetVisibility(bool aVisible)
|
||||
{
|
||||
if (mVisible == aVisible) {
|
||||
TOUCHCARET_LOG("Visibility not changed");
|
||||
return;
|
||||
}
|
||||
|
||||
mVisible = aVisible;
|
||||
|
||||
nsCOMPtr<nsIPresShell> presShell = do_QueryReferent(mPresShell);
|
||||
@ -108,6 +120,8 @@ TouchCaret::SetVisibility(bool aVisible)
|
||||
touchCaretElement->ClassList()->Toggle(NS_LITERAL_STRING("hidden"),
|
||||
dom::Optional<bool>(!mVisible),
|
||||
err);
|
||||
TOUCHCARET_LOG("Visibility %s", (mVisible ? "shown" : "hidden"));
|
||||
|
||||
// Set touch caret expiration time.
|
||||
mVisible ? LaunchExpirationTimer() : CancelExpirationTimer();
|
||||
|
||||
@ -230,6 +244,8 @@ TouchCaret::SetTouchFramePos(const nsPoint& aOrigin)
|
||||
styleStr.AppendInt(y);
|
||||
styleStr.AppendLiteral("px;");
|
||||
|
||||
TOUCHCARET_LOG("Set style: %s", NS_ConvertUTF16toUTF8(styleStr).get());
|
||||
|
||||
touchCaretElement->SetAttr(kNameSpaceID_None, nsGkAtoms::style,
|
||||
styleStr, true);
|
||||
}
|
||||
@ -367,6 +383,7 @@ TouchCaret::UpdateTouchCaret(bool aVisible)
|
||||
bool caretVisible = false;
|
||||
caret->GetCaretVisible(&caretVisible);
|
||||
if (!caretVisible) {
|
||||
TOUCHCARET_LOG("Caret is not visible");
|
||||
SetVisibility(false);
|
||||
return;
|
||||
}
|
||||
@ -376,6 +393,7 @@ TouchCaret::UpdateTouchCaret(bool aVisible)
|
||||
nsRect focusRect;
|
||||
nsIFrame* focusFrame = caret->GetGeometry(caretSelection, &focusRect);
|
||||
if (!focusFrame || focusRect.IsEmpty()) {
|
||||
TOUCHCARET_LOG("Focus frame not valid");
|
||||
SetVisibility(false);
|
||||
return;
|
||||
}
|
||||
@ -492,6 +510,7 @@ TouchCaret::HandleEvent(WidgetEvent* aEvent)
|
||||
case NS_KEY_PRESS:
|
||||
case NS_WHEEL_EVENT_START:
|
||||
// Disable touch caret while key/wheel event is received.
|
||||
TOUCHCARET_LOG("Receive key/wheel event");
|
||||
SetVisibility(false);
|
||||
break;
|
||||
default:
|
||||
@ -532,7 +551,7 @@ TouchCaret::GetEventPosition(WidgetMouseEvent* aEvent)
|
||||
nsEventStatus
|
||||
TouchCaret::HandleMouseMoveEvent(WidgetMouseEvent* aEvent)
|
||||
{
|
||||
TOUCHCARET_LOG("%p got a mouse-move in state %d\n", this, mState);
|
||||
TOUCHCARET_LOG("Got a mouse-move in state %d", mState);
|
||||
nsEventStatus status = nsEventStatus_eIgnore;
|
||||
|
||||
switch (mState) {
|
||||
@ -564,7 +583,7 @@ TouchCaret::HandleMouseMoveEvent(WidgetMouseEvent* aEvent)
|
||||
nsEventStatus
|
||||
TouchCaret::HandleTouchMoveEvent(WidgetTouchEvent* aEvent)
|
||||
{
|
||||
TOUCHCARET_LOG("%p got a touch-move in state %d\n", this, mState);
|
||||
TOUCHCARET_LOG("Got a touch-move in state %d", mState);
|
||||
nsEventStatus status = nsEventStatus_eIgnore;
|
||||
|
||||
switch (mState) {
|
||||
@ -600,7 +619,7 @@ TouchCaret::HandleTouchMoveEvent(WidgetTouchEvent* aEvent)
|
||||
nsEventStatus
|
||||
TouchCaret::HandleMouseUpEvent(WidgetMouseEvent* aEvent)
|
||||
{
|
||||
TOUCHCARET_LOG("%p got a mouse-up in state %d\n", this, mState);
|
||||
TOUCHCARET_LOG("Got a mouse-up in state %d", mState);
|
||||
nsEventStatus status = nsEventStatus_eIgnore;
|
||||
|
||||
switch (mState) {
|
||||
@ -628,7 +647,7 @@ TouchCaret::HandleMouseUpEvent(WidgetMouseEvent* aEvent)
|
||||
nsEventStatus
|
||||
TouchCaret::HandleTouchUpEvent(WidgetTouchEvent* aEvent)
|
||||
{
|
||||
TOUCHCARET_LOG("%p got a touch-end in state %d\n", this, mState);
|
||||
TOUCHCARET_LOG("Got a touch-end in state %d", mState);
|
||||
// Remove touches from cache if the stroke is gone in TOUCHDRAG states.
|
||||
if (mState == TOUCHCARET_TOUCHDRAG_ACTIVE ||
|
||||
mState == TOUCHCARET_TOUCHDRAG_INACTIVE) {
|
||||
@ -685,7 +704,7 @@ TouchCaret::HandleTouchUpEvent(WidgetTouchEvent* aEvent)
|
||||
nsEventStatus
|
||||
TouchCaret::HandleMouseDownEvent(WidgetMouseEvent* aEvent)
|
||||
{
|
||||
TOUCHCARET_LOG("%p got a mouse-down in state %d\n", this, mState);
|
||||
TOUCHCARET_LOG("Got a mouse-down in state %d", mState);
|
||||
if (!GetVisibility()) {
|
||||
// If touch caret is invisible, bypass event.
|
||||
return nsEventStatus_eIgnore;
|
||||
@ -734,7 +753,7 @@ TouchCaret::HandleMouseDownEvent(WidgetMouseEvent* aEvent)
|
||||
nsEventStatus
|
||||
TouchCaret::HandleTouchDownEvent(WidgetTouchEvent* aEvent)
|
||||
{
|
||||
TOUCHCARET_LOG("%p got a touch-start in state %d\n", this, mState);
|
||||
TOUCHCARET_LOG("Got a touch-start in state %d", mState);
|
||||
|
||||
nsEventStatus status = nsEventStatus_eIgnore;
|
||||
|
||||
@ -793,7 +812,7 @@ TouchCaret::HandleTouchDownEvent(WidgetTouchEvent* aEvent)
|
||||
void
|
||||
TouchCaret::SetState(TouchCaretState aState)
|
||||
{
|
||||
TOUCHCARET_LOG("%p state changed from %d to %d\n", this, mState, aState);
|
||||
TOUCHCARET_LOG("state changed from %d to %d", mState, aState);
|
||||
if (mState == TOUCHCARET_NONE) {
|
||||
MOZ_ASSERT(aState != TOUCHCARET_TOUCHDRAG_INACTIVE,
|
||||
"mState: NONE => TOUCHDRAG_INACTIVE isn't allowed!");
|
||||
|
@ -46,8 +46,8 @@ class nsCaret : public nsISelectionListener
|
||||
/** GetCaretVisible will get the visibility of the caret
|
||||
* This function is virtual so that it can be used by nsCaretAccessible
|
||||
* without linking
|
||||
* @param inMakeVisible true it is shown, false it is hidden
|
||||
* @return false if and only if inMakeVisible is null, otherwise true
|
||||
* @param outMakeVisible true if it is shown, false if it is hidden
|
||||
* @return NS_OK
|
||||
*/
|
||||
virtual nsresult GetCaretVisible(bool *outMakeVisible);
|
||||
|
||||
@ -58,13 +58,13 @@ class nsCaret : public nsISelectionListener
|
||||
|
||||
/** SetCaretReadOnly set the appearance of the caret
|
||||
* @param inMakeReadonly true to show the caret in a 'read only' state,
|
||||
* false to show the caret in normal, editing state
|
||||
* false to show the caret in normal, editing state
|
||||
*/
|
||||
void SetCaretReadOnly(bool inMakeReadonly);
|
||||
|
||||
/** GetCaretReadOnly get the appearance of the caret
|
||||
* @return true if the caret is in 'read only' state, otherwise,
|
||||
* returns false
|
||||
* @return true if the caret is in 'read only' state, otherwise,
|
||||
* returns false
|
||||
*/
|
||||
bool GetCaretReadOnly()
|
||||
{
|
||||
@ -232,13 +232,13 @@ protected:
|
||||
uint32_t mBlinkRate; // time for one cycle (on then off), in milliseconds
|
||||
nscoord mCaretWidthCSSPx; // caret width in CSS pixels
|
||||
float mCaretAspectRatio; // caret width/height aspect ratio
|
||||
|
||||
|
||||
bool mVisible; // is the caret blinking
|
||||
|
||||
bool mDrawn; // Denotes when the caret is physically drawn on the screen.
|
||||
bool mPendingDraw; // True when the last on-state draw was suppressed.
|
||||
|
||||
bool mReadOnly; // it the caret in readonly state (draws differently)
|
||||
bool mReadOnly; // it the caret in readonly state (draws differently)
|
||||
bool mShowDuringSelection; // show when text is selected
|
||||
|
||||
bool mIgnoreUserModify;
|
||||
|
@ -53,7 +53,7 @@
|
||||
display: table-row !important;
|
||||
}
|
||||
|
||||
/* The ::-moz-table-column pseudo-element is for extra columns at the end
|
||||
/* The ::-moz-table-column pseudo-element is for extra columns at the end
|
||||
of a table. */
|
||||
*|*::-moz-table-column {
|
||||
display: table-column !important;
|
||||
@ -62,7 +62,7 @@
|
||||
*|*::-moz-table-column-group {
|
||||
display: table-column-group !important;
|
||||
}
|
||||
|
||||
|
||||
*|*::-moz-table-row-group {
|
||||
display: table-row-group !important;
|
||||
}
|
||||
@ -167,7 +167,7 @@
|
||||
%endif
|
||||
}
|
||||
|
||||
*|*::-moz-column-content {
|
||||
*|*::-moz-column-content {
|
||||
/* the column boxes inside a column-flowed block */
|
||||
/* make unicode-bidi inherit, otherwise it has no effect on column boxes */
|
||||
unicode-bidi: inherit;
|
||||
@ -288,103 +288,27 @@ parsererror|sourcetext {
font-size: 12pt;
}

div[\_moz_anonclass="mozTouchCaret"].moz-touchcaret {
background-image: url("resource://gre/res/text_selection_handle.png");
position: absolute;
width: 19px;
height: 24px;
margin-left: -10px;
background-position: center center;
z-index: 2147483647;
}

div[\_moz_anonclass="mozTouchCaret"].moz-touchcaret,
div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left,
div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right {
background-image: url("resource://gre/res/text_caret.png");
background-image: url("resource://gre/res/caret_middle.svg");
position: absolute;
width: 21px;
height: 26px;
margin-left: -11px;
width: 29px;
height: 31px;
margin-left: -15px;
background-position: center center;
background-size: 100% 100%;
z-index: 2147483647;
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left.tilt {
background-image: url("resource://gre/res/text_caret_tilt_left.png");
margin-left: -22px;
width: 22px;
background-image: url("resource://gre/res/caret_left.svg");
margin-left: -29px;
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right.tilt {
background-image: url("resource://gre/res/text_caret_tilt_right.png");
background-image: url("resource://gre/res/caret_right.svg");
margin-left: 0px;
width: 22px;
}

@media (min-resolution: 1.5dppx) {
div[\_moz_anonclass="mozTouchCaret"].moz-touchcaret {
background-image: url("resource://gre/res/text_selection_handle@1.5.png");
position: absolute;
width: 29px;
height: 36px;
margin-left: -15px;
background-position: center center;
z-index: 2147483647;
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left,
div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right {
background-image: url("resource://gre/res/text_caret@1.5x.png");
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left.tilt {
background-image: url("resource://gre/res/text_caret_tilt_left@1.5x.png");
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right.tilt {
background-image: url("resource://gre/res/text_caret_tilt_right@1.5x.png");
}
}

@media (min-resolution: 2dppx) {
div[\_moz_anonclass="mozTouchCaret"].moz-touchcaret {
background-image: url("resource://gre/res/text_selection_handle@2.png");
position: absolute;
width: 38px;
height: 48px;
margin-left: -19px;
background-position: center center;
z-index: 2147483647;
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left,
div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right {
background-image: url("resource://gre/res/text_caret@2x.png");
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left.tilt {
background-image: url("resource://gre/res/text_caret_tilt_left@2x.png");
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right.tilt {
background-image: url("resource://gre/res/text_caret_tilt_right@2x.png");
}
}

@media (min-resolution: 2.25dppx) {
div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left
div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right {
background-image: url("resource://gre/res/text_caret@2.25x.png");
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-left.tilt {
background-image: url("resource://gre/res/text_caret_tilt_left@2.25x.png");
}

div[\_moz_anonclass="mozTouchCaret"].moz-selectioncaret-right.tilt {
background-image: url("resource://gre/res/text_caret_tilt_right@2.25x.png");
}
}

div[\_moz_anonclass="mozTouchCaret"].moz-touchcaret.hidden,
@ -105,7 +105,7 @@ class RemoteOptions(ReftestOptions):
# Ensure our defaults are set properly for everything we can infer
if not options.remoteTestRoot:
options.remoteTestRoot = self.automation._devicemanager.getDeviceRoot() + '/reftest'
options.remoteTestRoot = self.automation._devicemanager.deviceRoot + '/reftest'
options.remoteProfile = options.remoteTestRoot + "/profile"

# Verify that our remotewebserver is set properly
@ -402,7 +402,7 @@ class RemoteReftest(RefTest):
logcat = self._devicemanager.getLogcat(filterOutRegexps=fennecLogcatFilters)
print ''.join(logcat)
print "Device info: %s" % self._devicemanager.getInfo()
print "Test root: %s" % self._devicemanager.getDeviceRoot()
print "Test root: %s" % self._devicemanager.deviceRoot
except devicemanager.DMError:
print "WARNING: Error getting device information"

@ -140,7 +140,7 @@ class B2GOptions(ReftestOptions):
self.error("Cannot run parallel tests here")

if not options.remoteTestRoot:
options.remoteTestRoot = auto._devicemanager.getDeviceRoot() + "/reftest"
options.remoteTestRoot = auto._devicemanager.deviceRoot + "/reftest"

options.remoteProfile = options.remoteTestRoot + "/profile"
@ -38,7 +38,8 @@ extern "C" {
var = 0

#if defined(__pnacl__) || defined(__CLR_VER) || defined(COVERAGE_ENABLED) || \
defined(TARGET_IPHONE_SIMULATOR)
defined(TARGET_IPHONE_SIMULATOR) || \
(defined(_MSC_VER) && defined(__clang__))
#define LIBYUV_DISABLE_X86
#endif
// True if compiling for SSSE3 as a requirement.

@ -115,7 +115,7 @@ enum FourCC {
FOURCC_H264 = FOURCC('H', '2', '6', '4'),

// Match any fourcc.
FOURCC_ANY = 0xFFFFFFFF,
FOURCC_ANY = -1,
};

enum FourCCBpp {

@ -10,7 +10,7 @@

#include "libyuv/cpu_id.h"

#ifdef _MSC_VER
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>  // For __cpuidex()
#endif
#if !defined(__pnacl__) && !defined(__CLR_VER) && \
@ -48,7 +48,7 @@ extern "C" {
defined(__i386__) || defined(__x86_64__))
LIBYUV_API
void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
#if defined(_MSC_VER)
#if defined(_MSC_VER) && !defined(__clang__)
#if (_MSC_FULL_VER >= 160040219)
__cpuidex((int*)(cpu_info), info_eax, info_ecx);
#elif defined(_M_IX86)
@ -96,6 +96,7 @@ static int nr_ice_pre_answer_request_destroy(nr_ice_pre_answer_request **parp)
nr_stun_message_destroy(&par->req.response);

RFREE(par->username);
RFREE(par);

return(0);
}

@ -521,6 +521,10 @@ int nr_stun_client_process_response(nr_stun_client_ctx *ctx, UCHAR *msg, int len
password = &hmac_key;
}

if (ctx->response) {
nr_stun_message_destroy(&ctx->response);
}

if ((r=nr_stun_message_create2(&ctx->response, msg, len)))
ABORT(r);
48
mfbt/LinuxSignal.h
Normal file
@ -0,0 +1,48 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef mozilla_LinuxSignal_h
#define mozilla_LinuxSignal_h

namespace mozilla {

#if defined(__arm__)

// Some (old) Linux kernels on ARM have a bug where a signal handler
// can be called without clearing the IT bits in CPSR first. The result
// is that the first few instructions of the handler could be skipped,
// ultimately resulting in crashes. To work around this bug, the handler
// on ARM is a trampoline that starts with enough NOP instructions, so
// that even if the IT bits are not cleared, only the NOP instructions
// will be skipped over.

template <void (*H)(int, siginfo_t*, void*)>
__attribute__((naked)) void
SignalTrampoline(int aSignal, siginfo_t* aInfo, void* aContext)
{
  asm volatile (
    "nop; nop; nop; nop"
    : : : "memory");

  // Because the assembler may generate additional instructions below, we
  // need to ensure NOPs are inserted first by separating them out above.

  asm volatile (
    "bx %0"
    :
    : "r"(H), "l"(aSignal), "l"(aInfo), "l"(aContext)
    : "memory");
}

# define MOZ_SIGNAL_TRAMPOLINE(h) (mozilla::SignalTrampoline<h>)

#else // __arm__

# define MOZ_SIGNAL_TRAMPOLINE(h) (h)

#endif // __arm__

} // namespace mozilla

#endif // mozilla_LinuxSignal_h
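
For context, a minimal usage sketch (not part of this patch; the handler name, the signal, and the flags below are illustrative assumptions):

// Illustrative sketch only, not part of this patch. Shows how a SA_SIGINFO
// handler would typically be installed through MOZ_SIGNAL_TRAMPOLINE so that
// ARM builds go through the NOP-padded trampoline while other architectures
// call the handler directly.
#include <signal.h>
#include <string.h>
#include "mozilla/LinuxSignal.h"

void MyFaultHandler(int aSignal, siginfo_t* aInfo, void* aContext)
{
  // ... inspect aInfo / aContext, then recover or re-raise ...
}

void InstallFaultHandler()
{
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = MOZ_SIGNAL_TRAMPOLINE(MyFaultHandler);
  sa.sa_flags = SA_SIGINFO | SA_NODEFER;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGSEGV, &sa, nullptr);
}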
@ -78,6 +78,10 @@ if CONFIG['OS_ARCH'] == 'WINNT':
EXPORTS.mozilla += [
'WindowsVersion.h',
]
elif CONFIG['OS_ARCH'] == 'Linux':
EXPORTS.mozilla += [
'LinuxSignal.h',
]

UNIFIED_SOURCES = [
'double-conversion/bignum-dtoa.cc',
@ -24,8 +24,8 @@ interface nsIHttpChannel : nsIChannel
*/

/**
* Set/get the HTTP request method (default is "GET"). Setter is case
* insensitive; getter returns an uppercase string.
* Set/get the HTTP request method (default is "GET"). Both setter and
* getter are case sensitive.
*
* This attribute may only be set before the channel is opened.
*
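
For context, a minimal caller-side sketch (not part of this patch; UsePostMethod, the assumed aChannel argument, and the "POST" value are only illustrative):

// Illustrative sketch only, not part of this patch. With the setter now case
// sensitive, the caller passes the canonical (uppercase) method name itself
// instead of relying on the channel to normalize it.
#include "nsCOMPtr.h"
#include "nsIChannel.h"
#include "nsIHttpChannel.h"
#include "nsNetUtil.h"

void UsePostMethod(nsIChannel* aChannel)
{
  nsCOMPtr<nsIHttpChannel> httpChannel = do_QueryInterface(aChannel);
  if (httpChannel) {
    // Must happen before the channel is opened; "POST" must be uppercase now
    // that the setter no longer uppercases the value.
    httpChannel->SetRequestMethod(NS_LITERAL_CSTRING("POST"));
  }
}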
@ -20,6 +20,7 @@ SOURCES += [
'pkixder_universal_types_tests.cpp',
'pkixgtest.cpp',
'pkixocsp_CreateEncodedOCSPRequest_tests.cpp',
'pkixocsp_VerifyEncodedOCSPResponse.cpp',
]

LOCAL_INCLUDES += [
885
security/pkix/test/gtest/pkixocsp_VerifyEncodedOCSPResponse.cpp
Normal file
@ -0,0 +1,885 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This code is made available to you under your choice of the following sets
|
||||
* of licensing terms:
|
||||
*/
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
*/
|
||||
/* Copyright 2014 Mozilla Contributors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "nss.h"
|
||||
#include "nssgtest.h"
|
||||
#include "pkix/pkix.h"
|
||||
#include "pkixtestutil.h"
|
||||
#include "prinit.h"
|
||||
#include "secerr.h"
|
||||
|
||||
using namespace mozilla::pkix;
|
||||
using namespace mozilla::pkix::test;
|
||||
|
||||
const uint16_t END_ENTITY_MAX_LIFETIME_IN_DAYS = 10;
|
||||
|
||||
class OCSPTestTrustDomain : public TrustDomain
|
||||
{
|
||||
public:
|
||||
OCSPTestTrustDomain()
|
||||
{
|
||||
}
|
||||
|
||||
virtual SECStatus GetCertTrust(EndEntityOrCA endEntityOrCA,
|
||||
const CertPolicyId&,
|
||||
const SECItem& candidateCert,
|
||||
/*out*/ TrustLevel* trustLevel)
|
||||
{
|
||||
EXPECT_EQ(endEntityOrCA, EndEntityOrCA::MustBeEndEntity);
|
||||
EXPECT_TRUE(trustLevel);
|
||||
*trustLevel = TrustLevel::InheritsTrust;
|
||||
return SECSuccess;
|
||||
}
|
||||
|
||||
virtual SECStatus FindIssuer(const SECItem&, IssuerChecker&, PRTime)
|
||||
{
|
||||
ADD_FAILURE();
|
||||
PR_SetError(SEC_ERROR_LIBRARY_FAILURE, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
|
||||
virtual SECStatus CheckRevocation(EndEntityOrCA endEntityOrCA, const CertID&,
|
||||
PRTime time,
|
||||
/*optional*/ const SECItem*,
|
||||
/*optional*/ const SECItem*)
|
||||
{
|
||||
// TODO: I guess mozilla::pkix should support revocation of designated
|
||||
// OCSP responder eventually, but we don't now, so this function should
|
||||
// never get called.
|
||||
ADD_FAILURE();
|
||||
PR_SetError(SEC_ERROR_LIBRARY_FAILURE, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
|
||||
virtual SECStatus IsChainValid(const DERArray&)
|
||||
{
|
||||
ADD_FAILURE();
|
||||
PR_SetError(SEC_ERROR_LIBRARY_FAILURE, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
|
||||
virtual SECStatus VerifySignedData(const SignedDataWithSignature& signedData,
|
||||
const SECItem& subjectPublicKeyInfo)
|
||||
{
|
||||
return ::mozilla::pkix::VerifySignedData(signedData, subjectPublicKeyInfo,
|
||||
nullptr);
|
||||
}
|
||||
|
||||
virtual SECStatus DigestBuf(const SECItem& item, /*out*/ uint8_t* digestBuf,
|
||||
size_t digestBufLen)
|
||||
{
|
||||
return ::mozilla::pkix::DigestBuf(item, digestBuf, digestBufLen);
|
||||
}
|
||||
|
||||
private:
|
||||
OCSPTestTrustDomain(const OCSPTestTrustDomain&) /*delete*/;
|
||||
void operator=(const OCSPTestTrustDomain&) /*delete*/;
|
||||
};
|
||||
|
||||
namespace {
|
||||
char const* const rootName = "CN=Test CA 1";
|
||||
void deleteCertID(CertID* certID) { delete certID; }
|
||||
} // unnamed namespace
|
||||
|
||||
class pkixocsp_VerifyEncodedResponse : public NSSTest
|
||||
{
|
||||
public:
|
||||
static bool SetUpTestCaseInner()
|
||||
{
|
||||
ScopedSECKEYPublicKey rootPublicKey;
|
||||
if (GenerateKeyPair(rootPublicKey, rootPrivateKey) != SECSuccess) {
|
||||
return false;
|
||||
}
|
||||
rootSPKI = SECKEY_EncodeDERSubjectPublicKeyInfo(rootPublicKey.get());
|
||||
if (!rootSPKI) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void SetUpTestCase()
|
||||
{
|
||||
NSSTest::SetUpTestCase();
|
||||
if (!SetUpTestCaseInner()) {
|
||||
PR_Abort();
|
||||
}
|
||||
}
|
||||
|
||||
void SetUp()
|
||||
{
|
||||
NSSTest::SetUp();
|
||||
|
||||
const SECItem* rootNameDER = ASCIIToDERName(arena.get(), rootName);
|
||||
if (!rootNameDER) {
|
||||
PR_Abort();
|
||||
}
|
||||
const SECItem*
|
||||
endEntitySerialNumber(CreateEncodedSerialNumber(arena.get(),
|
||||
++rootIssuedCount));
|
||||
if (!endEntitySerialNumber) {
|
||||
PR_Abort();
|
||||
}
|
||||
endEntityCertID = new (std::nothrow) CertID(*rootNameDER, *rootSPKI,
|
||||
*endEntitySerialNumber);
|
||||
if (!endEntityCertID) {
|
||||
PR_Abort();
|
||||
}
|
||||
}
|
||||
|
||||
static ScopedSECKEYPrivateKey rootPrivateKey;
|
||||
static ScopedSECItem rootSPKI;
|
||||
static long rootIssuedCount;
|
||||
|
||||
OCSPTestTrustDomain trustDomain;
|
||||
// endEntityCertID references items owned by arena and rootSPKI.
|
||||
ScopedPtr<CertID, deleteCertID> endEntityCertID;
|
||||
};
|
||||
|
||||
/*static*/ ScopedSECKEYPrivateKey
|
||||
pkixocsp_VerifyEncodedResponse::rootPrivateKey;
|
||||
/*static*/ ScopedSECItem pkixocsp_VerifyEncodedResponse::rootSPKI;
|
||||
/*static*/ long pkixocsp_VerifyEncodedResponse::rootIssuedCount = 0;
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// responseStatus
|
||||
|
||||
struct WithoutResponseBytes
|
||||
{
|
||||
uint8_t responseStatus;
|
||||
PRErrorCode expectedError;
|
||||
};
|
||||
|
||||
static const WithoutResponseBytes WITHOUT_RESPONSEBYTES[] = {
|
||||
{ OCSPResponseContext::successful, SEC_ERROR_OCSP_MALFORMED_RESPONSE },
|
||||
{ OCSPResponseContext::malformedRequest, SEC_ERROR_OCSP_MALFORMED_REQUEST },
|
||||
{ OCSPResponseContext::internalError, SEC_ERROR_OCSP_SERVER_ERROR },
|
||||
{ OCSPResponseContext::tryLater, SEC_ERROR_OCSP_TRY_SERVER_LATER },
|
||||
{ 4/*unused*/, SEC_ERROR_OCSP_UNKNOWN_RESPONSE_STATUS },
|
||||
{ OCSPResponseContext::sigRequired, SEC_ERROR_OCSP_REQUEST_NEEDS_SIG },
|
||||
{ OCSPResponseContext::unauthorized, SEC_ERROR_OCSP_UNAUTHORIZED_REQUEST },
|
||||
{ OCSPResponseContext::unauthorized + 1,
|
||||
SEC_ERROR_OCSP_UNKNOWN_RESPONSE_STATUS
|
||||
},
|
||||
};
|
||||
|
||||
class pkixocsp_VerifyEncodedResponse_WithoutResponseBytes
|
||||
: public pkixocsp_VerifyEncodedResponse
|
||||
, public ::testing::WithParamInterface<WithoutResponseBytes>
|
||||
{
|
||||
protected:
|
||||
SECItem* CreateEncodedOCSPErrorResponse(uint8_t status)
|
||||
{
|
||||
static const SECItem EMPTY = { siBuffer, nullptr, 0 };
|
||||
OCSPResponseContext context(arena.get(),
|
||||
CertID(EMPTY, EMPTY, EMPTY),
|
||||
oneDayBeforeNow);
|
||||
context.responseStatus = status;
|
||||
context.skipResponseBytes = true;
|
||||
return CreateEncodedOCSPResponse(context);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_P(pkixocsp_VerifyEncodedResponse_WithoutResponseBytes, CorrectErrorCode)
|
||||
{
|
||||
SECItem* response(CreateEncodedOCSPErrorResponse(
|
||||
GetParam().responseStatus));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECFailure(GetParam().expectedError,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(pkixocsp_VerifyEncodedResponse_WithoutResponseBytes,
|
||||
pkixocsp_VerifyEncodedResponse_WithoutResponseBytes,
|
||||
testing::ValuesIn(WITHOUT_RESPONSEBYTES));
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// "successful" responses
|
||||
|
||||
namespace {
|
||||
|
||||
// Alias for nullptr to aid readability in the code below.
|
||||
static const char* byKey = nullptr;
|
||||
|
||||
} // unnamed namespace
|
||||
|
||||
class pkixocsp_VerifyEncodedResponse_successful
|
||||
: public pkixocsp_VerifyEncodedResponse
|
||||
{
|
||||
public:
|
||||
void SetUp()
|
||||
{
|
||||
pkixocsp_VerifyEncodedResponse::SetUp();
|
||||
}
|
||||
|
||||
static void SetUpTestCase()
|
||||
{
|
||||
pkixocsp_VerifyEncodedResponse::SetUpTestCase();
|
||||
}
|
||||
|
||||
SECItem* CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::CertStatus certStatus,
|
||||
const CertID& certID,
|
||||
/*optional*/ const char* signerName,
|
||||
const ScopedSECKEYPrivateKey& signerPrivateKey,
|
||||
PRTime producedAt, PRTime thisUpdate,
|
||||
/*optional*/ const PRTime* nextUpdate,
|
||||
/*optional*/ SECItem const* const* certs = nullptr)
|
||||
{
|
||||
OCSPResponseContext context(arena.get(), certID, producedAt);
|
||||
if (signerName) {
|
||||
context.signerNameDER = ASCIIToDERName(arena.get(), signerName);
|
||||
if (!context.signerNameDER) {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
context.signerPrivateKey = SECKEY_CopyPrivateKey(signerPrivateKey.get());
|
||||
if (!context.signerPrivateKey) {
|
||||
return nullptr;
|
||||
}
|
||||
context.responseStatus = OCSPResponseContext::successful;
|
||||
context.producedAt = producedAt;
|
||||
context.certs = certs;
|
||||
|
||||
context.certIDHashAlg = SEC_OID_SHA1;
|
||||
context.certStatus = certStatus;
|
||||
context.thisUpdate = thisUpdate;
|
||||
context.nextUpdate = nextUpdate ? *nextUpdate : 0;
|
||||
context.includeNextUpdate = nextUpdate != nullptr;
|
||||
|
||||
return CreateEncodedOCSPResponse(context);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_successful, good_byKey)
|
||||
{
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID, byKey,
|
||||
rootPrivateKey, oneDayBeforeNow, oneDayBeforeNow,
|
||||
&oneDayAfterNow));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECSuccess(VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_successful, good_byName)
|
||||
{
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID, rootName,
|
||||
rootPrivateKey, oneDayBeforeNow, oneDayBeforeNow,
|
||||
&oneDayAfterNow));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECSuccess(VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_successful, good_byKey_without_nextUpdate)
|
||||
{
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID, byKey,
|
||||
rootPrivateKey, oneDayBeforeNow, oneDayBeforeNow,
|
||||
nullptr));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECSuccess(VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_successful, revoked)
|
||||
{
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::revoked, *endEntityCertID, byKey,
|
||||
rootPrivateKey, oneDayBeforeNow, oneDayBeforeNow,
|
||||
&oneDayAfterNow));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_REVOKED_CERTIFICATE,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_successful, unknown)
|
||||
{
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::unknown, *endEntityCertID, byKey,
|
||||
rootPrivateKey, oneDayBeforeNow, oneDayBeforeNow,
|
||||
&oneDayAfterNow));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_UNKNOWN_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// indirect responses (signed by a delegated OCSP responder cert)
|
||||
|
||||
class pkixocsp_VerifyEncodedResponse_DelegatedResponder
|
||||
: public pkixocsp_VerifyEncodedResponse_successful
|
||||
{
|
||||
protected:
|
||||
// certSubjectName should be unique for each call. This way, we avoid any
|
||||
// issues with NSS caching the certificates internally. For the same reason,
|
||||
// we generate a new keypair on each call. Either one of these should be
|
||||
// sufficient to avoid issues with the NSS cache, but we do both to be
|
||||
// cautious.
|
||||
//
|
||||
// signerName should be byKey to use the byKey ResponderID construction, or
|
||||
// another value (usually equal to certSubjectName) to use the byName
|
||||
// ResponderID construction.
|
||||
//
|
||||
// If signerEKU is omitted, then the certificate will have the
|
||||
// id-kp-OCSPSigning EKU. If signerEKU is SEC_OID_UNKNOWN then it will not
|
||||
// have any EKU extension. Otherwise, the certificate will have the given
|
||||
// EKU.
|
||||
//
|
||||
// signerDEROut is owned by the arena
|
||||
SECItem* CreateEncodedIndirectOCSPSuccessfulResponse(
|
||||
const char* certSubjectName,
|
||||
OCSPResponseContext::CertStatus certStatus,
|
||||
const char* signerName,
|
||||
SECOidTag signerEKU = SEC_OID_OCSP_RESPONDER,
|
||||
/*optional, out*/ const SECItem** signerDEROut = nullptr)
|
||||
{
|
||||
PR_ASSERT(certSubjectName);
|
||||
|
||||
const SECItem* extensions[] = {
|
||||
signerEKU != SEC_OID_UNKNOWN
|
||||
? CreateEncodedEKUExtension(arena.get(), &signerEKU, 1,
|
||||
ExtensionCriticality::NotCritical)
|
||||
: nullptr,
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey signerPrivateKey;
|
||||
SECItem* signerDER(CreateEncodedCertificate(
|
||||
arena.get(), ++rootIssuedCount, rootName,
|
||||
oneDayBeforeNow, oneDayAfterNow, certSubjectName,
|
||||
signerEKU != SEC_OID_UNKNOWN ? extensions : nullptr,
|
||||
rootPrivateKey.get(), signerPrivateKey));
|
||||
EXPECT_TRUE(signerDER);
|
||||
if (!signerDER) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
const SECItem* signerNameDER = nullptr;
|
||||
if (signerName) {
|
||||
signerNameDER = ASCIIToDERName(arena.get(), signerName);
|
||||
if (!signerNameDER) {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
if (signerDEROut) {
|
||||
*signerDEROut = signerDER;
|
||||
}
|
||||
SECItem const* const certs[] = { signerDER, nullptr };
|
||||
return CreateEncodedOCSPSuccessfulResponse(certStatus, *endEntityCertID,
|
||||
signerName, signerPrivateKey,
|
||||
oneDayBeforeNow, oneDayBeforeNow,
|
||||
&oneDayAfterNow, certs);
|
||||
}
|
||||
|
||||
static SECItem* CreateEncodedCertificate(PLArenaPool* arena,
|
||||
uint32_t serialNumber,
|
||||
const char* issuer,
|
||||
PRTime notBefore,
|
||||
PRTime notAfter,
|
||||
const char* subject,
|
||||
/*optional*/ SECItem const* const* extensions,
|
||||
/*optional*/ SECKEYPrivateKey* signerKey,
|
||||
/*out*/ ScopedSECKEYPrivateKey& privateKey)
|
||||
{
|
||||
const SECItem* serialNumberDER(CreateEncodedSerialNumber(arena,
|
||||
serialNumber));
|
||||
if (!serialNumberDER) {
|
||||
return nullptr;
|
||||
}
|
||||
const SECItem* issuerDER(ASCIIToDERName(arena, issuer));
|
||||
if (!issuerDER) {
|
||||
return nullptr;
|
||||
}
|
||||
const SECItem* subjectDER(ASCIIToDERName(arena, subject));
|
||||
if (!subjectDER) {
|
||||
return nullptr;
|
||||
}
|
||||
return ::mozilla::pkix::test::CreateEncodedCertificate(
|
||||
arena, v3,
|
||||
SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION,
|
||||
serialNumberDER, issuerDER, notBefore,
|
||||
notAfter, subjectDER, extensions,
|
||||
signerKey, SEC_OID_SHA256, privateKey);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder, good_byKey)
|
||||
{
|
||||
SECItem* response(CreateEncodedIndirectOCSPSuccessfulResponse(
|
||||
"CN=good_indirect_byKey", OCSPResponseContext::good,
|
||||
byKey));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECSuccess(VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder, good_byName)
|
||||
{
|
||||
SECItem* response(CreateEncodedIndirectOCSPSuccessfulResponse(
|
||||
"CN=good_indirect_byName", OCSPResponseContext::good,
|
||||
"CN=good_indirect_byName"));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECSuccess(VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder,
|
||||
good_byKey_missing_signer)
|
||||
{
|
||||
ScopedSECKEYPublicKey missingSignerPublicKey;
|
||||
ScopedSECKEYPrivateKey missingSignerPrivateKey;
|
||||
ASSERT_SECSuccess(GenerateKeyPair(missingSignerPublicKey,
|
||||
missingSignerPrivateKey));
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID, byKey,
|
||||
missingSignerPrivateKey, oneDayBeforeNow,
|
||||
oneDayBeforeNow, nullptr));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder,
|
||||
good_byName_missing_signer)
|
||||
{
|
||||
ScopedSECKEYPublicKey missingSignerPublicKey;
|
||||
ScopedSECKEYPrivateKey missingSignerPrivateKey;
|
||||
ASSERT_SECSuccess(GenerateKeyPair(missingSignerPublicKey,
|
||||
missingSignerPrivateKey));
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID, "CN=missing",
|
||||
missingSignerPrivateKey, oneDayBeforeNow,
|
||||
oneDayBeforeNow, nullptr));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder, good_expired)
|
||||
{
|
||||
static const SECOidTag signerEKU = SEC_OID_OCSP_RESPONDER;
|
||||
static const char* signerName = "CN=good_indirect_expired";
|
||||
|
||||
const SECItem* extensions[] = {
|
||||
CreateEncodedEKUExtension(arena.get(), &signerEKU, 1,
|
||||
ExtensionCriticality::NotCritical),
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey signerPrivateKey;
|
||||
SECItem* signerDER(CreateEncodedCertificate(arena.get(), ++rootIssuedCount,
|
||||
rootName,
|
||||
now - (10 * ONE_DAY),
|
||||
now - (2 * ONE_DAY),
|
||||
signerName, extensions,
|
||||
rootPrivateKey.get(),
|
||||
signerPrivateKey));
|
||||
ASSERT_TRUE(signerDER);
|
||||
|
||||
SECItem const* const certs[] = { signerDER, nullptr };
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID, signerName,
|
||||
signerPrivateKey, oneDayBeforeNow, oneDayBeforeNow,
|
||||
&oneDayAfterNow,
|
||||
certs));
|
||||
ASSERT_TRUE(response);
|
||||
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder, good_future)
|
||||
{
|
||||
static const SECOidTag signerEKU = SEC_OID_OCSP_RESPONDER;
|
||||
static const char* signerName = "CN=good_indirect_future";
|
||||
|
||||
const SECItem* extensions[] = {
|
||||
CreateEncodedEKUExtension(arena.get(), &signerEKU, 1,
|
||||
ExtensionCriticality::NotCritical),
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey signerPrivateKey;
|
||||
SECItem* signerDER(CreateEncodedCertificate(arena.get(), ++rootIssuedCount,
|
||||
rootName,
|
||||
now + (2 * ONE_DAY),
|
||||
now + (10 * ONE_DAY),
|
||||
signerName, extensions,
|
||||
rootPrivateKey.get(),
|
||||
signerPrivateKey));
|
||||
ASSERT_TRUE(signerDER);
|
||||
|
||||
SECItem const* const certs[] = { signerDER, nullptr };
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID,
|
||||
signerName, signerPrivateKey, oneDayBeforeNow,
|
||||
oneDayBeforeNow, &oneDayAfterNow, certs));
|
||||
ASSERT_TRUE(response);
|
||||
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder, good_no_eku)
|
||||
{
|
||||
SECItem* response(CreateEncodedIndirectOCSPSuccessfulResponse(
|
||||
"CN=good_indirect_wrong_eku", OCSPResponseContext::good,
|
||||
byKey, SEC_OID_UNKNOWN));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder,
|
||||
good_indirect_wrong_eku)
|
||||
{
|
||||
SECItem* response(CreateEncodedIndirectOCSPSuccessfulResponse(
|
||||
"CN=good_indirect_wrong_eku", OCSPResponseContext::good,
|
||||
byKey, SEC_OID_EXT_KEY_USAGE_SERVER_AUTH));
|
||||
ASSERT_TRUE(response);
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
// Test that signature of OCSP response signer cert is verified
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder, good_tampered_eku)
|
||||
{
|
||||
SECItem* response(CreateEncodedIndirectOCSPSuccessfulResponse(
|
||||
"CN=good_indirect_tampered_eku",
|
||||
OCSPResponseContext::good, byKey,
|
||||
SEC_OID_EXT_KEY_USAGE_SERVER_AUTH));
|
||||
ASSERT_TRUE(response);
|
||||
|
||||
#define EKU_PREFIX \
|
||||
0x06, 8, /* OBJECT IDENTIFIER, 8 bytes */ \
|
||||
0x2B, 6, 1, 5, 5, 7, /* id-pkix */ \
|
||||
0x03 /* id-kp */
|
||||
static const uint8_t EKU_SERVER_AUTH[] = { EKU_PREFIX, 0x01 }; // serverAuth
|
||||
static const uint8_t EKU_OCSP_SIGNER[] = { EKU_PREFIX, 0x09 }; // OCSPSigning
|
||||
#undef EKU_PREFIX
|
||||
ASSERT_SECSuccess(TamperOnce(*response,
|
||||
EKU_SERVER_AUTH, PR_ARRAY_SIZE(EKU_SERVER_AUTH),
|
||||
EKU_OCSP_SIGNER, PR_ARRAY_SIZE(EKU_OCSP_SIGNER)));
|
||||
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder, good_unknown_issuer)
|
||||
{
|
||||
static const char* subCAName = "CN=good_indirect_unknown_issuer sub-CA";
|
||||
static const char* signerName = "CN=good_indirect_unknown_issuer OCSP signer";
|
||||
|
||||
// unknown issuer
|
||||
ScopedSECKEYPublicKey unknownPublicKey;
|
||||
ScopedSECKEYPrivateKey unknownPrivateKey;
|
||||
ASSERT_SECSuccess(GenerateKeyPair(unknownPublicKey, unknownPrivateKey));
|
||||
|
||||
// Delegated responder cert signed by unknown issuer
|
||||
static const SECOidTag signerEKU = SEC_OID_OCSP_RESPONDER;
|
||||
const SECItem* extensions[] = {
|
||||
CreateEncodedEKUExtension(arena.get(), &signerEKU, 1,
|
||||
ExtensionCriticality::NotCritical),
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey signerPrivateKey;
|
||||
SECItem* signerDER(CreateEncodedCertificate(arena.get(), 1,
|
||||
subCAName, oneDayBeforeNow, oneDayAfterNow,
|
||||
signerName, extensions, unknownPrivateKey.get(),
|
||||
signerPrivateKey));
|
||||
ASSERT_TRUE(signerDER);
|
||||
|
||||
// OCSP response signed by that delegated responder
|
||||
SECItem const* const certs[] = { signerDER, nullptr };
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID,
|
||||
signerName, signerPrivateKey, oneDayBeforeNow,
|
||||
oneDayBeforeNow, &oneDayAfterNow, certs));
|
||||
ASSERT_TRUE(response);
|
||||
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
// The CA that issued the OCSP responder cert is a sub-CA of the issuer of
|
||||
// the certificate that the OCSP response is for. That sub-CA cert is included
|
||||
// in the OCSP response before the OCSP responder cert.
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder,
|
||||
good_indirect_subca_1_first)
|
||||
{
|
||||
static const char* subCAName = "CN=good_indirect_subca_1_first sub-CA";
|
||||
static const char* signerName = "CN=good_indirect_subca_1_first OCSP signer";
|
||||
|
||||
// sub-CA of root (root is the direct issuer of endEntity)
|
||||
const SECItem* subCAExtensions[] = {
|
||||
CreateEncodedBasicConstraints(arena.get(), true, 0,
|
||||
ExtensionCriticality::NotCritical),
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey subCAPrivateKey;
|
||||
SECItem* subCADER(CreateEncodedCertificate(arena.get(), ++rootIssuedCount,
|
||||
rootName,
|
||||
oneDayBeforeNow, oneDayAfterNow,
|
||||
subCAName, subCAExtensions,
|
||||
rootPrivateKey.get(),
|
||||
subCAPrivateKey));
|
||||
ASSERT_TRUE(subCADER);
|
||||
|
||||
// Delegated responder cert signed by that sub-CA
|
||||
static const SECOidTag signerEKU = SEC_OID_OCSP_RESPONDER;
|
||||
const SECItem* extensions[] = {
|
||||
CreateEncodedEKUExtension(arena.get(), &signerEKU, 1,
|
||||
ExtensionCriticality::NotCritical),
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey signerPrivateKey;
|
||||
SECItem* signerDER(CreateEncodedCertificate(arena.get(), 1, subCAName,
|
||||
oneDayBeforeNow, oneDayAfterNow,
|
||||
signerName, extensions,
|
||||
subCAPrivateKey.get(),
|
||||
signerPrivateKey));
|
||||
ASSERT_TRUE(signerDER);
|
||||
|
||||
// OCSP response signed by the delegated responder issued by the sub-CA
|
||||
// that is trying to impersonate the root.
|
||||
SECItem const* const certs[] = { subCADER, signerDER, nullptr };
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID, signerName,
|
||||
signerPrivateKey, oneDayBeforeNow, oneDayBeforeNow,
|
||||
&oneDayAfterNow,
|
||||
certs));
|
||||
ASSERT_TRUE(response);
|
||||
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
// The CA that issued the OCSP responder cert is a sub-CA of the issuer of
|
||||
// the certificate that the OCSP response is for. That sub-CA cert is included
|
||||
// in the OCSP response after the OCSP responder cert.
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_DelegatedResponder,
|
||||
good_indirect_subca_1_second)
|
||||
{
|
||||
static const char* subCAName = "CN=good_indirect_subca_1_second sub-CA";
|
||||
static const char* signerName = "CN=good_indirect_subca_1_second OCSP signer";
|
||||
|
||||
// sub-CA of root (root is the direct issuer of endEntity)
|
||||
const SECItem* subCAExtensions[] = {
|
||||
CreateEncodedBasicConstraints(arena.get(), true, 0,
|
||||
ExtensionCriticality::NotCritical),
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey subCAPrivateKey;
|
||||
SECItem* subCADER(CreateEncodedCertificate(arena.get(), ++rootIssuedCount,
|
||||
rootName,
|
||||
oneDayBeforeNow, oneDayAfterNow,
|
||||
subCAName, subCAExtensions,
|
||||
rootPrivateKey.get(),
|
||||
subCAPrivateKey));
|
||||
ASSERT_TRUE(subCADER);
|
||||
|
||||
// Delegated responder cert signed by that sub-CA
|
||||
static const SECOidTag signerEKU = SEC_OID_OCSP_RESPONDER;
|
||||
const SECItem* extensions[] = {
|
||||
CreateEncodedEKUExtension(arena.get(), &signerEKU, 1,
|
||||
ExtensionCriticality::NotCritical),
|
||||
nullptr
|
||||
};
|
||||
ScopedSECKEYPrivateKey signerPrivateKey;
|
||||
SECItem* signerDER(CreateEncodedCertificate(arena.get(), 1, subCAName,
|
||||
oneDayBeforeNow, oneDayAfterNow,
|
||||
signerName, extensions,
|
||||
subCAPrivateKey.get(),
|
||||
signerPrivateKey));
|
||||
ASSERT_TRUE(signerDER);
|
||||
|
||||
// OCSP response signed by the delegated responder issued by the sub-CA
|
||||
// that is trying to impersonate the root.
|
||||
SECItem const* const certs[] = { signerDER, subCADER, nullptr };
|
||||
SECItem* response(CreateEncodedOCSPSuccessfulResponse(
|
||||
OCSPResponseContext::good, *endEntityCertID,
|
||||
signerName, signerPrivateKey, oneDayBeforeNow,
|
||||
oneDayBeforeNow, &oneDayAfterNow, certs));
|
||||
ASSERT_TRUE(response);
|
||||
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
class pkixocsp_VerifyEncodedResponse_GetCertTrust
|
||||
: public pkixocsp_VerifyEncodedResponse_DelegatedResponder {
|
||||
public:
|
||||
pkixocsp_VerifyEncodedResponse_GetCertTrust()
|
||||
: signerCertDER(nullptr)
|
||||
, response(nullptr)
|
||||
{
|
||||
}
|
||||
|
||||
void SetUp()
|
||||
{
|
||||
pkixocsp_VerifyEncodedResponse_DelegatedResponder::SetUp();
|
||||
response = CreateEncodedIndirectOCSPSuccessfulResponse(
|
||||
"CN=OCSPGetCertTrustTest Signer",
|
||||
OCSPResponseContext::good, byKey,
|
||||
SEC_OID_OCSP_RESPONDER, &signerCertDER);
|
||||
if (!response || !signerCertDER) {
|
||||
PR_Abort();
|
||||
}
|
||||
}
|
||||
|
||||
class TrustDomain : public OCSPTestTrustDomain
|
||||
{
|
||||
public:
|
||||
TrustDomain()
|
||||
: certTrustLevel(TrustLevel::InheritsTrust)
|
||||
{
|
||||
}
|
||||
|
||||
bool SetCertTrust(const SECItem* certDER, TrustLevel certTrustLevel)
|
||||
{
|
||||
this->certDER = certDER;
|
||||
this->certTrustLevel = certTrustLevel;
|
||||
return true;
|
||||
}
|
||||
private:
|
||||
virtual SECStatus GetCertTrust(EndEntityOrCA endEntityOrCA,
|
||||
const CertPolicyId&,
|
||||
const SECItem& candidateCert,
|
||||
/*out*/ TrustLevel* trustLevel)
|
||||
{
|
||||
EXPECT_EQ(endEntityOrCA, EndEntityOrCA::MustBeEndEntity);
|
||||
EXPECT_TRUE(trustLevel);
|
||||
EXPECT_TRUE(certDER);
|
||||
EXPECT_TRUE(SECITEM_ItemsAreEqual(certDER, &candidateCert));
|
||||
*trustLevel = certTrustLevel;
|
||||
return SECSuccess;
|
||||
}
|
||||
|
||||
const SECItem* certDER; // weak pointer
|
||||
TrustLevel certTrustLevel;
|
||||
};
|
||||
|
||||
TrustDomain trustDomain;
|
||||
const SECItem* signerCertDER; // owned by arena
|
||||
SECItem* response; // owned by arena
|
||||
};
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_GetCertTrust, InheritTrust)
|
||||
{
|
||||
ASSERT_TRUE(trustDomain.SetCertTrust(signerCertDER,
|
||||
TrustLevel::InheritsTrust));
|
||||
bool expired;
|
||||
ASSERT_SECSuccess(VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_GetCertTrust, TrustAnchor)
|
||||
{
|
||||
ASSERT_TRUE(trustDomain.SetCertTrust(signerCertDER,
|
||||
TrustLevel::TrustAnchor));
|
||||
bool expired;
|
||||
ASSERT_SECSuccess(VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
||||
|
||||
TEST_F(pkixocsp_VerifyEncodedResponse_GetCertTrust, ActivelyDistrusted)
|
||||
{
|
||||
ASSERT_TRUE(trustDomain.SetCertTrust(signerCertDER,
|
||||
TrustLevel::ActivelyDistrusted));
|
||||
bool expired;
|
||||
ASSERT_SECFailure(SEC_ERROR_OCSP_INVALID_SIGNING_CERT,
|
||||
VerifyEncodedOCSPResponse(trustDomain, *endEntityCertID, now,
|
||||
END_ENTITY_MAX_LIFETIME_IN_DAYS,
|
||||
*response, expired));
|
||||
ASSERT_FALSE(expired);
|
||||
}
|
@ -93,6 +93,59 @@ OpenFile(const char* dir, const char* filename, const char* mode)
|
||||
return file.release();
|
||||
}
|
||||
|
||||
SECStatus
|
||||
TamperOnce(SECItem& item,
|
||||
const uint8_t* from, size_t fromLen,
|
||||
const uint8_t* to, size_t toLen)
|
||||
{
|
||||
if (!item.data || !from || !to || fromLen != toLen) {
|
||||
PR_NOT_REACHED("invalid args to TamperOnce");
|
||||
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
|
||||
if (fromLen < 8) {
|
||||
PR_NOT_REACHED("invalid parameter to TamperOnce; fromLen must be at least 8");
|
||||
PR_SetError(SEC_ERROR_INVALID_ARGS, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
|
||||
uint8_t* p = item.data;
|
||||
size_t remaining = item.len;
|
||||
bool alreadyFoundMatch = false;
|
||||
for (;;) {
|
||||
uint8_t* foundFirstByte = static_cast<uint8_t*>(memchr(p, from[0],
|
||||
remaining));
|
||||
if (!foundFirstByte) {
|
||||
if (alreadyFoundMatch) {
|
||||
return SECSuccess;
|
||||
}
|
||||
PR_SetError(SEC_ERROR_BAD_DATA, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
remaining -= (foundFirstByte - p);
|
||||
if (remaining < fromLen) {
|
||||
if (alreadyFoundMatch) {
|
||||
return SECSuccess;
|
||||
}
|
||||
PR_SetError(SEC_ERROR_BAD_DATA, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
if (!memcmp(foundFirstByte, from, fromLen)) {
|
||||
if (alreadyFoundMatch) {
|
||||
PR_SetError(SEC_ERROR_BAD_DATA, 0);
|
||||
return SECFailure;
|
||||
}
|
||||
alreadyFoundMatch = true;
|
||||
memmove(foundFirstByte, to, toLen);
|
||||
p = foundFirstByte + toLen;
|
||||
} else {
|
||||
p = foundFirstByte + 1;
|
||||
--remaining;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class Output
|
||||
{
|
||||
public:
|
||||
|
@ -75,6 +75,16 @@ SECStatus GenerateKeyPair(/*out*/ ScopedSECKEYPublicKey& publicKey,
// The result will be owned by the arena
const SECItem* ASCIIToDERName(PLArenaPool* arena, const char* cn);

// Replace one substring in item with another of the same length, but only if
// the substring was found exactly once. The "only once" restriction is helpful
// for avoiding making multiple changes at once.
//
// The string to search for must be 8 or more bytes long so that it is
// extremely unlikely that there will ever be any false positive matches
// in digital signatures, keys, hashes, etc.
SECStatus TamperOnce(SECItem& item, const uint8_t* from, size_t fromLen,
                     const uint8_t* to, size_t toLen);
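
A small usage sketch for illustration (not part of this patch; the wrapper function and the two 8-byte markers are made-up placeholders):

// Illustrative sketch only, not part of this patch. Swaps one 8-byte marker
// for another of the same length so a test can check that verification
// rejects the modified encoding; TamperOnce fails if the marker is absent
// or occurs more than once.
static void TamperWithResponse(SECItem& encodedResponse)
{
  static const uint8_t kOriginal[] = { 'T', 'E', 'S', 'T', 'M', 'A', 'R', 'K' };
  static const uint8_t kTampered[] = { 'T', 'E', 'S', 'T', 'M', 'A', 'R', 'X' };
  if (TamperOnce(encodedResponse, kOriginal, sizeof(kOriginal),
                 kTampered, sizeof(kTampered)) != SECSuccess) {
    // marker not found exactly once, or bad arguments
  }
}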
///////////////////////////////////////////////////////////////////////////////
// Encode Certificates
|
||||
|
||||
|
@ -303,7 +303,12 @@ function FxAccountsInternal() {
|
||||
|
||||
// We don't reference |profileDir| in the top-level module scope
|
||||
// as we may be imported before we know where it is.
|
||||
// We only want the fancy new LoginManagerStorage on desktop.
|
||||
#if defined(MOZ_B2G)
|
||||
this.signedInUserStorage = new JSONStorage({
|
||||
#else
|
||||
this.signedInUserStorage = new LoginManagerStorage({
|
||||
#endif
|
||||
filename: DEFAULT_STORAGE_FILENAME,
|
||||
baseDir: OS.Constants.Path.profileDir,
|
||||
});
|
||||
@ -901,6 +906,194 @@ JSONStorage.prototype = {
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* LoginManagerStorage constructor that creates instances that may set/get
|
||||
* from a combination of a clear-text JSON file and stored securely in
|
||||
* the nsILoginManager.
|
||||
*
|
||||
* @param options {
|
||||
* filename: of the plain-text file to write to
|
||||
* baseDir: directory where the file resides
|
||||
* }
|
||||
* @return instance
|
||||
*/
|
||||
|
||||
function LoginManagerStorage(options) {
|
||||
// we reuse the JSONStorage for writing the plain-text stuff.
|
||||
this.jsonStorage = new JSONStorage(options);
|
||||
}
|
||||
|
||||
LoginManagerStorage.prototype = {
|
||||
// The fields in the credentials JSON object that are stored in plain-text
|
||||
// in the profile directory. All other fields are stored in the login manager,
|
||||
// and thus are only available when the master-password is unlocked.
|
||||
|
||||
// a hook point for testing.
|
||||
get _isLoggedIn() {
|
||||
return Services.logins.isLoggedIn;
|
||||
},
|
||||
|
||||
// Clear any data from the login manager. Returns true if the login manager
|
||||
// was unlocked (even if no existing logins existed) or false if it was
|
||||
// locked (meaning we don't even know if it existed or not.)
|
||||
_clearLoginMgrData: Task.async(function* () {
|
||||
try { // Services.logins might be third-party and broken...
|
||||
yield Services.logins.initializationPromise;
|
||||
if (!this._isLoggedIn) {
|
||||
return false;
|
||||
}
|
||||
let logins = Services.logins.findLogins({}, FXA_PWDMGR_HOST, null, FXA_PWDMGR_REALM);
|
||||
for (let login of logins) {
|
||||
Services.logins.removeLogin(login);
|
||||
}
|
||||
return true;
|
||||
} catch (ex) {
|
||||
log.error("Failed to clear login data: ${}", ex);
|
||||
return false;
|
||||
}
|
||||
}),
|
||||
|
||||
set: Task.async(function* (contents) {
|
||||
if (!contents) {
|
||||
// User is signing out - write the null to the json file.
|
||||
yield this.jsonStorage.set(contents);
|
||||
|
||||
// And nuke it from the login manager.
|
||||
let cleared = yield this._clearLoginMgrData();
|
||||
if (!cleared) {
|
||||
// just log a message - we verify that the email address matches when
|
||||
// we reload it, so having a stale entry doesn't really hurt.
|
||||
log.info("not removing credentials from login manager - not logged in");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// We are saving actual data.
|
||||
// Split the data into 2 chunks - one to go to the plain-text, and the
|
||||
// other to write to the login manager.
|
||||
let toWriteJSON = {version: contents.version};
|
||||
let accountDataJSON = toWriteJSON.accountData = {};
|
||||
let toWriteLoginMgr = {version: contents.version};
|
||||
let accountDataLoginMgr = toWriteLoginMgr.accountData = {};
|
||||
for (let [name, value] of Iterator(contents.accountData)) {
|
||||
if (FXA_PWDMGR_PLAINTEXT_FIELDS.indexOf(name) >= 0) {
|
||||
accountDataJSON[name] = value;
|
||||
} else {
|
||||
accountDataLoginMgr[name] = value;
|
||||
}
|
||||
}
|
||||
yield this.jsonStorage.set(toWriteJSON);
|
||||
|
||||
try { // Services.logins might be third-party and broken...
|
||||
// and the stuff into the login manager.
|
||||
yield Services.logins.initializationPromise;
|
||||
// If MP is locked we silently fail - the user may need to re-auth
|
||||
// next startup.
|
||||
if (!this._isLoggedIn) {
|
||||
log.info("not saving credentials to login manager - not logged in");
|
||||
return;
|
||||
}
|
||||
// write the rest of the data to the login manager.
|
||||
let loginInfo = new Components.Constructor(
|
||||
"@mozilla.org/login-manager/loginInfo;1", Ci.nsILoginInfo, "init");
|
||||
let login = new loginInfo(FXA_PWDMGR_HOST,
|
||||
null, // aFormSubmitURL,
|
||||
FXA_PWDMGR_REALM, // aHttpRealm,
|
||||
contents.accountData.email, // aUsername
|
||||
JSON.stringify(toWriteLoginMgr), // aPassword
|
||||
"", // aUsernameField
|
||||
"");// aPasswordField
|
||||
|
||||
let existingLogins = Services.logins.findLogins({}, FXA_PWDMGR_HOST, null,
|
||||
FXA_PWDMGR_REALM);
|
||||
if (existingLogins.length) {
|
||||
Services.logins.modifyLogin(existingLogins[0], login);
|
||||
} else {
|
||||
Services.logins.addLogin(login);
|
||||
}
|
||||
} catch (ex) {
|
||||
log.error("Failed to save data to the login manager: ${}", ex);
|
||||
}
|
||||
}),
|
||||
|
||||
get: Task.async(function* () {
|
||||
// we need to suck some data from the .json file in the profile dir and
|
||||
// some other from the login manager.
|
||||
let data = yield this.jsonStorage.get();
|
||||
if (!data) {
|
||||
// no user logged in, nuke the storage data in case we couldn't remove
|
||||
// it previously and then we are done.
|
||||
yield this._clearLoginMgrData();
|
||||
return null;
|
||||
}
|
||||
|
||||
// if we have encryption keys it must have been saved before we
|
||||
// used the login manager, so re-save it.
|
||||
if (data.accountData.kA || data.accountData.kB || data.keyFetchToken) {
|
||||
// We need to migrate, but the MP might be locked (eg, on the first run
|
||||
// with this enabled, we will get here very soon after startup, so will
|
||||
// certainly be locked.) This means we can't actually store the data in
|
||||
// the login manager (and thus might lose it if we migrated now)
|
||||
// So if the MP is locked, we *don't* migrate, but still just return
|
||||
// the subset of data we now store in the JSON.
|
||||
// This will cause sync to notice the lack of keys, force an unlock then
|
||||
// re-fetch the account data to see if the keys are there. At *that*
|
||||
// point we will end up back here, but because the MP is now unlocked
|
||||
// we can actually perform the migration.
|
||||
if (!this._isLoggedIn) {
|
||||
// return the "safe" subset but leave the storage alone.
|
||||
log.info("account data needs migration to the login manager but the MP is locked.");
|
||||
let result = {
|
||||
version: data.version,
|
||||
accountData: {},
|
||||
};
|
||||
for (let fieldName of FXA_PWDMGR_PLAINTEXT_FIELDS) {
|
||||
result.accountData[fieldName] = data.accountData[fieldName];
|
||||
}
|
||||
return result;
|
||||
}
|
||||
// actually migrate - just calling .set() will split everything up.
|
||||
log.info("account data is being migrated to the login manager.");
|
||||
yield this.set(data);
|
||||
}
|
||||
|
||||
try { // Services.logins might be third-party and broken...
|
||||
// read the data from the login manager and merge it for return.
|
||||
yield Services.logins.initializationPromise;
|
||||
|
||||
if (!this._isLoggedIn) {
|
||||
log.info("returning partial account data as the login manager is locked.");
|
||||
return data;
|
||||
}
|
||||
|
||||
let logins = Services.logins.findLogins({}, FXA_PWDMGR_HOST, null, FXA_PWDMGR_REALM);
|
||||
if (logins.length == 0) {
|
||||
// This could happen if the MP was locked when we wrote the data.
|
||||
log.info("Can't find the rest of the credentials in the login manager");
|
||||
return data;
|
||||
}
|
||||
let login = logins[0];
|
||||
if (login.username == data.accountData.email) {
|
||||
let lmData = JSON.parse(login.password);
|
||||
if (lmData.version == data.version) {
|
||||
// Merge the login manager data
|
||||
copyObjectProperties(lmData.accountData, data.accountData);
|
||||
} else {
|
||||
log.info("version field in the login manager doesn't match - ignoring it");
|
||||
yield this._clearLoginMgrData();
|
||||
}
|
||||
} else {
|
||||
log.info("username in the login manager doesn't match - ignoring it");
|
||||
yield this._clearLoginMgrData();
|
||||
}
|
||||
} catch (ex) {
|
||||
log.error("Failed to get data from the login manager: ${}", ex);
|
||||
}
|
||||
return data;
|
||||
}),
|
||||
|
||||
}
|
||||
|
||||
// A getter for the instance to export
|
||||
XPCOMUtils.defineLazyGetter(this, "fxAccounts", function() {
|
||||
let a = new FxAccounts();
|
||||
|
@ -178,5 +178,18 @@ SERVER_ERRNO_TO_ERROR[ERRNO_INCORRECT_EMAIL_CASE] = ERROR_INCORRECT_EM
SERVER_ERRNO_TO_ERROR[ERRNO_SERVICE_TEMP_UNAVAILABLE] = ERROR_SERVICE_TEMP_UNAVAILABLE;
SERVER_ERRNO_TO_ERROR[ERRNO_UNKNOWN_ERROR] = ERROR_UNKNOWN;

// FxAccounts has the ability to "split" the credentials between a plain-text
// JSON file in the profile dir and the login manager.
// These constants relate to that.

// The fields we save in the plaintext JSON.
// See bug 1013064 comments 23-25 for why the sessionToken is "safe"
this.FXA_PWDMGR_PLAINTEXT_FIELDS = ["email", "verified", "authAt",
                                    "sessionToken", "uid"];
// The pseudo-host we use in the login manager
this.FXA_PWDMGR_HOST = "chrome://FirefoxAccounts";
// The realm we use in the login manager.
this.FXA_PWDMGR_REALM = "Firefox Accounts credentials";

// Allow this file to be imported via Components.utils.import().
this.EXPORTED_SYMBOLS = Object.keys(this);
@ -10,11 +10,14 @@ TEST_DIRS += ['tests']
EXTRA_JS_MODULES += [
'Credentials.jsm',
'FxAccounts.jsm',
'FxAccountsClient.jsm',
'FxAccountsCommon.js'
]

EXTRA_PP_JS_MODULES += [
'FxAccounts.jsm',
]

# For now, we will only be using the FxA manager in B2G.
if CONFIG['MOZ_B2G']:
EXTRA_JS_MODULES += ['FxAccountsManager.jsm']
309
services/fxaccounts/tests/xpcshell/test_loginmgr_storage.js
Normal file
@ -0,0 +1,309 @@
|
||||
/* Any copyright is dedicated to the Public Domain.
|
||||
* http://creativecommons.org/publicdomain/zero/1.0/ */
|
||||
|
||||
"use strict";
|
||||
|
||||
// Tests for FxAccounts, storage and the master password.
|
||||
|
||||
// Stop us hitting the real auth server.
|
||||
Services.prefs.setCharPref("identity.fxaccounts.auth.uri", "http://localhost");
|
||||
|
||||
Cu.import("resource://gre/modules/Services.jsm");
|
||||
Cu.import("resource://gre/modules/FxAccounts.jsm");
|
||||
Cu.import("resource://gre/modules/FxAccountsClient.jsm");
|
||||
Cu.import("resource://gre/modules/FxAccountsCommon.js");
|
||||
Cu.import("resource://gre/modules/osfile.jsm");
|
||||
Cu.import("resource://services-common/utils.js");
|
||||
Cu.import("resource://gre/modules/FxAccountsCommon.js");
|
||||
|
||||
initTestLogging("Trace");
// See verbose logging from FxAccounts.jsm
Services.prefs.setCharPref("identity.fxaccounts.loglevel", "DEBUG");

function run_test() {
  run_next_test();
}

function getLoginMgrData() {
  let logins = Services.logins.findLogins({}, FXA_PWDMGR_HOST, null, FXA_PWDMGR_REALM);
  if (logins.length == 0) {
    return null;
  }
  Assert.equal(logins.length, 1, "only 1 login available");
  return logins[0];
}

add_task(function test_simple() {
  let fxa = new FxAccounts({});

  let creds = {
    email: "test@example.com",
    sessionToken: "sessionToken",
    kA: "the kA value",
    kB: "the kB value",
    verified: true
  };
  yield fxa.setSignedInUser(creds);

  // This should have stored stuff in both the .json file in the profile
  // dir, and the login dir.
  let path = OS.Path.join(OS.Constants.Path.profileDir, "signedInUser.json");
  let data = yield CommonUtils.readJSON(path);

  Assert.strictEqual(data.accountData.email, creds.email, "correct email in the clear text");
  Assert.strictEqual(data.accountData.sessionToken, creds.sessionToken, "correct sessionToken in the clear text");
  Assert.strictEqual(data.accountData.verified, creds.verified, "correct verified flag");

  Assert.ok(!("kA" in data.accountData), "kA not stored in clear text");
  Assert.ok(!("kB" in data.accountData), "kB not stored in clear text");

  let login = getLoginMgrData();
  Assert.strictEqual(login.username, creds.email, "email matches");
  let loginData = JSON.parse(login.password);
  Assert.strictEqual(loginData.version, data.version, "same version flag in both places");
  Assert.strictEqual(loginData.accountData.kA, creds.kA, "correct kA in the login mgr");
  Assert.strictEqual(loginData.accountData.kB, creds.kB, "correct kB in the login mgr");

  Assert.ok(!("email" in loginData), "email not stored in the login mgr json");
  Assert.ok(!("sessionToken" in loginData), "sessionToken not stored in the login mgr json");
  Assert.ok(!("verified" in loginData), "verified not stored in the login mgr json");

  yield fxa.signOut(/* localOnly = */ true);
  Assert.strictEqual(getLoginMgrData(), null, "login mgr data deleted on logout");
});

add_task(function test_MPLocked() {
  let fxa = new FxAccounts({});

  let creds = {
    email: "test@example.com",
    sessionToken: "sessionToken",
    kA: "the kA value",
    kB: "the kB value",
    verified: true
  };

  // tell the storage that the MP is locked.
  fxa.internal.signedInUserStorage.__defineGetter__("_isLoggedIn", function() false);
  yield fxa.setSignedInUser(creds);

  // This should have stored stuff in the .json, and the login manager stuff
  // will not exist.
  let path = OS.Path.join(OS.Constants.Path.profileDir, "signedInUser.json");
  let data = yield CommonUtils.readJSON(path);

  Assert.strictEqual(data.accountData.email, creds.email, "correct email in the clear text");
  Assert.strictEqual(data.accountData.sessionToken, creds.sessionToken, "correct sessionToken in the clear text");
  Assert.strictEqual(data.accountData.verified, creds.verified, "correct verified flag");

  Assert.ok(!("kA" in data.accountData), "kA not stored in clear text");
  Assert.ok(!("kB" in data.accountData), "kB not stored in clear text");

  Assert.strictEqual(getLoginMgrData(), null, "login mgr data doesn't exist");
  yield fxa.signOut(/* localOnly = */ true)
});

add_task(function test_migrationMPUnlocked() {
  // first manually save a signedInUser.json to simulate a first-run with
  // pre-migrated data.
  let fxa = new FxAccounts({});

  let creds = {
    email: "test@example.com",
    sessionToken: "sessionToken",
    kA: "the kA value",
    kB: "the kB value",
    verified: true
  };
  let toWrite = {
    version: fxa.version,
    accountData: creds,
  }

  let path = OS.Path.join(OS.Constants.Path.profileDir, "signedInUser.json");
  yield CommonUtils.writeJSON(toWrite, path);

  // now load it - it should migrate.
  let data = yield fxa.getSignedInUser();
  Assert.deepEqual(data, creds, "we got all the data back");

  // and verify it was actually migrated - re-read signedInUser back.
  data = yield CommonUtils.readJSON(path);

  Assert.strictEqual(data.accountData.email, creds.email, "correct email in the clear text");
  Assert.strictEqual(data.accountData.sessionToken, creds.sessionToken, "correct sessionToken in the clear text");
  Assert.strictEqual(data.accountData.verified, creds.verified, "correct verified flag");

  Assert.ok(!("kA" in data.accountData), "kA not stored in clear text");
  Assert.ok(!("kB" in data.accountData), "kB not stored in clear text");

  let login = getLoginMgrData();
  Assert.strictEqual(login.username, creds.email, "email matches");
  let loginData = JSON.parse(login.password);
  Assert.strictEqual(loginData.version, data.version, "same version flag in both places");
  Assert.strictEqual(loginData.accountData.kA, creds.kA, "correct kA in the login mgr");
  Assert.strictEqual(loginData.accountData.kB, creds.kB, "correct kB in the login mgr");

  Assert.ok(!("email" in loginData), "email not stored in the login mgr json");
  Assert.ok(!("sessionToken" in loginData), "sessionToken not stored in the login mgr json");
  Assert.ok(!("verified" in loginData), "verified not stored in the login mgr json");

  yield fxa.signOut(/* localOnly = */ true);
  Assert.strictEqual(getLoginMgrData(), null, "login mgr data deleted on logout");
});

add_task(function test_migrationMPLocked() {
  // first manually save a signedInUser.json to simulate a first-run with
  // pre-migrated data.
  let fxa = new FxAccounts({});

  let creds = {
    email: "test@example.com",
    sessionToken: "sessionToken",
    kA: "the kA value",
    kB: "the kB value",
    verified: true
  };
  let toWrite = {
    version: fxa.version,
    accountData: creds,
  }

  let path = OS.Path.join(OS.Constants.Path.profileDir, "signedInUser.json");
  yield CommonUtils.writeJSON(toWrite, path);

  // pretend the MP is locked.
  fxa.internal.signedInUserStorage.__defineGetter__("_isLoggedIn", function() false);

  // now load it - it should *not* migrate, but should only give the JSON-safe
  // data back.
  let data = yield fxa.getSignedInUser();
  Assert.ok(!data.kA);
  Assert.ok(!data.kB);

  // and verify the data on disk wasn't migrated.
  data = yield CommonUtils.readJSON(path);
  Assert.deepEqual(data, toWrite);

  // Now "unlock" and re-ask for the signedInUser - it should migrate.
  fxa.internal.signedInUserStorage.__defineGetter__("_isLoggedIn", function() true);
  data = yield fxa.getSignedInUser();
  // this time we should have got all the data, not just the JSON-safe fields.
  Assert.strictEqual(data.kA, creds.kA);
  Assert.strictEqual(data.kB, creds.kB);

  // And verify the data in the JSON was migrated
  data = yield CommonUtils.readJSON(path);
  Assert.strictEqual(data.accountData.email, creds.email, "correct email in the clear text");
  Assert.strictEqual(data.accountData.sessionToken, creds.sessionToken, "correct sessionToken in the clear text");
  Assert.strictEqual(data.accountData.verified, creds.verified, "correct verified flag");

  Assert.ok(!("kA" in data.accountData), "kA not stored in clear text");
  Assert.ok(!("kB" in data.accountData), "kB not stored in clear text");

  let login = getLoginMgrData();
  Assert.strictEqual(login.username, creds.email, "email matches");
  let loginData = JSON.parse(login.password);
  Assert.strictEqual(loginData.version, data.version, "same version flag in both places");
  Assert.strictEqual(loginData.accountData.kA, creds.kA, "correct kA in the login mgr");
  Assert.strictEqual(loginData.accountData.kB, creds.kB, "correct kB in the login mgr");

  Assert.ok(!("email" in loginData), "email not stored in the login mgr json");
  Assert.ok(!("sessionToken" in loginData), "sessionToken not stored in the login mgr json");
  Assert.ok(!("verified" in loginData), "verified not stored in the login mgr json");

  yield fxa.signOut(/* localOnly = */ true);
  Assert.strictEqual(getLoginMgrData(), null, "login mgr data deleted on logout");
});

add_task(function test_consistentWithMPEdgeCases() {
  let fxa = new FxAccounts({});

  let creds1 = {
    email: "test@example.com",
    sessionToken: "sessionToken",
    kA: "the kA value",
    kB: "the kB value",
    verified: true
  };

  let creds2 = {
    email: "test2@example.com",
    sessionToken: "sessionToken2",
    kA: "the kA value2",
    kB: "the kB value2",
    verified: false,
  };

  // Log a user in while MP is unlocked.
  yield fxa.setSignedInUser(creds1);

  // tell the storage that the MP is locked - this will prevent logout from
  // being able to clear the data.
  fxa.internal.signedInUserStorage.__defineGetter__("_isLoggedIn", function() false);

  // now set the second credentials.
  yield fxa.setSignedInUser(creds2);

  // We should still have creds1 data in the login manager.
  let login = getLoginMgrData();
  Assert.strictEqual(login.username, creds1.email);
  // and that we do have the first kA in the login manager.
  Assert.strictEqual(JSON.parse(login.password).accountData.kA, creds1.kA,
                     "stale data still in login mgr");

  // Make a new FxA instance (otherwise the values in memory will be used.)
  // Because we haven't overridden _isLoggedIn for this new instance it will
  // treat the MP as unlocked.
  fxa = new FxAccounts({});

  let accountData = yield fxa.getSignedInUser();
  Assert.strictEqual(accountData.email, creds2.email);
  // we should have no kA at all.
  Assert.strictEqual(accountData.kA, undefined, "stale kA wasn't used");
  yield fxa.signOut(/* localOnly = */ true)
});

add_task(function test_migration() {
  // manually write out the full creds data to the JSON - this will look like
  // old data that needs migration.
  let creds = {
    email: "test@example.com",
    sessionToken: "sessionToken",
    kA: "the kA value",
    kB: "the kB value",
    verified: true
  };
  let toWrite = {
    version: 1,
    accountData: creds,
  };

  let path = OS.Path.join(OS.Constants.Path.profileDir, "signedInUser.json");
  let data = yield CommonUtils.writeJSON(toWrite, path);

  // Create an FxA object - and tell it to load the data.
  let fxa = new FxAccounts({});
  data = yield fxa.getSignedInUser();

  Assert.deepEqual(data, creds, "we should have everything available");

  // now sniff the data on disk - it should have been magically migrated.
  data = yield CommonUtils.readJSON(path);

  Assert.strictEqual(data.accountData.email, creds.email, "correct email in the clear text");
  Assert.strictEqual(data.accountData.sessionToken, creds.sessionToken, "correct sessionToken in the clear text");
  Assert.strictEqual(data.accountData.verified, creds.verified, "correct verified flag");

  Assert.ok(!("kA" in data.accountData), "kA not stored in clear text");
  Assert.ok(!("kB" in data.accountData), "kB not stored in clear text");

  // and it should magically be in the login manager.
  let login = getLoginMgrData();
  Assert.strictEqual(login.username, creds.email);
  // and that we do have the first kA in the login manager.
  Assert.strictEqual(JSON.parse(login.password).accountData.kA, creds.kA,
                     "kA was migrated");

  yield fxa.signOut(/* localOnly = */ true)
});

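A note on the master-password stubs used in the tests above: "function() false" is legacy
SpiderMonkey expression-closure syntax, which these xpcshell tests can rely on but which will
not parse in standard JavaScript. An equivalent stub with standard syntax (illustrative only)
would be:

// Pretend the master password is locked by overriding the storage's getter.
Object.defineProperty(fxa.internal.signedInUserStorage, "_isLoggedIn", {
  configurable: true,
  get: function () {
    return false;
  },
});
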
@ -5,6 +5,8 @@ tail =
[test_accounts.js]
[test_client.js]
[test_credentials.js]
[test_loginmgr_storage.js]
skip-if = appname == 'b2g' # login manager storage only used on desktop.
[test_manager.js]
run-if = appname == 'b2g'
reason = FxAccountsManager is only available for B2G for now

@ -108,16 +108,6 @@ WeaveService.prototype = {
    }
  },

  /**
   * Returns whether the password engine is allowed. We explicitly disallow
   * the password engine when a master password is used to ensure those can't
   * be accessed without the master key.
   */
  get allowPasswordsEngine() {
    // This doesn't apply to old-style sync, it's only an issue for FxA.
    return !this.fxAccountsEnabled || !Utils.mpEnabled();
  },

  /**
   * Whether Sync appears to be enabled.
   *

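Restating the allowPasswordsEngine gate as a standalone predicate may help; the function and
argument names below are illustrative only, not Weave code:

function allowPasswordsEngine(fxAccountsEnabled, masterPasswordEnabled) {
  // Old-style Sync is unaffected; only FxA-based Sync combined with a master
  // password disallows the passwords engine.
  return !fxAccountsEnabled || !masterPasswordEnabled;
}

allowPasswordsEngine(true, true);   // false - FxA Sync + master password: engine disallowed
allowPasswordsEngine(true, false);  // true  - FxA Sync without a master password
allowPasswordsEngine(false, true);  // true  - old-style Sync, master password irrelevant
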
@ -399,7 +399,10 @@ this.BrowserIDManager.prototype = {
   * The current state of the auth credentials.
   *
   * This essentially validates that enough credentials are available to use
   * Sync.
   * Sync, although it effectively ignores the state of the master-password -
   * if that's locked and that's the only problem we can see, say everything
   * is OK - unlockAndVerifyAuthState will be used to perform the unlock
   * and re-verification if necessary.
   */
  get currentAuthState() {
    if (this._authFailureReason) {
@ -416,14 +419,53 @@ this.BrowserIDManager.prototype = {

    // No need to check this.syncKey as our getter for that attribute
    // uses this.syncKeyBundle
    // If bundle creation started, but failed.
    if (this._shouldHaveSyncKeyBundle && !this.syncKeyBundle) {
      return LOGIN_FAILED_NO_PASSPHRASE;
    // If bundle creation started, but failed due to any reason other than
    // the MP being locked...
    if (this._shouldHaveSyncKeyBundle && !this.syncKeyBundle && !Utils.mpLocked()) {
      // Return a state that says a re-auth is necessary so we can get keys.
      return LOGIN_FAILED_LOGIN_REJECTED;
    }

    return STATUS_OK;
  },

  // Do we currently have keys, or do we have enough that we should be able
  // to successfully fetch them?
  _canFetchKeys: function() {
    let userData = this._signedInUser;
    // a keyFetchToken means we can almost certainly grab them.
    // kA and kB means we already have them.
    return userData && (userData.keyFetchToken || (userData.kA && userData.kB));
  },

  /**
   * Verify the current auth state, unlocking the master-password if necessary.
   *
   * Returns a promise that resolves with the current auth state after
   * attempting to unlock.
   */
  unlockAndVerifyAuthState: function() {
    if (this._canFetchKeys()) {
      return Promise.resolve(STATUS_OK);
    }
    // so no keys - ensure MP unlocked.
    if (!Utils.ensureMPUnlocked()) {
      // user declined to unlock, so we don't know if they are stored there.
      return Promise.resolve(MASTER_PASSWORD_LOCKED);
    }
    // now we are unlocked we must re-fetch the user data as we may now have
    // the details that were previously locked away.
    return this._fxaService.getSignedInUser().then(
      accountData => {
        this._updateSignedInUser(accountData);
        // If we still can't get keys it probably means the user authenticated
        // without unlocking the MP or cleared the saved logins, so we've now
        // lost them - the user will need to reauth before continuing.
        return this._canFetchKeys() ? STATUS_OK : LOGIN_FAILED_LOGIN_REJECTED;
      }
    );
  },

  /**
   * Do we have a non-null, not yet expired token for the user currently
   * signed in?
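A sketch of how a caller is expected to use unlockAndVerifyAuthState before syncing; the
identity object, doSync and notifyReauthNeeded are hypothetical names, not Sync code:

identity.unlockAndVerifyAuthState().then(function (status) {
  if (status == STATUS_OK) {
    // Keys are present, or a keyFetchToken is available to fetch them.
    return doSync();
  }
  if (status == MASTER_PASSWORD_LOCKED) {
    // The user declined to unlock; leave things alone and retry on a later sync.
    return;
  }
  // e.g. LOGIN_FAILED_LOGIN_REJECTED - surface a re-authentication prompt.
  notifyReauthNeeded(status);
});
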
@ -449,6 +491,14 @@ this.BrowserIDManager.prototype = {
    let fxa = this._fxaService;
    let userData = this._signedInUser;

    // We need kA and kB for things to work. If we don't have them, just
    // return null for the token - sync calling unlockAndVerifyAuthState()
    // before actually syncing will setup the error states if necessary.
    if (!this._canFetchKeys()) {
      log.info("_fetchTokenForUser has no keys to use.");
      return null;
    }

    log.info("Fetching assertion and token from: " + tokenServerURI);

    let maybeFetchKeys = () => {
@ -524,7 +574,8 @@ this.BrowserIDManager.prototype = {
        // set it to the "fatal" LOGIN_FAILED_LOGIN_REJECTED reason.
        this._authFailureReason = LOGIN_FAILED_LOGIN_REJECTED;
      } else {
        this._log.error("Non-authentication error in _fetchTokenForUser: " + err.message);
        this._log.error("Non-authentication error in _fetchTokenForUser: "
                        + (err.message || err));
        // for now assume it is just a transient network related problem.
        this._authFailureReason = LOGIN_FAILED_NETWORK_ERROR;
      }

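The updated logging above switches from "+ err.message" to "+ (err.message || err)" because a
throw site may hand back a bare string or other non-Error value whose .message is undefined.
A standalone restatement (describeTokenError is an illustrative name only):

function describeTokenError(err) {
  return "Non-authentication error in _fetchTokenForUser: " + (err.message || err);
}

describeTokenError(new Error("timed out"));       // "...: timed out"
describeTokenError("token server returned 503");  // "...: token server returned 503"
// With err.message alone, the second call would have logged "...: undefined".
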
@ -36,28 +36,6 @@ PasswordEngine.prototype = {
  _recordObj: LoginRec,
  applyIncomingBatchSize: PASSWORDS_STORE_BATCH_SIZE,

  get isAllowed() {
    return Cc["@mozilla.org/weave/service;1"]
             .getService(Ci.nsISupports)
             .wrappedJSObject
             .allowPasswordsEngine;
  },

  get enabled() {
    // If we are disabled due to !isAllowed(), we must take care to ensure the
    // engine has actually had the enabled setter called which reflects this state.
    let prefVal = SyncEngine.prototype.__lookupGetter__("enabled").call(this);
    let newVal = this.isAllowed && prefVal;
    if (newVal != prefVal) {
      this.enabled = newVal;
    }
    return newVal;
  },

  set enabled(val) {
    SyncEngine.prototype.__lookupSetter__("enabled").call(this, this.isAllowed && val);
  },

  _syncFinish: function _syncFinish() {
    SyncEngine.prototype._syncFinish.call(this);

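The enabled getter/setter pair in this hunk is a self-correcting property: reading it
reconciles the stored pref with the isAllowed gate and, if they disagree, pushes the corrected
value back through the setter. The same pattern in isolation (an illustrative plain object,
not the Sync engine itself):

let engine = {
  _pref: true,      // stands in for the real enabled pref
  isAllowed: false, // stands in for the master-password gate
  get enabled() {
    let newVal = this.isAllowed && this._pref;
    if (newVal != this._pref) {
      this.enabled = newVal; // make sure the setter has actually run
    }
    return newVal;
  },
  set enabled(val) {
    this._pref = this.isAllowed && val;
  },
};

engine.enabled; // false - and the stored pref is corrected to false as a side-effect
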
@ -377,6 +377,25 @@ IdentityManager.prototype = {
    return STATUS_OK;
  },

  /**
   * Verify the current auth state, unlocking the master-password if necessary.
   *
   * Returns a promise that resolves with the current auth state after
   * attempting to unlock.
   */
  unlockAndVerifyAuthState: function() {
    // Try to fetch the passphrase - this will prompt for MP unlock as a
    // side-effect...
    try {
      this.syncKey;
    } catch (ex) {
      this._log.debug("Fetching passphrase threw " + ex +
                      "; assuming master password locked.");
      return Promise.resolve(MASTER_PASSWORD_LOCKED);
    }
    return Promise.resolve(STATUS_OK);
  },

  /**
   * Persist credentials to password store.
   *