Merge inbound to m-c. a=merge

CLOSED TREE
This commit is contained in:
Ryan VanderMeulen 2015-02-12 17:53:41 -05:00
commit 0df0305fae
62 changed files with 1414 additions and 303 deletions

View File

@ -126,6 +126,9 @@ support-files =
test_bug_1010953_cspro.html
test_bug1045902_console_csp_ignore_reflected_xss_message.html^headers^
test_bug1045902_console_csp_ignore_reflected_xss_message.html
test_bug1092055_shouldwarn.js^headers^
test_bug1092055_shouldwarn.js
test_bug1092055_shouldwarn.html
[browser_bug1045902_console_csp_ignore_reflected_xss_message.js]
[browser_bug664688_sandbox_update_after_navigation.js]
@ -301,6 +304,8 @@ skip-if = e10s # Bug 1042253 - webconsole e10s tests (Linux debug intermittent)
[browser_webconsole_bug_1010953_cspro.js]
[browser_webconsole_certificate_messages.js]
skip-if = e10s # Bug 1042253 - webconsole tests disabled with e10s
[browser_webconsole_show_subresource_security_errors.js]
skip-if = e10s # Bug 1042253 - webconsole tests disabled with e10s
[browser_webconsole_cached_autocomplete.js]
[browser_webconsole_change_font_size.js]
[browser_webconsole_chrome.js]

View File

@ -0,0 +1,30 @@
/* vim:set ts=2 sw=2 sts=2 et: */
/*
 * Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

// Ensure non-toplevel security errors are displayed

// Plain data: page used to open the tab; the real document loads later.
const TEST_URI = "data:text/html;charset=utf8,Web Console subresource STS warning test";
// HTTPS page whose subresource is served with a broken
// Strict-Transport-Security header (see the matching ^headers^ file).
const TEST_DOC = "https://example.com/browser/browser/devtools/webconsole/test/test_bug1092055_shouldwarn.html";
// Fragment of the console warning we expect to appear.
const SAMPLE_MSG = 'invalid Strict-Transport-Security header';

let test = asyncTest(function* () {
  let { browser } = yield loadTab(TEST_URI);
  let hud = yield openConsole();

  hud.jsterm.clearOutput();

  // Navigate to the document that triggers the subresource STS warning and
  // wait for the load before polling the console output.
  let loaded = loadBrowser(browser);
  content.location = TEST_DOC;
  yield loaded;

  yield waitForSuccess({
    name: "Subresource STS warning displayed successfully",
    validator: function() {
      return hud.outputNode.textContent.indexOf(SAMPLE_MSG) > -1;
    }
  });
});

View File

@ -7,6 +7,8 @@
<head>
<meta charset="utf8">
<title>Security warning test - no violations</title>
<!-- ensure no subresource errors so window re-use doesn't cause failures -->
<link rel="icon" href="data:;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAIAAACQd1PeAAAADElEQVQI12P4//8/AAX+Av7czFnnAAAAAElFTkSuQmCC">
<script>
console.log("If you haven't seen ssl warnings yet, you won't");
</script>

View File

@ -0,0 +1,15 @@
<!DOCTYPE HTML>
<html>
<head>
<meta charset="UTF-8">
<title>Bug 1092055 - Log console messages for non-top-level security errors</title>
<script src="test_bug1092055_shouldwarn.js"></script>
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
</head>
<body>
Bug 1092055 - Log console messages for non-top-level security errors
</body>
</html>

View File

@ -0,0 +1,2 @@
// It doesn't matter what this script does, but the broken HSTS header sent
// with it should result in warnings in the webconsole

View File

@ -0,0 +1 @@
Strict-Transport-Security: some complete nonsense

View File

@ -49,7 +49,7 @@ gyp_vars = {
'use_temporal_layers': 0,
# Creates AEC internal sample dump files in current directory
'aec_debug_dump': 0,
'aec_debug_dump': 1,
# Enable and force use of hardware AEC
'hardware_aec_ns': 1 if CONFIG['MOZ_WEBRTC_HARDWARE_AEC_NS'] else 0,

View File

@ -58,6 +58,7 @@ RejectPromises(const uint32_t& aKey,
void* aClosure)
{
aPromise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR);
((MediaKeys*)aClosure)->Release();
return PL_DHASH_NEXT;
}
@ -82,7 +83,6 @@ CloseSessions(const nsAString& aKey,
void* aClosure)
{
aSession->OnClosed();
((MediaKeys*)aClosure)->Release();
return PL_DHASH_NEXT;
}

View File

@ -698,7 +698,8 @@ nsCORSListenerProxy::AsyncOnChannelRedirect(nsIChannel *aOldChannel,
nsIAsyncVerifyRedirectCallback *cb)
{
nsresult rv;
if (!NS_IsInternalSameURIRedirect(aOldChannel, aNewChannel, aFlags)) {
if (!NS_IsInternalSameURIRedirect(aOldChannel, aNewChannel, aFlags) &&
!NS_IsHSTSUpgradeRedirect(aOldChannel, aNewChannel, aFlags)) {
rv = CheckRequestApproved(aOldChannel);
if (NS_FAILED(rv)) {
if (sPreflightCache) {
@ -1110,7 +1111,8 @@ nsCORSPreflightListener::AsyncOnChannelRedirect(nsIChannel *aOldChannel,
nsIAsyncVerifyRedirectCallback *callback)
{
// Only internal redirects allowed for now.
if (!NS_IsInternalSameURIRedirect(aOldChannel, aNewChannel, aFlags))
if (!NS_IsInternalSameURIRedirect(aOldChannel, aNewChannel, aFlags) &&
!NS_IsHSTSUpgradeRedirect(aOldChannel, aNewChannel, aFlags))
return NS_ERROR_DOM_BAD_URI;
callback->OnRedirectVerifyCallback(NS_OK);

View File

@ -135,7 +135,8 @@ public:
principal = doc->NodePrincipal();
} else {
MOZ_ASSERT_IF(!mWorkerPrivate->GetParent(), mWorkerPrivate->IsChromeWorker());
// We use the worker Principal in case this is a SharedWorker, a
// ChromeWorker or a ServiceWorker.
principal = mWorkerPrivate->GetPrincipal();
}
@ -191,7 +192,8 @@ public:
principal = doc->NodePrincipal();
} else {
MOZ_ASSERT_IF(!mWorkerPrivate->GetParent(), mWorkerPrivate->IsChromeWorker());
// We use the worker Principal in case this is a SharedWorker, a
// ChromeWorker or a ServiceWorker.
principal = mWorkerPrivate->GetPrincipal();
}

View File

@ -0,0 +1,5 @@
// SharedWorker script — NOTE(review): the manifest entry naming this file
// (bug1132395_sharedWorker.js) suggests it is a regression test for a crash
// when creating a blob URL from worker scope; confirm against the bug.
// The blob URL is intentionally unused: merely reaching postMessage()
// proves the worker survived the URL.createObjectURL() call.
onconnect = function(evt) {
var blob = new Blob(['123'], { type: 'text/plain' });
var url = URL.createObjectURL(blob);
evt.ports[0].postMessage('alive \\o/');
}

View File

@ -98,6 +98,7 @@ support-files =
webSocket_sharedWorker.js
bug1104064_worker.js
worker_consoleAndBlobs.js
bug1132395_sharedWorker.js
[test_404.html]
[test_atob.html]
@ -199,3 +200,4 @@ skip-if = buildapp == 'b2g' || toolkit == 'android' || e10s #bug 982828
[test_websocket_pref.html]
[test_bug1104064.html]
[test_consoleAndBlobs.html]
[test_bug1132395.html]

View File

@ -0,0 +1,29 @@
<!--
Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/
-->
<!DOCTYPE HTML>
<html>
<head>
<title>Test for 1132395</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<script class="testbody" type="text/javascript">
// Start the shared worker (prefed on first); any message back from the
// worker's port means it did not crash while handling the connection.
SpecialPowers.pushPrefEnv({ set: [["dom.workers.sharedWorkers.enabled", true]] }, function() {
  var sw = new SharedWorker('bug1132395_sharedWorker.js');
  sw.port.onmessage = function(event) {
    ok(true, "We didn't crash.");
    SimpleTest.finish();
  }
});
SimpleTest.waitForExplicitFinish();
</script>
</body>
</html>

View File

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -25,13 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
// This file is an internal atomic implementation, use base/atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.
#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace base {
namespace subtle {
@ -61,7 +61,7 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "r" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
@ -74,7 +74,7 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %1, %2\n" // old = *ptr
"ll %1, %4\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
@ -96,7 +96,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"ll %0, %2\n" // temp = *ptr
"ll %0, %4\n" // temp = *ptr
"addu %1, %0, %3\n" // temp2 = temp + increment
"sc %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
@ -111,9 +111,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
ATOMICOPS_COMPILER_BARRIER();
MemoryBarrier();
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
MemoryBarrier();
return res;
}
@ -126,19 +126,16 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
MemoryBarrier();
return res;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return res;
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@ -174,9 +171,133 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
} // namespace subtle
} // namespace base
#if defined(__LP64__)
// 64-bit versions of the atomic ops.
#undef ATOMICOPS_COMPILER_BARRIER
// Atomically: if *ptr == old_value, store new_value into *ptr; return the
// value *ptr held before the operation. LLD/SCD retry loop; implies no
// memory barrier. Operand map: %0=prev, %1=*ptr (output), %2=tmp,
// %3=old_value, %4=new_value, %5=*ptr (input).
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 prev, tmp;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %5\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value
"scd %2, %1\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
: "r" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
// Operand map: %0=temp, %1=old, %2=*ptr (output), %3=new_value,
// %4=*ptr (input). SCD clobbers %0 with the success flag, hence the retry.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
Atomic64 temp, old;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %1, %4\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"scd %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
// Note the second daddu sits in the beqz delay slot: it executes on every
// iteration, recomputing temp2 (= temp + increment) after SCD overwrote
// temp2 with its success flag — so temp2 holds the final value on exit.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
Atomic64 temp, temp2;
__asm__ __volatile__(".set push\n"
".set noreorder\n"
"1:\n"
"lld %0, %4\n" // temp = *ptr
"daddu %1, %0, %3\n" // temp2 = temp + increment
"scd %1, %2\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error
"daddu %1, %0, %3\n" // temp2 = temp + increment
".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
// Atomically increment *ptr by "increment", with full memory barriers both
// before and after the operation; returns the incremented value.
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
MemoryBarrier();
Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
MemoryBarrier();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
MemoryBarrier();
return res;
}
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
MemoryBarrier();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
// Plain store; no ordering guarantees.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
}

// Store followed by a full barrier.
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
MemoryBarrier();
}

// Full barrier followed by the store, so prior accesses complete first.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
MemoryBarrier();
*ptr = value;
}

// Plain load; no ordering guarantees.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
return *ptr;
}

// Load followed by a full barrier: later accesses cannot move before it.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
MemoryBarrier();
return value;
}

// Full barrier followed by the load.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
MemoryBarrier();
return *ptr;
}
#endif
} // namespace base::subtle
} // namespace base
#endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_

View File

@ -87,6 +87,9 @@
#elif defined(__sparc__)
#define ARCH_CPU_SPARC 1
#define ARCH_CPU_32_BITS 1
#elif defined(__mips64) && defined(__LP64__)
#define ARCH_CPU_MIPS 1
#define ARCH_CPU_64_BITS 1
#elif defined(__mips__)
#define ARCH_CPU_MIPS 1
#define ARCH_CPU_32_BITS 1

View File

@ -85,6 +85,11 @@ following accessor properties from its prototype:
at a time. This accessor can be both fetched and stored to. Its default
value is `5000`.
<code id='allocationsLogOverflowed'>allocationsLogOverflowed</code>
: Returns `true` if there have been more than
[`maxAllocationsLogLength`][#max-alloc-log] allocations since the last time
[`drainAllocationsLog`][#drain-alloc-log] was called and some data has been
lost. Returns `false` otherwise.
Function Properties of the `Debugger.Memory.prototype` Object
-------------------------------------------------------------

View File

@ -0,0 +1,24 @@
// Test basic usage of `Debugger.Memory.prototype.allocationsLogOverflowed`.
// Track allocations in a debuggee global, with the log capped at one entry
// so that any multi-object allocation overflows it.
const root = newGlobal();
const dbg = new Debugger(root);
dbg.memory.trackingAllocationSites = true;
dbg.memory.maxAllocationsLogLength = 1;
root.eval("(" + function immediate() {
// Allocate more than the max log length.
this.objs = [{}, {}, {}, {}];
} + "());");
// The log should have overflowed.
assertEq(dbg.memory.allocationsLogOverflowed, true);
// Once drained, the flag should be reset.
const allocs = dbg.memory.drainAllocationsLog();
assertEq(dbg.memory.allocationsLogOverflowed, false);
// If we keep allocations under the max log length, then we shouldn't have
// overflowed.
dbg.memory.maxAllocationsLogLength = 10000;
root.eval("this.objs = [{}, {}, {}, {}];");
assertEq(dbg.memory.allocationsLogOverflowed, false);

View File

@ -0,0 +1,12 @@
// |jit-test| error:foo
// Stress the debugger's exception-unwind path: an onPop handler re-enters
// frame.eval() (bounded to 30 times) while an exception is unwinding, and
// the eval'd assignment `x = 3` itself throws because the property's setter
// is Array.prototype.map (invoked with a non-callable argument).
// The test passes if the original "foo" exception is the one that escapes.
var g = newGlobal();
g.parent = this;
g.eval("(" + function() {
var dbg = new Debugger(parent);
count = 0;
dbg.onExceptionUnwind = function(frame) {
frame.onPop = function() { if (count++ < 30) frame.eval("x = 3"); };
};
} + ")()");
Object.defineProperty(this, "x", {set: [].map});
throw "foo";

View File

@ -1,20 +0,0 @@
function roundf(y) {
return Math.round(Math.fround(y));
}
var x = -1;
assertEq(roundf(x), x);
assertEq(roundf(x), x);
var x = -2;
assertEq(roundf(x), x);
assertEq(roundf(x), x);
var x = -1024;
assertEq(roundf(x), x);
var x = -14680050;
assertEq(roundf(x), Math.fround(x));
var x = -8388610;
assertEq(roundf(x), Math.fround(x));

View File

@ -1,7 +0,0 @@
function f(x) {
var y = Math.fround(x);
assertEq(y, Math.pow(y, 1));
}
f(0);
f(2147483647);

View File

@ -0,0 +1,103 @@
// Bug 1073910
(function() {
function roundf(y) {
return Math.round(Math.fround(y));
}
var x = -1;
assertEq(roundf(x), x);
assertEq(roundf(x), x);
var x = -2;
assertEq(roundf(x), x);
assertEq(roundf(x), x);
var x = -1024;
assertEq(roundf(x), x);
var x = -14680050;
assertEq(roundf(x), Math.fround(x));
var x = -8388610;
assertEq(roundf(x), Math.fround(x));
})();
// Bug 1000606
(function() {
function f() {
var d = Math.fround(0.4999999701976776);
return Math.round(d);
}
assertEq(f(), f());
function g() {
var c = Math.fround(8886111);
return Math.round(c);
}
assertEq(g(), g());
})();
// Bug 1124485
(function() {
function h(x) {
var y = Math.fround(x);
assertEq(y, Math.pow(y, 1));
}
h(0);
h(2147483647);
})();
// Bug 1122344
(function() {
function f() {
return Math.round(Math.fround(-13527757));
};
assertEq(f(), f());
})();
(function() {
// Test values around -0.5 and +0.5
var f32 = new Float32Array(1);
var i32 = new Int32Array(f32.buffer);
function round(x) { return Math.round(x); }
function roundf(x) { return Math.round(Math.fround(x)); }
// Warm up
round(2.5);
round(3.5);
roundf(2.5);
roundf(3.5);
f32[0] = 0.5;
i32[0] += 1;
print('0.5+e =', f32[0]);
var x = f32[0];
assertEq(round(x), 1);
assertEq(roundf(x), 1);
f32[0] = 0.5;
i32[0] -= 1;
print('0.5-e =', f32[0]);
var x = f32[0];
assertEq(round(x), 0);
assertEq(roundf(x), 0);
f32[0] = -0.5;
i32[0] += 1;
print('-0.5-e =', f32[0]);
var x = f32[0];
assertEq(round(x), -1);
assertEq(roundf(x), -1);
f32[0] = -0.5;
i32[0] -= 1;
print('-0.5+e =', f32[0]);
var x = f32[0];
assertEq(round(x), -0);
assertEq(roundf(x), -0);
})();

View File

@ -53,19 +53,114 @@ for (var l = 0; l < 4; l++) {
ionFrameSize_args[l][a] = gen_ionFrameSize(30 + l, a, "ionFrameSize_callee_verify");;
}
// Check ion frames during function apply calls with the argument vector.
function ionFrame_funApply_0() {
assertJitStackInvariants.apply(this, arguments);
}
function ionFrame_funApply_1() {
ionFrame_funApply_0.apply(this, arguments);
}
// Check ion frames during function apply calls with an array of arguments.
function ionFrame_funApply_2() {
var arr = Array.apply(Array, arguments);
assertJitStackInvariants.apply(this, arr);
}
function ionFrame_funApply_3() {
var arr = Array.apply(Array, arguments);
ionFrame_funApply_2.apply(this, arr);
}
// Check ion frames during function .call calls.
function ionFrame_funCall_0() {
assertJitStackInvariants.call(this);
}
function ionFrame_funCall_1(a) {
assertJitStackInvariants.call(this, a);
}
function ionFrame_funCall_2(a, b) {
assertJitStackInvariants.call(this, a, b);
}
function ionFrame_funCall_3(a, b, c) {
assertJitStackInvariants.call(this, a, b, c);
}
function ionFrame_funCall_x0() {
ionFrame_funCall_0.call(this);
}
function ionFrame_funCall_x1(a) {
ionFrame_funCall_1.call(this, a);
}
function ionFrame_funCall_x2(a, b) {
ionFrame_funCall_2.call(this, a, b);
}
function ionFrame_funCall_x3(a, b, c) {
ionFrame_funCall_3.call(this, a, b, c);
}
// Check ion frames during spread calls.
function ionFrame_spreadCall_0() {
var arr = Array.apply(Array, arguments);
assertJitStackInvariants(...arr);
}
function ionFrame_spreadCall_1() {
var arr = Array.apply(Array, arguments);
ionFrame_spreadCall_0(...arr);
}
for (i = 0; i < 40; i++) {
entryFrame_1();
entryFrame_1(0);
entryFrame_1(0, 1);
rectifierFrame_1(i);
rectifierFrame_2(i);
rectifierFrame_3(i);
rectifierFrame_4(i);
ionFrameSize_0(i);
ionFrameSize_1(i);
ionFrameSize_2(i);
ionFrameSize_3(i);
for (var l = 0; l < 4; l++)
for (var a = 0; a < 4; a++)
ionFrameSize_args[l][a](i);
ionFrame_funApply_0();
ionFrame_funApply_0(1);
ionFrame_funApply_0(1, 2);
ionFrame_funApply_0(1, 2, 3);
ionFrame_funApply_1();
ionFrame_funApply_1(1);
ionFrame_funApply_1(1, 2);
ionFrame_funApply_1(1, 2, 3);
ionFrame_funApply_2();
ionFrame_funApply_2(1);
ionFrame_funApply_2(1, 2);
ionFrame_funApply_2(1, 2, 3);
ionFrame_funApply_3();
ionFrame_funApply_3(1);
ionFrame_funApply_3(1, 2);
ionFrame_funApply_3(1, 2, 3);
ionFrame_funCall_0();
ionFrame_funCall_1(1);
ionFrame_funCall_2(1, 2);
ionFrame_funCall_3(1, 2, 3);
ionFrame_funCall_x0();
ionFrame_funCall_x1(1);
ionFrame_funCall_x2(1, 2);
ionFrame_funCall_x3(1, 2, 3);
ionFrame_spreadCall_0();
ionFrame_spreadCall_0(1);
ionFrame_spreadCall_0(1, 2);
ionFrame_spreadCall_0(1, 2, 3);
ionFrame_spreadCall_1();
ionFrame_spreadCall_1(1);
ionFrame_spreadCall_1(1, 2);
ionFrame_spreadCall_1(1, 2, 3);
}

View File

@ -207,7 +207,7 @@ CollectJitStackScripts(JSContext *cx, const Debugger::ExecutionObservableSet &ob
// See cases F and G in PatchBaselineFramesForDebugMode.
if (!entries.append(DebugModeOSREntry(script, info)))
return false;
} else if (frame->isDebuggerHandlingException()) {
} else if (frame->isHandlingException()) {
// We are in the middle of handling an exception and the frame
// must have an override pc.
uint32_t offset = script->pcToOffset(frame->overridePc());
@ -335,7 +335,7 @@ SpewPatchStubFrame(ICStub *oldStub, ICStub *newStub)
{
JitSpew(JitSpew_BaselineDebugModeOSR,
"Patch stub %p -> %p on BaselineStub frame (%s)",
oldStub, newStub, ICStub::KindString(newStub->kind()));
oldStub, newStub, newStub ? ICStub::KindString(newStub->kind()) : "exception handler");
}
static void
@ -431,7 +431,7 @@ PatchBaselineFramesForDebugMode(JSContext *cx, const Debugger::ExecutionObservab
//
// Patch the resume address to nullptr, to ensure the old
// address is not used anywhere.
MOZ_ASSERT(iter.baselineFrame()->isDebuggerHandlingException());
MOZ_ASSERT(iter.baselineFrame()->isHandlingException());
MOZ_ASSERT(iter.baselineFrame()->overridePc() == pc);
uint8_t *retAddr = nullptr;
SpewPatchBaselineFrameFromExceptionHandler(prev->returnAddress(), retAddr,
@ -592,7 +592,7 @@ PatchBaselineFramesForDebugMode(JSContext *cx, const Debugger::ExecutionObservab
// (i.e. fallback calls), we need to check for recompilation using
// DebugModeOSRVolatileStub.
if (layout->maybeStubPtr()) {
MOZ_ASSERT(entry.newStub);
MOZ_ASSERT(entry.newStub || prevFrame->isHandlingException());
SpewPatchStubFrame(entry.oldStub, entry.newStub);
layout->setStubPtr(entry.newStub);
}
@ -709,6 +709,16 @@ CloneOldBaselineStub(JSContext *cx, DebugModeOSREntryVector &entries, size_t ent
ICStub *oldStub = entry.oldStub;
MOZ_ASSERT(ICStub::CanMakeCalls(oldStub->kind()));
if (entry.frameKind == ICEntry::Kind_Invalid) {
// The exception handler can modify the frame's override pc while
// unwinding scopes. This is fine, but if we have a stub frame, the
// code below will get confused: the entry's pcOffset doesn't match the
// stub that's still on the stack. To prevent that, we just set the new
// stub to nullptr as we will never return to this stub frame anyway.
entry.newStub = nullptr;
return true;
}
// Get the new fallback stub from the recompiled baseline script.
ICFallbackStub *fallbackStub = entry.fallbackStub();
@ -721,9 +731,11 @@ CloneOldBaselineStub(JSContext *cx, DebugModeOSREntryVector &entries, size_t ent
return true;
}
// Check if we have already cloned the stub on a younger frame.
// Check if we have already cloned the stub on a younger frame. Ignore
// frames that entered the exception handler (entries[i].newStub is nullptr
// in that case, see above).
for (size_t i = 0; i < entryIndex; i++) {
if (oldStub == entries[i].oldStub) {
if (oldStub == entries[i].oldStub && entries[i].frameKind != ICEntry::Kind_Invalid) {
MOZ_ASSERT(entries[i].newStub);
entry.newStub = entries[i].newStub;
return true;

View File

@ -53,6 +53,7 @@ class DebugModeOSRVolatileStub
{ }
bool invalid() const {
MOZ_ASSERT(!frame_->isHandlingException());
ICEntry &entry = frame_->script()->baselineScript()->icEntryFromPCOffset(pcOffset_);
return stub_ != entry.fallbackStub();
}

View File

@ -74,11 +74,10 @@ class BaselineFrame
// This flag should never be set when we're executing JIT code.
HAS_OVERRIDE_PC = 1 << 11,
// Frame has called out to Debugger code from
// HandleExceptionBaseline. This is set for debug mode OSR sanity
// checking when it handles corner cases which only arise during
// exception handling.
DEBUGGER_HANDLING_EXCEPTION = 1 << 12
// If set, we're handling an exception for this frame. This is set for
// debug mode OSR sanity checking when it handles corner cases which
// only arise during exception handling.
HANDLING_EXCEPTION = 1 << 12
};
protected: // Silence Clang warning about unused private fields.
@ -290,14 +289,14 @@ class BaselineFrame
}
inline void unsetIsDebuggee();
bool isDebuggerHandlingException() const {
return flags_ & DEBUGGER_HANDLING_EXCEPTION;
bool isHandlingException() const {
return flags_ & HANDLING_EXCEPTION;
}
void setIsDebuggerHandlingException() {
flags_ |= DEBUGGER_HANDLING_EXCEPTION;
void setIsHandlingException() {
flags_ |= HANDLING_EXCEPTION;
}
void unsetIsDebuggerHandlingException() {
flags_ &= ~DEBUGGER_HANDLING_EXCEPTION;
void unsetIsHandlingException() {
flags_ &= ~HANDLING_EXCEPTION;
}
JSScript *evalScript() const {

View File

@ -4099,6 +4099,10 @@ ICGetElemNativeCompiler::emitCallScripted(MacroAssembler &masm, Register objReg)
// Enter stub frame.
enterStubFrame(masm, regs.getAny());
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(0);
// Push |this| for getter (target object).
{
ValueOperand val = regs.takeAnyValue();
@ -7372,6 +7376,10 @@ ICGetProp_CallScripted::Compiler::generateStubCode(MacroAssembler &masm)
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(0);
// Getter is called with 0 arguments, just |obj| as thisv.
// Note that we use Push, not push, so that callJit will align the stack
// properly on ARM.
@ -8866,14 +8874,17 @@ ICSetProp_CallScripted::Compiler::generateStubCode(MacroAssembler &masm)
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
masm.loadBaselineOrIonRaw(code, code, &failureLeaveStubFrame);
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(1);
// Setter is called with the new value as the only argument, and |obj| as thisv.
// Note that we use Push, not push, so that callJit will align the stack
// properly on ARM.
// To Push R1, read it off of the stowed values on stack.
// Stack: [ ..., R0, R1, ..STUBFRAME-HEADER.. ]
masm.movePtr(BaselineStackReg, scratch);
masm.PushValue(Address(scratch, STUB_FRAME_SIZE));
// Stack: [ ..., R0, R1, ..STUBFRAME-HEADER.., padding? ]
masm.PushValue(Address(BaselineFrameReg, STUB_FRAME_SIZE));
masm.Push(R0);
EmitCreateStubFrameDescriptor(masm, scratch);
masm.Push(Imm32(1)); // ActualArgc is 1
@ -9622,7 +9633,8 @@ DoSpreadCallFallback(JSContext *cx, BaselineFrame *frame, ICCall_Fallback *stub_
}
void
ICCallStubCompiler::pushCallArguments(MacroAssembler &masm, GeneralRegisterSet regs, Register argcReg)
ICCallStubCompiler::pushCallArguments(MacroAssembler &masm, GeneralRegisterSet regs,
Register argcReg, bool isJitCall)
{
MOZ_ASSERT(!regs.has(argcReg));
@ -9639,6 +9651,11 @@ ICCallStubCompiler::pushCallArguments(MacroAssembler &masm, GeneralRegisterSet r
// return address, old frame pointer and stub reg.
masm.addPtr(Imm32(STUB_FRAME_SIZE), argPtr);
// Align the stack such that the JitFrameLayout is aligned on the
// JitStackAlignment.
if (isJitCall)
masm.alignJitStackBasedOnNArgs(argcReg);
// Push all values, starting at the last one.
Label loop, done;
masm.bind(&loop);
@ -9670,7 +9687,7 @@ ICCallStubCompiler::guardSpreadCall(MacroAssembler &masm, Register argcReg, Labe
void
ICCallStubCompiler::pushSpreadCallArguments(MacroAssembler &masm, GeneralRegisterSet regs,
Register argcReg)
Register argcReg, bool isJitCall)
{
// Push arguments
Register startReg = regs.takeAny();
@ -9682,6 +9699,11 @@ ICCallStubCompiler::pushSpreadCallArguments(MacroAssembler &masm, GeneralRegiste
masm.lshiftPtr(Imm32(3), endReg);
masm.addPtr(startReg, endReg);
// Align the stack such that the JitFrameLayout is aligned on the
// JitStackAlignment.
if (isJitCall)
masm.alignJitStackBasedOnNArgs(argcReg);
// Copying pre-decrements endReg by 8 until startReg is reached
Label copyDone;
Label copyStart;
@ -9824,6 +9846,7 @@ ICCallStubCompiler::pushCallerArguments(MacroAssembler &masm, GeneralRegisterSet
masm.loadPtr(Address(BaselineFrameReg, 0), startReg);
masm.loadPtr(Address(startReg, BaselineFrame::offsetOfNumActualArgs()), endReg);
masm.addPtr(Imm32(BaselineFrame::offsetOfArg(0)), startReg);
masm.alignJitStackBasedOnNArgs(endReg);
masm.lshiftPtr(Imm32(ValueShift), endReg);
masm.addPtr(startReg, endReg);
@ -9850,6 +9873,7 @@ ICCallStubCompiler::pushArrayArguments(MacroAssembler &masm, Address arrayVal,
masm.extractObject(arrayVal, startReg);
masm.loadPtr(Address(startReg, NativeObject::offsetOfElements()), startReg);
masm.load32(Address(startReg, ObjectElements::offsetOfInitializedLength()), endReg);
masm.alignJitStackBasedOnNArgs(endReg);
masm.lshiftPtr(Imm32(ValueShift), endReg);
masm.addPtr(startReg, endReg);
@ -9914,7 +9938,7 @@ ICCall_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
regs.take(R0.scratchReg()); // argc.
pushCallArguments(masm, regs, R0.scratchReg());
pushCallArguments(masm, regs, R0.scratchReg(), /* isJitCall = */ false);
masm.push(BaselineStackReg);
masm.push(R0.scratchReg());
@ -10144,9 +10168,9 @@ ICCallScriptedCompiler::generateStubCode(MacroAssembler &masm)
// right-to-left so duplicate them on the stack in reverse order.
// |this| and callee are pushed last.
if (isSpread_)
pushSpreadCallArguments(masm, regs, argcReg);
pushSpreadCallArguments(masm, regs, argcReg, /* isJitCall = */ true);
else
pushCallArguments(masm, regs, argcReg);
pushCallArguments(masm, regs, argcReg, /* isJitCall = */ true);
// The callee is on top of the stack. Pop and unbox it.
ValueOperand val = regs.takeAnyValue();
@ -10187,27 +10211,40 @@ ICCallScriptedCompiler::generateStubCode(MacroAssembler &masm)
Label skipThisReplace;
masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
Register scratchReg = JSReturnOperand.scratchReg();
// Current stack: [ ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
// Current stack: [ Padding?, ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
// However, we can't use this ThisVal, because it hasn't been traced. We need to use
// The ThisVal higher up the stack:
// Current stack: [ ThisVal, ARGVALS..., ...STUB FRAME...,
// ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
masm.loadPtr(Address(BaselineStackReg, 2*sizeof(size_t)), scratchReg);
// Padding?, ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
// scratchReg now contains actualArgCount. Double it to account for skipping past two
// pushed copies of argument values for non-spread, increment it to account for skipping
// actual argument values and array object for spread. Additionally, we need to add:
// STUB_FRAME_SIZE + sizeof(ThisVal) + sizeof(size_t) + sizeof(void *) + sizeof(size_t)
// for: stub frame, this value, actual argc, callee, and descriptor
if (isSpread_)
masm.add32(Imm32(1), scratchReg);
else
masm.lshiftPtr(Imm32(1), scratchReg);
BaseValueIndex reloadThisSlot(BaselineStackReg, scratchReg,
STUB_FRAME_SIZE + sizeof(Value) + 3 * sizeof(size_t));
masm.loadValue(reloadThisSlot, JSReturnOperand);
// Restore the BaselineFrameReg based on the frame descriptor.
//
// BaselineFrameReg = BaselineStackReg
// + sizeof(Descriptor) + sizeof(Callee) + sizeof(ActualArgc)
// + stubFrameSize(Descriptor)
// - sizeof(BaselineStubReg) - sizeof(BaselineFrameReg)
Address descriptorAddr(BaselineStackReg, 0);
masm.loadPtr(descriptorAddr, BaselineFrameReg);
masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), BaselineFrameReg);
masm.addPtr(Imm32((3 - 2) * sizeof(size_t)), BaselineFrameReg);
masm.addPtr(BaselineStackReg, BaselineFrameReg);
// Load the number of arguments present before the stub frame.
Register argcReg = JSReturnOperand.scratchReg();
if (isSpread_) {
// Account for the Array object.
masm.move32(Imm32(1), argcReg);
} else {
Address argcAddr(BaselineStackReg, 2 * sizeof(size_t));
masm.loadPtr(argcAddr, argcReg);
}
// Current stack: [ ThisVal, ARGVALS..., ...STUB FRAME..., <-- BaselineFrameReg
// Padding?, ARGVALS..., ThisVal, ActualArgc, Callee, Descriptor ]
//
// &ThisVal = BaselineFrameReg + argc * sizeof(Value) + STUB_FRAME_SIZE
BaseValueIndex thisSlotAddr(BaselineFrameReg, argcReg, STUB_FRAME_SIZE);
masm.loadValue(thisSlotAddr, JSReturnOperand);
#ifdef DEBUG
masm.branchTestObject(Assembler::Equal, JSReturnOperand, &skipThisReplace);
masm.assumeUnreachable("Return of constructing call should be an object.");
@ -10407,9 +10444,9 @@ ICCall_Native::Compiler::generateStubCode(MacroAssembler &masm)
// right-to-left so duplicate them on the stack in reverse order.
// |this| and callee are pushed last.
if (isSpread_)
pushSpreadCallArguments(masm, regs, argcReg);
pushSpreadCallArguments(masm, regs, argcReg, /* isJitCall = */ false);
else
pushCallArguments(masm, regs, argcReg);
pushCallArguments(masm, regs, argcReg, /* isJitCall = */ false);
if (isConstructing_) {
// Stack looks like: [ ..., Arg0Val, ThisVal, CalleeVal ]
@ -10504,7 +10541,7 @@ ICCall_ClassHook::Compiler::generateStubCode(MacroAssembler &masm)
enterStubFrame(masm, regs.getAny());
regs.add(scratch);
pushCallArguments(masm, regs, argcReg);
pushCallArguments(masm, regs, argcReg, /* isJitCall = */ false);
regs.take(scratch);
if (isConstructing_) {
@ -10809,25 +10846,34 @@ ICCall_ScriptedFunCall::Compiler::generateStubCode(MacroAssembler &masm)
if (canUseTailCallReg)
regs.add(BaselineTailCallReg);
// Decrement argc if argc > 0. If argc == 0, push |undefined| as |this|.
Label zeroArgs, done;
masm.branchTest32(Assembler::Zero, argcReg, argcReg, &zeroArgs);
// Avoid the copy of the callee (function.call).
masm.sub32(Imm32(1), argcReg);
// Values are on the stack left-to-right. Calling convention wants them
// right-to-left so duplicate them on the stack in reverse order.
pushCallArguments(masm, regs, argcReg);
// Discard callee (function.call).
masm.addPtr(Imm32(sizeof(Value)), StackPointer);
pushCallArguments(masm, regs, argcReg, /* isJitCall = */ true);
// Pop scripted callee (the original |this|).
ValueOperand val = regs.takeAnyValue();
masm.popValue(val);
// Decrement argc if argc > 0. If argc == 0, push |undefined| as |this|.
Label zeroArgs, done;
masm.branchTest32(Assembler::Zero, argcReg, argcReg, &zeroArgs);
masm.sub32(Imm32(1), argcReg);
masm.jump(&done);
masm.bind(&zeroArgs);
// Copy scripted callee (the original |this|).
Address thisSlotFromStubFrame(BaselineFrameReg, STUB_FRAME_SIZE);
masm.loadValue(thisSlotFromStubFrame, val);
// Align the stack.
masm.alignJitStackBasedOnNArgs(0);
// Store the new |this|.
masm.pushValue(UndefinedValue());
masm.bind(&done);
// Unbox scripted callee.

View File

@ -5908,8 +5908,10 @@ class ICCallStubCompiler : public ICStubCompiler
FunApply_Array
};
void pushCallArguments(MacroAssembler &masm, GeneralRegisterSet regs, Register argcReg);
void pushSpreadCallArguments(MacroAssembler &masm, GeneralRegisterSet regs, Register argcReg);
void pushCallArguments(MacroAssembler &masm, GeneralRegisterSet regs, Register argcReg,
bool isJitCall);
void pushSpreadCallArguments(MacroAssembler &masm, GeneralRegisterSet regs, Register argcReg,
bool isJitCall);
void guardSpreadCall(MacroAssembler &masm, Register argcReg, Label *failure);
Register guardFunApply(MacroAssembler &masm, GeneralRegisterSet regs, Register argcReg,
bool checkNative, FunApplyThing applyThing, Label *failure);

View File

@ -2006,6 +2006,10 @@ CodeGenerator::visitOsrEntry(LOsrEntry *lir)
// to 0, before reserving the stack.
MOZ_ASSERT(masm.framePushed() == frameSize());
masm.setFramePushed(0);
// Ensure that the Ion frames is properly aligned.
masm.assertStackAlignment(JitStackAlignment, 0);
masm.reserveStack(frameSize());
}
@ -3075,43 +3079,100 @@ CodeGenerator::emitPushArguments(LApplyArgsGeneric *apply, Register extraStackSp
{
// Holds the function nargs. Initially undefined.
Register argcreg = ToRegister(apply->getArgc());
Register copyreg = ToRegister(apply->getTempObject());
size_t argvOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
Label end;
// Initialize the loop counter AND Compute the stack usage (if == 0)
masm.movePtr(argcreg, extraStackSpace);
// Align the JitFrameLayout on the JitStackAlignment.
const uint32_t alignment = JitStackAlignment / sizeof(Value);
if (alignment > 1) {
MOZ_ASSERT(frameSize() % JitStackAlignment == 0,
"Stack padding assumes that the frameSize is correct");
MOZ_ASSERT(alignment == 2);
Label noPaddingNeeded;
// if the number of arguments is odd, then we do not need any padding.
masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
masm.addPtr(Imm32(1), extraStackSpace);
masm.bind(&noPaddingNeeded);
}
// Reserve space for copying the arguments.
NativeObject::elementsSizeMustNotOverflow();
masm.lshiftPtr(Imm32(ValueShift), extraStackSpace);
masm.subPtr(extraStackSpace, StackPointer);
#ifdef DEBUG
// Put a magic value in the space reserved for padding. Note, this code
// cannot be merged with the previous test, as not all architectures can
// write below their stack pointers.
if (alignment > 1) {
MOZ_ASSERT(alignment == 2);
Label noPaddingNeeded;
// if the number of arguments is odd, then we do not need any padding.
masm.branchTestPtr(Assembler::NonZero, argcreg, Imm32(1), &noPaddingNeeded);
BaseValueIndex dstPtr(StackPointer, argcreg);
masm.storeValue(MagicValue(JS_ARG_POISON), dstPtr);
masm.bind(&noPaddingNeeded);
}
#endif
// Skip the copy of arguments.
Label end;
masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);
// We are making a copy of the arguments which are above the JitFrameLayout
// of the current Ion frame.
//
// [arg1] [arg0] <- src [this] [JitFrameLayout] [.. frameSize ..] [pad] [arg1] [arg0] <- dst
// Compute the source and destination offsets into the stack.
size_t argvSrcOffset = frameSize() + JitFrameLayout::offsetOfActualArgs();
size_t argvDstOffset = 0;
// Save the extra stack space, and re-use the register as a base.
masm.push(extraStackSpace);
Register argvSrcBase = extraStackSpace;
argvSrcOffset += sizeof(void *);
argvDstOffset += sizeof(void *);
// Save the actual number of register, and re-use the register as an index register.
masm.push(argcreg);
Register argvIndex = argcreg;
argvSrcOffset += sizeof(void *);
argvDstOffset += sizeof(void *);
// srcPtr = (StackPointer + extraStackSpace) + argvSrcOffset
// dstPtr = (StackPointer ) + argvDstOffset
masm.addPtr(StackPointer, argvSrcBase);
// Copy arguments.
{
Register count = extraStackSpace; // <- argcreg
Label loop;
masm.bind(&loop);
// We remove sizeof(void*) from argvOffset because without it we target
// the address after the memory area that we want to copy.
BaseValueIndex disp(StackPointer, argcreg, argvOffset - sizeof(void*));
// Do not use Push here because other this account to 1 in the framePushed
// instead of 0. These push are only counted by argcreg.
masm.loadPtr(disp, copyreg);
masm.push(copyreg);
// As argvIndex is off by 1, and we use the decBranchPtr instruction
// to loop back, we have to substract the size of the word which are
// copied.
BaseValueIndex srcPtr(argvSrcBase, argvIndex, argvSrcOffset - sizeof(void *));
BaseValueIndex dstPtr(StackPointer, argvIndex, argvDstOffset - sizeof(void *));
masm.loadPtr(srcPtr, copyreg);
masm.storePtr(copyreg, dstPtr);
// Handle 32 bits architectures.
if (sizeof(Value) == 2 * sizeof(void*)) {
masm.loadPtr(disp, copyreg);
masm.push(copyreg);
BaseValueIndex srcPtrLow(argvSrcBase, argvIndex, argvSrcOffset - 2 * sizeof(void *));
BaseValueIndex dstPtrLow(StackPointer, argvIndex, argvDstOffset - 2 * sizeof(void *));
masm.loadPtr(srcPtrLow, copyreg);
masm.storePtr(copyreg, dstPtrLow);
}
masm.decBranchPtr(Assembler::NonZero, count, Imm32(1), &loop);
masm.decBranchPtr(Assembler::NonZero, argvIndex, Imm32(1), &loop);
}
// Compute the stack usage.
masm.movePtr(argcreg, extraStackSpace);
NativeObject::elementsSizeMustNotOverflow();
masm.lshiftPtr(Imm32(ValueShift), extraStackSpace);
// Restore argcreg and the extra stack space counter.
masm.pop(argcreg);
masm.pop(extraStackSpace);
// Join with all arguments copied and the extra stack usage computed.
masm.bind(&end);
@ -3136,7 +3197,7 @@ CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric *apply)
// Temporary register for modifying the function object.
Register objreg = ToRegister(apply->getTempObject());
Register copyreg = ToRegister(apply->getTempCopy());
Register extraStackSpace = ToRegister(apply->getTempStackCounter());
// Holds the function nargs. Initially undefined.
Register argcreg = ToRegister(apply->getArgc());
@ -3150,14 +3211,14 @@ CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric *apply)
}
// Copy the arguments of the current function.
emitPushArguments(apply, copyreg);
emitPushArguments(apply, extraStackSpace);
masm.checkStackAlignment();
// If the function is native, only emit the call to InvokeFunction.
if (apply->hasSingleTarget() && apply->getSingleTarget()->isNative()) {
emitCallInvokeFunction(apply, copyreg);
emitPopArguments(apply, copyreg);
emitCallInvokeFunction(apply, extraStackSpace);
emitPopArguments(apply, extraStackSpace);
return;
}
@ -3176,19 +3237,21 @@ CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric *apply)
{
// Create the frame descriptor.
unsigned pushed = masm.framePushed();
masm.addPtr(Imm32(pushed), copyreg);
masm.makeFrameDescriptor(copyreg, JitFrame_IonJS);
Register stackSpace = extraStackSpace;
masm.addPtr(Imm32(pushed), stackSpace);
masm.makeFrameDescriptor(stackSpace, JitFrame_IonJS);
masm.Push(argcreg);
masm.Push(calleereg);
masm.Push(copyreg); // descriptor
masm.Push(stackSpace); // descriptor
Label underflow, rejoin;
// Check whether the provided arguments satisfy target argc.
if (!apply->hasSingleTarget()) {
masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), copyreg);
masm.branch32(Assembler::Below, argcreg, copyreg, &underflow);
Register nformals = extraStackSpace;
masm.load16ZeroExtend(Address(calleereg, JSFunction::offsetOfNargs()), nformals);
masm.branch32(Assembler::Below, argcreg, nformals, &underflow);
} else {
masm.branch32(Assembler::Below, argcreg, Imm32(apply->getSingleTarget()->nargs()),
&underflow);
@ -3218,9 +3281,9 @@ CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric *apply)
markSafepointAt(callOffset, apply);
// Recover the number of arguments from the frame descriptor.
masm.loadPtr(Address(StackPointer, 0), copyreg);
masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), copyreg);
masm.subPtr(Imm32(pushed), copyreg);
masm.loadPtr(Address(StackPointer, 0), stackSpace);
masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), stackSpace);
masm.subPtr(Imm32(pushed), stackSpace);
// Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
// The return address has already been removed from the Ion frame.
@ -3232,12 +3295,12 @@ CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric *apply)
// Handle uncompiled or native functions.
{
masm.bind(&invoke);
emitCallInvokeFunction(apply, copyreg);
emitCallInvokeFunction(apply, extraStackSpace);
}
// Pop arguments and continue.
masm.bind(&end);
emitPopArguments(apply, copyreg);
emitPopArguments(apply, extraStackSpace);
}
typedef bool (*ArraySpliceDenseFn)(JSContext *, HandleObject, uint32_t, uint32_t);

View File

@ -532,31 +532,28 @@ HandleClosingGeneratorReturn(JSContext *cx, const JitFrameIterator &frame, jsbyt
ForcedReturn(cx, frame, pc, rfe, calledDebugEpilogue);
}
struct AutoDebuggerHandlingException
struct AutoBaselineHandlingException
{
BaselineFrame *frame;
AutoDebuggerHandlingException(BaselineFrame *frame, jsbytecode *pc)
AutoBaselineHandlingException(BaselineFrame *frame, jsbytecode *pc)
: frame(frame)
{
frame->setIsDebuggerHandlingException();
frame->setOverridePc(pc); // Will be cleared in HandleException.
frame->setIsHandlingException();
frame->setOverridePc(pc);
}
~AutoDebuggerHandlingException() {
frame->unsetIsDebuggerHandlingException();
~AutoBaselineHandlingException() {
frame->unsetIsHandlingException();
frame->clearOverridePc();
}
};
static void
HandleExceptionBaseline(JSContext *cx, const JitFrameIterator &frame, ResumeFromException *rfe,
jsbytecode **unwoundScopeToPc, bool *calledDebugEpilogue)
jsbytecode *pc, jsbytecode **unwoundScopeToPc, bool *calledDebugEpilogue)
{
MOZ_ASSERT(frame.isBaselineJS());
MOZ_ASSERT(!*calledDebugEpilogue);
RootedScript script(cx);
jsbytecode *pc;
frame.baselineScriptAndPc(script.address(), &pc);
// We may be propagating a forced return from the interrupt
// callback, which cannot easily force a return.
if (cx->isPropagatingForcedReturn()) {
@ -569,10 +566,6 @@ HandleExceptionBaseline(JSContext *cx, const JitFrameIterator &frame, ResumeFrom
if (cx->isExceptionPending() && cx->compartment()->isDebuggee() &&
cx->getPendingException(&exception) && !exception.isMagic(JS_GENERATOR_CLOSING))
{
// Set for debug mode OSR. See note concerning
// 'isDebuggerHandlingException' in CollectJitStackScripts.
AutoDebuggerHandlingException debuggerHandling(frame.baselineFrame(), pc);
switch (Debugger::onExceptionUnwind(cx, frame.baselineFrame())) {
case JSTRAP_ERROR:
// Uncatchable exception.
@ -593,6 +586,8 @@ HandleExceptionBaseline(JSContext *cx, const JitFrameIterator &frame, ResumeFrom
}
}
RootedScript script(cx, frame.baselineFrame()->script());
if (!script->hasTrynotes()) {
HandleClosingGeneratorReturn(cx, frame, pc, *unwoundScopeToPc, rfe, calledDebugEpilogue);
return;
@ -691,13 +686,6 @@ struct AutoDeleteDebugModeOSRInfo
~AutoDeleteDebugModeOSRInfo() { frame->deleteDebugModeOSRInfo(); }
};
struct AutoClearBaselineOverridePc
{
BaselineFrame *frame;
explicit AutoClearBaselineOverridePc(BaselineFrame *frame) : frame(frame) { MOZ_ASSERT(frame); }
~AutoClearBaselineOverridePc() { frame->clearOverridePc(); }
};
struct AutoResetLastProfilerFrameOnReturnFromException
{
JSContext *cx;
@ -814,8 +802,11 @@ HandleException(ResumeFromException *rfe)
// Remember the pc we unwound the scope to.
jsbytecode *unwoundScopeToPc = nullptr;
// Clear the frame's override pc when we leave this block. This is
// fine because we're either:
// Set a flag on the frame to signal to DebugModeOSR that we're
// handling an exception. Also ensure the frame has an override
// pc. We clear the frame's override pc when we leave this block,
// this is fine because we're either:
//
// (1) Going to enter a catch or finally block. We don't want to
// keep the old pc when we're executing JIT code.
// (2) Going to pop the frame, either here or a forced return.
@ -823,17 +814,16 @@ HandleException(ResumeFromException *rfe)
// (3) Performing an exception bailout. In this case
// FinishBailoutToBaseline will set the pc to the resume pc
// and clear it before it returns to JIT code.
AutoClearBaselineOverridePc clearPc(iter.baselineFrame());
jsbytecode *pc;
iter.baselineScriptAndPc(nullptr, &pc);
AutoBaselineHandlingException handlingException(iter.baselineFrame(), pc);
HandleExceptionBaseline(cx, iter, rfe, &unwoundScopeToPc, &calledDebugEpilogue);
HandleExceptionBaseline(cx, iter, rfe, pc, &unwoundScopeToPc, &calledDebugEpilogue);
// If we are propagating an exception through a frame with
// on-stack recompile info, we should free the allocated
// RecompileInfo struct before we leave this block, as we will not
// be returning to the recompile handler.
//
// We cannot delete it immediately because of the call to
// iter.baselineScriptAndPc below.
AutoDeleteDebugModeOSRInfo deleteDebugModeOSRInfo(iter.baselineFrame());
if (rfe->kind != ResumeFromException::RESUME_ENTRY_FRAME)
@ -3097,14 +3087,17 @@ AssertJitStackInvariants(JSContext *cx)
{
for (JitActivationIterator activations(cx->runtime()); !activations.done(); ++activations) {
JitFrameIterator frames(activations);
size_t prevFrameSize = 0;
size_t frameSize = 0;
bool isScriptedCallee = false;
for (; !frames.done(); ++frames) {
size_t calleeFp = reinterpret_cast<size_t>(frames.fp());
size_t callerFp = reinterpret_cast<size_t>(frames.prevFp());
MOZ_ASSERT(callerFp >= calleeFp);
prevFrameSize = frameSize;
frameSize = callerFp - calleeFp;
if (frames.prevType() == JitFrame_Rectifier) {
size_t calleeFp = reinterpret_cast<size_t>(frames.fp());
size_t callerFp = reinterpret_cast<size_t>(frames.prevFp());
MOZ_ASSERT(callerFp >= calleeFp);
size_t frameSize = callerFp - calleeFp;
MOZ_RELEASE_ASSERT(frameSize % JitStackAlignment == 0,
"The rectifier frame should keep the alignment");
@ -3120,6 +3113,12 @@ AssertJitStackInvariants(JSContext *cx)
"The frame size is optimal");
}
if (frames.type() == JitFrame_Exit) {
// For the moment, we do not keep the JitStackAlignment
// alignment for exit frames.
frameSize -= ExitFrameLayout::Size();
}
if (frames.isIonJS()) {
// Ideally, we should not have such requirement, but keep the
// alignment-delta as part of the Safepoint such that we can pad
@ -3128,8 +3127,23 @@ AssertJitStackInvariants(JSContext *cx)
// everything can properly be aligned before adding complexity.
MOZ_RELEASE_ASSERT(frames.ionScript()->frameSize() % JitStackAlignment == 0,
"Ensure that if the Ion frame is aligned, then the spill base is also aligned");
if (isScriptedCallee) {
MOZ_RELEASE_ASSERT(prevFrameSize % JitStackAlignment == 0,
"The ion frame should keep the alignment");
}
}
// The stack is dynamically aligned by baseline stubs before calling
// any jitted code.
if (frames.prevType() == JitFrame_BaselineStub && isScriptedCallee) {
MOZ_RELEASE_ASSERT(calleeFp % JitStackAlignment == 0,
"The baseline stub restores the stack alignment");
}
isScriptedCallee = false
|| frames.isScripted()
|| frames.type() == JitFrame_Rectifier;
}
MOZ_RELEASE_ASSERT(frames.type() == JitFrame_Entry,

View File

@ -1768,7 +1768,7 @@ class LApplyArgsGeneric : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES +
const LDefinition *getTempObject() {
return getTemp(0);
}
const LDefinition *getTempCopy() {
const LDefinition *getTempStackCounter() {
return getTemp(1);
}
};

View File

@ -487,7 +487,7 @@ LIRGenerator::visitApplyArgs(MApplyArgs *apply)
useFixed(apply->getFunction(), CallTempReg3),
useFixed(apply->getArgc(), CallTempReg0),
tempFixed(CallTempReg1), // object register
tempFixed(CallTempReg2)); // copy register
tempFixed(CallTempReg2)); // stack counter register
MDefinition *self = apply->getThis();
useBoxFixed(lir, LApplyArgsGeneric::ThisIndex, self, CallTempReg4, CallTempReg5);

View File

@ -2296,3 +2296,86 @@ MacroAssembler::profilerPreCallImpl(Register reg, Register reg2)
appendProfilerCallSite(label);
}
void
MacroAssembler::alignJitStackBasedOnNArgs(Register nargs)
{
const uint32_t alignment = JitStackAlignment / sizeof(Value);
if (alignment == 1)
return;
// A JitFrameLayout is composed of the following:
// [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
//
// We want to ensure that the |raddr| address is aligned.
// Which implies that we want to ensure that |this| is aligned.
static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
"No need to consider the JitFrameLayout for aligning the stack");
// Which implies that |argN| is aligned if |nargs| is even, and offset by
// |sizeof(Value)| if |nargs| is odd.
MOZ_ASSERT(alignment == 2);
// Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
// aligned if |nargs| is odd.
// if (nargs % 2 == 0) {
// if (sp % JitStackAlignment == 0)
// sp -= sizeof(Value);
// MOZ_ASSERT(sp % JitStackAlignment == JitStackAlignment - sizeof(Value));
// } else {
// sp = sp & ~(JitStackAlignment - 1);
// }
Label odd, end;
Label *maybeAssert = &end;
#ifdef DEBUG
Label assert;
maybeAssert = &assert;
#endif
assertStackAlignment(sizeof(Value), 0);
branchTestPtr(Assembler::NonZero, nargs, Imm32(1), &odd);
branchTestPtr(Assembler::NonZero, StackPointer, Imm32(JitStackAlignment - 1), maybeAssert);
subPtr(Imm32(sizeof(Value)), StackPointer);
#ifdef DEBUG
bind(&assert);
#endif
assertStackAlignment(JitStackAlignment, sizeof(Value));
jump(&end);
bind(&odd);
andPtr(Imm32(~(JitStackAlignment - 1)), StackPointer);
bind(&end);
}
void
MacroAssembler::alignJitStackBasedOnNArgs(uint32_t nargs)
{
const uint32_t alignment = JitStackAlignment / sizeof(Value);
if (alignment == 1)
return;
// A JitFrameLayout is composed of the following:
// [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
//
// We want to ensure that the |raddr| address is aligned.
// Which implies that we want to ensure that |this| is aligned.
static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
"No need to consider the JitFrameLayout for aligning the stack");
// Which implies that |argN| is aligned if |nargs| is even, and offset by
// |sizeof(Value)| if |nargs| is odd.
MOZ_ASSERT(alignment == 2);
// Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
// aligned if |nargs| is odd.
assertStackAlignment(sizeof(Value), 0);
if (nargs % 2 == 0) {
Label end;
branchTestPtr(Assembler::NonZero, StackPointer, Imm32(JitStackAlignment - 1), &end);
subPtr(Imm32(sizeof(Value)), StackPointer);
bind(&end);
assertStackAlignment(JitStackAlignment, sizeof(Value));
} else {
andPtr(Imm32(~(JitStackAlignment - 1)), StackPointer);
}
}

View File

@ -1252,6 +1252,12 @@ class MacroAssembler : public MacroAssemblerSpecific
PopRegsInMask(liveRegs);
}
// Align the stack pointer based on the number of arguments which are pushed
// on the stack, such that the JitFrameLayout would be correctly aligned on
// the JitStackAlignment.
void alignJitStackBasedOnNArgs(Register nargs);
void alignJitStackBasedOnNArgs(uint32_t nargs);
void assertStackAlignment(uint32_t alignment, int32_t offset = 0) {
#ifdef DEBUG
Label ok, bad;

View File

@ -52,6 +52,9 @@ CodeGeneratorARM::generatePrologue()
if (isProfilerInstrumentationEnabled())
masm.profilerEnterFrame(StackPointer, CallTempReg0);
// Ensure that the Ion frames is properly aligned.
masm.assertStackAlignment(JitStackAlignment, 0);
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameSize());
masm.checkStackAlignment();

View File

@ -49,6 +49,9 @@ CodeGeneratorMIPS::generatePrologue()
if (isProfilerInstrumentationEnabled())
masm.profilerEnterFrame(StackPointer, CallTempReg0);
// Ensure that the Ion frames is properly aligned.
masm.assertStackAlignment(JitStackAlignment, 0);
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameSize());
masm.checkStackAlignment();

View File

@ -48,6 +48,9 @@ CodeGeneratorX86Shared::generatePrologue()
if (isProfilerInstrumentationEnabled())
masm.profilerEnterFrame(StackPointer, CallTempReg0);
// Ensure that the Ion frames is properly aligned.
masm.assertStackAlignment(JitStackAlignment, 0);
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameSize());
@ -1812,6 +1815,7 @@ CodeGeneratorX86Shared::visitRound(LRound *lir)
// Branch to a slow path for non-positive inputs. Doesn't catch NaN.
masm.zeroDouble(scratch);
masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
masm.branchDouble(Assembler::DoubleLessThanOrEqual, input, scratch, &negativeOrZero);
// Input is positive. Add the biggest double less than 0.5 and
@ -1819,7 +1823,6 @@ CodeGeneratorX86Shared::visitRound(LRound *lir)
// than 0.5, adding 0.5 would undesirably round up to 1). Note that we have
// to add the input to the temp register because we're not allowed to
// modify the input register.
masm.loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
masm.addDouble(input, temp);
bailoutCvttsd2si(temp, output, lir->snapshot());
@ -1840,7 +1843,14 @@ CodeGeneratorX86Shared::visitRound(LRound *lir)
// Input is negative.
masm.bind(&negative);
// Inputs in ]-0.5; 0] need to be added 0.5, other negative inputs need to
// be added the biggest double less than 0.5.
Label loadJoin;
masm.loadConstantDouble(-0.5, scratch);
masm.branchDouble(Assembler::DoubleLessThan, input, scratch, &loadJoin);
masm.loadConstantDouble(0.5, temp);
masm.bind(&loadJoin);
if (AssemblerX86Shared::HasSSE41()) {
// Add 0.5 and round toward -Infinity. The result is stored in the temp
@ -1894,6 +1904,7 @@ CodeGeneratorX86Shared::visitRoundF(LRoundF *lir)
// Branch to a slow path for non-positive inputs. Doesn't catch NaN.
masm.zeroFloat32(scratch);
masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
masm.branchFloat(Assembler::DoubleLessThanOrEqual, input, scratch, &negativeOrZero);
// Input is non-negative. Add the biggest float less than 0.5 and truncate,
@ -1901,7 +1912,6 @@ CodeGeneratorX86Shared::visitRoundF(LRoundF *lir)
// adding 0.5 would undesirably round up to 1). Note that we have to add
// the input to the temp register because we're not allowed to modify the
// input register.
masm.loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
masm.addFloat32(input, temp);
bailoutCvttss2si(temp, output, lir->snapshot());
@ -1923,7 +1933,14 @@ CodeGeneratorX86Shared::visitRoundF(LRoundF *lir)
// Input is negative.
masm.bind(&negative);
// Inputs in ]-0.5; 0] need to be added 0.5, other negative inputs need to
// be added the biggest double less than 0.5.
Label loadJoin;
masm.loadConstantFloat32(-0.5f, scratch);
masm.branchFloat(Assembler::DoubleLessThan, input, scratch, &loadJoin);
masm.loadConstantFloat32(0.5f, temp);
masm.bind(&loadJoin);
if (AssemblerX86Shared::HasSSE41()) {
// Add 0.5 and round toward -Infinity. The result is stored in the temp

View File

@ -69,9 +69,11 @@ static MOZ_CONSTEXPR_VAR js::jit::FloatRegister double13(26, js::jit::FloatRegis
static MOZ_CONSTEXPR_VAR js::jit::FloatRegister double14(28, js::jit::FloatRegister::Double);
static MOZ_CONSTEXPR_VAR js::jit::FloatRegister double15(30, js::jit::FloatRegister::Double);
static JitCode *
linkAndAllocate(JSContext *cx, MacroAssembler *masm)
static js::jit::JitCode *
linkAndAllocate(JSContext *cx, js::jit::MacroAssembler *masm)
{
using namespace js;
using namespace js::jit;
AutoFlushICache afc("test");
Linker l(*masm);
return l.newCode<CanGC>(cx, ION_CODE);

View File

@ -361,6 +361,7 @@ Debugger::Debugger(JSContext *cx, NativeObject *dbg)
allocationSamplingProbability(1.0),
allocationsLogLength(0),
maxAllocationsLogLength(DEFAULT_MAX_ALLOCATIONS_LOG_LENGTH),
allocationsLogOverflowed(false),
frames(cx->runtime()),
scripts(cx),
sources(cx),
@ -1623,6 +1624,7 @@ Debugger::appendAllocationSite(JSContext *cx, HandleSavedFrame frame, int64_t wh
if (allocationsLogLength >= maxAllocationsLogLength) {
js_delete(allocationsLog.getFirst());
allocationsLogOverflowed = true;
} else {
allocationsLogLength++;
}

View File

@ -258,7 +258,10 @@ class Debugger : private mozilla::LinkedListElement<Debugger>
AllocationSiteList allocationsLog;
size_t allocationsLogLength;
size_t maxAllocationsLogLength;
bool allocationsLogOverflowed;
static const size_t DEFAULT_MAX_ALLOCATIONS_LOG_LENGTH = 5000;
bool appendAllocationSite(JSContext *cx, HandleSavedFrame frame, int64_t when);
void emptyAllocationsLog();

View File

@ -220,6 +220,7 @@ DebuggerMemory::drainAllocationsLog(JSContext *cx, unsigned argc, Value *vp)
js_delete(allocSite);
}
dbg->allocationsLogOverflowed = false;
dbg->allocationsLogLength = 0;
args.rval().setObject(*result);
return true;
@ -294,6 +295,14 @@ DebuggerMemory::setAllocationSamplingProbability(JSContext *cx, unsigned argc, V
return true;
}
/* static */ bool
DebuggerMemory::getAllocationsLogOverflowed(JSContext *cx, unsigned argc, Value *vp)
{
THIS_DEBUGGER_MEMORY(cx, argc, vp, "(get allocationsLogOverflowed)", args, memory);
args.rval().setBoolean(memory->getDebugger()->allocationsLogOverflowed);
return true;
}
/* Debugger.Memory.prototype.takeCensus */
@ -805,6 +814,7 @@ DebuggerMemory::takeCensus(JSContext *cx, unsigned argc, Value *vp)
JS_PSGS("trackingAllocationSites", getTrackingAllocationSites, setTrackingAllocationSites, 0),
JS_PSGS("maxAllocationsLogLength", getMaxAllocationsLogLength, setMaxAllocationsLogLength, 0),
JS_PSGS("allocationSamplingProbability", getAllocationSamplingProbability, setAllocationSamplingProbability, 0),
JS_PSG("allocationsLogOverflowed", getAllocationsLogOverflowed, 0),
JS_PS_END
};

View File

@ -38,10 +38,11 @@ class DebuggerMemory : public NativeObject {
// Accessor properties of Debugger.Memory.prototype.
static bool setTrackingAllocationSites(JSContext *cx, unsigned argc, Value *vp);
static bool getTrackingAllocationSites(JSContext *cx, unsigned argc, Value *vp);
static bool setMaxAllocationsLogLength(JSContext*cx, unsigned argc, Value *vp);
static bool getMaxAllocationsLogLength(JSContext*cx, unsigned argc, Value *vp);
static bool setAllocationSamplingProbability(JSContext*cx, unsigned argc, Value *vp);
static bool getAllocationSamplingProbability(JSContext*cx, unsigned argc, Value *vp);
static bool setMaxAllocationsLogLength(JSContext *cx, unsigned argc, Value *vp);
static bool getMaxAllocationsLogLength(JSContext *cx, unsigned argc, Value *vp);
static bool setAllocationSamplingProbability(JSContext *cx, unsigned argc, Value *vp);
static bool getAllocationSamplingProbability(JSContext *cx, unsigned argc, Value *vp);
static bool getAllocationsLogOverflowed(JSContext *cx, unsigned argc, Value *vp);
// Function properties of Debugger.Memory.prototype.
static bool takeCensus(JSContext *cx, unsigned argc, Value *vp);

View File

@ -9,5 +9,5 @@
== box-sizing-padding-box-002.xht box-sizing-padding-box-002-ref.xht
== box-sizing-padding-box-003.xht box-sizing-padding-box-003-ref.xht
random-if(Android) skip-if(B2G&&browserIsRemote) == box-sizing-replaced-001.xht box-sizing-replaced-001-ref.xht #bug 982547
fuzzy-if(Android,14,874) random-if(B2G&&browserIsRemote) == box-sizing-replaced-002.xht box-sizing-replaced-002-ref.xht # Bug 1128229
fuzzy-if(Android,27,874) random-if(B2G&&browserIsRemote) == box-sizing-replaced-002.xht box-sizing-replaced-002-ref.xht # Bug 1128229
fuzzy-if(Android,14,813) random-if(B2G&&browserIsRemote) == box-sizing-replaced-003.xht box-sizing-replaced-003-ref.xht # Bug 1128229

View File

@ -797,61 +797,72 @@ static void TimeToFrequency(float time_data[PART_LEN2],
}
#ifdef WEBRTC_AEC_DEBUG_DUMP
static void
OpenCoreDebugFiles(AecCore* aec,
int *instance_count)
{
int error = 0;
// XXX If this impacts performance (opening files here), move file open
// to Trace::set_aec_debug(), and just grab them here
if (AECDebug() && !aec->farFile) {
if (!aec->farFile) {
char path[1024];
char *filename;
path[0] = '\0';
AECDebugFilenameBase(path, sizeof(path));
filename = path + strlen(path);
if (&path[sizeof(path)] - filename < 128) {
return; // avoid a lot of snprintf's and checks lower
}
if (filename > path) {
#ifdef XP_WIN
if (*(filename-1) != '\\') {
*filename++ = '\\';
}
#else
if (*(filename-1) != '/') {
*filename++ = '/';
}
#endif
}
sprintf(filename, "aec_far%d.pcm", webrtc_aec_instance_count);
aec->farFile = fopen(path, "wb");
sprintf(filename, "aec_near%d.pcm", webrtc_aec_instance_count);
aec->nearFile = fopen(path, "wb");
sprintf(filename, "aec_out%d.pcm", webrtc_aec_instance_count);
aec->outFile = fopen(path, "wb");
sprintf(filename, "aec_out_linear%d.pcm", webrtc_aec_instance_count);
aec->outLinearFile = fopen(path, "wb");
aec->debugWritten = 0;
if (!aec->outLinearFile || !aec->outFile || !aec->nearFile || !aec->farFile) {
error = 1;
}
}
// Open a new Wav file for writing. If it was already open with a different
// sample frequency, close it first.
static void ReopenWav(rtc_WavWriter** wav_file,
const char* name,
int seq1,
int seq2,
int sample_rate) {
int written UNUSED;
char path[1024];
char *filename;
if (*wav_file) {
if (rtc_WavSampleRate(*wav_file) == sample_rate)
return;
rtc_WavClose(*wav_file);
*wav_file = NULL;
}
if (error ||
(!AECDebug() && aec->farFile)) {
AECDebugFilenameBase(path, sizeof(path));
filename = path + strlen(path);
if (filename > path) {
#ifdef XP_WIN
if (*(filename-1) != '\\') {
*filename++ = '\\';
}
#else
if (*(filename-1) != '/') {
*filename++ = '/';
}
#endif
}
written = snprintf(filename, sizeof(path) - (filename-path), "%s%d-%d.wav",
name, seq1, seq2);
assert(written >= 0); // no output error
assert(filename+written < path + sizeof(path)-1); // buffer was large enough
*wav_file = rtc_WavOpen(path, sample_rate, 1);
}
static void
OpenCoreDebugFiles(AecCore* aec, int *aec_instance_count)
{
if (AECDebug())
{
if (!aec->farFile)
{
int process_rate = aec->sampFreq > 16000 ? 16000 : aec->sampFreq;
ReopenWav(&aec->farFile, "aec_far",
aec->instance_index, aec->debug_dump_count, process_rate);
ReopenWav(&aec->nearFile, "aec_near",
aec->instance_index, aec->debug_dump_count, process_rate);
ReopenWav(&aec->outFile, "aec_out",
aec->instance_index, aec->debug_dump_count, process_rate);
ReopenWav(&aec->outLinearFile, "aec_out_linear",
aec->instance_index, aec->debug_dump_count, process_rate);
++aec->debug_dump_count;
}
} else {
if (aec->farFile) {
fclose(aec->farFile);
rtc_WavClose(aec->farFile);
}
if (aec->nearFile) {
fclose(aec->nearFile);
rtc_WavClose(aec->nearFile);
}
if (aec->outFile) {
fclose(aec->outFile);
rtc_WavClose(aec->outFile);
}
if (aec->outLinearFile) {
fclose(aec->outLinearFile);
rtc_WavClose(aec->outLinearFile);
}
aec->outLinearFile = aec->outFile = aec->nearFile = aec->farFile = NULL;
aec->debugWritten = 0;
@ -1288,11 +1299,11 @@ static void ProcessBlock(AecCore* aec) {
}
#ifdef WEBRTC_AEC_DEBUG_DUMP
OpenCoreDebugFiles(aec, &webrtc_aec_instance_count);
if (aec->outLinearFile) {
rtc_WavWriteSamples(aec->outLinearFile, e, PART_LEN);
rtc_WavWriteSamples(aec->outFile, output, PART_LEN);
}
OpenCoreDebugFiles(aec, &webrtc_aec_instance_count);
if (aec->outLinearFile) {
rtc_WavWriteSamples(aec->outLinearFile, e, PART_LEN);
rtc_WavWriteSamples(aec->outFile, output, PART_LEN);
}
#endif
}
@ -1432,29 +1443,6 @@ int WebRtcAec_FreeAec(AecCore* aec) {
return 0;
}
#ifdef WEBRTC_AEC_DEBUG_DUMP
// Open a new Wav file for writing. If it was already open with a different
// sample frequency, close it first.
// (Re)create the named debug-dump Wav writer. A writer that is already open
// at the requested sample rate is left untouched; one open at a different
// rate is closed first and then replaced. The file is named
// "<name><seq1>-<seq2>.wav" in the current working directory.
static void ReopenWav(rtc_WavWriter** wav_file,
                      const char* name,
                      int seq1,
                      int seq2,
                      int sample_rate) {
  char path[64];
  int len UNUSED;
  rtc_WavWriter* current = *wav_file;
  if (current != NULL) {
    if (sample_rate == rtc_WavSampleRate(current))
      return;  // already open at the right rate; nothing to do
    rtc_WavClose(current);
  }
  len = snprintf(path, sizeof(path), "%s%d-%d.wav", name, seq1, seq2);
  assert(len >= 0);                    // no output/encoding error
  assert((size_t)len < sizeof(path));  // name fit without truncation
  *wav_file = rtc_WavOpen(path, sample_rate, 1);
}
#endif // WEBRTC_AEC_DEBUG_DUMP
int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
int i;
@ -1495,18 +1483,8 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
if (WebRtc_InitBuffer(aec->far_time_buf) == -1) {
return -1;
}
{
int process_rate = sampFreq > 16000 ? 16000 : sampFreq;
ReopenWav(&aec->farFile, "aec_far",
aec->instance_index, aec->debug_dump_count, process_rate);
ReopenWav(&aec->nearFile, "aec_near",
aec->instance_index, aec->debug_dump_count, process_rate);
ReopenWav(&aec->outFile, "aec_out",
aec->instance_index, aec->debug_dump_count, process_rate);
ReopenWav(&aec->outLinearFile, "aec_out_linear",
aec->instance_index, aec->debug_dump_count, process_rate);
}
++aec->debug_dump_count;
aec->instance_index = webrtc_aec_instance_count;
OpenCoreDebugFiles(aec, &webrtc_aec_instance_count);
#endif
aec->system_delay = 0;

View File

@ -17,7 +17,7 @@ interface nsIAsyncVerifyRedirectCallback;
*
* These methods are called before onStartRequest.
*/
[scriptable, uuid(a430d870-df77-4502-9570-d46a8de33154)]
[scriptable, uuid(0197720d-37ed-4e75-8956-d0d296e4d8a6)]
interface nsIChannelEventSink : nsISupports
{
/**
@ -45,6 +45,13 @@ interface nsIChannelEventSink : nsISupports
*/
const unsigned long REDIRECT_INTERNAL = 1 << 2;
/**
* This is a special-cased redirect coming from hitting HSTS upgrade
* redirect from http to https only. In some cases this type of redirect
* may be considered as safe despite not being the-same-origin redirect.
*/
const unsigned long REDIRECT_STS_UPGRADE = 1 << 3;
/**
* Called when a redirect occurs. This may happen due to an HTTP 3xx status
* code. The purpose of this method is to notify the sink that a redirect

View File

@ -2421,6 +2421,56 @@ NS_IsInternalSameURIRedirect(nsIChannel *aOldChannel,
return NS_SUCCEEDED(oldURI->Equals(newURI, &res)) && res;
}
// Returns true only when the old->new channel pair represents exactly the
// HSTS-driven rewrite of an http URI to its https equivalent: the redirect
// must carry REDIRECT_STS_UPGRADE, the scheme must go http -> https, and
// every other URI component must match (modulo the default-port rule for
// port 80).
inline bool
NS_IsHSTSUpgradeRedirect(nsIChannel *aOldChannel,
                         nsIChannel *aNewChannel,
                         uint32_t aFlags)
{
  // The redirect must have been explicitly tagged as an STS upgrade.
  if (!(aFlags & nsIChannelEventSink::REDIRECT_STS_UPGRADE)) {
    return false;
  }

  nsCOMPtr<nsIURI> origURI;
  aOldChannel->GetURI(getter_AddRefs(origURI));
  nsCOMPtr<nsIURI> targetURI;
  aNewChannel->GetURI(getter_AddRefs(targetURI));
  if (!origURI || !targetURI) {
    return false;
  }

  // Only an http -> https transition qualifies.
  bool schemeMatches;
  if (NS_FAILED(origURI->SchemeIs("http", &schemeMatches)) || !schemeMatches) {
    return false;
  }
  if (NS_FAILED(targetURI->SchemeIs("https", &schemeMatches)) ||
      !schemeMatches) {
    return false;
  }

  // Build the expected upgraded form of the original URI and compare it
  // against what the new channel actually points at.
  nsCOMPtr<nsIURI> expectedURI;
  if (NS_FAILED(origURI->Clone(getter_AddRefs(expectedURI))) ||
      NS_FAILED(expectedURI->SetScheme(NS_LITERAL_CSTRING("https")))) {
    return false;
  }

  int32_t origPort = -1;
  if (NS_FAILED(origURI->GetPort(&origPort))) {
    return false;
  }
  // Port 80 was implicit for http, so the upgraded https URI must use the
  // default port (-1); any explicit non-default port carries over unchanged.
  expectedURI->SetPort(origPort == 80 ? -1 : origPort);

  bool equal;
  return NS_SUCCEEDED(expectedURI->Equals(targetURI, &equal)) && equal;
}
inline nsresult
NS_LinkRedirectChannels(uint32_t channelId,
nsIParentChannel *parentChannel,

View File

@ -2612,7 +2612,7 @@ CacheFileIOManager::OverLimitEvictionInternal()
SHA1Sum::Hash hash;
uint32_t cnt;
static uint32_t consecutiveFailures = 0;
rv = CacheIndex::GetEntryForEviction(&hash, &cnt);
rv = CacheIndex::GetEntryForEviction(false, &hash, &cnt);
NS_ENSURE_SUCCESS(rv, rv);
rv = DoomFileByKeyInternal(&hash, true);
@ -3600,7 +3600,8 @@ CacheFileIOManager::OpenNSPRHandle(CacheFileHandle *aHandle, bool aCreate)
if (aCreate) {
rv = aHandle->mFile->OpenNSPRFileDesc(
PR_RDWR | PR_CREATE_FILE | PR_TRUNCATE, 0600, &aHandle->mFD);
if (rv == NS_ERROR_FILE_NO_DEVICE_SPACE) {
if (rv == NS_ERROR_FILE_ALREADY_EXISTS || // error from nsLocalFileWin
rv == NS_ERROR_FILE_NO_DEVICE_SPACE) { // error from nsLocalFileUnix
LOG(("CacheFileIOManager::OpenNSPRHandle() - Cannot create a new file, we"
" might reached a limit on FAT32. Will evict a single entry and try "
"again. [hash=%08x%08x%08x%08x%08x]", LOGSHA1(aHandle->Hash())));
@ -3608,7 +3609,7 @@ CacheFileIOManager::OpenNSPRHandle(CacheFileHandle *aHandle, bool aCreate)
SHA1Sum::Hash hash;
uint32_t cnt;
rv = CacheIndex::GetEntryForEviction(&hash, &cnt);
rv = CacheIndex::GetEntryForEviction(true, &hash, &cnt);
if (NS_SUCCEEDED(rv)) {
rv = DoomFileByKeyInternal(&hash, true);
}

View File

@ -1171,7 +1171,7 @@ CacheIndex::HasEntry(const nsACString &aKey, EntryStatus *_retval)
// static
nsresult
CacheIndex::GetEntryForEviction(SHA1Sum::Hash *aHash, uint32_t *aCnt)
CacheIndex::GetEntryForEviction(bool aIgnoreEmptyEntries, SHA1Sum::Hash *aHash, uint32_t *aCnt)
{
LOG(("CacheIndex::GetEntryForEviction()"));
@ -1204,11 +1204,17 @@ CacheIndex::GetEntryForEviction(SHA1Sum::Hash *aHash, uint32_t *aCnt)
if (index->mExpirationArray[i]->mExpirationTime < now) {
memcpy(&hash, &index->mExpirationArray[i]->mHash, sizeof(SHA1Sum::Hash));
if (!IsForcedValidEntry(&hash)) {
foundEntry = true;
break;
if (IsForcedValidEntry(&hash)) {
continue;
}
if (aIgnoreEmptyEntries &&
!CacheIndexEntry::GetFileSize(index->mExpirationArray[i])) {
continue;
}
foundEntry = true;
break;
} else {
// all further entries have not expired yet
break;
@ -1233,10 +1239,17 @@ CacheIndex::GetEntryForEviction(SHA1Sum::Hash *aHash, uint32_t *aCnt)
for (j = 0; j < index->mFrecencyArray.Length(); j++) {
memcpy(&hash, &index->mFrecencyArray[j]->mHash, sizeof(SHA1Sum::Hash));
if (!IsForcedValidEntry(&hash)) {
foundEntry = true;
break;
if (IsForcedValidEntry(&hash)) {
continue;
}
if (aIgnoreEmptyEntries &&
!CacheIndexEntry::GetFileSize(index->mFrecencyArray[j])) {
continue;
}
foundEntry = true;
break;
}
if (!foundEntry)

View File

@ -641,7 +641,7 @@ public:
// cache size is over limit and also returns a total number of all entries in
// the index minus the number of forced valid entries that we encounter
// when searching (see below)
static nsresult GetEntryForEviction(SHA1Sum::Hash *aHash, uint32_t *aCnt);
static nsresult GetEntryForEviction(bool aIgnoreEmptyEntries, SHA1Sum::Hash *aHash, uint32_t *aCnt);
// Checks if a cache entry is currently forced valid. Used to prevent an entry
// (that has been forced valid) from being evicted when the cache size reaches

View File

@ -16,6 +16,7 @@
#include "nsICachingChannel.h"
#include "nsIPrincipal.h"
#include "nsIScriptError.h"
#include "nsISeekableStream.h"
#include "nsITimedChannel.h"
#include "nsIEncodedChannel.h"
@ -1672,6 +1673,39 @@ HttpBaseChannel::AddSecurityMessage(const nsAString &aMessageTag,
message->SetTag(aMessageTag);
message->SetCategory(aMessageCategory);
mSecurityConsoleMessages.AppendElement(message);
nsCOMPtr<nsIConsoleService> console(do_GetService(NS_CONSOLESERVICE_CONTRACTID));
if (!console) {
return NS_ERROR_FAILURE;
}
nsCOMPtr<nsILoadInfo> loadInfo;
GetLoadInfo(getter_AddRefs(loadInfo));
if (!loadInfo) {
return NS_ERROR_FAILURE;
}
uint32_t innerWindowID = loadInfo->GetInnerWindowID();
nsXPIDLString errorText;
rv = nsContentUtils::GetLocalizedString(
nsContentUtils::eSECURITY_PROPERTIES,
NS_ConvertUTF16toUTF8(aMessageTag).get(),
errorText);
NS_ENSURE_SUCCESS(rv, rv);
nsAutoCString spec;
if (mURI) {
mURI->GetSpec(spec);
}
nsCOMPtr<nsIScriptError> error(do_CreateInstance(NS_SCRIPTERROR_CONTRACTID));
error->InitWithWindowID(errorText, NS_ConvertUTF8toUTF16(spec),
EmptyString(), 0, 0, nsIScriptError::warningFlag,
NS_ConvertUTF16toUTF8(aMessageCategory),
innerWindowID);
console->LogMessage(error);
return NS_OK;
}

View File

@ -1757,7 +1757,8 @@ nsHttpChannel::StartRedirectChannelToHttps()
upgradedURI->SetPort(oldPort);
return StartRedirectChannelToURI(upgradedURI,
nsIChannelEventSink::REDIRECT_PERMANENT);
nsIChannelEventSink::REDIRECT_PERMANENT |
nsIChannelEventSink::REDIRECT_STS_UPGRADE);
}
void

View File

@ -104,7 +104,7 @@ class HTMLFormatter(base.BaseFormatter):
if status != expected:
status_name = "UNEXPECTED_" + status
elif status != "PASS":
elif status not in ("PASS", "SKIP"):
status_name = "EXPECTED_" + status
self.test_count[status_name] += 1

View File

@ -0,0 +1,39 @@
Common ping format
==================
This defines the top-level structure of a Telemetry ping.
It contains basic information shared between different ping types, which enables proper storage and processing of the raw pings server-side.
It also contains optional further information:
* the :doc:`environment data <environment>`, which contains important info to correlate the measurements against
* the ``clientId``, a UUID identifying a profile and allowing user-oriented correlation of data
*Note:* Neither is submitted with all ping types, due to privacy concerns. This, and the data that it can be correlated against, is inspected under the `data collection policy <https://wiki.mozilla.org/Firefox/Data_Collection>`_.
Finally, the structure also contains the `payload`, which is the specific data submitted for the respective *ping type*.
Structure::
{
type: <string>, // "main", "activation", "deletion", ...
id: <UUID>, // a UUID that identifies this ping
creationDate: <ISO date>, // the date the ping was generated
version: <number>, // the version of the ping format, currently 2
application: {
architecture: <string>, // build architecture, e.g. x86
buildId: <string>, // "20141126041045"
name: <string>, // "Firefox"
version: <string>, // "35.0"
vendor: <string>, // "Mozilla"
platformVersion: <string>, // "35.0"
xpcomAbi: <string>, // e.g. "x86-msvc"
channel: <string>, // "beta"
},
clientId: <UUID>, // optional
environment: { ... }, // optional, not all pings contain the environment
payload: { ... }, // the actual payload data for this ping type
}

View File

@ -0,0 +1,180 @@
Environment
===========
The environment consists of data that is expected to be characteristic for performance and other behavior and not expected to change too often.
Changes to most of these data points are detected (where possible and sensible) and will lead to a session split in the :doc:`main-ping`.
The environment data may also be submitted by other ping types.
*Note:* This is not submitted with all ping types due to privacy concerns. This and other data is inspected under the `data collection policy <https://wiki.mozilla.org/Firefox/Data_Collection>`_.
Structure::
{
build: {
applicationId: <string>, // nsIXULAppInfo.ID
architecture: <string>, // e.g. "x86", build architecture for the active build
architecturesInBinary: <string>, // e.g. "i386-x86_64", from nsIMacUtils.architecturesInBinary, only present for mac universal builds
buildId: <string>, // e.g. "20141126041045"
version: <string>, // e.g. "35.0"
vendor: <string>, // e.g. "Mozilla"
platformVersion: <string>, // e.g. "35.0"
xpcomAbi: <string>, // e.g. "x86-msvc"
hotfixVersion: <string>, // e.g. "20141211.01"
},
settings: {
blocklistEnabled: <bool>, // false on failure
isDefaultBrowser: <bool>, // null on failure
e10sEnabled: <bool>, // false on failure
telemetryEnabled: <bool>, // false on failure
locale: <string>, // e.g. "it", null on failure
update: {
channel: <string>, // e.g. "release", null on failure
enabled: <bool>, // false on failure
autoDownload: <bool>, // false on failure
},
userPrefs: {
// Two possible behaviours: values of the whitelisted prefs, or for some prefs we
// only record they are present with value being set to null.
},
},
profile: {
creationDate: <integer>, // integer days since UNIX epoch, e.g. 16446
resetDate: <integer>, // integer days since UNIX epoch, e.g. 16446 - optional
},
partner: {
distributionId: <string>, // pref "distribution.id", null on failure
distributionVersion: <string>, // pref "distribution.version", null on failure
partnerId: <string>, // pref mozilla.partner.id, null on failure
distributor: <string>, // pref app.distributor, null on failure
distributorChannel: <string>, // pref app.distributor.channel, null on failure
partnerNames: [
// list from prefs app.partner.<name>=<name>
],
},
system: {
memoryMB: <number>,
isWow64: <bool>, // windows-only
cpu: {
count: <number>, // e.g. 8, or null on failure
vendor: <string>, // e.g. "GenuineIntel", or null on failure
family: <string>, // null on failure
model: <string>, // null on failure
stepping: <string>, // null on failure
extensions: [
<string>,
...
// as applicable:
// "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE4A", "SSE4_1",
// "SSE4_2", "EDSP", "ARMv6", "ARMv7", "NEON"
],
},
device: { // This section is only available on mobile devices.
model: <string>, // the "device" from FHR, null on failure
manufacturer: <string>, // null on failure
hardware: <string>, // null on failure
isTablet: <bool>, // null on failure
},
os: {
name: <string>, // "Windows_NT" or null on failure
version: <string>, // e.g. "6.1", null on failure
kernelVersion: <string>, // android/b2g only or null on failure
servicePackMajor: <number>, // windows only or null on failure
servicePackMinor: <number>, // windows only or null on failure
locale: <string>, // "en" or null on failure
},
hdd: {
profile: { // hdd where the profile folder is located
model: <string>, // null on failure
revision: <string>, // null on failure
},
binary: { // hdd where the application binary is located
model: <string>, // null on failure
revision: <string>, // null on failure
},
system: { // hdd where the system files are located
model: <string>, // null on failure
revision: <string>, // null on failure
},
},
gfx: {
D2DEnabled: <bool>, // null on failure
DWriteEnabled: <bool>, // null on failure
DWriteVersion: <string>, // null on failure
adapters: [
{
description: <string>, // e.g. "Intel(R) HD Graphics 4600", null on failure
vendorID: <string>, // null on failure
deviceID: <string>, // null on failure
subsysID: <string>, // null on failure
RAM: <number>, // in MB, null on failure
driver: <string>, // null on failure
driverVersion: <string>, // null on failure
driverDate: <string>, // null on failure
GPUActive: <bool>, // currently always true for the first adapter
},
...
],
},
},
addons: {
activeAddons: { // the currently enabled addons
<addon id>: {
blocklisted: <bool>,
description: <string>,
name: <string>,
userDisabled: <bool>,
appDisabled: <bool>,
version: <string>,
scope: <integer>,
type: <string>, // "extension", "service", ...
foreignInstall: <bool>,
hasBinaryComponents: <bool>,
installDay: <number>, // days since UNIX epoch
updateDay: <number>, // days since UNIX epoch
},
...
},
theme: { // the active theme
id: <string>,
blocklisted: <bool>,
description: <string>,
name: <string>,
userDisabled: <bool>,
appDisabled: <bool>,
version: <string>,
scope: <integer>,
foreignInstall: <bool>,
hasBinaryComponents: <bool>,
installDay: <number>, // days since UNIX epoch
updateDay: <number>, // days since UNIX epoch
},
activePlugins: [
{
name: <string>,
version: <string>,
description: <string>,
blocklisted: <bool>,
disabled: <bool>,
clicktoplay: <bool>,
mimeTypes: [<string>, ...],
updateDay: <number>, // days since UNIX epoch
},
...
],
activeGMPlugins: {
<gmp id>: {
version: <string>,
userDisabled: <bool>,
applyBackgroundUpdates: <bool>,
},
...
},
activeExperiment: { // section is empty if there's no active experiment
id: <string>, // id
branch: <string>, // branch name
},
persona: <string>, // id of the current persona, null on GONK
},
}

View File

@ -0,0 +1,21 @@
=========
Telemetry
=========
Telemetry is a feature that allows data collection. This is being used to collect performance metrics and other information about how Firefox performs in the wild.
Client-side, this consists of:
* data collection in `Histograms <https://developer.mozilla.org/en-US/docs/Mozilla/Performance/Adding_a_new_Telemetry_probe>`_ and other data structures
* assembling :doc:`pings` with the general information and the data payload
* sending them to the server and local ping retention
*Note:* the `data collection policy <https://wiki.mozilla.org/Firefox/Data_Collection>`_ documents the process and requirements that are applied here.
.. toctree::
:maxdepth: 2
pings
common-ping
environment
main-ping

View File

@ -0,0 +1,56 @@
"main" ping
===========
This is the "main" Telemetry ping type, whose payload contains most of the measurements that are used to track the performance and health of Firefox in the wild.
It includes the histograms and other performance and diagnostic data.
This ping is triggered by different scenarios, which is documented by the ``reason`` field:
* ``environment-change`` - the :doc:`environment` changed, so the session measurements got reset and a new subsession starts
* ``shutdown`` - triggered when the browser session ends
* ``daily`` - a session split triggered in 24-hour intervals at local midnight
* ``saved-session`` - the *"classic"* Telemetry payload with measurements covering the whole browser session (only submitted for a transition period)
Most reasons lead to a session split, initiating a new *subsession*. We reset important measurements for those subsessions.
Structure::
{
version: 4,
info: {
reason: <string>, // what triggered this ping: "saved-session", "environment-change", "shutdown", ...
revision: <string>, // the Histograms.json revision
timezoneOffset: <number>, // time-zone offset from UTC, in minutes, for the current locale
previousBuildId: <string>,
sessionId: <uuid>, // random session id, shared by subsessions
subsessionId: <uuid>, // random subsession id
previousSubsessionId: <uuid>, // subsession id of the previous subsession (even if it was in a different session),
// null on first run.
subsessionCounter: <number>, // the running no. of this subsession since the start of the browser session
profileSubsessionCounter: <number>, // the running no. of all subsessions for the whole profile life time
sessionStartDate: <ISO date>, // daily precision
subsessionStartDate: <ISO date>, // daily precision
subsessionLength: <number>, // the subsession length in seconds
},
childPayloads: {...}, // only present with e10s; a reduced payload from content processes
simpleMeasurements: { ... },
histograms: {},
keyedHistograms: {},
chromeHangs: {},
threadHangStats: {},
log: [],
fileIOReports: {...},
lateWrites: {...},
addonDetails: { ... },
addonHistograms: {...},
UIMeasurements: {...},
slowSQL: {...},
slowSQLstartup: {...},
}

View File

@ -0,0 +1,28 @@
.. _telemetry_pings:
=====================
Telemetry pings
=====================
A *Telemetry ping* is the data that we send to Mozilla's Telemetry servers.
That data is stored as a JSON object client-side and contains information common to all pings as well as a payload specific to a certain *ping type*.
The top-level structure is defined by the :doc:`common-ping` format.
It contains some basic information shared between different ping types, the :doc:`environment` data (optional) and the data specific to the *ping type*, the *payload*.
Submission
==========
Pings are submitted via a common API on ``TelemetryPing``. It allows callers to choose a custom retention period that determines how long pings are kept on disk if submission wasn't successful.
If a ping fails to submit (e.g. because of a missing internet connection), Telemetry will keep retrying to submit it until its retention period has expired.
*Note:* the :doc:`main pings <main-ping>` are kept locally even after successful submission to enable the HealthReport and SelfSupport features. They will be deleted after their retention period of 180 days.
Ping types
==========
* :doc:`main <main-ping>` - contains the information collected by Telemetry (Histograms, hang stacks, ...)
* ``activation`` - *planned* - sent right after installation or profile creation
* ``upgrade`` - *planned* - sent right after an upgrade
* ``deletion`` - *planned* - on opt-out we may have to tell the server to delete user data

View File

@ -60,3 +60,4 @@ LOCAL_INCLUDES += [
'/xpcom/threads',
]
SPHINX_TREES['telemetry'] = 'docs'

View File

@ -115,8 +115,12 @@ static bool gAppShellMethodsSwizzled = false;
if (expiration) {
mozilla::HangMonitor::Suspend();
}
return [super nextEventMatchingMask:mask
untilDate:expiration inMode:mode dequeue:flag];
NSEvent* nextEvent = [super nextEventMatchingMask:mask
untilDate:expiration inMode:mode dequeue:flag];
if (expiration) {
mozilla::HangMonitor::NotifyActivity();
}
return nextEvent;
}
@end

View File

@ -51,15 +51,15 @@ NESTED(_NS_InvokeByIndex, FRAMESZ, ra)
# assume full size of 16 bytes per param to be safe
sll v0, 4 # 16 bytes * num params
subu sp, sp, v0 # make room
PTR_SUBU sp, sp, v0 # make room
move a0, sp # a0 - param stack address
# create temporary stack space to write int and fp regs
subu sp, 64 # 64 = 8 regs of 8 bytes
PTR_SUBU sp, 64 # 64 = 8 regs of 8 bytes
move a3, sp
# save the old sp and save the arg stack
subu sp, sp, 16
PTR_SUBU sp, sp, 16
REG_S t0, 0(sp)
REG_S a0, 8(sp)
@ -78,12 +78,12 @@ NESTED(_NS_InvokeByIndex, FRAMESZ, ra)
# calculate the function we need to jump to,
# which must then be saved in t9
lw t9, 0(a0)
addu t9, t9, t1
lw t9, (t9)
PTR_L t9, 0(a0)
PTR_ADDU t9, t9, t1
PTR_L t9, (t9)
# get register save area from invoke_copy_to_stack
subu t1, t3, 64
PTR_SUBU t1, t3, 64
# a1..a7 and f13..f19 should now be set to what
# invoke_copy_to_stack told us. skip a0 and f12

View File

@ -7,8 +7,8 @@
#include "xptcprivate.h"
#if (_MIPS_SIM != _ABIN32)
#error "This code is for MIPS N32 only"
#if (_MIPS_SIM != _ABIN32) && (_MIPS_SIM != _ABI64)
#error "This code is for MIPS n32/n64 only"
#endif
extern "C" uint32_t

View File

@ -88,15 +88,15 @@ sharedstub:
# a2 is stack address where extra function params
# are stored that do not fit in registers
move a2, sp
addi a2, FRAMESZ
PTR_ADDI a2, FRAMESZ
# a3 is stack address of a1..a7
move a3, sp
addi a3, A1OFF
PTR_ADDI a3, A1OFF
# a4 is stack address of f13..f19
move a4, sp
addi a4, F13OFF
PTR_ADDI a4, F13OFF
# PrepareAndDispatch(that, methodIndex, args, gprArgs, fpArgs)
# a0 a1 a2 a3 a4

View File

@ -6,12 +6,12 @@
#include "xptcprivate.h"
#include "xptiprivate.h"
#if (_MIPS_SIM != _ABIN32)
#error "This code is for MIPS N32 only"
#if (_MIPS_SIM != _ABIN32) && (_MIPS_SIM != _ABI64)
#error "This code is for MIPS n32/n64 only"
#endif
/*
* This is for MIPS N32 ABI
* This is for MIPS n32/n64 ABI
*
* When we're called, the "gp" registers are stored in gprData and
* the "fp" registers are stored in fprData. There are 8 regs