Bug 1218597 - Limit the number of stack frames serialized in core dumps; r=froydnj

Nick Fitzgerald 2015-10-27 14:13:39 -07:00
parent e090929fbf
commit 7ef7888689
5 changed files with 94 additions and 5 deletions

View File

@@ -110,7 +110,7 @@ message Node {
bytes typeName = 2;
uint64 typeNameRef = 3;
}
optional uint64 size = 4;
repeated Edge edges = 5;
optional StackFrame allocationStack = 6;

View File

@@ -104,6 +104,15 @@ parseMessage(ZeroCopyInputStream& stream, MessageType& message)
// 64MB limit is applied per-message rather than to the whole stream.
CodedInputStream codedStream(&stream);
// The protobuf message nesting that core dumps exhibit is dominated by
// allocation stacks' frames. In the most deeply nested case, each frame has
// two messages: a StackFrame message and a StackFrame::Data message. These
// frames sit on top of a small, constant number of other messages. There are
// at most MAX_STACK_DEPTH frames, so we multiply that by 3 to make room for
// the two messages per frame plus some headroom for the constant number of
// non-dominating messages.
codedStream.SetRecursionLimit(HeapSnapshot::MAX_STACK_DEPTH * 3);
// Because protobuf messages aren't self-delimiting, we serialize each message
// preceded by its size in bytes. When deserializing, we read this size and
// then limit reading from the stream to the given byte size. If we didn't,
@@ -115,7 +124,8 @@ parseMessage(ZeroCopyInputStream& stream, MessageType& message)
auto limit = codedStream.PushLimit(size);
if (NS_WARN_IF(!message.ParseFromCodedStream(&codedStream)) ||
- NS_WARN_IF(!codedStream.ConsumedEntireMessage()))
+ NS_WARN_IF(!codedStream.ConsumedEntireMessage()) ||
+ NS_WARN_IF(codedStream.BytesUntilLimit() != 0))
{
return false;
}
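For readers unfamiliar with the framing scheme these comments describe, the pattern is roughly the self-contained sketch below. It is illustrative only: `parseOneMessage` and `MessageType` are placeholder names, and the real implementation is the `parseMessage` template shown in the hunks above. Each message is written preceded by its byte size; the reader raises the recursion limit to cover the two nested messages per stack frame, reads the size, and confines parsing to exactly that many bytes.

// Sketch of the size-prefixed, recursion-limited read described above.
// Not Firefox code; names are illustrative.
#include <cstdint>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>

static const int MAX_STACK_DEPTH = 60;  // mirrors HeapSnapshot::MAX_STACK_DEPTH

template <typename MessageType>
bool parseOneMessage(google::protobuf::io::ZeroCopyInputStream& stream,
                     MessageType& message)
{
  google::protobuf::io::CodedInputStream codedStream(&stream);

  // Each frame contributes two nested messages (StackFrame and
  // StackFrame::Data); multiplying by 3 leaves headroom for the small,
  // constant number of enclosing messages.
  codedStream.SetRecursionLimit(MAX_STACK_DEPTH * 3);

  // Protobuf messages are not self-delimiting, so each one is preceded by
  // its size in bytes. Read the size, then cap reading at that many bytes.
  uint32_t size = 0;
  if (!codedStream.ReadVarint32(&size))
    return false;

  auto limit = codedStream.PushLimit(size);
  bool ok = message.ParseFromCodedStream(&codedStream) &&
            codedStream.ConsumedEntireMessage() &&
            codedStream.BytesUntilLimit() == 0;
  codedStream.PopLimit(limit);
  return ok;
}

With MAX_STACK_DEPTH at 60, the deepest stacks need roughly 120 levels of message nesting, so a limit of 180 comfortably covers them while still bounding how deeply a core dump can make the deserializer recurse.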
@@ -909,7 +919,8 @@ class MOZ_STACK_CLASS StreamWriter : public CoreDumpWriter
return true;
}
- protobuf::StackFrame* getProtobufStackFrame(JS::ubi::StackFrame& frame) {
+ protobuf::StackFrame* getProtobufStackFrame(JS::ubi::StackFrame& frame,
+                                             size_t depth = 1) {
// NB: de-duplicated string properties must be written in the same order
// here as they are read in `HeapSnapshot::saveStackFrame` or else indices
// in references to already serialized strings will be off.
@@ -957,8 +968,8 @@ class MOZ_STACK_CLASS StreamWriter : public CoreDumpWriter
}
auto parent = frame.parent();
- if (parent) {
-   auto protobufParent = getProtobufStackFrame(parent);
+ if (parent && depth < HeapSnapshot::MAX_STACK_DEPTH) {
+   auto protobufParent = getProtobufStackFrame(parent, depth + 1);
if (!protobufParent)
return nullptr;
data->set_allocated_parent(protobufParent);

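The hunks above cap the writer's walk of the parent chain at MAX_STACK_DEPTH frames by threading a depth counter through the recursion, so frames beyond the cap are simply not serialized. A minimal standalone sketch of that pattern follows; the `Frame` and `ProtoFrame` types are hypothetical stand-ins for `JS::ubi::StackFrame` and `protobuf::StackFrame`, not the patch's actual code.

#include <cstddef>
#include <memory>

static const size_t MAX_STACK_DEPTH = 60;

// A saved allocation stack is a linked list from youngest frame to oldest
// parent.
struct Frame {
  const Frame* parent = nullptr;
  int line = 0;
};

// Stand-in for the protobuf message; parents nest recursively, which is what
// drives protobuf's message-nesting depth during deserialization.
struct ProtoFrame {
  std::unique_ptr<ProtoFrame> parent;
  int line = 0;
};

// Serialize `frame` and at most MAX_STACK_DEPTH - 1 of its ancestors. `depth`
// counts frames already emitted, so the recursion stops at the cap and any
// older frames are dropped from the core dump.
std::unique_ptr<ProtoFrame> serializeFrame(const Frame& frame, size_t depth = 1)
{
  auto out = std::make_unique<ProtoFrame>();
  out->line = frame.line;
  if (frame.parent && depth < MAX_STACK_DEPTH)
    out->parent = serializeFrame(*frame.parent, depth + 1);
  return out;
}

Dropping the oldest frames rather than failing keeps the snapshot readable, and it is what the new test below exercises: a 200-frame allocation stack comes back from the census with a depth of at most 60.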
View File

@@ -71,6 +71,13 @@ class HeapSnapshot final : public nsISupports
bool saveStackFrame(const protobuf::StackFrame& frame,
StackFrameId& outFrameId);
public:
// The maximum number of stack frames that we will serialize into a core
// dump. This helps prevent over-recursion in the protobuf library when
// deserializing stacks.
static const size_t MAX_STACK_DEPTH = 60;
private:
// If present, a timestamp in the same units that `PR_Now` gives.
Maybe<uint64_t> timestamp;

View File

@@ -0,0 +1,70 @@
/* Any copyright is dedicated to the Public Domain.
http://creativecommons.org/publicdomain/zero/1.0/ */
// Test that we can save a core dump with very deep allocation stacks and read
// it back into a HeapSnapshot.
function stackDepth(stack) {
return stack ? 1 + stackDepth(stack.parent) : 0;
}
function run_test() {
// Create a Debugger observing a debuggee's allocations.
const debuggee = new Cu.Sandbox(null);
const dbg = new Debugger(debuggee);
dbg.memory.trackingAllocationSites = true;
// Allocate some objects in the debuggee that will have their allocation
// stacks recorded by the Debugger.
debuggee.eval("this.objects = []");
debuggee.eval(
(function recursiveAllocate(n) {
if (n <= 0)
return;
// Make sure to recurse before pushing the object so that when TCO is
// implemented sometime in the future, it doesn't invalidate this test.
recursiveAllocate(n - 1);
this.objects.push({});
}).toString()
);
debuggee.eval("recursiveAllocate = recursiveAllocate.bind(this);");
debuggee.eval("recursiveAllocate(200);");
// Now save a snapshot that will include the allocation stacks and read it
// back again.
const filePath = ChromeUtils.saveHeapSnapshot({ runtime: true });
ok(true, "Should be able to save a snapshot.");
const snapshot = ChromeUtils.readHeapSnapshot(filePath);
ok(snapshot, "Should be able to read a heap snapshot");
ok(snapshot instanceof HeapSnapshot, "Should be an instanceof HeapSnapshot");
const report = snapshot.takeCensus({
breakdown: { by: "allocationStack",
then: { by: "count", bytes: true, count: true },
noStack: { by: "count", bytes: true, count: true }
}
});
// Keep this synchronized with `HeapSnapshot::MAX_STACK_DEPTH`!
const MAX_STACK_DEPTH = 60;
let foundStacks = false;
report.forEach((v, k) => {
if (k === "noStack") {
return;
}
foundStacks = true;
const depth = stackDepth(k);
dumpn("Stack depth is " + depth);
ok(depth <= MAX_STACK_DEPTH,
"Every stack should have depth less than or equal to the maximum stack depth");
});
ok(foundStacks);
do_test_finished();
}

View File

@@ -34,6 +34,7 @@ support-files =
[test_HeapAnalyses_takeCensus_06.js]
[test_HeapAnalyses_takeCensus_07.js]
[test_HeapSnapshot_creationTime_01.js]
[test_HeapSnapshot_deepStack_01.js]
[test_HeapSnapshot_takeCensus_01.js]
[test_HeapSnapshot_takeCensus_02.js]
[test_HeapSnapshot_takeCensus_03.js]