Imported Upstream version 6.10.0.49

Former-commit-id: 1d6753294b2993e1fbf92de9366bb9544db4189b
Xamarin Public Jenkins (auto-signing)
2020-01-16 16:38:04 +00:00
parent d94e79959b
commit 468663ddbb
48518 changed files with 2789335 additions and 61176 deletions

View File

@@ -0,0 +1,5 @@
LEVEL = ../../make
CXX_SOURCES := main.cpp
include $(LEVEL)/Makefile.rules

View File

@@ -0,0 +1,73 @@
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBenchmarkContinue(BenchBase):
mydir = TestBase.compute_mydir(__file__)
@benchmarks_test
def test_run_command(self):
"""Benchmark different ways to continue a process"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark different ways to continue a process"""
self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(
self, "// break here"))
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
        # This function removes the custom formats so that the next test
        # case starts with a clean slate.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
runCmd_sw = Stopwatch()
lldbutil_sw = Stopwatch()
        for i in range(15):
runCmd_sw.start()
self.runCmd("continue")
runCmd_sw.stop()
        for i in range(15):
lldbutil_sw.start()
lldbutil.continue_to_breakpoint(self.process(), bkpt)
lldbutil_sw.stop()
print("runCmd: %s\nlldbutil: %s" % (runCmd_sw, lldbutil_sw))

View File

@@ -0,0 +1,36 @@
#include <map>
#define intint_map std::map<int, int>
int g_the_foo = 0;
int thefoo_rw(int arg = 1)
{
if (arg < 0)
arg = 0;
if (!arg)
arg = 1;
g_the_foo += arg;
return g_the_foo;
}
int main()
{
intint_map ii;
for (int i = 0; i < 15; i++)
{
ii[i] = i + 1;
thefoo_rw(i); // break here
}
ii.clear();
for (int j = 0; j < 15; j++)
{
ii[j] = j + 1;
thefoo_rw(j); // break here
}
return 0;
}

View File

@@ -0,0 +1,165 @@
"""Disassemble lldb's Driver::MainLoop() functions comparing lldb against gdb."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
def is_exe(fpath):
"""Returns true if fpath is an executable."""
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
class DisassembleDriverMainLoop(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
"""
Note that lldbtest_config.lldbExec can be specified with the LLDB_EXEC env variable (see
dotest.py), and gdbExec can be specified with the GDB_EXEC env variable.
        This provides flexibility in specifying different versions of gdb for
comparison purposes.
"""
BenchBase.setUp(self)
        # If the GDB_EXEC env var is specified, use it; otherwise, use the
        # gdb found in your PATH.
if "GDB_EXEC" in os.environ and is_exe(os.environ["GDB_EXEC"]):
self.gdbExec = os.environ["GDB_EXEC"]
else:
self.gdbExec = "gdb"
self.exe = lldbtest_config.lldbExec
self.function = 'Driver::MainLoop()'
self.lldb_avg = None
self.gdb_avg = None
self.count = 5
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_run_lldb_then_gdb(self):
"""Test disassembly on a large function with lldb vs. gdb."""
print()
print("lldb path: %s" % lldbtest_config.lldbExec)
print("gdb path: %s" % self.gdbExec)
print()
self.run_lldb_disassembly(self.exe, self.function, self.count)
print("lldb benchmark:", self.stopwatch)
self.run_gdb_disassembly(self.exe, self.function, self.count)
print("gdb benchmark:", self.stopwatch)
print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_run_gdb_then_lldb(self):
"""Test disassembly on a large function with lldb vs. gdb."""
print()
print("lldb path: %s" % lldbtest_config.lldbExec)
print("gdb path: %s" % self.gdbExec)
print()
self.run_gdb_disassembly(self.exe, self.function, self.count)
print("gdb benchmark:", self.stopwatch)
self.run_lldb_disassembly(self.exe, self.function, self.count)
print("lldb benchmark:", self.stopwatch)
print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
def run_lldb_disassembly(self, exe, function, count):
import pexpect
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('breakpoint set -F %s' % function)
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('disassemble -f')
child.expect_exact(prompt)
child.sendline('next')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.lldb_avg = self.stopwatch.avg()
if self.TraceOn():
print("lldb disassembly benchmark:", str(self.stopwatch))
self.child = None
def run_gdb_disassembly(self, exe, function, count):
import pexpect
# Set self.child_prompt, which is "(gdb) ".
self.child_prompt = '(gdb) '
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s --nx %s' % (self.gdbExec, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('break %s' % function)
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('disassemble')
child.expect_exact(prompt)
child.sendline('next')
child.expect_exact(prompt)
child.sendline('quit')
child.expect_exact('The program is running. Exit anyway?')
child.sendline('y')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.gdb_avg = self.stopwatch.avg()
if self.TraceOn():
print("gdb disassembly benchmark:", str(self.stopwatch))
self.child = None
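
The prompt-driven send/expect loop above is the core of every pexpect-based benchmark in this suite. Below is a minimal stand-alone sketch of the same pattern; plain python3 stands in for lldb/gdb so the sketch runs anywhere pexpect is installed (the commands and prompt here are illustrative, not part of the test suite).

import sys
import pexpect

prompt = '>>> '
# Spawn an interactive child on a pty; python3 is the stand-in inferior.
child = pexpect.spawn('python3', encoding='utf-8')
child.logfile_read = sys.stdout      # echo whatever the child sends back
child.expect_exact(prompt)           # wait for the first prompt
for command in ['1 + 1', 'print("hello")']:
    child.sendline(command)          # issue one command...
    child.expect_exact(prompt)       # ...and block until it completes
child.sendline('exit()')
try:
    child.expect(pexpect.EOF)        # reap the child process
except pexpect.ExceptionPexpect:
    pass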

View File

@@ -0,0 +1,70 @@
"""Test lldb's disassemblt speed. This bench deliberately attaches to an lldb
inferior and traverses the stack for thread0 to arrive at frame with function
'MainLoop'. It is important to specify an lldb executable as the inferior."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
class AttachThenDisassemblyBench(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.exe = lldbtest_config.lldbExec
self.count = 10
@benchmarks_test
@no_debug_info_test
def test_attach_then_disassembly(self):
"""Attach to a spawned lldb process then run disassembly benchmarks."""
print()
self.run_lldb_attach_then_disassembly(self.exe, self.count)
print("lldb disassembly benchmark:", self.stopwatch)
def run_lldb_attach_then_disassembly(self, exe, count):
target = self.dbg.CreateTarget(exe)
        # Spawn a new lldb process and suppress its stdout unless in
        # TraceOn() mode.
import subprocess
popen = subprocess.Popen([exe, self.lldbOption], stdout=open(
os.devnull, 'w') if not self.TraceOn() else None)
if self.TraceOn():
print("pid of spawned process: %d" % popen.pid)
# Attach to the launched lldb process.
listener = lldb.SBListener("my.attach.listener")
error = lldb.SBError()
process = target.AttachToProcessWithID(listener, popen.pid, error)
# Set thread0 as the selected thread, followed by the 'MainLoop' frame
# as the selected frame. Then do disassembly on the function.
thread0 = process.GetThreadAtIndex(0)
process.SetSelectedThread(thread0)
        found = False
        for i, f in enumerate(thread0):
            # print("frame#%d %s" % (i, f.GetFunctionName()))
            if "MainLoop" in f.GetFunctionName():
                found = True
                thread0.SetSelectedFrame(i)
                if self.TraceOn():
                    print("Found frame#%d for function 'MainLoop'" % i)
                break
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
self.runCmd("disassemble -f")

View File

@@ -0,0 +1,120 @@
"""Disassemble lldb's Driver::MainLoop() functions comparing Xcode 4.1 vs. 4.2's gdb."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class XCode41Vs42GDBDisassembly(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.gdb_41_exe = '/Xcode41/usr/bin/gdb'
self.gdb_42_exe = '/Developer/usr/bin/gdb'
self.exe = lldbtest_config.lldbExec
self.function = 'Driver::MainLoop()'
self.gdb_41_avg = None
self.gdb_42_avg = None
self.count = 5
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_run_41_then_42(self):
"""Test disassembly on a large function with 4.1 vs. 4.2's gdb."""
print()
self.run_gdb_disassembly(
self.gdb_41_exe,
self.exe,
self.function,
self.count)
print("4.1 gdb benchmark:", self.stopwatch)
self.gdb_41_avg = self.stopwatch.avg()
self.run_gdb_disassembly(
self.gdb_42_exe,
self.exe,
self.function,
self.count)
print("4.2 gdb benchmark:", self.stopwatch)
self.gdb_42_avg = self.stopwatch.avg()
print("gdb_42_avg/gdb_41_avg: %f" %
(self.gdb_42_avg / self.gdb_41_avg))
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_run_42_then_41(self):
"""Test disassembly on a large function with 4.1 vs. 4.2's gdb."""
print()
self.run_gdb_disassembly(
self.gdb_42_exe,
self.exe,
self.function,
self.count)
print("4.2 gdb benchmark:", self.stopwatch)
self.gdb_42_avg = self.stopwatch.avg()
self.run_gdb_disassembly(
self.gdb_41_exe,
self.exe,
self.function,
self.count)
print("4.1 gdb benchmark:", self.stopwatch)
self.gdb_41_avg = self.stopwatch.avg()
print("gdb_42_avg/gdb_41_avg: %f" %
(self.gdb_42_avg / self.gdb_41_avg))
def run_gdb_disassembly(self, gdb_exe_path, exe, function, count):
import pexpect
# Set self.child_prompt, which is "(gdb) ".
self.child_prompt = '(gdb) '
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s --nx %s' % (gdb_exe_path, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('break %s' % function)
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('disassemble')
child.expect_exact(prompt)
child.sendline('next')
child.expect_exact(prompt)
child.sendline('quit')
child.expect_exact('The program is running. Exit anyway?')
child.sendline('y')
try:
self.child.expect(pexpect.EOF)
except:
pass
if self.TraceOn():
print("gdb disassembly benchmark:", str(self.stopwatch))
self.child = None

View File

@@ -0,0 +1,5 @@
LEVEL = ../../make
CXX_SOURCES := main.cpp
include $(LEVEL)/Makefile.rules

View File

@@ -0,0 +1,83 @@
"""Test lldb's expression evaluations and collect statistics."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class ExpressionEvaluationCase(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.source = 'main.cpp'
self.line_to_break = line_number(
self.source, '// Set breakpoint here.')
self.count = 25
@benchmarks_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_expr_cmd(self):
"""Test lldb's expression commands and collect statistics."""
self.build()
self.exe_name = 'a.out'
print()
self.run_lldb_repeated_exprs(self.exe_name, self.count)
print("lldb expr cmd benchmark:", self.stopwatch)
def run_lldb_repeated_exprs(self, exe_name, count):
import pexpect
exe = os.path.join(os.getcwd(), exe_name)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline(
'breakpoint set -f %s -l %d' %
(self.source, self.line_to_break))
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
expr_cmd1 = 'expr ptr[j]->point.x'
expr_cmd2 = 'expr ptr[j]->point.y'
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.child = None
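
Both expression benchmarks locate the breakpoint line with line_number(self.source, '// Set breakpoint here.'). A minimal sketch of what such a helper does is below; the real helper lives in lldbsuite.test.lldbtest and may differ in detail.

def line_number_sketch(filename, marker):
    """Return the 1-based line number of the first line containing marker."""
    with open(filename) as f:
        for lineno, line in enumerate(f, start=1):
            if marker in line:
                return lineno
    raise Exception("%r not found in %s" % (marker, filename))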

View File

@@ -0,0 +1,141 @@
"""Test evaluating expressions repeatedly comparing lldb against gdb."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test.lldbbench import BenchBase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class RepeatedExprsCase(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.source = 'main.cpp'
self.line_to_break = line_number(
self.source, '// Set breakpoint here.')
self.lldb_avg = None
self.gdb_avg = None
self.count = 100
@benchmarks_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_compare_lldb_to_gdb(self):
"""Test repeated expressions with lldb vs. gdb."""
self.build()
self.exe_name = 'a.out'
print()
self.run_lldb_repeated_exprs(self.exe_name, self.count)
print("lldb benchmark:", self.stopwatch)
self.run_gdb_repeated_exprs(self.exe_name, self.count)
print("gdb benchmark:", self.stopwatch)
print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
def run_lldb_repeated_exprs(self, exe_name, count):
import pexpect
exe = os.path.join(os.getcwd(), exe_name)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline(
'breakpoint set -f %s -l %d' %
(self.source, self.line_to_break))
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
expr_cmd1 = 'expr ptr[j]->point.x'
expr_cmd2 = 'expr ptr[j]->point.y'
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline('process continue')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.lldb_avg = self.stopwatch.avg()
if self.TraceOn():
print("lldb expression benchmark:", str(self.stopwatch))
self.child = None
def run_gdb_repeated_exprs(self, exe_name, count):
import pexpect
exe = os.path.join(os.getcwd(), exe_name)
# Set self.child_prompt, which is "(gdb) ".
self.child_prompt = '(gdb) '
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn('gdb --nx %s' % exe)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('break %s:%d' % (self.source, self.line_to_break))
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
expr_cmd1 = 'print ptr[j]->point.x'
expr_cmd2 = 'print ptr[j]->point.y'
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
child.sendline(expr_cmd1)
child.expect_exact(prompt)
child.sendline(expr_cmd2)
child.expect_exact(prompt)
child.sendline('continue')
child.expect_exact(prompt)
child.sendline('quit')
child.expect_exact('The program is running. Exit anyway?')
child.sendline('y')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.gdb_avg = self.stopwatch.avg()
if self.TraceOn():
print("gdb expression benchmark:", str(self.stopwatch))
self.child = None
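
The test above reports lldb_avg/gdb_avg, the ratio of the two tools' average per-lap times. Stripped of pexpect, the comparison pattern reduces to the small runnable sketch below (the two lambdas are arbitrary stand-ins for the lldb and gdb rounds).

import time

def average_seconds(work, laps):
    """Average wall-clock seconds per call of work() over the given laps."""
    total = 0.0
    for _ in range(laps):
        t0 = time.time()
        work()
        total += time.time() - t0
    return total / laps

if __name__ == '__main__':
    lldb_avg = average_seconds(lambda: sum(range(100000)), 5)
    gdb_avg = average_seconds(lambda: sum(range(1000)), 5)
    print("lldb_avg/gdb_avg: %f" % (lldb_avg / gdb_avg))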

View File

@@ -0,0 +1,51 @@
//===-- main.cpp ------------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
class Point {
public:
int x;
int y;
Point(int a, int b):
x(a),
y(b)
{}
};
class Data {
public:
int id;
Point point;
Data(int i):
id(i),
point(0, 0)
{}
};
int main(int argc, char const *argv[]) {
Data *data[1000];
Data **ptr = data;
for (int i = 0; i < 1000; ++i) {
ptr[i] = new Data(i);
ptr[i]->point.x = i;
ptr[i]->point.y = i+1;
}
printf("Finished populating data.\n");
for (int j = 0; j < 1000; ++j) {
bool dump = argc > 1; // Set breakpoint here.
// Evaluate a couple of expressions (2*1000 = 2000 exprs):
// expr ptr[j]->point.x
// expr ptr[j]->point.y
if (dump) {
printf("data[%d] = %d (%d, %d)\n", j, ptr[j]->id, ptr[j]->point.x, ptr[j]->point.y);
}
}
return 0;
}

View File

@@ -0,0 +1,76 @@
"""Test lldb's response time for 'frame variable' command."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test import configuration
from lldbsuite.test import lldbtest_config
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
class FrameVariableResponseBench(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.exe = lldbtest_config.lldbExec
self.break_spec = '-n main'
self.count = 20
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_startup_delay(self):
"""Test response time for the 'frame variable' command."""
print()
self.run_frame_variable_bench(self.exe, self.break_spec, self.count)
print("lldb frame variable benchmark:", self.stopwatch)
def run_frame_variable_bench(self, exe, break_spec, count):
import pexpect
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Set our breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
# Run the target and expect it to be stopped due to breakpoint.
child.sendline('run') # Aka 'process launch'.
child.expect_exact(prompt)
with self.stopwatch:
# Measure the 'frame variable' response time.
child.sendline('frame variable')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
        # The test is about to end; if we get here, the child process has
        # been terminated. Mark it so.
self.child = None

View File

@@ -0,0 +1,5 @@
LEVEL = ../../make
CXX_SOURCES := main.cpp
include $(LEVEL)/Makefile.rules

View File

@@ -0,0 +1,66 @@
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBenchmarkLibcxxList(BenchBase):
mydir = TestBase.compute_mydir(__file__)
@benchmarks_test
def test_run_command(self):
"""Benchmark the std::list data formatter (libc++)"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark the std::list data formatter (libc++)"""
self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(
self, "break here"))
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
        # This function removes the custom formats so that the next test
        # case starts with a clean slate.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
sw = Stopwatch()
sw.start()
self.expect('frame variable -A list', substrs=['[300]', '300'])
sw.stop()
print("time to print: %s" % (sw))

View File

@@ -0,0 +1,11 @@
#include <list>
int main()
{
std::list<int> list;
for (int i = 0;
i < 1500;
i++)
list.push_back(i);
return list.size(); // break here
}

View File

@@ -0,0 +1,5 @@
LEVEL = ../../make
CXX_SOURCES := main.cpp
include $(LEVEL)/Makefile.rules

View File

@@ -0,0 +1,66 @@
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.lldbbench import *
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestBenchmarkLibcxxMap(BenchBase):
mydir = TestBase.compute_mydir(__file__)
@benchmarks_test
def test_run_command(self):
"""Benchmark the std::map data formatter (libc++)"""
self.build()
self.data_formatter_commands()
def setUp(self):
# Call super's setUp().
BenchBase.setUp(self)
def data_formatter_commands(self):
"""Benchmark the std::map data formatter (libc++)"""
self.runCmd("file a.out", CURRENT_EXECUTABLE_SET)
bkpt = self.target().FindBreakpointByID(
lldbutil.run_break_set_by_source_regexp(
self, "break here"))
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
        # This function removes the custom formats so that the next test
        # case starts with a clean slate.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
sw = Stopwatch()
sw.start()
self.expect('frame variable -A map', substrs=['[300]', '300'])
sw.stop()
print("time to print: %s" % (sw))

View File

@@ -0,0 +1,11 @@
#include <map>
int main()
{
std::map<int, int> map;
for (int i = 0;
i < 1500;
i++)
map[i] = i;
return map.size(); // break here
}

View File

@@ -0,0 +1,92 @@
"""Test lldb's startup delays creating a target, setting a breakpoint, and run to breakpoint stop."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test import configuration
from lldbsuite.test import lldbtest_config
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbbench import *
class StartupDelaysBench(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
# Create self.stopwatch2 for measuring "set first breakpoint".
# The default self.stopwatch is for "create fresh target".
self.stopwatch2 = Stopwatch()
# Create self.stopwatch3 for measuring "run to breakpoint".
self.stopwatch3 = Stopwatch()
self.exe = lldbtest_config.lldbExec
self.break_spec = '-n main'
self.count = 30
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_startup_delay(self):
"""Test start up delays creating a target, setting a breakpoint, and run to breakpoint stop."""
print()
self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
print(
"lldb startup delay (create fresh target) benchmark:",
self.stopwatch)
print(
"lldb startup delay (set first breakpoint) benchmark:",
self.stopwatch2)
print(
"lldb startup delay (run to breakpoint) benchmark:",
self.stopwatch3)
def run_startup_delays_bench(self, exe, break_spec, count):
import pexpect
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
        self.stopwatch.reset()
        self.stopwatch2.reset()
        self.stopwatch3.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s' %
(lldbtest_config.lldbExec, self.lldbOption))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
with self.stopwatch:
# Create a fresh target.
child.sendline('file %s' % exe) # Aka 'target create'.
child.expect_exact(prompt)
with self.stopwatch2:
# Read debug info and set the first breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
with self.stopwatch3:
# Run to the breakpoint just set.
child.sendline('run')
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
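
The three stopwatches above partition one debug session into phases (create target, set first breakpoint, run to breakpoint) and accumulate each phase across rounds. The same accounting, reduced to a runnable sketch with time.sleep() calls standing in for the real debugger commands:

import time

rounds = 3
totals = {'create target': 0.0,
          'set breakpoint': 0.0,
          'run to breakpoint': 0.0}

def timed_phase(name, work):
    t0 = time.time()
    work()
    totals[name] += time.time() - t0

for _ in range(rounds):
    timed_phase('create target', lambda: time.sleep(0.005))
    timed_phase('set breakpoint', lambda: time.sleep(0.002))
    timed_phase('run to breakpoint', lambda: time.sleep(0.008))

for name in totals:
    print("%s: %f avg over %d rounds" % (name, totals[name] / rounds, rounds))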

View File

@@ -0,0 +1,76 @@
"""Test lldb's stepping speed."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test import configuration
from lldbsuite.test import lldbtest_config
from lldbsuite.test.lldbbench import *
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class SteppingSpeedBench(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.exe = lldbtest_config.lldbExec
self.break_spec = '-n main'
self.count = 50
#print("self.exe=%s" % self.exe)
#print("self.break_spec=%s" % self.break_spec)
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_run_lldb_steppings(self):
"""Test lldb steppings on a large executable."""
print()
self.run_lldb_steppings(self.exe, self.break_spec, self.count)
print("lldb stepping benchmark:", self.stopwatch)
def run_lldb_steppings(self, exe, break_spec, count):
import pexpect
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
                # Step over one source line.
child.sendline('next') # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.child = None

View File

@@ -0,0 +1,131 @@
"""Benchmark the turnaround time starting a debugger and run to the breakpont with lldb vs. gdb."""
from __future__ import print_function
import os
import sys
import lldb
from lldbsuite.test.lldbbench import *
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import configuration
from lldbsuite.test import lldbutil
class CompileRunToBreakpointBench(BenchBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
BenchBase.setUp(self)
self.exe = lldbtest_config.lldbExec
self.function = 'Driver::MainLoop()'
self.count = 3
self.lldb_avg = None
self.gdb_avg = None
@benchmarks_test
@no_debug_info_test
@expectedFailureAll(
oslist=["windows"],
bugnumber="llvm.org/pr22274: need a pexpect replacement for windows")
def test_run_lldb_then_gdb(self):
"""Benchmark turnaround time with lldb vs. gdb."""
print()
self.run_lldb_turnaround(self.exe, self.function, self.count)
print("lldb turnaround benchmark:", self.stopwatch)
self.run_gdb_turnaround(self.exe, self.function, self.count)
print("gdb turnaround benchmark:", self.stopwatch)
print("lldb_avg/gdb_avg: %f" % (self.lldb_avg / self.gdb_avg))
def run_lldb_turnaround(self, exe, function, count):
import pexpect
def run_one_round():
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn(
'%s %s %s' %
(lldbtest_config.lldbExec, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('breakpoint set -F %s' % function)
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count + 1):
            # Ignore the turnaround time of the first 'invoke lldb and run
            # to the breakpoint' round.
if i == 0:
run_one_round()
else:
with self.stopwatch:
run_one_round()
self.child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.lldb_avg = self.stopwatch.avg()
self.child = None
def run_gdb_turnaround(self, exe, function, count):
import pexpect
def run_one_round():
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn('gdb --nx %s' % exe)
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('break %s' % function)
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Set self.child_prompt, which is "(gdb) ".
self.child_prompt = '(gdb) '
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count + 1):
            # Ignore the turnaround time of the first 'invoke gdb and run
            # to the breakpoint' round.
if i == 0:
run_one_round()
else:
with self.stopwatch:
run_one_round()
self.child.sendline('quit')
self.child.expect_exact('The program is running. Exit anyway?')
self.child.sendline('y')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.gdb_avg = self.stopwatch.avg()
self.child = None
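
Both turnaround loops run count + 1 rounds and leave round 0 outside the stopwatch, so one-time costs (cold file caches, symbol loading) do not skew the average. That warm-up idiom in isolation, as a runnable sketch with a sleep standing in for the measured work:

import time

def bench_with_warmup(work, count):
    """Average seconds per call of work(), discarding a warm-up round."""
    total = 0.0
    for i in range(count + 1):
        t0 = time.time()
        work()
        if i > 0:  # round 0 is the throwaway warm-up
            total += time.time() - t0
    return total / count

if __name__ == '__main__':
    print("avg: %f" % bench_with_warmup(lambda: time.sleep(0.01), 3))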