You've already forked linux-packaging-mono
Imported Upstream version 5.18.0.205
Former-commit-id: 7f59f7e792705db773f1caecdaa823092f4e2927
This commit is contained in:
parent
5cd5df71cc
commit
8e12397d70
32
external/llvm/utils/lit/CMakeLists.txt
vendored
Normal file
32
external/llvm/utils/lit/CMakeLists.txt
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
# The configured file is not placed in the correct location
|
||||
# until the tests are run as we need to copy it into
|
||||
# a copy of the tests folder
|
||||
configure_lit_site_cfg(
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/tests/lit.site.cfg.in"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg"
|
||||
OUTPUT_MAPPING
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/tests/lit.site.cfg"
|
||||
)
|
||||
|
||||
# Lit's test suite creates output files next to the sources which makes the
|
||||
# source tree dirty. This is undesirable because we do out of source builds.
|
||||
# To work around this the tests and the configuration file are copied into the
|
||||
# build directory just before running them. The tests are not copied over at
|
||||
# configure time (i.e. `file(COPY ...)`) because this could lead to stale
|
||||
# tests being run.
|
||||
add_custom_target(prepare-check-lit
|
||||
COMMAND ${CMAKE_COMMAND} -E remove_directory "${CMAKE_CURRENT_BINARY_DIR}/tests"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy_directory "${CMAKE_CURRENT_SOURCE_DIR}/tests" "${CMAKE_CURRENT_BINARY_DIR}/tests"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg" "${CMAKE_CURRENT_BINARY_DIR}/tests"
|
||||
COMMENT "Preparing lit tests"
|
||||
)
|
||||
|
||||
# Add rules for lit's own test suite
|
||||
add_lit_testsuite(check-lit "Running lit's tests"
|
||||
${CMAKE_CURRENT_BINARY_DIR}
|
||||
DEPENDS "FileCheck" "not" "prepare-check-lit"
|
||||
)
|
||||
|
||||
# For IDEs
|
||||
set_target_properties(check-lit PROPERTIES FOLDER "Tests")
|
||||
set_target_properties(prepare-check-lit PROPERTIES FOLDER "Tests")
|
9
external/llvm/utils/lit/MANIFEST.in
vendored
Normal file
9
external/llvm/utils/lit/MANIFEST.in
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
include TODO lit.py
|
||||
recursive-include tests *
|
||||
recursive-include examples *
|
||||
global-exclude *pyc
|
||||
global-exclude *~
|
||||
prune tests/Output
|
||||
prune tests/*/Output
|
||||
prune tests/*/*/Output
|
||||
prune tests/*/*/*/Output
|
41
external/llvm/utils/lit/README.txt
vendored
Normal file
41
external/llvm/utils/lit/README.txt
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
===============================
|
||||
lit - A Software Testing Tool
|
||||
===============================
|
||||
|
||||
lit is a portable tool for executing LLVM and Clang style test suites,
|
||||
summarizing their results, and providing indication of failures. lit is designed
|
||||
to be a lightweight testing tool with as simple a user interface as possible.
|
||||
|
||||
=====================
|
||||
Contributing to lit
|
||||
=====================
|
||||
|
||||
Please browse the Test Suite > lit category in LLVM's Bugzilla for ideas on
|
||||
what to work on.
|
||||
|
||||
Before submitting patches, run the test suite to ensure nothing has regressed:
|
||||
|
||||
# From within your LLVM source directory.
|
||||
utils/lit/lit.py \
|
||||
--path /path/to/your/llvm/build/bin \
|
||||
utils/lit/tests
|
||||
|
||||
Note that lit's tests depend on 'not' and 'FileCheck', LLVM utilities.
|
||||
You will need to have built LLVM tools in order to run lit's test suite
|
||||
successfully.
|
||||
|
||||
You'll also want to confirm that lit continues to work when testing LLVM.
|
||||
Follow the instructions in http://llvm.org/docs/TestingGuide.html to run the
|
||||
regression test suite:
|
||||
|
||||
make check-llvm
|
||||
|
||||
And be sure to run the llvm-lit wrapper script as well:
|
||||
|
||||
/path/to/your/llvm/build/bin/llvm-lit utils/lit/tests
|
||||
|
||||
Finally, make sure lit works when installed via setuptools:
|
||||
|
||||
python utils/lit/setup.py install
|
||||
lit --path /path/to/your/llvm/build/bin utils/lit/tests
|
||||
|
7
external/llvm/utils/lit/examples/README.txt
vendored
Normal file
7
external/llvm/utils/lit/examples/README.txt
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
==============
|
||||
lit Examples
|
||||
==============
|
||||
|
||||
This directory contains examples of 'lit' test suite configurations. The test
|
||||
suites they define can be run with 'lit examples/example-name', for more details
|
||||
see the README in each example.
|
10
external/llvm/utils/lit/examples/many-tests/README.txt
vendored
Normal file
10
external/llvm/utils/lit/examples/many-tests/README.txt
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
========================
|
||||
Many Tests lit Example
|
||||
========================
|
||||
|
||||
This directory contains a trivial lit test suite configuration that defines a
|
||||
custom test format which just generates a large (N=10000) number of tests that
|
||||
do a small amount of work in the Python test execution code.
|
||||
|
||||
This test suite is useful for testing the performance of lit on large numbers of
|
||||
tests.
|
23
external/llvm/utils/lit/examples/many-tests/lit.cfg
vendored
Normal file
23
external/llvm/utils/lit/examples/many-tests/lit.cfg
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
# -*- Python -*-
|
||||
|
||||
from lit import Test
|
||||
|
||||
class ManyTests(object):
|
||||
def __init__(self, N=10000):
|
||||
self.N = N
|
||||
|
||||
def getTestsInDirectory(self, testSuite, path_in_suite,
|
||||
litConfig, localConfig):
|
||||
for i in range(self.N):
|
||||
test_name = 'test-%04d' % (i,)
|
||||
yield Test.Test(testSuite, path_in_suite + (test_name,),
|
||||
localConfig)
|
||||
|
||||
def execute(self, test, litConfig):
|
||||
# Do a "non-trivial" amount of Python work.
|
||||
sum = 0
|
||||
for i in range(10000):
|
||||
sum += i
|
||||
return Test.PASS,''
|
||||
|
||||
config.test_format = ManyTests()
|
7
external/llvm/utils/lit/lit.py
vendored
Executable file
7
external/llvm/utils/lit/lit.py
vendored
Executable file
@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env python
|
||||
import sys
|
||||
|
||||
from lit.main import main
|
||||
|
||||
if __name__=='__main__':
|
||||
main()
|
251
external/llvm/utils/lit/lit/BooleanExpression.py
vendored
Normal file
251
external/llvm/utils/lit/lit/BooleanExpression.py
vendored
Normal file
@ -0,0 +1,251 @@
|
||||
import re
|
||||
|
||||
class BooleanExpression:
|
||||
# A simple evaluator of boolean expressions.
|
||||
#
|
||||
# Grammar:
|
||||
# expr :: or_expr
|
||||
# or_expr :: and_expr ('||' and_expr)*
|
||||
# and_expr :: not_expr ('&&' not_expr)*
|
||||
# not_expr :: '!' not_expr
|
||||
# '(' or_expr ')'
|
||||
# identifier
|
||||
# identifier :: [-+=._a-zA-Z0-9]+
|
||||
|
||||
# Evaluates `string` as a boolean expression.
|
||||
# Returns True or False. Throws a ValueError on syntax error.
|
||||
#
|
||||
# Variables in `variables` are true.
|
||||
# Substrings of `triple` are true.
|
||||
# 'true' is true.
|
||||
# All other identifiers are false.
|
||||
@staticmethod
|
||||
def evaluate(string, variables, triple=""):
|
||||
try:
|
||||
parser = BooleanExpression(string, set(variables), triple)
|
||||
return parser.parseAll()
|
||||
except ValueError as e:
|
||||
raise ValueError(str(e) + ('\nin expression: %r' % string))
|
||||
|
||||
#####
|
||||
|
||||
def __init__(self, string, variables, triple=""):
|
||||
self.tokens = BooleanExpression.tokenize(string)
|
||||
self.variables = variables
|
||||
self.variables.add('true')
|
||||
self.triple = triple
|
||||
self.value = None
|
||||
self.token = None
|
||||
|
||||
# Singleton end-of-expression marker.
|
||||
END = object()
|
||||
|
||||
# Tokenization pattern.
|
||||
Pattern = re.compile(r'\A\s*([()]|[-+=._a-zA-Z0-9]+|&&|\|\||!)\s*(.*)\Z')
|
||||
|
||||
@staticmethod
|
||||
def tokenize(string):
|
||||
while True:
|
||||
m = re.match(BooleanExpression.Pattern, string)
|
||||
if m is None:
|
||||
if string == "":
|
||||
yield BooleanExpression.END;
|
||||
return
|
||||
else:
|
||||
raise ValueError("couldn't parse text: %r" % string)
|
||||
|
||||
token = m.group(1)
|
||||
string = m.group(2)
|
||||
yield token
|
||||
|
||||
def quote(self, token):
|
||||
if token is BooleanExpression.END:
|
||||
return '<end of expression>'
|
||||
else:
|
||||
return repr(token)
|
||||
|
||||
def accept(self, t):
|
||||
if self.token == t:
|
||||
self.token = next(self.tokens)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def expect(self, t):
|
||||
if self.token == t:
|
||||
if self.token != BooleanExpression.END:
|
||||
self.token = next(self.tokens)
|
||||
else:
|
||||
raise ValueError("expected: %s\nhave: %s" %
|
||||
(self.quote(t), self.quote(self.token)))
|
||||
|
||||
def isIdentifier(self, t):
|
||||
if (t is BooleanExpression.END or t == '&&' or t == '||' or
|
||||
t == '!' or t == '(' or t == ')'):
|
||||
return False
|
||||
return True
|
||||
|
||||
def parseNOT(self):
|
||||
if self.accept('!'):
|
||||
self.parseNOT()
|
||||
self.value = not self.value
|
||||
elif self.accept('('):
|
||||
self.parseOR()
|
||||
self.expect(')')
|
||||
elif not self.isIdentifier(self.token):
|
||||
raise ValueError("expected: '!' or '(' or identifier\nhave: %s" %
|
||||
self.quote(self.token))
|
||||
else:
|
||||
self.value = (self.token in self.variables or
|
||||
self.token in self.triple)
|
||||
self.token = next(self.tokens)
|
||||
|
||||
def parseAND(self):
|
||||
self.parseNOT()
|
||||
while self.accept('&&'):
|
||||
left = self.value
|
||||
self.parseNOT()
|
||||
right = self.value
|
||||
# this is technically the wrong associativity, but it
|
||||
# doesn't matter for this limited expression grammar
|
||||
self.value = left and right
|
||||
|
||||
def parseOR(self):
|
||||
self.parseAND()
|
||||
while self.accept('||'):
|
||||
left = self.value
|
||||
self.parseAND()
|
||||
right = self.value
|
||||
# this is technically the wrong associativity, but it
|
||||
# doesn't matter for this limited expression grammar
|
||||
self.value = left or right
|
||||
|
||||
def parseAll(self):
|
||||
self.token = next(self.tokens)
|
||||
self.parseOR()
|
||||
self.expect(BooleanExpression.END)
|
||||
return self.value
|
||||
|
||||
|
||||
#######
|
||||
# Tests
|
||||
|
||||
import unittest
|
||||
|
||||
class TestBooleanExpression(unittest.TestCase):
|
||||
def test_variables(self):
|
||||
variables = {'its-true', 'false-lol-true', 'under_score',
|
||||
'e=quals', 'd1g1ts'}
|
||||
self.assertTrue(BooleanExpression.evaluate('true', variables))
|
||||
self.assertTrue(BooleanExpression.evaluate('its-true', variables))
|
||||
self.assertTrue(BooleanExpression.evaluate('false-lol-true', variables))
|
||||
self.assertTrue(BooleanExpression.evaluate('under_score', variables))
|
||||
self.assertTrue(BooleanExpression.evaluate('e=quals', variables))
|
||||
self.assertTrue(BooleanExpression.evaluate('d1g1ts', variables))
|
||||
|
||||
self.assertFalse(BooleanExpression.evaluate('false', variables))
|
||||
self.assertFalse(BooleanExpression.evaluate('True', variables))
|
||||
self.assertFalse(BooleanExpression.evaluate('true-ish', variables))
|
||||
self.assertFalse(BooleanExpression.evaluate('not_true', variables))
|
||||
self.assertFalse(BooleanExpression.evaluate('tru', variables))
|
||||
|
||||
def test_triple(self):
|
||||
triple = 'arch-vendor-os'
|
||||
self.assertTrue(BooleanExpression.evaluate('arch-', {}, triple))
|
||||
self.assertTrue(BooleanExpression.evaluate('ar', {}, triple))
|
||||
self.assertTrue(BooleanExpression.evaluate('ch-vend', {}, triple))
|
||||
self.assertTrue(BooleanExpression.evaluate('-vendor-', {}, triple))
|
||||
self.assertTrue(BooleanExpression.evaluate('-os', {}, triple))
|
||||
self.assertFalse(BooleanExpression.evaluate('arch-os', {}, triple))
|
||||
|
||||
def test_operators(self):
|
||||
self.assertTrue(BooleanExpression.evaluate('true || true', {}))
|
||||
self.assertTrue(BooleanExpression.evaluate('true || false', {}))
|
||||
self.assertTrue(BooleanExpression.evaluate('false || true', {}))
|
||||
self.assertFalse(BooleanExpression.evaluate('false || false', {}))
|
||||
|
||||
self.assertTrue(BooleanExpression.evaluate('true && true', {}))
|
||||
self.assertFalse(BooleanExpression.evaluate('true && false', {}))
|
||||
self.assertFalse(BooleanExpression.evaluate('false && true', {}))
|
||||
self.assertFalse(BooleanExpression.evaluate('false && false', {}))
|
||||
|
||||
self.assertFalse(BooleanExpression.evaluate('!true', {}))
|
||||
self.assertTrue(BooleanExpression.evaluate('!false', {}))
|
||||
|
||||
self.assertTrue(BooleanExpression.evaluate(' ((!((false) )) ) ', {}))
|
||||
self.assertTrue(BooleanExpression.evaluate('true && (true && (true))', {}))
|
||||
self.assertTrue(BooleanExpression.evaluate('!false && !false && !! !false', {}))
|
||||
self.assertTrue(BooleanExpression.evaluate('false && false || true', {}))
|
||||
self.assertTrue(BooleanExpression.evaluate('(false && false) || true', {}))
|
||||
self.assertFalse(BooleanExpression.evaluate('false && (false || true)', {}))
|
||||
|
||||
# Evaluate boolean expression `expr`.
|
||||
# Fail if it does not throw a ValueError containing the text `error`.
|
||||
def checkException(self, expr, error):
|
||||
try:
|
||||
BooleanExpression.evaluate(expr, {})
|
||||
self.fail("expression %r didn't cause an exception" % expr)
|
||||
except ValueError as e:
|
||||
if -1 == str(e).find(error):
|
||||
self.fail(("expression %r caused the wrong ValueError\n" +
|
||||
"actual error was:\n%s\n" +
|
||||
"expected error was:\n%s\n") % (expr, e, error))
|
||||
except BaseException as e:
|
||||
self.fail(("expression %r caused the wrong exception; actual " +
|
||||
"exception was: \n%r") % (expr, e))
|
||||
|
||||
def test_errors(self):
|
||||
self.checkException("ba#d",
|
||||
"couldn't parse text: '#d'\n" +
|
||||
"in expression: 'ba#d'")
|
||||
|
||||
self.checkException("true and true",
|
||||
"expected: <end of expression>\n" +
|
||||
"have: 'and'\n" +
|
||||
"in expression: 'true and true'")
|
||||
|
||||
self.checkException("|| true",
|
||||
"expected: '!' or '(' or identifier\n" +
|
||||
"have: '||'\n" +
|
||||
"in expression: '|| true'")
|
||||
|
||||
self.checkException("true &&",
|
||||
"expected: '!' or '(' or identifier\n" +
|
||||
"have: <end of expression>\n" +
|
||||
"in expression: 'true &&'")
|
||||
|
||||
self.checkException("",
|
||||
"expected: '!' or '(' or identifier\n" +
|
||||
"have: <end of expression>\n" +
|
||||
"in expression: ''")
|
||||
|
||||
self.checkException("*",
|
||||
"couldn't parse text: '*'\n" +
|
||||
"in expression: '*'")
|
||||
|
||||
self.checkException("no wait stop",
|
||||
"expected: <end of expression>\n" +
|
||||
"have: 'wait'\n" +
|
||||
"in expression: 'no wait stop'")
|
||||
|
||||
self.checkException("no-$-please",
|
||||
"couldn't parse text: '$-please'\n" +
|
||||
"in expression: 'no-$-please'")
|
||||
|
||||
self.checkException("(((true && true) || true)",
|
||||
"expected: ')'\n" +
|
||||
"have: <end of expression>\n" +
|
||||
"in expression: '(((true && true) || true)'")
|
||||
|
||||
self.checkException("true (true)",
|
||||
"expected: <end of expression>\n" +
|
||||
"have: '('\n" +
|
||||
"in expression: 'true (true)'")
|
||||
|
||||
self.checkException("( )",
|
||||
"expected: '!' or '(' or identifier\n" +
|
||||
"have: ')'\n" +
|
||||
"in expression: '( )'")
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
15
external/llvm/utils/lit/lit/ExampleTests.ObjDir/lit.site.cfg
vendored
Normal file
15
external/llvm/utils/lit/lit/ExampleTests.ObjDir/lit.site.cfg
vendored
Normal file
@ -0,0 +1,15 @@
|
||||
# -*- Python -*-
|
||||
|
||||
# Site specific configuration file.
|
||||
#
|
||||
# Typically this will be generated by the build system to automatically set
|
||||
# certain configuration variables which cannot be autodetected, so that 'lit'
|
||||
# can easily be used on the command line.
|
||||
|
||||
import os
|
||||
|
||||
# Preserve the obj_root, for use by the main lit.cfg.
|
||||
config.example_obj_root = os.path.dirname(__file__)
|
||||
|
||||
lit.load_config(config, os.path.join(config.test_source_root,
|
||||
'lit.cfg'))
|
161
external/llvm/utils/lit/lit/LitConfig.py
vendored
Normal file
161
external/llvm/utils/lit/lit/LitConfig.py
vendored
Normal file
@ -0,0 +1,161 @@
|
||||
from __future__ import absolute_import
|
||||
import inspect
|
||||
import os
|
||||
import sys
|
||||
|
||||
import lit.Test
|
||||
import lit.formats
|
||||
import lit.TestingConfig
|
||||
import lit.util
|
||||
|
||||
# LitConfig must be a new style class for properties to work
|
||||
class LitConfig(object):
|
||||
"""LitConfig - Configuration data for a 'lit' test runner instance, shared
|
||||
across all tests.
|
||||
|
||||
The LitConfig object is also used to communicate with client configuration
|
||||
files, it is always passed in as the global variable 'lit' so that
|
||||
configuration files can access common functionality and internal components
|
||||
easily.
|
||||
"""
|
||||
|
||||
def __init__(self, progname, path, quiet,
|
||||
useValgrind, valgrindLeakCheck, valgrindArgs,
|
||||
noExecute, debug, isWindows, singleProcess,
|
||||
params, config_prefix = None,
|
||||
maxIndividualTestTime = 0,
|
||||
maxFailures = None,
|
||||
parallelism_groups = {},
|
||||
echo_all_commands = False):
|
||||
# The name of the test runner.
|
||||
self.progname = progname
|
||||
# The items to add to the PATH environment variable.
|
||||
self.path = [str(p) for p in path]
|
||||
self.quiet = bool(quiet)
|
||||
self.useValgrind = bool(useValgrind)
|
||||
self.valgrindLeakCheck = bool(valgrindLeakCheck)
|
||||
self.valgrindUserArgs = list(valgrindArgs)
|
||||
self.noExecute = noExecute
|
||||
self.debug = debug
|
||||
self.singleProcess = singleProcess
|
||||
self.isWindows = bool(isWindows)
|
||||
self.params = dict(params)
|
||||
self.bashPath = None
|
||||
|
||||
# Configuration files to look for when discovering test suites.
|
||||
self.config_prefix = config_prefix or 'lit'
|
||||
self.suffixes = ['cfg.py', 'cfg']
|
||||
self.config_names = ['%s.%s' % (self.config_prefix,x) for x in self.suffixes]
|
||||
self.site_config_names = ['%s.site.%s' % (self.config_prefix,x) for x in self.suffixes]
|
||||
self.local_config_names = ['%s.local.%s' % (self.config_prefix,x) for x in self.suffixes]
|
||||
|
||||
self.numErrors = 0
|
||||
self.numWarnings = 0
|
||||
|
||||
self.valgrindArgs = []
|
||||
if self.useValgrind:
|
||||
self.valgrindArgs = ['valgrind', '-q', '--run-libc-freeres=no',
|
||||
'--tool=memcheck', '--trace-children=yes',
|
||||
'--error-exitcode=123']
|
||||
if self.valgrindLeakCheck:
|
||||
self.valgrindArgs.append('--leak-check=full')
|
||||
else:
|
||||
# The default is 'summary'.
|
||||
self.valgrindArgs.append('--leak-check=no')
|
||||
self.valgrindArgs.extend(self.valgrindUserArgs)
|
||||
|
||||
self.maxIndividualTestTime = maxIndividualTestTime
|
||||
self.maxFailures = maxFailures
|
||||
self.parallelism_groups = parallelism_groups
|
||||
self.echo_all_commands = echo_all_commands
|
||||
|
||||
@property
|
||||
def maxIndividualTestTime(self):
|
||||
"""
|
||||
Interface for getting maximum time to spend executing
|
||||
a single test
|
||||
"""
|
||||
return self._maxIndividualTestTime
|
||||
|
||||
@maxIndividualTestTime.setter
|
||||
def maxIndividualTestTime(self, value):
|
||||
"""
|
||||
Interface for setting maximum time to spend executing
|
||||
a single test
|
||||
"""
|
||||
self._maxIndividualTestTime = value
|
||||
if self.maxIndividualTestTime > 0:
|
||||
# The current implementation needs psutil to set
|
||||
# a timeout per test. Check it's available.
|
||||
# See lit.util.killProcessAndChildren()
|
||||
try:
|
||||
import psutil # noqa: F401
|
||||
except ImportError:
|
||||
self.fatal("Setting a timeout per test requires the"
|
||||
" Python psutil module but it could not be"
|
||||
" found. Try installing it via pip or via"
|
||||
" your operating system's package manager.")
|
||||
elif self.maxIndividualTestTime < 0:
|
||||
self.fatal('The timeout per test must be >= 0 seconds')
|
||||
|
||||
def load_config(self, config, path):
|
||||
"""load_config(config, path) - Load a config object from an alternate
|
||||
path."""
|
||||
if self.debug:
|
||||
self.note('load_config from %r' % path)
|
||||
config.load_from_path(path, self)
|
||||
return config
|
||||
|
||||
def getBashPath(self):
|
||||
"""getBashPath - Get the path to 'bash'"""
|
||||
if self.bashPath is not None:
|
||||
return self.bashPath
|
||||
|
||||
self.bashPath = lit.util.which('bash', os.pathsep.join(self.path))
|
||||
if self.bashPath is None:
|
||||
self.bashPath = lit.util.which('bash')
|
||||
|
||||
if self.bashPath is None:
|
||||
self.bashPath = ''
|
||||
|
||||
return self.bashPath
|
||||
|
||||
def getToolsPath(self, dir, paths, tools):
|
||||
if dir is not None and os.path.isabs(dir) and os.path.isdir(dir):
|
||||
if not lit.util.checkToolsPath(dir, tools):
|
||||
return None
|
||||
else:
|
||||
dir = lit.util.whichTools(tools, paths)
|
||||
|
||||
# bash
|
||||
self.bashPath = lit.util.which('bash', dir)
|
||||
if self.bashPath is None:
|
||||
self.bashPath = ''
|
||||
|
||||
return dir
|
||||
|
||||
def _write_message(self, kind, message):
|
||||
# Get the file/line where this message was generated.
|
||||
f = inspect.currentframe()
|
||||
# Step out of _write_message, and then out of wrapper.
|
||||
f = f.f_back.f_back
|
||||
file,line,_,_,_ = inspect.getframeinfo(f)
|
||||
location = '%s:%d' % (file, line)
|
||||
|
||||
sys.stderr.write('%s: %s: %s: %s\n' % (self.progname, location,
|
||||
kind, message))
|
||||
|
||||
def note(self, message):
|
||||
self._write_message('note', message)
|
||||
|
||||
def warning(self, message):
|
||||
self._write_message('warning', message)
|
||||
self.numWarnings += 1
|
||||
|
||||
def error(self, message):
|
||||
self._write_message('error', message)
|
||||
self.numErrors += 1
|
||||
|
||||
def fatal(self, message):
|
||||
self._write_message('fatal', message)
|
||||
sys.exit(2)
|
34
external/llvm/utils/lit/lit/LitTestCase.py
vendored
Normal file
34
external/llvm/utils/lit/lit/LitTestCase.py
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
from __future__ import absolute_import
|
||||
import unittest
|
||||
|
||||
import lit.Test
|
||||
|
||||
"""
|
||||
TestCase adaptor for providing a 'unittest' compatible interface to 'lit' tests.
|
||||
"""
|
||||
|
||||
class UnresolvedError(RuntimeError):
|
||||
pass
|
||||
|
||||
class LitTestCase(unittest.TestCase):
|
||||
def __init__(self, test, run):
|
||||
unittest.TestCase.__init__(self)
|
||||
self._test = test
|
||||
self._run = run
|
||||
|
||||
def id(self):
|
||||
return self._test.getFullName()
|
||||
|
||||
def shortDescription(self):
|
||||
return self._test.getFullName()
|
||||
|
||||
def runTest(self):
|
||||
# Run the test.
|
||||
self._run.execute_test(self._test)
|
||||
|
||||
# Adapt the result to unittest.
|
||||
result = self._test.result
|
||||
if result.code is lit.Test.UNRESOLVED:
|
||||
raise UnresolvedError(result.output)
|
||||
elif result.code.isFailure:
|
||||
self.fail(result.output)
|
291
external/llvm/utils/lit/lit/ProgressBar.py
vendored
Normal file
291
external/llvm/utils/lit/lit/ProgressBar.py
vendored
Normal file
@ -0,0 +1,291 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Source: http://code.activestate.com/recipes/475116/, with
|
||||
# modifications by Daniel Dunbar.
|
||||
|
||||
import sys, re, time
|
||||
|
||||
def to_bytes(str):
|
||||
# Encode to UTF-8 to get binary data.
|
||||
return str.encode('utf-8')
|
||||
|
||||
class TerminalController:
|
||||
"""
|
||||
A class that can be used to portably generate formatted output to
|
||||
a terminal.
|
||||
|
||||
`TerminalController` defines a set of instance variables whose
|
||||
values are initialized to the control sequence necessary to
|
||||
perform a given action. These can be simply included in normal
|
||||
output to the terminal:
|
||||
|
||||
>>> term = TerminalController()
|
||||
>>> print('This is '+term.GREEN+'green'+term.NORMAL)
|
||||
|
||||
Alternatively, the `render()` method can used, which replaces
|
||||
'${action}' with the string required to perform 'action':
|
||||
|
||||
>>> term = TerminalController()
|
||||
>>> print(term.render('This is ${GREEN}green${NORMAL}'))
|
||||
|
||||
If the terminal doesn't support a given action, then the value of
|
||||
the corresponding instance variable will be set to ''. As a
|
||||
result, the above code will still work on terminals that do not
|
||||
support color, except that their output will not be colored.
|
||||
Also, this means that you can test whether the terminal supports a
|
||||
given action by simply testing the truth value of the
|
||||
corresponding instance variable:
|
||||
|
||||
>>> term = TerminalController()
|
||||
>>> if term.CLEAR_SCREEN:
|
||||
... print('This terminal supports clearning the screen.')
|
||||
|
||||
Finally, if the width and height of the terminal are known, then
|
||||
they will be stored in the `COLS` and `LINES` attributes.
|
||||
"""
|
||||
# Cursor movement:
|
||||
BOL = '' #: Move the cursor to the beginning of the line
|
||||
UP = '' #: Move the cursor up one line
|
||||
DOWN = '' #: Move the cursor down one line
|
||||
LEFT = '' #: Move the cursor left one char
|
||||
RIGHT = '' #: Move the cursor right one char
|
||||
|
||||
# Deletion:
|
||||
CLEAR_SCREEN = '' #: Clear the screen and move to home position
|
||||
CLEAR_EOL = '' #: Clear to the end of the line.
|
||||
CLEAR_BOL = '' #: Clear to the beginning of the line.
|
||||
CLEAR_EOS = '' #: Clear to the end of the screen
|
||||
|
||||
# Output modes:
|
||||
BOLD = '' #: Turn on bold mode
|
||||
BLINK = '' #: Turn on blink mode
|
||||
DIM = '' #: Turn on half-bright mode
|
||||
REVERSE = '' #: Turn on reverse-video mode
|
||||
NORMAL = '' #: Turn off all modes
|
||||
|
||||
# Cursor display:
|
||||
HIDE_CURSOR = '' #: Make the cursor invisible
|
||||
SHOW_CURSOR = '' #: Make the cursor visible
|
||||
|
||||
# Terminal size:
|
||||
COLS = None #: Width of the terminal (None for unknown)
|
||||
LINES = None #: Height of the terminal (None for unknown)
|
||||
|
||||
# Foreground colors:
|
||||
BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ''
|
||||
|
||||
# Background colors:
|
||||
BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ''
|
||||
BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ''
|
||||
|
||||
_STRING_CAPABILITIES = """
|
||||
BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
|
||||
CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
|
||||
BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
|
||||
HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split()
|
||||
_COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
|
||||
_ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()
|
||||
|
||||
def __init__(self, term_stream=sys.stdout):
|
||||
"""
|
||||
Create a `TerminalController` and initialize its attributes
|
||||
with appropriate values for the current terminal.
|
||||
`term_stream` is the stream that will be used for terminal
|
||||
output; if this stream is not a tty, then the terminal is
|
||||
assumed to be a dumb terminal (i.e., have no capabilities).
|
||||
"""
|
||||
# Curses isn't available on all platforms
|
||||
try: import curses
|
||||
except: return
|
||||
|
||||
# If the stream isn't a tty, then assume it has no capabilities.
|
||||
if not term_stream.isatty(): return
|
||||
|
||||
# Check the terminal type. If we fail, then assume that the
|
||||
# terminal has no capabilities.
|
||||
try: curses.setupterm()
|
||||
except: return
|
||||
|
||||
# Look up numeric capabilities.
|
||||
self.COLS = curses.tigetnum('cols')
|
||||
self.LINES = curses.tigetnum('lines')
|
||||
self.XN = curses.tigetflag('xenl')
|
||||
|
||||
# Look up string capabilities.
|
||||
for capability in self._STRING_CAPABILITIES:
|
||||
(attrib, cap_name) = capability.split('=')
|
||||
setattr(self, attrib, self._tigetstr(cap_name) or '')
|
||||
|
||||
# Colors
|
||||
set_fg = self._tigetstr('setf')
|
||||
if set_fg:
|
||||
for i,color in zip(range(len(self._COLORS)), self._COLORS):
|
||||
setattr(self, color, self._tparm(set_fg, i))
|
||||
set_fg_ansi = self._tigetstr('setaf')
|
||||
if set_fg_ansi:
|
||||
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
|
||||
setattr(self, color, self._tparm(set_fg_ansi, i))
|
||||
set_bg = self._tigetstr('setb')
|
||||
if set_bg:
|
||||
for i,color in zip(range(len(self._COLORS)), self._COLORS):
|
||||
setattr(self, 'BG_'+color, self._tparm(set_bg, i))
|
||||
set_bg_ansi = self._tigetstr('setab')
|
||||
if set_bg_ansi:
|
||||
for i,color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
|
||||
setattr(self, 'BG_'+color, self._tparm(set_bg_ansi, i))
|
||||
|
||||
def _tparm(self, arg, index):
|
||||
import curses
|
||||
return curses.tparm(to_bytes(arg), index).decode('utf-8') or ''
|
||||
|
||||
def _tigetstr(self, cap_name):
|
||||
# String capabilities can include "delays" of the form "$<2>".
|
||||
# For any modern terminal, we should be able to just ignore
|
||||
# these, so strip them out.
|
||||
import curses
|
||||
cap = curses.tigetstr(cap_name)
|
||||
if cap is None:
|
||||
cap = ''
|
||||
else:
|
||||
cap = cap.decode('utf-8')
|
||||
return re.sub(r'\$<\d+>[/*]?', '', cap)
|
||||
|
||||
def render(self, template):
|
||||
"""
|
||||
Replace each $-substitutions in the given template string with
|
||||
the corresponding terminal control string (if it's defined) or
|
||||
'' (if it's not).
|
||||
"""
|
||||
return re.sub(r'\$\$|\${\w+}', self._render_sub, template)
|
||||
|
||||
def _render_sub(self, match):
|
||||
s = match.group()
|
||||
if s == '$$': return s
|
||||
else: return getattr(self, s[2:-1])
|
||||
|
||||
#######################################################################
|
||||
# Example use case: progress bar
|
||||
#######################################################################
|
||||
|
||||
class SimpleProgressBar:
|
||||
"""
|
||||
A simple progress bar which doesn't need any terminal support.
|
||||
|
||||
This prints out a progress bar like:
|
||||
'Header: 0 .. 10.. 20.. ...'
|
||||
"""
|
||||
|
||||
def __init__(self, header):
|
||||
self.header = header
|
||||
self.atIndex = None
|
||||
|
||||
def update(self, percent, message):
|
||||
if self.atIndex is None:
|
||||
sys.stdout.write(self.header)
|
||||
self.atIndex = 0
|
||||
|
||||
next = int(percent*50)
|
||||
if next == self.atIndex:
|
||||
return
|
||||
|
||||
for i in range(self.atIndex, next):
|
||||
idx = i % 5
|
||||
if idx == 0:
|
||||
sys.stdout.write('%-2d' % (i*2))
|
||||
elif idx == 1:
|
||||
pass # Skip second char
|
||||
elif idx < 4:
|
||||
sys.stdout.write('.')
|
||||
else:
|
||||
sys.stdout.write(' ')
|
||||
sys.stdout.flush()
|
||||
self.atIndex = next
|
||||
|
||||
def clear(self):
|
||||
if self.atIndex is not None:
|
||||
sys.stdout.write('\n')
|
||||
sys.stdout.flush()
|
||||
self.atIndex = None
|
||||
|
||||
class ProgressBar:
    """
    A 3-line progress bar, which looks like::

                                Header
        20% [===========----------------------------------]
                           progress message

    The progress bar is colored, if the terminal supports color
    output; and adjusts to the width of the terminal.
    """
    # Templates rendered through the terminal controller.  BAR's %s slots
    # are (prefix, done, remaining, suffix); HEADER's slot is the centered
    # title line.
    BAR = '%s${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}%s'
    HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'

    def __init__(self, term, header, useETA=True):
        self.term = term
        if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
            # Fixed typo in the user-facing message: "dispaly" -> "display".
            raise ValueError("Terminal isn't capable enough -- you "
                             "should use a simpler progress display.")
        self.BOL = self.term.BOL # BoL from col#79
        self.XNL = "\n" # Newline from col#79
        if self.term.COLS:
            self.width = self.term.COLS
            if not self.term.XN:
                self.BOL = self.term.UP + self.term.BOL
                self.XNL = "" # Cursor must be fed to the next line
        else:
            self.width = 75
        self.bar = term.render(self.BAR)
        self.header = self.term.render(self.HEADER % header.center(self.width))
        self.cleared = 1 #: true if we haven't drawn the bar yet.
        self.useETA = useETA
        if self.useETA:
            self.startTime = time.time()
        self.update(0, '')

    def update(self, percent, message):
        """Redraw the bar at 'percent' (0.0 - 1.0) with 'message' below it."""
        if self.cleared:
            sys.stdout.write(self.header)
            self.cleared = 0
        prefix = '%3d%% ' % (percent*100,)
        suffix = ''
        if self.useETA:
            elapsed = time.time() - self.startTime
            # Only extrapolate once there is some signal to work with.
            if percent > .0001 and elapsed > 1:
                total = elapsed / percent
                eta = int(total - elapsed)
                # Integer division throughout (was 'eta//3600.', which
                # yielded a float hour before %d formatting).
                h = eta // 3600
                m = (eta // 60) % 60
                s = eta % 60
                suffix = ' ETA: %02d:%02d:%02d'%(h,m,s)
        barWidth = self.width - len(prefix) - len(suffix) - 2
        n = int(barWidth*percent)
        if len(message) < self.width:
            message = message + ' '*(self.width - len(message))
        else:
            # Too long: keep the tail, flagged with a leading ellipsis.
            message = '... ' + message[-(self.width-4):]
        sys.stdout.write(
            self.BOL + self.term.UP + self.term.CLEAR_EOL +
            (self.bar % (prefix, '='*n, '-'*(barWidth-n), suffix)) +
            self.XNL +
            self.term.CLEAR_EOL + message)
        if not self.term.XN:
            sys.stdout.flush()

    def clear(self):
        """Erase the bar's three lines, if the bar has been drawn."""
        if not self.cleared:
            sys.stdout.write(self.BOL + self.term.CLEAR_EOL +
                             self.term.UP + self.term.CLEAR_EOL +
                             self.term.UP + self.term.CLEAR_EOL)
            sys.stdout.flush()
            self.cleared = 1
|
||||
|
||||
def test():
    # Manual smoke test: animate a progress bar on the real terminal.
    controller = TerminalController()
    bar = ProgressBar(controller, 'Tests')
    for step in range(101):
        bar.update(step / 100., str(step))
        time.sleep(.3)


if __name__ == '__main__':
    test()
|
108
external/llvm/utils/lit/lit/ShCommands.py
vendored
Normal file
108
external/llvm/utils/lit/lit/ShCommands.py
vendored
Normal file
@ -0,0 +1,108 @@
|
||||
class Command:
    """A single shell command: an argv list plus its I/O redirections."""

    def __init__(self, args, redirects):
        # Copy both sequences so later caller-side mutation is harmless.
        self.args = list(args)
        self.redirects = list(redirects)

    def __repr__(self):
        return 'Command(%r, %r)' % (self.args, self.redirects)

    def __eq__(self, other):
        if not isinstance(other, Command):
            return False
        return (self.args == other.args
                and self.redirects == other.redirects)

    def toShell(self, file):
        """Write this command to 'file' in shell syntax, quoting each
        argument. Raises NotImplementedError for arguments that cannot be
        safely quoted with the simple scheme used here."""
        for arg in self.args:
            # Prefer single quotes; fall back to double quotes when the
            # argument itself contains a single quote.
            if "'" not in arg:
                quoted = "'%s'" % arg
            elif '"' not in arg and '$' not in arg:
                quoted = '"%s"' % arg
            else:
                raise NotImplementedError('Unable to quote %r' % arg)
            file.write(quoted)

            # For debugging / validation: re-lex the quoted form and check
            # it round-trips to exactly the original argument.
            import ShUtil
            dequoted = list(ShUtil.ShLexer(quoted).lex())
            if dequoted != [arg]:
                raise NotImplementedError('Unable to quote %r' % arg)

        for op, target in self.redirects:
            if len(op) == 1:
                file.write("%s '%s'" % (op[0], target))
            else:
                file.write("%s%s '%s'" % (op[1], op[0], target))
|
||||
|
||||
class GlobItem:
    """A shell word containing unquoted glob characters; expanded against a
    working directory at execution time."""

    def __init__(self, pattern):
        self.pattern = pattern

    def __repr__(self):
        return self.pattern

    def __eq__(self, other):
        # Fixed: compare against GlobItem, not Command. The original
        # isinstance(other, Command) check made two equal GlobItems always
        # compare unequal.
        if not isinstance(other, GlobItem):
            return False

        return (self.pattern == other.pattern)

    def resolve(self, cwd):
        """Expand the pattern relative to 'cwd'. If nothing matches, return
        the literal pattern itself (matching POSIX shell behavior)."""
        import glob
        import os
        if os.path.isabs(self.pattern):
            abspath = self.pattern
        else:
            abspath = os.path.join(cwd, self.pattern)
        results = glob.glob(abspath)
        return [self.pattern] if len(results) == 0 else results
|
||||
|
||||
class Pipeline:
    """A sequence of commands joined by pipes, optionally negated ('!') and
    optionally piping stderr as well (pipe_err)."""

    def __init__(self, commands, negate=False, pipe_err=False):
        self.commands = commands
        self.negate = negate
        self.pipe_err = pipe_err

    def __repr__(self):
        return 'Pipeline(%r, %r, %r)' % (self.commands, self.negate,
                                         self.pipe_err)

    def __eq__(self, other):
        if not isinstance(other, Pipeline):
            return False

        # Fixed: the original compared self.pipe_err on both sides, so two
        # pipelines differing only in pipe_err compared equal.
        return ((self.commands, self.negate, self.pipe_err) ==
                (other.commands, other.negate, other.pipe_err))

    def toShell(self, file, pipefail=False):
        """Write this pipeline to 'file' in shell syntax.

        Raises ValueError if the requested 'pipefail' setting disagrees with
        how this pipeline was parsed.
        """
        if pipefail != self.pipe_err:
            raise ValueError('Inconsistent "pipefail" attribute!')
        if self.negate:
            file.write('! ')
        for cmd in self.commands:
            cmd.toShell(file)
            if cmd is not self.commands[-1]:
                file.write('|\n ')
|
||||
|
||||
class Seq:
    """Two shell statements joined by a sequencing operator:
    ';', '&', '||' or '&&'."""

    def __init__(self, lhs, op, rhs):
        assert op in (';', '&', '||', '&&')
        self.op = op
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return 'Seq(%r, %r, %r)' % (self.lhs, self.op, self.rhs)

    def __eq__(self, other):
        if not isinstance(other, Seq):
            return False
        return (self.lhs == other.lhs
                and self.op == other.op
                and self.rhs == other.rhs)

    def toShell(self, file, pipefail=False):
        # Emit "lhs <op>\n rhs", threading the pipefail flag through both
        # sides.
        self.lhs.toShell(file, pipefail)
        file.write(' %s\n' % self.op)
        self.rhs.toShell(file, pipefail)
|
265
external/llvm/utils/lit/lit/ShUtil.py
vendored
Normal file
265
external/llvm/utils/lit/lit/ShUtil.py
vendored
Normal file
@ -0,0 +1,265 @@
|
||||
from __future__ import absolute_import
|
||||
import itertools
|
||||
|
||||
import lit.util
|
||||
from lit.ShCommands import Command, GlobItem, Pipeline, Seq
|
||||
|
||||
class ShLexer:
    """Lexer for a small (b)sh-like command language.

    Splits an input string into argument words (plain strings or GlobItem
    for words with unquoted glob characters) and operator tokens.  Operators
    are yielded as 1-tuples such as ('|',) or ('>>',) — and as 2-tuples like
    ('>', 2) for fd-prefixed redirections — so they can never be confused
    with ordinary string arguments.
    """

    def __init__(self, data, win32Escapes = False):
        self.data = data                  # The input string being lexed.
        self.pos = 0                      # Current scan index into data.
        self.end = len(data)              # One past the last valid index.
        # When true, '\' outside quotes is NOT an escape character
        # (Windows-style path handling).
        self.win32Escapes = win32Escapes

    def eat(self):
        # Consume and return the next character. Assumes pos < end.
        c = self.data[self.pos]
        self.pos += 1
        return c

    def look(self):
        # Peek at the next character without consuming. Assumes pos < end.
        return self.data[self.pos]

    def maybe_eat(self, c):
        """
        maybe_eat(c) - Consume the character c if it is the next character,
        returning True if a character was consumed. """
        if self.data[self.pos] == c:
            self.pos += 1
            return True
        return False

    def lex_arg_fast(self, c):
        # Fast path for a plain word: grab everything up to the next
        # whitespace and accept it only if it contains no shell-special
        # characters. Returns None to signal "fall back to the slow path".
        # Get the leading whitespace free section.
        chunk = self.data[self.pos - 1:].split(None, 1)[0]

        # If it has special characters, the fast path failed.
        if ('|' in chunk or '&' in chunk or
            '<' in chunk or '>' in chunk or
            "'" in chunk or '"' in chunk or
            ';' in chunk or '\\' in chunk):
            return None

        self.pos = self.pos - 1 + len(chunk)
        return GlobItem(chunk) if '*' in chunk or '?' in chunk else chunk

    def lex_arg_slow(self, c):
        # Full argument lexer: handles quoting, escapes, glob characters,
        # and fd-prefixed redirections like '2>'.
        if c in "'\"":
            str = self.lex_arg_quoted(c)
        else:
            str = c
        unquoted_glob_char = False
        quoted_glob_char = False
        while self.pos != self.end:
            c = self.look()
            if c.isspace() or c in "|&;":
                break
            elif c in '><':
                # This is an annoying case; we treat '2>' as a single token so
                # we don't have to track whitespace tokens.

                # If the parse string isn't an integer, do the usual thing.
                if not str.isdigit():
                    break

                # Otherwise, lex the operator and convert to a redirection
                # token.
                num = int(str)
                tok = self.lex_one_token()
                assert isinstance(tok, tuple) and len(tok) == 1
                return (tok[0], num)
            elif c == '"' or c == "'":
                self.eat()
                quoted_arg = self.lex_arg_quoted(c)
                if '*' in quoted_arg or '?' in quoted_arg:
                    quoted_glob_char = True
                str += quoted_arg
            elif not self.win32Escapes and c == '\\':
                # Outside of a string, '\\' escapes everything.
                self.eat()
                if self.pos == self.end:
                    lit.util.warning(
                        "escape at end of quoted argument in: %r" % self.data)
                    return str
                str += self.eat()
            elif c in '*?':
                unquoted_glob_char = True
                str += self.eat()
            else:
                str += self.eat()
        # If a quote character is present, lex_arg_quoted will remove the quotes
        # and append the argument directly. This causes a problem when the
        # quoted portion contains a glob character, as the character will no
        # longer be treated literally. If glob characters occur *only* inside
        # of quotes, then we can handle this by not globbing at all, and if
        # glob characters occur *only* outside of quotes, we can still glob just
        # fine. But if a glob character occurs both inside and outside of
        # quotes this presents a problem. In practice this is such an obscure
        # edge case that it doesn't seem worth the added complexity to support.
        # By adding an assertion, it means some bot somewhere will catch this
        # and flag the user of a non-portable test (which could almost certainly
        # be re-written to work correctly without triggering this).
        assert not (quoted_glob_char and unquoted_glob_char)
        return GlobItem(str) if unquoted_glob_char else str

    def lex_arg_quoted(self, delim):
        # Lex until the closing 'delim' quote; the surrounding quotes are
        # stripped from the returned text.
        str = ''
        while self.pos != self.end:
            c = self.eat()
            if c == delim:
                return str
            elif c == '\\' and delim == '"':
                # Inside a '"' quoted string, '\\' only escapes the quote
                # character and backslash, otherwise it is preserved.
                if self.pos == self.end:
                    lit.util.warning(
                        "escape at end of quoted argument in: %r" % self.data)
                    return str
                c = self.eat()
                if c == '"':
                    str += '"'
                elif c == '\\':
                    str += '\\'
                else:
                    # Preserve the backslash for any other escaped character.
                    str += '\\' + c
            else:
                str += c
        # Unterminated quote: warn and return what was gathered.
        lit.util.warning("missing quote character in %r" % self.data)
        return str

    def lex_arg_checked(self, c):
        # Debug helper: run both the fast and slow paths and verify they
        # agree on the token and the final position.
        pos = self.pos
        res = self.lex_arg_fast(c)
        end = self.pos

        self.pos = pos
        reference = self.lex_arg_slow(c)
        if res is not None:
            if res != reference:
                raise ValueError("Fast path failure: %r != %r" % (
                        res, reference))
            if self.pos != end:
                raise ValueError("Fast path failure: %r != %r" % (
                        self.pos, end))
        return reference

    def lex_arg(self, c):
        # Try the fast path first; None (or an empty result) falls through
        # to the slow path.
        return self.lex_arg_fast(c) or self.lex_arg_slow(c)

    def lex_one_token(self):
        """
        lex_one_token - Lex a single 'sh' token. """

        c = self.eat()
        if c == ';':
            return (c,)
        if c == '|':
            if self.maybe_eat('|'):
                return ('||',)
            return (c,)
        if c == '&':
            if self.maybe_eat('&'):
                return ('&&',)
            if self.maybe_eat('>'):
                return ('&>',)
            return (c,)
        if c == '>':
            if self.maybe_eat('&'):
                return ('>&',)
            if self.maybe_eat('>'):
                return ('>>',)
            return (c,)
        if c == '<':
            if self.maybe_eat('&'):
                return ('<&',)
            # NOTE(review): '<' followed by '>' is tokenized as ('<<',), not
            # ('<>',) — looks intentional upstream, but confirm before
            # relying on it.
            if self.maybe_eat('>'):
                return ('<<',)
            return (c,)

        return self.lex_arg(c)

    def lex(self):
        # Generator over all tokens in the input, skipping whitespace.
        while self.pos != self.end:
            if self.look().isspace():
                self.eat()
            else:
                yield self.lex_one_token()
|
||||
|
||||
###
|
||||
|
||||
class ShParser:
    """Recursive-descent parser over ShLexer's token stream, producing a
    tree of Command / Pipeline / Seq nodes."""

    def __init__(self, data, win32Escapes = False, pipefail = False):
        self.data = data
        self.pipefail = pipefail
        # Lazily-consumed token generator from the lexer.
        self.tokens = ShLexer(data, win32Escapes = win32Escapes).lex()

    def lex(self):
        # Pull the next token from the stream, or None at end of input.
        for item in self.tokens:
            return item
        return None

    def look(self):
        # One-token lookahead: consume a token, then push it back by
        # re-chaining it in front of the remaining stream.
        token = self.lex()
        if token is not None:
            self.tokens = itertools.chain([token], self.tokens)
        return token

    def parse_command(self):
        # Parse a single command: leading word, then a mix of argument
        # words and (operator, target) redirections.
        tok = self.lex()
        if not tok:
            raise ValueError("empty command!")
        if isinstance(tok, tuple):
            raise ValueError("syntax error near unexpected token %r" % tok[0])

        args = [tok]
        redirects = []
        while 1:
            tok = self.look()

            # EOF?
            if tok is None:
                break

            # If this is an argument, just add it to the current command.
            if isinstance(tok, (str, GlobItem)):
                args.append(self.lex())
                continue

            # Otherwise see if it is a terminator.
            assert isinstance(tok, tuple)
            if tok[0] in ('|',';','&','||','&&'):
                break

            # Otherwise it must be a redirection.
            op = self.lex()
            arg = self.lex()
            if not arg:
                raise ValueError("syntax error near token %r" % op[0])
            redirects.append((op, arg))

        return Command(args, redirects)

    def parse_pipeline(self):
        # Parse a '|'-joined sequence of commands.
        # NOTE(review): 'negate' is always False here — '!' handling appears
        # to live elsewhere; confirm before changing.
        negate = False

        commands = [self.parse_command()]
        while self.look() == ('|',):
            self.lex()
            commands.append(self.parse_command())
        return Pipeline(commands, negate, self.pipefail)

    def parse(self):
        # Parse the whole input: pipelines joined left-associatively by
        # ';', '&', '||' or '&&'.
        lhs = self.parse_pipeline()

        while self.look():
            operator = self.lex()
            assert isinstance(operator, tuple) and len(operator) == 1

            if not self.look():
                raise ValueError(
                    "missing argument to operator %r" % operator[0])

            # FIXME: Operator precedence!!
            lhs = Seq(lhs, operator[0], self.parse_pipeline())

        return lhs
|
||||
|
362
external/llvm/utils/lit/lit/Test.py
vendored
Normal file
362
external/llvm/utils/lit/lit/Test.py
vendored
Normal file
@ -0,0 +1,362 @@
|
||||
import os
|
||||
from xml.sax.saxutils import escape
|
||||
from json import JSONEncoder
|
||||
|
||||
from lit.BooleanExpression import BooleanExpression
|
||||
|
||||
# Test result codes.
|
||||
|
||||
class ResultCode(object):
    """Test result codes."""

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}

    def __new__(cls, name, isFailure):
        # Intern by name: constructing the same name twice yields the same
        # object.
        instance = cls._instances.get(name)
        if instance is None:
            instance = super(ResultCode, cls).__new__(cls)
            cls._instances[name] = instance
        return instance

    def __getnewargs__(self):
        # Pickle support: unpickling re-invokes __new__ with these
        # arguments, hitting the intern cache above.
        return (self.name, self.isFailure)

    def __init__(self, name, isFailure):
        self.name = name
        self.isFailure = isFailure

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))
|
||||
|
||||
# Canonical result-code instances. The second argument marks whether the
# code counts as a failure for reporting purposes.
PASS = ResultCode('PASS', False)
FLAKYPASS = ResultCode('FLAKYPASS', False)
XFAIL = ResultCode('XFAIL', False)          # expected failure that did fail
FAIL = ResultCode('FAIL', True)
XPASS = ResultCode('XPASS', True)           # expected failure that passed
UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
TIMEOUT = ResultCode('TIMEOUT', True)
|
||||
|
||||
# Test metric values.
|
||||
|
||||
class MetricValue(object):
    """Abstract base for typed metric values attached to test results."""

    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of
        the console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the
        JSON test output.
        """
        raise RuntimeError("abstract method")
|
||||
|
||||
class IntMetricValue(MetricValue):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
|
||||
def format(self):
|
||||
return str(self.value)
|
||||
|
||||
def todata(self):
|
||||
return self.value
|
||||
|
||||
class RealMetricValue(MetricValue):
|
||||
def __init__(self, value):
|
||||
self.value = value
|
||||
|
||||
def format(self):
|
||||
return '%.4f' % self.value
|
||||
|
||||
def todata(self):
|
||||
return self.value
|
||||
|
||||
class JSONMetricValue(MetricValue):
|
||||
"""
|
||||
JSONMetricValue is used for types that are representable in the output
|
||||
but that are otherwise uninterpreted.
|
||||
"""
|
||||
def __init__(self, value):
|
||||
# Ensure the value is a serializable by trying to encode it.
|
||||
# WARNING: The value may change before it is encoded again, and may
|
||||
# not be encodable after the change.
|
||||
try:
|
||||
e = JSONEncoder()
|
||||
e.encode(value)
|
||||
except TypeError:
|
||||
raise
|
||||
self.value = value
|
||||
|
||||
def format(self):
|
||||
e = JSONEncoder(indent=2, sort_keys=True)
|
||||
return e.encode(self.value)
|
||||
|
||||
def todata(self):
|
||||
return self.value
|
||||
|
||||
def toMetricValue(value):
    """Coerce 'value' into a MetricValue subclass instance.

    Existing MetricValues pass through; ints (and Python 2 longs) become
    IntMetricValue, floats become RealMetricValue, and anything else must
    be JSON-encodable (JSONMetricValue's constructor raises otherwise).
    """
    if isinstance(value, MetricValue):
        return value
    if isinstance(value, int):
        return IntMetricValue(value)
    if isinstance(value, float):
        return RealMetricValue(value)

    # 'long' is only present in python2
    try:
        if isinstance(value, long):
            return IntMetricValue(value)
    except NameError:
        pass

    # Try to create a JSONMetricValue and let the constructor throw
    # if value is not a valid type.
    return JSONMetricValue(value)
|
||||
|
||||
|
||||
# Test results.
|
||||
|
||||
class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The result code (a ResultCode instance).
        self.code = code
        # The test output.
        self.output = output
        # The wall time taken to execute the test, if timed.
        self.elapsed = elapsed
        # Metrics attached to this result, keyed by metric name.
        self.metrics = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and
        value (a MetricValue instance). Attaching a second metric under a
        name already present raises ValueError.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value
|
||||
|
||||
# Test classes.
|
||||
|
||||
class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        self.source_root = source_root
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

    def getSourcePath(self, components):
        # Path of a test's sources beneath the suite's source root.
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        # Path of a test's working/output area beneath the execution root.
        return os.path.join(self.exec_root, *components)
|
||||
|
||||
class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config, file_path = None):
        self.suite = suite                  # Owning TestSuite.
        self.path_in_suite = path_in_suite  # Path components within the suite.
        self.config = config                # The effective TestingConfig.
        self.file_path = file_path          # Optional override of the source path.

        # A list of conditions under which this test is expected to fail.
        # Each condition is a boolean expression of features and target
        # triple parts. These can optionally be provided by test format
        # handlers, and will be honored when the test result is supplied.
        self.xfails = []

        # A list of conditions that must be satisfied before running the test.
        # Each condition is a boolean expression of features. All of them
        # must be True for the test to run.
        # FIXME should target triple parts count here too?
        self.requires = []

        # A list of conditions that prevent execution of the test.
        # Each condition is a boolean expression of features and target
        # triple parts. All of them must be False for the test to run.
        self.unsupported = []

        # The test result, once complete.
        self.result = None

    def setResult(self, result):
        # Record the (single) result for this test and fold in XFAIL
        # semantics. Raises ValueError on a second call or a non-Result.
        if self.result is not None:
            raise ValueError("test result already set")
        if not isinstance(result, Result):
            raise ValueError("unexpected result type")

        self.result = result

        # Apply the XFAIL handling to resolve the result exit code.
        try:
            if self.isExpectedToFail():
                if self.result.code == PASS:
                    self.result.code = XPASS
                elif self.result.code == FAIL:
                    self.result.code = XFAIL
        except ValueError as e:
            # Syntax error in an XFAIL line.
            self.result.code = UNRESOLVED
            self.result.output = str(e)

    def getFullName(self):
        # Display name: "<suite name> :: <path/in/suite>".
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getFilePath(self):
        # The file to read the test from; may be overridden at construction.
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test xfails property which by
        some test formats may not be computed until the test has first been
        executed.
        Throws ValueError if an XFAIL line has a syntax error.
        """

        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        # Check if any of the xfails match an available feature or the target.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is a True expression of features and target triple parts,
            # it fails.
            try:
                if BooleanExpression.evaluate(item, features, triple):
                    return True
            except ValueError as e:
                raise ValueError('Error in XFAIL list:\n%s' % str(e))

        return False

    def isWithinFeatureLimits(self):
        """
        isWithinFeatureLimits() -> bool

        A test is within the feature limits set by run_only_tests if
        1. the test's requirements ARE satisfied by the available features
        2. the test's requirements ARE NOT satisfied after the limiting
           features are removed from the available features

        Throws ValueError if a REQUIRES line has a syntax error.
        """

        if not self.config.limit_to_features:
            return True # No limits. Run it.

        # Check the requirements as-is (#1)
        if self.getMissingRequiredFeatures():
            return False

        # Check the requirements after removing the limiting features (#2)
        featuresMinusLimits = [f for f in self.config.available_features
                               if not f in self.config.limit_to_features]
        if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
            return False

        return True

    def getMissingRequiredFeaturesFromList(self, features):
        # Evaluate each REQUIRES expression against 'features'; return the
        # unsatisfied ones.
        try:
            return [item for item in self.requires
                    if not BooleanExpression.evaluate(item, features)]
        except ValueError as e:
            raise ValueError('Error in REQUIRES list:\n%s' % str(e))

    def getMissingRequiredFeatures(self):
        """
        getMissingRequiredFeatures() -> list of strings

        Returns a list of features from REQUIRES that are not satisfied.
        Throws ValueError if a REQUIRES line has a syntax error.
        """

        features = self.config.available_features
        return self.getMissingRequiredFeaturesFromList(features)

    def getUnsupportedFeatures(self):
        """
        getUnsupportedFeatures() -> list of strings

        Returns a list of features from UNSUPPORTED that are present
        in the test configuration's features or target triple.
        Throws ValueError if an UNSUPPORTED line has a syntax error.
        """

        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        try:
            return [item for item in self.unsupported
                    if BooleanExpression.evaluate(item, features, triple)]
        except ValueError as e:
            raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))

    def isEarlyTest(self):
        """
        isEarlyTest() -> bool

        Check whether this test should be executed early in a particular run.
        This can be used for test suites with long running tests to maximize
        parallelism or where it is desirable to surface their failures early.
        """
        return self.suite.config.is_early

    def getJUnitXML(self):
        # Render this test's result as a JUnit <testcase> element.
        # NOTE(review): class/test names are interpolated into attribute
        # values without XML escaping, and elapsed is formatted with %.2f —
        # assumes elapsed is non-None and names contain no quotes; confirm
        # callers guarantee both.
        test_name = self.path_in_suite[-1]
        test_path = self.path_in_suite[:-1]
        safe_test_path = [x.replace(".","_") for x in test_path]
        safe_name = self.suite.name.replace(".","-")

        if safe_test_path:
            class_name = safe_name + "." + "/".join(safe_test_path)
        else:
            class_name = safe_name + "." + safe_name

        xml = "<testcase classname='" + class_name + "' name='" + \
            test_name + "'"
        xml += " time='%.2f'" % (self.result.elapsed,)
        if self.result.code.isFailure:
            # Only the failure body text is escaped.
            xml += ">\n\t<failure >\n" + escape(self.result.output)
            xml += "\n\t</failure>\n</testcase>"
        else:
            xml += "/>"
        return xml
|
1387
external/llvm/utils/lit/lit/TestRunner.py
vendored
Normal file
1387
external/llvm/utils/lit/lit/TestRunner.py
vendored
Normal file
File diff suppressed because it is too large
Load Diff
154
external/llvm/utils/lit/lit/TestingConfig.py
vendored
Normal file
154
external/llvm/utils/lit/lit/TestingConfig.py
vendored
Normal file
@ -0,0 +1,154 @@
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
class TestingConfig:
    """
    TestingConfig - Information on the tests inside a suite.
    """

    @staticmethod
    def fromdefaults(litConfig):
        """
        fromdefaults(litConfig) -> TestingConfig

        Create a TestingConfig object with default values.
        """
        # Set the environment based on the command line arguments.
        environment = {
            'PATH' : os.pathsep.join(litConfig.path +
                                     [os.environ.get('PATH','')]),
            'LLVM_DISABLE_CRASH_REPORT' : '1',
            }

        # Host environment variables forwarded verbatim into test processes.
        pass_vars = ['LIBRARY_PATH', 'LD_LIBRARY_PATH', 'SYSTEMROOT', 'TERM',
                     'LD_PRELOAD', 'ASAN_OPTIONS', 'UBSAN_OPTIONS',
                     'LSAN_OPTIONS', 'ADB', 'ANDROID_SERIAL',
                     'SANITIZER_IGNORE_CVE_2016_2143', 'TMPDIR', 'TMP', 'TEMP',
                     'TEMPDIR', 'AVRLIT_BOARD', 'AVRLIT_PORT']
        for var in pass_vars:
            val = os.environ.get(var, '')
            # Check for empty string as some variables such as LD_PRELOAD cannot be empty
            # ('') for OS's such as OpenBSD.
            if val:
                environment[var] = val

        if sys.platform == 'win32':
            environment.update({
                    'INCLUDE' : os.environ.get('INCLUDE',''),
                    'PATHEXT' : os.environ.get('PATHEXT',''),
                    'PYTHONUNBUFFERED' : '1',
                    'TEMP' : os.environ.get('TEMP',''),
                    'TMP' : os.environ.get('TMP',''),
                    })

        # Set the default available features based on the LitConfig.
        available_features = []
        if litConfig.useValgrind:
            available_features.append('valgrind')
            if litConfig.valgrindLeakCheck:
                available_features.append('vg_leak')

        return TestingConfig(None,
                             name = '<unnamed>',
                             suffixes = set(),
                             test_format = None,
                             environment = environment,
                             substitutions = [],
                             unsupported = False,
                             test_exec_root = None,
                             test_source_root = None,
                             excludes = [],
                             available_features = available_features,
                             pipefail = True)

    def load_from_path(self, path, litConfig):
        """
        load_from_path(path, litConfig)

        Load the configuration module at the provided path into the given config
        object.
        """

        # Load the config script data.
        data = None
        f = open(path)
        try:
            data = f.read()
        except:
            litConfig.fatal('unable to load config file: %r' % (path,))
        f.close()

        # Execute the config script to initialize the object.
        # The script sees this object as 'config' and the global lit
        # configuration as 'lit_config'.
        cfg_globals = dict(globals())
        cfg_globals['config'] = self
        cfg_globals['lit_config'] = litConfig
        cfg_globals['__file__'] = path
        try:
            exec(compile(data, path, 'exec'), cfg_globals, None)
            if litConfig.debug:
                litConfig.note('... loaded config %r' % path)
        except SystemExit:
            e = sys.exc_info()[1]
            # We allow normal system exit inside a config file to just
            # return control without error.
            if e.args:
                raise
        except:
            import traceback
            litConfig.fatal(
                'unable to parse config file %r, traceback: %s' % (
                    path, traceback.format_exc()))

        self.finish(litConfig)

    def __init__(self, parent, name, suffixes, test_format,
                 environment, substitutions, unsupported,
                 test_exec_root, test_source_root, excludes,
                 available_features, pipefail, limit_to_features = [],
                 is_early = False, parallelism_group = ""):
        self.parent = parent                        # Enclosing config, or None.
        self.name = str(name)
        self.suffixes = set(suffixes)               # File suffixes treated as tests.
        self.test_format = test_format
        self.environment = dict(environment)
        self.substitutions = list(substitutions)
        self.unsupported = unsupported
        self.test_exec_root = test_exec_root
        self.test_source_root = test_source_root
        self.excludes = set(excludes)
        self.available_features = set(available_features)
        self.pipefail = pipefail
        # This list is used by TestRunner.py to restrict running only tests that
        # require one of the features in this list if this list is non-empty.
        # Configurations can set this list to restrict the set of tests to run.
        self.limit_to_features = set(limit_to_features)
        # Whether the suite should be tested early in a given run.
        self.is_early = bool(is_early)
        self.parallelism_group = parallelism_group

    def finish(self, litConfig):
        """finish() - Finish this config object, after loading is complete."""

        # Re-normalize fields that a config script may have replaced with
        # arbitrary iterables/strings.
        self.name = str(self.name)
        self.suffixes = set(self.suffixes)
        self.environment = dict(self.environment)
        self.substitutions = list(self.substitutions)
        if self.test_exec_root is not None:
            # FIXME: This should really only be suite in test suite config
            # files. Should we distinguish them?
            self.test_exec_root = str(self.test_exec_root)
        if self.test_source_root is not None:
            # FIXME: This should really only be suite in test suite config
            # files. Should we distinguish them?
            self.test_source_root = str(self.test_source_root)
        self.excludes = set(self.excludes)

    @property
    def root(self):
        """root attribute - The root configuration for the test suite."""
        if self.parent is None:
            return self
        else:
            return self.parent.root
|
||||
|
10
external/llvm/utils/lit/lit/__init__.py
vendored
Normal file
10
external/llvm/utils/lit/lit/__init__.py
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
"""'lit' Testing Tool"""
|
||||
|
||||
__author__ = 'Daniel Dunbar'
|
||||
__email__ = 'daniel@minormatter.com'
|
||||
__versioninfo__ = (0, 6, 0)
|
||||
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
|
||||
|
||||
__all__ = []
|
||||
|
||||
from .main import main
|
276
external/llvm/utils/lit/lit/discovery.py
vendored
Normal file
276
external/llvm/utils/lit/lit/discovery.py
vendored
Normal file
@ -0,0 +1,276 @@
|
||||
"""
|
||||
Test discovery functions.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import os
|
||||
import sys
|
||||
|
||||
import lit.run
|
||||
from lit.TestingConfig import TestingConfig
|
||||
from lit import LitConfig, Test
|
||||
|
||||
def chooseConfigFileFromDir(dir, config_names):
    """Return the first existing path formed by joining dir with each name
    in config_names, or None when none of them exists."""
    candidates = (os.path.join(dir, name) for name in config_names)
    return next((path for path in candidates if os.path.exists(path)), None)
|
||||
|
||||
def dirContainsTestSuite(path, lit_config):
    """Return the path of a config file marking a test suite in path.

    Site configs take precedence over ordinary configs; returns None (or the
    falsy lookup result) when neither kind is present.
    """
    cfgpath = None
    for names in (lit_config.site_config_names, lit_config.config_names):
        cfgpath = chooseConfigFileFromDir(path, names)
        if cfgpath:
            break
    return cfgpath
|
||||
|
||||
def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """
    # search1 does the real work of probing one directory; search memoizes it
    # on the realpath so each directory is examined at most once per run.
    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path, litConfig)

        # If we didn't find a config file, keep looking.
        if not cfgpath:
            parent,base = os.path.split(path)
            # Reached the filesystem root without finding a config.
            if parent == path:
                return (None, ())

            # Recurse upward and extend the relative path with this component.
            ts, relative = search(parent)
            return (ts, relative + (base,))

        # This is a private builtin parameter which can be used to perform
        # translation of configuration paths. Specifically, this parameter
        # can be set to a dictionary that the discovery process will consult
        # when it finds a configuration it is about to load. If the given
        # path is in the map, the value of that key is a path to the
        # configuration to load instead.
        config_map = litConfig.params.get('config_map')
        if config_map:
            # Canonicalize before the lookup so map keys match reliably.
            cfgpath = os.path.realpath(cfgpath)
            cfgpath = os.path.normcase(cfgpath)
            target = config_map.get(cfgpath)
            if target:
                cfgpath = target

        # We found a test suite, create a new config for it and load it.
        if litConfig.debug:
            litConfig.note('loading suite config %r' % cfgpath)

        cfg = TestingConfig.fromdefaults(litConfig)
        cfg.load_from_path(cfgpath, litConfig)
        # Fall back to the config-file directory when the config does not
        # name explicit roots.
        source_root = os.path.realpath(cfg.test_source_root or path)
        exec_root = os.path.realpath(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Check for an already instantiated test suite.
        real_path = os.path.realpath(path)
        res = cache.get(real_path)
        if res is None:
            cache[real_path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.normpath(os.path.join(os.getcwd(), item))

    # Skip files and virtual components.
    # Peel off trailing non-directory components (e.g. an individual test
    # file, or a subtest name) before searching; they are re-appended to the
    # relative path below.
    components = []
    while not os.path.isdir(item):
        parent,base = os.path.split(item)
        # Hit the root without ever finding a directory: give up.
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))
|
||||
|
||||
def getLocalConfig(ts, path_in_suite, litConfig, cache):
    # Return the TestingConfig in effect for the directory named by
    # path_in_suite inside test suite ts, loading local config files found
    # along the way and memoizing results in `cache`.
    def search1(path_in_suite):
        # Get the parent config.
        if not path_in_suite:
            # At the suite root the parent is the suite's own config.
            parent = ts.config
        else:
            parent = search(path_in_suite[:-1])

        # Check if there is a local configuration file.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = chooseConfigFileFromDir(source_path, litConfig.local_config_names)

        # If not, just reuse the parent config.
        if not cfgpath:
            return parent

        # Otherwise, copy the current config and load the local configuration
        # file into it.
        # deepcopy keeps local overrides from leaking into sibling dirs.
        config = copy.deepcopy(parent)
        if litConfig.debug:
            litConfig.note('loading local config %r' % cfgpath)
        config.load_from_path(cfgpath, litConfig)
        return config

    def search(path_in_suite):
        # Memoize per (suite, path) so each local config loads only once.
        key = (ts, path_in_suite)
        res = cache.get(key)
        if res is None:
            cache[key] = res = search1(path_in_suite)
        return res

    return search(path_in_suite)
|
||||
|
||||
def getTests(path, litConfig, testSuiteCache, localConfigCache):
    """Resolve one input path to (suite, iterator-of-tests).

    Returns ((), ()) after warning when no suite contains the path.
    """
    # Find the test suite for this input and its relative path.
    suite, rel_path = getTestSuite(path, litConfig, testSuiteCache)
    if suite is None:
        litConfig.warning('unable to find test suite for %r' % path)
        return (), ()

    if litConfig.debug:
        litConfig.note('resolved input %r to %r::%r' % (path, suite.name,
                                                        rel_path))

    return suite, getTestsInSuite(suite, rel_path, litConfig,
                                  testSuiteCache, localConfigCache)
|
||||
|
||||
def getTestsInSuite(ts, path_in_suite, litConfig,
                    testSuiteCache, localConfigCache):
    # Generator yielding Test objects found under path_in_suite within suite
    # ts, recursing into subdirectories and any nested test suites.

    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        # Local config comes from the containing directory, hence [:-1].
        lc = getLocalConfig(ts, path_in_suite[:-1], litConfig, localConfigCache)
        yield Test.Test(ts, path_in_suite, lc)
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite,
                                                      litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        # Skip VCS/output dirs and anything the local config excludes.
        if filename in ('Output', '.svn', '.git') or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        subpath = path_in_suite + (filename,)
        file_execpath = ts.getExecPath(subpath)
        if dirContainsTestSuite(file_execpath, litConfig):
            sub_ts, subpath_in_suite = getTestSuite(file_execpath, litConfig,
                                                    testSuiteCache)
        elif dirContainsTestSuite(file_sourcepath, litConfig):
            sub_ts, subpath_in_suite = getTestSuite(file_sourcepath, litConfig,
                                                    testSuiteCache)
        else:
            sub_ts = None

        # If the this directory recursively maps back to the current test suite,
        # disregard it (this can happen if the exec root is located inside the
        # current test suite, for example).
        if sub_ts is ts:
            continue

        # Otherwise, load from the nested test suite, if present.
        if sub_ts is not None:
            subiter = getTestsInSuite(sub_ts, subpath_in_suite, litConfig,
                                      testSuiteCache, localConfigCache)
        else:
            subiter = getTestsInSuite(ts, subpath, litConfig, testSuiteCache,
                                      localConfigCache)

        # Count yielded tests so empty nested suites can be diagnosed.
        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning('test suite %r contained no tests' % sub_ts.name)
|
||||
|
||||
def find_tests_for_inputs(lit_config, inputs):
    """
    find_tests_for_inputs(lit_config, inputs) -> [Test]

    Given a configuration object and a list of input specifiers, find all the
    tests to execute.

    Exits the process (status 2) if discovery recorded any errors.
    """
    # Expand '@...' form in inputs: '@file' names a response file holding one
    # input specifier per line; blank lines are skipped.
    actual_inputs = []
    for spec in inputs:
        if spec.startswith('@'):
            # `with` guarantees the file is closed even if iteration raises
            # (the original used an equivalent try/finally).
            with open(spec[1:]) as f:
                for ln in f:
                    ln = ln.strip()
                    if ln:
                        actual_inputs.append(ln)
        else:
            actual_inputs.append(spec)

    # Load the tests from the inputs, sharing discovery caches across inputs.
    tests = []
    test_suite_cache = {}
    local_config_cache = {}
    for spec in actual_inputs:
        prev = len(tests)
        tests.extend(getTests(spec, lit_config,
                              test_suite_cache, local_config_cache)[1])
        # Warn when an input resolved but produced no tests.
        if prev == len(tests):
            lit_config.warning('input %r contained no tests' % spec)

    # If there were any errors during test discovery, exit now.
    if lit_config.numErrors:
        sys.stderr.write('%d errors, exiting.\n' % lit_config.numErrors)
        sys.exit(2)

    return tests
|
||||
|
||||
def load_test_suite(inputs):
    """Discover the tests named by inputs and wrap them in a
    unittest.TestSuite that runs them in discovery order."""
    import platform
    import unittest
    from lit.LitTestCase import LitTestCase

    # Create the global config object.
    lit_config = LitConfig.LitConfig(progname = 'lit',
                                     path = [],
                                     quiet = False,
                                     useValgrind = False,
                                     valgrindLeakCheck = False,
                                     valgrindArgs = [],
                                     singleProcess=False,
                                     noExecute = False,
                                     debug = False,
                                     isWindows = (platform.system()=='Windows'),
                                     params = {})

    # Perform test discovery.
    run = lit.run.Run(lit_config, find_tests_for_inputs(lit_config, inputs))

    # Return a unittest test suite which just runs the tests in order.
    return unittest.TestSuite(LitTestCase(test, run) for test in run.tests)
|
8
external/llvm/utils/lit/lit/formats/__init__.py
vendored
Normal file
8
external/llvm/utils/lit/lit/formats/__init__.py
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
from lit.formats.base import ( # noqa: F401
|
||||
TestFormat,
|
||||
FileBasedTest,
|
||||
OneCommandPerFileTest
|
||||
)
|
||||
|
||||
from lit.formats.googletest import GoogleTest # noqa: F401
|
||||
from lit.formats.shtest import ShTest # noqa: F401
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user