Bug 746829 - Group jsreftest options by category; r=dmandelin
Should be a mild usability improvement. Also reorganizes the code to make future changes easier.
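The mechanism behind the grouping is optparse's OptionGroup: related options are registered on a named group object, and the group is attached to the parser, so --help output is sectioned by category. A minimal sketch of the pattern using only stock optparse (the flag shown mirrors one of the harness's real options; the other groups and flags are omitted):

    from optparse import OptionParser, OptionGroup

    op = OptionParser(usage='%prog [OPTIONS] JS_SHELL [TESTS]')
    harness_og = OptionGroup(op, "Harness Controls", "Control how tests are run.")
    harness_og.add_option('-j', '--worker-count', type=int, default=2,
                          help='Number of tests to run in parallel (default 2)')
    op.add_option_group(harness_og)
    options, args = op.parse_args()
    # optparse derives the attribute name from the long flag name:
    print options.worker_count

With groups, --help prints each group's title and description as a section heading, which is the usability improvement the commit message refers to.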
parent 07470e550b
commit 5d86dbd378
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -1,14 +1,15 @@
 #!/usr/bin/env python
-# Test harness for JSTests, controlled by manifest files.
+"""
+The JS Shell Test Harness.
+
+See the adjacent README.txt for more details.
+"""
 
-import datetime, os, sys
+import os, sys
 from subprocess import list2cmdline, call
 
 from results import NullTestOutput
 from tests import TestCase
-from tasks_win import Source
-from progressbar import ProgressBar
 from results import ResultsSink
 
 if (sys.platform.startswith('linux') or
@@ -18,20 +19,10 @@ if (sys.platform.startswith('linux') or
 else:
     from tasks_win import run_all_tests
 
-def exclude_tests(test_list, exclude_files):
-    exclude_paths = []
-    for filename in exclude_files:
-        for line in open(filename):
-            if line.startswith('#'): continue
-            line = line.strip('\n')
-            if not line: continue
-            exclude_paths.append(line)
-    return [ _ for _ in test_list if _.path not in exclude_paths ]
-
-def run_tests(tests, results):
+def run_tests(options, tests, results):
     """Run the given tests, sending raw results to the given results accumulator."""
     pb = None
-    if not OPTIONS.hide_progress:
+    if not options.hide_progress:
        try:
            from progressbar import ProgressBar
            pb = ProgressBar('', len(tests), 16)
@@ -40,175 +31,227 @@ def run_tests(tests, results):
     results.pb = pb
 
     try:
-        results.finished = run_all_tests(tests, results, OPTIONS)
+        results.finished = run_all_tests(tests, results, options)
     except KeyboardInterrupt:
         results.finished = False
 
     if pb:
         pb.finish()
 
-    if not OPTIONS.tinderbox:
+    if not options.tinderbox:
         results.list()
 
-if __name__ == '__main__':
-    from optparse import OptionParser
-    op = OptionParser(usage='%prog JS_SHELL [TEST-SPECS]')
-    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
-                  help='show js shell command run')
-    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
-                  help='show output from js shell')
-    op.add_option('-O', '--output-file', dest='output_file',
-                  help='write command output to the given file')
-    op.add_option('-f', '--file', dest='test_file', action='append',
-                  help='get tests from the given file')
-    op.add_option('-x', '--exclude-file', dest='exclude_file', action='append',
-                  help='exclude tests from the given file')
-    op.add_option('--no-progress', dest='hide_progress', action='store_true',
-                  help='hide progress bar')
-    op.add_option('-j', '--worker-count', dest='worker_count', type=int, default=2,
-                  help='number of worker threads to run tests on (default 2)')
-    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
-                  help='set test timeout in seconds')
-    op.add_option('-d', '--exclude-random', dest='random', action='store_false',
-                  help='exclude tests marked random', default=True)
-    op.add_option('--run-skipped', dest='run_skipped', action='store_true',
-                  help='run skipped tests')
-    op.add_option('--run-only-skipped', dest='run_only_skipped', action='store_true',
-                  help='run only skipped tests')
-    op.add_option('--tinderbox', dest='tinderbox', action='store_true',
-                  help='Tinderbox-parseable output format')
-    op.add_option('--args', dest='shell_args', default='',
-                  help='extra args to pass to the JS shell')
-    op.add_option('-g', '--debug', dest='debug', action='store_true',
-                  help='run test in debugger')
-    op.add_option('--debugger', dest='debugger', default='gdb -q --args',
-                  help='debugger command')
-    op.add_option('--valgrind', dest='valgrind', action='store_true',
-                  help='run tests in valgrind')
-    op.add_option('--valgrind-args', dest='valgrind_args',
-                  help='extra args to pass to valgrind')
-    op.add_option('--failure-file', dest='failure_file',
-                  help='write tests that have not passed to the given file')
-    op.add_option('--run-slow-tests', dest='run_slow_tests', action='store_true',
-                  help='run particularly slow tests as well as average-speed tests')
+def parse_args():
+    """
+    Parse command line arguments.
+    Returns a tuple of: (options, js_shell, requested_paths, excluded_paths)
+        options :object: The raw OptionParser output.
+        js_shell :str: The absolute location of the shell to test with.
+        requested_paths :set<str>: Test paths specially requested on the CLI.
+        excluded_paths :set<str>: Test paths specifically excluded by the CLI.
+    """
+    from optparse import OptionParser, OptionGroup
+    op = OptionParser(usage='%prog [OPTIONS] JS_SHELL [TESTS]')
     op.add_option('--xul-info', dest='xul_info_src',
                   help='config data for xulRuntime (avoids search for config/autoconf.mk)')
-    op.add_option('--no-extensions', dest='no_extensions', action='store_true',
-                  help='run only tests conforming to the ECMAScript 5 standard')
-    op.add_option('--make-manifests', dest='make_manifests',
-                  help='generate manifest files for the reftest harness')
-    (OPTIONS, args) = op.parse_args()
-    if len(args) < 1:
-        if not OPTIONS.make_manifests:
+
+    harness_og = OptionGroup(op, "Harness Controls", "Control how tests are run.")
+    num_workers = 2
+    num_workers_help = 'Number of tests to run in parallel (default %s)' % num_workers
+    harness_og.add_option('-j', '--worker-count', type=int,
+                          default=num_workers, help=num_workers_help)
+    harness_og.add_option('-t', '--timeout', type=float, default=150.0,
+                          help='Set maximum time a test is allowed to run (in seconds).')
+    harness_og.add_option('-a', '--args', dest='shell_args', default='',
+                          help='Extra args to pass to the JS shell.')
+    harness_og.add_option('-g', '--debug', action='store_true', help='Run a test in debugger.')
+    harness_og.add_option('--debugger', default='gdb -q --args', help='Debugger command.')
+    harness_og.add_option('--valgrind', action='store_true', help='Run tests in valgrind.')
+    harness_og.add_option('--valgrind-args', default='', help='Extra args to pass to valgrind.')
+    op.add_option_group(harness_og)
+
+    input_og = OptionGroup(op, "Inputs", "Change what tests are run.")
+    input_og.add_option('-f', '--file', dest='test_file', action='append',
+                        help='Get tests from the given file.')
+    input_og.add_option('-x', '--exclude-file', action='append',
+                        help='Exclude tests from the given file.')
+    input_og.add_option('-d', '--exclude-random', dest='random', action='store_false',
+                        help='Exclude tests marked as "random."')
+    input_og.add_option('--run-skipped', action='store_true', help='Run tests marked as "skip."')
+    input_og.add_option('--run-only-skipped', action='store_true', help='Run only tests marked as "skip."')
+    input_og.add_option('--run-slow-tests', action='store_true',
+                        help='Do not skip tests marked as "slow."')
+    input_og.add_option('--no-extensions', action='store_true',
+                        help='Run only tests conforming to the ECMAScript 5 standard.')
+    op.add_option_group(input_og)
+
+    output_og = OptionGroup(op, "Output", "Modify the harness and tests output.")
+    output_og.add_option('-s', '--show-cmd', action='store_true',
+                         help='Show exact commandline used to run each test.')
+    output_og.add_option('-o', '--show-output', action='store_true',
+                         help="Print each test's output to stdout.")
+    output_og.add_option('-O', '--output-file',
+                         help='Write all output to the given file.')
+    output_og.add_option('--failure-file',
+                         help='Write all not-passed tests to the given file.')
+    output_og.add_option('--no-progress', dest='hide_progress', action='store_true',
+                         help='Do not show the progress bar.')
+    output_og.add_option('--tinderbox', action='store_true',
+                         help='Use tinderbox-parseable output format.')
+    op.add_option_group(output_og)
+
+    special_og = OptionGroup(op, "Special", "Special modes that do not run tests.")
+    special_og.add_option('--make-manifests', metavar='BASE_TEST_PATH',
+                          help='Generate reftest manifest files.')
+    op.add_option_group(special_og)
+    options, args = op.parse_args()
+
+    # Acquire the JS shell given on the command line.
+    js_shell = None
+    requested_paths = set()
+    if len(args) > 0:
+        js_shell = os.path.abspath(args[0])
+        requested_paths |= set(args[1:])
+
+    # If we do not have a shell, we must be in a special mode.
+    if js_shell is None and not options.make_manifests:
         op.error('missing JS_SHELL argument')
-        JS, args = None, []
-    else:
-        JS, args = args[0], args[1:]
-    # Convert to an absolute path so we can run JS from a different directory.
-    if JS is not None:
-        JS = os.path.abspath(JS)
-
-    if OPTIONS.debug:
-        if OPTIONS.valgrind:
-            print >> sys.stderr, "--debug and --valgrind options are mutually exclusive"
-            sys.exit(2)
-        debugger_prefix = OPTIONS.debugger.split(' ')
-    elif OPTIONS.valgrind:
-        debugger_prefix = ['valgrind']
+
+    # Valgrind and gdb are mutually exclusive.
+    if options.valgrind and options.debug:
+        op.error("--valgrind and --debug are mutually exclusive.")
+
+    # Fill the debugger field, as needed.
+    prefix = options.debugger.split() if options.debug else []
+    if options.valgrind:
+        prefix = ['valgrind'] + options.valgrind_args.split()
         if os.uname()[0] == 'Darwin':
-            debugger_prefix.append('--dsymutil=yes')
-        if OPTIONS.valgrind_args:
-            debugger_prefix.append(OPTIONS.valgrind_args)
+            prefix.append('--dsymutil=yes')
         # Running under valgrind is not very useful if we don't show results.
-        OPTIONS.show_output = True
-    else:
-        debugger_prefix = []
+        options.show_output = True
+    TestCase.set_js_cmd_prefix(js_shell, options.shell_args.split(), prefix)
 
-    TestCase.set_js_cmd_prefix(JS, OPTIONS.shell_args.split(), debugger_prefix)
+    # If files with lists of tests to run were specified, add them to the
+    # requested tests set.
+    if options.test_file:
+        for test_file in options.test_file:
+            requested_paths |= set([line.strip() for line in open(test_file).readlines()])
+
+    # If files with lists of tests to exclude were specified, add them to the
+    # excluded tests set.
+    excluded_paths = set()
+    if options.exclude_file:
+        for filename in options.exclude_file:
+            fp = open(filename, 'r')
+            try:
+                for line in fp:
+                    if line.startswith('#'): continue
+                    line = line.strip()
+                    if not line: continue
+                    excluded_paths |= set((line,))
+            finally:
+                fp.close()
+
+    # Handle output redirection, if requested and relevant.
     output_file = sys.stdout
-    if OPTIONS.output_file and (OPTIONS.show_cmd or OPTIONS.show_output):
-        output_file = open(OPTIONS.output_file, 'w')
+    if options.output_file and (options.show_cmd or options.show_output):
+        output_file = open(options.output_file, 'w')
+    ResultsSink.output_file = output_file
 
-    if ((OPTIONS.show_cmd or OPTIONS.show_output) and
-        output_file == sys.stdout or OPTIONS.tinderbox):
-        OPTIONS.hide_progress = True
+    # Hide the progress bar if it will get in the way of other output.
+    if ((options.show_cmd or options.show_output) and
+        output_file == sys.stdout or options.tinderbox):
+        options.hide_progress = True
+
+    return (options, js_shell, requested_paths, excluded_paths)
+
+def load_tests(options, js_shell, requested_paths, excluded_paths):
+    """
+    Returns a tuple: (skip_list, test_list)
+        skip_list: [iterable<Test>] Tests found but skipped.
+        test_list: [iterable<Test>] Tests found that should be run.
+    """
     import manifest
 
-    if JS is None:
+    if js_shell is None:
         xul_tester = manifest.NullXULInfoTester()
     else:
-        if OPTIONS.xul_info_src is None:
-            xul_info = manifest.XULInfo.create(JS)
+        if options.xul_info_src is None:
+            xul_info = manifest.XULInfo.create(js_shell)
         else:
-            xul_abi, xul_os, xul_debug = OPTIONS.xul_info_src.split(r':')
+            xul_abi, xul_os, xul_debug = options.xul_info_src.split(r':')
            xul_debug = xul_debug.lower() == 'true'
            xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
-        xul_tester = manifest.XULInfoTester(xul_info, JS)
+        xul_tester = manifest.XULInfoTester(xul_info, js_shell)
 
     test_dir = os.path.dirname(os.path.abspath(__file__))
     test_list = manifest.load(test_dir, xul_tester)
-    skipped_list = []
+    skip_list = []
 
-    if OPTIONS.make_manifests:
-        manifest.make_manifests(OPTIONS.make_manifests, test_list)
-        if JS is None:
-            sys.exit()
+    if options.make_manifests:
+        manifest.make_manifests(options.make_manifests, test_list)
+        sys.exit()
 
-    if OPTIONS.test_file:
+    if options.test_file:
         paths = set()
-        for test_file in OPTIONS.test_file:
+        for test_file in options.test_file:
             paths |= set([ line.strip() for line in open(test_file).readlines()])
         test_list = [ _ for _ in test_list if _.path in paths ]
 
-    if args:
+    if requested_paths:
         def p(path):
-            for arg in args:
+            for arg in requested_paths:
                 if path.find(arg) != -1:
                     return True
             return False
         test_list = [ _ for _ in test_list if p(_.path) ]
 
-    if OPTIONS.exclude_file:
-        test_list = exclude_tests(test_list, OPTIONS.exclude_file)
+    if options.exclude_file:
+        test_list = [_ for _ in test_list if _.path not in excluded_paths]
 
-    if OPTIONS.no_extensions:
+    if options.no_extensions:
         pattern = os.sep + 'extensions' + os.sep
         test_list = [_ for _ in test_list if pattern not in _.path]
 
-    if not OPTIONS.random:
+    if not options.random:
         test_list = [ _ for _ in test_list if not _.random ]
 
-    if OPTIONS.run_only_skipped:
-        OPTIONS.run_skipped = True
+    if options.run_only_skipped:
+        options.run_skipped = True
         test_list = [ _ for _ in test_list if not _.enable ]
 
-    if not OPTIONS.run_slow_tests:
+    if not options.run_slow_tests:
         test_list = [ _ for _ in test_list if not _.slow ]
 
-    if not OPTIONS.run_skipped:
-        skipped_list = [ _ for _ in test_list if not _.enable ]
+    if not options.run_skipped:
+        skip_list = [ _ for _ in test_list if not _.enable ]
         test_list = [ _ for _ in test_list if _.enable ]
 
+    return skip_list, test_list
+
+def main():
+    options, js_shell, requested_paths, excluded_paths = parse_args()
+    skip_list, test_list = load_tests(options, js_shell, requested_paths, excluded_paths)
+
     if not test_list:
         print 'no tests selected'
-        sys.exit(1)
+        return 1
 
-    if OPTIONS.debug:
+    test_dir = os.path.dirname(os.path.abspath(__file__))
+
+    if options.debug:
         if len(test_list) > 1:
             print('Multiple tests match command line arguments, debugger can only run one')
             for tc in test_list:
                 print(' %s'%tc.path)
-            sys.exit(2)
+            return 2
 
         cmd = test_list[0].get_command(TestCase.js_cmd_prefix)
-        if OPTIONS.show_cmd:
+        if options.show_cmd:
             print list2cmdline(cmd)
+        if test_dir not in ('', '.'):
+            os.chdir(test_dir)
         call(cmd)
-        sys.exit()
+        return 0
 
     curdir = os.getcwd()
     if test_dir not in ('', '.'):
@@ -216,15 +259,20 @@ if __name__ == '__main__':
 
     results = None
     try:
-        results = ResultsSink(output_file, OPTIONS)
-        for t in skipped_list:
+        results = ResultsSink(ResultsSink.output_file, options)
+        for t in skip_list:
             results.push(NullTestOutput(t))
-        run_tests(test_list, results)
+        run_tests(options, test_list, results)
     finally:
         os.chdir(curdir)
 
-    if output_file != sys.stdout:
-        output_file.close()
+    if ResultsSink.output_file != sys.stdout:
+        ResultsSink.output_file.close()
 
     if results is None or not results.all_passed():
-        sys.exit(1)
+        return 1
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
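
Beyond regrouping the options, the jstests.py hunks above fold the old module-level __main__ block into parse_args(), load_tests(), and main(), and replace the scattered sys.exit() calls with return codes so the process exits in exactly one place. A minimal sketch of that shape (the empty test_list is a stand-in for what parse_args() and load_tests() produce in the real harness):

    import sys

    def main():
        test_list = []  # stand-in; the harness builds this from manifests
        if not test_list:
            print 'no tests selected'
            return 1
        return 0

    if __name__ == '__main__':
        sys.exit(main())

Returning a status instead of calling sys.exit() mid-function keeps main() callable from other Python code without killing the interpreter.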
--- a/js/src/tests/results.py
+++ b/js/src/tests/results.py
@@ -4,7 +4,6 @@
 
 import os, os.path, re, sys
 from subprocess import *
-from datetime import datetime
 
 from tests import TestCase
 
@@ -91,7 +91,7 @@ class ResultsSink:
     def push(self, output):
         if isinstance(output, NullTestOutput):
             if self.options.tinderbox:
-                print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
+                self.print_tinderbox_result('TEST-KNOWN-FAIL', output.test.path, time=output.dt, skip=True)
             self.counts[2] += 1
             self.n += 1
         else:
@@ -125,8 +125,8 @@ class ResultsSink:
             label = self.LABELS[(sub_ok, result.test.expect, result.test.random)][0]
             if label == 'TEST-UNEXPECTED-PASS':
                 label = 'TEST-PASS (EXPECTED RANDOM)'
-            print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
-            print_tinderbox_result(self.LABELS[
+            self.print_tinderbox_result(label, result.test.path, time=output.dt, message=msg)
+            self.print_tinderbox_result(self.LABELS[
                 (result.result, result.test.expect, result.test.random)][0],
                 result.test.path, time=output.dt)
 
@@ -165,7 +165,11 @@ class ResultsSink:
         if self.options.failure_file:
             failure_file = open(self.options.failure_file, 'w')
             if not self.all_passed():
-                for path in self.groups['REGRESSIONS'] + self.groups['TIMEOUTS']:
-                    print >> failure_file, path
+                if 'REGRESSIONS' in self.groups:
+                    for path in self.groups['REGRESSIONS']:
+                        print >> failure_file, path
+                if 'TIMEOUTS' in self.groups:
+                    for path in self.groups['TIMEOUTS']:
+                        print >> failure_file, path
             failure_file.close()
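
The final hunk guards each lookup because self.groups appears to hold keys only for result categories that actually occurred, so indexing a missing 'REGRESSIONS' or 'TIMEOUTS' key would raise KeyError. Assuming self.groups maps a label to a list of test paths, a more compact equivalent would be dict.get with a default:

    for path in self.groups.get('REGRESSIONS', []) + self.groups.get('TIMEOUTS', []):
        print >> failure_file, path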