Backed out changeset 076fc4bc773c (bug 925398) for Gu bustage on a CLOSED TREE.

Ryan VanderMeulen 2013-12-05 14:26:41 -05:00
parent 7db0fde37d
commit 75e87f8bee
12 changed files with 904 additions and 1641 deletions


@@ -5,7 +5,10 @@
from gestures import *
from by import By
from marionette import Marionette, HTMLElement, Actions, MultiActions
from marionette_test import MarionetteTestCase, MarionetteJSTestCase, CommonTestCase, expectedFailure, skip, SkipTest
from marionette_test import MarionetteTestCase, CommonTestCase, expectedFailure, skip, SkipTest
from emulator import Emulator
from runtests import MarionetteTestResult
from runtests import MarionetteTestRunner
from runtests import MarionetteTestOptions
from runtests import MarionetteTextTestRunner
from errors import *
from runner import *

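For orientation, a minimal usage sketch of the names re-exported here (not part of this changeset; the host and port below are assumptions about a locally running Gecko instance with Marionette enabled):

from marionette import Marionette

# Connect to an already-running Gecko instance and drive it briefly.
client = Marionette(host='localhost', port=2828)
client.start_session()
client.navigate('http://example.org/')
client.delete_session()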

@@ -1,6 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from base import *
from mixins import *


@@ -1,917 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from optparse import OptionParser
from datetime import datetime
import logging
import os
import unittest
import socket
import sys
import time
import traceback
import random
import moznetwork
import xml.dom.minidom as dom
from manifestparser import TestManifest
from mozhttpd import MozHttpd
from marionette import Marionette
from moztest.results import TestResultCollection, TestResult, relevant_line
class MarionetteTest(TestResult):
@property
def test_name(self):
if self.test_class is not None:
return '%s.py %s.%s' % (self.test_class.split('.')[0],
self.test_class,
self.name)
else:
return self.name
class MarionetteTestResult(unittest._TextTestResult, TestResultCollection):
resultClass = MarionetteTest
def __init__(self, *args, **kwargs):
self.marionette = kwargs.pop('marionette')
TestResultCollection.__init__(self, 'MarionetteTest')
unittest._TextTestResult.__init__(self, *args, **kwargs)
self.passed = 0
self.testsRun = 0
self.result_modifiers = [] # used by mixins to modify the result
@property
def skipped(self):
return [t for t in self if t.result == 'SKIPPED']
@skipped.setter
def skipped(self, value):
pass
@property
def expectedFailures(self):
return [t for t in self if t.result == 'KNOWN-FAIL']
@expectedFailures.setter
def expectedFailures(self, value):
pass
@property
def unexpectedSuccesses(self):
return [t for t in self if t.result == 'UNEXPECTED-PASS']
@unexpectedSuccesses.setter
def unexpectedSuccesses(self, value):
pass
@property
def tests_passed(self):
return [t for t in self if t.result == 'PASS']
@property
def errors(self):
return [t for t in self if t.result == 'ERROR']
@errors.setter
def errors(self, value):
pass
@property
def failures(self):
return [t for t in self if t.result == 'UNEXPECTED-FAIL']
@failures.setter
def failures(self, value):
pass
@property
def duration(self):
if self.stop_time:
return self.stop_time - self.start_time
else:
return 0
def add_test_result(self, test, result_expected='PASS',
result_actual='PASS', output='', context=None, **kwargs):
def get_class(test):
return test.__class__.__module__ + '.' + test.__class__.__name__
name = str(test).split()[0]
test_class = get_class(test)
if hasattr(test, 'jsFile'):
name = os.path.basename(test.jsFile)
test_class = None
t = self.resultClass(name=name, test_class=test_class,
time_start=test.start_time, result_expected=result_expected,
context=context, **kwargs)
# call any registered result modifiers
for modifier in self.result_modifiers:
modifier(t, result_expected, result_actual, output, context)
t.finish(result_actual,
time_end=time.time() if test.start_time else 0,
reason=relevant_line(output),
output=output)
self.append(t)
def addError(self, test, err):
self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='ERROR')
self._mirrorOutput = True
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='UNEXPECTED-FAIL')
self._mirrorOutput = True
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSuccess(self, test):
self.passed += 1
self.add_test_result(test, result_actual='PASS')
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.add_test_result(test, output=self._exc_info_to_string(err, test),
result_actual='KNOWN-FAIL')
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.add_test_result(test, result_actual='UNEXPECTED-PASS')
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def addSkip(self, test, reason):
self.add_test_result(test, output=reason, result_actual='SKIPPED')
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def getInfo(self, test):
return test.test_name
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
desc = str(test)
if hasattr(test, 'jsFile'):
desc = "%s, %s" % (test.jsFile, desc)
return desc
def printLogs(self, test):
for testcase in test._tests:
if hasattr(testcase, 'loglines') and testcase.loglines:
# Don't dump loglines to the console if they only contain
# TEST-START and TEST-END.
skip_log = True
for line in testcase.loglines:
str_line = ' '.join(line)
if not 'TEST-END' in str_line and not 'TEST-START' in str_line:
skip_log = False
break
if skip_log:
return
self.stream.writeln('\nSTART LOG:')
for line in testcase.loglines:
self.stream.writeln(' '.join(line).encode('ascii', 'replace'))
self.stream.writeln('END LOG:')
def printErrorList(self, flavour, errors):
for error in errors:
err = error.output
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, error.description))
self.stream.writeln(self.separator2)
lastline = None
fail_present = None
for line in err:
if not line.startswith('\t'):
lastline = line
if 'TEST-UNEXPECTED-FAIL' in line:
fail_present = True
for line in err:
if line != lastline or fail_present:
self.stream.writeln("%s" % line)
else:
self.stream.writeln("TEST-UNEXPECTED-FAIL | %s | %s" %
(self.getInfo(error), line))
def stopTest(self, *args, **kwargs):
unittest._TextTestResult.stopTest(self, *args, **kwargs)
if self.marionette.check_for_crash():
# this tells unittest.TestSuite not to continue running tests
self.shouldStop = True
class MarionetteTextTestRunner(unittest.TextTestRunner):
resultclass = MarionetteTestResult
def __init__(self, **kwargs):
self.marionette = kwargs['marionette']
del kwargs['marionette']
unittest.TextTestRunner.__init__(self, **kwargs)
def _makeResult(self):
return self.resultclass(self.stream,
self.descriptions,
self.verbosity,
marionette=self.marionette)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
if hasattr(self, 'failfast'):
result.failfast = self.failfast
if hasattr(self, 'buffer'):
result.buffer = self.buffer
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
if hasattr(result, 'time_taken'):
result.time_taken = stopTime - startTime
result.printLogs(test)
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", result.time_taken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
class BaseMarionetteOptions(OptionParser):
def __init__(self, **kwargs):
OptionParser.__init__(self, **kwargs)
self.parse_args_handlers = [] # Used by mixins
self.verify_usage_handlers = [] # Used by mixins
self.add_option('--autolog',
action='store_true',
dest='autolog',
default=False,
help='send test results to autolog')
self.add_option('--revision',
action='store',
dest='revision',
help='git revision for autolog submissions')
self.add_option('--testgroup',
action='store',
dest='testgroup',
help='testgroup names for autolog submissions')
self.add_option('--emulator',
action='store',
dest='emulator',
choices=['x86', 'arm'],
help='if no --address is given, then the harness will launch a B2G emulator on which to run '
'emulator tests. if --address is given, then the harness assumes you are running an '
'emulator already, and will run the emulator tests using that emulator. you need to '
'specify which architecture to emulate for both cases')
self.add_option('--emulator-binary',
action='store',
dest='emulatorBinary',
help='launch a specific emulator binary rather than launching from the B2G built emulator')
self.add_option('--emulator-img',
action='store',
dest='emulatorImg',
help='use a specific image file instead of a fresh one')
self.add_option('--emulator-res',
action='store',
dest='emulator_res',
type='str',
help='set a custom resolution for the emulator. '
'Example: "480x800"')
self.add_option('--sdcard',
action='store',
dest='sdcard',
help='size of sdcard to create for the emulator')
self.add_option('--no-window',
action='store_true',
dest='noWindow',
default=False,
help='when Marionette launches an emulator, start it with the -no-window argument')
self.add_option('--logcat-dir',
dest='logcat_dir',
action='store',
help='directory to store logcat dump files')
self.add_option('--address',
dest='address',
action='store',
help='host:port of running Gecko instance to connect to')
self.add_option('--device',
dest='device_serial',
action='store',
help='serial ID of a device to use for adb / fastboot')
self.add_option('--type',
dest='type',
action='store',
default='browser+b2g',
help="the type of test to run, can be a combination of values defined in the manifest file; "
"individual values are combined with '+' or '-' characters. for example: 'browser+b2g' "
"means the set of tests which are compatible with both browser and b2g; 'b2g-qemu' means "
"the set of tests which are compatible with b2g but do not require an emulator. this "
"argument is only used when loading tests from manifest files")
self.add_option('--homedir',
dest='homedir',
action='store',
help='home directory of emulator files')
self.add_option('--app',
dest='app',
action='store',
help='application to use')
self.add_option('--app-arg',
dest='app_args',
action='append',
default=[],
help='specify a command line argument to be passed onto the application')
self.add_option('--binary',
dest='bin',
action='store',
help='gecko executable to launch before running the test')
self.add_option('--profile',
dest='profile',
action='store',
help='profile to use when launching the gecko process. if not passed, then a profile will be '
'constructed and used')
self.add_option('--repeat',
dest='repeat',
action='store',
type=int,
default=0,
help='number of times to repeat the test(s)')
self.add_option('-x', '--xml-output',
action='store',
dest='xml_output',
help='xml output')
self.add_option('--gecko-path',
dest='gecko_path',
action='store',
help='path to b2g gecko binaries that should be installed on the device or emulator')
self.add_option('--testvars',
dest='testvars',
action='store',
help='path to a json file with any test data required')
self.add_option('--tree',
dest='tree',
action='store',
default='b2g',
help='the tree that the revision parameter refers to')
self.add_option('--symbols-path',
dest='symbols_path',
action='store',
help='absolute path to directory containing breakpad symbols, or the url of a zip file containing symbols')
self.add_option('--timeout',
dest='timeout',
type=int,
help='if a --timeout value is given, it will set the default page load timeout, search timeout and script timeout to the given value. If not passed in, it will use the default values of 30000ms for page load, 0ms for search timeout and 10000ms for script timeout')
self.add_option('--es-server',
dest='es_servers',
action='append',
help='the ElasticSearch server to use for autolog submission')
self.add_option('--shuffle',
action='store_true',
dest='shuffle',
default=False,
help='run tests in a random order')
def parse_args(self, args=None, values=None):
options, tests = OptionParser.parse_args(self, args, values)
for handler in self.parse_args_handlers:
handler(options, tests, args, values)
return (options, tests)
def verify_usage(self, options, tests):
if not tests:
print 'must specify one or more test files, manifests, or directories'
sys.exit(1)
if not options.emulator and not options.address and not options.bin:
print 'must specify --binary, --emulator or --address'
sys.exit(1)
if not options.es_servers:
options.es_servers = ['elasticsearch-zlb.dev.vlan81.phx.mozilla.com:9200',
'elasticsearch-zlb.webapp.scl3.mozilla.com:9200']
# default to storing logcat output for emulator runs
if options.emulator and not options.logcat_dir:
options.logcat_dir = 'logcat'
# check for valid resolution string, strip whitespaces
try:
if options.emulator_res:
dims = options.emulator_res.split('x')
assert len(dims) == 2
width = str(int(dims[0]))
height = str(int(dims[1]))
options.emulator_res = 'x'.join([width, height])
except:
raise ValueError('Invalid emulator resolution format. '
'Should be like "480x800".')
for handler in self.verify_usage_handlers:
handler(options, tests)
return (options, tests)
class BaseMarionetteTestRunner(object):
textrunnerclass = MarionetteTextTestRunner
def __init__(self, address=None, emulator=None, emulatorBinary=None,
emulatorImg=None, emulator_res='480x800', homedir=None,
app=None, app_args=None, bin=None, profile=None, autolog=False,
revision=None, logger=None, testgroup="marionette", noWindow=False,
logcat_dir=None, xml_output=None, repeat=0, gecko_path=None,
testvars=None, tree=None, type=None, device_serial=None,
symbols_path=None, timeout=None, es_servers=None, shuffle=False,
sdcard=None, **kwargs):
self.address = address
self.emulator = emulator
self.emulatorBinary = emulatorBinary
self.emulatorImg = emulatorImg
self.emulator_res = emulator_res
self.homedir = homedir
self.app = app
self.app_args = app_args or []
self.bin = bin
self.profile = profile
self.autolog = autolog
self.testgroup = testgroup
self.revision = revision
self.logger = logger
self.noWindow = noWindow
self.httpd = None
self.baseurl = None
self.marionette = None
self.logcat_dir = logcat_dir
self.xml_output = xml_output
self.repeat = repeat
self.gecko_path = gecko_path
self.testvars = {}
self.test_kwargs = kwargs
self.tree = tree
self.type = type
self.device_serial = device_serial
self.symbols_path = symbols_path
self.timeout = timeout
self._device = None
self._capabilities = None
self._appName = None
self.es_servers = es_servers
self.shuffle = shuffle
self.sdcard = sdcard
self.mixin_run_tests = []
if testvars:
if not os.path.exists(testvars):
raise Exception('--testvars file does not exist')
import json
with open(testvars) as f:
self.testvars = json.loads(f.read())
# set up test handlers
self.test_handlers = []
self.reset_test_stats()
if self.logger is None:
self.logger = logging.getLogger('Marionette')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
if self.logcat_dir:
if not os.access(self.logcat_dir, os.F_OK):
os.mkdir(self.logcat_dir)
# for XML output
self.testvars['xml_output'] = self.xml_output
self.results = []
@property
def capabilities(self):
if self._capabilities:
return self._capabilities
self.marionette.start_session()
self._capabilities = self.marionette.session_capabilities
self.marionette.delete_session()
return self._capabilities
@property
def device(self):
if self._device:
return self._device
self._device = self.capabilities.get('device')
return self._device
@property
def appName(self):
if self._appName:
return self._appName
self._appName = self.capabilities.get('browserName')
return self._appName
def reset_test_stats(self):
self.passed = 0
self.failed = 0
self.todo = 0
self.failures = []
def start_httpd(self):
host = moznetwork.get_ip()
self.httpd = MozHttpd(host=host,
port=0,
docroot=os.path.join(os.path.dirname(os.path.dirname(__file__)), 'www'))
self.httpd.start()
self.baseurl = 'http://%s:%d/' % (host, self.httpd.httpd.server_port)
self.logger.info('running webserver on %s' % self.baseurl)
def start_marionette(self):
assert(self.baseurl is not None)
if self.bin:
if self.address:
host, port = self.address.split(':')
else:
host = 'localhost'
port = 2828
self.marionette = Marionette(host=host,
port=int(port),
app=self.app,
app_args=self.app_args,
bin=self.bin,
profile=self.profile,
baseurl=self.baseurl,
timeout=self.timeout,
device_serial=self.device_serial)
elif self.address:
host, port = self.address.split(':')
try:
# establish a socket connection so we can verify the data that comes back
connection = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
connection.connect((host,int(port)))
connection.close()
except Exception, e:
raise Exception("Could not connect to given marionette host:port: %s" % e)
if self.emulator:
self.marionette = Marionette.getMarionetteOrExit(
host=host, port=int(port),
connectToRunningEmulator=True,
homedir=self.homedir,
baseurl=self.baseurl,
logcat_dir=self.logcat_dir,
gecko_path=self.gecko_path,
symbols_path=self.symbols_path,
timeout=self.timeout)
else:
self.marionette = Marionette(host=host,
port=int(port),
baseurl=self.baseurl,
timeout=self.timeout)
elif self.emulator:
self.marionette = Marionette.getMarionetteOrExit(
emulator=self.emulator,
emulatorBinary=self.emulatorBinary,
emulatorImg=self.emulatorImg,
emulator_res=self.emulator_res,
homedir=self.homedir,
baseurl=self.baseurl,
noWindow=self.noWindow,
logcat_dir=self.logcat_dir,
gecko_path=self.gecko_path,
symbols_path=self.symbols_path,
timeout=self.timeout,
sdcard=self.sdcard)
else:
raise Exception("must specify binary, address or emulator")
def post_to_autolog(self, elapsedtime):
self.logger.info('posting results to autolog')
logfile = None
if self.emulator:
filename = os.path.join(os.path.abspath(self.logcat_dir),
"emulator-%d.log" % self.marionette.emulator.port)
if os.access(filename, os.F_OK):
logfile = filename
for es_server in self.es_servers:
# This is all autolog stuff.
# See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
from mozautolog import RESTfulAutologTestGroup
testgroup = RESTfulAutologTestGroup(
testgroup=self.testgroup,
os='android',
platform='emulator',
harness='marionette',
server=es_server,
restserver=None,
machine=socket.gethostname(),
logfile=logfile)
testgroup.set_primary_product(
tree=self.tree,
buildtype='opt',
revision=self.revision)
testgroup.add_test_suite(
testsuite='b2g emulator testsuite',
elapsedtime=elapsedtime.seconds,
cmdline='',
passed=self.passed,
failed=self.failed,
todo=self.todo)
# Add in the test failures.
for f in self.failures:
testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])
testgroup.submit()
def run_tests(self, tests):
self.reset_test_stats()
starttime = datetime.utcnow()
counter = self.repeat
while counter >=0:
round = self.repeat - counter
if round > 0:
self.logger.info('\nREPEAT %d\n-------' % round)
if self.shuffle:
random.shuffle(tests)
for test in tests:
self.run_test(test)
counter -= 1
self.logger.info('\nSUMMARY\n-------')
self.logger.info('passed: %d' % self.passed)
self.logger.info('failed: %d' % self.failed)
self.logger.info('todo: %d' % self.todo)
if self.failed > 0:
self.logger.info('\nFAILED TESTS\n-------')
for failed_test in self.failures:
self.logger.info('%s' % failed_test[0])
try:
self.marionette.check_for_crash()
except:
traceback.print_exc()
self.elapsedtime = datetime.utcnow() - starttime
if self.autolog:
self.post_to_autolog(self.elapsedtime)
if self.xml_output:
xml_dir = os.path.dirname(os.path.abspath(self.xml_output))
if not os.path.exists(xml_dir):
os.makedirs(xml_dir)
with open(self.xml_output, 'w') as f:
f.write(self.generate_xml(self.results))
if self.marionette.instance:
self.marionette.instance.close()
self.marionette.instance = None
del self.marionette
for run_tests in self.mixin_run_tests:
run_tests(tests)
def run_test(self, test, expected='pass'):
if not self.httpd:
print "starting httpd"
self.start_httpd()
if not self.marionette:
self.start_marionette()
filepath = os.path.abspath(test)
if os.path.isdir(filepath):
for root, dirs, files in os.walk(filepath):
if self.shuffle:
random.shuffle(files)
for filename in files:
if ((filename.startswith('test_') or filename.startswith('browser_')) and
(filename.endswith('.py') or filename.endswith('.js'))):
filepath = os.path.join(root, filename)
self.run_test(filepath)
if self.marionette.check_for_crash():
return
return
mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])
testloader = unittest.TestLoader()
suite = unittest.TestSuite()
if file_ext == '.ini':
testargs = {}
if self.type is not None:
testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
for atype in testtypes:
if atype.startswith('+'):
testargs.update({ atype[1:]: 'true' })
elif atype.startswith('-'):
testargs.update({ atype[1:]: 'false' })
else:
testargs.update({ atype: 'true' })
manifest = TestManifest()
manifest.read(filepath)
all_tests = manifest.active_tests(disabled=False)
manifest_tests = manifest.active_tests(disabled=False,
device=self.device,
app=self.appName)
skip_tests = list(set([x['path'] for x in all_tests]) -
set([x['path'] for x in manifest_tests]))
for skipped in skip_tests:
self.logger.info('TEST-SKIP | %s | device=%s, app=%s' %
(os.path.basename(skipped),
self.device,
self.appName))
self.todo += 1
target_tests = manifest.get(tests=manifest_tests, **testargs)
if self.shuffle:
random.shuffle(target_tests)
for i in target_tests:
self.run_test(i["path"], i["expected"])
if self.marionette.check_for_crash():
return
return
self.logger.info('TEST-START %s' % os.path.basename(test))
self.test_kwargs['expected'] = expected
for handler in self.test_handlers:
if handler.match(os.path.basename(test)):
handler.add_tests_to_suite(mod_name,
filepath,
suite,
testloader,
self.marionette,
self.testvars,
**self.test_kwargs)
break
if suite.countTestCases():
runner = self.textrunnerclass(verbosity=3,
marionette=self.marionette)
results = runner.run(suite)
self.results.append(results)
self.failed += len(results.failures) + len(results.errors)
if hasattr(results, 'skipped'):
self.todo += len(results.skipped)
self.passed += results.passed
for failure in results.failures + results.errors:
self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-FAIL'))
if hasattr(results, 'unexpectedSuccesses'):
self.failed += len(results.unexpectedSuccesses)
for failure in results.unexpectedSuccesses:
self.failures.append((results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
if hasattr(results, 'expectedFailures'):
self.passed += len(results.expectedFailures)
def cleanup(self):
if self.httpd:
self.httpd.stop()
__del__ = cleanup
def generate_xml(self, results_list):
def _extract_xml(test, result='passed'):
testcase = doc.createElement('testcase')
testcase.setAttribute('classname', test.test_class)
testcase.setAttribute('name', unicode(test.name).split()[0])
testcase.setAttribute('time', str(test.duration))
testsuite.appendChild(testcase)
if result in ['failure', 'error', 'skipped']:
f = doc.createElement(result)
f.setAttribute('message', 'test %s' % result)
f.appendChild(doc.createTextNode(test.reason))
testcase.appendChild(f)
doc = dom.Document()
testsuite = doc.createElement('testsuite')
testsuite.setAttribute('name', 'Marionette')
testsuite.setAttribute('time', str(self.elapsedtime.total_seconds()))
testsuite.setAttribute('tests', str(sum([results.testsRun for
results in results_list])))
def failed_count(results):
count = len(results.failures)
if hasattr(results, 'unexpectedSuccesses'):
count += len(results.unexpectedSuccesses)
return count
testsuite.setAttribute('failures', str(sum([failed_count(results)
for results in results_list])))
testsuite.setAttribute('errors', str(sum([len(results.errors)
for results in results_list])))
testsuite.setAttribute('skips', str(sum([len(results.skipped) +
len(results.expectedFailures)
for results in results_list])))
for results in results_list:
for result in results.errors:
_extract_xml(result, result='error')
for result in results.failures:
_extract_xml(result, result='failure')
if hasattr(results, 'unexpectedSuccesses'):
for test in results.unexpectedSuccesses:
# unexpectedSuccesses is a list of Testcases only, no tuples
_extract_xml(test, result='failure')
if hasattr(results, 'skipped'):
for result in results.skipped:
_extract_xml(result, result='skipped')
if hasattr(results, 'expectedFailures'):
for result in results.expectedFailures:
_extract_xml(result, result='skipped')
for result in results.tests_passed:
_extract_xml(result)
doc.appendChild(testsuite)
return doc.toprettyxml(encoding='utf-8')

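For context, a minimal sketch of how the option parser and runner defined above are meant to be wired together by a harness entry point; the function name cli and the direct use of the base classes are illustrative assumptions, not part of this changeset:

import sys

from marionette.runner import BaseMarionetteOptions, BaseMarionetteTestRunner

def cli(runner_class=BaseMarionetteTestRunner, parser_class=BaseMarionetteOptions):
    # Parse and validate the command line, then hand everything to the runner.
    parser = parser_class(usage='%prog [options] test_file_or_dir ...')
    options, tests = parser.parse_args()
    parser.verify_usage(options, tests)

    runner = runner_class(**vars(options))
    try:
        runner.run_tests(tests)
    finally:
        runner.cleanup()

    # Propagate failure to the calling process.
    if runner.failed > 0:
        sys.exit(10)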

@@ -1,7 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from endurance import *
from reporting import *
from b2g import *


@@ -1,29 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import mozdevice
import os
class B2GTestCaseMixin(object):
# TODO: add methods like 'restart b2g'
def __init__(self, *args, **kwargs):
self._device_manager = None
@property
def device_manager(self, *args, **kwargs):
if not self._device_manager:
dm_type = os.environ.get('DM_TRANS', 'adb')
if dm_type == 'adb':
self._device_manager = mozdevice.DeviceManagerADB()
elif dm_type == 'sut':
host = os.environ.get('TEST_DEVICE')
if not host:
raise Exception('Must specify host with SUT!')
self._device_manager = mozdevice.DeviceManagerSUT(host=host)
else:
raise Exception('Unknown device manager type: %s' % dm_type)
return self._device_manager

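A hedged usage sketch for the mixin above: the device manager type is selected entirely through the DM_TRANS and TEST_DEVICE environment variables, so callers only touch the device_manager property. The b2g-ps call is an illustration, not part of this changeset:

from marionette.runner.mixins import B2GTestCaseMixin

# DM_TRANS defaults to 'adb'; set DM_TRANS=sut and TEST_DEVICE=<host>
# to talk to a SUT agent instead.
helper = B2GTestCaseMixin()
print helper.device_manager.shellCheckOutput(['b2g-ps'])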

@@ -1,196 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import sys
import time
class EnduranceOptionsMixin(object):
# parse_args
def endurance_parse_args(self, options, tests, args=None, values=None):
if options.iterations is not None:
if options.checkpoint_interval is None or options.checkpoint_interval > options.iterations:
options.checkpoint_interval = options.iterations
# verify_usage
def endurance_verify_usage(self, options, tests):
if options.iterations is not None and options.iterations < 1:
raise ValueError('iterations must be a positive integer')
if options.checkpoint_interval is not None and options.checkpoint_interval < 1:
raise ValueError('checkpoint interval must be a positive integer')
if options.checkpoint_interval and not options.iterations:
raise ValueError('you must specify iterations when using checkpoint intervals')
def __init__(self, **kwargs):
# Inheriting object must call this __init__ to set up option handling
group = self.add_option_group('endurance')
group.add_option('--iterations',
action='store',
dest='iterations',
type='int',
metavar='int',
help='iterations for endurance tests')
group.add_option('--checkpoint',
action='store',
dest='checkpoint_interval',
type='int',
metavar='int',
help='checkpoint interval for endurance tests')
self.parse_args_handlers.append(self.endurance_parse_args)
self.verify_usage_handlers.append(self.endurance_verify_usage)
class EnduranceTestCaseMixin(object):
def __init__(self, *args, **kwargs):
self.iterations = kwargs.pop('iterations') or 1
self.checkpoint_interval = kwargs.pop('checkpoint_interval') or self.iterations
self.drive_setup_functions = []
self.pre_test_functions = []
self.post_test_functions = []
self.checkpoint_functions = []
self.process_checkpoint_functions = []
self.log_name = None
self.checkpoint_path = None
def add_drive_setup_function(self, function):
self.drive_setup_functions.append(function)
def add_pre_test_function(self, function):
self.pre_test_functions.append(function)
def add_post_test_function(self, function):
self.post_test_functions.append(function)
def add_checkpoint_function(self, function):
self.checkpoint_functions.append(function)
def add_process_checkpoint_function(self, function):
self.process_checkpoint_functions.append(function)
def drive(self, test, app=None):
self.test_method = test
self.app_under_test = app
for function in self.drive_setup_functions:
function(test, app)
# Now drive the actual test case iterations
for count in range(1, self.iterations + 1):
self.iteration = count
self.marionette.log("%s iteration %d of %d" % (self.test_method.__name__, count, self.iterations))
# Print to the console so we can see which iteration we're on while the test is running
if self.iteration == 1:
print "\n"
print "Iteration %d of %d..." % (count, self.iterations)
sys.stdout.flush()
for function in self.pre_test_functions:
function()
self.test_method()
for function in self.post_test_functions:
function()
# Checkpoint time?
if ((count % self.checkpoint_interval) == 0) or count == self.iterations:
self.checkpoint()
# Finished, now process checkpoint data into .json output
self.process_checkpoint_data()
def checkpoint(self):
# Console output so we know what's happening when watching the console
print "Checkpoint..."
sys.stdout.flush()
self.cur_time = time.strftime("%Y%m%d%H%M%S", time.localtime())
# If first checkpoint, create the file if it doesn't exist already
if self.iteration in (0, self.checkpoint_interval):
self.checkpoint_path = "checkpoints"
if not os.path.exists(self.checkpoint_path):
os.makedirs(self.checkpoint_path, 0755)
self.log_name = "%s/checkpoint_%s_%s.log" % (self.checkpoint_path, self.test_method.__name__, self.cur_time)
with open(self.log_name, 'a') as log_file:
log_file.write('%s Endurance Test: %s\n' % (self.cur_time, self.test_method.__name__))
log_file.write('%s Checkpoint after iteration %d of %d:\n' % (self.cur_time, self.iteration, self.iterations))
else:
with open(self.log_name, 'a') as log_file:
log_file.write('%s Checkpoint after iteration %d of %d:\n' % (self.cur_time, self.iteration, self.iterations))
for function in self.checkpoint_functions:
function()
def process_checkpoint_data(self):
# Process checkpoint data into .json
self.marionette.log("processing checkpoint data")
for function in self.process_checkpoint_functions:
function()
class MemoryEnduranceTestCaseMixin(object):
def __init__(self, *args, **kwargs):
# TODO: add desktop support
if self.device_manager:
self.add_checkpoint_function(self.memory_b2g_checkpoint)
self.add_process_checkpoint_function(self.memory_b2g_process_checkpoint)
def memory_b2g_checkpoint(self):
# Sleep to give device idle time (for gc)
idle_time = 30
self.marionette.log("sleeping %d seconds to give the device some idle time" % idle_time)
time.sleep(idle_time)
# Dump out some memory status info
self.marionette.log("checkpoint")
output_str = self.device_manager.shellCheckOutput(["b2g-ps"])
with open(self.log_name, 'a') as log_file:
log_file.write('%s\n' % output_str)
def memory_b2g_process_checkpoint(self):
# Process checkpoint data into .json
self.marionette.log("processing checkpoint data from %s" % self.log_name)
# Open the checkpoint file
checkpoint_file = open(self.log_name, 'r')
# Grab every b2g rss reading for each checkpoint
b2g_rss_list = []
for next_line in checkpoint_file:
if next_line.startswith("b2g"):
b2g_rss_list.append(next_line.split()[5])
# Close the checkpoint file
checkpoint_file.close()
# Calculate the average b2g_rss
total = 0
for b2g_mem_value in b2g_rss_list:
total += int(b2g_mem_value)
avg_rss = total / len(b2g_rss_list)
# Create a summary text file
summary_name = self.log_name.replace('.log', '_summary.log')
summary_file = open(summary_name, 'w')
# Write the summarized checkpoint data
summary_file.write('test_name: %s\n' % self.test_method.__name__)
summary_file.write('completed: %s\n' % self.cur_time)
summary_file.write('app_under_test: %s\n' % self.app_under_test.lower())
summary_file.write('total_iterations: %d\n' % self.iterations)
summary_file.write('checkpoint_interval: %d\n' % self.checkpoint_interval)
summary_file.write('b2g_rss: ')
summary_file.write(', '.join(b2g_rss_list))
summary_file.write('\navg_rss: %d\n\n' % avg_rss)
# Close the summary file
summary_file.close()
# Write to suite summary file
suite_summary_file_name = '%s/avg_b2g_rss_suite_summary.log' % self.checkpoint_path
suite_summary_file = open(suite_summary_file_name, 'a')
suite_summary_file.write('%s: %s\n' % (self.test_method.__name__, avg_rss))
suite_summary_file.close()

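A hedged sketch of how a test case is expected to use the endurance mixin above: the test defines a single-iteration body and hands it to drive(), which repeats it and checkpoints every checkpoint_interval iterations. The class, method, and app names are illustrative assumptions; iterations and checkpoint_interval are normally passed in by the runner through the test kwargs:

from marionette import MarionetteTestCase
from marionette.runner.mixins import EnduranceTestCaseMixin

class TestRepeatedNavigation(MarionetteTestCase, EnduranceTestCaseMixin):
    def test_endurance_navigate(self):
        # drive() runs navigate_once() once per iteration and calls
        # checkpoint() at the configured interval.
        self.drive(test=self.navigate_once, app='browser')

    def navigate_once(self):
        self.marionette.navigate('http://example.org/')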

@@ -1,205 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import base64
import cgi
import datetime
import json
import os
import pkg_resources
import sys
from py.xml import html
from py.xml import raw
class HTMLReportingTestRunnerMixin(object):
def __init__(self, name=None, version=None, html_output=None, **kwargs):
"""
name should be the name of the test runner and version should correspond
to the test runner's version.
html_output is the file to write the HTML report to.
"""
# for HTML output
self.html_output = html_output
self.html_name = name
self.html_version = version
self.testvars['html_output'] = self.html_output
self.mixin_run_tests.append(self.html_run_tests)
def html_run_tests(self, tests):
if self.html_output:
# change default encoding to avoid encoding problem for page source
reload(sys)
sys.setdefaultencoding('utf-8')
html_dir = os.path.dirname(os.path.abspath(self.html_output))
if not os.path.exists(html_dir):
os.makedirs(html_dir)
with open(self.html_output, 'w') as f:
f.write(self.generate_html(self.results))
def generate_html(self, results_list):
tests = sum([results.testsRun for results in results_list])
failures = sum([len(results.failures) for results in results_list])
expected_failures = sum([len(results.expectedFailures) for results in results_list])
skips = sum([len(results.skipped) for results in results_list])
errors = sum([len(results.errors) for results in results_list])
passes = sum([results.passed for results in results_list])
unexpected_passes = sum([len(results.unexpectedSuccesses) for results in results_list])
test_time = self.elapsedtime.total_seconds()
test_logs = []
def _extract_html(test, class_name, duration=0, text='', result='passed', debug=None):
cls_name = class_name
tc_name = unicode(test)
tc_time = duration
additional_html = []
debug = debug or {}
links_html = []
if result in ['skipped', 'failure', 'expected failure', 'error']:
if debug.get('screenshot'):
screenshot = 'data:image/png;base64,%s' % debug['screenshot']
additional_html.append(html.div(
html.a(html.img(src=screenshot), href="#"),
class_='screenshot'))
for name, content in debug.items():
try:
if 'screenshot' in name:
href = '#'
else:
# use base64 to stop some browsers (such as Firefox and Opera) from
# treating a '#' contained in the data URL as the start of another link.
# use 'charset=utf-8' so that special characters such as Chinese display correctly.
href = 'data:text/plain;charset=utf-8;base64,%s' % base64.b64encode(content)
links_html.append(html.a(
name.title(),
class_=name,
href=href,
target='_blank'))
links_html.append(' ')
except:
pass
log = html.div(class_='log')
for line in text.splitlines():
separator = line.startswith(' ' * 10)
if separator:
log.append(line[:80])
else:
if line.lower().find("error") != -1 or line.lower().find("exception") != -1:
log.append(html.span(raw(cgi.escape(line)), class_='error'))
else:
log.append(raw(cgi.escape(line)))
log.append(html.br())
additional_html.append(log)
test_logs.append(html.tr([
html.td(result.title(), class_='col-result'),
html.td(cls_name, class_='col-class'),
html.td(tc_name, class_='col-name'),
html.td(tc_time, class_='col-duration'),
html.td(links_html, class_='col-links'),
html.td(additional_html, class_='debug')],
class_=result.lower() + ' results-table-row'))
for results in results_list:
for test in results.tests_passed:
_extract_html(test.name, test.test_class)
for result in results.skipped:
_extract_html(result.name, result.test_class, text='\n'.join(result.output), result='skipped')
for result in results.failures:
_extract_html(result.name, result.test_class, text='\n'.join(result.output), result='failure', debug=result.debug)
for result in results.expectedFailures:
_extract_html(result.name, result.test_class, text='\n'.join(result.output), result='expected failure', debug=result.debug)
for test in results.unexpectedSuccesses:
_extract_html(test.name, test.test_class, result='unexpected pass')
for result in results.errors:
_extract_html(result.name, result.test_class, text='\n'.join(result.output), result='error', debug=result.debug)
generated = datetime.datetime.now()
doc = html.html(
html.head(
html.meta(charset='utf-8'),
html.title('Test Report'),
#TODO: must redesign this to use marionette's resources, instead of the caller folder's
html.style(raw(pkg_resources.resource_string(
__name__, os.path.sep.join(['resources', 'htmlreport', 'style.css']))),
type='text/css')),
html.body(
html.script(raw(pkg_resources.resource_string(
__name__, os.path.sep.join(['resources', 'htmlreport', 'jquery.js']))),
type='text/javascript'),
html.script(raw(pkg_resources.resource_string(
__name__, os.path.sep.join(['resources', 'htmlreport', 'main.js']))),
type='text/javascript'),
html.p('Report generated on %s at %s by %s %s' % (
generated.strftime('%d-%b-%Y'),
generated.strftime('%H:%M:%S'),
self.html_name, self.html_version)),
html.h2('Summary'),
html.p('%i tests ran in %i seconds.' % (tests, test_time),
html.br(),
html.span('%i passed' % passes, class_='passed'), ', ',
html.span('%i skipped' % skips, class_='skipped'), ', ',
html.span('%i failed' % failures, class_='failed'), ', ',
html.span('%i errors' % errors, class_='error'), '.',
html.br(),
html.span('%i expected failures' % expected_failures,
class_='expected failure'), ', ',
html.span('%i unexpected passes' % unexpected_passes,
class_='unexpected pass'), '.'),
html.h2('Results'),
html.table([html.thead(
html.tr([
html.th('Result', class_='sortable', col='result'),
html.th('Class', class_='sortable', col='class'),
html.th('Test Name', class_='sortable', col='name'),
html.th('Duration', class_='sortable numeric', col='duration'),
html.th('Links')]), id='results-table-head'),
html.tbody(test_logs, id='results-table-body')], id='results-table')))
return doc.unicode(indent=2)
class HTMLReportingOptionsMixin(object):
def __init__(self, **kwargs):
group = self.add_option_group('htmlreporting')
group.add_option('--html-output',
action='store',
dest='html_output',
help='html output',
metavar='path')
class HTMLReportingTestResultMixin(object):
def __init__(self, *args, **kwargs):
self.result_modifiers.append(self.html_modifier)
def html_modifier(self, test, result_expected, result_actual, output, context):
test.debug = None
if result_actual != 'PASS':
test.debug = self.gather_debug()
def gather_debug(self):
debug = {}
try:
# TODO make screenshot a consistent size by using the full viewport
# Bug 883294 - Add ability to take full viewport screenshots
debug['screenshot'] = self.marionette.screenshot()
debug['source'] = self.marionette.page_source
self.marionette.switch_to_frame()
debug['settings'] = json.dumps(self.marionette.execute_async_script("""
SpecialPowers.addPermission('settings-read', true, document);
var req = window.navigator.mozSettings.createLock().get('*');
req.onsuccess = function() {
marionetteScriptFinished(req.result);
}""", special_powers=True), sort_keys=True, indent=4, separators=(',', ': '))
except:
pass
return debug

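A hedged sketch of how the reporting mixins above are meant to be composed with the base option parser and runner through multiple inheritance; the concrete class names and the name/version values are illustrative assumptions, not part of this changeset:

from marionette.runner import BaseMarionetteOptions, BaseMarionetteTestRunner
from marionette.runner.mixins import (EnduranceOptionsMixin,
                                      HTMLReportingOptionsMixin,
                                      HTMLReportingTestRunnerMixin)

class ReportingOptions(BaseMarionetteOptions, HTMLReportingOptionsMixin,
                       EnduranceOptionsMixin):
    def __init__(self, **kwargs):
        # Each __init__ registers its own options and parse/verify handlers.
        BaseMarionetteOptions.__init__(self, **kwargs)
        HTMLReportingOptionsMixin.__init__(self, **kwargs)
        EnduranceOptionsMixin.__init__(self, **kwargs)

class ReportingTestRunner(BaseMarionetteTestRunner, HTMLReportingTestRunnerMixin):
    def __init__(self, **kwargs):
        # The base runner must run first so self.testvars and
        # self.mixin_run_tests exist for the reporting mixin.
        BaseMarionetteTestRunner.__init__(self, **kwargs)
        HTMLReportingTestRunnerMixin.__init__(self, name='example-harness',
                                              version='0.1', **kwargs)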
File diff suppressed because one or more lines are too long


@@ -1,109 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
$(document).ready(function() {
reset_sort_headers();
split_debug_onto_two_rows();
$('.col-links a.screenshot').click(function(event) {
window.open($(this).parents('.results-table-row').next('.debug').find('.screenshot img').attr('src'));
event.preventDefault();
});
$('.screenshot a').click(function(event) {
window.open($(this).find('img').attr('src'));
event.preventDefault();
});
$('.sortable').click(toggle_sort_states);
$('.sortable').click(function() {
var columnName = $(this).attr('col');
if ($(this).hasClass('numeric')) {
sort_rows_num($(this), 'col-' + columnName);
} else {
sort_rows_alpha($(this), 'col-' + columnName);
}
});
});
function sort_rows_alpha(clicked, sortclass) {
one_row_for_data();
var therows = $('.results-table-row');
therows.sort(function(s, t) {
var a = s.getElementsByClassName(sortclass)[0].innerHTML.toLowerCase();
var b = t.getElementsByClassName(sortclass)[0].innerHTML.toLowerCase();
if (clicked.hasClass('asc')) {
if (a < b)
return -1;
if (a > b)
return 1;
return 0;
} else {
if (a < b)
return 1;
if (a > b)
return -1;
return 0;
}
});
$('#results-table-body').append(therows);
split_debug_onto_two_rows();
}
function sort_rows_num(clicked, sortclass) {
one_row_for_data();
var therows = $('.results-table-row');
therows.sort(function(s, t) {
var a = s.getElementsByClassName(sortclass)[0].innerHTML
var b = t.getElementsByClassName(sortclass)[0].innerHTML
if (clicked.hasClass('asc')) {
return a - b;
} else {
return b - a;
}
});
$('#results-table-body').append(therows);
split_debug_onto_two_rows();
}
function reset_sort_headers() {
$('.sort-icon').remove();
$('.sortable').prepend('<div class="sort-icon">vvv</div>');
$('.sortable').removeClass('asc desc inactive active');
$('.sortable').addClass('asc inactive');
}
function toggle_sort_states() {
//if active, toggle between asc and desc
if ($(this).hasClass('active')) {
$(this).toggleClass('asc');
$(this).toggleClass('desc');
}
//if inactive, reset all other functions and add ascending active
if ($(this).hasClass('inactive')) {
reset_sort_headers();
$(this).removeClass('inactive');
$(this).addClass('active');
}
}
function split_debug_onto_two_rows() {
$('tr.results-table-row').each(function() {
$('<tr class="debug">').insertAfter(this).append($('.debug', this));
});
$('td.debug').attr('colspan', 5);
}
function one_row_for_data() {
$('tr.results-table-row').each(function() {
if ($(this).next().hasClass('debug')) {
$(this).append($(this).next().contents().unwrap());
}
});
}


@@ -1,158 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
body {
font-family: Helvetica, Arial, sans-serif;
font-size: 12px;
min-width: 1200px;
color: #999;
}
h2 {
font-size: 16px;
color: black;
}
p {
color: black;
}
a {
color: #999;
}
table {
border-collapse: collapse;
}
/******************************
* SUMMARY INFORMATION
******************************/
#configuration td {
padding: 5px;
border: 1px solid #E6E6E6;
}
#configuration tr:nth-child(odd) {
background-color: #f6f6f6;
}
/******************************
* TEST RESULT COLORS
******************************/
span.passed, .passed .col-result {
color: green;
}
span.expected.failure, .expected.failure .col-result {
color: orange;
}
span.skipped, .skipped .col-result {
color: orange;
}
span.unexpected.pass, .unexpected.pass .col-result {
color: red;
}
span.failed, .failure .col-result {
color: red;
}
span.error,.error .col-result {
color: red;
}
/******************************
* RESULTS TABLE
*
* 1. Table Layout
* 2. Debug
* 3. Sorting items
*
******************************/
/*------------------
* 1. Table Layout
*------------------*/
#results-table {
border: 1px solid #e6e6e6;
color: #999;
font-size: 12px;
width: 100%
}
#results-table th, #results-table td {
padding: 5px;
border: 1px solid #E6E6E6;
text-align: left
}
#results-table th {
font-weight: bold
}
/*------------------
* 2. Debug
*------------------*/
.log:only-child {
height: inherit
}
.log {
background-color: #e6e6e6;
border: 1px solid #e6e6e6;
color: black;
display: block;
font-family: "Courier New", Courier, monospace;
height: 230px;
overflow-y: scroll;
padding: 5px;
white-space: pre-wrap
}
div.screenshot {
border: 1px solid #e6e6e6;
float: right;
margin-left: 5px;
height: 240px
}
div.screenshot img {
height: 240px
}
/*if the result is passed or xpassed don't show debug row*/
.passed + .debug, .unexpected.pass + .debug {
display: none;
}
/*------------------
* 3. Sorting items
*------------------*/
.sortable {
cursor: pointer;
}
.sort-icon {
font-size: 0px;
float: left;
margin-right: 5px;
margin-top: 5px;
/*triangle*/
width: 0;
height: 0;
border-left: 8px solid transparent;
border-right: 8px solid transparent;
}
.inactive .sort-icon {
/*finish triangle*/
border-top: 8px solid #E6E6E6;
}
.asc.active .sort-icon {
/*finish triangle*/
border-bottom: 8px solid #999;
}
.desc.active .sort-icon {
/*finish triangle*/
border-top: 8px solid #999;
}


@@ -2,16 +2,907 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from datetime import datetime
import logging
from optparse import OptionParser
import os
import unittest
import socket
import sys
import time
import traceback
import random
import moznetwork
import xml.dom.minidom as dom
from marionette_test import MarionetteTestCase, MarionetteJSTestCase
from runner import BaseMarionetteTestRunner, BaseMarionetteOptions
from manifestparser import TestManifest
from mozhttpd import MozHttpd
from marionette import Marionette
from moztest.results import TestResultCollection, TestResult, relevant_line
from marionette_test import MarionetteJSTestCase, MarionetteTestCase
class MarionetteTestRunner(BaseMarionetteTestRunner):
class MarionetteTest(TestResult):
@property
def test_name(self):
if self.test_class is not None:
return '%s.py %s.%s' % (self.test_class.split('.')[0],
self.test_class,
self.name)
else:
return self.name
class MarionetteTestResult(unittest._TextTestResult, TestResultCollection):
resultClass = MarionetteTest
def __init__(self, *args, **kwargs):
self.marionette = kwargs.pop('marionette')
TestResultCollection.__init__(self, 'MarionetteTest')
unittest._TextTestResult.__init__(self, *args, **kwargs)
self.passed = 0
self.testsRun = 0
@property
def skipped(self):
return [t for t in self if t.result == 'SKIPPED']
@skipped.setter
def skipped(self, value):
pass
@property
def expectedFailures(self):
return [t for t in self if t.result == 'KNOWN-FAIL']
@expectedFailures.setter
def expectedFailures(self, value):
pass
@property
def unexpectedSuccesses(self):
return [t for t in self if t.result == 'UNEXPECTED-PASS']
@unexpectedSuccesses.setter
def unexpectedSuccesses(self, value):
pass
@property
def tests_passed(self):
return [t for t in self if t.result == 'PASS']
@property
def errors(self):
return [t for t in self if t.result == 'ERROR']
@errors.setter
def errors(self, value):
pass
@property
def failures(self):
return [t for t in self if t.result == 'UNEXPECTED-FAIL']
@failures.setter
def failures(self, value):
pass
@property
def duration(self):
if self.stop_time:
return self.stop_time - self.start_time
else:
return 0
def add_test_result(self, test, result_expected='PASS',
result_actual='PASS', output='', context=None, **kwargs):
def get_class(test):
return test.__class__.__module__ + '.' + test.__class__.__name__
name = str(test).split()[0]
test_class = get_class(test)
if hasattr(test, 'jsFile'):
name = os.path.basename(test.jsFile)
test_class = None
t = self.resultClass(name=name, test_class=test_class,
time_start=test.start_time, result_expected=result_expected,
context=context, **kwargs)
t.finish(result_actual,
time_end=time.time() if test.start_time else 0,
reason=relevant_line(output),
output=output)
self.append(t)
def addError(self, test, err):
self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='ERROR')
self._mirrorOutput = True
if self.showAll:
self.stream.writeln("ERROR")
elif self.dots:
self.stream.write('E')
self.stream.flush()
def addFailure(self, test, err):
self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='UNEXPECTED-FAIL')
self._mirrorOutput = True
if self.showAll:
self.stream.writeln("FAIL")
elif self.dots:
self.stream.write('F')
self.stream.flush()
def addSuccess(self, test):
self.passed += 1
self.add_test_result(test, result_actual='PASS')
if self.showAll:
self.stream.writeln("ok")
elif self.dots:
self.stream.write('.')
self.stream.flush()
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.add_test_result(test, output=self._exc_info_to_string(err, test),
result_actual='KNOWN-FAIL')
if self.showAll:
self.stream.writeln("expected failure")
elif self.dots:
self.stream.write("x")
self.stream.flush()
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.add_test_result(test, result_actual='UNEXPECTED-PASS')
if self.showAll:
self.stream.writeln("unexpected success")
elif self.dots:
self.stream.write("u")
self.stream.flush()
def addSkip(self, test, reason):
self.add_test_result(test, output=reason, result_actual='SKIPPED')
if self.showAll:
self.stream.writeln("skipped {0!r}".format(reason))
elif self.dots:
self.stream.write("s")
self.stream.flush()
def getInfo(self, test):
return test.test_name
def getDescription(self, test):
doc_first_line = test.shortDescription()
if self.descriptions and doc_first_line:
return '\n'.join((str(test), doc_first_line))
else:
desc = str(test)
if hasattr(test, 'jsFile'):
desc = "%s, %s" % (test.jsFile, desc)
return desc
def printLogs(self, test):
for testcase in test._tests:
if hasattr(testcase, 'loglines') and testcase.loglines:
# Don't dump loglines to the console if they only contain
# TEST-START and TEST-END.
skip_log = True
for line in testcase.loglines:
str_line = ' '.join(line)
if not 'TEST-END' in str_line and not 'TEST-START' in str_line:
skip_log = False
break
if skip_log:
return
self.stream.writeln('\nSTART LOG:')
for line in testcase.loglines:
self.stream.writeln(' '.join(line).encode('ascii', 'replace'))
self.stream.writeln('END LOG:')
def printErrorList(self, flavour, errors):
for error in errors:
err = error.output
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, error.description))
self.stream.writeln(self.separator2)
lastline = None
fail_present = None
for line in err:
if not line.startswith('\t'):
lastline = line
if 'TEST-UNEXPECTED-FAIL' in line:
fail_present = True
for line in err:
if line != lastline or fail_present:
self.stream.writeln("%s" % line)
else:
self.stream.writeln("TEST-UNEXPECTED-FAIL | %s | %s" %
(self.getInfo(error), line))
def stopTest(self, *args, **kwargs):
unittest._TextTestResult.stopTest(self, *args, **kwargs)
if self.marionette.check_for_crash():
# this tells unittest.TestSuite not to continue running tests
self.shouldStop = True
class MarionetteTextTestRunner(unittest.TextTestRunner):
resultclass = MarionetteTestResult
def __init__(self, **kwargs):
BaseMarionetteTestRunner.__init__(self, **kwargs)
self.test_handlers = [MarionetteTestCase, MarionetteJSTestCase]
self.marionette = kwargs['marionette']
del kwargs['marionette']
unittest.TextTestRunner.__init__(self, **kwargs)
def _makeResult(self):
return self.resultclass(self.stream,
self.descriptions,
self.verbosity,
marionette=self.marionette)
def run(self, test):
"Run the given test case or test suite."
result = self._makeResult()
if hasattr(self, 'failfast'):
result.failfast = self.failfast
if hasattr(self, 'buffer'):
result.buffer = self.buffer
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if startTestRun is not None:
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if stopTestRun is not None:
stopTestRun()
stopTime = time.time()
if hasattr(result, 'time_taken'):
result.time_taken = stopTime - startTime
result.printLogs(test)
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln("Ran %d test%s in %.3fs" %
(run, run != 1 and "s" or "", result.time_taken))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures,
result.unexpectedSuccesses,
result.skipped))
except AttributeError:
pass
else:
expectedFails, unexpectedSuccesses, skipped = results
infos = []
if not result.wasSuccessful():
self.stream.write("FAILED")
failed, errored = map(len, (result.failures, result.errors))
if failed:
infos.append("failures=%d" % failed)
if errored:
infos.append("errors=%d" % errored)
else:
self.stream.write("OK")
if skipped:
infos.append("skipped=%d" % skipped)
if expectedFails:
infos.append("expected failures=%d" % expectedFails)
if unexpectedSuccesses:
infos.append("unexpected successes=%d" % unexpectedSuccesses)
if infos:
self.stream.writeln(" (%s)" % (", ".join(infos),))
else:
self.stream.write("\n")
return result
class MarionetteTestRunner(object):
textrunnerclass = MarionetteTextTestRunner
def __init__(self, address=None, emulator=None, emulatorBinary=None,
emulatorImg=None, emulator_res='480x800', homedir=None,
app=None, app_args=None, bin=None, profile=None, autolog=False,
revision=None, logger=None, testgroup="marionette", noWindow=False,
logcat_dir=None, xml_output=None, repeat=0, gecko_path=None,
testvars=None, tree=None, type=None, device_serial=None,
symbols_path=None, timeout=None, es_servers=None, shuffle=False,
sdcard=None, **kwargs):
self.address = address
self.emulator = emulator
self.emulatorBinary = emulatorBinary
self.emulatorImg = emulatorImg
self.emulator_res = emulator_res
self.homedir = homedir
self.app = app
self.app_args = app_args or []
self.bin = bin
self.profile = profile
self.autolog = autolog
self.testgroup = testgroup
self.revision = revision
self.logger = logger
self.noWindow = noWindow
self.httpd = None
self.baseurl = None
self.marionette = None
self.logcat_dir = logcat_dir
self.xml_output = xml_output
self.repeat = repeat
self.gecko_path = gecko_path
self.testvars = {}
self.test_kwargs = kwargs
self.tree = tree
self.type = type
self.device_serial = device_serial
self.symbols_path = symbols_path
self.timeout = timeout
self._device = None
self._capabilities = None
self._appName = None
self.es_servers = es_servers
self.shuffle = shuffle
self.sdcard = sdcard
if testvars:
if not os.path.exists(testvars):
raise Exception('--testvars file does not exist')
import json
with open(testvars) as f:
self.testvars = json.loads(f.read())
# set up test handlers
self.test_handlers = []
self.register_handlers()
self.reset_test_stats()
if self.logger is None:
self.logger = logging.getLogger('Marionette')
self.logger.setLevel(logging.INFO)
self.logger.addHandler(logging.StreamHandler())
if self.logcat_dir:
if not os.access(self.logcat_dir, os.F_OK):
os.mkdir(self.logcat_dir)
# for XML output
self.testvars['xml_output'] = self.xml_output
self.results = []
@property
def capabilities(self):
if self._capabilities:
return self._capabilities
self.marionette.start_session()
self._capabilities = self.marionette.session_capabilities
self.marionette.delete_session()
return self._capabilities
@property
def device(self):
if self._device:
return self._device
self._device = self.capabilities.get('device')
return self._device
@property
def appName(self):
if self._appName:
return self._appName
self._appName = self.capabilities.get('browserName')
return self._appName
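# capabilities, device and appName above are resolved lazily: the first access
# starts a throwaway Marionette session, reads session_capabilities, deletes
# the session again, and caches the values for later manifest filtering.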
def reset_test_stats(self):
self.passed = 0
self.failed = 0
self.todo = 0
self.failures = []
def start_httpd(self):
host = moznetwork.get_ip()
self.httpd = MozHttpd(host=host,
port=0,
docroot=os.path.join(os.path.dirname(__file__), 'www'))
self.httpd.start()
self.baseurl = 'http://%s:%d/' % (host, self.httpd.httpd.server_port)
self.logger.info('running webserver on %s' % self.baseurl)
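# start_marionette below picks one of three connection modes, in this order:
#   --binary:   launch a local Gecko build and talk to it on localhost:2828
#               (or on --address, if one was given)
#   --address:  attach to an already-running Gecko; with --emulator as well,
#               attach to an already-running emulator
#   --emulator: boot a fresh B2G emulator and connect to it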
def start_marionette(self):
assert(self.baseurl is not None)
if self.bin:
if self.address:
host, port = self.address.split(':')
else:
host = 'localhost'
port = 2828
self.marionette = Marionette(host=host,
port=int(port),
app=self.app,
app_args=self.app_args,
bin=self.bin,
profile=self.profile,
baseurl=self.baseurl,
timeout=self.timeout,
device_serial=self.device_serial)
elif self.address:
host, port = self.address.split(':')
try:
# establish a socket connection so we can verify the data comes back
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect((host, int(port)))
connection.close()
except Exception as e:
raise Exception("Could not connect to given marionette host:port: %s" % e)
if self.emulator:
self.marionette = Marionette.getMarionetteOrExit(
host=host, port=int(port),
connectToRunningEmulator=True,
homedir=self.homedir,
baseurl=self.baseurl,
logcat_dir=self.logcat_dir,
gecko_path=self.gecko_path,
symbols_path=self.symbols_path,
timeout=self.timeout)
else:
self.marionette = Marionette(host=host,
port=int(port),
baseurl=self.baseurl,
timeout=self.timeout)
elif self.emulator:
self.marionette = Marionette.getMarionetteOrExit(
emulator=self.emulator,
emulatorBinary=self.emulatorBinary,
emulatorImg=self.emulatorImg,
emulator_res=self.emulator_res,
homedir=self.homedir,
baseurl=self.baseurl,
noWindow=self.noWindow,
logcat_dir=self.logcat_dir,
gecko_path=self.gecko_path,
symbols_path=self.symbols_path,
timeout=self.timeout,
sdcard=self.sdcard)
else:
raise Exception("must specify binary, address or emulator")
def post_to_autolog(self, elapsedtime):
self.logger.info('posting results to autolog')
logfile = None
if self.emulator:
filename = os.path.join(os.path.abspath(self.logcat_dir),
"emulator-%d.log" % self.marionette.emulator.port)
if os.access(filename, os.F_OK):
logfile = filename
for es_server in self.es_servers:
# This is all autolog stuff.
# See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
from mozautolog import RESTfulAutologTestGroup
testgroup = RESTfulAutologTestGroup(
testgroup=self.testgroup,
os='android',
platform='emulator',
harness='marionette',
server=es_server,
restserver=None,
machine=socket.gethostname(),
logfile=logfile)
testgroup.set_primary_product(
tree=self.tree,
buildtype='opt',
revision=self.revision)
testgroup.add_test_suite(
testsuite='b2g emulator testsuite',
elapsedtime=elapsedtime.seconds,
cmdline='',
passed=self.passed,
failed=self.failed,
todo=self.todo)
# Add in the test failures.
for f in self.failures:
testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])
testgroup.submit()
def run_tests(self, tests):
self.reset_test_stats()
starttime = datetime.utcnow()
counter = self.repeat
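# counter starts at --repeat, so the loop below makes repeat + 1 full passes
# over the test list (the initial run plus the requested repeats)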
while counter >= 0:
round = self.repeat - counter
if round > 0:
self.logger.info('\nREPEAT %d\n-------' % round)
if self.shuffle:
random.shuffle(tests)
for test in tests:
self.run_test(test)
counter -= 1
self.logger.info('\nSUMMARY\n-------')
self.logger.info('passed: %d' % self.passed)
self.logger.info('failed: %d' % self.failed)
self.logger.info('todo: %d' % self.todo)
if self.failed > 0:
self.logger.info('\nFAILED TESTS\n-------')
for failed_test in self.failures:
self.logger.info('%s' % failed_test[0])
try:
self.marionette.check_for_crash()
except:
traceback.print_exc()
self.elapsedtime = datetime.utcnow() - starttime
if self.autolog:
self.post_to_autolog(self.elapsedtime)
if self.xml_output:
xml_dir = os.path.dirname(os.path.abspath(self.xml_output))
if not os.path.exists(xml_dir):
os.makedirs(xml_dir)
with open(self.xml_output, 'w') as f:
f.write(self.generate_xml(self.results))
if self.marionette.instance:
self.marionette.instance.close()
self.marionette.instance = None
del self.marionette
def run_test(self, test, expected='pass'):
if not self.httpd:
print "starting httpd"
self.start_httpd()
if not self.marionette:
self.start_marionette()
filepath = os.path.abspath(test)
if os.path.isdir(filepath):
for root, dirs, files in os.walk(filepath):
if self.shuffle:
random.shuffle(files)
for filename in files:
if ((filename.startswith('test_') or filename.startswith('browser_')) and
(filename.endswith('.py') or filename.endswith('.js'))):
filepath = os.path.join(root, filename)
self.run_test(filepath)
if self.marionette.check_for_crash():
return
return
mod_name,file_ext = os.path.splitext(os.path.split(filepath)[-1])
testloader = unittest.TestLoader()
suite = unittest.TestSuite()
if file_ext == '.ini':
testargs = {}
if self.type is not None:
testtypes = self.type.replace('+', ' +').replace('-', ' -').split()
for atype in testtypes:
if atype.startswith('+'):
testargs.update({ atype[1:]: 'true' })
elif atype.startswith('-'):
testargs.update({ atype[1:]: 'false' })
else:
testargs.update({ atype: 'true' })
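# e.g. --type 'browser+b2g-qemu' produces
#   testargs == {'browser': 'true', 'b2g': 'true', 'qemu': 'false'}
# which is handed to manifest.get() below to filter the manifest entries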
manifest = TestManifest()
manifest.read(filepath)
all_tests = manifest.active_tests(disabled=False)
manifest_tests = manifest.active_tests(disabled=False,
device=self.device,
app=self.appName)
skip_tests = list(set([x['path'] for x in all_tests]) -
set([x['path'] for x in manifest_tests]))
for skipped in skip_tests:
self.logger.info('TEST-SKIP | %s | device=%s, app=%s' %
(os.path.basename(skipped),
self.device,
self.appName))
self.todo += 1
target_tests = manifest.get(tests=manifest_tests, **testargs)
if self.shuffle:
random.shuffle(target_tests)
for i in target_tests:
self.run_test(i["path"], i["expected"])
if self.marionette.check_for_crash():
return
return
self.logger.info('TEST-START %s' % os.path.basename(test))
self.test_kwargs['expected'] = expected
for handler in self.test_handlers:
if handler.match(os.path.basename(test)):
handler.add_tests_to_suite(mod_name,
filepath,
suite,
testloader,
self.marionette,
self.testvars,
**self.test_kwargs)
break
if suite.countTestCases():
runner = self.textrunnerclass(verbosity=3,
marionette=self.marionette)
results = runner.run(suite)
self.results.append(results)
self.failed += len(results.failures) + len(results.errors)
if hasattr(results, 'skipped'):
self.todo += len(results.skipped)
self.passed += results.passed
for failure in results.failures + results.errors:
self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-FAIL'))
if hasattr(results, 'unexpectedSuccesses'):
self.failed += len(results.unexpectedSuccesses)
for failure in results.unexpectedSuccesses:
self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-PASS'))
if hasattr(results, 'expectedFailures'):
self.passed += len(results.expectedFailures)
def register_handlers(self):
self.test_handlers.extend([MarionetteTestCase, MarionetteJSTestCase])
def cleanup(self):
if self.httpd:
self.httpd.stop()
__del__ = cleanup
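# generate_xml below emits JUnit-style XML, roughly of this shape (illustrative):
#   <testsuite name="Marionette" time="..." tests="..." failures="..." errors="..." skips="...">
#     <testcase classname="..." name="..." time="...">
#       <failure message="test failure">...reason...</failure>
#     </testcase>
#   </testsuite>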
def generate_xml(self, results_list):
def _extract_xml(test, result='passed'):
testcase = doc.createElement('testcase')
testcase.setAttribute('classname', test.test_class)
testcase.setAttribute('name', unicode(test.name).split()[0])
testcase.setAttribute('time', str(test.duration))
testsuite.appendChild(testcase)
if result in ['failure', 'error', 'skipped']:
f = doc.createElement(result)
f.setAttribute('message', 'test %s' % result)
f.appendChild(doc.createTextNode(test.reason))
testcase.appendChild(f)
doc = dom.Document()
testsuite = doc.createElement('testsuite')
testsuite.setAttribute('name', 'Marionette')
testsuite.setAttribute('time', str(self.elapsedtime.total_seconds()))
testsuite.setAttribute('tests', str(sum([results.testsRun for
results in results_list])))
def failed_count(results):
count = len(results.failures)
if hasattr(results, 'unexpectedSuccesses'):
count += len(results.unexpectedSuccesses)
return count
testsuite.setAttribute('failures', str(sum([failed_count(results)
for results in results_list])))
testsuite.setAttribute('errors', str(sum([len(results.errors)
for results in results_list])))
testsuite.setAttribute('skips', str(sum([len(results.skipped) +
len(results.expectedFailures)
for results in results_list])))
for results in results_list:
for result in results.errors:
_extract_xml(result, result='error')
for result in results.failures:
_extract_xml(result, result='failure')
if hasattr(results, 'unexpectedSuccesses'):
for test in results.unexpectedSuccesses:
# unexpectedSuccesses is a list of Testcases only, no tuples
_extract_xml(test, result='failure')
if hasattr(results, 'skipped'):
for result in results.skipped:
_extract_xml(result, result='skipped')
if hasattr(results, 'expectedFailures'):
for result in results.expectedFailures:
_extract_xml(result, result='skipped')
for result in results.tests_passed:
_extract_xml(result)
doc.appendChild(testsuite)
return doc.toprettyxml(encoding='utf-8')
class MarionetteTestOptions(OptionParser):
def __init__(self, **kwargs):
OptionParser.__init__(self, **kwargs)
self.add_option('--autolog',
action='store_true',
dest='autolog',
default=False,
help='send test results to autolog')
self.add_option('--revision',
action='store',
dest='revision',
help='git revision for autolog submissions')
self.add_option('--testgroup',
action='store',
dest='testgroup',
help='testgroup names for autolog submissions')
self.add_option('--emulator',
action='store',
dest='emulator',
choices=['x86', 'arm'],
help='if no --address is given, then the harness will launch a B2G emulator on which to run '
'emulator tests. if --address is given, then the harness assumes you are running an '
'emulator already, and will run the emulator tests using that emulator. you need to '
'specify which architecture to emulate for both cases')
self.add_option('--emulator-binary',
action='store',
dest='emulatorBinary',
help='launch a specific emulator binary rather than launching from the B2G built emulator')
self.add_option('--emulator-img',
action='store',
dest='emulatorImg',
help='use a specific image file instead of a fresh one')
self.add_option('--emulator-res',
action='store',
dest='emulator_res',
type='str',
help='set a custom resolution for the emulator. '
'Example: "480x800"')
self.add_option('--sdcard',
action='store',
dest='sdcard',
help='size of sdcard to create for the emulator')
self.add_option('--no-window',
action='store_true',
dest='noWindow',
default=False,
help='when Marionette launches an emulator, start it with the -no-window argument')
self.add_option('--logcat-dir',
dest='logcat_dir',
action='store',
help='directory to store logcat dump files')
self.add_option('--address',
dest='address',
action='store',
help='host:port of running Gecko instance to connect to')
self.add_option('--device',
dest='device_serial',
action='store',
help='serial ID of a device to use for adb / fastboot')
self.add_option('--type',
dest='type',
action='store',
default='browser+b2g',
help="the type of test to run, can be a combination of values defined in the manifest file; "
"individual values are combined with '+' or '-' characters. for example: 'browser+b2g' "
"means the set of tests which are compatible with both browser and b2g; 'b2g-qemu' means "
"the set of tests which are compatible with b2g but do not require an emulator. this "
"argument is only used when loading tests from manifest files")
self.add_option('--homedir',
dest='homedir',
action='store',
help='home directory of emulator files')
self.add_option('--app',
dest='app',
action='store',
help='application to use')
self.add_option('--app-arg',
dest='app_args',
action='append',
default=[],
help='specify a command line argument to be passed onto the application')
self.add_option('--binary',
dest='bin',
action='store',
help='gecko executable to launch before running the test')
self.add_option('--profile',
dest='profile',
action='store',
help='profile to use when launching the gecko process. if not passed, then a profile will be '
'constructed and used')
self.add_option('--repeat',
dest='repeat',
action='store',
type=int,
default=0,
help='number of times to repeat the test(s)')
self.add_option('-x', '--xml-output',
action='store',
dest='xml_output',
help='xml output')
self.add_option('--gecko-path',
dest='gecko_path',
action='store',
help='path to b2g gecko binaries that should be installed on the device or emulator')
self.add_option('--testvars',
dest='testvars',
action='store',
help='path to a json file with any test data required')
self.add_option('--tree',
dest='tree',
action='store',
default='b2g',
help='the tree that the revision parameter refers to')
self.add_option('--symbols-path',
dest='symbols_path',
action='store',
help='absolute path to directory containing breakpad symbols, or the url of a zip file containing symbols')
self.add_option('--timeout',
dest='timeout',
type=int,
help='if a --timeout value is given, it will set the default page load timeout, search timeout, and script timeout to the given value. If not passed in, the defaults of 30000ms for page load, 0ms for search, and 10000ms for script timeout are used')
self.add_option('--es-server',
dest='es_servers',
action='append',
help='the ElasticSearch server to use for autolog submission')
self.add_option('--shuffle',
action='store_true',
dest='shuffle',
default=False,
help='run tests in a random order')
def verify_usage(self, options, tests):
if not tests:
print 'must specify one or more test files, manifests, or directories'
sys.exit(1)
if not options.emulator and not options.address and not options.bin:
print 'must specify --binary, --emulator or --address'
sys.exit(1)
if not options.es_servers:
options.es_servers = ['elasticsearch-zlb.dev.vlan81.phx.mozilla.com:9200',
'elasticsearch-zlb.webapp.scl3.mozilla.com:9200']
# default to storing logcat output for emulator runs
if options.emulator and not options.logcat_dir:
options.logcat_dir = 'logcat'
# check for valid resolution string, strip whitespaces
try:
if options.emulator_res:
dims = options.emulator_res.split('x')
assert len(dims) == 2
width = str(int(dims[0]))
height = str(int(dims[1]))
options.emulator_res = 'x'.join([width, height])
except:
raise ValueError('Invalid emulator resolution format. '
'Should be like "480x800".')
return (options, tests)
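# Example command lines accepted by the options above (paths, hosts and ports
# are illustrative only):
#   python runtests.py --binary /path/to/firefox --profile /path/to/profile tests/unit-tests.ini
#   python runtests.py --address localhost:2828 test_something.py
#   python runtests.py --emulator arm --homedir /path/to/B2G --type b2g tests/unit-tests.ini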
def startTestRunner(runner_class, options, tests):
@@ -19,8 +910,7 @@ def startTestRunner(runner_class, options, tests):
runner.run_tests(tests)
return runner
def cli(runner_class=MarionetteTestRunner, parser_class=BaseMarionetteOptions):
def cli(runner_class=MarionetteTestRunner, parser_class=MarionetteTestOptions):
parser = parser_class(usage='%prog [options] test_file_or_dir <test_file_or_dir> ...')
options, tests = parser.parse_args()
parser.verify_usage(options, tests)
@@ -31,4 +921,3 @@ def cli(runner_class=MarionetteTestRunner, parser_class=BaseMarionetteOptions):
if __name__ == "__main__":
cli()

View File

@@ -1,7 +1,7 @@
import os
from setuptools import setup, find_packages
version = '0.7.0'
version = '0.6.2'
# get documentation from the README
try:
@@ -15,7 +15,7 @@ deps = ['manifestdestiny', 'mozhttpd >= 0.5',
'mozprocess >= 0.9', 'mozrunner >= 5.15',
'mozdevice >= 0.22', 'moznetwork >= 0.21',
'mozcrash >= 0.5', 'mozprofile >= 0.7',
'moztest >= 0.1', 'py==1.4.14']
'moztest >= 0.1']
setup(name='marionette_client',
version=version,