Bug 1034290 - Use structured log output for test results in reftest, r=jmaher

Structured logs bring many benefits: we no longer need to parse the logs for
magic strings, we can change the format without breaking consumers, and we can
stream results into systems like ActiveData. The structured logs originate
primarily in reftest.js, which uses StructuredLog.jsm to generate the JSON-based
log stream. Finally, the OutputHandler in the Python harness reads the
structured output from stdout and formats it into human-readable form.

MozReview-Commit-ID: G3ZLkMRl6p7
Andrew Halberstadt 2016-02-05 15:44:20 -05:00
parent 3c5ccad7c1
commit 7fe6d4357c
18 changed files with 481 additions and 371 deletions
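
As an illustration of the pipeline described in the commit message (an editorial sketch, not part of the patch): reftest.js, via StructuredLog.jsm, writes one JSON object per line to stdout, and the Python-side OutputHandler parses each line, forwarding structured records to the logger and treating anything else as raw process output. The dispatch looks roughly like the following; field names other than "action" and the sample test paths are illustrative.

import json

def handle_line(line, sink):
    """Harness-side sketch: JSON lines become structured records,
    everything else is passed through as verbatim process output."""
    line = line.strip()
    if not line:
        return
    try:
        data = json.loads(line)
    except ValueError:
        sink.append(("process_output", line))
        return
    if isinstance(data, dict) and "action" in data:
        sink.append(("structured", data))  # e.g. test_start, test_end, suite_end
    else:
        sink.append(("process_output", json.dumps(data)))

# Lines of the kind reftest.js emits, one JSON object per line on stdout.
sink = []
for line in [
    '{"action": "test_start", "test": "layout/reftests/example.html"}',
    '{"action": "test_end", "test": "layout/reftests/example.html", "status": "PASS"}',
    "plain, unstructured browser output",
]:
    handle_line(line, sink)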

View File

@ -396,7 +396,7 @@ class Automation(object):
self.log.info("Can't trigger Breakpad, just killing process")
self.killPid(processPID)
def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath):
def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath, outputHandler=None):
""" Look for timeout or crashes and return the status after the process terminates """
stackFixerFunction = None
didTimeout = False
@ -436,7 +436,12 @@ class Automation(object):
while line != "" and not didTimeout:
if stackFixerFunction:
line = stackFixerFunction(line)
self.log.info(line.rstrip().decode("UTF-8", "ignore"))
if outputHandler is None:
self.log.info(line.rstrip().decode("UTF-8", "ignore"))
else:
outputHandler(line)
if "TEST-START" in line and "|" in line:
self.lastTestSeen = line.split("|")[1].strip()
if not debuggerInfo and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line:
@ -530,7 +535,7 @@ class Automation(object):
debuggerInfo = None, symbolsPath = None,
timeout = -1, maxTime = None, onLaunch = None,
detectShutdownLeaks = False, screenshotOnFail=False, testPath=None, bisectChunk=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None, outputHandler=None):
"""
Run the app, log the duration it took to execute, return the status code.
Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
@ -579,7 +584,8 @@ class Automation(object):
# app is launched.
onLaunch()
status = self.waitForFinish(proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath)
status = self.waitForFinish(proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath,
outputHandler=outputHandler)
self.log.info("INFO | automation.py | Application ran for: %s", str(datetime.now() - startTime))
# Do a final check for zombie child processes.

View File

@ -209,23 +209,31 @@ class B2GRemoteAutomation(Automation):
return app, args
def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime,
debuggerInfo, symbolsPath):
debuggerInfo, symbolsPath, outputHandler=None):
""" Wait for tests to finish (as evidenced by a signature string
in logcat), or for a given amount of time to elapse with no
output.
"""
timeout = timeout or 120
while True:
currentlog = proc.getStdoutLines(timeout)
if currentlog:
print currentlog
lines = proc.getStdoutLines(timeout)
if lines:
currentlog = '\n'.join(lines)
if outputHandler:
for line in lines:
outputHandler(line)
else:
print(currentlog)
# Match the test filepath from the last TEST-START line found in the new
# log content. These lines are in the form:
# ... INFO TEST-START | /filepath/we/wish/to/capture.html\n
testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", currentlog)
if testStartFilenames:
self.lastTestSeen = testStartFilenames[-1]
if hasattr(self, 'logFinish') and self.logFinish in currentlog:
if (outputHandler and outputHandler.suite_finished) or (
hasattr(self, 'logFinish') and self.logFinish in currentlog):
return 0
else:
self.log.info("TEST-UNEXPECTED-FAIL | %s | application timed "
@ -434,11 +442,12 @@ class B2GRemoteAutomation(Automation):
break
# wait 'timeout' for any additional lines
try:
lines.append(self.queue.get(True, timeout))
except Queue.Empty:
pass
return '\n'.join(lines)
if not lines:
try:
lines.append(self.queue.get(True, timeout))
except Queue.Empty:
pass
return lines
def wait(self, timeout=None):
# this should never happen

View File

@ -95,7 +95,7 @@ class RemoteAutomation(Automation):
return env
def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath):
def waitForFinish(self, proc, utilityPath, timeout, maxTime, startTime, debuggerInfo, symbolsPath, outputHandler=None):
""" Wait for tests to finish.
If maxTime seconds elapse or no output is detected for timeout
seconds, kill the process and fail the test.
@ -307,20 +307,21 @@ class RemoteAutomation(Automation):
return pid
def read_stdout(self):
""" Fetch the full remote log file using devicemanager and return just
the new log entries since the last call (as a list of messages or lines).
"""
Fetch the full remote log file using devicemanager, process any new content,
and return whether there were new log entries since the last call.
"""
if not self.dm.fileExists(self.proc):
return []
return False
try:
newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
except DMError:
# we currently don't retry properly in the pullFile
# function in dmSUT, so an error here is not necessarily
# the end of the world
return []
return False
if not newLogContent:
return []
return False
self.stdoutlen += len(newLogContent)
@ -329,26 +330,27 @@ class RemoteAutomation(Automation):
if testStartFilenames:
self.lastTestSeen = testStartFilenames[-1]
print newLogContent
return [newLogContent]
return True
self.logBuffer += newLogContent
lines = self.logBuffer.split('\n')
if not lines:
return
# We only keep the last (unfinished) line in the buffer
self.logBuffer = lines[-1]
del lines[-1]
messages = []
if lines:
# We only keep the last (unfinished) line in the buffer
self.logBuffer = lines[-1]
del lines[-1]
if not lines:
return False
for line in lines:
# This passes the line to the logger (to be logged or buffered)
# and returns a list of structured messages (dict)
parsed_messages = self.messageLogger.write(line)
for message in parsed_messages:
if message['action'] == 'test_start':
if isinstance(message, dict) and message.get('action') == 'test_start':
self.lastTestSeen = message['test']
messages += parsed_messages
return messages
return True
@property
def getLastTestSeen(self):
@ -374,10 +376,10 @@ class RemoteAutomation(Automation):
# too long, only do it every 60 seconds
if (not slowLog) or (timer % 60 == 0):
startRead = datetime.datetime.now()
messages = self.read_stdout()
hasOutput = self.read_stdout()
if (datetime.datetime.now() - startRead) > datetime.timedelta(seconds=5):
slowLog = True
if messages:
if hasOutput:
noOutputTimer = 0
time.sleep(interval)
timer += interval

View File

@ -12,6 +12,7 @@ _HARNESS_FILES = \
$(srcdir)/runreftestb2g.py \
$(srcdir)/runreftestmulet.py \
$(srcdir)/gaia_lock_screen.js \
$(srcdir)/output.py \
automation.py \
$(topsrcdir)/testing/mozbase/mozdevice/mozdevice/devicemanager.py \
$(topsrcdir)/testing/mozbase/mozdevice/mozdevice/devicemanagerADB.py \

View File

@ -2,6 +2,7 @@ reftest.jar:
% content reftest %content/
* content/reftest-content.js (reftest-content.js)
content/httpd.jsm (../../../netwerk/test/httpserver/httpd.js)
content/StructuredLog.jsm (../../../testing/modules/StructuredLog.jsm)
#ifdef BOOTSTRAP
* content/reftest.jsm (reftest.js)
#else

View File

@ -203,8 +203,6 @@ class ReftestRunner(MozbuildObject):
if not kwargs["runTestsInParallel"]:
kwargs["logFile"] = "%s.log" % kwargs["suite"]
# Remove the stdout handler from the internal logger and let mach deal with it
runreftest.log.removeHandler(runreftest.log.handlers[0])
self.log_manager.enable_unstructured()
try:
rv = runreftest.run(**kwargs)

View File

@ -0,0 +1,144 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import threading
from mozlog.formatters import TbplFormatter
from mozrunner.utils import get_stack_fixer_function
class ReftestFormatter(TbplFormatter):
"""
Formatter designed to preserve the legacy "tbpl" format in reftest.
This is needed for both the reftest-analyzer and mozharness log parsing.
We can change this format when both reftest-analyzer and mozharness have
been changed to read structured logs.
"""
def __call__(self, data):
if 'component' in data and data['component'] == 'mozleak':
# Output from mozleak requires that no prefix be added
# so that mozharness will pick up these failures.
return "%s\n" % data['message']
formatted = TbplFormatter.__call__(self, data)
if data['action'] == 'process_output':
return formatted
return 'REFTEST %s' % formatted
def log(self, data):
prefix = "%s |" % data['level'].upper()
return "%s %s\n" % (prefix, data['message'])
def test_end(self, data):
extra = data.get('extra', {})
status = data['status']
status_msg = "TEST-"
if 'expected' in data:
status_msg += "UNEXPECTED-%s" % status
else:
if status != "PASS":
status_msg += "KNOWN-"
status_msg += status
if extra.get('status_msg') == 'Random':
status_msg += "(EXPECTED RANDOM)"
test = self.id_str(data['test'])
if 'message' in data:
status_details = data['message']
elif isinstance(data['test'], tuple):
status_details = 'image comparison ({})'.format(data['test'][1])
else:
status_details = '(LOAD ONLY)'
output_text = "%s | %s | %s" % (status_msg, test, status_details)
if 'differences' in extra:
diff_msg = (", max difference: %(max_difference)s"
", number of differing pixels: %(differences)s") % extra
diagnostic_data = ("REFTEST IMAGE 1 (TEST): %(image1)s\n"
"REFTEST IMAGE 2 (REFERENCE): %(image2)s") % extra
output_text += '%s\n%s' % (diff_msg, diagnostic_data)
elif "image1" in extra:
diagnostic_data = "REFTEST IMAGE: %(image1)s" % extra
output_text += '\n%s' % diagnostic_data
output_text += "\nREFTEST TEST-END | %s" % test
return "%s\n" % output_text
def process_output(self, data):
return "%s\n" % data["data"]
def suite_end(self, data):
lines = []
summary = data['extra']['results']
summary['success'] = summary['Pass'] + summary['LoadOnly']
lines.append("Successful: %(success)s (%(Pass)s pass, %(LoadOnly)s load only)" %
summary)
summary['unexpected'] = (summary['Exception'] + summary['FailedLoad'] +
summary['UnexpectedFail'] + summary['UnexpectedPass'] +
summary['AssertionUnexpected'] +
summary['AssertionUnexpectedFixed'])
lines.append(("Unexpected: %(unexpected)s (%(UnexpectedFail)s unexpected fail, "
"%(UnexpectedPass)s unexpected pass, "
"%(AssertionUnexpected)s unexpected asserts, "
"%(FailedLoad)s failed load, "
"%(Exception)s exception)") % summary)
summary['known'] = (summary['KnownFail'] + summary['AssertionKnown'] +
summary['Random'] + summary['Skip'] + summary['Slow'])
lines.append(("Known problems: %(known)s (" +
"%(KnownFail)s known fail, " +
"%(AssertionKnown)s known asserts, " +
"%(Random)s random, " +
"%(Skip)s skipped, " +
"%(Slow)s slow)") % summary)
lines = ["REFTEST INFO | %s" % s for s in lines]
lines.append("REFTEST SUITE-END | Shutdown")
return "INFO | Result summary:\n{}\n".format('\n'.join(lines))
def id_str(self, test_id):
if isinstance(test_id, basestring):
return test_id
return test_id[0]
class OutputHandler(object):
"""Process the output of a process during a test run and translate
raw data logged from reftest.js to an appropriate structured log action,
where applicable.
"""
def __init__(self, log, utilityPath, symbolsPath=None):
self.stack_fixer_function = get_stack_fixer_function(utilityPath, symbolsPath)
self.log = log
# needed for b2gautomation.py
self.suite_finished = False
def __call__(self, line):
# need to return processed messages to appease remoteautomation.py
if not line.strip():
return []
try:
data = json.loads(line)
except ValueError:
self.verbatim(line)
return [line]
if isinstance(data, dict) and 'action' in data:
if data['action'] == 'suite_end':
self.suite_finished = True
self.log.log_raw(data)
else:
self.verbatim(json.dumps(data))
return [data]
def verbatim(self, line):
if self.stack_fixer_function:
line = self.stack_fixer_function(line)
self.log.process_output(threading.current_thread().name, line)
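
For orientation (an editorial sketch, not part of the patch): the suite_end formatter above reduces the per-status counters, which reftest.js ships in the suite_end 'extra' payload, to three totals. A quick worked example of that arithmetic in Python, using the counter names from the code above with invented counts:

results = {'Pass': 90, 'LoadOnly': 5, 'Exception': 0, 'FailedLoad': 1,
           'UnexpectedFail': 2, 'UnexpectedPass': 0, 'AssertionUnexpected': 0,
           'AssertionUnexpectedFixed': 0, 'KnownFail': 3, 'AssertionKnown': 0,
           'Random': 1, 'Skip': 4, 'Slow': 0}

success = results['Pass'] + results['LoadOnly']                      # 95
unexpected = (results['Exception'] + results['FailedLoad'] +
              results['UnexpectedFail'] + results['UnexpectedPass'] +
              results['AssertionUnexpected'] +
              results['AssertionUnexpectedFixed'])                   # 3
known = (results['KnownFail'] + results['AssertionKnown'] +
         results['Random'] + results['Skip'] + results['Slow'])      # 8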

View File

@ -38,6 +38,7 @@ const NS_OBSERVER_SERVICE_CONTRACTID =
CU.import("resource://gre/modules/FileUtils.jsm");
CU.import("chrome://reftest/content/httpd.jsm", this);
CU.import("chrome://reftest/content/StructuredLog.jsm", this);
CU.import("resource://gre/modules/Services.jsm");
CU.import("resource://gre/modules/NetUtil.jsm");
@ -99,6 +100,7 @@ var gTotalTests = 0;
var gState;
var gCurrentURL;
var gTestLog = [];
var gLogLevel;
var gServer;
var gCount = 0;
var gAssertionCount = 0;
@ -163,10 +165,6 @@ var gNoCanvasCache = false;
var gRecycledCanvases = new Array();
// By default we just log to stdout
var gDumpLog = dump;
var gVerbose = false;
// Only dump the sandbox once, because it doesn't depend on the
// manifest URL (yet!).
var gDumpedConditionSandbox = false;
@ -181,25 +179,27 @@ function HasUnexpectedResult()
gTestResults.AssertionUnexpectedFixed > 0;
}
function LogWarning(str)
// By default we just log to stdout
var gDumpFn = dump;
var gDumpRawLog = function(record) {
// Dump JSON representation of data on a single line
var line = JSON.stringify(record);
gDumpFn("\n" + line + "\n");
}
var logger = new StructuredLogger('reftest', gDumpRawLog);
function TestBuffer(str)
{
gDumpLog("REFTEST INFO | " + str + "\n");
logger.debug(str);
gTestLog.push(str);
}
function LogInfo(str)
function FlushTestBuffer()
{
if (gVerbose)
gDumpLog("REFTEST INFO | " + str + "\n");
gTestLog.push(str);
}
function FlushTestLog()
{
if (!gVerbose) {
// In verbose mode, we've dumped all these messages already.
// In debug mode, we've dumped all these messages already.
if (gLogLevel !== 'debug') {
for (var i = 0; i < gTestLog.length; ++i) {
gDumpLog("REFTEST INFO | Saved log: " + gTestLog[i] + "\n");
logger.info("Saved log: " + gTestLog[i]);
}
}
gTestLog = [];
@ -244,7 +244,7 @@ function getTestPlugin(aName) {
return tags[i];
}
LogWarning("Failed to find the test-plugin.");
logger.warning("Failed to find the test-plugin.");
return null;
}
@ -257,7 +257,6 @@ this.OnRefTestLoad = function OnRefTestLoad(win)
var env = CC["@mozilla.org/process/environment;1"].
getService(CI.nsIEnvironment);
gVerbose = !!env.get("MOZ_REFTEST_VERBOSE");
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
@ -279,6 +278,12 @@ this.OnRefTestLoad = function OnRefTestLoad(win)
gBrowserIsIframe = false;
}
try {
gLogLevel = prefs.getCharPref("reftest.logLevel");
} catch (e) {
gLogLevel = 'info';
}
if (win === undefined || win == null) {
win = window;
}
@ -322,7 +327,7 @@ this.OnRefTestLoad = function OnRefTestLoad(win)
plugin1.enabledState = CI.nsIPluginTag.STATE_ENABLED;
plugin2.enabledState = CI.nsIPluginTag.STATE_ENABLED;
} else {
LogWarning("Could not get test plugin tags.");
logger.warning("Could not get test plugin tags.");
}
gBrowserMessageManager = gBrowser.QueryInterface(CI.nsIFrameLoaderOwner)
@ -339,7 +344,7 @@ function InitAndStartRefTests()
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
} catch(e) {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + e + "\n");
logger.error("EXCEPTION: " + e);
}
try {
@ -361,7 +366,7 @@ function InitAndStartRefTests()
var f = FileUtils.File(logFile);
var mfl = FileUtils.openFileOutputStream(f, FileUtils.MODE_WRONLY | FileUtils.MODE_CREATE);
// Set to mirror to stdout as well as the file
gDumpLog = function (msg) {
gDumpFn = function (msg) {
#ifdef BOOTSTRAP
#ifdef REFTEST_B2G
dump(msg);
@ -376,7 +381,7 @@ function InitAndStartRefTests()
}
catch(e) {
// If there is a problem, just use stdout
gDumpLog = dump;
gDumpFn = dump;
}
}
} catch(e) {}
@ -427,7 +432,7 @@ function InitAndStartRefTests()
} catch (ex) {
//gBrowser.loadURI('data:text/plain,' + ex);
++gTestResults.Exception;
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + ex + "\n");
logger.error("EXCEPTION: " + ex);
DoneTests();
}
@ -465,7 +470,7 @@ function StartTests()
var prefs = Components.classes["@mozilla.org/preferences-service;1"].
getService(Components.interfaces.nsIPrefBranch);
} catch(e) {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + e + "\n");
logger.error("EXCEPTION: " + e);
}
try {
@ -513,7 +518,7 @@ function StartTests()
var manifests = JSON.parse(prefs.getCharPref("reftest.manifests"));
gURLFilterRegex = manifests[null];
} catch(e) {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | Unable to find reftest.manifests pref. Please ensure your profile is setup properly\n");
logger.error("Unable to find reftest.manifests pref. Please ensure your profile is setup properly");
DoneTests();
}
@ -525,7 +530,7 @@ function StartTests()
// process includes before reading the included manifest again
manifestURLs.sort(function(a,b) {return a.length - b.length})
manifestURLs.forEach(function(manifestURL) {
gDumpLog("Reading manifest " + manifestURL + "\n");
logger.info("Reading manifest " + manifestURL);
var filter = manifests[manifestURL] ? new RegExp(manifests[manifestURL]) : null;
ReadTopManifest(manifestURL, [globalFilter, filter, false]);
});
@ -534,6 +539,7 @@ function StartTests()
// Filter tests which will be skipped to get a more even distribution when chunking
// tURLs is a temporary array containing all active tests
var tURLs = new Array();
var tIDs = new Array();
for (var i = 0; i < gURLs.length; ++i) {
if (gURLs[i].expected == EXPECTED_DEATH)
continue;
@ -545,9 +551,10 @@ function StartTests()
continue;
tURLs.push(gURLs[i]);
tIDs.push(gURLs[i].identifier);
}
gDumpLog("REFTEST INFO | Discovered " + gURLs.length + " tests, after filtering SKIP tests, we have " + tURLs.length + "\n");
logger.suiteStart(tIDs, {"skipped": gURLs.length - tURLs.length});
if (gTotalChunks > 0 && gThisChunk > 0) {
// Calculate start and end indices of this chunk if tURLs array were
@ -562,8 +569,8 @@ function StartTests()
end = gThisChunk == gTotalChunks ? gURLs.length : gURLs.indexOf(tURLs[end + 1]) - 1;
gURLs = gURLs.slice(start, end);
gDumpLog("REFTEST INFO | Running chunk " + gThisChunk + " out of " + gTotalChunks + " chunks. ");
gDumpLog("tests " + (start+1) + "-" + end + "/" + gURLs.length + "\n");
logger.info("Running chunk " + gThisChunk + " out of " + gTotalChunks + " chunks. " +
"tests " + (start+1) + "-" + end + "/" + gURLs.length);
}
if (gShuffle) {
@ -580,7 +587,7 @@ function StartTests()
} catch (ex) {
//gBrowser.loadURI('data:text/plain,' + ex);
++gTestResults.Exception;
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | EXCEPTION: " + ex + "\n");
logger.error("EXCEPTION: " + ex);
DoneTests();
}
}
@ -593,7 +600,7 @@ function OnRefTestUnload()
plugin1.enabledState = gTestPluginEnabledStates[0];
plugin2.enabledState = gTestPluginEnabledStates[1];
} else {
LogWarning("Failed to get test plugin tags.");
logger.warning("Failed to get test plugin tags.");
}
}
@ -749,8 +756,8 @@ function BuildConditionSandbox(aURL) {
}
if (!gDumpedConditionSandbox) {
dump("REFTEST INFO | Dumping JSON representation of sandbox \n");
dump("REFTEST INFO | " + JSON.stringify(CU.waiveXrays(sandbox)) + " \n");
logger.info("Dumping JSON representation of sandbox");
logger.info(JSON.stringify(CU.waiveXrays(sandbox)));
gDumpedConditionSandbox = true;
}
@ -811,6 +818,12 @@ function AddTestItem(aTest, aFilter)
if (gFocusFilterMode == FOCUS_FILTER_NON_NEEDS_FOCUS_TESTS &&
aTest.needsFocus)
return;
if (aTest.url2 !== null)
aTest.identifier = [aTest.prettyPath, aTest.type, aTest.url2.spec];
else
aTest.identifier = aTest.prettyPath;
gURLs.push(aTest);
}
@ -837,8 +850,7 @@ function ReadManifest(aURL, inherited_status, aFilter)
var inputStream = channel.open2();
if (channel instanceof Components.interfaces.nsIHttpChannel
&& channel.responseStatus != 200) {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | | HTTP ERROR : " +
channel.responseStatus + "\n");
logger.error("HTTP ERROR : " + channel.responseStatus);
}
var streamBuf = getStreamContent(inputStream);
inputStream.close();
@ -1238,19 +1250,20 @@ function StartCurrentTest()
// make sure we don't run tests that are expected to kill the browser
while (gURLs.length > 0) {
var test = gURLs[0];
logger.testStart(test.identifier);
if (test.expected == EXPECTED_DEATH) {
++gTestResults.Skip;
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + test.url1.spec + " | (SKIP)\n");
logger.testEnd(test.identifier, "SKIP");
gURLs.shift();
} else if (test.needsFocus && !Focus()) {
// FIXME: Marking this as a known fail is dangerous! What
// if it starts failing all the time?
++gTestResults.Skip;
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + test.url1.spec + " | (SKIPPED; COULDN'T GET FOCUS)\n");
logger.testEnd(test.identifier, "SKIP", null, "(COULDN'T GET FOCUS)");
gURLs.shift();
} else if (test.slow && !gRunSlowTests) {
++gTestResults.Slow;
gDumpLog("REFTEST TEST-KNOWN-SLOW | " + test.url1.spec + " | (SLOW)\n");
logger.testEnd(test.identifier, "SKIP", null, "(SLOW)");
gURLs.shift();
} else {
break;
@ -1266,7 +1279,6 @@ function StartCurrentTest()
gRepeat--;
StartTests();
} else {
gDumpLog("REFTEST TEST-START | " + gURLs[0].prettyPath + "\n");
if (gURLs[0].chaosMode) {
gWindowUtils.enterChaosMode();
}
@ -1332,32 +1344,30 @@ function StartCurrentURI(aState)
} else if (ps.type == PREF_INTEGER) {
prefs.setIntPref(ps.name, value);
}
gDumpLog("SET PREFERENCE pref(" + ps.name + "," + value + ")\n");
logger.info("SET PREFERENCE pref(" + ps.name + "," + value + ")");
}
});
} catch (e) {
if (e == "bad pref") {
var test = gURLs[0];
if (test.expected == EXPECTED_FAIL) {
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + test.url1.spec +
" | (SKIPPED; " + badPref + " not known or wrong type)\n");
logger.testEnd(test.identifier, "FAIL", "FAIL",
"(SKIPPED; " + badPref + " not known or wrong type)");
++gTestResults.Skip;
} else {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + test.url1.spec +
" | " + badPref + " not known or wrong type\n");
logger.testEnd(test.identifier, "FAIL", "PASS",
badPref + " not known or wrong type");
++gTestResults.UnexpectedFail;
}
// skip the test that had a bad preference
gURLs.shift();
StartCurrentTest();
return;
} else {
throw e;
}
}
if (badPref != undefined) {
// skip the test that had a bad preference
gURLs.shift();
StartCurrentTest();
return;
}
}
if (prefSettings.length == 0 &&
@ -1370,9 +1380,11 @@ function StartCurrentURI(aState)
gContainingWindow.setTimeout(RecordResult, 0);
} else {
var currentTest = gTotalTests - gURLs.length;
gDumpLog("REFTEST TEST-LOAD | " + gCurrentURL + " | " + currentTest + " / " + gTotalTests +
" (" + Math.floor(100 * (currentTest / gTotalTests)) + "%)\n");
LogInfo("START " + gCurrentURL);
// Log this to preserve the same overall log format; it should be
// removed if the format is updated
gDumpFn("REFTEST TEST-LOAD | " + gCurrentURL + " | " + currentTest + " / " + gTotalTests +
" (" + Math.floor(100 * (currentTest / gTotalTests)) + "%)\n");
TestBuffer("START " + gCurrentURL);
var type = gURLs[0].type
if (TYPE_SCRIPT == type) {
SendLoadScriptTest(gCurrentURL, gLoadTimeout);
@ -1384,37 +1396,10 @@ function StartCurrentURI(aState)
function DoneTests()
{
gDumpLog("REFTEST FINISHED: Slowest test took " + gSlowestTestTime +
"ms (" + gSlowestTestURL + ")\n");
logger.suiteEnd({'results': gTestResults});
logger.info("Slowest test took " + gSlowestTestTime + "ms (" + gSlowestTestURL + ")");
logger.info("Total canvas count = " + gRecycledCanvases.length);
gDumpLog("REFTEST INFO | Result summary:\n");
var count = gTestResults.Pass + gTestResults.LoadOnly;
gDumpLog("REFTEST INFO | Successful: " + count + " (" +
gTestResults.Pass + " pass, " +
gTestResults.LoadOnly + " load only)\n");
count = gTestResults.Exception + gTestResults.FailedLoad +
gTestResults.UnexpectedFail + gTestResults.UnexpectedPass +
gTestResults.AssertionUnexpected +
gTestResults.AssertionUnexpectedFixed;
gDumpLog("REFTEST INFO | Unexpected: " + count + " (" +
gTestResults.UnexpectedFail + " unexpected fail, " +
gTestResults.UnexpectedPass + " unexpected pass, " +
gTestResults.AssertionUnexpected + " unexpected asserts, " +
gTestResults.AssertionUnexpectedFixed + " unexpected fixed asserts, " +
gTestResults.FailedLoad + " failed load, " +
gTestResults.Exception + " exception)\n");
count = gTestResults.KnownFail + gTestResults.AssertionKnown +
gTestResults.Random + gTestResults.Skip + gTestResults.Slow;
gDumpLog("REFTEST INFO | Known problems: " + count + " (" +
gTestResults.KnownFail + " known fail, " +
gTestResults.AssertionKnown + " known asserts, " +
gTestResults.Random + " random, " +
gTestResults.Skip + " skipped, " +
gTestResults.Slow + " slow)\n");
gDumpLog("REFTEST INFO | Total canvas count = " + gRecycledCanvases.length + "\n");
gDumpLog("REFTEST TEST-START | Shutdown\n");
function onStopped() {
let appStartup = CC["@mozilla.org/toolkit/app-startup;1"].getService(CI.nsIAppStartup);
appStartup.quit(CI.nsIAppStartup.eForceQuit);
@ -1462,7 +1447,7 @@ function DoDrawWindow(ctx, x, y, w, h)
// browser element
flags |= ctx.DRAWWINDOW_USE_WIDGET_LAYERS;
} else if (gBrowserIsRemote) {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | can't drawWindow remote content\n");
logger.error(gCurrentURL + " | can't drawWindow remote content");
++gTestResults.Exception;
}
@ -1475,22 +1460,21 @@ function DoDrawWindow(ctx, x, y, w, h)
} else {
// Output a special warning because we need to be able to detect
// this whenever it happens.
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | WARNING: USE_WIDGET_LAYERS disabled\n");
logger.error("WARNING: USE_WIDGET_LAYERS disabled");
}
gDumpLog("REFTEST INFO | drawWindow flags = " + flagsStr +
"; window size = " + gContainingWindow.innerWidth + "," + gContainingWindow.innerHeight +
"; test browser size = " + testRect.width + "," + testRect.height +
"\n");
logger.info("drawWindow flags = " + flagsStr +
"; window size = " + gContainingWindow.innerWidth + "," + gContainingWindow.innerHeight +
"; test browser size = " + testRect.width + "," + testRect.height);
}
LogInfo("DoDrawWindow " + x + "," + y + "," + w + "," + h);
TestBuffer("DoDrawWindow " + x + "," + y + "," + w + "," + h);
ctx.drawWindow(gContainingWindow, x, y, w, h, "rgb(255,255,255)",
gDrawWindowFlags);
}
function InitCurrentCanvasWithSnapshot()
{
LogInfo("Initializing canvas snapshot");
TestBuffer("Initializing canvas snapshot");
if (gURLs[0].type == TYPE_LOAD || gURLs[0].type == TYPE_SCRIPT) {
// We don't want to snapshot this kind of test
@ -1508,7 +1492,7 @@ function InitCurrentCanvasWithSnapshot()
function UpdateCurrentCanvasForInvalidation(rects)
{
LogInfo("Updating canvas for invalidation");
TestBuffer("Updating canvas for invalidation");
if (!gCurrentCanvas) {
return;
@ -1538,7 +1522,7 @@ function UpdateCurrentCanvasForInvalidation(rects)
function UpdateWholeCurrentCanvasForInvalidation()
{
LogInfo("Updating entire canvas for invalidation");
TestBuffer("Updating entire canvas for invalidation");
if (!gCurrentCanvas) {
return;
@ -1550,7 +1534,7 @@ function UpdateWholeCurrentCanvasForInvalidation()
function RecordResult(testRunTime, errorMsg, scriptResults)
{
LogInfo("RecordResult fired");
TestBuffer("RecordResult fired");
// Keep track of which test was slowest, and how long it took.
if (testRunTime > gSlowestTestTime) {
@ -1560,26 +1544,26 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
// Not 'const ...' because of 'EXPECTED_*' value dependency.
var outputs = {};
const randomMsg = "(EXPECTED RANDOM)";
outputs[EXPECTED_PASS] = {
true: {s: "TEST-PASS" , n: "Pass"},
false: {s: "TEST-UNEXPECTED-FAIL" , n: "UnexpectedFail"}
true: {s: ["PASS", "PASS"], n: "Pass"},
false: {s: ["FAIL", "PASS"], n: "UnexpectedFail"}
};
outputs[EXPECTED_FAIL] = {
true: {s: "TEST-UNEXPECTED-PASS" , n: "UnexpectedPass"},
false: {s: "TEST-KNOWN-FAIL" , n: "KnownFail"}
true: {s: ["PASS", "FAIL"], n: "UnexpectedPass"},
false: {s: ["FAIL", "FAIL"], n: "KnownFail"}
};
outputs[EXPECTED_RANDOM] = {
true: {s: "TEST-PASS" + randomMsg , n: "Random"},
false: {s: "TEST-KNOWN-FAIL" + randomMsg, n: "Random"}
true: {s: ["PASS", "PASS"], n: "Random"},
false: {s: ["FAIL", "FAIL"], n: "Random"}
};
outputs[EXPECTED_FUZZY] = outputs[EXPECTED_PASS];
var output;
var extra;
if (gURLs[0].type == TYPE_LOAD) {
++gTestResults.LoadOnly;
gDumpLog("REFTEST TEST-PASS | " + gURLs[0].prettyPath + " | (LOAD ONLY)\n");
logger.testEnd(gURLs[0].identifier, "PASS", "PASS", "(LOAD ONLY)");
gCurrentCanvas = null;
FinishTestItem();
return;
@ -1597,17 +1581,14 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
if (!gURLs[0].allowSilentFail)
errorMsg = "No test results reported. (SCRIPT)\n";
else
gDumpLog("REFTEST INFO | An expected silent failure occurred \n");
logger.info("An expected silent failure occurred");
}
if (errorMsg) {
output = outputs[expected][false];
extra = { status_msg: output.n };
++gTestResults[output.n];
var result = "REFTEST " + output.s + " | " +
gURLs[0].prettyPath + " | " + // the URL being tested
errorMsg;
gDumpLog(result);
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], errorMsg, null, extra);
FinishTestItem();
return;
}
@ -1627,16 +1608,15 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
var index = 0;
scriptResults.forEach(function(result) {
var output = outputPair[result.passed];
var extra = { status_msg: output.n };
++gTestResults[output.n];
result = "REFTEST " + output.s + " | " +
gURLs[0].prettyPath + " | " + // the URL being tested
result.description + " item " + (++index) + "\n";
gDumpLog(result);
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1],
result.description + " item " + (++index), null, extra);
});
if (anyFailed && expected == EXPECTED_PASS) {
FlushTestLog();
FlushTestBuffer();
}
FinishTestItem();
@ -1648,7 +1628,7 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
gCurrentCanvas = gURICanvases[gCurrentURL];
}
if (gCurrentCanvas == null) {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | program error managing snapshots\n");
logger.error(gCurrentURL + " | program error managing snapshots");
++gTestResults.Exception;
}
if (gState == 1) {
@ -1691,7 +1671,7 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
throw "Inconsistent result from compareCanvases.";
}
equal = expected == EXPECTED_FUZZY;
gDumpLog("REFTEST fuzzy match\n");
logger.info("REFTEST fuzzy match");
}
var failedExtraCheck = gFailedNoPaint || gFailedOpaqueLayer || gFailedAssignedLayer;
@ -1700,6 +1680,7 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
var test_passed = (equal == (gURLs[0].type == TYPE_REFTEST_EQUAL)) && !failedExtraCheck;
output = outputs[expected][test_passed];
extra = { status_msg: output.n };
++gTestResults[output.n];
@ -1718,39 +1699,21 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
failures.push("failed reftest-assigned-layer: " + gFailedAssignedLayerMessages.join(", "));
}
var failureString = failures.join(", ");
if (expected == EXPECTED_FAIL) {
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath + " | " + failureString + "\n");
} else {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath + " | " + failureString + "\n");
}
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], failureString, null, extra);
} else {
var result = "REFTEST " + output.s + " | " +
gURLs[0].prettyPath + " | "; // the URL being tested
switch (gURLs[0].type) {
case TYPE_REFTEST_NOTEQUAL:
result += "image comparison (!=)";
break;
case TYPE_REFTEST_EQUAL:
result += "image comparison (==)";
break;
}
if (!test_passed && expected == EXPECTED_PASS ||
!test_passed && expected == EXPECTED_FUZZY ||
test_passed && expected == EXPECTED_FAIL) {
if (!equal) {
result += ", max difference: " + maxDifference.value + ", number of differing pixels: " + differences + "\n";
result += "REFTEST IMAGE 1 (TEST): " + gCanvas1.toDataURL() + "\n";
result += "REFTEST IMAGE 2 (REFERENCE): " + gCanvas2.toDataURL() + "\n";
extra.max_difference = maxDifference.value;
extra.differences = differences;
extra.image1 = gCanvas1.toDataURL();
extra.image2 = gCanvas2.toDataURL();
} else {
result += "\n";
result += "REFTEST IMAGE: " + gCanvas1.toDataURL() + "\n";
extra.image1 = gCanvas1.toDataURL();
}
} else {
result += "\n";
}
gDumpLog(result);
logger.testEnd(gURLs[0].identifier, output.s[0], output.s[1], null, null, extra);
if (gURLs[0].prefSettings1.length == 0) {
UpdateCanvasCache(gURLs[0].url1, gCanvas1);
@ -1761,7 +1724,7 @@ function RecordResult(testRunTime, errorMsg, scriptResults)
}
if ((!test_passed && expected == EXPECTED_PASS) || (test_passed && expected == EXPECTED_FAIL)) {
FlushTestLog();
FlushTestBuffer();
}
CleanUpCrashDumpFiles();
@ -1778,11 +1741,10 @@ function LoadFailed(why)
// Once bug 896840 is fixed, this can go away, but for now it will give log
// output that is TBPL starable for bug 789751 and bug 720452.
if (!why) {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | load failed with unknown reason\n");
logger.error("load failed with unknown reason");
}
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " +
gURLs[0]["url" + gState].spec + " | load failed: " + why + "\n");
FlushTestLog();
logger.testEnd(gURLs[0]["url" + gState].spec, "FAIL", "PASS", "load failed: " + why);
FlushTestBuffer();
FinishTestItem();
}
@ -1819,11 +1781,9 @@ function FindUnexpectedCrashDumpFiles()
if (!foundCrashDumpFile) {
++gTestResults.UnexpectedFail;
foundCrashDumpFile = true;
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL +
" | This test left crash dumps behind, but we weren't expecting it to!\n");
logger.testEnd(gCurrentURL, "FAIL", "PASS", "This test left crash dumps behind, but we weren't expecting it to!");
}
gDumpLog("REFTEST INFO | Found unexpected crash dump file " + path +
".\n");
logger.info("Found unexpected crash dump file " + path);
gUnexpectedCrashDumpFiles[path] = true;
}
}
@ -1840,7 +1800,7 @@ function FinishTestItem()
{
// Replace document with BLANK_URL_FOR_CLEARING in case there are
// assertions when unloading.
gDumpLog("REFTEST INFO | Loading a blank page\n");
logger.debug("Loading a blank page");
// After clearing, content will notify us of the assertion count
// and tests will continue.
SendClear();
@ -1875,26 +1835,25 @@ function DoAssertionCheck(numAsserts)
if (numAsserts < minAsserts) {
++gTestResults.AssertionUnexpectedFixed;
gDumpLog("REFTEST TEST-UNEXPECTED-PASS | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is less than " +
expectedAssertions + "\n");
gDumpFn("REFTEST TEST-UNEXPECTED-PASS | " + gURLs[0].prettyPath +
" | assertion count" + numAsserts + " is less than " +
expectedAssertions + "\n");
} else if (numAsserts > maxAsserts) {
++gTestResults.AssertionUnexpected;
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is more than " +
expectedAssertions + "\n");
gDumpFn("REFTEST TEST-UNEXPECTED-FAIL | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " is more than " +
expectedAssertions + "\n");
} else if (numAsserts != 0) {
++gTestResults.AssertionKnown;
gDumpLog("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath +
" | assertion count " + numAsserts + " matches " +
expectedAssertions + "\n");
gDumpFn("REFTEST TEST-KNOWN-FAIL | " + gURLs[0].prettyPath +
"assertion count " + numAsserts + " matches " +
expectedAssertions + "\n");
}
}
if (gURLs[0].chaosMode) {
gWindowUtils.leaveChaosMode();
}
gDumpLog("REFTEST TEST-END | " + gURLs[0].prettyPath + "\n");
// And start the next test.
gURLs.shift();
@ -1923,7 +1882,7 @@ function RestoreChangedPreferences()
} else if (ps.type == PREF_INTEGER) {
prefs.setIntPref(ps.name, value);
}
gDumpLog("RESTORE PREFERENCE pref(" + ps.name + "," + value + ")\n");
logger.info("RESTORE PREFERENCE pref(" + ps.name + "," + value + ")");
});
gPrefsToRestore = [];
}
@ -2004,7 +1963,7 @@ function RecvContentReady()
function RecvException(what)
{
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | " + what + "\n");
logger.error(gCurrentURL + " | " + what);
++gTestResults.Exception;
}
@ -2036,13 +1995,13 @@ function RecvInitCanvasWithSnapshot()
function RecvLog(type, msg)
{
msg = "[CONTENT] "+ msg;
msg = "[CONTENT] " + msg;
if (type == "info") {
LogInfo(msg);
TestBuffer(msg);
} else if (type == "warning") {
LogWarning(msg);
logger.warning(msg);
} else {
gDumpLog("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | unknown log type " + type + "\n");
logger.error("REFTEST TEST-UNEXPECTED-FAIL | " + gCurrentURL + " | unknown log type " + type + "\n");
++gTestResults.Exception;
}
}

View File

@ -1,8 +1,11 @@
import argparse
import os
import sys
from collections import OrderedDict
from urlparse import urlparse
import mozlog
here = os.path.abspath(os.path.dirname(__file__))
@ -207,6 +210,8 @@ class ReftestArgumentsParser(argparse.ArgumentParser):
nargs="*",
help="Path to test file, manifest file, or directory containing tests")
mozlog.commandline.add_logging_group(self)
def get_ip(self):
import moznetwork
if os.name != "nt":
@ -235,8 +240,6 @@ class ReftestArgumentsParser(argparse.ArgumentParser):
self.error("Failed to determine test suite; supply --suite to set this explicitly")
def validate(self, options, reftest):
import sys
if not options.tests:
# Can't just set this in the argument parser because mach will set a default
self.error("Must supply at least one path to a manifest file, test directory, or test file to run.")

View File

@ -8,19 +8,21 @@ import time
import tempfile
import traceback
# We need to know our current directory so that we can serve our test files from it.
SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
from runreftest import RefTest, ReftestResolver
from automation import Automation
import devicemanager
import droid
import mozinfo
import moznetwork
from automation import Automation
from remoteautomation import RemoteAutomation, fennecLogcatFilters
from output import OutputHandler
from runreftest import RefTest, ReftestResolver
import reftestcommandline
# We need to know our current directory so that we can serve our test files from it.
SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
class RemoteReftestResolver(ReftestResolver):
def absManifestPath(self, path):
script_abs_path = os.path.join(SCRIPT_DIRECTORY, path)
@ -146,6 +148,13 @@ class RemoteReftest(RefTest):
self.automation.deleteANRs()
self.automation.deleteTombstones()
self._populate_logger(options)
outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
# RemoteAutomation.py's 'messageLogger' is also used by mochitest. Mimic a mochitest
# MessageLogger object to re-use this code path.
outputHandler.write = outputHandler.__call__
self.automation._processArgs['messageLogger'] = outputHandler
def findPath(self, paths, filename = None):
for path in paths:
p = path
@ -445,4 +454,3 @@ def main():
if __name__ == "__main__":
sys.exit(main())

View File

@ -26,26 +26,23 @@ import mozcrash
import mozdebug
import mozinfo
import mozleak
import mozlog
import mozprocess
import mozprofile
import mozrunner
from mozrunner.utils import get_stack_fixer_function, test_environment
from mozscreenshot import printstatus, dump_screen
from output import OutputHandler, ReftestFormatter
import reftestcommandline
# set up logging handler a la automation.py.in for compatability
import logging
log = logging.getLogger()
here = os.path.abspath(os.path.dirname(__file__))
def resetGlobalLog():
while log.handlers:
log.removeHandler(log.handlers[0])
handler = logging.StreamHandler(sys.stdout)
log.setLevel(logging.INFO)
log.addHandler(handler)
resetGlobalLog()
try:
from mozbuild.base import MozbuildObject
build_obj = MozbuildObject.from_environment(cwd=here)
except ImportError:
build_obj = None
def categoriesToRegex(categoryList):
@ -203,6 +200,21 @@ class RefTest(object):
self.lastTestSeen = 'reftest'
self.haveDumpedScreen = False
self.resolver = self.resolver_cls()
self.log = None
def _populate_logger(self, options):
if self.log:
return
mozlog.commandline.log_formatters["tbpl"] = (ReftestFormatter,
"Reftest specific formatter for the"
"benefit of legacy log parsers and"
"tools such as the reftest analyzer")
fmt_options = {}
if not options.log_tbpl_level and os.environ.get('MOZ_REFTEST_VERBOSE'):
options.log_tbpl_level = fmt_options['level'] = 'debug'
self.log = mozlog.commandline.setup_logging(
"reftest harness", options, {"tbpl": sys.stdout}, fmt_options)
def update_mozinfo(self):
"""walk up directories to find mozinfo.json update the info"""
@ -255,6 +267,7 @@ class RefTest(object):
if options.runUntilFailure:
prefs['reftest.runUntilFailure'] = True
prefs['reftest.focusFilterMode'] = options.focusFilterMode
prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
prefs['reftest.manifests'] = json.dumps(manifests)
# Ensure that telemetry is disabled, so we don't connect to the telemetry
@ -331,7 +344,7 @@ class RefTest(object):
return profile
def environment(self, **kwargs):
kwargs['log'] = log
kwargs['log'] = self.log
return test_environment(**kwargs)
def buildBrowserEnv(self, options, profileDir):
@ -360,11 +373,11 @@ class RefTest(object):
def killNamedOrphans(self, pname):
""" Kill orphan processes matching the given command name """
log.info("Checking for orphan %s processes..." % pname)
self.log.info("Checking for orphan %s processes..." % pname)
def _psInfo(line):
if pname in line:
log.info(line)
self.log.info(line)
process = mozprocess.ProcessHandler(['ps', '-f'],
processOutputLine=_psInfo)
process.run()
@ -375,13 +388,13 @@ class RefTest(object):
if len(parts) == 3 and parts[0].isdigit():
pid = int(parts[0])
if parts[2] == pname and parts[1] == '1':
log.info("killing %s orphan with pid %d" % (pname, pid))
self.log.info("killing %s orphan with pid %d" % (pname, pid))
try:
os.kill(
pid, getattr(signal, "SIGKILL", signal.SIGTERM))
except Exception as e:
log.info("Failed to kill process %d: %s" %
(pid, str(e)))
self.log.info("Failed to kill process %d: %s" %
(pid, str(e)))
process = mozprocess.ProcessHandler(['ps', '-o', 'pid,ppid,comm'],
processOutputLine=_psKill)
process.run()
@ -392,6 +405,8 @@ class RefTest(object):
shutil.rmtree(profileDir, True)
def runTests(self, tests, options, cmdlineArgs=None):
self._populate_logger(options)
# Despite our efforts to clean up servers started by this script, in practice
# we still see infrequent cases where a process is orphaned and interferes
# with future tests, typically because the old server is keeping the port in use.
@ -484,17 +499,16 @@ class RefTest(object):
"""handle process output timeout"""
# TODO: bug 913975 : _processOutput should call self.processOutputLine
# one more time one timeout (I think)
log.error("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output" %
(self.lastTestSeen, int(timeout)))
self.log.error("%s | application timed out after %d seconds with no output" % (self.lastTestSeen, int(timeout)))
self.killAndGetStack(
proc, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
def dumpScreen(self, utilityPath):
if self.haveDumpedScreen:
log.info("Not taking screenshot here: see the one that was previously logged")
self.log.info("Not taking screenshot here: see the one that was previously logged")
return
self.haveDumpedScreen = True
dump_screen(utilityPath, log)
dump_screen(utilityPath, self.log)
def killAndGetStack(self, process, utilityPath, debuggerInfo, dump_screen=False):
"""
@ -522,75 +536,11 @@ class RefTest(object):
process.kill(sig=signal.SIGABRT)
except OSError:
# https://bugzilla.mozilla.org/show_bug.cgi?id=921509
log.info("Can't trigger Breakpad, process no longer exists")
self.log.info("Can't trigger Breakpad, process no longer exists")
return
log.info("Can't trigger Breakpad, just killing process")
self.log.info("Can't trigger Breakpad, just killing process")
process.kill()
# output processing
class OutputHandler(object):
"""line output handler for mozrunner"""
def __init__(self, harness, utilityPath, symbolsPath=None, dump_screen_on_timeout=True):
"""
harness -- harness instance
dump_screen_on_timeout -- whether to dump the screen on timeout
"""
self.harness = harness
self.utilityPath = utilityPath
self.symbolsPath = symbolsPath
self.dump_screen_on_timeout = dump_screen_on_timeout
self.stack_fixer_function = self.stack_fixer()
def processOutputLine(self, line):
"""per line handler of output for mozprocess"""
for handler in self.output_handlers():
line = handler(line)
__call__ = processOutputLine
def output_handlers(self):
"""returns ordered list of output handlers"""
return [self.fix_stack,
self.format,
self.record_last_test,
self.handle_timeout_and_dump_screen,
self.log,
]
def stack_fixer(self):
"""
return get_stack_fixer_function, if any, to use on the output lines
"""
return get_stack_fixer_function(self.utilityPath, self.symbolsPath)
# output line handlers:
# these take a line and return a line
def fix_stack(self, line):
if self.stack_fixer_function:
return self.stack_fixer_function(line)
return line
def format(self, line):
"""format the line"""
return line.rstrip().decode("UTF-8", "ignore")
def record_last_test(self, line):
"""record last test on harness"""
if "TEST-START" in line and "|" in line:
self.harness.lastTestSeen = line.split("|")[1].strip()
return line
def handle_timeout_and_dump_screen(self, line):
if self.dump_screen_on_timeout and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line:
self.harness.dumpScreen(self.utilityPath)
return line
def log(self, line):
log.info(line)
return line
def runApp(self, profile, binary, cmdargs, env,
timeout=None, debuggerInfo=None,
symbolsPath=None, options=None,
@ -606,10 +556,14 @@ class RefTest(object):
interactive = debuggerInfo.interactive
debug_args = [debuggerInfo.path] + debuggerInfo.args
outputHandler = self.OutputHandler(harness=self,
utilityPath=options.utilityPath,
symbolsPath=symbolsPath,
dump_screen_on_timeout=not debuggerInfo)
def record_last_test(message):
"""Records the last test seen by this harness for the benefit of crash logging."""
if message['action'] == 'test_start':
self.lastTestSeen = message['test']
self.log.add_handler(record_last_test)
outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=symbolsPath)
kp_kwargs = {
'kill_on_timeout': False,
@ -639,21 +593,18 @@ class RefTest(object):
interactive=interactive,
outputTimeout=timeout)
proc = runner.process_handler
status = runner.wait()
runner.process_handler = None
if timeout is None:
didTimeout = False
else:
didTimeout = proc.didTimeout
if status:
log.info("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s",
self.lastTestSeen, status)
self.log.error("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % (self.lastTestSeen, status))
else:
self.lastTestSeen = 'Main app process exited normally'
crashed = mozcrash.check_for_crashes(os.path.join(profile.profile, "minidumps"),
symbolsPath, test_name=self.lastTestSeen)
crashed = mozcrash.log_crashes(self.log, os.path.join(profile.profile, 'minidumps'),
symbolsPath, test=self.lastTestSeen)
runner.cleanup()
if not status and crashed:
status = 1
@ -667,7 +618,7 @@ class RefTest(object):
profileDir = None
try:
if cmdlineArgs == None:
if cmdlineArgs is None:
cmdlineArgs = []
profile = self.createReftestProfile(options, manifests)
profileDir = profile.profile # name makes more sense
@ -675,7 +626,6 @@ class RefTest(object):
# browser environment
browserEnv = self.buildBrowserEnv(options, profileDir)
log.info("REFTEST INFO | runreftest.py | Running tests: start.\n")
status = self.runApp(profile,
binary=options.app,
cmdargs=cmdlineArgs,
@ -688,11 +638,9 @@ class RefTest(object):
debuggerInfo=debuggerInfo)
mozleak.process_leak_log(self.leakLogFile,
leak_thresholds=options.leakThresholds,
log=log,
stack_fixer=get_stack_fixer_function(options.utilityPath,
options.symbolsPath),
)
log.info("\nREFTEST INFO | runreftest.py | Running tests: end.")
finally:
self.cleanup(profileDir)
return status
@ -712,31 +660,28 @@ class RefTest(object):
dest = os.path.join(profileDir, os.path.basename(abspath))
shutil.copytree(abspath, dest)
else:
log.warning(
"WARNING | runreftest.py | Failed to copy %s to profile", abspath)
self.log.warning(
"runreftest.py | Failed to copy %s to profile" % abspath)
continue
def run(**kwargs):
parser = reftestcommandline.DesktopArgumentsParser()
# Mach gives us kwargs; this is a way to turn them back into an
# options object
parser = reftestcommandline.DesktopArgumentsParser()
reftest = RefTest()
parser.set_defaults(**kwargs)
options = parser.parse_args(kwargs["tests"])
if 'tests' in kwargs:
options = parser.parse_args(kwargs["tests"])
else:
options = parser.parse_args()
reftest = RefTest()
parser.validate(options, reftest)
return reftest.runTests(options.tests, options)
def main():
parser = reftestcommandline.DesktopArgumentsParser()
reftest = RefTest()
options = parser.parse_args()
parser.validate(options, reftest)
sys.exit(reftest.runTests(options.tests, options))
if __name__ == "__main__":
main()
sys.exit(run())

View File

@ -16,6 +16,7 @@ if here not in sys.path:
from automation import Automation
from b2gautomation import B2GRemoteAutomation
from runreftestmulet import run_test_harness as run_mulet_reftests
from output import OutputHandler
from remotereftest import RemoteReftestResolver, ReftestServer
from runreftest import RefTest
import reftestcommandline
@ -311,6 +312,8 @@ class B2GRemoteReftest(RefTest):
timeout=None, debuggerInfo=None,
symbolsPath=None, options=None,
valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):
outputHandler = OutputHandler(self.log, options.utilityPath, options.symbolsPath)
status = self.automation.runApp(None, env,
binary,
profile.profile,
@ -319,7 +322,8 @@ class B2GRemoteReftest(RefTest):
xrePath=options.xrePath,
debuggerInfo=debuggerInfo,
symbolsPath=symbolsPath,
timeout=timeout)
timeout=timeout,
outputHandler=outputHandler)
return status
@ -380,7 +384,6 @@ def run_remote_reftests(parser, options):
auto.setProduct("b2g")
auto.test_script = os.path.join(here, 'b2g_start_script.js')
auto.test_script_args = [options.remoteWebServer, options.httpPort]
auto.logFinish = "REFTEST TEST-START | Shutdown"
reftest = B2GRemoteReftest(auto, dm, options, here)
parser.validate(options, reftest)

View File

@ -20,11 +20,10 @@ import mozinfo
import mozlog
from runreftest import RefTest
from output import OutputHandler
import reftestcommandline
log = mozlog.unstructured.getLogger('REFTEST')
class MuletReftest(RefTest):
build_type = "mulet"
marionette = None
@ -59,7 +58,11 @@ class MuletReftest(RefTest):
self.profile = self.create_profile(options, manifests,
profile_to_clone=options.profile)
env = self.buildBrowserEnv(options, self.profile.profile)
kp_kwargs = { 'processOutputLine': [self._on_output],
self._populate_logger(options)
outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=options.symbolsPath)
kp_kwargs = { 'processOutputLine': [outputHandler],
'onTimeout': [self._on_timeout],
'kill_on_timeout': False }
@ -71,7 +74,7 @@ class MuletReftest(RefTest):
options.timeout = 300
self.timeout = options.timeout + 30.0
log.info("%s | Running tests: start.", os.path.basename(__file__))
self.log.info("%s | Running tests: start." % os.path.basename(__file__))
cmd, args = self.build_command_line(options.app,
ignore_window_size=options.ignoreWindowSize,
browser_arg=options.browser_arg)
@ -86,9 +89,9 @@ class MuletReftest(RefTest):
status = 0
try:
self.runner.start(outputTimeout=self.timeout)
log.info("%s | Application pid: %d",
self.log.info("%s | Application pid: %d" % (
os.path.basename(__file__),
self.runner.process_handler.pid)
self.runner.process_handler.pid))
# kick starts the reftest harness
self.run_marionette_script()
@ -98,13 +101,13 @@ class MuletReftest(RefTest):
self.runner.cleanup()
if status > 0:
log.testFail("%s | application terminated with exit code %s",
self.last_test, status)
self.log.testFail("%s | application terminated with exit code %s" % (
self.last_test, status))
elif status < 0:
log.info("%s | application killed with signal %s",
self.last_test, -status)
self.log.info("%s | application killed with signal %s" % (
self.last_test, -status))
log.info("%s | Running tests: end.", os.path.basename(__file__))
self.log.info("%s | Running tests: end." % os.path.basename(__file__))
return status
def create_profile(self, options, manifests, profile_to_clone=None):
@ -159,22 +162,13 @@ class MuletReftest(RefTest):
args += ['-chrome', 'chrome://b2g/content/shell.html']
return cmd, args
def _on_output(self, line):
sys.stdout.write("%s\n" % line)
sys.stdout.flush()
# TODO use structured logging
if "TEST-START" in line and "|" in line:
self.last_test = line.split("|")[1].strip()
def _on_timeout(self):
msg = "%s | application timed out after %s seconds with no output"
log.testFail(msg % (self.last_test, self.timeout))
self.log.testFail(msg % (self.last_test, self.timeout))
# kill process to get a stack
self.runner.stop(sig=signal.SIGABRT)
def _unlockScreen(self):
self.marionette.set_context(self.marionette.CONTEXT_CONTENT)
self.marionette.import_script(os.path.abspath(
@ -183,10 +177,11 @@ class MuletReftest(RefTest):
self.marionette.execute_async_script('GaiaLockScreen.unlock()')
def _wait_for_homescreen(self, timeout):
log.info("Waiting for home screen to load")
self.log.info("Waiting for home screen to load")
Wait(self.marionette, timeout).until(expected.element_present(
By.CSS_SELECTOR, '#homescreen[loading-state=false]'))
def run_test_harness(parser, options):
marionette_args = {}
if options.marionette:

View File

@ -87,17 +87,35 @@ StructuredLogger.prototype = {
this._logData("test_end", data);
},
suiteStart: function (tests, runinfo=null) {
suiteStart: function (tests, runinfo=null, versioninfo=null, deviceinfo=null, extra=null) {
var data = {tests: tests};
if (runinfo !== null) {
data.runinfo = runinfo;
}
if (versioninfo !== null) {
data.versioninfo = versioninfo;
}
if (deviceinfo !== null) {
data.deviceinfo = deviceinfo;
}
if (extra !== null) {
data.extra = extra;
}
this._logData("suite_start", data);
},
suiteEnd: function () {
this._logData("suite_end");
suiteEnd: function (extra=null) {
var data = {};
if (extra !== null) {
data.extra = extra;
}
this._logData("suite_end", data);
},
@ -142,6 +160,14 @@ StructuredLogger.prototype = {
this.log("CRITICAL", message, extra);
},
processOutput: function(thread, message) {
this._logData('process_output', {
message: message,
thread: thread,
});
},
_logData: function (action, data={}) {
var allData = {
action: action,
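
The practical effect of these StructuredLog.jsm additions is that suite-level messages can now carry an extra payload; reftest.js uses it to ship its skip count (logger.suiteStart) and its results counters (logger.suiteEnd) earlier in this commit. Illustrative payload shapes as Python/JSON literals (an editorial sketch with invented values); _logData then wraps each payload under its action name, along with whatever envelope fields it adds outside this hunk:

suite_start_payload = {
    "tests": ["layout/reftests/a.html", "layout/reftests/b.html"],
    "extra": {"skipped": 3},  # optional blocks (runinfo, versioninfo, ...) appear only when non-null
}
suite_end_payload = {
    "extra": {"results": {"Pass": 2, "UnexpectedFail": 0}},
}
process_output_payload = {
    "message": "raw browser output",
    "thread": "ProcessReader",  # thread name is illustrative
}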

View File

@ -10,9 +10,15 @@ import sys
import mozinfo
import mozrunner.utils
def _raw_log():
import logging
return logging.getLogger(__name__)
def _get_default_logger():
from mozlog import get_default_logger
log = get_default_logger(component='mozleak')
if not log:
import logging
log = logging.getLogger(__name__)
return log
# Do not add anything to this list, unless one of the existing leaks below
@ -130,7 +136,7 @@ def process_single_leak_file(leakLogFileName, processType, leakThreshold,
r"\s*-?\d+\s+(?P<numLeaked>-?\d+)")
# The class name can contain spaces. We remove trailing whitespace later.
log = log or _raw_log()
log = log or _get_default_logger()
processString = "%s process:" % processType
expectedLeaks = expectedTabProcessLeakCounts() if processType == 'tab' else {}
@ -276,7 +282,7 @@ def process_leak_log(leak_log_file, leak_thresholds=None,
in the list ignore_missing_leaks.
"""
log = log or _raw_log()
log = log or _get_default_logger()
leakLogFile = leak_log_file
if not os.path.exists(leakLogFile):

View File

@ -5,6 +5,7 @@
from .base import BaseFormatter
from .process import strstatus
class TbplFormatter(BaseFormatter):
"""Formatter that formats logs in the legacy formatting format used by TBPL
This is intended to be used to preserve backward compatibility with existing tools
@ -102,12 +103,12 @@ class TbplFormatter(BaseFormatter):
def test_end(self, data):
test_id = self.test_id(data["test"])
time_msg = ""
duration_msg = ""
if test_id in self.test_start_times:
start_time = self.test_start_times.pop(test_id)
time = data["time"] - start_time
time_msg = "took %ims" % time
duration_msg = "took %ims" % time
if "expected" in data:
message = data.get("message", "")
@ -119,18 +120,20 @@ class TbplFormatter(BaseFormatter):
message = message[:-1]
failure_line = "TEST-UNEXPECTED-%s | %s | %s\n" % (
data["status"], test_id, message)
data["status"], self.id_str(test_id), message)
if data["expected"] not in ("PASS", "OK"):
expected_msg = "expected %s | " % data["expected"]
else:
expected_msg = ""
info_line = "TEST-INFO %s%s\n" % (expected_msg, time_msg)
info_line = "TEST-INFO %s%s\n" % (expected_msg, duration_msg)
return failure_line + info_line
return "TEST-%s | %s | %s\n" % (
data["status"], test_id, time_msg)
sections = ["TEST-%s" % data['status'], self.id_str(test_id)]
if duration_msg:
sections.append(duration_msg)
return ' | '.join(sections) + '\n'
def suite_end(self, data):
start_time = self.suite_start_time

View File

@ -132,9 +132,9 @@ class TestId(DataType):
def convert(self, data):
if isinstance(data, unicode):
return data
elif isinstance(data, str):
elif isinstance(data, bytes):
return data.decode("utf-8", "replace")
elif isinstance(data, tuple):
elif isinstance(data, (tuple, list)):
# This is really a bit of a hack; should really split out convertors from the
# fields they operate on
func = Unicode(None).convert

View File

@ -238,7 +238,8 @@ class StructuredLogger(object):
@log_action(List("tests", Unicode),
Dict("run_info", default=None, optional=True),
Dict("version_info", default=None, optional=True),
Dict("device_info", default=None, optional=True))
Dict("device_info", default=None, optional=True),
Dict("extra", default=None, optional=True))
def suite_start(self, data):
"""Log a suite_start message
@ -252,7 +253,7 @@ class StructuredLogger(object):
self._log_data("suite_start", data)
@log_action()
@log_action(Dict("extra", default=None, optional=True))
def suite_end(self, data):
"""Log a suite_end message"""
if not self._ensure_suite_state('suite_end', data):