# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
|
2014-07-23 21:23:00 -07:00
|
|
|
import logging
|
2013-11-20 12:53:08 -08:00
|
|
|
from operator import itemgetter
|
2014-07-23 21:23:00 -07:00
|
|
|
import os
|
|
|
|
import platform
|
|
|
|
import re
|
2014-03-26 03:14:51 -07:00
|
|
|
import signal
|
2014-07-23 21:23:00 -07:00
|
|
|
import subprocess
|
|
|
|
import sys
|
|
|
|
import tempfile
|
2014-08-11 04:46:46 -07:00
|
|
|
import mozinfo
|
2013-09-23 07:47:48 -07:00
|
|
|
|
2009-07-22 12:24:54 -07:00
|
|
|
__all__ = [
|
2009-09-05 18:08:49 -07:00
|
|
|
"dumpLeakLog",
|
|
|
|
"processLeakLog",
|
2013-09-23 07:47:48 -07:00
|
|
|
'systemMemory',
|
2013-10-05 15:28:34 -07:00
|
|
|
'environment',
|
|
|
|
'dumpScreen',
|
2014-08-29 07:37:17 -07:00
|
|
|
"ShutdownLeaks",
|
|
|
|
"setAutomationLog",
|
2009-09-05 18:08:49 -07:00
|
|
|
]
|
2009-07-22 12:24:54 -07:00
|
|
|
|
2014-08-29 07:37:17 -07:00
|
|
|
# Module-level logger used throughout this file; harnesses can replace it
# via setAutomationLog().
log = logging.getLogger()

def resetGlobalLog():
    """Reinstall a single INFO-level stdout StreamHandler on the global logger.

    Any handlers previously attached (e.g. by an embedding harness) are
    removed first so output is not duplicated.
    """
    for stale_handler in list(log.handlers):
        log.removeHandler(stale_handler)
    log.setLevel(logging.INFO)
    log.addHandler(logging.StreamHandler(sys.stdout))

# Configure the logger as soon as the module is imported.
resetGlobalLog()
|
|
|
|
|
|
|
|
def setAutomationLog(alt_logger):
    """Swap the module-level logger for *alt_logger*.

    All subsequent logging in this module goes through the replacement,
    which only needs to provide info()/warning() methods.
    """
    global log
    log = alt_logger
|
|
|
|
|
2014-03-26 03:14:51 -07:00
|
|
|
# Python does not provide strsignal() even in the very latest 3.x.
# This is a reasonable fake.
def strsig(n):
    """Return a human-readable name for signal number *n*.

    Returns the symbolic name (e.g. "SIGINT"), "SIGRTMIN+k" for unnamed
    realtime signals, a placeholder for unrecognized in-range numbers, and
    an "out-of-range" message for n < 0 or n >= signal.NSIG.
    """
    # Build the name table once and cache it on the function; the original
    # rebuilt it on every call even though it never changes during a run.
    _sigtbl = getattr(strsig, "_sigtbl", None)
    if _sigtbl is None:
        # Signal numbers run 0 through NSIG-1; an array with NSIG members
        # has exactly that many slots
        _sigtbl = [None] * signal.NSIG
        for k in dir(signal):
            # SIGCLD/SIGPOLL are aliases of SIGCHLD/SIGIO; skip them so the
            # canonical names win.
            if k.startswith("SIG") and not k.startswith("SIG_") and k != "SIGCLD" and k != "SIGPOLL":
                _sigtbl[getattr(signal, k)] = k
        # Realtime signals mostly have no names
        if hasattr(signal, "SIGRTMIN") and hasattr(signal, "SIGRTMAX"):
            for r in range(signal.SIGRTMIN + 1, signal.SIGRTMAX + 1):
                _sigtbl[r] = "SIGRTMIN+" + str(r - signal.SIGRTMIN)
        # Fill in any remaining gaps
        for i in range(signal.NSIG):
            if _sigtbl[i] is None:
                _sigtbl[i] = "unrecognized signal, number " + str(i)
        strsig._sigtbl = _sigtbl
    if n < 0 or n >= signal.NSIG:
        return "out-of-range signal, number " + str(n)
    return _sigtbl[n]
|
|
|
|
|
|
|
|
def printstatus(status, name = ""):
    """Print a TEST-INFO line describing how a process exited.

    status -- raw exit status: an os.wait()-style status word on POSIX,
              or a plain return code on Windows.
    name   -- label identifying the process the status belongs to.

    Uses single-argument print(...) so the code is valid under both
    Python 2 (parenthesized expression) and Python 3 (print function),
    with identical output; the original Python-2-only print statements
    were a syntax error under Python 3.
    """
    # 'status' is the exit status
    if os.name != 'posix':
        # Windows error codes are easier to look up if printed in hexadecimal
        if status < 0:
            status += 2**32
        print("TEST-INFO | %s: exit status %x\n" % (name, status))
    elif os.WIFEXITED(status):
        print("TEST-INFO | %s: exit %d\n" % (name, os.WEXITSTATUS(status)))
    elif os.WIFSIGNALED(status):
        # The python stdlib doesn't appear to have strsignal(), alas
        print("TEST-INFO | {}: killed by {}".format(name, strsig(os.WTERMSIG(status))))
    else:
        # This is probably a can't-happen condition on Unix, but let's be defensive
        print("TEST-INFO | %s: undecodable exit status %04x\n" % (name, status))
|
|
|
|
|
2009-09-05 18:08:49 -07:00
|
|
|
def dumpLeakLog(leakLogFile, filter = False):
    """Dump the leak log verbatim, without parsing it.

    Use this function if you want the raw log only.
    Use it preferably with the |XPCOM_MEM_LEAK_LOG| environment variable.
    """
    # A missing log file is not an error here; stay silent.
    if not os.path.exists(leakLogFile):
        return

    with open(leakLogFile, "r") as f:
        contents = f.read()

    # Only |XPCOM_MEM_LEAK_LOG| reports can be actually filtered out.
    # When filtering, dump nothing unless a leak was actually reported.
    if filter and "0 TOTAL " not in contents:
        return

    # Emit the log unmodified (minus trailing newlines).
    log.info(contents.rstrip("\n"))
|
|
|
|
|
2014-09-30 14:17:27 -07:00
|
|
|
def processSingleLeakFile(leakLogFileName, processType, leakThreshold, ignoreMissingLeaks):
    """Process a single leak log.

    leakLogFileName    -- path of one BloatView leak log to parse.
    processType        -- string naming the kind of process that wrote the
                          log (e.g. "default", "tab"); used only to label
                          the output lines.
    leakThreshold      -- byte count above which the total leak is reported
                          as TEST-UNEXPECTED-FAIL instead of WARNING.
    ignoreMissingLeaks -- when True, a log with no TOTAL row is reported as
                          TEST-INFO rather than TEST-UNEXPECTED-FAIL.

    All output goes through the module-level |log|.
    """

    # Example rows the regex below must match:
    # | |Per-Inst Leaked| Total Rem|
    # 0 |TOTAL | 17 192| 419115886 2|
    # 833 |nsTimerImpl | 60 120| 24726 2|
    # 930 |Foo<Bar, Bar> | 32 8| 100 1|
    lineRe = re.compile(r"^\s*\d+ \|"
                        r"(?P<name>[^|]+)\|"
                        r"\s*(?P<size>-?\d+)\s+(?P<bytesLeaked>-?\d+)\s*\|"
                        r"\s*-?\d+\s+(?P<numLeaked>-?\d+)")
    # The class name can contain spaces. We remove trailing whitespace later.

    processString = "%s process:" % processType
    crashedOnPurpose = False
    # None until a TOTAL row is seen; lets us distinguish "no TOTAL" from
    # "TOTAL of zero" below.
    totalBytesLeaked = None
    logAsWarning = False
    leakAnalysis = []
    leakedObjectAnalysis = []
    leakedObjectNames = []
    # Only record per-object rows for the bloat view with the largest TOTAL.
    recordLeakedObjects = False
    with open(leakLogFileName, "r") as leaks:
        for line in leaks:
            if line.find("purposefully crash") > -1:
                crashedOnPurpose = True
            matches = lineRe.match(line)
            if not matches:
                # eg: the leak table header row
                log.info(line.rstrip())
                continue
            name = matches.group("name").rstrip()
            size = int(matches.group("size"))
            bytesLeaked = int(matches.group("bytesLeaked"))
            numLeaked = int(matches.group("numLeaked"))
            # Output the raw line from the leak log table if it is the TOTAL row,
            # or is for an object row that has been leaked.
            if numLeaked != 0 or name == "TOTAL":
                log.info(line.rstrip())
            # Analyse the leak log, but output later or it will interrupt the leak table
            if name == "TOTAL":
                # Multiple default processes can end up writing their bloat views into a single
                # log, particularly on B2G. Eventually, these should be split into multiple
                # logs (bug 1068869), but for now, we report the largest leak.
                if totalBytesLeaked != None:
                    leakAnalysis.append("WARNING | leakcheck | %s multiple BloatView byte totals found"
                                        % processString)
                else:
                    totalBytesLeaked = 0
                if bytesLeaked > totalBytesLeaked:
                    totalBytesLeaked = bytesLeaked
                    # Throw out the information we had about the previous bloat view.
                    leakedObjectNames = []
                    leakedObjectAnalysis = []
                    recordLeakedObjects = True
                else:
                    recordLeakedObjects = False
            if size < 0 or bytesLeaked < 0 or numLeaked < 0:
                leakAnalysis.append("TEST-UNEXPECTED-FAIL | leakcheck | %s negative leaks caught!"
                                    % processString)
                logAsWarning = True
                continue
            if name != "TOTAL" and numLeaked != 0 and recordLeakedObjects:
                leakedObjectNames.append(name)
                leakedObjectAnalysis.append("TEST-INFO | leakcheck | %s leaked %d %s (%s bytes)"
                                            % (processString, numLeaked, name, bytesLeaked))

    leakAnalysis.extend(leakedObjectAnalysis)
    if logAsWarning:
        log.warning('\n'.join(leakAnalysis))
    else:
        log.info('\n'.join(leakAnalysis))

    # Reset so the summary below chooses its own severity.
    logAsWarning = False

    if totalBytesLeaked is None:
        # We didn't see a line with name 'TOTAL'
        if crashedOnPurpose:
            log.info("TEST-INFO | leakcheck | %s deliberate crash and thus no leak log"
                     % processString)
        elif ignoreMissingLeaks:
            log.info("TEST-INFO | leakcheck | %s ignoring missing output line for total leaks"
                     % processString)
        else:
            log.info("TEST-UNEXPECTED-FAIL | leakcheck | %s missing output line for total leaks!"
                     % processString)
            log.info("TEST-INFO | leakcheck | missing output line from log file %s"
                     % leakLogFileName)
        return

    if totalBytesLeaked == 0:
        log.info("TEST-PASS | leakcheck | %s no leaks detected!" % processString)
        return

    # totalBytesLeaked was seen and is non-zero.
    if totalBytesLeaked > leakThreshold:
        logAsWarning = True
        # Fail the run if we're over the threshold (which defaults to 0)
        prefix = "TEST-UNEXPECTED-FAIL"
    else:
        prefix = "WARNING"

    # Create a comma delimited string of the first N leaked objects found,
    # to aid with bug summary matching in TBPL. Note: The order of the objects
    # had no significance (they're sorted alphabetically).
    maxSummaryObjects = 5
    leakedObjectSummary = ', '.join(leakedObjectNames[:maxSummaryObjects])
    if len(leakedObjectNames) > maxSummaryObjects:
        leakedObjectSummary += ', ...'

    if logAsWarning:
        log.warning("%s | leakcheck | %s %d bytes leaked (%s)"
                    % (prefix, processString, totalBytesLeaked, leakedObjectSummary))
    else:
        log.info("%s | leakcheck | %s %d bytes leaked (%s)"
                 % (prefix, processString, totalBytesLeaked, leakedObjectSummary))
|
2009-11-10 11:16:12 -08:00
|
|
|
|
2014-10-06 14:23:17 -07:00
|
|
|
def processLeakLog(leakLogFile, options):
    """Process the leak log, including separate leak logs created
    by child processes.

    Use this function if you want an additional PASS/FAIL summary.
    It must be used with the |XPCOM_MEM_BLOAT_LOG| environment variable.

    The base of leakLogFile for a non-default process needs to end with
      _proctype_pid12345.log
    "proctype" is a string denoting the type of the process, which should
    be the result of calling XRE_ChildProcessTypeToString(). 12345 is
    a series of digits that is the pid for the process. The .log is
    optional.

    All other file names are treated as being for default processes.

    The options argument is checked for two optional attributes,
    leakThresholds and ignoreMissingLeaks.

    leakThresholds should be a dict mapping process types to leak thresholds,
    in bytes. If a process type is not present in the dict the threshold
    will be 0.

    ignoreMissingLeaks should be a list of process types. If a process
    creates a leak log without a TOTAL, then we report an error if it isn't
    in the list ignoreMissingLeaks.
    """

    # No bloat log at all means refcount logging was never enabled.
    if not os.path.exists(leakLogFile):
        log.info("WARNING | leakcheck | refcount logging is off, so leaks can't be detected!")
        return

    # Both attributes are optional on |options|; fall back to "no thresholds"
    # and "never ignore" defaults.
    leakThresholds = getattr(options, 'leakThresholds', {})
    ignoreMissingLeaks = getattr(options, 'ignoreMissingLeaks', [])

    # This list is based on kGeckoProcessTypeString. ipdlunittest processes likely
    # are not going to produce leak logs we will ever see.
    knownProcessTypes = ["default", "plugin", "tab", "geckomediaplugin"]

    # Announce the effective threshold for every known process type.
    for processType in knownProcessTypes:
        log.info("TEST-INFO | leakcheck | %s process: leak threshold set at %d bytes"
                 % (processType, leakThresholds.get(processType, 0)))

    # Flag configuration errors: thresholds for process types we don't know.
    for processType in leakThresholds:
        if not processType in knownProcessTypes:
            log.info("TEST-UNEXPECTED-FAIL | leakcheck | Unknown process type %s in leakThresholds"
                     % processType)

    # Child-process logs share the parent log's base name, with an optional
    # _proctype_pidNNNNN suffix (see docstring above).
    (leakLogFileDir, leakFileBase) = os.path.split(leakLogFile)
    if leakFileBase[-4:] == ".log":
        leakFileBase = leakFileBase[:-4]
        fileNameRegExp = re.compile(r"_([a-z]*)_pid\d*.log$")
    else:
        fileNameRegExp = re.compile(r"_([a-z]*)_pid\d*$")

    # Process every log in the directory whose name contains the base name;
    # files without the pid suffix are treated as default-process logs.
    for fileName in os.listdir(leakLogFileDir):
        if fileName.find(leakFileBase) != -1:
            thisFile = os.path.join(leakLogFileDir, fileName)
            m = fileNameRegExp.search(fileName)
            if m:
                processType = m.group(1)
            else:
                processType = "default"
            if not processType in knownProcessTypes:
                log.info("TEST-UNEXPECTED-FAIL | leakcheck | Leak log with unknown process type %s"
                         % processType)
            leakThreshold = leakThresholds.get(processType, 0)
            processSingleLeakFile(thisFile, processType, leakThreshold,
                                  processType in ignoreMissingLeaks)
|
2010-03-12 14:57:29 -08:00
|
|
|
|
2013-09-23 07:47:48 -07:00
|
|
|
def systemMemory():
    """
    Return the total amount of system memory, in kilobytes.

    Works only on unix-like platforms where `free` is in the path.
    """
    # The second line of `free` output describes physical memory; its
    # second column is the total in kilobytes.
    mem_line = os.popen("free").readlines()[1]
    return int(mem_line.split()[1])
|
|
|
|
|
2014-06-20 09:08:30 -07:00
|
|
|
def environment(xrePath, env=None, crashreporter=True, debugger=False, dmdPath=None, lsanPath=None):
    """Populate OS environment variables for mochitest.

    xrePath       -- absolute path to the XRE directory (asserted below).
    env           -- mapping to populate; defaults to a copy of os.environ.
    crashreporter -- enable the crash reporter (ignored when debugger is set).
    debugger      -- True when the browser runs under a debugger.
    dmdPath       -- directory holding the DMD replace-malloc library, or None.
    lsanPath      -- directory holding lsan_suppressions.txt, or None.

    Returns the populated environment mapping.
    """

    env = os.environ.copy() if env is None else env

    assert os.path.isabs(xrePath)

    # Shared libraries live next to the binary, except on Mac where they are
    # in the bundle's MacOS directory.
    if mozinfo.isMac:
        ldLibraryPath = os.path.join(os.path.dirname(xrePath), "MacOS")
    else:
        ldLibraryPath = xrePath

    # Per-platform names for the library search path variable, the DMD
    # library, and the preload mechanism used to inject it.
    envVar = None
    dmdLibrary = None
    preloadEnvVar = None
    if 'toolkit' in mozinfo.info and mozinfo.info['toolkit'] == "gonk":
        # Skip all of this, it's only valid for the host.
        pass
    elif mozinfo.isUnix:
        envVar = "LD_LIBRARY_PATH"
        env['MOZILLA_FIVE_HOME'] = xrePath
        dmdLibrary = "libdmd.so"
        preloadEnvVar = "LD_PRELOAD"
    elif mozinfo.isMac:
        envVar = "DYLD_LIBRARY_PATH"
        dmdLibrary = "libdmd.dylib"
        preloadEnvVar = "DYLD_INSERT_LIBRARIES"
    elif mozinfo.isWin:
        envVar = "PATH"
        dmdLibrary = "dmd.dll"
        preloadEnvVar = "MOZ_REPLACE_MALLOC_LIB"
    if envVar:
        # On Windows the existing PATH goes first; elsewhere the XRE dir and
        # dmdPath are prepended. Empty/None entries are dropped before joining.
        envValue = ((env.get(envVar), str(ldLibraryPath))
                    if mozinfo.isWin
                    else (ldLibraryPath, dmdPath, env.get(envVar)))
        env[envVar] = os.path.pathsep.join([path for path in envValue if path])

    if dmdPath and dmdLibrary and preloadEnvVar:
        env[preloadEnvVar] = os.path.join(dmdPath, dmdLibrary)

    # crashreporter
    env['GNOME_DISABLE_CRASH_DIALOG'] = '1'
    env['XRE_NO_WINDOWS_CRASH_DIALOG'] = '1'

    if crashreporter and not debugger:
        env['MOZ_CRASHREPORTER_NO_REPORT'] = '1'
        env['MOZ_CRASHREPORTER'] = '1'
    else:
        env['MOZ_CRASHREPORTER_DISABLE'] = '1'

    # Crash on non-local network connections by default.
    # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily
    # enable non-local connections for the purposes of local testing. Don't
    # override the user's choice here. See bug 1049688.
    env.setdefault('MOZ_DISABLE_NONLOCAL_CONNECTIONS', '1')

    # Set WebRTC logging in case it is not set yet
    env.setdefault('NSPR_LOG_MODULES', 'signaling:5,mtransport:5,datachannel:5,jsep:5,MediaPipelineFactory:5')
    env.setdefault('R_LOG_LEVEL', '6')
    env.setdefault('R_LOG_DESTINATION', 'stderr')
    env.setdefault('R_LOG_VERBOSE', '1')

    # ASan specific environment stuff
    asan = bool(mozinfo.info.get("asan"))
    if asan and (mozinfo.isLinux or mozinfo.isMac):
        try:
            # Symbolizer support
            llvmsym = os.path.join(xrePath, "llvm-symbolizer")
            if os.path.isfile(llvmsym):
                env["ASAN_SYMBOLIZER_PATH"] = llvmsym
                log.info("INFO | runtests.py | ASan using symbolizer at %s" % llvmsym)
            else:
                log.info("TEST-UNEXPECTED-FAIL | runtests.py | Failed to find ASan symbolizer at %s" % llvmsym)

            # systemMemory() shells out to `free`; failures land in the
            # except clauses below.
            totalMemory = systemMemory()

            # Only 4 GB RAM or less available? Use custom ASan options to reduce
            # the amount of resources required to do the tests. Standard options
            # will otherwise lead to OOM conditions on the current test slaves.
            message = "INFO | runtests.py | ASan running in %s configuration"
            asanOptions = []
            if totalMemory <= 1024 * 1024 * 4:
                message = message % 'low-memory'
                asanOptions = ['quarantine_size=50331648', 'malloc_context_size=5']
            else:
                message = message % 'default memory'

            if lsanPath:
                log.info("LSan enabled.")
                asanOptions.append('detect_leaks=1')
                lsanOptions = ["exitcode=0"]
                suppressionsFile = os.path.join(lsanPath, 'lsan_suppressions.txt')
                if os.path.exists(suppressionsFile):
                    log.info("LSan using suppression file " + suppressionsFile)
                    lsanOptions.append("suppressions=" + suppressionsFile)
                else:
                    log.info("WARNING | runtests.py | LSan suppressions file does not exist! " + suppressionsFile)
                env["LSAN_OPTIONS"] = ':'.join(lsanOptions)
                # Run shutdown GCs and CCs to avoid spurious leaks.
                env['MOZ_CC_RUN_DURING_SHUTDOWN'] = '1'

            if len(asanOptions):
                env['ASAN_OPTIONS'] = ':'.join(asanOptions)

        except OSError,err:
            log.info("Failed determine available memory, disabling ASan low-memory configuration: %s" % err.strerror)
        except:
            log.info("Failed determine available memory, disabling ASan low-memory configuration")
        else:
            # Only announce the chosen configuration when no exception fired.
            log.info(message)

    # TSan symbolizer support (Linux only).
    tsan = bool(mozinfo.info.get("tsan"))
    if tsan and mozinfo.isLinux:
        # Symbolizer support.
        llvmsym = os.path.join(xrePath, "llvm-symbolizer")
        if os.path.isfile(llvmsym):
            env["TSAN_OPTIONS"] = "external_symbolizer_path=%s" % llvmsym
            log.info("INFO | runtests.py | TSan using symbolizer at %s" % llvmsym)
        else:
            log.info("TEST-UNEXPECTED-FAIL | runtests.py | Failed to find TSan symbolizer at %s" % llvmsym)

    return env
|
2013-10-05 15:28:34 -07:00
|
|
|
|
|
|
|
def dumpScreen(utilityPath):
|
2014-01-17 09:04:02 -08:00
|
|
|
"""dumps a screenshot of the entire screen to a directory specified by
|
|
|
|
the MOZ_UPLOAD_DIR environment variable"""
|
2013-10-05 15:28:34 -07:00
|
|
|
|
2014-01-17 09:04:02 -08:00
|
|
|
# Need to figure out which OS-dependent tool to use
|
2013-10-05 15:28:34 -07:00
|
|
|
if mozinfo.isUnix:
|
|
|
|
utility = [os.path.join(utilityPath, "screentopng")]
|
2014-03-26 03:14:51 -07:00
|
|
|
utilityname = "screentopng"
|
2013-10-05 15:28:34 -07:00
|
|
|
elif mozinfo.isMac:
|
|
|
|
utility = ['/usr/sbin/screencapture', '-C', '-x', '-t', 'png']
|
2014-03-26 03:14:51 -07:00
|
|
|
utilityname = "screencapture"
|
2013-10-05 15:28:34 -07:00
|
|
|
elif mozinfo.isWin:
|
|
|
|
utility = [os.path.join(utilityPath, "screenshot.exe")]
|
2014-03-26 03:14:51 -07:00
|
|
|
utilityname = "screenshot"
|
2014-01-17 09:04:02 -08:00
|
|
|
|
|
|
|
# Get dir where to write the screenshot file
|
|
|
|
parent_dir = os.environ.get('MOZ_UPLOAD_DIR', None)
|
|
|
|
if not parent_dir:
|
|
|
|
log.info('Failed to retrieve MOZ_UPLOAD_DIR env var')
|
|
|
|
return
|
|
|
|
|
|
|
|
# Run the capture
|
2013-10-05 15:28:34 -07:00
|
|
|
try:
|
2014-02-05 06:34:23 -08:00
|
|
|
tmpfd, imgfilename = tempfile.mkstemp(prefix='mozilla-test-fail-screenshot_', suffix='.png', dir=parent_dir)
|
|
|
|
os.close(tmpfd)
|
|
|
|
returncode = subprocess.call(utility + [imgfilename])
|
2014-03-26 03:14:51 -07:00
|
|
|
printstatus(returncode, utilityname)
|
2013-10-05 15:28:34 -07:00
|
|
|
except OSError, err:
|
2014-08-29 07:37:17 -07:00
|
|
|
log.info("Failed to start %s for screenshot: %s" %
|
2013-10-05 15:28:34 -07:00
|
|
|
utility[0], err.strerror)
|
|
|
|
return
|
|
|
|
|
2013-11-20 12:53:08 -08:00
|
|
|
class ShutdownLeaks(object):
    """
    Parses the mochitest run log when running a debug build, assigns all leaked
    DOM windows (that are still around after test suite shutdown, despite running
    the GC) to the tests that created them and prints leak statistics.

    Feed structured-log messages to log(); call process() once the suite is done.
    """

    def __init__(self, logger):
        # Structured logger providing info()/warning().
        self.logger = logger
        # Tests that created at least one window/docshell still alive at test_end.
        self.tests = []
        # (pid, serial) -> url for windows destroyed only after shutdown.
        self.leakedWindows = {}
        # Set of (pid, id) for docshells destroyed only after shutdown.
        self.leakedDocShells = set()
        # Bookkeeping dict for the test currently running, or None.
        self.currentTest = None
        # Pids (ints) whose shutdown collections have completed.
        self.seenShutdown = set()

    def log(self, message):
        # |message| is a structured-log dict with at least an 'action' key.
        if message['action'] == 'log':
            line = message['message']
            # Leak-tracking lines look like "++DOMWINDOW ..." / "--DOCSHELL ...";
            # the two-character +/- prefix is handled in _logWindow/_logDocShell.
            if line[2:11] == "DOMWINDOW":
                self._logWindow(line)
            elif line[2:10] == "DOCSHELL":
                self._logDocShell(line)
            elif line.startswith("Completed ShutdownLeaks collections in process"):
                # Last token of the line is the pid of the process that shut down.
                pid = int(line.split()[-1])
                self.seenShutdown.add(pid)
        elif message['action'] == 'test_start':
            fileName = message['test'].replace("chrome://mochitests/content/browser/", "")
            self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
        elif message['action'] == 'test_end':
            # don't track a test if no windows or docShells leaked
            if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
                self.tests.append(self.currentTest)
            self.currentTest = None

    def process(self):
        """Report all leaks attributed to tests; call after the suite ends."""
        if not self.seenShutdown:
            self.logger.warning("TEST-UNEXPECTED-FAIL | ShutdownLeaks | process() called before end of test suite")

        for test in self._parseLeakingTests():
            for url, count in self._zipLeakedWindows(test["leakedWindows"]):
                self.logger.warning("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]" % (test["fileName"], count, url))

            if test["leakedWindowsString"]:
                self.logger.info("TEST-INFO | %s | windows(s) leaked: %s" % (test["fileName"], test["leakedWindowsString"]))

            if test["leakedDocShells"]:
                self.logger.warning("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown" % (test["fileName"], len(test["leakedDocShells"])))
                self.logger.info("TEST-INFO | %s | docShell(s) leaked: %s" % (test["fileName"],
                                 ', '.join(["[pid = %s] [id = %s]" % x for x in test["leakedDocShells"]])))

    def _logWindow(self, line):
        # "++DOMWINDOW" means created, "--DOMWINDOW" means destroyed.
        created = line[:2] == "++"
        # pid/serial come back as strings from _parseValue.
        pid = self._parseValue(line, "pid")
        serial = self._parseValue(line, "serial")

        # log line has invalid format
        if not pid or not serial:
            self.logger.warning("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
            return

        key = (pid, serial)

        if self.currentTest:
            windows = self.currentTest["windows"]
            if created:
                windows.add(key)
            else:
                windows.discard(key)
        elif int(pid) in self.seenShutdown and not created:
            # Window destroyed only after its process's shutdown collections:
            # record it (with its url) as leaked. seenShutdown holds ints,
            # hence the int(pid) conversion.
            self.leakedWindows[key] = self._parseValue(line, "url")

    def _logDocShell(self, line):
        # "++DOCSHELL" means created, "--DOCSHELL" means destroyed.
        created = line[:2] == "++"
        pid = self._parseValue(line, "pid")
        id = self._parseValue(line, "id")

        # log line has invalid format
        if not pid or not id:
            self.logger.warning("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
            return

        key = (pid, id)

        if self.currentTest:
            docShells = self.currentTest["docShells"]
            if created:
                docShells.add(key)
            else:
                docShells.discard(key)
        elif int(pid) in self.seenShutdown and not created:
            # Destroyed only after shutdown collections: counts as leaked.
            self.leakedDocShells.add(key)

    def _parseValue(self, line, name):
        # Extract the value of "[name = value]" from the line, or None.
        match = re.search("\[%s = (.+?)\]" % name, line)
        if match:
            return match.group(1)
        return None

    def _parseLeakingTests(self):
        # Annotate each tracked test with which of its windows/docshells
        # actually leaked, and return the leaking ones, worst first.
        leakingTests = []

        for test in self.tests:
            leakedWindows = [id for id in test["windows"] if id in self.leakedWindows]
            test["leakedWindows"] = [self.leakedWindows[id] for id in leakedWindows]
            test["leakedWindowsString"] = ', '.join(["[pid = %s] [serial = %s]" % x for x in leakedWindows])
            test["leakedDocShells"] = [id for id in test["docShells"] if id in self.leakedDocShells]
            test["leakCount"] = len(test["leakedWindows"]) + len(test["leakedDocShells"])

            if test["leakCount"]:
                leakingTests.append(test)

        return sorted(leakingTests, key=itemgetter("leakCount"), reverse=True)

    def _zipLeakedWindows(self, leakedWindows):
        # Collapse a list of leaked-window urls into (url, count) pairs,
        # most-leaked first.
        counts = []
        counted = set()

        for url in leakedWindows:
            if not url in counted:
                counts.append((url, leakedWindows.count(url)))
                counted.add(url)

        return sorted(counts, key=itemgetter(1), reverse=True)
|
2014-06-20 09:08:30 -07:00
|
|
|
|
|
|
|
|
|
|
|
class LSANLeaks(object):
    """
    Parses the log when running an LSAN build, looking for interesting stack frames
    in allocation stacks, and prints out reports.

    Feed log lines to log(); call process() at the end to emit one failure
    per distinct (truncated) allocation stack.
    """

    def __init__(self, logger):
        # Structured logger providing warning().
        self.logger = logger
        # True while we are inside a LeakSanitizer report block.
        self.inReport = False
        # Distinct comma-joined stacks collected so far.
        self.foundFrames = set([])
        # True while frames of the current direct leak should be recorded.
        self.recordMoreFrames = None
        # Frames of the leak currently being parsed, or None.
        self.currStack = None
        # Only the first few frames are kept per stack.
        self.maxNumRecordedFrames = 4

        # Don't various allocation-related stack frames, as they do not help much to
        # distinguish different leaks.
        unescapedSkipList = [
            "malloc", "js_malloc", "malloc_", "__interceptor_malloc", "moz_xmalloc",
            "calloc", "js_calloc", "calloc_", "__interceptor_calloc", "moz_xcalloc",
            "realloc","js_realloc", "realloc_", "__interceptor_realloc", "moz_xrealloc",
            "new",
            "js::MallocProvider",
        ]
        self.skipListRegExp = re.compile("^" + "|".join([re.escape(f) for f in unescapedSkipList]) + "$")

        # NOTE(review): whitespace inside these frame patterns may have been
        # collapsed in transit (LSAN frame lines are normally indented by
        # several spaces before '#') — verify against an actual LSAN report.
        self.startRegExp = re.compile("==\d+==ERROR: LeakSanitizer: detected memory leaks")
        self.stackFrameRegExp = re.compile(" #\d+ 0x[0-9a-f]+ in ([^(</]+)")
        self.sysLibStackFrameRegExp = re.compile(" #\d+ 0x[0-9a-f]+ \(([^+]+)\+0x[0-9a-f]+\)")

    def log(self, line):
        # Entering a new LSAN report.
        if re.match(self.startRegExp, line):
            self.inReport = True
            return

        if not self.inReport:
            return

        if line.startswith("Direct leak"):
            # Close out any stack in progress, then start recording a new one.
            self._finishStack()
            self.recordMoreFrames = True
            self.currStack = []
            return

        if line.startswith("Indirect leak"):
            self._finishStack()
            # Only report direct leaks, in the hope that they are less flaky.
            self.recordMoreFrames = False
            return

        if line.startswith("SUMMARY: AddressSanitizer"):
            # End of the report block.
            self._finishStack()
            self.inReport = False
            return

        if not self.recordMoreFrames:
            return

        stackFrame = re.match(self.stackFrameRegExp, line)
        if stackFrame:
            # Split the frame to remove any return types.
            frame = stackFrame.group(1).split()[-1]
            if not re.match(self.skipListRegExp, frame):
                self._recordFrame(frame)
            return

        sysLibStackFrame = re.match(self.sysLibStackFrameRegExp, line)
        if sysLibStackFrame:
            # System library stack frames will never match the skip list,
            # so don't bother checking if they do.
            self._recordFrame(sysLibStackFrame.group(1))

        # If we don't match either of these, just ignore the frame.
        # We'll end up with "unknown stack" if everything is ignored.

    def process(self):
        # One TEST-UNEXPECTED-FAIL per distinct collected stack.
        for f in self.foundFrames:
            self.logger.warning("TEST-UNEXPECTED-FAIL | LeakSanitizer | leak at " + f)

    def _finishStack(self):
        # Flush the stack currently being recorded into foundFrames and
        # reset per-stack state.
        if self.recordMoreFrames and len(self.currStack) == 0:
            self.currStack = ["unknown stack"]
        if self.currStack:
            self.foundFrames.add(", ".join(self.currStack))
            self.currStack = None
        self.recordMoreFrames = False
        self.numRecordedFrames = 0

    def _recordFrame(self, frame):
        # Append a frame, stopping once the per-stack cap is reached.
        self.currStack.append(frame)
        self.numRecordedFrames += 1
        if self.numRecordedFrames >= self.maxNumRecordedFrames:
            self.recordMoreFrames = False
|