acceptance-tests
data
debian
docs
external
Newtonsoft.Json
api-doc-tools
api-snapshot
aspnetwebstack
bdwgc
binary-reference-assemblies
bockbuild
boringssl
cecil
cecil-legacy
corefx
corert
helix-binaries
ikdasm
ikvm
illinker-test-assets
linker
llvm-project
clang
clang-tools-extra
compiler-rt
eng
libcxx
libcxxabi
libunwind
lld
lldb
llvm
bindings
cmake
docs
examples
include
lib
projects
resources
runtimes
scripts
test
tools
unittests
utils
FileCheck
KillTheDoctor
LLVMVisualizers
Misc
PerfectShuffle
TableGen
Target
bugpoint
count
crosstool
docker
emacs
fpcmp
gdb-scripts
git
git-svn
jedit
kate
lint
lit
llvm-build
llvm-lit
not
release
sanitizers
testgen
textmate
unittest
valgrind
vim
vscode
yaml-bench
DSAclean.py
DSAextract.py
GenLibDeps.pl
GetRepositoryPath
GetSourceVersion
LLVMBuild.txt
UpdateCMakeLists.pl
abtest.py
bisect
bisect-skip-count
check-each-file
clang-parse-diagnostics-file
codegen-diff
countloc.sh
create_ladder_graph.py
extract_symbols.py
findmisopt
findoptdiff
findsym.pl
getsrcs.sh
lldbDataFormatters.py
llvm-compilers-check
llvm-gisel-cov.py
llvm-native-gxx
llvm.grm
llvmdo
llvmgrep
makellvm
prepare-code-coverage-artifact.py
schedcover.py
shuffle_fuzz.py
shuffle_select_fuzz_tester.py
sort_includes.py
update_llc_test_checks.py
update_mir_test_checks.py
update_test_checks.py
wciia.py
.arcconfig
.clang-format
.clang-tidy
.gitattributes
.gitignore
CMakeLists.txt
CODE_OWNERS.TXT
CREDITS.TXT
LICENSE.TXT
LLVMBuild.txt
README.txt
RELEASE_TESTERS.TXT
configure
llvm.spec.in
version.txt.in
nuget
openmp
polly
Directory.Build.props
Directory.Build.targets
NuGet.config
azure-pipelines.yml
build.cmd
build.sh
dir.common.props
global.json
llvm.proj
mxe-Win64.cmake.in
nuget-buildtasks
nunit-lite
roslyn-binaries
rx
xunit-binaries
how-to-bump-roslyn-binaries.md
ikvm-native
llvm
m4
man
mcs
mono
msvc
netcore
po
runtime
samples
scripts
support
tools
COPYING.LIB
LICENSE
Makefile.am
Makefile.in
NEWS
README.md
acinclude.m4
aclocal.m4
autogen.sh
code_of_conduct.md
compile
config.guess
config.h.in
config.rpath
config.sub
configure.REMOVED.git-id
configure.ac.REMOVED.git-id
depcomp
install-sh
ltmain.sh.REMOVED.git-id
missing
mkinstalldirs
mono-uninstalled.pc.in
test-driver
winconfig.h
233 lines
7.5 KiB
Python
Executable File
#!/usr/bin/env python
#
# Given a previous good compile, narrow down miscompiles.
# Expects two directories named "before" and "after", each containing a set of
# assembly or object files where the "after" version is assumed to be broken.
# You also have to provide a script called "link_test". It is called with a list
# of files which should be linked together and the result tested. "link_test"
# should return with exitcode 0 if the linking and testing succeeded.
#
# abtest.py operates by taking all files from the "before" directory and
# in each step replacing one of them with the corresponding file from the
# "after" (bad) directory.
#
# Additionally you can perform the same steps with a single .s file. In this
# mode functions are identified by " -- Begin function FunctionName" and
# " -- End function" markers. abtest.py then takes all functions from the file
# in the "before" directory and in each step replaces one function with the
# corresponding function from the "after" (bad) file.
#
# Example usage to identify miscompiled files:
# 1. Create a link_test script and make it executable. Simple example
#    (a fuller sketch follows at the end of this header):
#      clang "$@" -o /tmp/test && /tmp/test || echo "PROBLEM"
# 2. Run the script to figure out which files are miscompiled:
#    > ./abtest.py
#    somefile.s: ok
#    someotherfile.s: skipped: same content
#    anotherfile.s: failed: './link_test' exitcode != 0
#    ...
#
# Example usage to identify miscompiled functions inside a file:
# 3. Run the tests on a single file (assuming before/file.s and
#    after/file.s exist):
#    > ./abtest.py file.s
#    funcname1 [0/XX]: ok
#    funcname2 [1/XX]: ok
#    funcname3 [2/XX]: skipped: same content
#    funcname4 [3/XX]: failed: './link_test' exitcode != 0
#    ...
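#
# A slightly fuller link_test sketch than the one-liner above: it propagates a
# failing run through its exit code, which is the contract abtest.py relies on.
# The clang call and the /tmp/abtest_bin path are illustrative assumptions only:
#
#   #!/bin/sh
#   clang "$@" -o /tmp/abtest_bin || exit 1
#   exec /tmp/abtest_bin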
from fnmatch import filter
from sys import stderr
import argparse
import filecmp
import os
import subprocess
import sys

LINKTEST = "./link_test"
ESCAPE = "\033[%sm"
BOLD = ESCAPE % "1"
RED = ESCAPE % "31"
NORMAL = ESCAPE % "0"
FAILED = RED + "failed" + NORMAL

def find(dir, file_filter=None):
    files = [walkdir[0] + "/" + file
             for walkdir in os.walk(dir) for file in walkdir[2]]
    if file_filter is not None:
        files = filter(files, file_filter)
    return files

def error(message):
    stderr.write("Error: %s\n" % (message,))

def warn(message):
    stderr.write("Warning: %s\n" % (message,))

def extract_functions(file):
    functions = []
    in_function = None
    for line in open(file):
        marker = line.find(" -- Begin function ")
        if marker != -1:
            if in_function is not None:
                warn("Missing end of function %s" % (in_function,))
            funcname = line[marker + 19:-1]
            in_function = funcname
            text = line
            continue

        marker = line.find(" -- End function")
        if marker != -1:
            text += line
            functions.append((in_function, text))
            in_function = None
            continue

        if in_function is not None:
            text += line
    return functions

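# For reference, the " -- Begin function"/" -- End function" markers that
# extract_functions() and replace_function() scan for are emitted as assembler
# comments in LLVM-generated .s files. Roughly (x86-style comment character;
# the exact surrounding directives vary by target, this is only a sketch):
#
#       .globl  foo                     # -- Begin function foo
#   foo:
#       ...
#                                       # -- End function
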
def replace_function(file, function, replacement, dest):
    out = open(dest, "w")
    skip = False
    found = False
    in_function = None
    for line in open(file):
        marker = line.find(" -- Begin function ")
        if marker != -1:
            if in_function is not None:
                warn("Missing end of function %s" % (in_function,))
            funcname = line[marker + 19:-1]
            in_function = funcname
            if in_function == function:
                out.write(replacement)
                skip = True
        else:
            marker = line.find(" -- End function")
            if marker != -1:
                in_function = None
                if skip:
                    skip = False
                    continue

        if not skip:
            out.write(line)

def announce_test(name):
    stderr.write("%s%s%s: " % (BOLD, name, NORMAL))
    stderr.flush()

def announce_result(result, info):
    stderr.write(result)
    if info != "":
        stderr.write(": %s" % info)
    stderr.write("\n")
    stderr.flush()

def testrun(files):
    linkline = "%s %s" % (LINKTEST, " ".join(files))
    res = subprocess.call(linkline, shell=True)
    if res != 0:
        announce_result(FAILED, "'%s' exitcode != 0" % LINKTEST)
        return False
    else:
        announce_result("ok", "")
        return True

def check_files():
    """Check files mode"""
    for i in range(0, len(NO_PREFIX)):
        f = NO_PREFIX[i]
        b = baddir + "/" + f
        if b not in BAD_FILES:
            warn("There is no corresponding file to '%s' in %s"
                 % (gooddir + "/" + f, baddir))
            continue

        announce_test(f + " [%s/%s]" % (i + 1, len(NO_PREFIX)))

        # combine files (everything from good except f)
        testfiles = []
        skip = False
        for c in NO_PREFIX:
            badfile = baddir + "/" + c
            goodfile = gooddir + "/" + c
            if c == f:
                testfiles.append(badfile)
                if filecmp.cmp(goodfile, badfile):
                    announce_result("skipped", "same content")
                    skip = True
                    break
            else:
                testfiles.append(goodfile)
        if skip:
            continue
        testrun(testfiles)

def check_functions_in_file(base, goodfile, badfile):
    functions = extract_functions(goodfile)
    if len(functions) == 0:
        warn("Couldn't find any function in %s, missing annotations?" % (goodfile,))
        return
    badfunctions = dict(extract_functions(badfile))
    if len(badfunctions) == 0:
        warn("Couldn't find any function in %s, missing annotations?" % (badfile,))
        return

    COMBINED = "/tmp/combined.s"
    i = 0
    for (func, func_text) in functions:
        announce_test(func + " [%s/%s]" % (i + 1, len(functions)))
        i += 1
        if func not in badfunctions:
            warn("Function '%s' missing from bad file" % func)
            continue
        if badfunctions[func] == func_text:
            announce_result("skipped", "same content")
            continue
        replace_function(goodfile, func, badfunctions[func], COMBINED)
        testfiles = []
        for c in NO_PREFIX:
            if c == base:
                testfiles.append(COMBINED)
                continue
            testfiles.append(gooddir + "/" + c)

        testrun(testfiles)

parser = argparse.ArgumentParser()
parser.add_argument('--a', dest='dir_a', default='before')
parser.add_argument('--b', dest='dir_b', default='after')
parser.add_argument('--insane', help='Skip sanity check', action='store_true')
parser.add_argument('file', metavar='file', nargs='?')
config = parser.parse_args()

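# Example invocations implied by the options above (directory and file names
# are placeholders):
#   ./abtest.py                        # compare ./before against ./after
#   ./abtest.py --a good --b bad       # use custom directory names
#   ./abtest.py file.s                 # bisect the functions inside one file
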
gooddir = config.dir_a
baddir = config.dir_b

BAD_FILES = find(baddir, "*")
GOOD_FILES = find(gooddir, "*")
NO_PREFIX = sorted([x[len(gooddir) + 1:] for x in GOOD_FILES])

# "Checking whether build environment is sane ..."
|
|
if not config.insane:
|
|
announce_test("sanity check")
|
|
if not os.access(LINKTEST, os.X_OK):
|
|
error("Expect '%s' to be present and executable" % (LINKTEST,))
|
|
exit(1)
|
|
|
|
res = testrun(GOOD_FILES)
|
|
if not res:
|
|
# "build environment is grinning and holding a spatula. Guess not."
|
|
linkline="%s %s" % (LINKTEST, " ".join(GOOD_FILES),)
|
|
stderr.write("\n%s\n\n" % linkline)
|
|
stderr.write("Returned with exitcode != 0\n")
|
|
sys.exit(1)
|
|
|
|
if config.file is not None:
    # Function exchange mode: bisect the functions inside a single file.
    goodfile = gooddir + "/" + config.file
    badfile = baddir + "/" + config.file
    check_functions_in_file(config.file, goodfile, badfile)
else:
    # File exchange mode: bisect whole files.
    check_files()