mirror of
https://github.com/token2/snapd.git
synced 2026-03-13 11:15:47 -07:00
* Squashed 'tests/lib/external/snapd-testing-tools/' changes from 1c8efb77e1..496cb7b5b3 496cb7b5b3 removing support for centos-8 f2eef30db4 Updated the log helper and log parser 5a375ebf73 Formatting for python utils d3eed3faa5 fix codespell in CODE_OF_CONDUCT.md 18bcca6b14 new log helper d60381fcd9 add run number to filtered filename 5dde2d67b8 consider the tests execution in main 6b9a3aabcc change filtered log name b2756aa579 default file is .filtered.log 500b9dace4 Fix tests workflow 45db26a3d2 fix shellcheck error in log-filter fe45c27b7d create a var to store filter params 5a9b66d7dc filter spread results 51f9b055af New tool used to filter the spread.log output b8d20c1d5b fix snaps.name test with correct siffix spelling f640ac72e3 Add missing test details f0754df304 Filter the error y debug output in log-parser fc10196efd Add suggestions to details 94ac5ffe58 Add details on tests 501578c719 add more checks in os.query to check is-core_xx e8929207ff fix os-query for ubuntu comparing with core 226114641f os.query won't check SPREAD_SYSTEM anymore to compare core systems b89ec98b23 use local variables in os.query tool dacfd81de9 fix is_core functions 1db5214d5f Improve the remote docs (#36) 2e4a3153a2 1 more comment 3a0fc57e1e add explanation about why we check for ( Do | Doing ) 4cf8e635bf fix os.query test after merge b89b4f8647 fix artifacts name d30cee6da0 Merge remote-tracking branch 'upstream/main' 5ef5dcbe8f Tests use artifacts in spread tests (#51) 555c43d2ab Support auto-refresh with Do instead of Doing 96c2b0c19c remove tests support for ubuntu 23.04 (EoL) 74082c0c34 Tests improve remote wait (#49) 5121bfb659 remove support for opensuse leap 15.4 (#48) 30df700d08 Add new systems support (#47) 1f08938925 Support check amazon linux version (#46) 43533bdd97 Change the exit value checking for test formats (#45) 3c88244c04 Update check-test-format to support a dir and a list of files (#44) 510d95f429 add extra check for error in auto-refresh detection 
function 3289d4031b Try open the log with latin-1 encoding when utf-8 is not working 9db785499f improved how the tools are waiting for system reboot 2a5c4414a3 fix shellcheck errors 5e7b63883d Fixes for osquery and tests pkgs (#43) 4c9145e2ac support reboot waiting for auto-refresh 45768f5188 show changes in unknown status after refresh 8013c30c2a Remove support for ubuntu 22.10 b32b80bf54 Fix remote.rait-for test in bionic 5675c625e9 Enable fedora 38 55f4471957 Support for new oss f2e88b357c New tool used to query spread json reports cacd35ede0 utils/spread-shellcheck: explain disabled warnings (#42) c82afb2dee Support --no-install-recommends parameter when installing dependencies with tests.pkgs b84eea92e2 spread-shellcheck: fix quotes in environment variables (#41) ab1e51c29f New comparison in os-query for core systems (#40) e5ae22a5d4 systemd units can be overwritten 63540b845a Fix error messages in remote pull and push 75e8a426a5 make sure the unit is removed in tests.systemd test 9089ff5c02 Update tests to use the new tests.systemd stop-unit 44ecd5e56a Move tests.systemd stop-units to stop-unit 01a2a83b4b Update tests.systemd to have stop units as systemd.sh 162e93bd35 update tests.systemd CLI options to be the same than retry command 14aa43a405 new feature to re-run failed spread tests (#39) 604cb782db Fix shellcheck in systemd tool bfc71082c8 Update the tests.systemd to allow parameters waiting for service status 8a2d0a99df Adding quiet tool and removing set +-x from tests.pkgs d90935d2a4 A comment explaining about the default values for wait-for 3232c5dba7 Add support for ubuntu 23.04 a7164fba07 remove fedora 35 support, add fedora 37 support 89b9eb5301 Update systems supported 92bb6a0664 Include snap-sufix in the snaps.name tool git-subtree-dir: tests/lib/external/snapd-testing-tools git-subtree-split: 496cb7b5b3c160dc86e84fb7adca3bd5c862e88e * log-parser does not support anymore --print-detail * Squashed 'tests/lib/external/snapd-testing-tools/' changes 
from 496cb7b5b3..cc68c9868b cc68c9868b Added type annotations for log-filter 66f90d10cd Adding -O to scp command to make it compatible in uc24 tests git-subtree-dir: tests/lib/external/snapd-testing-tools git-subtree-split: cc68c9868bcc504497e0a8e459810c8ca2aa0c8d * Squashed 'tests/lib/external/snapd-testing-tools/' changes from cc68c9868b..18615b1667 18615b1667 just usc scp -O when ssh version is -ge 9 git-subtree-dir: tests/lib/external/snapd-testing-tools git-subtree-split: 18615b1667b6f292d29333f34a973938a28c0e7b * Squashed 'tests/lib/external/snapd-testing-tools/' changes from 18615b1667..1ff651e680 1ff651e680 update wording of remote.pull git-subtree-dir: tests/lib/external/snapd-testing-tools git-subtree-split: 1ff651e6804b32184f7b62f9492c58d95a397cc1 * Update tests/lib/external/snapd-testing-tools/tests/remote.retry/task.yaml Co-authored-by: Zygmunt Bazyli Krynicki <me@zygoon.pl> * fix merge issue * Squashed 'tests/lib/external/snapd-testing-tools/' changes from 1ff651e680..58da1e36c3 58da1e36c3 mypy cleaned git-subtree-dir: tests/lib/external/snapd-testing-tools git-subtree-split: 58da1e36c3c95398af5b2111ebcef87eebb7bd46 * Squashed 'tests/lib/external/snapd-testing-tools/' changes from 58da1e36c3..b4a5439c9b b4a5439c9b added more type annotatios to log_helper git-subtree-dir: tests/lib/external/snapd-testing-tools git-subtree-split: b4a5439c9bd31f11f7fa08515a8142b399fcbcc8 * Squashed 'tests/lib/external/snapd-testing-tools/' changes from b4a5439c9b..6f6187416d 6f6187416d fix list implementation git-subtree-dir: tests/lib/external/snapd-testing-tools git-subtree-split: 6f6187416da05d9b6799c10bfb6534085a13524b --------- Co-authored-by: Zygmunt Bazyli Krynicki <me@zygoon.pl>
526 lines
15 KiB
Python
Executable File
526 lines
15 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
|
|
"""
|
|
This tool reads a spread log and creates a file with all the data
|
|
The output file includes the more important information extracted
|
|
from the log to be analyzed
|
|
"""
|
|
|
|
import argparse
|
|
import json
|
|
import os
|
|
import re
|
|
import sys
|
|
|
|
import log_helper as helper
|
|
|
|
|
|
class Phase:
    """A task/suite/project action phase: Preparing, Executing or Restoring.

    Each instance keeps the phase verb, the task it applies to, the
    timestamp and the raw log line it was parsed from.
    """

    def __init__(self, verb, task, date, time, source_line):
        self.type = "phase"
        self.verb = verb
        self.task = task
        self.date = date
        self.time = time
        self.source_line = source_line

    def __repr__(self):
        # The raw log line is already human readable; reuse it verbatim.
        return self.source_line

    def to_dict(self):
        """Return a JSON-serializable representation of the phase."""
        return dict(
            type=self.type,
            date=self.date,
            time=self.time,
            verb=self.verb,
            task=self.task,
        )
|
|
|
|
|
|
class Result:
    """Outcome entry of a spread run: Successful, Failed or Aborted.

    Holds the result type, the level/stage it applies to, the count of
    affected items and an optional Detail with the trailing log lines.
    """

    def __init__(
        self, result_type, level, stage, number, date, time, detail, source_line
    ):
        self.type = "result"
        self.result_type = result_type
        self.level = level
        self.stage = stage
        self.number = number
        self.date = date
        self.time = time
        self.detail = detail
        self.source_line = source_line

    def __repr__(self):
        # Append the detail lines (if any) right after the raw line.
        suffix = str(self.detail) if self.detail else ""
        return self.source_line + suffix

    def to_dict(self):
        """Return a JSON-serializable representation of the result."""
        return {
            "type": self.type,
            "date": self.date,
            "time": self.time,
            "result_type": self.result_type,
            "level": self.level,
            "stage": self.stage,
            "number": self.number,
            "detail": self.detail.to_dict() if self.detail else None,
        }
|
|
|
|
|
|
class Info:
    """A task status entry found in the spread log.

    The info can be one of: Error, Debug or Warning, optionally followed
    by a Detail with the trailing log lines.
    """

    def __init__(self, info_type, verb, task, extra, date, time, detail, source_line):
        self.type = "info"
        self.info_type = info_type
        self.verb = verb
        self.task = task
        self.extra = extra
        self.date = date
        self.time = time
        self.detail = detail
        self.source_line = source_line

    def __repr__(self):
        if not self.detail:
            return self.source_line
        # Append the detail lines right after the raw line.
        return "{}{}".format(self.source_line, self.detail)

    def to_dict(self):
        """Return a JSON-serializable representation of the info entry."""
        return {
            "type": self.type,
            "date": self.date,
            "time": self.time,
            "info_type": self.info_type,
            "verb": self.verb,
            "task": self.task,
            "extra": self.extra,
            "detail": self.detail.to_dict() if self.detail else None,
        }
|
|
|
|
|
|
class Rule:
    """A KEY=PATTERN pair used to extract information from a set of lines.

    The PATTERN is a regular expression. It is compiled once at
    construction time and the compiled object is reused by filter(),
    instead of recompiling it on every call as before.

    Raises:
        ValueError: when the rule does not follow the KEY=PATTERN format,
            or when the pattern is not a valid regular expression.
    """

    def __init__(self, rule):
        parts = rule.split("=", 1)
        if len(parts) != 2:
            raise ValueError(
                "Error: Rule '{}' does not follow the KEY=PATTERN format".format(rule)
            )

        self.key = parts[0]
        self.pattern = parts[1]

        try:
            # Compile once here; filter() reuses the compiled regex.
            self.regex = re.compile(self.pattern)
        except re.error as err:
            raise ValueError(
                "Error: pattern '{}' cannot be compiled: {}".format(self.pattern, err)
            ) from err

    def filter(self, lines) -> list[str]:
        """Return all non-empty matches of the pattern across the lines."""
        all_matches = []
        for line in lines:
            for match in self.regex.findall(line):
                # findall can yield empty strings (or empty tuples when the
                # pattern contains groups); skip those.
                if match:
                    all_matches.append(match)
        return all_matches
|
|
|
|
|
|
class Detail:
    """Extra log lines which are displayed after an info/result entry.

    Besides storing the raw lines, it applies a list of Rule objects and
    stores each rule's matches in self.data keyed by the rule's KEY.

    Args:
        lines_limit: maximum number of lines to keep when rendering;
            a negative value means "no limit".
        lines: the raw detail lines.
        rules: Rule objects to run over the lines (the previous
            annotation said list[str], but rule.key/rule.filter are used).
    """

    def __init__(self, lines_limit: int, lines: list[str], rules: list["Rule"]) -> None:
        self.lines_limit = lines_limit
        self.lines = lines
        self.data: dict[str, list[str]] = {}
        self._process_rules(rules)

    def _get_lines(self) -> list[str]:
        """Return the tail of the stored lines honoring lines_limit."""
        if self.lines_limit < 0 or self.lines_limit > len(self.lines):
            return self.lines

        # Use self.lines_limit-1 because the last line is a '.' and we don't
        # want to count it as a line in the log details
        return self.lines[-self.lines_limit - 1 :]

    def _process_rules(self, rules: list["Rule"]) -> None:
        # Store the matches for every rule keyed by the rule's KEY.
        for rule in rules:
            self.data[rule.key] = rule.filter(self.lines)

    def __repr__(self):
        return "".join(self._get_lines())

    def to_dict(self) -> dict[str, list[str]]:
        """Return a JSON-serializable representation of the detail."""
        # Reuse _get_lines() so the limit semantics stay consistent with
        # __repr__; the previous code re-sliced the raw list here, which
        # disagreed with _get_lines() for negative limits other than -1.
        details_dict = {"lines": self._get_lines()}
        details_dict.update(self.data)
        return details_dict
|
|
|
|
|
|
class Action:
    """A general spread operation seen while executing tests.

    Covers operations such as Rebooting, Discarding, Allocating, Waiting,
    Allocated, Connecting, Connected and Sending.
    """

    def __init__(
        self, verb: str, task: str, extra: str, date: str, time: str, source_line: str
    ) -> None:
        self.type = "action"
        self.verb = verb
        self.task = task
        self.extra = extra
        self.date = date
        self.time = time
        self.source_line = source_line

    def __repr__(self):
        # The raw log line is already human readable; reuse it verbatim.
        return self.source_line

    def to_dict(self) -> dict[str, str]:
        """Return a JSON-serializable representation of the action."""
        keys = ("type", "date", "time", "verb", "task", "extra")
        return {key: getattr(self, key) for key in keys}
|
|
|
|
|
|
class LogReader:
    """
    LogReader manages the spread log, it allows to read, export and print.

    Typical usage: construct, check_log_exists(), read_spread_log() to
    parse the file into self.full_log, then export_log()/print_log().
    """

    def __init__(
        self,
        filepath: str,
        output_type: str,
        lines_limit: int,
        error_rules: list[str],
        debug_rules: list[str],
    ) -> None:
        # filepath: path to the spread log to parse.
        self.filepath = filepath
        # output_type: export format name; currently only "json" is passed
        # in by _make_parser — TODO confirm it is otherwise unused here.
        self.output_type = output_type
        # lines_limit: max detail lines kept per entry (see Detail).
        self.lines_limit = lines_limit
        # Raw lines of the log, filled by read_spread_log().
        self.lines = list[str]()
        # Index of the next line to consume (1-based after _next_line()).
        self.iter = 0
        # Parsed entries (Phase/Info/Action/Result objects) in log order.
        self.full_log = list[str]()
        # Compile the KEY=PATTERN rules up front; Rule raises ValueError
        # for malformed rules.
        self.error_rules = [Rule(rule) for rule in error_rules]
        self.debug_rules = [Rule(rule) for rule in debug_rules]

    def __repr__(self):
        return str(self.to_dict())

    def to_dict(self) -> dict:
        """Return a dict wrapping the parsed log entries."""
        return {"full_log": self.full_log}

    def print_log(self) -> None:
        """Print the whole parsed log to stdout; no-op when empty."""
        if not self.full_log:
            return

        print("".join(str(x) for x in self.full_log))

    def export_log(self, filepath: str) -> None:
        """Serialize the parsed log entries to a JSON file at filepath."""
        prepared_log = []
        for item in self.full_log:
            # Defensive: entries are normally objects with to_dict(), but
            # plain strings are passed through unchanged.
            if isinstance(item, str):
                prepared_log.append(item)
            else:
                prepared_log.append(item.to_dict())
        with open(filepath, "w") as json_file:
            json.dump(prepared_log, json_file, indent=4)

    def _next_line(self) -> str:
        """Consume and return the next raw line, advancing self.iter."""
        self.iter = self.iter + 1
        return self.lines[self.iter - 1]

    def check_log_exists(self) -> bool:
        """Return True when the log file exists on disk."""
        return os.path.exists(self.filepath)

    def read_spread_log(self) -> None:
        """Read the log file and parse it into self.full_log entries."""
        try:
            with open(self.filepath, "r", encoding="utf-8") as filepath:
                self.lines = filepath.readlines()
        except UnicodeDecodeError:
            # Some spread logs contain non-UTF-8 bytes; retry with latin-1
            # which accepts any byte sequence.
            with open(self.filepath, "r", encoding="latin-1") as filepath:
                self.lines = filepath.readlines()

        self.iter = 0

        # Then iterate line by line analyzing the log
        while self.iter < len(self.lines):
            line = self._next_line()

            # Skip lines that do not start a recognized operation.
            if not helper.is_any_operation(line):
                continue

            # The line is a task execution; preparing, executing, restoring
            if self._match_phase(line):
                phase = self._get_phase(line)
                if phase:
                    self.full_log.append(phase)
                continue

            # The line shows info: error, debug, warning
            if self._match_info(line):
                info = self._get_info(line)
                if info:
                    self.full_log.append(info)
                continue

            # The line is another operation: Rebooting, Discarding, Allocating
            # Waiting, Allocated, Connecting, Connected, Sending'
            if self._match_action(line):
                action = self._get_action(line)
                if action:
                    self.full_log.append(action)
                continue

            # The line is a result: Successful, Aborted, Failed
            if self._match_result(line):
                result = self._get_result(line)
                if result:
                    self.full_log.append(result)
                continue

    def _match_info(self, line: str) -> bool:
        """Return True when the line is an Error/Debug/Warning entry."""
        return helper.get_operation(line) in helper.ExecutionInfo.list()

    def _match_phase(self, line: str) -> bool:
        """Return True when the line is a Preparing/Executing/Restoring entry."""
        return helper.get_operation(line) in helper.ExecutionPhase.list()

    def _match_action(self, line: str) -> bool:
        """Return True when the line is a general action or action status."""
        return (
            helper.get_operation(line)
            in helper.GeneralAction.list() + helper.GeneralActionStatus.list()
        )

    def _match_result(self, line: str) -> bool:
        """Return True when the line is a Successful/Failed/Aborted result."""
        return helper.get_operation(line) in helper.Result.list()

    def _get_detail(self, rules, results=False, other_limit=None):
        """
        This function is used to get the piece of log which is after the
        info lines (error, debug, warning). The detail could also include
        a limit of lines to tail the log and show the last lines.
        It returns a Detail object included all the lines.
        """
        detail = []
        while self.iter < len(self.lines):
            previous_line = self.lines[self.iter - 1]
            line = self._next_line()
            if helper.is_detail_finished(line):
                # This is needed because it could happen that in the details
                # there is a spread log because sometimes we run nested spread.
                # The '.' terminator check is not valid when the details are
                # for failed tests in the results section.
                if results or previous_line.strip() == ".":
                    break

            detail.append(line)

        # We leave the iter in the last line in case the log has finished
        if not self.iter == len(self.lines):
            self.iter = self.iter - 1
        # NOTE(review): a falsy other_limit (None, but also 0) falls back to
        # the reader-wide limit — confirm 0 is never intended as "no lines".
        if not other_limit:
            other_limit = self.lines_limit

        return Detail(other_limit, detail, rules)

    def _get_info(self, line):
        """
        Get the Info object for the error, debug and warning lines including
        the details for this
        """
        date = helper.get_date(line)
        time = helper.get_time(line)
        info_type = helper.get_operation(line)
        info_extra = helper.get_operation_info(line)

        verb = None
        task = None
        if info_type == helper.ExecutionInfo.WARNING.value:
            # Removing the : from WARNING:
            info_type = info_type.split(":")[0]
            verb = None
            task = None
            extra = info_extra
        elif info_type == helper.ExecutionInfo.ERROR.value:
            # Error lines look like "<verb> <task> ..." — TODO confirm
            # against log_helper's operation-info format.
            verb = info_extra.split(" ")[0]
            task = info_extra.split(" ")[1]
            extra = None
        elif info_type == helper.ExecutionInfo.DEBUG.value:
            # Debug lines carry the task as the third word.
            verb = None
            task = info_extra.split(" ")[2]
            extra = None
        else:
            print("log-parser: detail type not recognized: {}".format(info_type))
            return

        # Pass the rules according to the info type
        rules = self.debug_rules
        if info_type == helper.ExecutionInfo.ERROR.value:
            rules = self.error_rules

        detail = None
        if helper.is_detail(line):
            detail = self._get_detail(rules)

        return Info(info_type, verb, task, extra, date, time, detail, line)

    def _get_result(self, line):
        """Get the Result object including the details for the result"""
        date = helper.get_date(line)
        time = helper.get_time(line)
        result_type = helper.get_operation(line)
        result_extra = helper.get_operation_info(line)
        level = result_extra.split(" ")[0].split(":")[0]
        number = result_extra.split(" ")[-1]

        stage = None
        detail = None
        if result_type == helper.Result.FAILED.value:
            if level in helper.ExecutionLevel.list():
                stage = result_extra.split(" ")[1].split(":")[0]
                # Failed results list the failed tests: keep every line
                # (other_limit=-1 means no limit in Detail).
                detail = self._get_detail([], results=True, other_limit=-1)

        return Result(result_type, level, stage, number, date, time, detail, line)

    def _get_phase(self, line):
        """
        Get the phase object for lines preparing, executing and restoring
        """
        date = helper.get_date(line)
        time = helper.get_time(line)
        verb = helper.get_operation(line)
        # The task name may carry a trailing "..." in the log; strip it.
        task = helper.get_operation_info(line).split(" ")[0]
        return Phase(verb, task.split("...")[0], date, time, line)

    def _get_action(self, line):
        """Get the general actions object for lines rebooting, allocating, etc"""

        date = helper.get_date(line)
        time = helper.get_time(line)
        verb = helper.get_operation(line)
        task = None
        extra = helper.get_operation_info(line)
        return Action(verb, task, extra, date, time, line)
|
|
|
|
|
|
def _make_parser():
|
|
# type: () -> argparse.ArgumentParser
|
|
parser = argparse.ArgumentParser(
|
|
description="""
|
|
Parse the spread log and generates a file with a standardized output. It also
|
|
allows to filter the output by type and define the number of lines to show
|
|
for the error/debug/warning output.
|
|
"""
|
|
)
|
|
parser.add_argument(
|
|
"-c",
|
|
"--cut",
|
|
type=int,
|
|
default=1000,
|
|
help="maximum number of lines for logs on errors and debug sections",
|
|
)
|
|
parser.add_argument(
|
|
"-f",
|
|
"--format",
|
|
type=str,
|
|
default="json",
|
|
choices=["json"],
|
|
help="format for the output",
|
|
)
|
|
parser.add_argument(
|
|
"-o",
|
|
"--output",
|
|
default="spread-results.json",
|
|
type=str,
|
|
help="output file to save the result",
|
|
)
|
|
parser.add_argument(
|
|
"-er",
|
|
"--error-rule",
|
|
action="append",
|
|
default=[],
|
|
help="A KEY=PATTERN used to extract and store specific data from errors",
|
|
)
|
|
parser.add_argument(
|
|
"-dr",
|
|
"--debug-rule",
|
|
action="append",
|
|
default=[],
|
|
help="A KEY=PATTERN used to extract and store specific data from debug output",
|
|
)
|
|
parser.add_argument(
|
|
"log_path", metavar="PATH", help="path to the log to be analyzed"
|
|
)
|
|
return parser
|
|
|
|
|
|
def main() -> None:
    """Entry point: parse arguments, read the log, export and print it."""
    arg_parser = _make_parser()
    args = arg_parser.parse_args()

    # Defensive guard: an empty path means there is nothing to parse.
    if len(args.log_path) == 0:
        arg_parser.print_usage()
        arg_parser.exit(0)

    log_reader = LogReader(
        args.log_path,
        args.format,
        args.cut,
        args.error_rule,
        args.debug_rule,
    )
    if not log_reader.check_log_exists():
        print("log-parser: log not found")
        sys.exit(1)

    log_reader.read_spread_log()

    if args.output:
        log_reader.export_log(args.output)

    log_reader.print_log()


if __name__ == "__main__":
    main()
|