diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 9388e929..15c7132e 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -26,3 +26,37 @@ jobs: - uses: seL4/ci-actions/link-check@master with: exclude: '/node_modules/' + + mypy: + name: MyPy + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install mypy + run: pip3 install mypy + - name: Run mypy + #run: mypy --explicit-package-bases ./ + run: | + mypy seL4-platforms + mypy seL4-platforms camkes-hw + mypy seL4-platforms camkes-test + mypy seL4-platforms camkes-vm + mypy seL4-platforms camkes-vm-hw + mypy seL4-platforms cparser-run + mypy seL4-platforms dashboard + mypy seL4-platforms l4v-deploy + mypy seL4-platforms march-of-platform + mypy seL4-platforms rump-hello + mypy seL4-platforms rump-hello-hw + mypy seL4-platforms sel4bench + mypy seL4-platforms sel4bench-hw + mypy seL4-platforms sel4bench-web + mypy seL4-platforms sel4test-hw + mypy seL4-platforms sel4test-hw-matrix + mypy seL4-platforms sel4test-hw-run + mypy seL4-platforms sel4test-sim + mypy seL4-platforms thylint + mypy seL4-platforms trigger + mypy seL4-platforms tutorials + mypy seL4-platforms webserver + mypy seL4-platforms webserver-hw diff --git a/camkes-test/build.py b/camkes-test/build.py index 8f0346fc..4a7c8e7c 100644 --- a/camkes-test/build.py +++ b/camkes-test/build.py @@ -8,15 +8,15 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. """ -from builds import Build, run_build_script, run_builds, load_builds, release_mq_locks, SKIP -from pprint import pprint -from typing import List, Union - -import json -import os import sys +import os +import argparse +import json + +import builds +import platforms +import pprint -from platforms import load_yaml, gh_output # See also builds.yml for how builds are split up in this test. We use the build # matrix and filtering for the hardware builds, and an explicit list for the @@ -33,7 +33,7 @@ def __init__(self, sim: dict): self.name = sim['match'] + post self.__dict__.update(**sim) - def __repr__(self): + def __repr__(self) -> str: return f"SimBuild('{self.name}', " '{' \ f" 'match': '{self.match}'," \ f" 'exclude': '{self.exclude}'," \ @@ -41,19 +41,22 @@ def __repr__(self): ' })' -def run_build(manifest_dir: str, build: Union[Build, SimBuild]): +def run_build(manifest_dir: str, build: builds.Build | SimBuild) -> int: """Run one CAmkES test. 
Can be either Build or SimBuild.""" - if isinstance(build, Build): + if isinstance(build, builds.Build): app = apps[build.app] build.files = build.get_platform().image_names(build.get_mode(), "capdl-loader") build.settings['CAMKES_APP'] = build.app - del build.settings['BAMBOO'] # not used in this test, avoid warning if app.get('has_cakeml'): build.settings['CAKEMLDIR'] = '/cakeml' build.settings['CAKEML_BIN'] = f"/cake-x64-{build.get_mode()}/cake" + # remove parameters from setting that CMake does not use and thus would + # raise a nasty warning + del build.settings['BAMBOO'] + script = [ ["../init-build.sh"] + build.settings_args(), ["ninja"], @@ -73,23 +76,23 @@ def run_build(manifest_dir: str, build: Union[Build, SimBuild]): else: print(f"Warning: unknown build type for {build.name}") - return run_build_script(manifest_dir, build, script) + return builds.run_build_script(manifest_dir, build, script) -def hw_run(manifest_dir: str, build: Build): +def hw_run(manifest_dir: str, build: builds.Build) -> int: """Run one hardware test.""" if build.is_disabled(): print(f"Build {build.name} disabled, skipping.") - return SKIP + return builds.SKIP build.success = apps[build.app]['success'] script, final = build.hw_run('log.txt') - return run_build_script(manifest_dir, build, script, final_script=final) + return builds.run_build_script(manifest_dir, build, script, final_script=final) -def build_filter(build: Build): +def build_filter(build: builds.Build) -> bool: if not build.app: return False @@ -106,37 +109,57 @@ def build_filter(build: Build): return True -def sim_build_filter(build: SimBuild): +def sim_build_filter(build: SimBuild) -> bool: name = os.environ.get('INPUT_NAME') plat = os.environ.get('INPUT_PLATFORM') return (not name or build.name == name) and (not plat or plat == 'sim') -def to_json(builds: List[Build]) -> str: - """Return a GitHub build matrix as GitHub output assignment.""" +def gh_output_matrix(param_name: str, build_list: list[builds.Build]) -> None: + matrix_builds = [{"name": b.name, + "platform": b.get_platform().name + } + for b in build_list] + # GitHub output assignment + matrix_json = json.dumps({"include": matrix_builds}) + platforms.gh_output(f"{param_name}={matrix_json}") + + +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--matrix', action='store_true') + g.add_argument('--hw', action='store_true') + g.add_argument('--post', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) + + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + yml = platforms.load_yaml(builds_yaml_file) + apps = yml['apps'] + sim_builds = [SimBuild(s) for s in yml['sim']] + hw_builds = builds.load_builds(None, build_filter, yml) + build_list = [b for b in sim_builds if sim_build_filter(b)] + hw_builds + + if args.dump: + pprint.pprint(build_list) + return 0 + + if args.matrix: + gh_output_matrix("matrix", build_list) + return 0 + + if args.hw: + return builds.run_builds(build_list, hw_run) - matrix = {"include": [{"name": b.name, "platform": b.get_platform().name} for b in builds]} - return "matrix=" + json.dumps(matrix) + if args.post: + builds.release_mq_locks(build_list) + return 0 + + # perform args.build as default + return builds.run_builds(build_list, run_build) -# If called as main, run all builds from builds.yml if __name__ == '__main__': - yml = 
load_yaml(os.path.dirname(__file__) + "/builds.yml") - apps = yml['apps'] - sim_builds = [SimBuild(s) for s in yml['sim']] - hw_builds = load_builds(None, build_filter, yml) - builds = [b for b in sim_builds if sim_build_filter(b)] + hw_builds - - if len(sys.argv) > 1 and sys.argv[1] == '--dump': - pprint(builds) - sys.exit(0) - elif len(sys.argv) > 1 and sys.argv[1] == '--matrix': - gh_output(to_json(builds)) - sys.exit(0) - elif len(sys.argv) > 1 and sys.argv[1] == '--hw': - sys.exit(run_builds(builds, hw_run)) - elif len(sys.argv) > 1 and sys.argv[1] == '--post': - release_mq_locks(builds) - sys.exit(0) - - sys.exit(run_builds(builds, run_build)) + sys.exit(main(sys.argv[1:])) diff --git a/camkes-vm/build.py b/camkes-vm/build.py index d7d18347..f64ccd16 100644 --- a/camkes-vm/build.py +++ b/camkes-vm/build.py @@ -8,11 +8,12 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. """ -from builds import Build, run_build_script, run_builds, load_builds, release_mq_locks, SKIP, sim_script -from pprint import pprint - -import os import sys +import os +import argparse + +from builds import Build, run_build_script, run_builds, load_builds, release_mq_locks, SKIP +from pprint import pprint # See also builds.yml for how builds are split up in this test. We use the build @@ -21,22 +22,24 @@ # The only thing this really has in common with a "Build" is the "name" field. -def run_build(manifest_dir: str, build: Build): +def run_build(manifest_dir: str, build: builds.Build) -> int: """Run one CAmkES VM test.""" plat = build.get_platform() build.files = plat.image_names(build.get_mode(), "capdl-loader") build.settings['CAMKES_VM_APP'] = build.app or build.name - del build.settings['BAMBOO'] # not used in this test, avoid warning - - if plat.arch == 'x86': - del build.settings['PLATFORM'] # not used for x86 in this test, avoid warning # if vm_platform is set, the init-build.sh script expects a different platform name. 
if build.vm_platform: build.settings['PLATFORM'] = build.vm_platform + # remove parameters from setting that CMake does not use and thus would + # raise a nasty warning + del build.settings['BAMBOO'] + if plat.arch == 'x86': + del build.settings['PLATFORM'] + script = [ ["../init-build.sh"] + build.settings_args(), ["ninja"], @@ -46,35 +49,50 @@ def run_build(manifest_dir: str, build: Build): if plat.has_simulation and plat.name != 'PC99': script.append(sim_script(build.success, failure=build.error)) - return run_build_script(manifest_dir, build, script) + return builds.run_build_script(manifest_dir, build, script) -def hw_run(manifest_dir: str, build: Build): +def hw_run(manifest_dir: str, build: builds.Build) -> int: """Run one hardware test.""" if build.is_disabled(): print(f"Build {build.name} disabled, skipping.") - return SKIP + return builds.SKIP plat = build.get_platform() build.files = plat.image_names(build.get_mode(), "capdl-loader") script, final = build.hw_run('log.txt') - return run_build_script(manifest_dir, build, script, final_script=final) + return builds.run_build_script(manifest_dir, build, script, final_script=final) + + +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--hw', action='store_true') + g.add_argument('--post', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) + + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + build_list = builds.load_builds(builds_yaml_file) + + if args.dump: + pprint.pprint(build_list) + return 0 + + if args.hw: + return builds.run_builds(build_list, hw_run) + + if args.post: + builds.release_mq_locks(build_list) + return 0 + + # perform args.build as default + return builds.run_builds(build_list, run_build) -# If called as main, run all builds from builds.yml if __name__ == '__main__': - builds = load_builds(os.path.dirname(__file__) + "/builds.yml") - - if len(sys.argv) > 1 and sys.argv[1] == '--dump': - pprint(builds) - sys.exit(0) - elif len(sys.argv) > 1 and sys.argv[1] == '--hw': - sys.exit(run_builds(builds, hw_run)) - elif len(sys.argv) > 1 and sys.argv[1] == '--post': - release_mq_locks(builds) - sys.exit(0) - - sys.exit(run_builds(builds, run_build)) + sys.exit(main(sys.argv[1:])) diff --git a/cparser-run/build.py b/cparser-run/build.py index 26a7e392..8d0dbd99 100644 --- a/cparser-run/build.py +++ b/cparser-run/build.py @@ -8,14 +8,15 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. 
""" +import sys +import os +import argparse + from builds import run_build_script, run_builds, load_builds from pprint import pprint -import os -import sys - -def run_cparser(manifest_dir: str, build): +def run_cparser(manifest_dir: str, build) -> int: """Single run of the C Parser test, for one build definition""" script = [ @@ -28,12 +29,23 @@ def run_cparser(manifest_dir: str, build): return run_build_script(manifest_dir, build, script) -# If called as main, run all builds from builds.yml -if __name__ == '__main__': - builds = load_builds(os.path.dirname(__file__) + "/builds.yml") +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) + + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + builds = load_builds(builds_yaml_file) - if len(sys.argv) > 1 and sys.argv[1] == '--dump': + if args.dump: pprint(builds) - sys.exit(0) + return 0 + + # perform args.build as default + return run_builds(builds, run_cparser) - sys.exit(run_builds(builds, run_cparser)) + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/l4v-deploy/common.py b/l4v-deploy/common.py index d94f3f85..a0a1b3f7 100644 --- a/l4v-deploy/common.py +++ b/l4v-deploy/common.py @@ -19,12 +19,12 @@ def loud_command(*args, **kwargs): return run_command(*args, **kwargs) -def indent(s, indent=' '): +def indent(s, indent=' ') -> str: '''Indent all lines in a string''' return '\n'.join(indent + line for line in s.splitlines()) -def format_commit_message(msg): +def format_commit_message(msg: str) -> str: '''Add a standard header and footer to a commit message''' msg = "[CI] " + msg return msg diff --git a/march-of-platform/march.py b/march-of-platform/march.py index fbb64a6a..5844d3b6 100755 --- a/march-of-platform/march.py +++ b/march-of-platform/march.py @@ -2,17 +2,28 @@ # # SPDX-License-Identifier: BSD-2-Clause -from platforms import platforms, gh_output import sys +#import argparse +from platforms import platforms, gh_output + + +def main(params: list) -> int: + #parser = argparse.ArgumentParser() + #g = parser.add_mutually_exclusive_group() + #args = parser.parse_args() + + if len(params) != 2: + return 1 + + arg = params[1] + plat = platforms.get(arg.upper()) + if plat: + gh_output(f"march={plat.march}") + return 0 + + print(f"Unknown platform: '{arg}'") + return 1 + if __name__ == '__main__': - if len(sys.argv) == 2: - plat = platforms.get(sys.argv[1].upper()) - if plat: - gh_output(f"march={plat.march}") - sys.exit(0) - else: - print(f"Unknown platform {sys.argv[1]}") - sys.exit(1) - - sys.exit(1) + sys.exit(main(sys.argv[1:])) diff --git a/rump-hello/build.py b/rump-hello/build.py index c4c78b21..55ebc0aa 100644 --- a/rump-hello/build.py +++ b/rump-hello/build.py @@ -8,20 +8,22 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. 
""" -from builds import Build, run_build_script, run_builds, load_builds, sim_script -from builds import release_mq_locks, SKIP -from pprint import pprint - -import os import sys +import os +import argparse +import build +import pprint -def adjust_build(build: Build): + +def adjust_build(build: builds.Build): build.files = build.get_platform().image_names(build.get_mode(), "roottask") - del build.settings['BAMBOO'] # not used in this test, avoid warning + # remove parameters from setting that CMake does not use and thus would + # raise a nasty warning + del build.settings['BAMBOO'] -def run_build(manifest_dir: str, build: Build): +def run_build(manifest_dir: str, build: builds.Build) -> int: """Run one rumprun-hello test.""" adjust_build(build) @@ -32,40 +34,53 @@ def run_build(manifest_dir: str, build: Build): ] if build.req == 'sim': - script.append(sim_script(build.success)) + script.append(builds.sim_script(build.success)) else: script.append(["tar", "czf", f"../{build.name}-images.tar.gz", "images/"]) - return run_build_script(manifest_dir, build, script) + return builds.run_build_script(manifest_dir, build, script) -def hw_run(manifest_dir: str, build: Build): +def hw_run(manifest_dir: str, build: builds.Build) -> int: """Run one hardware test.""" adjust_build(build) if build.is_disabled(): print(f"Build {build.name} disabled, skipping.") - return SKIP + return builds.SKIP script, final = build.hw_run('log.txt') - return run_build_script(manifest_dir, build, script, final_script=final) + return builds.run_build_script(manifest_dir, build, script, final_script=final) -# If called as main, run all builds from builds.yml -if __name__ == '__main__': - builds = load_builds(os.path.dirname(__file__) + "/builds.yml") +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--hw', action='store_true') + g.add_argument('--post', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) - if len(sys.argv) > 1 and sys.argv[1] == '--dump': - pprint(builds) - sys.exit(0) + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + build_list = builds.load_builds(builds_yaml_file) - if len(sys.argv) > 1 and sys.argv[1] == '--hw': - sys.exit(run_builds(builds, hw_run)) + if args.dump: + pprint.pprint(build_list) + return 0 - if len(sys.argv) > 1 and sys.argv[1] == '--post': - release_mq_locks(builds) - sys.exit(0) + if args.hw: + return builds.run_builds(build_list, hw_run) - sys.exit(run_builds(builds, run_build)) + if args.post: + builds.release_mq_locks(build_list) + return 0 + + # perform args.build as default + return builds.run_builds(build_list, run_build) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/seL4-platforms/builds.py b/seL4-platforms/builds.py index 4f7aa2fa..3758ea2b 100644 --- a/seL4-platforms/builds.py +++ b/seL4-platforms/builds.py @@ -12,18 +12,19 @@ `default_junit_results` for a standard place to leave a jUnit summary file. 
""" -from junitparser.junitparser import Failure, Error -from platforms import ValidationException, Platform, platforms, load_yaml, mcs_unsupported +from typing import Union +import sys +import os +import subprocess +import shutil +import copy +import time -from typing import Optional, List, Tuple, Union +import platforms +from platforms import ValidationException +from junitparser.junitparser import Failure, Error from junitparser import JUnitXml -import copy -import time -import os -import shutil -import subprocess -import sys # exported names: __all__ = [ @@ -34,13 +35,72 @@ # where to expect jUnit results by default junit_results = 'results.xml' -# colour codes -ANSI_RESET = "\033[0m" -ANSI_RED = "\033[31;1m" -ANSI_GREEN = "\033[32m" -ANSI_YELLOW = "\033[33m" -ANSI_WHITE = "\033[37m" -ANSI_BOLD = "\033[1m" +# return codes for a test run or single step of a run +FAILURE = 0 +SUCCESS = 1 +SKIP = 2 +REPEAT = 3 + + +class AnsiPrinter: + # colour codes + ANSI_RESET = "\033[0m" + ANSI_BOLD = "\033[1m" + #ANSI_BLACK = "\033[30m" + ANSI_RED = "\033[31;1m" + ANSI_GREEN = "\033[32m" + ANSI_YELLOW = "\033[33m" + ANSI_BLUE = "\033[34m" + ANSI_MAGENTA = "\033[35m" + ANSI_CYAN = "\033[36m" + ANSI_WHITE = "\033[37m" + + @classmethod + def printc(cls, ansi_color: str, content: str): + print(f"{ansi_color}{content}{cls.ANSI_RESET}") + sys.stdout.flush() + + @classmethod + def error(cls, content: str): + cls.printc(cls.ANSI_RED, content) + + @classmethod + def warn(cls, content: str): + cls.printc(cls.ANSI_YELLOW, content) + + @classmethod + def skip(cls, content: str): + cls.printc(cls.ANSI_YELLOW, content) + + @classmethod + def ok(cls, content: str): + cls.printc(cls.ANSI_GREEN, content) + + @classmethod + def command(cls, cmd: Union[str, list]): + cmd = cmd if isinstance(cmd, str) \ + else " ".join(cmd) if isinstance(cmd, list) \ + else str(cmd) + cls.printc(cls.ANSI_YELLOW, f"+++ {cmd}") + + @classmethod + def step_start(cls, step_type: str, step_name: str): + print(f"::group::{step_name}") + cls.printc(cls.ANSI_BOLD, + f"-----------[ start {step_type} {step_name} ]-----------") + + @classmethod + def step_end(cls, step_type: str, step_name: str, result: int): + cls.printc(cls.ANSI_BOLD, + f"-----------[ end {step_type} {step_name} ]-----------") + print("::endgroup::") + # print status after group, so that it's easier to scan for failed jobs + if result == SUCCESS: + cls.ok(f"{step_name} succeeded") + elif result == SKIP: + cls.skip(f"{step_name} skipped") + elif result == FAILURE: + cls.error(f"{step_name} FAILED") class Build: @@ -62,7 +122,7 @@ def __init__(self, entries: dict, default={}): self.settings = {} self.timeout = 900 self.no_hw_test = False - self.image_base_name = "sel4test-driver" + self.image_base_name = None [self.name] = entries.keys() attribs = copy.deepcopy(default) # this potentially overwrites the default settings dict, we restore it later @@ -83,17 +143,25 @@ def update_settings(self): if p.arch != "x86": self.settings[p.cmake_toolchain_setting(m)] = "TRUE" self.settings["PLATFORM"] = p.get_platform(m) + # somewhat misnamed now; sets test output to parsable xml: + # See sel4test/settings.cmake, if Sel4testAllowSettingsOverride is not + # set then BAMBOO controls the setting for LibSel4TestPrintXML self.settings["BAMBOO"] = "TRUE" + self.files = p.image_names(m, self.image_base_name) + if self.req == 'sim': + # See sel4test/settings.cmake, if Sel4testAllowSettingsOverride is + # bot set then SIMULATION controls the settings for + # Sel4testSimulation and Sel4testHaveCache. 
self.settings["SIMULATION"] = "TRUE" - def get_platform(self) -> Platform: + def get_platform(self) -> platforms.Platform: """Return the Platform object for this build definition.""" - return platforms[self.platform] + return platforms.platforms[self.platform] - def get_mode(self) -> Optional[int]: + def get_mode(self) -> int: """Return the mode (32/64) for this build; taken from platform if not defined""" if not self.mode and self.get_platform().get_mode(): return self.get_platform().get_mode() @@ -159,7 +227,7 @@ def is_gcc(self) -> bool: return not self.is_clang() def can_mcs(self) -> bool: - return self.get_platform().name not in mcs_unsupported + return self.get_platform().name not in platforms.mcs_unsupported def set_mcs(self): if not self.can_mcs(): @@ -192,6 +260,8 @@ def is_domains(self) -> bool: return self.settings.get('DOMAINS') is not None def validate(self): + if not self.image_base_name: + raise ValidationException("Build: no image base name") if not self.get_mode(): raise ValidationException("Build: no unique mode") if not self.get_platform(): @@ -217,7 +287,7 @@ def __repr__(self) -> str: def is_disabled(self) -> bool: return self.no_hw_test or self.get_platform().no_hw_test - def get_req(self) -> List[str]: + def get_req(self) -> list[str]: req = self.req or self.get_platform().req if not req or req == []: return [] @@ -241,13 +311,13 @@ class Run: Build class. So far we only vary machine requirements (req) and name in a Run. """ - def __init__(self, build: Build, suffix: Optional[str] = None, - req: Optional[str] = None): + def __init__(self, build: Build, suffix: str = None, + req: str = None): self.build = build self.name = build.name + suffix if suffix else build.name self.req = req - def get_req(self) -> List[str]: + def get_req(self) -> list[str]: return self.req or self.build.get_req() def hw_run(self, log): @@ -347,14 +417,14 @@ def hw_run(self, log): ] -def repeat_on_boot_failure(log: Optional[List[str]]) -> int: +def repeat_on_boot_failure(log: list[str]) -> int: """Try to repeat the test run if the board failed to boot.""" if log: for pat in boot_fail_patterns: for i in range(len(log)+1-len(pat)): if all(p in log[i+j] for j, p in enumerate(pat)): - printc(ANSI_RED, "Boot failure detected.") + AnsiPrinter.error("Boot failure detected.") time.sleep(10) return REPEAT, None else: @@ -363,7 +433,7 @@ def repeat_on_boot_failure(log: Optional[List[str]]) -> int: return SUCCESS, None -def release_mq_locks(runs: List[Union[Run, Build]]): +def release_mq_locks(runs: list[Union[Run, Build]]): """Release locks from this job; runs the commands instead of returning a list.""" def run(command): @@ -404,7 +474,7 @@ def get_machine(req): return req[job_index % len(req)] -def job_key(): +def job_key() -> str: return os.environ.get('GITHUB_REPOSITORY') + "-" + \ os.environ.get('GITHUB_WORKFLOW') + "-" + \ os.environ.get('GITHUB_RUN_ID') + "-" + \ @@ -414,15 +484,15 @@ def job_key(): def mq_run(success_str: str, machine: str, - files: List[str], + files: list[str], retries: int = -1, lock_timeout: int = 8, completion_timeout: int = -1, - log: Optional[str] = None, + log: str = None, lock_held=False, keep_alive=False, - key: Optional[str] = None, - error_str: Optional[str] = None): + key: str = None, + error_str: str = None): """Machine queue mq.sh run command with arguments. Expects success marker, machine name, and boot image file(s). 
@@ -451,48 +521,33 @@ def mq_run(success_str: str, return command -def mq_lock(machine: str) -> List[str]: +def mq_lock(machine: str) -> list[str]: """Get lock for a machine. Allow lock to be reclaimed after 30min.""" return ['time', 'mq.sh', 'sem', '-wait', machine, '-k', job_key(), '-T', '1800'] -def mq_release(machine: str) -> List[str]: +def mq_release(machine: str) -> list[str]: """Release lock on a machine.""" return ['mq.sh', 'sem', '-signal', machine, '-k', job_key()] -def mq_cancel(machine: str) -> List[str]: +def mq_cancel(machine: str) -> list[str]: """Cancel processes waiting on lock for a machine.""" return ['mq.sh', 'sem', '-cancel', machine, '-k', job_key()] -def mq_print_lock(machine: str) -> List[str]: +def mq_print_lock(machine: str) -> list[str]: """Print lock status for machine.""" return ['mq.sh', 'sem', '-info', machine] -# return codes for a test run or single step of a run -FAILURE = 0 -SUCCESS = 1 -SKIP = 2 -REPEAT = 3 - - -def success_from_bool(success: bool) -> int: - if success: - return SUCCESS - else: - return FAILURE - - -def run_cmd(cmd, run: Union[Run, Build], prev_output: Optional[str] = None) -> int: - """If the command is a List[str], echo + run command with arguments, otherwise +def run_cmd(cmd, run: Union[Run, Build], prev_output: str = None) -> int: + """If the command is a list[str], echo + run command with arguments, otherwise expect a function, and run that function on the supplied Run plus outputs from previous command.""" if isinstance(cmd, list): - printc(ANSI_YELLOW, "+++ " + " ".join(cmd)) - sys.stdout.flush() + AnsiPrinter.command(cmd) # Print output as it arrives. Some of the build commands take too long to # wait until all output is there. Keep stderr separate, but flush it. process = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE, @@ -504,18 +559,15 @@ def run_cmd(cmd, run: Union[Run, Build], prev_output: Optional[str] = None) -> i print(line) sys.stdout.flush() sys.stderr.flush() - ret = process.wait() - return success_from_bool(ret == 0), lines + ret_code = process.wait() + ret = SUCCESS if (0 == ret_code) else FAILURE + return ret, lines else: return cmd(run, prev_output) -def printc(color: str, content: str): - print(color + content + ANSI_RESET) - - -def summarise_junit(file_path: str) -> Tuple[int, List[str]]: +def summarise_junit(file_path: str) -> tuple[int, list[str]]: """Parse jUnit output and show a summary. 
Returns True if there were no failures or errors, raises exception @@ -524,24 +576,25 @@ def summarise_junit(file_path: str) -> Tuple[int, List[str]]: xml = JUnitXml.fromfile(file_path) succeeded = xml.tests - (xml.failures + xml.errors + xml.skipped) success = xml.failures == 0 and xml.errors == 0 + ret = SUCCESS if success else FAILURE + col = AnsiPrinter.ANSI_GREEN if success else AnsiPrinter.ANSI_RED - col = ANSI_GREEN if success else ANSI_RED - - printc(col, "Test summary") - printc(col, "------------") - printc(ANSI_GREEN if success else "", f"succeeded: {succeeded}/{xml.tests}") + AnsiPrinter.printc(col, "Test summary") + AnsiPrinter.printc(col, "------------") + AnsiPrinter.printc(AnsiPrinter.ANSI_GREEN if success else "", + f"succeeded: {succeeded}/{xml.tests}") if xml.skipped > 0: - printc(ANSI_YELLOW, f"skipped: {xml.skipped}") + AnsiPrinter.skip(f"skipped: {xml.skipped}") if xml.failures > 0: - printc(ANSI_RED, f"failures: {xml.failures}") + AnsiPrinter.error(f"failures: {xml.failures}") if xml.errors > 0: - printc(ANSI_RED, f"errors: {xml.errors}") + AnsiPrinter.error(f"errors: {xml.errors}") print() failures = {str(case.name) for case in xml if any([isinstance(r, Failure) or isinstance(r, Error) for r in case.result])} - return success_from_bool(success), list(failures) + return ret, list(failures) # where junit results are left after sanitising: @@ -585,9 +638,7 @@ def run_build_script(manifest_dir: str, result = SKIP tries_left = 3 - print(f"::group::{run.name}") - printc(ANSI_BOLD, f"-----------[ start test {run.name} ]-----------") - sys.stdout.flush() + AnsiPrinter.step_start("test", run.name) while tries_left > 0: tries_left -= 1 @@ -614,9 +665,9 @@ def run_build_script(manifest_dir: str, break if result == FAILURE: - printc(ANSI_RED, ">>> command failed, aborting.") + AnsiPrinter.error(">>> command failed, aborting.") elif result == SKIP: - printc(ANSI_YELLOW, ">>> skipping this test.") + AnsiPrinter.skip(">>> skipping this test.") # run final script tasks even in case of failure, but not for SKIP if result != SKIP: @@ -639,36 +690,32 @@ def run_build_script(manifest_dir: str, try: result, failures = summarise_junit(junit_file) except IOError: - printc(ANSI_RED, f"Error reading {junit_file}") + AnsiPrinter.error(f"Error reading {junit_file}") result = FAILURE except: - printc(ANSI_RED, f"Error parsing {junit_file}") + AnsiPrinter.error(f"Error parsing {junit_file}") result = FAILURE if result == REPEAT and tries_left > 0: - printc(ANSI_YELLOW, ">>> command failed, repeating test.") + AnsiPrinter.warn(">>> command failed, repeating test.") elif result == REPEAT and tries_left == 0: result = FAILURE - printc(ANSI_RED, ">>> command failed, no tries left.") + AnsiPrinter.error(">>> command failed, no tries left.") if result != REPEAT: break - printc(ANSI_BOLD, f"-----------[ end test {run.name} ]-----------") - print("::endgroup::") + AnsiPrinter.step_end("test", run.name, result) # after group, so that it's easier to scan for failed jobs - if result == SUCCESS: - printc(ANSI_GREEN, f"{run.name} succeeded") - elif result == SKIP: - printc(ANSI_YELLOW, f"{run.name} skipped") - elif result == FAILURE: - printc(ANSI_RED, f"{run.name} FAILED") + if result == FAILURE: if failures != []: max_print = 10 - printc(ANSI_RED, "Failed cases: " + ", ".join(failures[:max_print]) + - (" ..." if len(failures) > max_print else "")) + AnsiPrinter.error("Failed cases: " + ", ".join(failures[:max_print]) + + (" ..." 
if len(failures) > max_print else "")) + elif result in [SUCCESS, SKIP]: + pass # AnsiPrinter.step_end(() has printed everything else: - printc(ANSI_RED, f"{run.name} with REPEAT at end of test, we should not see this.") + AnsiPrinter.error(f"{run.name} with REPEAT at end of test, we should not see this.") print("") sys.stdout.flush() @@ -716,7 +763,7 @@ def build_for_platform(platform, default={}): return Build({platform: the_build}) -def build_for_variant(base_build: Build, variant, filter_fun=lambda x: True) -> Optional[Build]: +def build_for_variant(base_build: Build, variant, filter_fun=lambda x: True) -> Build: """Make a build definition from a supplied base build and a build variant. Optionally takes a filter/validation function to reject specific build @@ -731,11 +778,10 @@ def build_for_variant(base_build: Build, variant, filter_fun=lambda x: True) -> build.name = build.name + "_" + variant_name(variant) mode = var_dict.get("mode") or build.get_mode() - if mode in build.get_platform().modes: - build.mode = mode - else: + if mode not in build.get_platform().modes: return None + build.mode = mode # build.mode is now unique, more settings could apply build.update_settings() @@ -776,7 +822,7 @@ def build_for_variant(base_build: Build, variant, filter_fun=lambda x: True) -> def get_env_filters() -> list: """Process input env variables and return a build filter (list of dict)""" - def get(var: str) -> Optional[str]: + def get(var: str) -> Union[str, None]: return os.environ.get('INPUT_' + var.upper()) def to_list(string: str) -> list: @@ -790,7 +836,7 @@ def to_list(string: str) -> list: return [filter] -def filtered(build: Build, build_filters: dict) -> Optional[Build]: +def filtered(build: Build, build_filters: dict) -> Build: """Return build if build matches filter criteria, otherwise None.""" def match_dict(build: Build, f): @@ -858,8 +904,8 @@ def match_dict(build: Build, f): return None -def load_builds(file_name: Optional[str], filter_fun=lambda x: True, - yml: Optional[dict] = None) -> List[Build]: +def load_builds(file_name: str, filter_fun=lambda x: True, + yml: dict = None) -> list[Build]: """Load a list of build definitions from yaml. Use provided yaml dict, or if None, load from file. One of file_name, yml @@ -868,7 +914,7 @@ def load_builds(file_name: Optional[str], filter_fun=lambda x: True, Applies defaults, variants, and build-filter from the yaml file. 
Takes an optional filtering function for removing unwanted builds.""" - yml = yml or load_yaml(file_name) + yml = yml or platforms.load_yaml(file_name) default_build = yml.get("default", {}) build_filters = yml.get("build-filter", []) @@ -877,7 +923,7 @@ def load_builds(file_name: Optional[str], filter_fun=lambda x: True, yml_builds = yml.get("builds", []) if yml_builds == []: - base_builds = [build_for_platform(p, default_build) for p in platforms.keys()] + base_builds = [build_for_platform(p, default_build) for p in platforms.platforms.keys()] else: base_builds = [Build(b, default_build) for b in yml_builds] @@ -917,12 +963,13 @@ def run_builds(builds: list, run_fun) -> int: results[run_fun(manifest_dir, build)].append(build.name) no_failures = results[FAILURE] == [] - printc(ANSI_GREEN if no_failures else "", "Successful tests: " + ", ".join(results[SUCCESS])) + AnsiPrinter.printc(AnsiPrinter.ANSI_GREEN if no_failures else "", + "Successful tests: " + ", ".join(results[SUCCESS])) if results[SKIP] != []: print() - printc(ANSI_YELLOW, "SKIPPED tests: " + ", ".join(results[SKIP])) + AnsiPrinter.skip("SKIPPED tests: " + ", ".join(results[SKIP])) if results[FAILURE] != []: print() - printc(ANSI_RED, "FAILED tests: " + ", ".join(results[FAILURE])) + AnsiPrinter.error("FAILED tests: " + ", ".join(results[FAILURE])) return 0 if no_failures else 1 diff --git a/seL4-platforms/platforms.py b/seL4-platforms/platforms.py index 31a231d8..a9279a7a 100644 --- a/seL4-platforms/platforms.py +++ b/seL4-platforms/platforms.py @@ -14,11 +14,12 @@ modes, platforms, a list of unsupported platforms, and a list of named machines. """ +from typing import Union from io import StringIO -from typing import Optional -from pprint import pprint -import yaml import os +import yaml +from pprint import pprint + # exported names: __all__ = [ @@ -139,7 +140,7 @@ def can_aarch_hyp_64(self) -> bool: """Does the platform support ARM_HYP in mode 64?""" return 64 in self.aarch_hyp - def get_mode(self) -> Optional[int]: + def get_mode(self) -> Union[int, None]: """Return mode (32/64) of this platform if unique, otherwise None""" if len(self.modes) == 1: return self.modes[0] @@ -194,7 +195,7 @@ def getISA(self, mode: int) -> str: return self.march.capitalize() -def load_yaml(file_name): +def load_yaml(file_name: str): """Load a yaml file""" with open(file_name, 'r') as file: return yaml.safe_load(file) @@ -211,7 +212,8 @@ def gh_output(assgn: str): # module init: -_yaml_platforms = load_yaml(os.path.dirname(__file__) + "/platforms.yml") +_platforms_yaml_file = os.path.join(os.path.dirname(__file__), "platforms.yml") +_yaml_platforms = load_yaml(_platforms_yaml_file) all_architectures = _yaml_platforms["architectures"] all_modes = _yaml_platforms["modes"] @@ -227,8 +229,9 @@ def gh_output(assgn: str): if not platforms.get(p): print(f"Warning: unknown platform '{p}' in mcs_unsupported list") + # if called as main, dump info: -if __name__ == '__main__': +def main(argv: list) -> int: print("\n# Architectures:") pprint(all_architectures) @@ -250,3 +253,8 @@ def sup(p: Platform) -> str: print("\n# all sim:") pprint([p.name for p in platforms.values() if p.has_simulation]) + return 0 + + +if __name__ == '__main__': + sys.exit(main(sys.argv)) diff --git a/sel4bench/build.py b/sel4bench/build.py index aac89d94..53b7d6a3 100644 --- a/sel4bench/build.py +++ b/sel4bench/build.py @@ -8,33 +8,33 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. 
""" +import sys +import os +import subprocess +import argparse +import json +import time +from datetime import datetime + from builds import Build, Run, run_build_script, run_builds, load_builds, load_yaml from builds import release_mq_locks, filtered, get_env_filters, printc, ANSI_RED from builds import SKIP, SUCCESS, REPEAT, FAILURE from pprint import pprint -from typing import List, Optional - -from datetime import datetime - -import json -import os -import sys -import subprocess -import time def adjust_build_settings(build: Build): - if 'BAMBOO' in build.settings: - del build.settings['BAMBOO'] # not used in this build, avoid warning - # see discussion on https://github.com/seL4/sel4bench/pull/20 for hifive exclusion if build.is_smp() or build.get_platform().name == 'HIFIVE': build.settings['HARDWARE'] = 'FALSE' build.settings['FAULT'] = 'FALSE' + # remove parameters from setting that CMake does not use and thus would + # raise a nasty warning + del build.settings['BAMBOO'] + -def hw_build(manifest_dir: str, build: Build): +def hw_build(manifest_dir: str, build: Build) -> int: """Do one hardware build.""" adjust_build_settings(build) @@ -68,7 +68,7 @@ def extract_json(results: str, run: Run) -> int: return SUCCESS if res.returncode == 0 else REPEAT -def hw_run(manifest_dir: str, run: Run): +def hw_run(manifest_dir: str, run: Run) -> int: """Run one hardware test.""" if run.build.is_disabled(): @@ -118,7 +118,7 @@ def build_filter(build: Build) -> bool: return True -def make_runs(builds: List[Build]) -> List[Run]: +def make_runs(builds: list[Build]) -> list[Run]: """Split PC99 builds into runs for haswell3 and skylake, no changes to the rest""" # could filter more generically, but we're really only interested in REQ here, @@ -141,7 +141,7 @@ def make_runs(builds: List[Build]) -> List[Run]: return runs -def get_results(run: Run) -> List[float]: +def get_results(run: Run) -> list[float]: """Get the benchmark results from JSON for a specific run.""" with open(f"{run.name}.json") as f: @@ -185,7 +185,7 @@ def get_results(run: Run) -> List[float]: return [irq_invoke, irq_invoke_s, ipc_call, ipc_call_s, ipc_reply, ipc_reply_s, notify, notify_s] -def get_run(runs: List[Run], name: str) -> Optional[Run]: +def get_run(runs: list[Run], name: str) -> Run: """Get a run by name.""" for run in runs: @@ -196,7 +196,7 @@ def get_run(runs: List[Run], name: str) -> Optional[Run]: return None -def gen_web(runs: List[Run], yml, file_name: str): +def gen_web(runs: list[Run], yml, file_name: str): """Generate web page for benchmark results according to the set defined in builds.yml""" manifest_sha = os.getenv('INPUT_MANIFEST_SHA') @@ -355,9 +355,18 @@ def gen_web(runs: List[Run], yml, file_name: str): f.write('.
') -# If called as main, run all builds from builds.yml -if __name__ == '__main__': - yml = load_yaml(os.path.dirname(__file__) + "/builds.yml") +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--hw', action='store_true') + g.add_argument('--post', action='store_true') + g.add_argument('--web', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(argv[1:]) + + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + yml = load_yaml(builds_yaml_file) builds = load_builds(None, filter_fun=build_filter, yml=yml) # add additional builds; run only env filter, trusting that manual builds @@ -367,19 +376,24 @@ def gen_web(runs: List[Run], yml, file_name: str): more_builds = [Build(b, default_build) for b in yml.get("more_builds", [])] builds.extend([b for b in more_builds if b and filtered(b, env_filters)]) - if len(sys.argv) > 1 and sys.argv[1] == '--dump': + if args.dump: pprint(builds) - sys.exit(0) + return 0 - if len(sys.argv) > 1 and sys.argv[1] == '--hw': - sys.exit(run_builds(make_runs(builds), hw_run)) + if args.hw: + return run_builds(make_runs(builds), hw_run) - if len(sys.argv) > 1 and sys.argv[1] == '--post': + if args.post: release_mq_locks(builds) - sys.exit(0) + return 0 - if len(sys.argv) > 1 and sys.argv[1] == '--web': + if args.web: gen_web(make_runs(builds), yml, "index.html") - sys.exit(0) + return 0 + + # perform args.build as default + return run_builds(builds, hw_build) - sys.exit(run_builds(builds, hw_build)) + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/sel4test-hw/build.py b/sel4test-hw/build.py index 4682b4ac..6a49f2b7 100644 --- a/sel4test-hw/build.py +++ b/sel4test-hw/build.py @@ -8,19 +8,18 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. 
""" -from builds import Build, run_build_script, run_builds, load_builds, junit_results -from builds import release_mq_locks, SKIP -from platforms import Platform, gh_output - -from pprint import pprint -from typing import List - -import json -import os import sys +import os +import argparse +import json +import itertools + +import builds +import platforms +import pprint -def hw_build(manifest_dir: str, build: Build): +def hw_build(manifest_dir: str, build: builds.Build) -> int: """Run one hardware build.""" if build.get_platform().name == "RPI4": @@ -35,29 +34,29 @@ def hw_build(manifest_dir: str, build: Build): ["cp", "kernel/kernel.elf", f"../{build.name}-kernel.elf"] ] - return run_build_script(manifest_dir, build, script) + return builds.run_build_script(manifest_dir, build, script) -def hw_run(manifest_dir: str, build: Build): +def hw_run(manifest_dir: str, build: builds.Build) -> int: """Run one hardware test.""" if build.is_disabled(): print(f"Build {build.name} disabled, skipping.") - return SKIP + return builds.SKIP - script, final = build.hw_run(junit_results) + script, final = build.hw_run(builds.junit_results) - return run_build_script(manifest_dir, build, script, final_script=final, junit=True) + return builds.run_build_script(manifest_dir, build, script, final_script=final, junit=True) -def build_filter(build: Build) -> bool: +def build_filter(build: builds.Build) -> bool: plat = build.get_platform() if plat.no_hw_build: return False if plat.arch == 'arm': - # Bamboo says: don't build release for hikey when in aarch64 arm_hyp mode + # ToDo: why is release for hikey in aarch64 arm_hyp mode is not supported if build.is_hyp() and build.get_mode() == 64 and build.is_release() and \ plat.name == 'HIKEY': return False @@ -85,7 +84,7 @@ def build_filter(build: Build) -> bool: return False if plat.arch == 'x86': - # Bamboo config says no VTX for SMP or verification + # ToDo: explant why we don't do VTX for SMP or verification if build.is_hyp() and (build.is_smp() or build.is_verification()): return False @@ -101,58 +100,66 @@ def build_filter(build: Build) -> bool: return True -def to_json(builds: List[Build]) -> str: - """Return a GitHub build matrix per enabled hardware platform as GitHub output assignment.""" +def gh_output_matrix(param_name: str, build_list: list[builds.Build]) -> None: + matrix_builds = [] + # Loop over all the different platforms of the build list. Using + # set-comprehension " { ... for ... } " instead of list-comprehension + # " [ ... for ... ] " eliminates duplicates automatically. 
+ for plat in {b.get_platform() for b in build_list}: - def run_for_plat(plat: Platform) -> List[dict]: + # ignore all platforms that can't tested or not even be built if plat.no_hw_test or plat.no_hw_build: - return [] + continue - # separate runs for each compiler on arm - if plat.arch == 'arm': - return [ - {"platform": plat.name, "march": plat.march, "compiler": "gcc"}, - {"platform": plat.name, "march": plat.march, "compiler": "clang"}, - ] + variants = {"compiler": ["gcc", "clang"]} + if (plat.arch == 'x86'): + variants["mode"] = plat.modes - if plat.arch == 'riscv': - return [ - {"platform": plat.name, "march": plat.march, "compiler": "gcc"}, - {"platform": plat.name, "march": plat.march, "compiler": "clang"}, - ] + # create builds for all combination from the variants matrix + for vals in itertools.product(*(variants.values())): + build_variant = {"platform": plat.name, + "march": plat.march, + **dict(zip(variants.keys(), vals)) + } + matrix_builds.append(build_variant) - # separate runs for each compiler + mode on x86, because we have more machines available - if plat.arch == 'x86': - return [ - {"platform": plat.name, "march": plat.march, "compiler": "gcc", "mode": 32}, - {"platform": plat.name, "march": plat.march, "compiler": "clang", "mode": 32}, - {"platform": plat.name, "march": plat.march, "compiler": "gcc", "mode": 64}, - {"platform": plat.name, "march": plat.march, "compiler": "clang", "mode": 64}, - ] + # GitHub output assignment + matrix_json = json.dumps({"include": matrix_builds}) + platforms.gh_output(f"{param_name}={matrix_json}") - platforms = set([b.get_platform() for b in builds]) - matrix = {"include": [run for plat in platforms for run in run_for_plat(plat)]} - return "matrix=" + json.dumps(matrix) +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--matrix', action='store_true') + g.add_argument('--hw', action='store_true') + g.add_argument('--post', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + build_list = builds.load_builds(builds_yaml_file, filter_fun=build_filter) -# If called as main, run all builds from builds.yml -if __name__ == '__main__': - builds = load_builds(os.path.dirname(__file__) + "/builds.yml", filter_fun=build_filter) + if args.dump: + pprint.pprint(build_list) + return 0 - if len(sys.argv) > 1 and sys.argv[1] == '--dump': - pprint(builds) - sys.exit(0) + if args.matrix: + gh_output_matrix("matrix", build_list) + return 0 - if len(sys.argv) > 1 and sys.argv[1] == '--matrix': - gh_output(to_json(builds)) - sys.exit(0) + if args.hw: + builds.run_builds(build_list, hw_run) + return 0 - if len(sys.argv) > 1 and sys.argv[1] == '--hw': - sys.exit(run_builds(builds, hw_run)) + if args.post: + builds.release_mq_locks(build_list) + return 0 - if len(sys.argv) > 1 and sys.argv[1] == '--post': - release_mq_locks(builds) - sys.exit(0) + # perform args.build as default + return builds.run_builds(build_list, hw_build) - sys.exit(run_builds(builds, hw_build)) + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/sel4test-hw/builds.yml b/sel4test-hw/builds.yml index 1acac464..ea601201 100644 --- a/sel4test-hw/builds.yml +++ b/sel4test-hw/builds.yml @@ -5,6 +5,7 @@ --- default: + image_base_name: 'sel4test-driver' success: '' # see seltest-sim/builds.yml for full schema example 
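An editorial aside, not part of the patch: the new gh_output_matrix() in sel4test-hw/build.py above replaces the hand-written per-compiler/per-mode lists with an itertools.product expansion of a small variants dict. A minimal sketch of that expansion, assuming a hypothetical x86 entry (the real platform name and march values come from platforms.yml at runtime):

import itertools
import json

# variant axes as in gh_output_matrix(): every platform gets both compilers,
# x86 platforms additionally vary the mode
variants = {"compiler": ["gcc", "clang"], "mode": [32, 64]}

matrix_builds = [
    {"platform": "PC99", "march": "nehalem",   # placeholder example values
     **dict(zip(variants.keys(), vals))}
    for vals in itertools.product(*variants.values())
]

# gh_output_matrix() would emit this as "matrix=<json>" via platforms.gh_output()
print(json.dumps({"include": matrix_builds}, indent=2))

For an x86 platform this yields the same four gcc/clang x 32/64 entries that the removed run_for_plat() spelled out by hand, so adding a variant axis no longer means enumerating every combination.
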
diff --git a/sel4test-sim/build.py b/sel4test-sim/build.py index 2037bb65..57852d7e 100644 --- a/sel4test-sim/build.py +++ b/sel4test-sim/build.py @@ -8,14 +8,15 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. """ -from builds import Build, run_build_script, run_builds, load_builds, junit_results -from pprint import pprint - -import os import sys +import os +import argparse +import builds +import pprint -def run_simulation(manifest_dir: str, build: Build): + +def run_simulation(manifest_dir: str, build: builds.Build) -> int: """Run one simulation build and test.""" expect = '"%s" {exit 0} timeout {exit 1}' % build.success @@ -24,18 +25,29 @@ def run_simulation(manifest_dir: str, build: Build): ["../init-build.sh"] + build.settings_args(), ["ninja"], ["bash", "-c", - f"expect -c 'spawn ./simulate; set timeout 1200; expect {expect}' | tee {junit_results}"] + f"expect -c 'spawn ./simulate; set timeout 1200; expect {expect}' | tee {builds.junit_results}"] ] - return run_build_script(manifest_dir, build, script, junit=True) + return builds.run_build_script(manifest_dir, build, script, junit=True) -# If called as main, run all builds from builds.yml -if __name__ == '__main__': - builds = load_builds(os.path.dirname(__file__) + "/builds.yml") +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) - if len(sys.argv) > 1 and sys.argv[1] == '--dump': - pprint(builds) - sys.exit(0) + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + build_list = builds.load_builds(builds_yaml_file) - sys.exit(run_builds(builds, run_simulation)) + if args.dump: + pprint.pprint(build_list) + return 0 + + # perform args.build as default + return builds.run_builds(build_list, run_simulation) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/sel4test-sim/builds.yml b/sel4test-sim/builds.yml index 762a7a6d..d5dba89f 100644 --- a/sel4test-sim/builds.yml +++ b/sel4test-sim/builds.yml @@ -46,7 +46,7 @@ build-filter: arch: [arm, x86] - has_simulation: true arch: [riscv] - # Bamboo has no "release" simulation for RISCV, and it doesn't seem to work either: + # "release" simulation for RISCV does not work debug: [debug] diff --git a/tutorials/build.py b/tutorials/build.py index 62612267..355ce4d9 100644 --- a/tutorials/build.py +++ b/tutorials/build.py @@ -8,16 +8,17 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. """ +import sys +import os +import argparse +import json + from builds import Build, load_builds, run_build_script, run_builds, junit_results from platforms import load_yaml, gh_output from pprint import pprint -import json -import os -import sys - -def run_simulation(manifest_dir: str, build: Build): +def run_simulation(manifest_dir: str, build: Build) -> int: """Run one tutorial test.""" script = [ @@ -33,28 +34,37 @@ def build_filter(build: Build) -> bool: return build.app not in disable_app_for.get(build.get_platform().name, []) -def to_json(builds: list) -> str: - """Return a GitHub build matrix as GitHub output assignment. 
+def gh_output_matrix(param_name: str, build_list: list[builds.Build]) -> None: + matrix_builds = [{"name": b.name} for b in build_list] + # GitHub output assignment + matrix_json = json.dumps({"include": matrix_builds}) + platforms.gh_output(f"{param_name}={matrix_json}") - Basically just returns a list of build names that we can then - filter on.""" - matrix = {"include": [{"name": b.name} for b in builds]} - return "matrix=" + json.dumps(matrix) +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--matrix', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) - -# If called as main, run all builds from builds.yml -if __name__ == '__main__': - yml = load_yaml(os.path.dirname(__file__) + "/builds.yml") + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + yml = load_yaml(builds_yaml_file) disable_app_for = yml['disable_app_for'] - builds = load_builds(None, build_filter, yml) - if len(sys.argv) > 1 and sys.argv[1] == '--dump': + if args.dump: pprint(builds) - sys.exit(0) - elif len(sys.argv) > 1 and sys.argv[1] == '--matrix': - gh_output(to_json(builds)) - sys.exit(0) + return 0 + + if args.matrix: + gh_output_matrix("matrix", build_list) + return 0 - sys.exit(run_builds(builds, run_simulation)) + # perform args.build as default + return run_builds(builds, run_simulation) + + +if __name__ == '__main__': + sys.exit(main(sys.argv[1:])) diff --git a/webserver/build.py b/webserver/build.py index 946b7906..f015a96f 100644 --- a/webserver/build.py +++ b/webserver/build.py @@ -8,11 +8,12 @@ Expects seL4-platforms/ to be co-located or otherwise in the PYTHONPATH. """ -from builds import Build, run_build_script, run_builds, load_builds, release_mq_locks, SKIP, sim_script -from pprint import pprint - -import os import sys +import os +import argparse + +import builds +import pprint # See also builds.yml for how builds are split up in this test. We use the build @@ -21,13 +22,16 @@ # The only thing this really has in common with a "Build" is the "name" field. 
-def run_build(manifest_dir: str, build: Build): +def run_build(manifest_dir: str, build: builds.Build) -> int: """Run one seL4 web server app test.""" plat = build.get_platform() build.files = plat.image_names(build.get_mode(), "capdl-loader") - del build.settings['BAMBOO'] # not used in this test, avoid warning + + # remove parameters from setting that CMake does not use and thus would + # raise a nasty warning + del build.settings['BAMBOO'] script = [ ["../init-build.sh"] + build.settings_args(), @@ -36,37 +40,52 @@ def run_build(manifest_dir: str, build: Build): ] if plat.has_simulation and plat.name != 'PC99': - script.append(sim_script(build.success)) + script.append(builds.sim_script(build.success)) - return run_build_script(manifest_dir, build, script) + return builds.run_build_script(manifest_dir, build, script) -def hw_run(manifest_dir: str, build: Build): +def hw_run(manifest_dir: str, build: builds.Build) -> int: """Run one hardware test.""" if build.is_disabled(): print(f"Build {build.name} disabled, skipping.") - return SKIP + return builds.SKIP plat = build.get_platform() build.files = plat.image_names(build.get_mode(), "capdl-loader") script, final = build.hw_run('log.txt') - return run_build_script(manifest_dir, build, script, final_script=final) + return builds.run_build_script(manifest_dir, build, script, final_script=final) + + +def main(params: list) -> int: + parser = argparse.ArgumentParser() + g = parser.add_mutually_exclusive_group() + g.add_argument('--dump', action='store_true') + g.add_argument('--hw', action='store_true') + g.add_argument('--post', action='store_true') + g.add_argument('--build', action='store_true') + args = parser.parse_args(params) + + builds_yaml_file = os.path.join(os.path.dirname(__file__), "builds.yml") + build_list = builds.load_builds(builds_yaml_file) + + if args.dump: + pprint.pprint(build_list) + return 0 + + if args.hw: + return builds.run_builds(build_list, hw_run) + + if args.post: + builds.release_mq_locks(build_list) + return 0 + + # perform args.build as default + return builds.run_builds(build_list, run_build) -# If called as main, run all builds from builds.yml if __name__ == '__main__': - builds = load_builds(os.path.dirname(__file__) + "/builds.yml") - - if len(sys.argv) > 1 and sys.argv[1] == '--dump': - pprint(builds) - sys.exit(0) - elif len(sys.argv) > 1 and sys.argv[1] == '--hw': - sys.exit(run_builds(builds, hw_run)) - elif len(sys.argv) > 1 and sys.argv[1] == '--post': - release_mq_locks(builds) - sys.exit(0) - - sys.exit(run_builds(builds, run_build)) + sys.exit(main(argv[1:]))
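
A closing usage sketch (editorial, not part of the patch): because each script now exposes main(params) -> int instead of branching on sys.argv under __main__, an action can also be driven in-process, for example for a quick local check. This assumes it is run from the repository root with the scripts' dependencies installed; directory and flag names are taken from the diff above.

import sys

# make builds.py / platforms.py importable, as the module docstrings expect
sys.path.insert(0, "seL4-platforms")
sys.path.insert(0, "sel4test-sim")

import build  # sel4test-sim/build.py

if __name__ == '__main__':
    # --dump pretty-prints the parsed build list and returns 0;
    # the default (--build or no flag) runs the simulation builds instead
    sys.exit(build.main(["--dump"]))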