From 1e275c0a46d10ccd07e35658001918aa850d456e Mon Sep 17 00:00:00 2001
From: reuben olinsky
Date: Sun, 2 Feb 2025 22:46:59 -0800
Subject: [PATCH] test: run bash-completion tests in PR/CI flows (#292)

---
 .github/workflows/ci.yaml           | 68 ++++++++++++++++++++++++++++-
 scripts/summarize-pytest-results.py | 48 ++++++++++++++++++++
 2 files changed, 115 insertions(+), 1 deletion(-)
 create mode 100755 scripts/summarize-pytest-results.py

diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 1d1759bc..149e2f51 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -368,6 +368,71 @@ jobs:
             main/benchmarks.txt
             benchmark-results.md
 
+  # Run bash-completion test suite
+  bash-completion-tests:
+    name: "External tests / bash-completion test suite"
+    runs-on: ubuntu-latest
+    needs: build
+    steps:
+      - name: Checkout brush
+        uses: actions/checkout@v4
+        with:
+          path: "brush"
+
+      - name: Checkout bash-completion
+        uses: actions/checkout@v4
+        with:
+          repository: "scop/bash-completion"
+          ref: "2.15.0"
+          path: "bash-completion"
+
+      - name: Download prebuilt brush binaries
+        uses: actions/download-artifact@v4
+        with:
+          name: "binaries-x86_64-linux"
+          path: "binaries"
+
+      - name: Setup downloads
+        run: |
+          chmod +x binaries/*
+          ls -l binaries
+
+      - name: Install prerequisites for running tests
+        run: |
+          set -x
+          sudo apt-get update -y
+          sudo apt-get install -y python3
+          python3 -m pip install --user pytest pytest-xdist pytest-md-report pytest-json-report
+
+      - name: "Run test suite (oracle)"
+        working-directory: bash-completion/test
+        run: |
+          pytest -n 128 --no-summary ./t || true
+
+      - name: "Run test suite (brush)"
+        env:
+          BASH_COMPLETION_TEST_BASH: ${{ github.workspace }}/binaries/brush --noprofile --input-backend=basic
+        working-directory: bash-completion/test
+        run: |
+          pytest -n 128 \
+            --json-report \
+            --json-report-file=${{ github.workspace }}/test-results-bash-completion.json \
+            ./t || true
+
+      - name: "Generate report summary"
+        run: |
+          python3 brush/scripts/summarize-pytest-results.py \
+            -r ${{ github.workspace }}/test-results-bash-completion.json \
+            --title="Test Summary: bash-completion test suite" \
+            >${{ github.workspace }}/test-results-bash-completion.md
+
+      - name: Upload test report
+        uses: actions/upload-artifact@v4
+        with:
+          name: test-reports-bash-completion
+          path: |
+            test-results-bash-completion.md
+
   # Test release binary on a variety of OS platforms.
   os-tests:
     strategy:
@@ -396,7 +461,7 @@ jobs:
         with:
           path: sources
 
-      - name: Download binaries
+      - name: Download prebuilt brush binaries
         uses: actions/download-artifact@v4
         with:
           name: binaries-x86_64-linux
@@ -421,6 +486,7 @@ jobs:
         run: ${{ matrix.prereqs_command }}
 
       - name: Run tests
+        shell: bash
         run: |
           export BRUSH_PATH=$PWD/binaries/brush
           export BRUSH_COMPAT_TEST_CASES=$PWD/sources/brush-shell/tests/cases
diff --git a/scripts/summarize-pytest-results.py b/scripts/summarize-pytest-results.py
new file mode 100755
index 00000000..0626f00e
--- /dev/null
+++ b/scripts/summarize-pytest-results.py
@@ -0,0 +1,48 @@
+#!/usr/bin/python3
+import argparse
+import json
+
+parser = argparse.ArgumentParser(description='Summarize pytest results')
+parser.add_argument("-r", "--results", dest="results_file_path", type=str, required=True, help="Path to .json pytest results file")
+parser.add_argument("--title", dest="title", type=str, default="Pytest results", help="Title to display")
+
+args = parser.parse_args()
+
+with open(args.results_file_path, "r") as results_file:
+    results = json.load(results_file)
+
+summary = results["summary"]
+
+error_count = summary.get("error") or 0
+fail_count = summary.get("failed") or 0
+pass_count = summary.get("passed") or 0
+skip_count = summary.get("skipped") or 0
+expected_fail_count = summary.get("xfailed") or 0
+unexpected_pass_count = summary.get("xpassed") or 0
+
+total_count = summary.get("total") or 0
+collected_count = summary.get("collected") or 0
+deselected_count = summary.get("deselected") or 0
+
+#
+# Output
+#
+
+print(f"# {args.title}")
+
+print(f"| Outcome | Count | Percentage |")
+print(f"| ------------------ | ----------------------: | ---------: |")
+print(f"| ✅ Pass | {pass_count} | {pass_count * 100 / total_count:.2f} |")
+
+if error_count > 0:
+    print(f"| ❗️ Error | {error_count} | {error_count * 100 / total_count:.2f} |")
+if fail_count > 0:
+    print(f"| ❌ Fail | {fail_count} | {fail_count * 100 / total_count:.2f} |")
+if skip_count > 0:
+    print(f"| ⏩ Skip | {skip_count} | {skip_count * 100 / total_count:.2f} |")
+if expected_fail_count > 0:
+    print(f"| ❎ Expected Fail | {expected_fail_count} | {expected_fail_count * 100 / total_count:.2f} |")
+if unexpected_pass_count > 0:
+    print(f"| ✔️ Unexpected Pass | {unexpected_pass_count} | {unexpected_pass_count * 100 / total_count:.2f} |")
+
+print(f"| 📊 Total | {total_count} | {total_count * 100 / total_count:.2f} |")
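
For reviewers who want to try the new summarizer outside CI: it only needs a pytest JSON report, so a local run can mirror the workflow's "Run test suite (brush)" and "Generate report summary" steps along these lines. This is a sketch, not part of the patch; the results.json and summary.md file names are illustrative placeholders, and pytest-json-report must be installed as in the "Install prerequisites" step.

    # from a checkout of scop/bash-completion, in its test/ directory
    pytest -n 128 --json-report --json-report-file=results.json ./t || true
    # then, from the brush checkout
    python3 scripts/summarize-pytest-results.py -r results.json --title="Local bash-completion run" > summary.md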