diff --git a/.github/workflows/bench-reports.yml b/.github/workflows/bench-reports.yml
index 1d3f2844d6..cefc652fc3 100644
--- a/.github/workflows/bench-reports.yml
+++ b/.github/workflows/bench-reports.yml
@@ -9,8 +9,7 @@ env:
   PYTHON_VERSION: "3.11"
   RUST_TOOLCHAIN_VERSION: "1.80"
 
-jobs:
-  runBenchmark:
+jobs: runBenchmark:
     runs-on: ubuntu-latest
     name: run benchmark
     permissions:
@@ -28,13 +27,10 @@ jobs:
       - name: setup cargo criterion
         run: cargo install cargo-criterion
       - name: run benching script
-        run: ./bench.sh
+        run: ./build.py --ci-bench
      - name: preserve bench artifacts
        uses: actions/upload-artifact@v4
        with:
          name: benchmarks
-          path: |
-            target/criterion/**/*
-            week_ago.json
-            latest.json
+          path: "*.json"
 
diff --git a/bench.sh b/bench.sh
deleted file mode 100755
index e2cb6bf68b..0000000000
--- a/bench.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-branch="alex/benching"
-echo $(git rev-list --since="1 day ago" --pretty='format:%ad__%h' --date=short $branch | awk 'NR%2==0')
-for date_and_commit in $(git rev-list --since="1 week ago" --pretty='format:%ad__%h' --date=short $branch | awk 'NR%2==0')
-do
-  echo "benching commit" $date_and_commit
-  cargo criterion --message-format=json --history-id $date_and_commit > $date_and_commit.json
-done
-
diff --git a/build.py b/build.py
index ae188c9c65..8e96157e8f 100755
--- a/build.py
+++ b/build.py
@@ -68,6 +68,13 @@
     help="Build and run the integration tests (default is --no-integration-tests)",
 )
 
+parser.add_argument(
+    "--ci-bench",
+    action=argparse.BooleanOptionalAction,
+    default=False,
+    help="Run the benchmarking script that is run in CI (default is --no-ci-bench)",
+)
+
 args = parser.parse_args()
 
 if args.check_prereqs:
@@ -83,6 +90,7 @@
     and not args.play
     and not args.vscode
     and not args.jupyterlab
+    and not args.ci_bench
 )
 build_cli = build_all or args.cli
 build_pip = build_all or args.pip
@@ -92,6 +100,7 @@
 build_play = build_all or args.play
 build_vscode = build_all or args.vscode
 build_jupyterlab = build_all or args.jupyterlab
+ci_bench = args.ci_bench
 
 # JavaScript projects and eslint, prettier depend on npm_install
 # However the JupyterLab extension uses yarn in a separate workspace
@@ -291,6 +300,28 @@ def run_python_integration_tests(cwd, interpreter):
     subprocess.run(command_args, check=True, text=True, cwd=cwd)
 
 
+def run_ci_historic_benchmark():
+    branch = "alex/benching"
+    output = subprocess.check_output(
+        ["git", "rev-list", "--since=1 day ago", "--pretty=format:%ad__%h", "--date=short", branch]
+    ).decode("utf-8")
+    print('\n'.join([line for i, line in enumerate(output.split('\n')) if i % 2 == 1]))
+
+    output = subprocess.check_output(
+        ["git", "rev-list", "--since=1 week ago", "--pretty=format:%ad__%h", "--date=short", branch]
+    ).decode("utf-8")
+    date_and_commits = [line for i, line in enumerate(output.split('\n')) if i % 2 == 1]
+
+    for date_and_commit in date_and_commits:
+        print("benching commit", date_and_commit)
+        result = subprocess.run(
+            ["cargo", "criterion", "--message-format=json", "--history-id", date_and_commit],
+            capture_output=True,
+            text=True
+        )
+        with open(f"{date_and_commit}.json", "w") as f:
+            f.write(result.stdout)
+
 if build_pip:
     step_start("Building the pip package")
 
@@ -529,3 +560,8 @@
     for test_project_dir in test_projects_directories:
         run_python_tests(test_project_dir, python_bin)
     step_end()
+
+if ci_bench:
+    step_start("Running CI benchmarking script")
+    run_ci_historic_benchmark()
+    step_end()