diff --git a/.github/workflows/bencher_on_pr_or_fork.yml b/.github/workflows/bencher_on_pr_or_fork.yml
new file mode 100644
index 0000000000..48699a1295
--- /dev/null
+++ b/.github/workflows/bencher_on_pr_or_fork.yml
@@ -0,0 +1,34 @@
+name: Run benchmarks on PR
+
+on:
+  pull_request:
+    types: [opened, reopened, edited, synchronize]
+
+jobs:
+  benchmark_fork_pr_branch:
+    name: Run Fork PR Benchmarks
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - name: Rust Cache
+        uses: Swatinem/rust-cache@v2.7.7
+        with:
+          # reasoning: we want to cache xtask, most of the jobs in the matrix will be sped up a good bit thanks to that
+          save-if: ${{ github.ref == 'refs/heads/main' }}
+          cache-all-crates: true
+      - name: Run `cargo xtask init`
+        run: |
+          cargo xtask init
+      - name: Run `cargo xtask bench` and save results
+        run: |
+          cargo xtask bench > benchmark_results.txt
+      - name: Upload Benchmark Results
+        uses: actions/upload-artifact@v4
+        with:
+          name: benchmark_results.txt
+          path: ./benchmark_results.txt
+      - name: Upload GitHub Pull Request Event
+        uses: actions/upload-artifact@v4
+        with:
+          name: event.json
+          path: ${{ github.event_path }}
\ No newline at end of file
diff --git a/.github/workflows/bencher_on_pr_or_fork_closed.yml b/.github/workflows/bencher_on_pr_or_fork_closed.yml
new file mode 100644
index 0000000000..7107cdead4
--- /dev/null
+++ b/.github/workflows/bencher_on_pr_or_fork_closed.yml
@@ -0,0 +1,17 @@
+on:
+  pull_request_target:
+    types: [closed]
+
+jobs:
+  archive_fork_pr_branch:
+    name: Archive closed fork PR branch with Bencher
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: bencherdev/bencher@main
+      - name: Archive closed fork PR branch with Bencher
+        run: |
+          bencher archive \
+          --project bms \
+          --token '${{ secrets.BENCHER_API_TOKEN }}' \
+          --branch "$GITHUB_HEAD_REF"
\ No newline at end of file
diff --git a/.github/workflows/bencher_on_pr_or_fork_upload.yml b/.github/workflows/bencher_on_pr_or_fork_upload.yml
new file mode 100644
index 0000000000..38442c96ec
--- /dev/null
+++ b/.github/workflows/bencher_on_pr_or_fork_upload.yml
@@ -0,0 +1,52 @@
+name: Upload benchmarks to bencher
+
+on:
+  workflow_run:
+    workflows: [Run benchmarks on PR]
+    types: [completed]
+
+jobs:
+  track_fork_pr_branch:
+    if: github.event.workflow_run.conclusion == 'success'
+    runs-on: ubuntu-latest
+    env:
+      BENCHMARK_RESULTS: benchmark_results.txt
+      PR_EVENT: event.json
+    steps:
+      - name: Download Benchmark Results
+        uses: dawidd6/action-download-artifact@v6
+        with:
+          name: ${{ env.BENCHMARK_RESULTS }}
+          run_id: ${{ github.event.workflow_run.id }}
+      - name: Download PR Event
+        uses: dawidd6/action-download-artifact@v6
+        with:
+          name: ${{ env.PR_EVENT }}
+          run_id: ${{ github.event.workflow_run.id }}
+      - name: Export PR Event Data
+        uses: actions/github-script@v6
+        with:
+          script: |
+            let fs = require('fs');
+            let prEvent = JSON.parse(fs.readFileSync(process.env.PR_EVENT, {encoding: 'utf8'}));
+            core.exportVariable("PR_HEAD", prEvent.pull_request.head.ref);
+            core.exportVariable("PR_BASE", prEvent.pull_request.base.ref);
+            core.exportVariable("PR_BASE_SHA", prEvent.pull_request.base.sha);
+            core.exportVariable("PR_NUMBER", prEvent.number);
+      - uses: bencherdev/bencher@main
+      - name: Track Benchmarks with Bencher
+        run: |
+          bencher run \
+          --project bms \
+          --token '${{ secrets.BENCHER_API_TOKEN }}' \
+          --branch "$PR_HEAD" \
+          --start-point "$PR_BASE" \
+          --start-point-hash "$PR_BASE_SHA" \
+          --start-point-clone-thresholds \
+          --start-point-reset \
+          --testbed linux-gha \
+          --err \
+          --adapter rust_criterion \
+          --github-actions '${{ secrets.GITHUB_TOKEN }}' \
+          --ci-number "$PR_NUMBER" \
+          --file "$BENCHMARK_RESULTS"
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
index 3044400cce..57de792ae5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,6 +15,7 @@ include = ["readme.md", "/src", "/examples", "/assets", "LICENSE", "/badges"]
 [lib]
 name = "bevy_mod_scripting"
 path = "src/lib.rs"
+bench = false
 
 [package.metadata."docs.rs"]
 features = ["lua54", "rhai"]
diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs
index 76766cfe47..e2f394569a 100644
--- a/benches/benchmarks.rs
+++ b/benches/benchmarks.rs
@@ -1,9 +1,7 @@
-use std::path::PathBuf;
+use std::{path::PathBuf, time::Duration};
 
 use bevy::utils::HashMap;
-use criterion::{
-    criterion_group, criterion_main, measurement::Measurement, BenchmarkGroup, Criterion,
-};
+use criterion::{criterion_main, measurement::Measurement, BenchmarkGroup, Criterion};
 use script_integration_test_harness::{run_lua_benchmark, run_rhai_benchmark};
 use test_utils::{discover_all_tests, Test};
 
@@ -91,5 +89,10 @@ fn script_benchmarks(criterion: &mut Criterion) {
     }
 }
 
-criterion_group!(benches, script_benchmarks);
+pub fn benches() {
+    let mut criterion: criterion::Criterion<_> = (criterion::Criterion::default())
+        .configure_from_args()
+        .measurement_time(Duration::from_secs(10));
+    script_benchmarks(&mut criterion);
+}
 criterion_main!(benches);
diff --git a/crates/xtask/src/main.rs b/crates/xtask/src/main.rs
index f942866615..6e0dac98e2 100644
--- a/crates/xtask/src/main.rs
+++ b/crates/xtask/src/main.rs
@@ -9,7 +9,7 @@ use std::{
     ffi::{OsStr, OsString},
     io::Write,
     path::{Path, PathBuf},
-    process::Command,
+    process::{Command, Output},
     str::FromStr,
 };
 use strum::{IntoEnumIterator, VariantNames};
@@ -349,13 +349,16 @@ impl App {
             Xtasks::Install { binary } => {
                 cmd.arg("install").arg(binary.as_ref());
            }
-            Xtasks::Bench { publish } => {
-                cmd.arg("bench");
+            Xtasks::Bencher { publish } => {
+                cmd.arg("bencher");
 
                 if publish {
                     cmd.arg("--publish");
                 }
             }
+            Xtasks::Bench {} => {
+                cmd.arg("bench");
+            }
         }
 
         cmd
@@ -643,11 +646,13 @@ enum Xtasks {
     CiMatrix,
     /// Runs bencher in dry mode by default if not on the main branch
     /// To publish main branch defaults set publish mode to true
-    Bench {
+    Bencher {
         /// Publish the benchmarks when on main
         #[clap(long, default_value = "false", help = "Publish the benchmarks")]
         publish: bool,
     },
+    /// Runs criterion benchmarks, generates the json required to be published by bencher and generates an html performance report
+    Bench {},
 }
 
 #[derive(Serialize, Clone)]
@@ -691,8 +696,10 @@ impl Xtasks {
         let mut rows = Vec::default();
         for os in <Os as VariantNames>::VARIANTS {
             for row in output.iter() {
-                let step_should_run_on_main_os =
-                    matches!(row.subcmd, Xtasks::Build | Xtasks::Docs { .. });
+                let step_should_run_on_main_os = matches!(
+                    row.subcmd,
+                    Xtasks::Build | Xtasks::Docs { .. } | Xtasks::Bencher { .. }
+                );
                 let is_coverage_step = row.global_args.coverage;
 
                 if !os.is_main_os() && step_should_run_on_main_os {
@@ -723,7 +730,8 @@ impl Xtasks {
                 bevy_features,
             } => Self::codegen(app_settings, output_dir, bevy_features),
             Xtasks::Install { binary } => Self::install(app_settings, binary),
-            Xtasks::Bench { publish: execute } => Self::bench(app_settings, execute),
+            Xtasks::Bencher { publish } => Self::bencher(app_settings, publish),
+            Xtasks::Bench {} => Self::bench(app_settings),
         }?;
 
         Ok("".into())
@@ -811,7 +819,7 @@ impl Xtasks {
         context: &str,
         add_args: I,
         dir: Option<&Path>,
-    ) -> Result<()> {
+    ) -> Result<Output> {
         let coverage_mode = app_settings
             .coverage
             .then_some("with coverage")
@@ -878,7 +886,7 @@ impl Xtasks {
         let output = cmd.output().with_context(|| context.to_owned())?;
 
         match output.status.code() {
-            Some(0) => Ok(()),
+            Some(0) => Ok(output),
             _ => bail!(
                 "{} failed with exit code: {}. Features: {}",
                 context,
@@ -1223,7 +1231,26 @@ impl Xtasks {
         Ok(())
     }
 
-    fn bench(app_settings: GlobalArgs, execute: bool) -> Result<()> {
+    fn bench(app_settings: GlobalArgs) -> Result<()> {
+        Self::run_workspace_command(
+            // run with lua54 + rhai and the core/bevy binding features
+            &app_settings.with_features(Features::new(vec![
+                Feature::Lua54,
+                Feature::Rhai,
+                Feature::CoreFunctions,
+                Feature::BevyBindings,
+            ])),
+            "bench",
+            "Failed to run benchmarks",
+            Vec::<String>::default(),
+            None,
+        )
+        .with_context(|| "when executing criterion benchmarks")?;
+
+        Ok(())
+    }
+
+    fn bencher(app_settings: GlobalArgs, publish: bool) -> Result<()> {
         // // first of all figure out which branch we're on
         // // run // git rev-parse --abbrev-ref HEAD
         let workspace_dir = Self::workspace_dir(&app_settings).unwrap();
@@ -1277,13 +1304,13 @@ impl Xtasks {
             bencher_cmd.args(["--github-actions", token]);
         }
 
-        if !is_main || !execute {
+        if !is_main || !publish {
             bencher_cmd.args(["--dry-run"]);
         }
 
         bencher_cmd
             .args(["--adapter", "rust_criterion"])
-            .arg("cargo bench --features=lua54");
+            .arg("cargo xtask bench");
 
         log::info!("Running bencher command: {:?}", bencher_cmd);
 
@@ -1295,7 +1322,7 @@ impl Xtasks {
         }
 
         // if we're on linux and publishing and on main synch graphs
-        if os == "linux" && is_main && execute && github_token.is_some() {
+        if os == "linux" && is_main && publish && github_token.is_some() {
             Self::synch_bencher_graphs()?;
         }
 
@@ -1625,7 +1652,7 @@ impl Xtasks {
         // on non-main branches this will just dry run
         output.push(App {
             global_args: default_args.clone(),
-            subcmd: Xtasks::Bench { publish: true },
+            subcmd: Xtasks::Bencher { publish: true },
         });
 
         // and finally run tests with coverage