Skip to content

chore: run bencher on PR or fork #390

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 5 commits into from
Mar 24, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 34 additions & 0 deletions .github/workflows/bencher_on_pr_or_fork.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
# First half of the fork-PR benchmarking pipeline: runs the benchmark
# suite for every PR (including forks) and uploads the raw results plus
# the triggering event payload as artifacts. A separate workflow_run-
# triggered workflow picks these up and publishes them to Bencher — that
# split keeps secrets away from fork-controlled code.
name: Run benchmarks on PR

on:
  pull_request:
    # `edited` also fires when the PR's base branch changes, which can
    # invalidate previous results. NOTE(review): it additionally fires on
    # title/body edits, causing spurious benchmark runs — confirm this
    # trade-off is intended.
    types: [opened, reopened, edited, synchronize]

jobs:
  benchmark_fork_pr_branch:
    name: Run Fork PR Benchmarks
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Rust Cache
        uses: Swatinem/rust-cache@v2.7.7
        with:
          # reasoning: we want to cache xtask, most of the jobs in the matrix will be sped up a good bit thanks to that
          # NOTE(review): on `pull_request` events `github.ref` is
          # `refs/pull/<n>/merge`, so this condition is never true and this
          # workflow never *saves* a cache — it only restores caches
          # created by runs on main elsewhere. Confirm that is the intent.
          save-if: ${{ github.ref == 'refs/heads/main' }}
          cache-all-crates: true
      - name: Run `cargo xtask init`
        run: |
          cargo xtask init
      - name: Run `cargo xtask bench` and save results
        run: |
          cargo xtask bench > benchmark_results.txt
      - name: Upload Benchmark Results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark_results.txt
          path: ./benchmark_results.txt
      # The upload workflow needs the PR event payload (head/base refs,
      # PR number) to attribute the results; an artifact is the only way
      # to pass it across the workflow_run boundary.
      - name: Upload GitHub Pull Request Event
        uses: actions/upload-artifact@v4
        with:
          name: event.json
          path: ${{ github.event_path }}
17 changes: 17 additions & 0 deletions .github/workflows/bencher_on_pr_or_fork_closed.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# When a PR closes, archive its Bencher branch so stale (fork) branches
# do not accumulate in the Bencher project.
# Fix: added a top-level `name:` — the other bencher workflows have one,
# and unnamed workflows show up as a raw file path in the Actions UI.
name: Archive closed PR branch with Bencher

on:
  # pull_request_target runs in the context of the base repository so
  # `secrets.BENCHER_API_TOKEN` is available even for fork PRs. Only the
  # base branch's workflow definition executes and no PR-supplied code is
  # run here, so exposing the secret is safe.
  pull_request_target:
    types: [closed]

jobs:
  archive_fork_pr_branch:
    name: Archive closed fork PR branch with Bencher
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # Installs the `bencher` CLI used below.
      - uses: bencherdev/bencher@main
      - name: Archive closed fork PR branch with Bencher
        run: |
          bencher archive \
            --project bms \
            --token '${{ secrets.BENCHER_API_TOKEN }}' \
            --branch "$GITHUB_HEAD_REF"
52 changes: 52 additions & 0 deletions .github/workflows/bencher_on_pr_or_fork_upload.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# Second half of the fork-PR benchmarking pipeline: triggered when
# "Run benchmarks on PR" completes, downloads its artifacts, and uploads
# the results to Bencher. This runs in the base repository's context, so
# secrets are available without exposing them to fork-controlled code.
name: Upload benchmarks to bencher

on:
  workflow_run:
    workflows: [Run benchmarks on PR]
    types: [completed]

jobs:
  track_fork_pr_branch:
    # Only publish results when the benchmark run actually succeeded.
    if: github.event.workflow_run.conclusion == 'success'
    runs-on: ubuntu-latest
    env:
      # Artifact names produced by the benchmark workflow — must stay in
      # sync with bencher_on_pr_or_fork.yml.
      BENCHMARK_RESULTS: benchmark_results.txt
      PR_EVENT: event.json
    steps:
      # dawidd6/action-download-artifact can fetch artifacts from a
      # *different* workflow run (the stock download-artifact cannot).
      - name: Download Benchmark Results
        uses: dawidd6/action-download-artifact@v6
        with:
          name: ${{ env.BENCHMARK_RESULTS }}
          run_id: ${{ github.event.workflow_run.id }}
      - name: Download PR Event
        uses: dawidd6/action-download-artifact@v6
        with:
          name: ${{ env.PR_EVENT }}
          run_id: ${{ github.event.workflow_run.id }}
      # Recover PR metadata (head/base refs, base SHA, PR number) from the
      # uploaded event payload, since workflow_run events don't carry it.
      - name: Export PR Event Data
        uses: actions/github-script@v6
        with:
          script: |
            let fs = require('fs');
            let prEvent = JSON.parse(fs.readFileSync(process.env.PR_EVENT, {encoding: 'utf8'}));
            core.exportVariable("PR_HEAD", prEvent.pull_request.head.ref);
            core.exportVariable("PR_BASE", prEvent.pull_request.base.ref);
            core.exportVariable("PR_BASE_SHA", prEvent.pull_request.base.sha);
            core.exportVariable("PR_NUMBER", prEvent.number);
      # Installs the `bencher` CLI used below.
      - uses: bencherdev/bencher@main
      # Tracks the PR head branch against its base branch as start point;
      # --err makes the job fail when Bencher raises a regression alert.
      - name: Track Benchmarks with Bencher
        run: |
          bencher run \
            --project bms \
            --token '${{ secrets.BENCHER_API_TOKEN }}' \
            --branch "$PR_HEAD" \
            --start-point "$PR_BASE" \
            --start-point-hash "$PR_BASE_SHA" \
            --start-point-clone-thresholds \
            --start-point-reset \
            --testbed linux-gha \
            --err \
            --adapter rust_criterion \
            --github-actions '${{ secrets.GITHUB_TOKEN }}' \
            --ci-number "$PR_NUMBER" \
            --file "$BENCHMARK_RESULTS"
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ include = ["readme.md", "/src", "/examples", "/assets", "LICENSE", "/badges"]
[lib]
name = "bevy_mod_scripting"
path = "src/lib.rs"
bench = false

[package.metadata."docs.rs"]
features = ["lua54", "rhai"]
Expand Down
13 changes: 8 additions & 5 deletions benches/benchmarks.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,7 @@
use std::path::PathBuf;
use std::{path::PathBuf, time::Duration};

use bevy::utils::HashMap;
use criterion::{
criterion_group, criterion_main, measurement::Measurement, BenchmarkGroup, Criterion,
};
use criterion::{criterion_main, measurement::Measurement, BenchmarkGroup, Criterion};
use script_integration_test_harness::{run_lua_benchmark, run_rhai_benchmark};
use test_utils::{discover_all_tests, Test};

Expand Down Expand Up @@ -91,5 +89,10 @@ fn script_benchmarks(criterion: &mut Criterion) {
}
}

criterion_group!(benches, script_benchmarks);
pub fn benches() {
let mut criterion: criterion::Criterion<_> = (criterion::Criterion::default())
.configure_from_args()
.measurement_time(Duration::from_secs(10));
script_benchmarks(&mut criterion);
}
criterion_main!(benches);
55 changes: 41 additions & 14 deletions crates/xtask/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use std::{
ffi::{OsStr, OsString},
io::Write,
path::{Path, PathBuf},
process::Command,
process::{Command, Output},
str::FromStr,
};
use strum::{IntoEnumIterator, VariantNames};
Expand Down Expand Up @@ -349,13 +349,16 @@ impl App {
Xtasks::Install { binary } => {
cmd.arg("install").arg(binary.as_ref());
}
Xtasks::Bench { publish } => {
cmd.arg("bench");
Xtasks::Bencher { publish } => {
cmd.arg("bencher");

if publish {
cmd.arg("--publish");
}
}
Xtasks::Bench {} => {
cmd.arg("bench");
}
}

cmd
Expand Down Expand Up @@ -643,11 +646,13 @@ enum Xtasks {
CiMatrix,
/// Runs bencher in dry mode by default if not on the main branch
/// To publish main branch defaults set publish mode to true
Bench {
Bencher {
/// Publish the benchmarks when on main
#[clap(long, default_value = "false", help = "Publish the benchmarks")]
publish: bool,
},
/// Runs criterion benchmarks generates json required to be published by bencher and generates html performance report
Bench {},
}

#[derive(Serialize, Clone)]
Expand Down Expand Up @@ -691,8 +696,10 @@ impl Xtasks {
let mut rows = Vec::default();
for os in <CiOs as strum::VariantArray>::VARIANTS {
for row in output.iter() {
let step_should_run_on_main_os =
matches!(row.subcmd, Xtasks::Build | Xtasks::Docs { .. });
let step_should_run_on_main_os = matches!(
row.subcmd,
Xtasks::Build | Xtasks::Docs { .. } | Xtasks::Bencher { .. }
);
let is_coverage_step = row.global_args.coverage;

if !os.is_main_os() && step_should_run_on_main_os {
Expand Down Expand Up @@ -723,7 +730,8 @@ impl Xtasks {
bevy_features,
} => Self::codegen(app_settings, output_dir, bevy_features),
Xtasks::Install { binary } => Self::install(app_settings, binary),
Xtasks::Bench { publish: execute } => Self::bench(app_settings, execute),
Xtasks::Bencher { publish } => Self::bencher(app_settings, publish),
Xtasks::Bench {} => Self::bench(app_settings),
}?;

Ok("".into())
Expand Down Expand Up @@ -811,7 +819,7 @@ impl Xtasks {
context: &str,
add_args: I,
dir: Option<&Path>,
) -> Result<()> {
) -> Result<Output> {
let coverage_mode = app_settings
.coverage
.then_some("with coverage")
Expand Down Expand Up @@ -878,7 +886,7 @@ impl Xtasks {

let output = cmd.output().with_context(|| context.to_owned())?;
match output.status.code() {
Some(0) => Ok(()),
Some(0) => Ok(output),
_ => bail!(
"{} failed with exit code: {}. Features: {}",
context,
Expand Down Expand Up @@ -1223,7 +1231,26 @@ impl Xtasks {
Ok(())
}

fn bench(app_settings: GlobalArgs, execute: bool) -> Result<()> {
/// Runs the workspace `bench` command with a fixed feature set and
/// propagates any failure with added context.
fn bench(app_settings: GlobalArgs) -> Result<()> {
    Self::run_workspace_command(
        // benchmark with a fixed feature set: both script backends
        // (Lua 5.4 and Rhai) plus the core-function and Bevy-binding
        // features they exercise. (The earlier "just lua54" note was
        // stale — Rhai and more are enabled here.)
        &app_settings.with_features(Features::new(vec![
            Feature::Lua54,
            Feature::Rhai,
            Feature::CoreFunctions,
            Feature::BevyBindings,
        ])),
        "bench",
        "Failed to run benchmarks",
        // no extra arguments, run from the default workspace directory
        Vec::<String>::default(),
        None,
    )
    .with_context(|| "when executing criterion benchmarks")?;

    Ok(())
}

fn bencher(app_settings: GlobalArgs, publish: bool) -> Result<()> {
// // first of all figure out which branch we're on
// // run // git rev-parse --abbrev-ref HEAD
let workspace_dir = Self::workspace_dir(&app_settings).unwrap();
Expand Down Expand Up @@ -1277,13 +1304,13 @@ impl Xtasks {
bencher_cmd.args(["--github-actions", token]);
}

if !is_main || !execute {
if !is_main || !publish {
bencher_cmd.args(["--dry-run"]);
}

bencher_cmd
.args(["--adapter", "rust_criterion"])
.arg("cargo bench --features=lua54");
.arg("cargo xtask bench");

log::info!("Running bencher command: {:?}", bencher_cmd);

Expand All @@ -1295,7 +1322,7 @@ impl Xtasks {
}

// if we're on linux and publishing and on main synch graphs
if os == "linux" && is_main && execute && github_token.is_some() {
if os == "linux" && is_main && publish && github_token.is_some() {
Self::synch_bencher_graphs()?;
}

Expand Down Expand Up @@ -1625,7 +1652,7 @@ impl Xtasks {
// on non-main branches this will just dry run
output.push(App {
global_args: default_args.clone(),
subcmd: Xtasks::Bench { publish: true },
subcmd: Xtasks::Bencher { publish: true },
});

// and finally run tests with coverage
Expand Down
Loading