diff --git a/Cargo.toml b/Cargo.toml
index 57de792ae5..70738d6ee6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -53,6 +53,9 @@ rhai = ["bevy_mod_scripting_rhai", "bevy_mod_scripting_functions/rhai_bindings"]
 ## rune
 # rune = ["bevy_mod_scripting_rune"]
 
+### Profiling
+profile_with_tracy = ["bevy/trace_tracy"]
+
 [dependencies]
 bevy = { workspace = true }
 bevy_mod_scripting_core = { workspace = true }
@@ -85,6 +88,7 @@ ladfile_builder = { path = "crates/ladfile_builder", version = "0.2.6" }
 script_integration_test_harness = { workspace = true }
 test_utils = { workspace = true }
 libtest-mimic = "0.8"
+tracing-tracy = "0.11"
 
 [workspace]
 members = [
diff --git a/benches/benchmarks.rs b/benches/benchmarks.rs
index e2f394569a..cd7f4f6e24 100644
--- a/benches/benchmarks.rs
+++ b/benches/benchmarks.rs
@@ -1,14 +1,18 @@
-use std::{path::PathBuf, time::Duration};
-
-use bevy::utils::HashMap;
+use bevy::log::tracing_subscriber;
+use bevy::log::tracing_subscriber::layer::SubscriberExt;
+use bevy::utils::{tracing, HashMap};
 use criterion::{criterion_main, measurement::Measurement, BenchmarkGroup, Criterion};
 use script_integration_test_harness::{run_lua_benchmark, run_rhai_benchmark};
+use std::{path::PathBuf, sync::LazyLock, time::Duration};
 use test_utils::{discover_all_tests, Test};
 
 extern crate bevy_mod_scripting;
 extern crate script_integration_test_harness;
 extern crate test_utils;
 
+static ENABLE_PROFILING: LazyLock<bool> =
+    LazyLock::new(|| std::env::var("ENABLE_PROFILING").is_ok());
+
 pub trait BenchmarkExecutor {
     fn benchmark_group(&self) -> String;
     fn benchmark_name(&self) -> String;
@@ -89,10 +93,33 @@ fn script_benchmarks(criterion: &mut Criterion) {
     }
 }
 
+fn maybe_with_profiler(f: impl Fn(bool)) {
+    if *ENABLE_PROFILING {
+        println!("profiling enabled, make sure to run tracy. If using it across windows/WSL you can use something like `tracy-capture.exe -o output.tracy -a localhost` on windows");
+        // set global tracing subscriber so bevy doesn't set it itself first
+        let subscriber = tracing_subscriber::Registry::default();
+        let tracy_layer = tracing_tracy::TracyLayer::default();
+
+        let subscriber = subscriber.with(tracy_layer);
+
+        tracing::subscriber::set_global_default(subscriber).unwrap();
+
+        let _ = tracing_tracy::client::span!("test2");
+        tracing::info_span!("test");
+
+        f(true);
+    } else {
+        f(false);
+    }
+}
+
 pub fn benches() {
-    let mut criterion: criterion::Criterion<_> = (criterion::Criterion::default())
-        .configure_from_args()
-        .measurement_time(Duration::from_secs(10));
-    script_benchmarks(&mut criterion);
+    maybe_with_profiler(|_profiler| {
+        let mut criterion: criterion::Criterion<_> = (criterion::Criterion::default())
+            .configure_from_args()
+            .measurement_time(Duration::from_secs(10));
+
+        script_benchmarks(&mut criterion);
+    });
 }
 criterion_main!(benches);
diff --git a/crates/bevy_mod_scripting_core/src/bindings/function/script_function.rs b/crates/bevy_mod_scripting_core/src/bindings/function/script_function.rs
index c7d6061f2e..88200962e8 100644
--- a/crates/bevy_mod_scripting_core/src/bindings/function/script_function.rs
+++ b/crates/bevy_mod_scripting_core/src/bindings/function/script_function.rs
@@ -103,7 +103,7 @@ impl DynamicScriptFunction {
         args: I,
         context: FunctionCallContext,
     ) -> Result<ScriptValue, InteropError> {
-        profiling::scope!("Dynamic Call ", self.name().to_string());
+        profiling::scope!("Dynamic Call ", self.name().deref());
         let args = args.into_iter().collect::<VecDeque<ScriptValue>>();
         // should we be inlining call errors into the return value?
         let return_val = (self.func)(context, args);
@@ -159,7 +159,7 @@ impl DynamicScriptFunctionMut {
         args: I,
         context: FunctionCallContext,
     ) -> Result<ScriptValue, InteropError> {
-        profiling::scope!("Dynamic Call Mut", self.name().to_string());
+        profiling::scope!("Dynamic Call Mut", self.name().deref());
         let args = args.into_iter().collect::<VecDeque<ScriptValue>>();
         // should we be inlining call errors into the return value?
         let mut write = self.func.write();
diff --git a/crates/testing_crates/script_integration_test_harness/src/lib.rs b/crates/testing_crates/script_integration_test_harness/src/lib.rs
index 81209056eb..d9f79fae25 100644
--- a/crates/testing_crates/script_integration_test_harness/src/lib.rs
+++ b/crates/testing_crates/script_integration_test_harness/src/lib.rs
@@ -318,6 +318,7 @@ pub fn run_lua_benchmark(
     label: &str,
     criterion: &mut criterion::BenchmarkGroup<M>,
 ) -> Result<(), String> {
+    use bevy::utils::tracing;
     use bevy_mod_scripting_lua::mlua::Function;
 
     let plugin = make_test_lua_plugin();
@@ -333,6 +334,7 @@ pub fn run_lua_benchmark(
             if let Some(pre_bencher) = &pre_bencher {
                 pre_bencher.call::<()>(()).unwrap();
             }
+            tracing::info_span!("profiling_iter", label);
             c.iter(|| {
                 bencher.call::<()>(()).unwrap();
             })
@@ -348,6 +350,7 @@ pub fn run_rhai_benchmark(
     label: &str,
     criterion: &mut criterion::BenchmarkGroup<M>,
 ) -> Result<(), String> {
+    use bevy::utils::tracing;
     use bevy_mod_scripting_rhai::rhai::Dynamic;
 
     let plugin = make_test_rhai_plugin();
@@ -367,6 +370,8 @@ pub fn run_rhai_benchmark(
                 .call_fn::<Dynamic>(&mut ctxt.scope, &ctxt.ast, "pre_bench", ARGS)
                 .unwrap();
             }
+            tracing::info_span!("profiling_iter", label);
+
             c.iter(|| {
                 let _ = runtime
                     .call_fn::<Dynamic>(&mut ctxt.scope, &ctxt.ast, "bench", ARGS)
diff --git a/crates/testing_crates/test_utils/src/test_data.rs b/crates/testing_crates/test_utils/src/test_data.rs
index fea49718d7..b2c8e5d8c5 100644
--- a/crates/testing_crates/test_utils/src/test_data.rs
+++ b/crates/testing_crates/test_utils/src/test_data.rs
@@ -346,7 +346,7 @@ pub fn setup_integration_test(init: F)
         HierarchyPlugin,
         DiagnosticsPlugin,
         LogPlugin {
-            filter: "bevy_mod_scripting_core=debug".to_string(),
+            filter: "bevy_mod_scripting_core=trace".to_string(),
             ..Default::default()
         },
     ));
diff --git a/crates/xtask/src/main.rs b/crates/xtask/src/main.rs
index 9dc073f8ad..c3ad9fd391 100644
--- a/crates/xtask/src/main.rs
+++ b/crates/xtask/src/main.rs
@@ -51,8 +51,7 @@ enum Feature {
     // Rune,
 
     // Profiling
-    #[strum(serialize = "bevy/trace_tracy")]
-    Tracy,
+    ProfileWithTracy,
 }
 
 #[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, strum::EnumIter)]
@@ -101,10 +100,10 @@ impl IntoFeatureGroup for Feature {
             Feature::MluaAsync
             | Feature::MluaMacros
             | Feature::MluaSerialize
-            | Feature::UnsafeLuaModules
-            | Feature::Tracy => FeatureGroup::ForExternalCrate,
-            Feature::BevyBindings | Feature::CoreFunctions => FeatureGroup::BMSFeature,
-            // don't use wildcard here, we want to be explicit
+            | Feature::UnsafeLuaModules => FeatureGroup::ForExternalCrate,
+            Feature::BevyBindings | Feature::CoreFunctions | Feature::ProfileWithTracy => {
+                FeatureGroup::BMSFeature
+            } // don't use wildcard here, we want to be explicit
         }
     }
 }
@@ -119,7 +118,6 @@ impl Default for Features {
             Feature::Lua54,
             Feature::CoreFunctions,
             Feature::BevyBindings,
-            Feature::Tracy,
         ])
     }
 }
@@ -356,8 +354,19 @@ impl App {
                     cmd.arg("--publish");
                 }
             }
-            Xtasks::Bench {} => {
+            Xtasks::Bench {
+                name,
+                enable_profiling: profile,
+            } => {
                 cmd.arg("bench");
+
+                if let Some(name) = name {
+                    cmd.arg("--name").arg(name);
+                }
+
+                if profile {
+                    cmd.arg("--profile");
+                }
             }
         }
 
@@ -652,7 +661,14 @@ enum Xtasks {
     publish: bool,
     },
     /// Runs criterion benchmarks generates json required to be published by bencher and generates html performance report
-    Bench {},
+    Bench {
+        /// Whether or not to enable tracy profiling
+        #[clap(long, default_value = "false", help = "Enable tracy profiling")]
+        enable_profiling: bool,
+        /// The name argument passed to `cargo bench`, can be used in combination with profile to selectively profile benchmarks
+        #[clap(long, help = "The name argument passed to `cargo bench`")]
+        name: Option<String>,
+    },
 }
 
 #[derive(Serialize, Clone)]
@@ -731,7 +747,10 @@ impl Xtasks {
             } => Self::codegen(app_settings, output_dir, bevy_features),
             Xtasks::Install { binary } => Self::install(app_settings, binary),
             Xtasks::Bencher { publish } => Self::bencher(app_settings, publish),
-            Xtasks::Bench {} => Self::bench(app_settings),
+            Xtasks::Bench {
+                name,
+                enable_profiling,
+            } => Self::bench(app_settings, enable_profiling, name),
         }?;
 
         Ok("".into())
@@ -1231,18 +1250,34 @@ impl Xtasks {
         Ok(())
     }
 
-    fn bench(app_settings: GlobalArgs) -> Result<()> {
+    fn bench(app_settings: GlobalArgs, profile: bool, name: Option<String>) -> Result<()> {
+        log::info!("Profiling enabled: {profile}");
+
+        let mut features = vec![
+            Feature::Lua54,
+            Feature::Rhai,
+            Feature::CoreFunctions,
+            Feature::BevyBindings,
+        ];
+
+        if profile {
+            std::env::set_var("ENABLE_PROFILING", "1");
+            // features.push(Feature::BevyTracy);
+            features.push(Feature::ProfileWithTracy);
+        }
+
+        let args = if let Some(name) = name {
+            vec!["--".to_owned(), name]
+        } else {
+            vec![]
+        };
+
         Self::run_workspace_command(
             // run with just lua54
-            &app_settings.with_features(Features::new(vec![
-                Feature::Lua54,
-                Feature::Rhai,
-                Feature::CoreFunctions,
-                Feature::BevyBindings,
-            ])),
+            &app_settings.with_features(Features::new(features)),
             "bench",
             "Failed to run benchmarks",
-            Vec::<String>::default(),
+            args,
             None,
         )
         .with_context(|| "when executing criterion benchmarks")?;