diff --git a/devops/scripts/benchmarks/benches/base.py b/devops/scripts/benchmarks/benches/base.py
index efbf7d77e003d..26a5516e2e88d 100644
--- a/devops/scripts/benchmarks/benches/base.py
+++ b/devops/scripts/benchmarks/benches/base.py
@@ -3,7 +3,6 @@
 # See LICENSE.TXT
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 
-from dataclasses import dataclass
 import os
 import shutil
 import subprocess
@@ -12,6 +11,7 @@
 from options import options
 from utils.utils import download, run
 from abc import ABC, abstractmethod
+from utils.unitrace import get_unitrace
 
 benchmark_tags = [
     BenchmarkTag("SYCL", "Benchmark uses SYCL runtime"),
@@ -61,6 +61,12 @@ def enabled(self) -> bool:
         By default, it returns True, but can be overridden to disable a benchmark."""
         return True
 
+    def traceable(self) -> bool:
+        """Returns whether this benchmark should be traced by Unitrace.
+        By default, it returns True, but can be overridden to disable tracing for a benchmark.
+        """
+        return True
+
     @abstractmethod
     def setup(self):
         pass
@@ -70,7 +76,7 @@ def teardown(self):
         pass
 
     @abstractmethod
-    def run(self, env_vars) -> list[Result]:
+    def run(self, env_vars: dict, run_unitrace: bool = False) -> list[Result]:
         pass
 
     @staticmethod
@@ -86,7 +92,14 @@ def get_adapter_full_path():
         ), f"could not find adapter file {adapter_path} (and in similar lib paths)"
 
     def run_bench(
-        self, command, env_vars, ld_library=[], add_sycl=True, use_stdout=True
+        self,
+        command,
+        env_vars,
+        ld_library=[],
+        add_sycl=True,
+        use_stdout=True,
+        run_unitrace=False,
+        extra_unitrace_opt=None,
     ):
         env_vars = env_vars.copy()
         if options.ur is not None:
@@ -99,13 +112,28 @@ def run_bench(
         ld_libraries = options.extra_ld_libraries.copy()
         ld_libraries.extend(ld_library)
 
-        result = run(
-            command=command,
-            env_vars=env_vars,
-            add_sycl=add_sycl,
-            cwd=options.benchmark_cwd,
-            ld_library=ld_libraries,
-        )
+        if self.traceable() and run_unitrace:
+            if extra_unitrace_opt is None:
+                extra_unitrace_opt = []
+            unitrace_output, command = get_unitrace().setup(
+                self.name(), command, extra_unitrace_opt
+            )
+
+        try:
+            result = run(
+                command=command,
+                env_vars=env_vars,
+                add_sycl=add_sycl,
+                cwd=options.benchmark_cwd,
+                ld_library=ld_libraries,
+            )
+        except subprocess.CalledProcessError:
+            if self.traceable() and run_unitrace:
+                get_unitrace().cleanup(options.benchmark_cwd, unitrace_output)
+            raise
+
+        if self.traceable() and run_unitrace:
+            get_unitrace().handle_output(unitrace_output)
 
         if use_stdout:
             return result.stdout.decode()
diff --git a/devops/scripts/benchmarks/benches/benchdnn.py b/devops/scripts/benchmarks/benches/benchdnn.py
index 27f425328cd49..d349789b0ce6f 100644
--- a/devops/scripts/benchmarks/benches/benchdnn.py
+++ b/devops/scripts/benchmarks/benches/benchdnn.py
@@ -129,7 +129,7 @@ def setup(self):
         if not self.bench_bin.exists():
             raise FileNotFoundError(f"Benchmark binary not found: {self.bench_bin}")
 
-    def run(self, env_vars):
+    def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
         command = [
             str(self.bench_bin),
             *self.bench_args.split(),
@@ -148,6 +148,8 @@ def run(self, env_vars):
             add_sycl=True,
             ld_library=ld_library,
             use_stdout=True,
+            run_unitrace=run_unitrace,
+            extra_unitrace_opt=["--chrome-dnn-logging"],
         )
 
         result_value = self._extract_time(output)
diff --git a/devops/scripts/benchmarks/benches/benchdnn_list.py b/devops/scripts/benchmarks/benches/benchdnn_list.py
index 542858b33a343..53721ec1fa178 100644
--- a/devops/scripts/benchmarks/benches/benchdnn_list.py
+++ b/devops/scripts/benchmarks/benches/benchdnn_list.py
@@ -62,6 +62,7 @@
"graph", "sdpa-plain-f16", "--reset --dt=f16 --case=complex_fusion/mha/sdpa-plain-implicit-causal-mask-fp32-bs1.json", + False, # Do not run SYCL graph for this benchmark ], [ "graph", diff --git a/devops/scripts/benchmarks/benches/compute.py b/devops/scripts/benchmarks/benches/compute.py index bdd07b430f407..723d6719a2869 100644 --- a/devops/scripts/benchmarks/benches/compute.py +++ b/devops/scripts/benchmarks/benches/compute.py @@ -224,6 +224,9 @@ def parse_unit_type(compute_unit): class ComputeBenchmark(Benchmark): + + not_traceable = [] # List of benchmarks that should not be traced by Unitrace + def __init__(self, bench, name, test, runtime: RUNTIMES = None): super().__init__(bench.directory, bench) self.bench = bench @@ -263,6 +266,9 @@ def enabled(self) -> bool: # Check if the specific runtime is enabled (or no specific runtime required) return self.runtime is None or self.runtime in self.enabled_runtimes() + def traceable(self) -> bool: + return self.bench_name not in self.not_traceable + def bin_args(self) -> list[str]: return [] @@ -280,7 +286,7 @@ def explicit_group(self): def description(self) -> str: return "" - def run(self, env_vars) -> list[Result]: + def run(self, env_vars, run_unitrace: bool = False) -> list[Result]: command = [ f"{self.benchmark_bin}", f"--test={self.test}", @@ -291,7 +297,11 @@ def run(self, env_vars) -> list[Result]: command += self.bin_args() env_vars.update(self.extra_env_vars()) - result = self.run_bench(command, env_vars) + result = self.run_bench( + command, + env_vars, + run_unitrace=run_unitrace, + ) parsed_results = self.parse_output(result) ret = [] for label, median, stddev, unit in parsed_results: diff --git a/devops/scripts/benchmarks/benches/gromacs.py b/devops/scripts/benchmarks/benches/gromacs.py index a2eec1b496b03..5e1843876f7fa 100644 --- a/devops/scripts/benchmarks/benches/gromacs.py +++ b/devops/scripts/benchmarks/benches/gromacs.py @@ -162,7 +162,7 @@ def setup(self): ld_library=self.suite.oneapi.ld_libraries(), ) - def run(self, env_vars): + def run(self, env_vars, run_unitrace: bool = False) -> list[Result]: model_dir = self.grappa_dir / self.model env_vars.update({"SYCL_CACHE_PERSISTENT": "1"}) @@ -201,6 +201,7 @@ def run(self, env_vars): add_sycl=True, use_stdout=False, ld_library=self.suite.oneapi.ld_libraries(), + run_unitrace=run_unitrace, ) if not self._validate_correctness(options.benchmark_cwd + "/md.log"): diff --git a/devops/scripts/benchmarks/benches/llamacpp.py b/devops/scripts/benchmarks/benches/llamacpp.py index be2fe74c516a5..d025357fcd8f6 100644 --- a/devops/scripts/benchmarks/benches/llamacpp.py +++ b/devops/scripts/benchmarks/benches/llamacpp.py @@ -115,7 +115,7 @@ def get_tags(self): def lower_is_better(self): return False - def run(self, env_vars) -> list[Result]: + def run(self, env_vars, run_unitrace: bool = False) -> list[Result]: command = [ f"{self.benchmark_bin}", "--output", @@ -141,7 +141,10 @@ def run(self, env_vars) -> list[Result]: ] result = self.run_bench( - command, env_vars, ld_library=self.bench.oneapi.ld_libraries() + command, + env_vars, + ld_library=self.bench.oneapi.ld_libraries(), + run_unitrace=run_unitrace, ) parsed = self.parse_output(result) results = [] diff --git a/devops/scripts/benchmarks/benches/syclbench.py b/devops/scripts/benchmarks/benches/syclbench.py index ffb164e2ce7cd..65bc26ac46d18 100644 --- a/devops/scripts/benchmarks/benches/syclbench.py +++ b/devops/scripts/benchmarks/benches/syclbench.py @@ -137,7 +137,7 @@ def setup(self): self.directory, "sycl-bench-build", 
         )
 
-    def run(self, env_vars) -> list[Result]:
+    def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
         self.outputfile = os.path.join(self.bench.directory, self.test + ".csv")
 
         command = [
@@ -151,7 +151,11 @@ def run(self, env_vars) -> list[Result]:
         env_vars.update(self.extra_env_vars())
 
         # no output to stdout, all in outputfile
-        self.run_bench(command, env_vars)
+        self.run_bench(
+            command,
+            env_vars,
+            run_unitrace=run_unitrace,
+        )
 
         with open(self.outputfile, "r") as f:
             reader = csv.reader(f)
diff --git a/devops/scripts/benchmarks/benches/test.py b/devops/scripts/benchmarks/benches/test.py
index bfc1cfcd55323..e7d40f5380e06 100644
--- a/devops/scripts/benchmarks/benches/test.py
+++ b/devops/scripts/benchmarks/benches/test.py
@@ -88,7 +88,7 @@ def notes(self) -> str:
     def unstable(self) -> str:
         return self.unstable_text
 
-    def run(self, env_vars) -> list[Result]:
+    def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
         random_value = self.value + random.uniform(-1 * (self.diff), self.diff)
         return [
             Result(
diff --git a/devops/scripts/benchmarks/benches/umf.py b/devops/scripts/benchmarks/benches/umf.py
index 752d71f9b1a43..0cb2a97f3ed9d 100644
--- a/devops/scripts/benchmarks/benches/umf.py
+++ b/devops/scripts/benchmarks/benches/umf.py
@@ -138,7 +138,7 @@ def get_names_of_benchmarks_to_be_run(self, command, env_vars):
 
         return all_names
 
-    def run(self, env_vars) -> list[Result]:
+    def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
         command = [f"{self.benchmark_bin}"]
 
         all_names = self.get_names_of_benchmarks_to_be_run(command, env_vars)
@@ -152,7 +152,11 @@ def run(self, env_vars) -> list[Result]:
             specific_benchmark = command + ["--benchmark_filter=^" + name + "$"]
 
             result = self.run_bench(
-                specific_benchmark, env_vars, add_sycl=False, ld_library=[self.umf_lib]
+                specific_benchmark,
+                env_vars,
+                add_sycl=False,
+                ld_library=[self.umf_lib],
+                run_unitrace=run_unitrace,
             )
 
             parsed = self.parse_output(result)
diff --git a/devops/scripts/benchmarks/benches/velocity.py b/devops/scripts/benchmarks/benches/velocity.py
index d4ceae393144b..a810674c3f984 100644
--- a/devops/scripts/benchmarks/benches/velocity.py
+++ b/devops/scripts/benchmarks/benches/velocity.py
@@ -130,7 +130,7 @@ def description(self) -> str:
     def get_tags(self):
         return ["SYCL", "application"]
 
-    def run(self, env_vars) -> list[Result]:
+    def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
         env_vars.update(self.extra_env_vars())
 
         command = [
@@ -138,7 +138,12 @@ def run(self, env_vars) -> list[Result]:
         ]
         command += self.bin_args()
 
-        result = self.run_bench(command, env_vars, ld_library=self.ld_libraries())
+        result = self.run_bench(
+            command,
+            env_vars,
+            ld_library=self.ld_libraries(),
+            run_unitrace=run_unitrace,
+        )
 
         return [
             Result(
@@ -282,7 +287,7 @@ class QuickSilver(VelocityBase):
     def __init__(self, vb: VelocityBench):
         super().__init__("QuickSilver", "qs", vb, "MMS/CTT")
 
-    def run(self, env_vars) -> list[Result]:
+    def run(self, env_vars, run_unitrace: bool = False) -> list[Result]:
         # TODO: fix the crash in QuickSilver when UR_L0_USE_IMMEDIATE_COMMANDLISTS=0
         if (
             "UR_L0_USE_IMMEDIATE_COMMANDLISTS" in env_vars
diff --git a/devops/scripts/benchmarks/history.py b/devops/scripts/benchmarks/history.py
index 843ee8b63dff1..30591657fa298 100644
--- a/devops/scripts/benchmarks/history.py
+++ b/devops/scripts/benchmarks/history.py
@@ -14,6 +14,7 @@
 from utils.validate import Validate
 from utils.detect_versions import DetectVersion
+from utils.unitrace import get_unitrace
 
 
 class BenchmarkHistory:
@@ -149,23 +150,28 @@ def git_info_from_path(path: Path) -> (str, str):
             compute_runtime=compute_runtime,
         )
 
-    def save(self, save_name, results: list[Result], to_file=True):
+    def save(self, save_name, results: list[Result]):
         benchmark_data = self.create_run(save_name, results)
         self.runs.append(benchmark_data)
 
-        if not to_file:
+        if options.save_name is None:
             return
 
-        serialized = benchmark_data.to_json()
+        serialized = benchmark_data.to_json()  # type: ignore
         results_dir = Path(os.path.join(self.dir, "results"))
         os.makedirs(results_dir, exist_ok=True)
 
-        # Use formatted timestamp for the filename
-        timestamp = (
-            datetime.now(tz=timezone.utc).strftime("%Y%m%d_%H%M%S")
-            if options.timestamp_override is None
-            else options.timestamp_override
-        )
+        if get_unitrace() is not None:
+            timestamp = get_unitrace().timestamp  # type: ignore
+        elif options.timestamp_override is not None:
+            timestamp = options.timestamp_override
+        else:
+            timestamp = (
+                datetime.now(tz=timezone.utc).strftime("%Y%m%d_%H%M%S")
+                if options.timestamp_override is None
+                else options.timestamp_override
+            )
+
         file_path = Path(os.path.join(results_dir, f"{save_name}_{timestamp}.json"))
         with file_path.open("w") as file:
             json.dump(serialized, file, indent=4)
diff --git a/devops/scripts/benchmarks/html/data.js b/devops/scripts/benchmarks/html/data.js
index 2f1862fe621b7..eaa5dfdf8b375 100644
--- a/devops/scripts/benchmarks/html/data.js
+++ b/devops/scripts/benchmarks/html/data.js
@@ -8,4 +8,3 @@
 benchmarkRuns = [];
 
 defaultCompareNames = [];
-
diff --git a/devops/scripts/benchmarks/main.py b/devops/scripts/benchmarks/main.py
index d90824bbb8c38..06b72dde2e46b 100755
--- a/devops/scripts/benchmarks/main.py
+++ b/devops/scripts/benchmarks/main.py
@@ -21,8 +21,8 @@
 from utils.compute_runtime import *
 from utils.validate import Validate
 from utils.detect_versions import DetectVersion
+from utils.unitrace import get_unitrace, create_unitrace
 from presets import enabled_suites, presets
-
 import argparse
 import re
 import statistics
@@ -38,10 +38,15 @@ def run_iterations(
     iters: int,
     results: dict[str, list[Result]],
     failures: dict[str, str],
+    run_unitrace: bool = False,
 ):
     for iter in range(iters):
-        print(f"running {benchmark.name()}, iteration {iter}... ", flush=True)
-        bench_results = benchmark.run(env_vars)
+        if run_unitrace:
+            print(f"running {benchmark.name()} with Unitrace... ", flush=True)
+        else:
+            print(f"running {benchmark.name()}, iteration {iter}... ", flush=True)
+
+        bench_results = benchmark.run(env_vars, run_unitrace=run_unitrace)
         if bench_results is None:
             if options.exit_on_failure:
                 raise RuntimeError(f"Benchmark {benchmark.name()} produced no results!")
@@ -163,9 +168,23 @@ def collect_metadata(suites):
     return metadata
 
 
-def main(directory, additional_env_vars, save_name, compare_names, filter):
+def main(directory, additional_env_vars, compare_names, filter):
     prepare_workdir(directory, INTERNAL_WORKDIR_VERSION)
 
+    if args.unitrace == "inclusive":
+        create_unitrace(inclusive=True)
+    elif args.unitrace is True:
+        create_unitrace(inclusive=False)
+    elif args.unitrace is not None:
+        parser.error(
+            "Invalid value for --unitrace. Use 'inclusive' to trace in addition to the regular benchmark runs, or pass no value to run tracing only."
+        )
+
+    if get_unitrace() is not None and options.save_name is None:
+        raise ValueError(
+            "Unitrace requires a save name to be specified via the --save option."
+        )
+
     if options.build_compute_runtime:
         print(f"Setting up Compute Runtime {options.compute_runtime_tag}")
         cr = get_compute_runtime()
@@ -241,19 +260,32 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
             merged_env_vars = {**additional_env_vars}
             intermediate_results: dict[str, list[Result]] = {}
             processed: list[Result] = []
-            for _ in range(options.iterations_stddev):
+            # regular run of the benchmark
+            if get_unitrace() is None or get_unitrace().inclusive:
+                for _ in range(options.iterations_stddev):
+                    run_iterations(
+                        benchmark,
+                        merged_env_vars,
+                        options.iterations,
+                        intermediate_results,
+                        failures,
+                        run_unitrace=False,
+                    )
+                    valid, processed = process_results(
+                        intermediate_results, benchmark.stddev_threshold()
+                    )
+                    if valid:
+                        break
+            # single unitrace run, independent of the benchmark iterations
+            if get_unitrace() is not None:
                 run_iterations(
                     benchmark,
                     merged_env_vars,
-                    options.iterations,
+                    1,
                     intermediate_results,
                     failures,
+                    run_unitrace=True,
                 )
-                valid, processed = process_results(
-                    intermediate_results, benchmark.stddev_threshold()
-                )
-                if valid:
-                    break
             results += processed
         except Exception as e:
             if options.exit_on_failure:
@@ -310,13 +342,13 @@ def main(directory, additional_env_vars, save_name, compare_names, filter):
             f"Markdown with benchmark results has been written to {md_path}/benchmark_results.md"
         )
 
-    saved_name = save_name if save_name is not None else this_name
+    saved_name = options.save_name if options.save_name is not None else this_name
 
     # It's important we don't save the current results into history before
     # we calculate historical averages or get latest results for compare.
     # Otherwise we might be comparing the results to themselves.
     if not options.dry_run:
-        history.save(saved_name, results, save_name is not None)
+        history.save(saved_name, results)
 
         if saved_name not in compare_names:
             compare_names.append(saved_name)
@@ -509,6 +541,14 @@ def validate_and_parse_env_args(env_args):
         help="HIP device architecture",
         default=None,
     )
+    parser.add_argument(
+        "--unitrace",
+        nargs="?",
+        const=True,
+        default=None,
+        help="Run Unitrace tracing on a single iteration of each benchmark. With 'inclusive', the traced run is performed in addition to the regular benchmark iterations.",
+        choices=["inclusive", True],
+    )
 
     # Options intended for CI:
     parser.add_argument(
@@ -599,6 +639,7 @@ def validate_and_parse_env_args(env_args):
     options.ur = args.ur
     options.ur_adapter = args.adapter
     options.exit_on_failure = args.exit_on_failure
+    options.save_name = args.save
     options.compare = Compare(args.compare_type)
     options.compare_max = args.compare_max
     options.output_markdown = args.output_markdown
@@ -664,7 +705,6 @@ def validate_and_parse_env_args(env_args):
     main(
         args.benchmark_directory,
         additional_env_vars,
-        args.save,
         args.compare,
         benchmark_filter,
     )
diff --git a/devops/scripts/benchmarks/options.py b/devops/scripts/benchmarks/options.py
index 04a7e76be43e3..7cc954b0d3775 100644
--- a/devops/scripts/benchmarks/options.py
+++ b/devops/scripts/benchmarks/options.py
@@ -1,6 +1,7 @@
 from dataclasses import dataclass, field
 from enum import Enum
 import multiprocessing
+import os
 
 
 class Compare(Enum):
@@ -53,6 +54,7 @@ class Options:
     timeout: float = 600
     iterations: int = 3
     verbose: bool = False
+    save_name: str = None
     compare: Compare = Compare.LATEST
     compare_max: int = 10  # average/median over how many results
     output_markdown: MarkdownSize = MarkdownSize.SHORT
@@ -68,7 +70,7 @@ class Options:
     build_igc: bool = False
     current_run_name: str = "This PR"
     preset: str = "Full"
-    build_jobs: int = multiprocessing.cpu_count()
+    build_jobs: int = len(os.sched_getaffinity(0))  # Cores available for the process.
     exit_on_failure: bool = False
 
     # Options intended for CI:
diff --git a/devops/scripts/benchmarks/utils/oneapi.py b/devops/scripts/benchmarks/utils/oneapi.py
index 0a477f6e246b1..80049149810ad 100644
--- a/devops/scripts/benchmarks/utils/oneapi.py
+++ b/devops/scripts/benchmarks/utils/oneapi.py
@@ -108,7 +108,6 @@ def ld_libraries(self):
 
 oneapi_instance = None
 
-
 def get_oneapi() -> OneAPI:  # oneAPI singleton
     if not hasattr(get_oneapi, "instance"):
         get_oneapi.instance = OneAPI()
diff --git a/devops/scripts/benchmarks/utils/unitrace.py b/devops/scripts/benchmarks/utils/unitrace.py
new file mode 100644
index 0000000000000..399908898ddd7
--- /dev/null
+++ b/devops/scripts/benchmarks/utils/unitrace.py
@@ -0,0 +1,189 @@
+# Copyright (C) 2025 Intel Corporation
+# Part of the Unified-Runtime Project, under the Apache License v2.0 with LLVM Exceptions.
+# See LICENSE.TXT
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+import os
+import shutil
+import re
+
+from options import options
+from utils.utils import run, git_clone
+from datetime import datetime, timezone
+
+
+class Unitrace:
+
+    inclusive: bool = False
+
+    def __init__(self, inclusive: bool = False):
+        self.inclusive = inclusive
+        self.timestamp = (
+            datetime.now(tz=timezone.utc).strftime("%Y%m%d_%H%M%S")
+            if options.timestamp_override is None
+            else options.timestamp_override
+        )
+
+        print("Downloading and building Unitrace...")
+        repo_dir = git_clone(
+            options.workdir,
+            "pti-gpu-repo",
+            "https://github.com/intel/pti-gpu.git",
+            "master",
+        )
+        build_dir = os.path.join(options.workdir, "unitrace-build")
+        unitrace_src = os.path.join(repo_dir, "tools", "unitrace")
+        os.makedirs(build_dir, exist_ok=True)
+
+        unitrace_exe = os.path.join(build_dir, "unitrace")
+        if not os.path.isfile(unitrace_exe):
+            run(
+                [
+                    "cmake",
+                    f"-S {unitrace_src}",
+                    f"-B {build_dir}",
+                    "-DCMAKE_BUILD_TYPE=Release",
+                    "-DCMAKE_CXX_COMPILER=clang++",
+                    "-DCMAKE_C_COMPILER=clang",
+                    "-DBUILD_WITH_L0=1",
+                    "-DBUILD_WITH_OPENCL=0",
+                    "-DBUILD_WITH_ITT=1",
+                    "-DBUILD_WITH_XPTI=1",
+                    "-DBUILD_WITH_MPI=0",
+                ],
+                add_sycl=True,
+            )
+            run(
+                ["cmake", "--build", build_dir, "-j", options.build_jobs],
+                add_sycl=True,
+            )
+            print("Unitrace built successfully.")
+
+        if options.results_directory_override is None:
+            self.traces_dir = os.path.join(options.workdir, "results", "traces")
+        else:
+            self.traces_dir = os.path.join(options.results_directory_override, "traces")
+
+    def _prune_unitrace_dirs(self, dir: str, FILECNT: int = 10):
+        files = os.listdir(dir)
+        files.sort()  # Lexicographical sort matches timestamp order
+        if len(files) > 2 * FILECNT:
+            for f in files[: len(files) - 2 * FILECNT]:
+                full_path = os.path.join(dir, f)
+                if os.path.isdir(full_path):
+                    shutil.rmtree(full_path)
+                else:
+                    os.remove(full_path)
+                if options.verbose:
+                    print(f"Removing old unitrace file: {full_path}")
+
+    def cleanup(self, bench_cwd: str, unitrace_output: str):
+        # Remove leftover unitrace output files from the traces dir and stray .json files from the benchmark cwd
+        unitrace_dir = os.path.dirname(unitrace_output)
+        unitrace_base = os.path.basename(unitrace_output)
+        print(f"Cleanup unitrace output {unitrace_base} from {unitrace_dir}")
+        for f in os.listdir(unitrace_dir):
+            if f.startswith(unitrace_base + "."):
+                os.remove(os.path.join(unitrace_dir, f))
+                print(f"Cleanup: Removed {f} from {unitrace_dir}")
+        if os.path.exists(bench_cwd):
+            for f in os.listdir(bench_cwd):
+                if f.endswith(".json"):
+                    os.remove(os.path.join(bench_cwd, f))
+                    print(f"Cleanup: Removed {f} from {bench_cwd}")
+
+    def setup(
+        self, bench_name: str, command: list[str], extra_unitrace_opt: list[str] = None
+    ):
+        unitrace_bin = os.path.join(options.workdir, "unitrace-build", "unitrace")
+        if not os.path.exists(unitrace_bin):
+            raise FileNotFoundError(f"Unitrace binary not found: {unitrace_bin}.")
") + os.makedirs(self.traces_dir, exist_ok=True) + bench_dir = os.path.join(f"{self.traces_dir}", f"{bench_name}") + + os.makedirs(bench_dir, exist_ok=True) + + unitrace_output = os.path.join( + bench_dir, f"{self.timestamp}_{options.save_name}.out" + ) + + if extra_unitrace_opt is None: + extra_unitrace_opt = [] + + unitrace_command = ( + [ + str(unitrace_bin), + "--call-logging", + "--host-timing", + "--device-timing", + "--chrome-sycl-logging", + "--chrome-call-logging", + "--chrome-kernel-logging", + "--output", + unitrace_output, + ] + + extra_unitrace_opt + + command + ) + if options.verbose: + print(f"Unitrace cmd: {' '.join(unitrace_command)}") + + return unitrace_output, unitrace_command + + def handle_output(self, unitrace_output: str): + + # Handle {name}.{pid}.json files in cwd: move and rename to {self.name()}_{timestamp}.{pid}.json + pid_json_files = [] + pid = "" + for f in os.listdir(options.benchmark_cwd): + parts = f.split(".") + l = len(parts) + if len(parts) >= 3 and parts[l - 1] == "json" and parts[l - 2].isdigit(): + pid_json_files.append(f) + pid = parts[l - 2] + + if len(pid_json_files) == 0: + raise FileNotFoundError( + f"No .pid.json files found in {options.benchmark_cwd}." + ) + elif len(pid_json_files) > 1: + # If there are multiple .pid.json files due to previous failures, keep only the most recent one + pid_json_files.sort( + key=lambda f: os.path.getmtime(os.path.join(options.benchmark_cwd, f)) + ) + for f in pid_json_files[:-1]: + os.remove(os.path.join(options.benchmark_cwd, f)) + pid_json_files = [pid_json_files[-1]] + + dst = ( + unitrace_output[:-4] + f".{pid}.json" + if unitrace_output.endswith(".out") + else unitrace_output + f".{pid}.json" + ) + + shutil.move(os.path.join(options.benchmark_cwd, pid_json_files[0]), dst) + if options.verbose: + print(f"Moved {pid_json_files[0]} to {dst}") + + # Prune old unitrace directories + self._prune_unitrace_dirs(os.path.dirname(unitrace_output)) + + +_unitrace_instance = None + + +def create_unitrace(inclusive: bool) -> None: + global _unitrace_instance + if _unitrace_instance is None: + try: + _unitrace_instance = Unitrace(inclusive) + except Exception as e: + print(f"Failed to build Unitrace: {e}") + _unitrace_instance = None + if _unitrace_instance is not None: + print("Unitrace instance created successfully.") + else: + raise ValueError("Unitrace instance already created") + + +def get_unitrace() -> Unitrace | None: + return _unitrace_instance