Skip to content

Commit b09513a

Browse files
committed
format
1 parent 0fbfd74 commit b09513a

File tree

1 file changed

+72
-45
lines changed

1 file changed

+72
-45
lines changed

clang/test/Analysis/lifetime_safety/benchmark.py

Lines changed: 72 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@
99
from scipy.optimize import curve_fit
1010
from scipy.stats import t
1111

12+
1213
def generate_cpp_cycle_test(n: int) -> str:
1314
"""
1415
Generates a C++ code snippet with a specified number of pointers in a cycle.
@@ -34,6 +35,7 @@ def generate_cpp_cycle_test(n: int) -> str:
3435
cpp_code += f"\nint main() {{ long_cycle_{n}(false); return 0; }}\n"
3536
return cpp_code
3637

38+
3739
def generate_cpp_merge_test(n: int) -> str:
3840
"""
3941
Generates a C++ code snippet with N independent conditional assignments.
@@ -55,6 +57,7 @@ def generate_cpp_merge_test(n: int) -> str:
5557
cpp_code += f"\nint main() {{ conditional_merges_{n}(false); return 0; }}\n"
5658
return cpp_code
5759

60+
5861
def analyze_trace_file(trace_path: str) -> tuple[float, float]:
    """
    Parses the -ftime-trace JSON output to find durations.

    Returns:
        A (lifetime_us, total_us) pair: microseconds accumulated over all
        'LifetimeAnalysis' events and all 'ExecuteCompiler' events, or
        (0.0, 0.0) when the trace file is missing or not valid JSON.
    """
    analysis_us = 0.0
    compile_us = 0.0
    try:
        with open(trace_path, "r") as trace_fp:
            events = json.load(trace_fp).get("traceEvents", [])
        for event in events:
            name = event.get("name")
            if name == "LifetimeAnalysis":
                analysis_us += float(event.get("dur", 0))
            if name == "ExecuteCompiler":
                compile_us += float(event.get("dur", 0))
    except (IOError, json.JSONDecodeError) as e:
        # Best-effort: report the problem and fall back to zero durations
        # so one bad trace does not abort the whole benchmark run.
        print(f"Error reading or parsing trace file {trace_path}: {e}", file=sys.stderr)
        return 0.0, 0.0
    return analysis_us, compile_us
8083

84+
8185
def power_law(n, c, k):
    """Power-law model y = c * n**k, the shape fitted by curve_fit."""
    growth = np.power(n, k)
    return c * growth
8488

89+
8590
def human_readable_time(ms: float) -> str:
    """Converts milliseconds to a human-readable string (ms or s)."""
    # Durations under one second stay in milliseconds; longer ones
    # are shown in seconds, both with two decimal places.
    if ms < 1000:
        return f"{ms:.2f} ms"
    return f"{ms / 1000:.2f} s"
9095

96+
9197
def generate_markdown_report(results: dict) -> str:
9298
"""Generates a Markdown-formatted report from the benchmark results."""
9399
report = []
@@ -97,7 +103,7 @@ def generate_markdown_report(results: dict) -> str:
97103
report.append("\n---\n")
98104

99105
for test_type, data in results.items():
100-
title = 'Pointer Cycle in Loop' if test_type == 'cycle' else 'CFG Merges'
106+
title = "Pointer Cycle in Loop" if test_type == "cycle" else "CFG Merges"
101107
report.append(f"## Test Case: {title}")
102108
report.append("")
103109

@@ -106,9 +112,9 @@ def generate_markdown_report(results: dict) -> str:
106112
report.append("|:----|--------------:|-----------------:|")
107113

108114
# Table rows
109-
n_data = np.array(data['n'])
110-
analysis_data = np.array(data['lifetime_ms'])
111-
total_data = np.array(data['total_ms'])
115+
n_data = np.array(data["n"])
116+
analysis_data = np.array(data["lifetime_ms"])
117+
total_data = np.array(data["total_ms"])
112118
for i in range(len(n_data)):
113119
analysis_str = human_readable_time(analysis_data[i])
114120
total_str = human_readable_time(total_data[i])
@@ -119,28 +125,36 @@ def generate_markdown_report(results: dict) -> str:
119125
# Complexity analysis
120126
report.append(f"**Complexity Analysis:**")
121127
try:
122-
popt, pcov = curve_fit(power_law, n_data, analysis_data, p0=[0, 2], maxfev=5000)
128+
popt, pcov = curve_fit(
129+
power_law, n_data, analysis_data, p0=[0, 2], maxfev=5000
130+
)
123131
_, k = popt
124-
132+
125133
# R-squared calculation
126134
residuals = analysis_data - power_law(n_data, *popt)
127135
ss_res = np.sum(residuals**2)
128-
ss_tot = np.sum((analysis_data - np.mean(analysis_data))**2)
136+
ss_tot = np.sum((analysis_data - np.mean(analysis_data)) ** 2)
129137
r_squared = 1 - (ss_res / ss_tot)
130-
138+
131139
# Confidence Interval for k
132140
alpha = 0.05 # 95% confidence
133-
dof = max(0, len(n_data) - len(popt)) # degrees of freedom
134-
t_val = t.ppf(1.0 - alpha / 2., dof)
141+
dof = max(0, len(n_data) - len(popt)) # degrees of freedom
142+
t_val = t.ppf(1.0 - alpha / 2.0, dof)
135143
# Standard error of the parameters
136144
perr = np.sqrt(np.diag(pcov))
137145
k_stderr = perr[1]
138146
k_ci_lower = k - t_val * k_stderr
139147
k_ci_upper = k + t_val * k_stderr
140148

141-
report.append(f"- The performance of the analysis for this case scales approximately as **O(n<sup>{k:.2f}</sup>)**.")
142-
report.append(f"- **Goodness of Fit (R²):** `{r_squared:.4f}` (closer to 1.0 is better).")
143-
report.append(f"- **95% Confidence Interval for exponent 'k':** `[{k_ci_lower:.2f}, {k_ci_upper:.2f}]`.")
149+
report.append(
150+
f"- The performance of the analysis for this case scales approximately as **O(n<sup>{k:.2f}</sup>)**."
151+
)
152+
report.append(
153+
f"- **Goodness of Fit (R²):** `{r_squared:.4f}` (closer to 1.0 is better)."
154+
)
155+
report.append(
156+
f"- **95% Confidence Interval for exponent 'k':** `[{k_ci_lower:.2f}, {k_ci_upper:.2f}]`."
157+
)
144158

145159
except RuntimeError:
146160
report.append("- Could not determine a best-fit curve for the data.")
@@ -149,67 +163,80 @@ def generate_markdown_report(results: dict) -> str:
149163

150164
return "\n".join(report)
151165

166+
152167
def run_single_test(clang_binary: str, test_type: str, n: int) -> tuple[float, float]:
    """Generates, compiles, and benchmarks a single test case.

    Args:
        clang_binary: Path to the clang executable to benchmark.
        test_type: "cycle" for the pointer-cycle generator; anything else
            falls through to the conditional-merge generator.
        n: Size parameter forwarded to the C++ code generator.

    Returns:
        (lifetime_ms, total_ms) durations in milliseconds, or (0.0, 0.0)
        when compilation fails.
    """
    print(f"--- Running Test: {test_type.capitalize()} with N={n} ---")

    if test_type == "cycle":
        generated_code = generate_cpp_cycle_test(n)
    else:  # merge
        generated_code = generate_cpp_merge_test(n)

    # delete=False: the file must survive the context manager so clang
    # can read it by name afterwards; we remove it ourselves below.
    with tempfile.NamedTemporaryFile(mode="w+", suffix=".cpp", delete=False) as tmp_cpp:
        tmp_cpp.write(generated_code)
        source_file = tmp_cpp.name

    trace_file = os.path.splitext(source_file)[0] + ".json"

    clang_command = [
        clang_binary,
        "-c",
        "-o",
        "/dev/null",
        "-ftime-trace=" + trace_file,
        "-Wexperimental-lifetime-safety",
        "-std=c++17",
        source_file,
    ]

    try:
        result = subprocess.run(clang_command, capture_output=True, text=True)

        if result.returncode != 0:
            print(f"Compilation failed for N={n}!", file=sys.stderr)
            print(result.stderr, file=sys.stderr)
            return 0.0, 0.0

        lifetime_us, total_us = analyze_trace_file(trace_file)
        # Trace durations are reported in microseconds; return milliseconds.
        return lifetime_us / 1000.0, total_us / 1000.0
    finally:
        # Always clean up temp artifacts. This also removes a partial trace
        # file left behind by a failed compile, which the previous version
        # leaked (it only deleted source_file on the failure path).
        for path in (source_file, trace_file):
            if os.path.exists(path):
                os.remove(path)
186207

208+
187209
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate, compile, and benchmark C++ test cases for Clang's lifetime analysis."
    )
    parser.add_argument(
        "--clang-binary", type=str, required=True, help="Path to the Clang executable."
    )
    args = parser.parse_args()

    # Problem sizes swept for each scenario.
    n_values = [10, 25, 50, 75, 100, 150, 200]
    results = {
        scenario: {"n": [], "lifetime_ms": [], "total_ms": []}
        for scenario in ("cycle", "merge")
    }

    print("Running performance benchmarks...")
    for test_type in ["cycle", "merge"]:
        for n in n_values:
            lifetime_ms, total_ms = run_single_test(args.clang_binary, test_type, n)
            if total_ms <= 0:
                # Compilation failed for this size; leave it out of the fit.
                continue
            bucket = results[test_type]
            bucket["n"].append(n)
            bucket["lifetime_ms"].append(lifetime_ms)
            bucket["total_ms"].append(total_ms)
            print(
                f" Total: {human_readable_time(total_ms)} | Analysis: {human_readable_time(lifetime_ms)}"
            )

    banner = "=" * 80
    print("\n\n" + banner)
    print("Generating Markdown Report...")
    print(banner + "\n")

    markdown_report = generate_markdown_report(results)
    print(markdown_report)
215-

0 commit comments

Comments
 (0)