Skip to content

Commit 41d108f

Browse files
committed
format
1 parent 0fbfd74 commit 41d108f

File tree

1 file changed

+124
-61
lines changed

1 file changed

+124
-61
lines changed

clang/test/Analysis/lifetime_safety/benchmark.py

Lines changed: 124 additions & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,32 @@
99
from scipy.optimize import curve_fit
1010
from scipy.stats import t
1111

12+
1213
def generate_cpp_cycle_test(n: int) -> str:
1314
"""
1415
Generates a C++ code snippet with a specified number of pointers in a cycle.
16+
Example:
17+
struct MyObj { int id; ~MyObj() {} };
18+
19+
void long_cycle_4(bool condition) {
20+
MyObj v1{1};
21+
MyObj v2{1};
22+
MyObj v3{1};
23+
MyObj v4{1};
24+
25+
MyObj* p1 = &v1;
26+
MyObj* p2 = &v2;
27+
MyObj* p3 = &v3;
28+
MyObj* p4 = &v4;
29+
30+
while (condition) {
31+
MyObj* temp = p1;
32+
p1 = p2;
33+
p2 = p3;
34+
p3 = p4;
35+
p4 = temp;
36+
}
37+
}
1538
"""
1639
if n <= 0:
1740
return "// Number of variables must be positive."
@@ -34,9 +57,22 @@ def generate_cpp_cycle_test(n: int) -> str:
3457
cpp_code += f"\nint main() {{ long_cycle_{n}(false); return 0; }}\n"
3558
return cpp_code
3659

60+
3761
def generate_cpp_merge_test(n: int) -> str:
3862
"""
3963
Generates a C++ code snippet with N independent conditional assignments.
64+
Example:
65+
struct MyObj { int id; ~MyObj() {} };
66+
67+
void conditional_merges_4(bool condition) {
68+
MyObj v1, v2, v3, v4;
69+
MyObj *p1 = nullptr, *p2 = nullptr, *p3 = nullptr, *p4 = nullptr;
70+
71+
if(condition) { p1 = &v1; }
72+
if(condition) { p2 = &v2; }
73+
if(condition) { p3 = &v3; }
74+
if(condition) { p4 = &v4; }
75+
}
4076
"""
4177
if n <= 0:
4278
return "// Number of variables must be positive."
@@ -55,6 +91,7 @@ def generate_cpp_merge_test(n: int) -> str:
5591
cpp_code += f"\nint main() {{ conditional_merges_{n}(false); return 0; }}\n"
5692
return cpp_code
5793

94+
5895
def analyze_trace_file(trace_path: str) -> tuple[float, float]:
5996
"""
6097
Parses the -ftime-trace JSON output to find durations.
@@ -65,29 +102,32 @@ def analyze_trace_file(trace_path: str) -> tuple[float, float]:
65102
lifetime_duration = 0.0
66103
total_duration = 0.0
67104
try:
68-
with open(trace_path, 'r') as f:
105+
with open(trace_path, "r") as f:
69106
trace_data = json.load(f)
70-
for event in trace_data.get('traceEvents', []):
71-
if event.get('name') == 'LifetimeAnalysis':
72-
lifetime_duration += float(event.get('dur', 0))
73-
if event.get('name') == 'ExecuteCompiler':
74-
total_duration += float(event.get('dur', 0))
107+
for event in trace_data.get("traceEvents", []):
108+
if event.get("name") == "LifetimeSafetyAnalysis":
109+
lifetime_duration += float(event.get("dur", 0))
110+
if event.get("name") == "ExecuteCompiler":
111+
total_duration += float(event.get("dur", 0))
75112

76113
except (IOError, json.JSONDecodeError) as e:
77114
print(f"Error reading or parsing trace file {trace_path}: {e}", file=sys.stderr)
78115
return 0.0, 0.0
79116
return lifetime_duration, total_duration
80117

118+
81119
def power_law(n, c, k):
    """Power-law model used as the curve-fit hypothesis.

    Computes y = c * n^k elementwise; `n` may be a scalar or a NumPy array.
    """
    growth = np.power(n, k)
    return c * growth
84122

123+
85124
def human_readable_time(ms: float) -> str:
    """Format a duration given in milliseconds as a short readable string.

    Durations of one second or more are rendered in seconds; anything
    shorter stays in milliseconds. Two decimal places either way.
    """
    return f"{ms / 1000:.2f} s" if ms >= 1000 else f"{ms:.2f} ms"
90129

130+
91131
def generate_markdown_report(results: dict) -> str:
92132
"""Generates a Markdown-formatted report from the benchmark results."""
93133
report = []
@@ -97,7 +137,7 @@ def generate_markdown_report(results: dict) -> str:
97137
report.append("\n---\n")
98138

99139
for test_type, data in results.items():
100-
title = 'Pointer Cycle in Loop' if test_type == 'cycle' else 'CFG Merges'
140+
title = "Pointer Cycle in Loop" if test_type == "cycle" else "CFG Merges"
101141
report.append(f"## Test Case: {title}")
102142
report.append("")
103143

@@ -106,9 +146,9 @@ def generate_markdown_report(results: dict) -> str:
106146
report.append("|:----|--------------:|-----------------:|")
107147

108148
# Table rows
109-
n_data = np.array(data['n'])
110-
analysis_data = np.array(data['lifetime_ms'])
111-
total_data = np.array(data['total_ms'])
149+
n_data = np.array(data["n"])
150+
analysis_data = np.array(data["lifetime_ms"])
151+
total_data = np.array(data["total_ms"])
112152
for i in range(len(n_data)):
113153
analysis_str = human_readable_time(analysis_data[i])
114154
total_str = human_readable_time(total_data[i])
@@ -119,28 +159,36 @@ def generate_markdown_report(results: dict) -> str:
119159
# Complexity analysis
120160
report.append(f"**Complexity Analysis:**")
121161
try:
122-
popt, pcov = curve_fit(power_law, n_data, analysis_data, p0=[0, 2], maxfev=5000)
162+
popt, pcov = curve_fit(
163+
power_law, n_data, analysis_data, p0=[0, 2], maxfev=5000
164+
)
123165
_, k = popt
124-
166+
125167
# R-squared calculation
126168
residuals = analysis_data - power_law(n_data, *popt)
127169
ss_res = np.sum(residuals**2)
128-
ss_tot = np.sum((analysis_data - np.mean(analysis_data))**2)
170+
ss_tot = np.sum((analysis_data - np.mean(analysis_data)) ** 2)
129171
r_squared = 1 - (ss_res / ss_tot)
130-
172+
131173
# Confidence Interval for k
132174
alpha = 0.05 # 95% confidence
133-
dof = max(0, len(n_data) - len(popt)) # degrees of freedom
134-
t_val = t.ppf(1.0 - alpha / 2., dof)
175+
dof = max(0, len(n_data) - len(popt)) # degrees of freedom
176+
t_val = t.ppf(1.0 - alpha / 2.0, dof)
135177
# Standard error of the parameters
136178
perr = np.sqrt(np.diag(pcov))
137179
k_stderr = perr[1]
138180
k_ci_lower = k - t_val * k_stderr
139181
k_ci_upper = k + t_val * k_stderr
140182

141-
report.append(f"- The performance of the analysis for this case scales approximately as **O(n<sup>{k:.2f}</sup>)**.")
142-
report.append(f"- **Goodness of Fit (R²):** `{r_squared:.4f}` (closer to 1.0 is better).")
143-
report.append(f"- **95% Confidence Interval for exponent 'k':** `[{k_ci_lower:.2f}, {k_ci_upper:.2f}]`.")
183+
report.append(
184+
f"- The performance of the analysis for this case scales approximately as **O(n<sup>{k:.2f}</sup>)**."
185+
)
186+
report.append(
187+
f"- **Goodness of Fit (R<sup>2</sup>):** `{r_squared:.4f}` (closer to 1.0 is better)."
188+
)
189+
report.append(
190+
f"- **95% Confidence Interval for exponent 'k':** `[{k_ci_lower:.2f}, {k_ci_upper:.2f}]`."
191+
)
144192

145193
except RuntimeError:
146194
report.append("- Could not determine a best-fit curve for the data.")
@@ -149,67 +197,82 @@ def generate_markdown_report(results: dict) -> str:
149197

150198
return "\n".join(report)
151199

200+
152201
def run_single_test(clang_binary: str, test_type: str, n: int) -> tuple[float, float]:
    """Generates, compiles, and benchmarks a single test case.

    Returns (analysis_ms, total_ms); both are 0.0 when compilation fails.
    """
    print(f"--- Running Test: {test_type.capitalize()} with N={n} ---")

    # Pick the generator that matches the requested test shape.
    if test_type == "cycle":
        generated_code = generate_cpp_cycle_test(n)
    else:  # merge
        generated_code = generate_cpp_merge_test(n)

    # Use a temporary directory to manage the source and trace files.
    # The directory and its contents will be cleaned up automatically on exit.
    with tempfile.TemporaryDirectory() as tmpdir:
        base_name = f"test_{test_type}_{n}"
        source_file = os.path.join(tmpdir, f"{base_name}.cpp")
        trace_file = os.path.join(tmpdir, f"{base_name}.json")

        with open(source_file, "w") as f:
            f.write(generated_code)

        # -ftime-trace makes clang emit the JSON profile we mine for timings.
        clang_command = [
            clang_binary,
            "-c",
            "-o",
            "/dev/null",
            "-ftime-trace=" + trace_file,
            "-Wexperimental-lifetime-safety",
            "-std=c++17",
            source_file,
        ]
        result = subprocess.run(clang_command, capture_output=True, text=True)

        if result.returncode != 0:
            print(f"Compilation failed for N={n}!", file=sys.stderr)
            print(result.stderr, file=sys.stderr)
            # No need for manual cleanup, the 'with' statement handles it.
            return 0.0, 0.0

        # Trace durations are microseconds; callers expect milliseconds.
        lifetime_us, total_us = analyze_trace_file(trace_file)
        return lifetime_us / 1000.0, total_us / 1000.0
243+
186244

187245
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate, compile, and benchmark C++ test cases for Clang's lifetime analysis."
    )
    parser.add_argument(
        "--clang-binary", type=str, required=True, help="Path to the Clang executable."
    )

    args = parser.parse_args()

    # Problem sizes to sweep for both test shapes.
    n_values = [10, 25, 50, 75, 100, 150, 200]
    # One result bucket per test shape; filled only for successful runs.
    results = {
        shape: {"n": [], "lifetime_ms": [], "total_ms": []}
        for shape in ("cycle", "merge")
    }

    print("Running performance benchmarks...")
    for test_type in ("cycle", "merge"):
        for n in n_values:
            lifetime_ms, total_ms = run_single_test(args.clang_binary, test_type, n)
            # A zero total means compilation failed; skip that data point.
            if total_ms > 0:
                bucket = results[test_type]
                bucket["n"].append(n)
                bucket["lifetime_ms"].append(lifetime_ms)
                bucket["total_ms"].append(total_ms)
                print(
                    f"  Total: {human_readable_time(total_ms)} | Analysis: {human_readable_time(lifetime_ms)}"
                )

    print("\n\n" + "=" * 80)
    print("Generating Markdown Report...")
    print("=" * 80 + "\n")

    markdown_report = generate_markdown_report(results)
    print(markdown_report)
215-

0 commit comments

Comments
 (0)