import sys
import argparse
import subprocess
import json
import os
from datetime import datetime

import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import t
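
# Typical invocation (the script filename here is illustrative; the flags are
# defined in the argument parser at the bottom of this file):
#   python benchmark_lifetime_analysis.py --clang-binary /path/to/clang \
#       --output-dir benchmark_results
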
def generate_cpp_cycle_test(n: int) -> str:
    """
    Generates a C++ snippet with N pointers rotated inside a while loop.
    This pattern tests how quickly the dataflow analysis converges to its
    fixed point.

    Example (n=4):
        struct MyObj { int id; ~MyObj() {} };

        void long_cycle_4(bool condition) {
          MyObj v1{1};
          MyObj v2{1};
          MyObj v3{1};
          MyObj v4{1};

          MyObj* p1 = &v1;
          MyObj* p2 = &v2;
          MyObj* p3 = &v3;
          MyObj* p4 = &v4;

          while (condition) {
            MyObj* temp = p1;
            p1 = p2;
            p2 = p3;
            p3 = p4;
            p4 = temp;
          }
        }
    """
    if n <= 0:
        return "// Number of variables must be positive."

    cpp_code = "struct MyObj { int id; ~MyObj() {} };\n\n"
    cpp_code += f"void long_cycle_{n}(bool condition) {{\n"
    for i in range(1, n + 1):
        cpp_code += f"  MyObj v{i}{{1}};\n"
    cpp_code += "\n"
    for i in range(1, n + 1):
        cpp_code += f"  MyObj* p{i} = &v{i};\n"

    cpp_code += "\n  while (condition) {\n"
    # Rotate the pointers one position: p1 <- p2 <- ... <- pN <- old p1.
    cpp_code += "    MyObj* temp = p1;\n"
    for i in range(1, n):
        cpp_code += f"    p{i} = p{i+1};\n"
    cpp_code += f"    p{n} = temp;\n"
    cpp_code += "  }\n}\n"
    cpp_code += f"\nint main() {{ long_cycle_{n}(false); return 0; }}\n"
    return cpp_code


def generate_cpp_merge_test(n: int) -> str:
    """
    Generates a C++ function with N independent if statements that all merge
    at a single point. This pattern specifically stresses the performance of
    the 'LifetimeLattice::join' operation.

    Example (n=4):
        struct MyObj { int id; ~MyObj() {} };

        void conditional_merges_4(bool condition) {
          MyObj v1, v2, v3, v4;
          MyObj *p1 = nullptr, *p2 = nullptr, *p3 = nullptr, *p4 = nullptr;

          if(condition) { p1 = &v1; }
          if(condition) { p2 = &v2; }
          if(condition) { p3 = &v3; }
          if(condition) { p4 = &v4; }
        }
    """
    if n <= 0:
        return "// Number of variables must be positive."

    cpp_code = "struct MyObj { int id; ~MyObj() {} };\n\n"
    cpp_code += f"void conditional_merges_{n}(bool condition) {{\n"
    decls = [f"v{i}" for i in range(1, n + 1)]
    cpp_code += f"  MyObj {', '.join(decls)};\n"
    ptr_decls = [f"*p{i} = nullptr" for i in range(1, n + 1)]
    cpp_code += f"  MyObj {', '.join(ptr_decls)};\n\n"

    for i in range(1, n + 1):
        cpp_code += f"  if(condition) {{ p{i} = &v{i}; }}\n"

    cpp_code += "}\n"
    cpp_code += f"\nint main() {{ conditional_merges_{n}(false); return 0; }}\n"
    return cpp_code


def analyze_trace_file(trace_path: str) -> tuple[float, float]:
    """
    Parses the -ftime-trace JSON output to find durations.

    Returns:
        A tuple of (lifetime_analysis_duration_us, total_clang_duration_us),
        or (0.0, 0.0) if the trace file cannot be read or parsed.
    """
    lifetime_duration = 0.0
    total_duration = 0.0
    try:
        with open(trace_path, "r") as f:
            trace_data = json.load(f)
        for event in trace_data.get("traceEvents", []):
            if event.get("name") == "LifetimeSafetyAnalysis":
                lifetime_duration += float(event.get("dur", 0))
            if event.get("name") == "ExecuteCompiler":
                total_duration += float(event.get("dur", 0))
    except (IOError, json.JSONDecodeError) as e:
        print(f"Error reading or parsing trace file {trace_path}: {e}", file=sys.stderr)
        return 0.0, 0.0
    return lifetime_duration, total_duration
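

# For reference: -ftime-trace emits Chrome Trace Event JSON, roughly of the
# shape below (field values are illustrative), with durations ("dur")
# reported in microseconds. analyze_trace_file sums the durations of the
# matching events.
#   {"traceEvents": [
#       {"ph": "X", "name": "ExecuteCompiler", "dur": 1234567, ...},
#       {"ph": "X", "name": "LifetimeSafetyAnalysis", "dur": 8901, ...}]}
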

def power_law(n, c, k):
    """Represents the power law function: y = c * n^k"""
    return c * np.power(n, k)
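

# A minimal cross-check for the curve_fit result used below (not wired into
# the report generator): a straight-line fit in log-log space recovers the
# exponent directly, since y = c * n^k implies log y = log c + k * log n.
# The helper name is our own; it assumes strictly positive inputs.
def fit_exponent_loglog(n_values, y_values) -> float:
    """Returns the slope of a least-squares line through (log n, log y)."""
    slope, _intercept = np.polyfit(
        np.log(np.asarray(n_values, dtype=float)),
        np.log(np.asarray(y_values, dtype=float)),
        1,
    )
    return float(slope)
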

def human_readable_time(ms: float) -> str:
    """Converts milliseconds to a human-readable string (ms or s)."""
    if ms >= 1000:
        return f"{ms / 1000:.2f} s"
    return f"{ms:.2f} ms"


def generate_markdown_report(results: dict) -> str:
    """Generates a Markdown-formatted report from the benchmark results."""
    report = []
    # astimezone() attaches the local timezone so that %Z is not empty.
    timestamp = datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S %Z")
    report.append("# Lifetime Analysis Performance Report")
    report.append(f"> Generated on: {timestamp}")
    report.append("\n---\n")

    for test_name, data in results.items():
        title = data["title"]
        report.append(f"## Test Case: {title}")
        report.append("")

        # Table header
        report.append("| N | Analysis Time | Total Clang Time |")
        report.append("|:----|--------------:|-----------------:|")

        # Table rows
        n_data = np.array(data["n"])
        analysis_data = np.array(data["lifetime_ms"])
        total_data = np.array(data["total_ms"])
        for i in range(len(n_data)):
            analysis_str = human_readable_time(analysis_data[i])
            total_str = human_readable_time(total_data[i])
            report.append(f"| {n_data[i]:<3} | {analysis_str:>13} | {total_str:>16} |")

        report.append("")

        # Complexity analysis
        report.append("**Complexity Analysis:**")
        try:
            # Fitting two parameters needs at least 3 points for a
            # meaningful confidence interval.
            if len(n_data) < 3:
                raise ValueError("Not enough data points to perform curve fitting.")

            popt, pcov = curve_fit(
                power_law, n_data, analysis_data, p0=[0, 2], maxfev=5000
            )
            _, k = popt

            # 95% confidence interval for the exponent k, using the
            # t-distribution with (num points - num parameters) degrees
            # of freedom.
            alpha = 0.05
            dof = max(0, len(n_data) - len(popt))
            t_val = t.ppf(1.0 - alpha / 2.0, dof)
            # Standard errors of the parameters are the square roots of
            # the diagonal of the covariance matrix.
            perr = np.sqrt(np.diag(pcov))
            k_stderr = perr[1]
            k_ci_lower = k - t_val * k_stderr
            k_ci_upper = k + t_val * k_stderr

            report.append(
                f"- The performance for this case scales approx. as **O(n<sup>{k:.2f}</sup>)**."
            )
            report.append(
                f"- **95% Confidence interval for exponent:** `[{k_ci_lower:.2f}, {k_ci_upper:.2f}]`."
            )

        except (RuntimeError, ValueError) as e:
            report.append(f"- Could not determine a best-fit curve for the data: {e}")

        report.append("\n---\n")

    return "\n".join(report)


def run_single_test(
    clang_binary: str, output_dir: str, test_name: str, generator_func, n: int
) -> tuple[float, float]:
    """Generates, compiles, and benchmarks a single test case.

    Returns (lifetime_analysis_ms, total_clang_ms), or (0.0, 0.0) on failure.
    """
    print(f"--- Running Test: {test_name.capitalize()} with N={n} ---")

    generated_code = generator_func(n)

    base_name = f"test_{test_name}_{n}"
    source_file = os.path.join(output_dir, f"{base_name}.cpp")
    trace_file = os.path.join(output_dir, f"{base_name}.json")

    with open(source_file, "w") as f:
        f.write(generated_code)

    clang_command = [
        clang_binary,
        "-c",
        "-o",
        os.devnull,  # Discard the object file; only the time trace is needed.
        "-ftime-trace=" + trace_file,
        "-Wexperimental-lifetime-safety",
        "-std=c++17",
        source_file,
    ]

    result = subprocess.run(clang_command, capture_output=True, text=True)

    if result.returncode != 0:
        print(f"Compilation failed for N={n}!", file=sys.stderr)
        print(result.stderr, file=sys.stderr)
        return 0.0, 0.0

    lifetime_us, total_us = analyze_trace_file(trace_file)

    # The trace reports microseconds; convert to milliseconds.
    return lifetime_us / 1000.0, total_us / 1000.0
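

# Illustrative helper (not wired into the driver below): single compiler
# invocations are noisy, so taking the minimum over a few repeats gives a
# steadier estimate. The repeat count of 3 is an arbitrary assumption.
def run_test_repeated(
    clang_binary: str, output_dir: str, test_name: str, generator_func, n: int,
    repeats: int = 3,
) -> tuple[float, float]:
    samples = [
        run_single_test(clang_binary, output_dir, test_name, generator_func, n)
        for _ in range(repeats)
    ]
    return min(s[0] for s in samples), min(s[1] for s in samples)
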

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate, compile, and benchmark C++ test cases for Clang's lifetime analysis."
    )
    parser.add_argument(
        "--clang-binary", type=str, required=True, help="Path to the Clang executable."
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        default="benchmark_results",
        help="Directory to save persistent benchmark files. (Default: ./benchmark_results)",
    )

    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)
    print(f"Benchmark files will be saved in: {os.path.abspath(args.output_dir)}\n")

    test_configurations = [
        {
            "name": "cycle",
            "title": "Pointer Cycle in Loop",
            "generator_func": generate_cpp_cycle_test,
            "n_values": [10, 25, 50, 75, 100, 150],
        },
        {
            "name": "merge",
            "title": "CFG Merges",
            "generator_func": generate_cpp_merge_test,
            "n_values": [10, 50, 100, 200, 400, 800],
        },
    ]

    results = {}

    print("Running performance benchmarks...")
    for config in test_configurations:
        test_name = config["name"]
        results[test_name] = {
            "title": config["title"],
            "n": [],
            "lifetime_ms": [],
            "total_ms": [],
        }
        for n in config["n_values"]:
            lifetime_ms, total_ms = run_single_test(
                args.clang_binary,
                args.output_dir,
                test_name,
                config["generator_func"],
                n,
            )
            if total_ms > 0:
                results[test_name]["n"].append(n)
                results[test_name]["lifetime_ms"].append(lifetime_ms)
                results[test_name]["total_ms"].append(total_ms)
                print(
                    f"  Total: {human_readable_time(total_ms)} | Analysis: {human_readable_time(lifetime_ms)}"
                )

    print("\n\n" + "=" * 80)
    print("Generating Markdown Report...")
    print("=" * 80 + "\n")

    markdown_report = generate_markdown_report(results)
    print(markdown_report)