name: Continuous Benchmarking

on: [push, pull_request, workflow_dispatch]

permissions:
  contents: write
  deployments: write
  pages: write
  id-token: write
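# This workflow converts MFC benchmark output into Google Benchmark JSON, commits a
# Markdown report under docs/documentation, and stores the result with
# github-action-benchmark. The permission scopes above are assumed to be what those
# steps require: contents for pushing the report, and deployments/pages/id-token for
# the gh-pages publication handled by github-action-benchmark.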

jobs:
  self:
    name: "Store benchmark result"
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Convert MFC to Google Benchmark Format
        run: |
          python3 << 'EOF'
          import json
          from datetime import datetime

          # Read the MFC benchmark data
          with open('bench.json', 'r') as f:
              mfc_data = json.load(f)
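          # Assumed input shape, inferred from the fields read below; bench.json must
          # already exist in the workspace, since no step in this job generates it:
          #   {"cases": {"<case_name>": {"output_summary": {"simulation": {"exec": ..., "grind": ...}}}}}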

          # Convert to Google Benchmark format
          benchmarks = []

          for case_name, case_data in mfc_data['cases'].items():
              output_summary = case_data['output_summary']

              # Simulation execution time
              if 'simulation' in output_summary and 'exec' in output_summary['simulation']:
                  benchmarks.append({
                      "name": f"{case_name}/simulation_time",
                      "family_index": len(benchmarks),
                      "per_family_instance_index": 0,
                      "run_name": f"{case_name}/simulation_time",
                      "run_type": "iteration",
                      "repetitions": 1,
                      "repetition_index": 0,
                      "threads": 1,
                      "iterations": 1,
                      "real_time": output_summary['simulation']['exec'] * 1e9,
                      "cpu_time": output_summary['simulation']['exec'] * 1e9,
                      "time_unit": "ns"
                  })
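              # Unit handling: 'exec' above is treated as seconds and scaled to ns,
              # while 'grind' below is passed through unchanged, i.e. it is assumed
              # to already be reported in nanoseconds.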

              # Simulation grind time
              if 'simulation' in output_summary and 'grind' in output_summary['simulation']:
                  benchmarks.append({
                      "name": f"{case_name}/grind_time",
                      "family_index": len(benchmarks),
                      "per_family_instance_index": 0,
                      "run_name": f"{case_name}/grind_time",
                      "run_type": "iteration",
                      "repetitions": 1,
                      "repetition_index": 0,
                      "threads": 1,
                      "iterations": 1,
                      "real_time": output_summary['simulation']['grind'],
                      "cpu_time": output_summary['simulation']['grind'],
                      "time_unit": "ns"
                  })

          # Create Google Benchmark format
          google_benchmark_data = {
              "context": {
                  "date": datetime.now().isoformat(),
                  "host_name": "github-runner",
                  "executable": "mfc_benchmark",
                  "num_cpus": 2,
                  "mhz_per_cpu": 2000,
                  "cpu_scaling_enabled": False,
                  "caches": []
              },
              "benchmarks": benchmarks
          }

          # Write the converted data
          with open('bench-google.json', 'w') as f:
              json.dump(google_benchmark_data, f, indent=2)

          print(f"✓ Converted {len(benchmarks)} benchmark measurements")
          EOF
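          # bench-google.json is consumed twice later in this job: by
          # generate_report.py and by the github-action-benchmark step.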

      - name: Create report generator script
        run: |
          cat > generate_report.py << 'SCRIPT'
          import json
          import os
          from datetime import datetime, timezone

          def generate_markdown_report():
              # Read benchmark data
              with open('bench-google.json', 'r') as f:
                  data = json.load(f)

              # Create the output directory if it does not exist
              os.makedirs('docs/documentation', exist_ok=True)

              # Start building content
              lines = []
              lines.append("# Continuous Benchmarking Results")
              lines.append("")
              lines.append(f"Generated on: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}")
              lines.append("")
              lines.append("## System Information")
              lines.append(f"- Host: {data['context']['host_name']}")
              lines.append(f"- CPUs: {data['context']['num_cpus']}")
              lines.append(f"- MHz per CPU: {data['context']['mhz_per_cpu']}")
              lines.append("")
              lines.append("## Benchmark Results")
              lines.append("")
              lines.append("| Test Case | Metric | Time (seconds) | Time (nanoseconds) |")
              lines.append("|-----------|--------|----------------|-------------------|")

              # Add benchmark data
              for benchmark in data['benchmarks']:
                  name_parts = benchmark['name'].split('/')
                  case_name = name_parts[0]
                  metric_name = name_parts[1] if len(name_parts) > 1 else 'unknown'
                  time_seconds = benchmark['real_time'] / 1e9
                  time_ns = benchmark['real_time']
                  lines.append(f"| {case_name} | {metric_name} | {time_seconds:.6f} | {time_ns:.0f} |")

              lines.append("")
              lines.append("## Raw Data")
              lines.append("")
              lines.append("```json")
              lines.append(json.dumps(data, indent=2))
              lines.append("```")
              lines.append("")
              lines.append("---")
              lines.append(f"*Last updated: {datetime.now().isoformat()}*")

              # Write file
              with open('docs/documentation/cont-bench.md', 'w') as f:
                  f.write('\n'.join(lines))

              print("✓ Generated Markdown report at docs/documentation/cont-bench.md")

          if __name__ == "__main__":
              generate_markdown_report()
          SCRIPT

      - name: Generate Markdown Report
        run: python3 generate_report.py

      - name: Commit and Push Results
        run: |
          git config --local user.email "mohdsaid497566@gmail.com"
          git config --local user.name "Malmahrouqi3"

          # Fetch latest changes from the remote branch
          git fetch origin cont-bench || true

          # Stage the generated file
          git add docs/documentation/cont-bench.md

          # Commit changes to have a clean working tree
          git commit -m "Update continuous benchmarking results" || true

          # Check if we're behind the remote branch
          if git rev-list HEAD..origin/cont-bench --count | grep -q "^[1-9]"; then
            echo "Branch is behind remote, resolving..."

            # Save our changes to a temporary branch
            git branch temp-benchmark-updates

            # Get up to date with the remote branch
            git reset --hard origin/cont-bench

            # Regenerate the report with the latest code
            python3 generate_report.py

            # Add the newly generated report
            git add docs/documentation/cont-bench.md

            # Commit the changes
            git commit -m "Update continuous benchmarking results" || true
          fi

          # Push the changes, forcing if necessary
          git push origin HEAD:cont-bench || git push -f origin HEAD:cont-bench
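          # Note: the force-push fallback above can overwrite commits that a
          # concurrent run has pushed to cont-bench in the meantime.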

      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: C++ Benchmark
          tool: 'googlecpp'
          output-file-path: bench-google.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          auto-push: false
          alert-threshold: '200%'
          comment-on-alert: true
          fail-on-alert: true
          alert-comment-cc-users: '@Malmahrouqi'
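        # alert-threshold '200%' is understood to mean the alert fires when a benchmark
        # is at least 2x slower than its previously stored value; comment-on-alert then
        # posts a comment cc'ing the user above, and fail-on-alert fails the job. With
        # auto-push disabled, the action does not push the updated benchmark data itself.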