
Commit d7b8ed4: "all files"
1 parent 14066f2 commit d7b8ed4

File tree: 12 files changed (+964, −14 lines)


.env

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+TOKEN=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
+GITHUB_TOKEN=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

.github/workflows/bench/test.yaml

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
+name: test_benchmark
+time: 1.23

.github/workflows/cont-bench.yml

Lines changed: 76 additions & 14 deletions
@@ -53,28 +53,90 @@ jobs:
           (cd pr && ./mfc.sh bench -o bench.yaml)
           find pr -maxdepth 1 -name "*.yaml" -exec sh -c 'yq eval -o=json "$1" > "${1%.yaml}.json"' _ {} \;
 
-      - name: Create Benchmark Documentation
+
+      - name: Convert MFC to Google Benchmark Format
         run: |
-          mkdir -p pr/docs/documentation
-          cat > pr/docs/documentation/cont-bench.md << 'EOF'
-          # Continuous Benchmarking
+          python3 << 'EOF'
+          import json
+          from datetime import datetime
+
+          # Read the MFC benchmark data
+          with open('bench.json', 'r') as f:
+              mfc_data = json.load(f)
 
-          ## Overview
+          # Convert to Google Benchmark format
+          benchmarks = []
 
-          The continuous benchmarking system automatically runs performance tests on every approved pull request and main branch commit to track performance regressions and improvements over time.
+          for case_name, case_data in mfc_data['cases'].items():
+              output_summary = case_data['output_summary']
+
+              # Simulation execution time
+              if 'simulation' in output_summary and 'exec' in output_summary['simulation']:
+                  benchmarks.append({
+                      "name": f"{case_name}/simulation_time",
+                      "family_index": len(benchmarks),
+                      "per_family_instance_index": 0,
+                      "run_name": f"{case_name}/simulation_time",
+                      "run_type": "iteration",
+                      "repetitions": 1,
+                      "repetition_index": 0,
+                      "threads": 1,
+                      "iterations": 1,
+                      "real_time": output_summary['simulation']['exec'] * 1e9,
+                      "cpu_time": output_summary['simulation']['exec'] * 1e9,
+                      "time_unit": "ns"
+                  })
+
+              # Simulation grind time
+              if 'simulation' in output_summary and 'grind' in output_summary['simulation']:
+                  benchmarks.append({
+                      "name": f"{case_name}/grind_time",
+                      "family_index": len(benchmarks),
+                      "per_family_instance_index": 0,
+                      "run_name": f"{case_name}/grind_time",
+                      "run_type": "iteration",
+                      "repetitions": 1,
+                      "repetition_index": 0,
+                      "threads": 1,
+                      "iterations": 1,
+                      "real_time": output_summary['simulation']['grind'],
+                      "cpu_time": output_summary['simulation']['grind'],
+                      "time_unit": "ns"
+                  })
 
-          ## Benchmark Results
+          # Create Google Benchmark format
+          google_benchmark_data = {
+              "context": {
+                  "date": datetime.now().isoformat(),
+                  "host_name": "github-runner",
+                  "executable": "mfc_benchmark",
+                  "num_cpus": 2,
+                  "mhz_per_cpu": 2000,
+                  "cpu_scaling_enabled": False,
+                  "caches": []
+              },
+              "benchmarks": benchmarks
+          }
 
-          ### Live Dashboard
-
-          View the interactive benchmark dashboard with historical performance data:
-
-          **[🔗 Live Benchmark Results](https://mflowcode.github.io/MFC/benchmarks/)**
+          # Write the converted data
+          with open('bench-google.json', 'w') as f:
+              json.dump(google_benchmark_data, f, indent=2)
 
-          *Last updated: $(date '+%Y-%m-%d %H:%M:%S UTC')*
-          *Generated automatically by the continuous benchmarking workflow*
+          print(f"✓ Converted {len(benchmarks)} benchmark measurements")
           EOF
 
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: C++ Benchmark
+          tool: 'googlecpp'
+          output-file-path: bench-google.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          auto-push: true
+          alert-threshold: '200%'
+          comment-on-alert: true
+          fail-on-alert: true
+          alert-comment-cc-users: '@Malmahrouqi'
 
       - name: Archive Results
         uses: actions/upload-artifact@v4
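
The converter reads bench.json and keys into cases -> (case) -> output_summary -> simulation -> exec/grind. The commit itself does not include a real bench.json, so the fixture writer below is only a sketch of the assumed shape; the case name and timings are invented. Note that the converter scales exec by 1e9 (seconds to nanoseconds) but passes grind through unscaled with time_unit "ns", so grind is presumably already reported in nanoseconds. The resulting bench-google.json follows Google Benchmark's JSON layout (a context object plus a benchmarks array), which is what the googlecpp tool of benchmark-action expects.

# Hypothetical fixture for testing the converter locally; the shape is
# inferred from the keys the workflow script reads, and every value is made up.
import json

fixture = {
    "cases": {
        "example_case": {                 # invented case name
            "output_summary": {
                "simulation": {
                    "exec": 12.5,         # seconds; the converter multiplies by 1e9
                    "grind": 85.0         # passed through unscaled (assumed ns)
                }
            }
        }
    }
}

with open("bench.json", "w") as f:
    json.dump(fixture, f, indent=2)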

.github/workflows/mfc.sh

Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+#!/bin/bash
+echo "Mock benchmark running..."
+mkdir -p bench
+echo "name: test_benchmark" > bench/test.yaml
+echo "time: 1.23" >> bench/test.yaml
+echo "Mock benchmark completed"

.github/workflows/test-components.sh

Lines changed: 48 additions & 0 deletions
@@ -0,0 +1,48 @@
+#!/bin/bash
+set -e
+
+echo "=== Testing Individual Components ==="
+
+# Test 1: Check if mfc.sh exists and is executable
+echo "1. Testing mfc.sh..."
+if [ -f "./mfc.sh" ]; then
+    chmod +x ./mfc.sh
+    echo "✓ mfc.sh found and made executable"
+else
+    echo "✗ mfc.sh not found - creating mock for testing"
+    cat > ./mfc.sh << 'MOCK'
+#!/bin/bash
+echo "Mock benchmark running..."
+mkdir -p bench
+echo "name: test_benchmark" > bench/test.yaml
+echo "time: 1.23" >> bench/test.yaml
+echo "Mock benchmark completed"
+MOCK
+    chmod +x ./mfc.sh
+fi
+
+# Test 2: Run benchmark command
+echo "2. Testing benchmark execution..."
+./mfc.sh bench -o bench || echo "Benchmark failed, continuing with mock data"
+
+# Test 3: Check yq installation and YAML to JSON conversion
+echo "3. Testing YAML to JSON conversion..."
+if ! command -v yq &> /dev/null; then
+    echo "Installing yq..."
+    sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64
+    sudo chmod +x /usr/local/bin/yq
+fi
+
+# Convert any YAML files found
+find . -maxdepth 1 -name "*.yaml" -exec sh -c 'echo "Converting $1"; yq eval -o=json "$1" > "${1%.yaml}.json"' _ {} \;
+
+# Test 4: Check JSON output format
+echo "4. Validating JSON format..."
+for json_file in *.json; do
+    if [ -f "$json_file" ]; then
+        echo "Checking $json_file:"
+        python3 -c "import json; json.load(open('$json_file')); print('✓ Valid JSON')" || echo "✗ Invalid JSON"
+    fi
+done
+
+echo "=== Component Testing Complete ==="

.github/workflows/test-cont-bench.yml

Lines changed: 201 additions & 0 deletions
@@ -0,0 +1,201 @@
+name: Continuous Benchmarking
+
+on: [push, pull_request, workflow_dispatch]
+
+permissions:
+  contents: write
+  deployments: write
+  pages: write
+  id-token: write
+
+jobs:
+  self:
+    name: "Store benchmark result"
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Convert MFC to Google Benchmark Format
+        run: |
+          python3 << 'EOF'
+          import json
+          from datetime import datetime
+
+          # Read the MFC benchmark data
+          with open('bench.json', 'r') as f:
+              mfc_data = json.load(f)
+
+          # Convert to Google Benchmark format
+          benchmarks = []
+
+          for case_name, case_data in mfc_data['cases'].items():
+              output_summary = case_data['output_summary']
+
+              # Simulation execution time
+              if 'simulation' in output_summary and 'exec' in output_summary['simulation']:
+                  benchmarks.append({
+                      "name": f"{case_name}/simulation_time",
+                      "family_index": len(benchmarks),
+                      "per_family_instance_index": 0,
+                      "run_name": f"{case_name}/simulation_time",
+                      "run_type": "iteration",
+                      "repetitions": 1,
+                      "repetition_index": 0,
+                      "threads": 1,
+                      "iterations": 1,
+                      "real_time": output_summary['simulation']['exec'] * 1e9,
+                      "cpu_time": output_summary['simulation']['exec'] * 1e9,
+                      "time_unit": "ns"
+                  })
+
+              # Simulation grind time
+              if 'simulation' in output_summary and 'grind' in output_summary['simulation']:
+                  benchmarks.append({
+                      "name": f"{case_name}/grind_time",
+                      "family_index": len(benchmarks),
+                      "per_family_instance_index": 0,
+                      "run_name": f"{case_name}/grind_time",
+                      "run_type": "iteration",
+                      "repetitions": 1,
+                      "repetition_index": 0,
+                      "threads": 1,
+                      "iterations": 1,
+                      "real_time": output_summary['simulation']['grind'],
+                      "cpu_time": output_summary['simulation']['grind'],
+                      "time_unit": "ns"
+                  })
+
+          # Create Google Benchmark format
+          google_benchmark_data = {
+              "context": {
+                  "date": datetime.now().isoformat(),
+                  "host_name": "github-runner",
+                  "executable": "mfc_benchmark",
+                  "num_cpus": 2,
+                  "mhz_per_cpu": 2000,
+                  "cpu_scaling_enabled": False,
+                  "caches": []
+              },
+              "benchmarks": benchmarks
+          }
+
+          # Write the converted data
+          with open('bench-google.json', 'w') as f:
+              json.dump(google_benchmark_data, f, indent=2)
+
+          print(f"✓ Converted {len(benchmarks)} benchmark measurements")
+          EOF
+
+      - name: Create report generator script
+        run: |
+          cat > generate_report.py << 'SCRIPT'
+          import json
+          import os
+          from datetime import datetime
+
+          def generate_markdown_report():
+              # Read benchmark data
+              with open('bench-google.json', 'r') as f:
+                  data = json.load(f)
+
+              # Create directory
+              os.makedirs('docs/documentation', exist_ok=True)
+
+              # Start building content
+              lines = []
+              lines.append("# Continuous Benchmarking Results")
+              lines.append("")
+              lines.append(f"Generated on: {datetime.now().strftime('%Y-%m-%d %H:%M:%S UTC')}")
+              lines.append("")
+              lines.append("## System Information")
+              lines.append(f"- Host: {data['context']['host_name']}")
+              lines.append(f"- CPUs: {data['context']['num_cpus']}")
+              lines.append(f"- MHz per CPU: {data['context']['mhz_per_cpu']}")
+              lines.append("")
+              lines.append("## Benchmark Results")
+              lines.append("")
+              lines.append("| Test Case | Metric | Time (seconds) | Time (nanoseconds) |")
+              lines.append("|-----------|--------|----------------|-------------------|")
+
+              # Add benchmark data
+              for benchmark in data['benchmarks']:
+                  name_parts = benchmark['name'].split('/')
+                  case_name = name_parts[0]
+                  metric_name = name_parts[1] if len(name_parts) > 1 else 'unknown'
+                  time_seconds = benchmark['real_time'] / 1e9
+                  time_ns = benchmark['real_time']
+                  lines.append(f"| {case_name} | {metric_name} | {time_seconds:.6f} | {time_ns:.0f} |")
+
+              lines.append("")
+              lines.append("## Raw Data")
+              lines.append("")
+              lines.append("```json")
+              lines.append(json.dumps(data, indent=2))
+              lines.append("```")
+              lines.append("")
+              lines.append("---")
+              lines.append(f"*Last updated: {datetime.now().isoformat()}*")
+
+              # Write file
+              with open('docs/documentation/cont-bench.md', 'w') as f:
+                  f.write('\n'.join(lines))
+
+              print("✓ Generated Markdown report at docs/documentation/cont-bench.md")
+
+          if __name__ == "__main__":
+              generate_markdown_report()
+          SCRIPT
+
+      - name: Generate Markdown Report
+        run: python3 generate_report.py
+
+      - name: Commit and Push Results
+        run: |
+          git config --local user.email "mohdsaid497566@gmail.com"
+          git config --local user.name "Malmahrouqi3"
+
+          # Fetch latest changes from the remote branch
+          git fetch origin cont-bench || true
+
+          # Stage the generated file
+          git add docs/documentation/cont-bench.md
+
+          # Commit changes to have a clean working tree
+          git commit -m "Update continuous benchmarking results" || true
+
+          # Check if we're behind the remote branch
+          if git rev-list HEAD..origin/cont-bench --count | grep -q "^[1-9]"; then
+              echo "Branch is behind remote, resolving..."
+
+              # Save our changes to a temporary branch
+              git branch temp-benchmark-updates
+
+              # Get up to date with the remote branch
+              git reset --hard origin/cont-bench
+
+              # Regenerate the report with the latest code
+              python3 generate_report.py
+
+              # Add the newly generated report
+              git add docs/documentation/cont-bench.md
+
+              # Commit the changes
+              git commit -m "Update continuous benchmarking results" || true
+          fi
+
+          # Push the changes, forcing if necessary
+          git push origin HEAD:cont-bench || git push -f origin HEAD:cont-bench
+
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          name: C++ Benchmark
+          tool: 'googlecpp'
+          output-file-path: bench-google.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          auto-push: false
+          alert-threshold: '200%'
+          comment-on-alert: true
+          fail-on-alert: true
+          alert-comment-cc-users: '@Malmahrouqi'
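
Once generate_report.py has been extracted from the heredoc above, a local dry run only needs a bench-google.json containing the fields the report reads: context.host_name, context.num_cpus, context.mhz_per_cpu, and per-benchmark name and real_time. A sketch with invented numbers:

# Minimal local dry run for generate_report.py; the single benchmark entry
# mirrors the converter's output shape, and all values are made up.
import json
import subprocess

sample = {
    "context": {"host_name": "local", "num_cpus": 2, "mhz_per_cpu": 2000},
    "benchmarks": [{
        "name": "example_case/simulation_time",  # hypothetical case name
        "real_time": 12.5e9,                     # 12.5 s expressed in ns
        "cpu_time": 12.5e9,
        "time_unit": "ns",
    }],
}

with open("bench-google.json", "w") as f:
    json.dump(sample, f)

# Assumes generate_report.py sits in the working directory.
subprocess.run(["python3", "generate_report.py"], check=True)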
