Compute Benchmarks #259

Workflow file for this run
name: Compute Benchmarks

on:
  workflow_dispatch:
    inputs:
      str_name:
        description: Formatted adapter name
        type: choice
        required: true
        default: 'level_zero'
        options:
          - level_zero
          - level_zero_v2
      preset:
        description: Preset
        type: choice
        required: true
        default: 'Minimal'
        options:
          - Minimal
          - Normal
          - Full
      pr_no:
        description: PR number (if 0, the benchmarks run on the main branch)
        type: number
        required: true
      bench_script_params:
        # To save the results of a manual run to the 'benchmark-results' branch,
        # pass '--save XXX', where XXX is the label for your results.
        description: Parameters passed to the benchmark script
        type: string
        required: false
        default: ''
      sycl_config_params:
        description: Extra params for SYCL configuration
        type: string
        required: false
        default: ''
      sycl_repo:
        description: 'Compiler repo'
        type: string
        required: true
        default: 'intel/llvm'
      sycl_commit:
        description: 'Compiler commit'
        type: string
        required: false
        default: ''
      compute_runtime_commit:
        description: 'Compute Runtime commit'
        type: string
        required: false
        default: ''
      runner:
        description: 'Runner'
        type: choice
        required: true
        default: 'level_zero_PERF'
        options:
          - level_zero_PERF
          - DSS-L0-BMG

permissions:
  contents: read

jobs:
  manual:
    name: Compute Benchmarks
    uses: ./.github/workflows/benchmarks-reusable.yml
    permissions:
      contents: write
      pull-requests: write
    with:
      str_name: ${{ inputs.str_name }}
      pr_no: ${{ inputs.pr_no }}
      bench_script_params: ${{ inputs.bench_script_params }}
      sycl_config_params: ${{ inputs.sycl_config_params }}
      sycl_repo: ${{ inputs.sycl_repo }}
      sycl_commit: ${{ inputs.sycl_commit }}
      compute_runtime_commit: ${{ inputs.compute_runtime_commit }}
      preset: ${{ inputs.preset }}
      runner: ${{ inputs.runner }}
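
Note: the `manual` job above only forwards the dispatch inputs; the build and benchmark steps themselves live in the reusable workflow it calls. GitHub Actions requires every value passed via `with:` to be declared by the called workflow under `on: workflow_call: inputs:`, and `workflow_call` inputs only support the boolean, number, and string types, so the choice inputs above arrive as plain strings. The following is a minimal, assumed sketch of that declaration; the actual benchmarks-reusable.yml in the repository may declare more inputs, different defaults, and real build steps.

# Assumed sketch of .github/workflows/benchmarks-reusable.yml (not the actual file):
name: Benchmarks Reusable

on:
  workflow_call:
    inputs:
      str_name:
        type: string          # 'level_zero' or 'level_zero_v2'
        required: true
      preset:
        type: string          # 'Minimal', 'Normal', or 'Full'
        required: true
      pr_no:
        type: number
        required: true
      bench_script_params:
        type: string
        required: false
        default: ''
      # sycl_config_params, sycl_repo, sycl_commit, compute_runtime_commit and
      # runner are declared the same way, mirroring the 'with:' block above.

jobs:
  benchmarks:
    runs-on: ubuntu-latest    # placeholder; the real workflow targets the selected runner
    steps:
      - name: Echo forwarded inputs (placeholder for the real build-and-benchmark steps)
        run: echo "Running ${{ inputs.preset }} preset for ${{ inputs.str_name }} on PR ${{ inputs.pr_no }}"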