diff --git a/.github/workflows/sycl-detect-changes.yml b/.github/workflows/sycl-detect-changes.yml index 00891018cc31..a4562919029e 100644 --- a/.github/workflows/sycl-detect-changes.yml +++ b/.github/workflows/sycl-detect-changes.yml @@ -64,6 +64,8 @@ jobs: - devops/scripts/install_drivers.sh devigccfg: - devops/dependencies-igc-dev.json + benchmarks: + - 'devops/scripts/benchmarks/**' perf-tests: - sycl/test-e2e/PerformanceTests/** esimd: diff --git a/.github/workflows/sycl-linux-precommit.yml b/.github/workflows/sycl-linux-precommit.yml index 52d690dbb26f..0f350b6321f3 100644 --- a/.github/workflows/sycl-linux-precommit.yml +++ b/.github/workflows/sycl-linux-precommit.yml @@ -165,6 +165,28 @@ jobs: skip_run: ${{matrix.use_igc_dev && contains(github.event.pull_request.labels.*.name, 'ci-no-devigc') || 'false'}} env: ${{ contains(needs.detect_changes.outputs.filters, 'esimd') && '{}' || '{"LIT_FILTER_OUT":"ESIMD/"}' }} + test_benchmark_scripts: + needs: [build, detect_changes] + if: | + always() && !cancelled() + && needs.build.outputs.build_conclusion == 'success' + && contains(needs.detect_changes.outputs.filters, 'benchmarks') + uses: ./.github/workflows/sycl-linux-run-tests.yml + with: + name: Benchmark suite precommit testing + runner: '["PVC_PERF"]' + image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest + image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN + target_devices: 'level_zero:gpu' + tests_selector: benchmarks + benchmark_upload_results: false + benchmark_preset: 'Minimal' + benchmark_dry_run: 'true' + repo_ref: ${{ github.sha }} + sycl_toolchain_artifact: sycl_linux_default + sycl_toolchain_archive: ${{ needs.build.outputs.artifact_archive_name }} + sycl_toolchain_decompress_command: ${{ needs.build.outputs.artifact_decompress_command }} + test-perf: needs: [build, detect_changes] if: | diff --git a/.github/workflows/sycl-linux-run-tests.yml b/.github/workflows/sycl-linux-run-tests.yml 
index bb3268bd883b..1aed6f13bf2b 100644 --- a/.github/workflows/sycl-linux-run-tests.yml +++ b/.github/workflows/sycl-linux-run-tests.yml @@ -132,6 +132,12 @@ on: type: string default: 'Minimal' required: False + benchmark_dry_run: + description: | + If 'true', do not fail the workflow when a benchmark regression is detected. + type: string + default: 'false' + required: False workflow_dispatch: inputs: @@ -335,6 +341,7 @@ jobs: upload_results: ${{ inputs.benchmark_upload_results }} save_name: ${{ inputs.benchmark_save_name }} preset: ${{ inputs.benchmark_preset }} + dry_run: ${{ inputs.benchmark_dry_run }} env: RUNNER_TAG: ${{ inputs.runner }} GITHUB_TOKEN: ${{ secrets.LLVM_SYCL_BENCHMARK_TOKEN }} diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml index 16b6529b11b0..25e85c7cde90 100644 --- a/devops/actions/run-tests/benchmark/action.yml +++ b/devops/actions/run-tests/benchmark/action.yml @@ -25,6 +25,9 @@ inputs: preset: type: string required: True + dry_run: + type: string + required: False runs: using: "composite" @@ -165,7 +168,7 @@ runs: --name "$SAVE_NAME" \ --compare-file "./llvm-ci-perf-results/results/${SAVE_NAME}_${SAVE_TIMESTAMP}.json" \ --results-dir "./llvm-ci-perf-results/results/" \ - --regression-filter '^[a-z_]+_sycl ' \ + --regression-filter '^[a-z_]+_sycl ' ${{ inputs.dry_run == 'true' && '--dry-run' || '' }} \ --verbose echo "-----" diff --git a/devops/scripts/benchmarks/compare.py b/devops/scripts/benchmarks/compare.py index 9b1a9213810b..4a20d43d9b82 100644 --- a/devops/scripts/benchmarks/compare.py +++ b/devops/scripts/benchmarks/compare.py @@ -343,6 +343,11 @@ def to_hist( help="If provided, only regressions matching provided regex will cause exit status 1.", default=None, ) + parser_avg.add_argument( + "--dry-run", + action="store_true", + help="Do not return error upon regressions.", + ) args = parser.parse_args() @@ -393,7 +398,8 @@ def print_regression(entry: dict): print("#\n# Regressions:\n#\n") for test in 
regressions_of_concern: print_regression(test) - exit(1) # Exit 1 to trigger github test failure + if not args.dry_run: + exit(1) # Exit 1 to trigger github test failure print("\nNo unexpected regressions found!") else: print("Unsupported operation: exiting.")