# Composite action: runs the full cabal validate suite against one GHC version.
# Individual test steps use `continue-on-error: true` so all of them run even
# when one fails; the final "Collect test results" step fails the job if any
# test step's raw outcome was 'failure'.
#
# NOTE(review): $FLAGS, $CABAL_ARCH, env.GHC_FOR_RELEASE and
# env.GHC_FOR_SOLVER_BENCHMARKS are expected to be provided by the calling
# workflow / the cabal-setup action — confirm against the caller.
name: Validate full
description: Run a full validate on a ghc version

inputs:
  ghc:
    description: ghc version to use
    required: true
  allow-newer:
    description: allow-newer line
    required: false
  constraints:
    description: constraints line
    required: false
  static:
    description: whether to build statically
    required: false
    default: 'false'
  shell:
    description: shell to use
    required: false
    default: 'bash'
  with_cache:
    description: whether to instantiate cache
    required: false
    default: 'true'

runs:
  using: composite
  steps:
    - uses: ./.github/actions/cabal-setup
      id: cabal-setup
      with:
        shell: ${{ inputs.shell }}
        ghc: ${{ inputs.ghc }}
        allow-newer: ${{ inputs.allow-newer }}
        constraints: ${{ inputs.constraints }}
        static: ${{ inputs.static }}
        with_cache: ${{ inputs.with_cache }}

    - name: Validate build
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Build
        sh validate.sh $FLAGS -s build

    - name: Tar cabal head executable
      # BUG FIX: the `matrix` context is not available inside a composite
      # action (it belongs to the calling job), so `matrix.ghc` was always
      # empty. Use the action input instead, consistent with the conditions
      # on the upload and solver-benchmarks steps below.
      if: inputs.ghc == env.GHC_FOR_RELEASE
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Tar
        # BUG FIX: builddir must be keyed on inputs.ghc, not matrix.ghc
        # (unset in a composite action).
        CABAL_EXEC=$(cabal list-bin --builddir=dist-newstyle-validate-ghc-${{ inputs.ghc }} --project-file=cabal.validate.project cabal-install:exe:cabal)
        # We have to tar the executable to preserve executable permissions
        # see https://github.com/actions/upload-artifact/issues/38
        if [[ "${{ runner.os }}" == "Windows" ]]; then
          # `cabal list-bin` gives us a windows path but tar needs the posix one
          CABAL_EXEC=$(cygpath "$CABAL_EXEC")
        fi
        if [[ "${{ runner.os }}" == "macOS" ]]; then
          # Workaround to avoid bsdtar corrupting the executable
          # such that executing it after untar throws `cannot execute binary file`
          # see https://github.com/actions/virtual-environments/issues/2619#issuecomment-788397841
          sudo /usr/sbin/purge
        fi
        DIR=$(dirname "$CABAL_EXEC")
        FILE=$(basename "$CABAL_EXEC")
        CABAL_EXEC_TAR="cabal-head-${{ runner.os }}-$CABAL_ARCH.tar.gz"
        tar -czvf "$CABAL_EXEC_TAR" -C "$DIR" "$FILE"
        echo "CABAL_EXEC_TAR=$CABAL_EXEC_TAR" >> "$GITHUB_ENV"

    # We upload the cabal executable built with the ghc used in the release for:
    # - Reuse it in the dogfooding job (although we could use the cached build dir)
    # - Make it available in the workflow to make easier testing it locally
    - name: Upload cabal-install executable to workflow artifacts
      if: inputs.ghc == env.GHC_FOR_RELEASE
      uses: actions/upload-artifact@v4
      with:
        name: cabal-${{ runner.os }}-${{ env.CABAL_ARCH }}
        path: ${{ env.CABAL_EXEC_TAR }}

    # We want all the tests to be run even if one fails, but we want to fail the validate
    # if any of them fail. It turns out that there is a way to get at the outcome of a step
    # before `continue-on-error` is applied, so the last step uses that.
    #
    # Note that we can't use filter syntax to look for any such failure, because the
    # hackage-repo-tool install is also `continue-on-error` and its failure is legitimate
    # (see the comment there). So the final step must list all of the tests, not that I expect
    # any new ones to be added.

    - name: Validate lib-tests
      id: lib-tests
      continue-on-error: true
      env:
        # `rawSystemStdInOut reports text decoding errors`
        # test does not find ghc without the full path in windows
        # BUG FIX: there is no step with id `setup-haskell` in this action;
        # the setup step's id is `cabal-setup`. NOTE(review): confirm the
        # cabal-setup action actually forwards a `ghc-exe` output.
        GHCPATH: ${{ steps.cabal-setup.outputs.ghc-exe }}
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Validate lib-tests
        sh validate.sh $FLAGS -s lib-tests

    - name: Validate lib-suite
      id: lib-suite
      continue-on-error: true
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Validate lib-suite
        sh validate.sh $FLAGS -s lib-suite

    - name: Validate cli-tests
      id: cli-tests
      continue-on-error: true
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Validate cli-tests
        sh validate.sh $FLAGS -s cli-tests

    - name: Validate cli-suite
      id: cli-suite
      continue-on-error: true
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Validate cli-suite
        sh validate.sh $FLAGS -s cli-suite

    - name: Validate solver-benchmarks-tests
      id: solver-benchmarks-tests
      continue-on-error: true
      if: "inputs.ghc == env.GHC_FOR_SOLVER_BENCHMARKS"
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Validate solver-benchmarks-tests
        sh validate.sh $FLAGS -s solver-benchmarks-tests

    - name: Validate solver-benchmarks-run
      id: solver-benchmarks-run
      continue-on-error: true
      if: "inputs.ghc == env.GHC_FOR_SOLVER_BENCHMARKS"
      shell: ${{ inputs.shell }}
      run: |
        echo ::group::Validate solver-benchmarks-run
        sh validate.sh $FLAGS -s solver-benchmarks-run

    # `steps.<id>.outcome` is the result *before* continue-on-error is applied,
    # which is exactly what we need to fail the job on any real test failure.
    - name: Collect test results
      if: steps.lib-tests.outcome == 'failure' || steps.lib-suite.outcome == 'failure' || steps.cli-tests.outcome == 'failure' || steps.cli-suite.outcome == 'failure' || steps.solver-benchmarks-tests.outcome == 'failure' || steps.solver-benchmarks-run.outcome == 'failure'
      shell: ${{ inputs.shell }}
      run: exit 1