
Commit d6bf991

[WIP][RC] Update PyTorch to 2.8.0

Signed-off-by: Huy Do <huydhn@gmail.com>
1 parent baba038 commit d6bf991

10 files changed (+47, -43 lines)

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion

@@ -49,7 +49,7 @@ repos:
   rev: 0.6.17
   hooks:
     - id: pip-compile
-      args: [requirements/test.in, -o, requirements/test.txt, --index-strategy, unsafe-best-match, --torch-backend, cu128]
+      args: [requirements/test.in, -o, requirements/test.txt, --index-strategy, unsafe-best-match, --extra-index-url, https://download.pytorch.org/whl/test/cu128]
       files: ^requirements/test\.(in|txt)$
 - repo: local
   hooks:
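
The hook drops uv's --torch-backend shorthand and instead passes the PyTorch test index explicitly, so the RC wheels resolve from there. A minimal sketch of regenerating the lockfile by hand with the arguments the new autogenerated header in requirements/test.txt records (assuming uv 0.6.17 as pinned above):

# Regenerate the lockfile the way the hook does (sketch)
uv pip compile requirements/test.in -o requirements/test.txt --index-strategy unsafe-best-match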

CMakeLists.txt

Lines changed: 2 additions & 2 deletions

@@ -45,8 +45,8 @@ set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1
 # requirements.txt files and should be kept consistent. The ROCm torch
 # versions are derived from docker/Dockerfile.rocm
 #
-set(TORCH_SUPPORTED_VERSION_CUDA "2.7.0")
-set(TORCH_SUPPORTED_VERSION_ROCM "2.7.0")
+set(TORCH_SUPPORTED_VERSION_CUDA "2.8.0")
+set(TORCH_SUPPORTED_VERSION_ROCM "2.8.0")

 #
 # Try to find python package with an executable that exactly matches

docker/Dockerfile

Lines changed: 3 additions & 3 deletions

@@ -50,7 +50,7 @@ ARG UV_INDEX_URL=${PIP_INDEX_URL}
 ARG UV_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}

 # PyTorch provides its own indexes for standard and nightly builds
-ARG PYTORCH_CUDA_INDEX_BASE_URL=https://download.pytorch.org/whl
+ARG PYTORCH_CUDA_INDEX_BASE_URL=https://download.pytorch.org/whl/test
 ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL=https://download.pytorch.org/whl/nightly

 # PIP supports multiple authentication schemes, including keyring

@@ -376,8 +376,8 @@ RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist
 # $ # upload the wheel to a public location, e.g. https://wheels.vllm.ai/flashinfer/v0.2.6.post1/flashinfer_python-0.2.6.post1-cp39-abi3-linux_x86_64.whl

 # Allow specifying a version, Git revision or local .whl file
-ARG FLASHINFER_CUDA128_INDEX_URL="https://download.pytorch.org/whl/cu128/flashinfer"
-ARG FLASHINFER_CUDA128_WHEEL="flashinfer_python-0.2.6.post1%2Bcu128torch2.7-cp39-abi3-linux_x86_64.whl"
+ARG FLASHINFER_CUDA128_INDEX_URL="https://download.pytorch.org/whl/test/cu128/flashinfer"
+ARG FLASHINFER_CUDA128_WHEEL="flashinfer_python-0.2.6.post1%2Bcu128torch2.8-cp39-abi3-linux_x86_64.whl"
 ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git"
 ARG FLASHINFER_GIT_REF="v0.2.6.post1"
 RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
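
Because the index base is an ordinary Dockerfile ARG, a build can be pointed back at the stable index once 2.8.0 is promoted, with no further edits. A hedged sketch (the image tag is illustrative, not from this commit):

# Override the test index at build time (sketch)
docker build -f docker/Dockerfile \
  --build-arg PYTORCH_CUDA_INDEX_BASE_URL=https://download.pytorch.org/whl \
  -t vllm:torch-2.8.0 .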

pyproject.toml

Lines changed: 1 addition & 1 deletion

@@ -6,7 +6,7 @@ requires = [
   "packaging>=24.2",
   "setuptools>=77.0.3,<80.0.0",
   "setuptools-scm>=8.0",
-  "torch == 2.7.0",
+  "torch == 2.8.0",
   "wheel",
   "jinja2",
 ]

requirements/build.txt

Lines changed: 2 additions & 1 deletion

@@ -4,7 +4,8 @@ ninja
 packaging>=24.2
 setuptools>=77.0.3,<80.0.0
 setuptools-scm>=8
-torch==2.7.0
+torch==2.8.0
 wheel
 jinja2>=3.1.6
 regex
+build
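
The new build entry is the PyPA build frontend. Assuming it is here to drive standard PEP 517 wheel builds against these pins (an inference; the commit does not say), usage would look like:

# Build a wheel via the PEP 517 frontend (sketch)
python -m build --wheel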

requirements/cpu.txt

Lines changed: 5 additions & 5 deletions

@@ -7,18 +7,18 @@ numba == 0.61.2; python_version > '3.9'
 # Dependencies for CPUs
 packaging>=24.2
 setuptools>=77.0.3,<80.0.0
---extra-index-url https://download.pytorch.org/whl/cpu
+--extra-index-url https://download.pytorch.org/whl/test/cpu
 torch==2.6.0+cpu; platform_machine == "x86_64" # torch>2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218
-torch==2.7.0; platform_system == "Darwin"
-torch==2.7.0; platform_machine == "ppc64le" or platform_machine == "aarch64"
+torch==2.8.0; platform_system == "Darwin"
+torch==2.8.0; platform_machine == "ppc64le" or platform_machine == "aarch64"

 # required for the image processor of minicpm-o-2_6, this must be updated alongside torch
 torchaudio; platform_machine != "ppc64le" and platform_machine != "s390x"
-torchaudio==2.7.0; platform_machine == "ppc64le"
+torchaudio==2.8.0; platform_machine == "ppc64le"

 # required for the image processor of phi3v, this must be updated alongside torch
 torchvision; platform_machine != "ppc64le" and platform_machine != "s390x"
-torchvision==0.22.0; platform_machine == "ppc64le"
+torchvision==0.23.0; platform_machine == "ppc64le"
 datasets # for benchmark scripts

 # Intel Extension for PyTorch, only for x86_64 CPUs

requirements/cuda.txt

Lines changed: 4 additions & 4 deletions

@@ -6,9 +6,9 @@ numba == 0.61.2; python_version > '3.9'

 # Dependencies for NVIDIA GPUs
 ray[cgraph]>=2.43.0, !=2.44.* # Ray Compiled Graph, required for pipeline parallelism in V1.
-torch==2.7.0
-torchaudio==2.7.0
+torch==2.8.0
+torchaudio==2.8.0
 # These must be updated alongside torch
-torchvision==0.22.0 # Required for phi3v processor. See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version
+torchvision==0.23.0 # Required for phi3v processor. See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version
 # https://github.com/facebookresearch/xformers/releases/tag/v0.0.30
-xformers==0.0.30; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch >= 2.7
+git+https://github.com/facebookresearch/xformers@v0.0.30; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch >= 2.7
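
Since torch, torchaudio, and torchvision must move in lockstep, a quick post-install check is a cheap guard. A sketch; the expected strings match the cu128 pins recorded in requirements/test.txt:

# Confirm the matching 2.8.0 / 0.23.0 stack is installed (sketch)
python -c "import torch, torchaudio, torchvision; print(torch.__version__, torchaudio.__version__, torchvision.__version__)"
# expect: 2.8.0+cu128 2.8.0+cu128 0.23.0+cu128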

requirements/rocm-build.txt

Lines changed: 4 additions & 4 deletions

@@ -1,10 +1,10 @@
 # Common dependencies
 -r common.txt

---extra-index-url https://download.pytorch.org/whl/rocm6.2.4
-torch==2.7.0
-torchvision==0.22.0
-torchaudio==2.7.0
+--extra-index-url https://download.pytorch.org/whl/test/rocm6.3
+torch==2.8.0
+torchvision==0.23.0
+torchaudio==2.8.0

 triton==3.2
 cmake>=3.26.1,<4

requirements/test.in

Lines changed: 4 additions & 3 deletions

@@ -22,9 +22,10 @@ sentence-transformers # required for embedding tests
 soundfile # required for audio tests
 jiwer # required for audio tests
 timm # required for internvl test
-torch==2.7.0
-torchaudio==2.7.0
-torchvision==0.22.0
+--extra-index-url https://download.pytorch.org/whl/test/cu128
+torch==2.8.0
+torchaudio==2.8.0
+torchvision==0.23.0
 transformers_stream_generator # required for qwen-vl test
 mamba_ssm # required for plamo2 test
 matplotlib # required for qwen-vl test
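
The +cu128 local-version wheels live only on the PyTorch indexes, not PyPI, so installing the compiled lockfile needs the same extra index at install time. A sketch with pip (uv pip accepts the same flag):

# Install the RC test stack (sketch)
pip install -r requirements/test.txt --extra-index-url https://download.pytorch.org/whl/test/cu128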

requirements/test.txt

Lines changed: 21 additions & 19 deletions

@@ -1,5 +1,5 @@
 # This file was autogenerated by uv via the following command:
-#    uv pip compile requirements/test.in -o requirements/test.txt --index-strategy unsafe-best-match --torch-backend cu128
+#    uv pip compile requirements/test.in -o requirements/test.txt --index-strategy unsafe-best-match
 absl-py==2.1.0
   # via rouge-score
 accelerate==1.0.1

@@ -377,42 +377,44 @@ numpy==1.26.4
   #   transformers
   #   tritonclient
   #   vocos
-nvidia-cublas-cu12==12.8.3.14
+nvidia-cublas-cu12==12.8.4.1
   # via
   #   nvidia-cudnn-cu12
   #   nvidia-cusolver-cu12
   #   torch
-nvidia-cuda-cupti-cu12==12.8.57
+nvidia-cuda-cupti-cu12==12.8.90
   # via torch
-nvidia-cuda-nvrtc-cu12==12.8.61
+nvidia-cuda-nvrtc-cu12==12.8.93
   # via torch
-nvidia-cuda-runtime-cu12==12.8.57
+nvidia-cuda-runtime-cu12==12.8.90
   # via torch
-nvidia-cudnn-cu12==9.7.1.26
+nvidia-cudnn-cu12==9.10.2.21
   # via torch
-nvidia-cufft-cu12==11.3.3.41
+nvidia-cufft-cu12==11.3.3.83
   # via torch
-nvidia-cufile-cu12==1.13.0.11
+nvidia-cufile-cu12==1.13.1.3
   # via torch
-nvidia-curand-cu12==10.3.9.55
+nvidia-curand-cu12==10.3.9.90
   # via torch
-nvidia-cusolver-cu12==11.7.2.55
+nvidia-cusolver-cu12==11.7.3.90
   # via torch
-nvidia-cusparse-cu12==12.5.7.53
+nvidia-cusparse-cu12==12.5.8.93
   # via
   #   nvidia-cusolver-cu12
   #   torch
-nvidia-cusparselt-cu12==0.6.3
+nvidia-cusparselt-cu12==0.7.1
   # via torch
-nvidia-nccl-cu12==2.26.2
+nvidia-nccl-cu12==2.27.3
   # via torch
-nvidia-nvjitlink-cu12==12.8.61
+nvidia-nvjitlink-cu12==12.8.93
   # via
   #   nvidia-cufft-cu12
   #   nvidia-cusolver-cu12
   #   nvidia-cusparse-cu12
   #   torch
-nvidia-nvtx-cu12==12.8.55
+nvidia-nvshmem-cu12==3.2.5
+  # via torch
+nvidia-nvtx-cu12==12.8.90
   # via torch
 opencensus==0.11.4
   # via ray

@@ -757,7 +759,7 @@ tomli==2.2.1
   # via schemathesis
 tomli-w==1.2.0
   # via schemathesis
-torch==2.7.0+cu128
+torch==2.8.0+cu128
   # via
   #   -r requirements/test.in
   #   accelerate

@@ -776,12 +778,12 @@ torch==2.7.0+cu128
   #   torchvision
   #   vector-quantize-pytorch
   #   vocos
-torchaudio==2.7.0+cu128
+torchaudio==2.8.0+cu128
   # via
   #   -r requirements/test.in
   #   encodec
   #   vocos
-torchvision==0.22.0+cu128
+torchvision==0.23.0+cu128
   # via
   #   -r requirements/test.in
   #   timm

@@ -811,7 +813,7 @@ transformers==4.52.4
   #   transformers-stream-generator
 transformers-stream-generator==0.0.5
   # via -r requirements/test.in
-triton==3.3.0
+triton==3.4.0
   # via torch
 tritonclient==2.51.0
   # via
