diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml
index c434d0d4f..20b6b2bf5 100644
--- a/.github/actions/setup/action.yml
+++ b/.github/actions/setup/action.yml
@@ -6,7 +6,7 @@ inputs:
     default: '3.9'
   torch-version:
     required: false
-    default: '2.5'
+    default: '2.6'
   cuda-version:
     required: false
     default: cpu
diff --git a/.github/workflows/linting.yml b/.github/workflows/linting.yml
index 80a691d30..652a1a9de 100644
--- a/.github/workflows/linting.yml
+++ b/.github/workflows/linting.yml
@@ -12,13 +12,16 @@ jobs:
     steps:
       - name: Checkout repository
         uses: actions/checkout@v4
+
       - name: Set up Python
         uses: actions/setup-python@v5
         with:
           python-version: 3.9
+
       - name: Install dependencies
         run: |
           pip install -e '.[full,test]' -f https://download.pytorch.org/whl/cpu
           pip list
+
       - name: Check type hints
         run: mypy
diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index b7fe56ec0..f126602e6 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -1,4 +1,4 @@
-name: Testing PyTorch 2.5
+name: Testing
 
 on:  # yamllint disable-line rule:truthy
   push:
@@ -24,6 +24,7 @@ jobs:
           - '2.3'
           - '2.4'
           - '2.5'
+          - '2.6'
           - 'nightly'
 
     steps:
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 009b3c52b..a62d539ba 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -81,11 +81,11 @@ repos:
         args: [--fix, --exit-non-zero-on-fix]
 
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.14.1
+    rev: v1.15.0
     hooks:
       - id: mypy
        name: Check types
-        additional_dependencies: [torch==2.5.0]
+        additional_dependencies: [torch==2.6.*]
        exclude: "^test/|^examples/|^benchmark/"
 
  - repo: https://github.com/executablebooks/mdformat
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 35eddc3d2..88e01f97b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,8 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
 ### Added
 
+- Added support for PyTorch 2.6 ([#494](https://github.com/pyg-team/pytorch-frame/pull/494))
+
 ### Changed
 
 ### Deprecated
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 907cd381d..2cd365c02 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,2 @@
-https://download.pytorch.org/whl/cpu/torch-2.5.0%2Bcpu-cp39-cp39-linux_x86_64.whl
+https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp39-cp39-linux_x86_64.whl
 git+https://github.com/pyg-team/pyg_sphinx_theme.git
diff --git a/torch_frame/nn/conv/excelformer_conv.py b/torch_frame/nn/conv/excelformer_conv.py
index a897fa2b7..d9d88ade1 100644
--- a/torch_frame/nn/conv/excelformer_conv.py
+++ b/torch_frame/nn/conv/excelformer_conv.py
@@ -58,6 +58,7 @@ def __init__(self, channels: int, num_cols: int, num_heads: int,
         self.lin_out = Linear(channels, channels) if num_heads > 1 else None
         self.num_heads = num_heads
         self.dropout = Dropout(dropout)
+        self.seq_ids: Tensor
         self.register_buffer('seq_ids', torch.arange(num_cols))
         self.reset_parameters()
 
diff --git a/torch_frame/nn/encoding/cyclic_encoding.py b/torch_frame/nn/encoding/cyclic_encoding.py
index 70b3981a7..11317f421 100644
--- a/torch_frame/nn/encoding/cyclic_encoding.py
+++ b/torch_frame/nn/encoding/cyclic_encoding.py
@@ -23,8 +23,11 @@ def __init__(self, out_size: int) -> None:
             raise ValueError(
                 f"out_size should be divisible by 2 (got {out_size}).")
         self.out_size = out_size
-        mult_term = torch.arange(1, self.out_size // 2 + 1)
-        self.register_buffer("mult_term", mult_term)
+        self.mult_term: Tensor
+        self.register_buffer(
+            "mult_term",
+            torch.arange(1, self.out_size // 2 + 1),
+        )
 
     def forward(self, input_tensor: Tensor) -> Tensor:
         assert torch.all((input_tensor >= 0) & (input_tensor <= 1))
diff --git a/torch_frame/nn/encoding/positional_encoding.py b/torch_frame/nn/encoding/positional_encoding.py
index 7b7de7329..eb451e130 100644
--- a/torch_frame/nn/encoding/positional_encoding.py
+++ b/torch_frame/nn/encoding/positional_encoding.py
@@ -18,9 +18,14 @@ def __init__(self, out_size: int) -> None:
             raise ValueError(
                 f"out_size should be divisible by 2 (got {out_size}).")
         self.out_size = out_size
-        mult_term = torch.pow(1 / 10000.0,
-                              torch.arange(0, self.out_size, 2) / out_size)
-        self.register_buffer("mult_term", mult_term)
+        self.mult_term: Tensor
+        self.register_buffer(
+            "mult_term",
+            torch.pow(
+                1 / 10000.0,
+                torch.arange(0, self.out_size, 2) / out_size,
+            ),
+        )
 
     def forward(self, input_tensor: Tensor) -> Tensor:
         assert torch.all(input_tensor >= 0)
diff --git a/torch_frame/nn/models/tabnet.py b/torch_frame/nn/models/tabnet.py
index db14065f3..406c6e7d0 100644
--- a/torch_frame/nn/models/tabnet.py
+++ b/torch_frame/nn/models/tabnet.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 import math
-from typing import Any
+from typing import Any, Callable, cast
 
 import torch
 import torch.nn.functional as F
@@ -257,10 +257,14 @@ def forward(self, x: Tensor) -> Tensor:
         return x
 
     def reset_parameters(self) -> None:
+        # TODO: Remove this type cast when PyTorch fixes typing issue where
+        # reset_parameters is typed as:
+        # Union[torch._tensor.Tensor, torch.nn.modules.module.Module]
+        # This issue was first observed on PyTorch 2.6.0.
         if not isinstance(self.shared_glu_block, Identity):
-            self.shared_glu_block.reset_parameters()
+            cast(Callable, self.shared_glu_block.reset_parameters)()
         if not isinstance(self.dependent, Identity):
-            self.dependent.reset_parameters()
+            cast(Callable, self.dependent.reset_parameters)()
 
 
 class GLUBlock(Module):
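
Note: the buffer changes in excelformer_conv.py, cyclic_encoding.py, and positional_encoding.py all apply the same workaround: declare the attribute's type before calling register_buffer, so that mypy sees the buffer as a Tensor rather than the Union[torch._tensor.Tensor, torch.nn.modules.module.Module] mentioned in the tabnet.py TODO. The tabnet.py change works around the same stub issue for bound reset_parameters methods. Below is a minimal sketch combining both patterns under the PyTorch 2.6 stubs; the class and attribute names are illustrative, not taken from the patch:

    from typing import Callable, cast

    import torch
    from torch import Tensor
    from torch.nn import Linear, Module


    class BufferExample(Module):  # illustrative name, not from the patch
        def __init__(self, num_cols: int) -> None:
            super().__init__()
            self.lin = Linear(num_cols, num_cols)
            # Declare the attribute type up front so mypy treats
            # `self.seq_ids` as a Tensor; `register_buffer` alone is
            # opaque to the type checker.
            self.seq_ids: Tensor
            self.register_buffer('seq_ids', torch.arange(num_cols))

        def reset_parameters(self) -> None:
            # Accessing `reset_parameters` on a submodule is typed as
            # Union[Tensor, Module] by the 2.6 stubs, so cast to a
            # callable before invoking it.
            cast(Callable, self.lin.reset_parameters)()

        def forward(self, x: Tensor) -> Tensor:
            # Registered buffers follow `.to(...)` moves and are saved in
            # the state dict, unlike plain tensor attributes.
            return self.lin(x) + self.seq_ids

For example, BufferExample(4)(torch.zeros(2, 4)) broadcasts the arange buffer across the batch; both workarounds change nothing at runtime and only inform the type checker.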