
Commit 3f2cd6e

q10 authored and facebook-github-bot committed
Update release versioning to 1.3 (pytorch#4408)
Summary:
X-link: facebookresearch/FBGEMM#1479

- Update release versioning to 1.3 for upcoming release

Pull Request resolved: pytorch#4408

Reviewed By: spcyppt

Differential Revision: D77394007

Pulled By: q10

fbshipit-source-id: 2faa1d4d3962516863b0f5094068a22c061abc42
1 parent 97c191f commit 3f2cd6e

File tree

9 files changed (+30, -10 lines changed)


.github/workflows/fbgemm_gpu_pip.yml

Lines changed: 2 additions & 2 deletions
@@ -26,12 +26,12 @@ on:
   workflow_dispatch:
     inputs:
       pytorch_version:
-        description: PyTorch Version (e.g. '2.6.0', 'nightly', 'test')
+        description: PyTorch Version (e.g. '2.8.0', 'nightly', 'test')
         type: string
         required: true
         default: "nightly"
       fbgemm_gpu_channel_version:
-        description: FBGEMM-GPU Channel + Version (e.g. '1.2.0', 'nightly', 'test/1.2.0')
+        description: FBGEMM-GPU Channel + Version (e.g. '1.3.0', 'nightly', 'test/1.3.0')
         type: string
         required: true
         default: "nightly"
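For reference, a `workflow_dispatch` run with the updated example inputs can also be triggered through the GitHub REST API. The sketch below is hypothetical (the target `ref`, token handling, and input values are assumptions, not part of this commit):

```python
# Hypothetical sketch: dispatch the fbgemm_gpu_pip.yml workflow with the new
# example inputs via the GitHub REST API. Requires a token with workflow scope.
import os

import requests

url = (
    "https://api.github.com/repos/pytorch/FBGEMM"
    "/actions/workflows/fbgemm_gpu_pip.yml/dispatches"
)
resp = requests.post(
    url,
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",  # assumed env var
    },
    json={
        "ref": "main",  # assumed branch to run against
        "inputs": {
            "pytorch_version": "2.8.0",
            "fbgemm_gpu_channel_version": "1.3.0",
        },
    },
)
resp.raise_for_status()  # GitHub responds 204 No Content on success
```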

fbgemm_gpu/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -103,7 +103,7 @@ endif()
 # Declare CMake project
 project(
   fbgemm_gpu
-  VERSION 1.2.0
+  VERSION 1.3.0
   LANGUAGES ${project_languages})
 
 # AVX Flags Setup - must be set AFTER project declaration

fbgemm_gpu/docs/src/conf.py

Lines changed: 2 additions & 2 deletions
@@ -28,10 +28,10 @@
 author = "FBGEMM Team"
 
 # The short X.Y version.
-version = "1.2"
+version = "1.3"
 
 # The full version, including alpha/beta/rc tags
-release = "1.2.0"
+release = "1.3.0"
 
 
 # -- Path setup --------------------------------------------------------------
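`conf.py` hard-codes both strings, so each release bump touches this file. A minimal sketch of an alternative that derives them from the installed wheel (an assumption about workflow, not what this commit or the FBGEMM docs build actually does):

```python
# Hypothetical sketch: derive Sphinx's version/release from the installed
# fbgemm_gpu package so the docs pick up a version bump automatically.
from importlib.metadata import PackageNotFoundError, version as pkg_version

try:
    # The full version, including alpha/beta/rc tags, e.g. "1.3.0"
    release = pkg_version("fbgemm_gpu")
except PackageNotFoundError:
    release = "1.3.0"  # fallback when docs are built without the wheel installed

# The short X.Y version, e.g. "1.3"
version = ".".join(release.split(".")[:2])
```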

fbgemm_gpu/docs/src/fbgemm_genai/development/BuildInstructions.rst

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@ Clone the repo along with its submodules, and install ``requirements_genai.txt``
 # !! Run inside the Conda environment !!
 
 # Select a version tag
-FBGEMM_VERSION=v1.2.0
+FBGEMM_VERSION=v1.3.0
 
 # Clone the repo along with its submodules
 git clone --recursive -b ${FBGEMM_VERSION} https://github.com/pytorch/FBGEMM.git fbgemm_${FBGEMM_VERSION}

fbgemm_gpu/docs/src/fbgemm_gpu/development/BuildInstructions.rst

Lines changed: 1 addition & 1 deletion
@@ -475,7 +475,7 @@ Clone the repo along with its submodules, and install ``requirements.txt``:
 # !! Run inside the Conda environment !!
 
 # Select a version tag
-FBGEMM_VERSION=v1.2.0
+FBGEMM_VERSION=v1.3.0
 
 # Clone the repo along with its submodules
 git clone --recursive -b ${FBGEMM_VERSION} https://github.com/pytorch/FBGEMM.git fbgemm_${FBGEMM_VERSION}

fbgemm_gpu/docs/src/general/Releases.rst

Lines changed: 3 additions & 0 deletions
@@ -14,6 +14,9 @@ older than the one that the FBGEMM release corresponds to.
 | FBGEMM Release  | Corresponding    | Supported        | Supported      | Supported CUDA     | (Experimental) Supported  | (Experimental) Supported  |
 |                 | PyTorch Release  | Python Versions  | CUDA Versions  | Architectures      | ROCm Versions             | ROCm Architectures        |
 +=================+==================+==================+================+====================+===========================+===========================+
+| 1.3.0           | 2.8.x            | 3.9, 3.10, 3.11, | 12.6, 12.8,    | 7.0, 8.0, 9.0a,    | 6.3, 6.4                  | gfx908, gfx90a, gfx942    |
+|                 |                  | 3.12, 3.13       | 12.9           | 10.0a, 12.0a       |                           |                           |
++-----------------+------------------+------------------+----------------+--------------------+---------------------------+---------------------------+
 | 1.2.0           | 2.7.x            | 3.9, 3.10, 3.11, | 11.8, 12.6,    | 7.0, 8.0, 9.0,     | 6.2.4, 6.3                | gfx908, gfx90a, gfx942    |
 |                 |                  | 3.12, 3.13       | 12.8           | 9.0a, 10.0a, 12.0a |                           |                           |
 +-----------------+------------------+------------------+----------------+--------------------+---------------------------+---------------------------+
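A quick way to sanity-check an environment against the row added above; this is a sketch, and it assumes the installed wheel exposes `fbgemm_gpu.__version__`:

```python
# Sketch: warn when the installed PyTorch does not match the release pairing
# documented in the compatibility table above (FBGEMM 1.3.x <-> PyTorch 2.8.x).
import warnings

import torch
import fbgemm_gpu  # assumed to expose __version__

PAIRINGS = {"1.3": "2.8", "1.2": "2.7"}  # taken from the table above

fbgemm_xy = ".".join(fbgemm_gpu.__version__.split(".")[:2])
torch_xy = ".".join(torch.__version__.split(".")[:2])

expected = PAIRINGS.get(fbgemm_xy)
if expected is not None and expected != torch_xy:
    warnings.warn(
        f"fbgemm_gpu {fbgemm_gpu.__version__} is documented against "
        f"PyTorch {expected}.x, but torch {torch.__version__} is installed"
    )
```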

fbgemm_gpu/experimental/gen_ai/README.md

Lines changed: 2 additions & 2 deletions
@@ -44,8 +44,8 @@ y = torch.ops.fbgemm.f8f8bf16_rowwise(
 
 ```bash
 # Full FBGEMM library
-pip install fbgemm-gpu==1.2.0
-pip install fbgemm-gpu==1.2.0 --index-url https://download.pytorch.org/whl/cu126
+pip install fbgemm-gpu==1.3.0
+pip install fbgemm-gpu==1.3.0 --index-url https://download.pytorch.org/whl/cu126
 
 # FBGEMM library with GenAI operator only
 pip install fbgemm-gpu-genai
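A quick post-install check for the bumped wheel; a sketch, assuming the wheel exposes `fbgemm_gpu.__version__` and that the GenAI op from the README example is present in the build:

```python
# Sketch: verify the installed fbgemm-gpu version and that the rowwise FP8 GenAI
# op referenced earlier in this README is registered.
import torch
import fbgemm_gpu  # importing registers the torch.ops.fbgemm operators

print(torch.__version__)
print(fbgemm_gpu.__version__)  # expected to report 1.3.0 for this release

# hasattr avoids raising if this particular build omits the GenAI operators
print(hasattr(torch.ops.fbgemm, "f8f8bf16_rowwise"))
```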

fbgemm_gpu/test/release/stable_ops_v1.json

Lines changed: 17 additions & 0 deletions
@@ -52,6 +52,23 @@
       "torch.ops.fbgemm.segment_sum_csr":"fbgemm::segment_sum_csr(SymInt batch_size, Tensor csr_seg, Tensor values) -> Tensor",
       "torch.ops.fbgemm.keyed_jagged_index_select_dim1":"fbgemm::keyed_jagged_index_select_dim1(Tensor values, Tensor lengths, Tensor offsets, Tensor indices, SymInt batch_size, Tensor? weights=None, SymInt? selected_lengths_sum=None) -> Tensor[]"
     }
+  },
+  {
+    "version":"1.3.0",
+    "api":{
+      "torch.ops.fbgemm.jagged_to_padded_dense":"fbgemm::jagged_to_padded_dense(Tensor values, Tensor[] offsets, SymInt[] max_lengths, float padding_value = 0) -> Tensor",
+      "torch.ops.fbgemm.merge_pooled_embeddings":"fbgemm::merge_pooled_embeddings(Tensor[] pooled_embeddings, SymInt uncat_dim_size, Device target_device, SymInt cat_dim=1) -> Tensor",
+      "torch.ops.fbgemm.permute_pooled_embs_auto_grad":"fbgemm::permute_pooled_embs_auto_grad(Tensor pooled_embs, Tensor offset_dim_list, Tensor permute_list, Tensor inv_offset_dim_list, Tensor inv_permute_list) -> Tensor",
+      "torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf":"fbgemm::FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(Tensor input, int bit_rate) -> Tensor",
+      "torch.ops.fbgemm.permute_2D_sparse_data":"fbgemm::permute_2D_sparse_data(Tensor permute, Tensor lengths, Tensor values, Tensor? weights=None, SymInt? permuted_lengths_sum=None) -> (Tensor, Tensor, Tensor?)",
+      "torch.ops.fbgemm.permute_1D_sparse_data":"fbgemm::permute_1D_sparse_data(Tensor permute, Tensor lengths, Tensor values, Tensor? weights=None, SymInt? permuted_lengths_sum=None) -> (Tensor, Tensor, Tensor?)",
+      "torch.ops.fbgemm.expand_into_jagged_permute":"fbgemm::expand_into_jagged_permute(Tensor permute, Tensor input_offset, Tensor output_offset, SymInt output_size) -> Tensor",
+      "torch.ops.fbgemm.block_bucketize_sparse_features":"fbgemm::block_bucketize_sparse_features(Tensor lengths, Tensor indices, bool bucketize_pos, bool sequence, Tensor block_sizes, SymInt my_size, Tensor? weights=None, Tensor? batch_size_per_feature=None, SymInt max_B= -1, Tensor[]? block_bucketize_pos=None, bool keep_orig_idx=False, Tensor? total_num_blocks=None, Tensor? keep_orig_idx_per_feature=None) -> (Tensor, Tensor, Tensor?, Tensor?, Tensor?)",
+      "torch.ops.fbgemm.asynchronous_complete_cumsum":"fbgemm::asynchronous_complete_cumsum(Tensor t_in) -> Tensor",
+      "torch.ops.fbgemm.offsets_range":"fbgemm::offsets_range(Tensor offsets, SymInt range_size) -> Tensor",
+      "torch.ops.fbgemm.segment_sum_csr":"fbgemm::segment_sum_csr(SymInt batch_size, Tensor csr_seg, Tensor values) -> Tensor",
+      "torch.ops.fbgemm.keyed_jagged_index_select_dim1":"fbgemm::keyed_jagged_index_select_dim1(Tensor values, Tensor lengths, Tensor offsets, Tensor indices, SymInt batch_size, Tensor? weights=None, SymInt? selected_lengths_sum=None) -> Tensor[]"
+    }
   }
 ]
 }
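The schemas pinned above can be exercised directly once `fbgemm_gpu` is installed. A small sketch using two of the listed operators on CPU tensors; the expected outputs in the comments reflect my understanding of these ops, not something stated in this diff:

```python
# Sketch: call two operators whose 1.3.0 schemas are pinned above.
import torch
import fbgemm_gpu  # importing registers torch.ops.fbgemm.*

lengths = torch.tensor([2, 0, 3], dtype=torch.int64)

# fbgemm::asynchronous_complete_cumsum(Tensor t_in) -> Tensor
# Exclusive cumulative sum with the total appended: tensor([0, 2, 2, 5])
offsets = torch.ops.fbgemm.asynchronous_complete_cumsum(lengths)
print(offsets)

# fbgemm::offsets_range(Tensor offsets, SymInt range_size) -> Tensor
# Per-segment position indices: tensor([0, 1, 0, 1, 2])
print(torch.ops.fbgemm.offsets_range(offsets[:-1], int(lengths.sum())))
```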

fbgemm_gpu/test/release/stable_release_test.py

Lines changed: 1 addition & 1 deletion
@@ -155,7 +155,7 @@ def test_backwards_compatibility(self) -> None:
         """
         Test the schema compatibility of the operators against previous versions of the API.
         """
-        for version in ["1.0.0", "1.1.0", "1.2.0"]:
+        for version in ["1.0.0", "1.1.0", "1.2.0", "1.3.0"]:
             try:
                 self._test_stable_schema(version)
             except Exception as e:
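The body of `_test_stable_schema` is not shown in this diff; below is a minimal sketch of the kind of check it performs for the new "1.3.0" entry, using PyTorch's schema utilities (the file path and the top-level JSON key are assumptions):

```python
# Sketch: check that currently registered ops remain backward compatible with the
# schema strings pinned for 1.3.0 in stable_ops_v1.json.
import json

import torch
import fbgemm_gpu  # importing registers torch.ops.fbgemm.*

with open("fbgemm_gpu/test/release/stable_ops_v1.json") as f:  # assumed path
    data = json.load(f)

for entry in next(iter(data.values())):  # top-level key name is an assumption
    if entry["version"] != "1.3.0":
        continue
    for op_name, schema_str in entry["api"].items():
        pinned = torch._C.parse_schema(schema_str)
        # op_name looks like "torch.ops.fbgemm.offsets_range"
        op = getattr(torch.ops.fbgemm, op_name.rsplit(".", 1)[-1])
        current = op.default._schema
        assert current.is_backward_compatible_with(pinned), op_name
```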
