Commit 1eb2b9c

[CI] update typos config for CI pre-commit and fix some spells (#20919)
Signed-off-by: Peter Pan <Peter.Pan@daocloud.io>
1 parent 6ebf313 commit 1eb2b9c

File tree: 19 files changed (+200, -196 lines)


.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ repos:
   - id: ruff-format
     files: ^(.buildkite|benchmarks|examples)/.*
 - repo: https://github.com/crate-ci/typos
-  rev: v1.32.0
+  rev: v1.34.0
   hooks:
   - id: typos
 - repo: https://github.com/PyCQA/isort
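To exercise the bumped hook the way CI does, it can be run on its own through pre-commit. A minimal sketch, assuming pre-commit is installed locally (it downloads typos v1.34.0 into its cache on first use):

    import subprocess

    # Run only the typos hook across the entire tree, mirroring the CI job.
    subprocess.run(["pre-commit", "run", "typos", "--all-files"], check=True)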

csrc/cpu/sgl-kernels/common.h

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ namespace {

 #define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
 #define CHECK_LAST_DIM_CONTIGUOUS(x) \
-  TORCH_CHECK(x.strides()[x.strides().size() - 1] == 1, #x "must be contiguous at last dimention")
+  TORCH_CHECK(x.strides()[x.strides().size() - 1] == 1, #x "must be contiguous at last dimension")

 #define CHECK_INPUT(x) \
   CHECK_CPU(x); \
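The macro above accepts a tensor whose last stride is 1 even if the tensor as a whole is not contiguous. A minimal Python sketch of the same predicate, using plain PyTorch for illustration:

    import torch

    x = torch.randn(4, 6)[:, :3]  # column-sliced view: strides are (6, 1)
    assert not x.is_contiguous()  # not fully contiguous...
    assert x.stride(-1) == 1      # ...but contiguous at the last dimension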

csrc/cpu/sgl-kernels/gemm.h

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ void fused_experts_int4_w4a16_kernel_impl(
     int64_t topk,
     int64_t num_tokens_post_pad);

-// shared expert implememntation for int8 w8a8
+// shared expert implementation for int8 w8a8
 template <typename scalar_t>
 void shared_expert_int8_kernel_impl(
     scalar_t* __restrict__ output,

csrc/cpu/sgl-kernels/gemm_int8.cpp

Lines changed: 1 addition & 1 deletion
@@ -41,7 +41,7 @@ struct tinygemm_kernel_nn<at::BFloat16, has_bias, BLOCK_M, BLOCK_N> {
   __m512 vd0;
   __m512 vd1[COLS];

-  // oops! 4x4 spills but luckly we use 4x2
+  // oops! 4x4 spills but luckily we use 4x2
   __m512 vbias[COLS];

   // [NOTE]: s8s8 igemm compensation in avx512-vnni

csrc/cpu/sgl-kernels/vec.h

Lines changed: 1 addition & 1 deletion
@@ -37,7 +37,7 @@ inline Vectorized<at::BFloat16> convert_from_float_ext<at::BFloat16>(const Vecto
 #define CVT_FP16_TO_FP32(a) \
   _mm512_cvtps_ph(a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC))

-// this doesn't hanel NaN.
+// this doesn't handle NaN.
 inline __m512bh cvt_e4m3_bf16_intrinsic_no_nan(__m256i fp8_vec) {
   const __m512i x = _mm512_cvtepu8_epi16(fp8_vec);

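As the fixed comment says, this fast conversion path deliberately skips NaN handling: E4M3's NaN encoding (all-ones exponent and mantissa bits) would decode as a finite value. For contrast, a small PyTorch sketch of NaN-correct conversion (illustrative only, not this kernel's code path):

    import torch

    # A correct E4M3 -> bf16 conversion preserves NaN; a converter that
    # ignores the NaN bit pattern would instead produce a finite value.
    nan8 = torch.tensor([float("nan")]).to(torch.float8_e4m3fn)
    print(nan8.to(torch.bfloat16))  # tensor([nan], dtype=torch.bfloat16)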
docker/Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -63,7 +63,7 @@ ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL=https://download.pytorch.org/whl/nightly
 ARG PIP_KEYRING_PROVIDER=disabled
 ARG UV_KEYRING_PROVIDER=${PIP_KEYRING_PROVIDER}

-# Flag enables build-in KV-connector dependency libs into docker images
+# Flag enables built-in KV-connector dependency libs into docker images
 ARG INSTALL_KV_CONNECTORS=false

 #################### BASE BUILD IMAGE ####################
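Because INSTALL_KV_CONNECTORS defaults to false, images that need the KV-connector dependency libs must opt in at build time. A minimal sketch of such a build, driven from Python for consistency with the other examples here (the image tag is illustrative):

    import subprocess

    subprocess.run([
        "docker", "build",
        "--build-arg", "INSTALL_KV_CONNECTORS=true",  # opt in to connector libs
        "-f", "docker/Dockerfile",
        "-t", "vllm:kv-connectors",  # illustrative tag
        ".",
    ], check=True)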

docs/usage/v1_guide.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,7 @@ to enable simultaneous generation and embedding using the same engine instance i
106106

107107
Models using selective state-space mechanisms instead of standard transformer attention are partially supported.
108108
Models that use Mamba-2 layers (e.g., `Mamba2ForCausalLM`) are supported, but models that use older Mamba-1 layers
109-
(e.g., `MambaForCausalLM`, `JambaForCausalLM`) are not yet suported. Please note that these models currently require
109+
(e.g., `MambaForCausalLM`, `JambaForCausalLM`) are not yet supported. Please note that these models currently require
110110
enforcing eager mode and disabling prefix caching in V1.
111111

112112
Models that combine Mamba-2 layers with standard attention layers are also supported (e.g., `BambaForCausalLM`,
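The guide names two constraints for these models in V1: eager mode must be enforced and prefix caching disabled. A minimal sketch of what that looks like through the Python API (the checkpoint name is only an example):

    from vllm import LLM

    llm = LLM(
        model="ibm-ai-platform/Bamba-9B",  # example hybrid Mamba-2 checkpoint
        enforce_eager=True,                # required: enforce eager mode
        enable_prefix_caching=False,       # required: disable prefix caching
    )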

pyproject.toml

Lines changed: 183 additions & 0 deletions
@@ -174,3 +174,186 @@ respect-ignore-files = true

 [tool.ty.environment]
 python = "./.venv"
+
+[tool.typos.files]
+# these files may be written in non english words
+extend-exclude = ["tests/models/fixtures/*", "tests/prompts/*",
+                  "benchmarks/sonnet.txt", "tests/lora/data/*", "build/*",
+                  "vllm/third_party/*"]
+ignore-hidden = true
+ignore-files = true
+ignore-dot = true
+ignore-vcs = true
+ignore-global = true
+ignore-parent = true
+
+[tool.typos.default]
+binary = false
+check-filename = false
+check-file = true
+unicode = true
+ignore-hex = true
+identifier-leading-digits = false
+locale = "en"
+extend-ignore-identifiers-re = ["NVML_*", ".*Unc.*", ".*_thw",
+                                ".*UE8M0.*", ".*[UE4M3|ue4m3].*", ".*eles.*",
+                                ".*[Tt]h[rR].*"]
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.default.extend-identifiers]
+bbc5b7ede = "bbc5b7ede"
+womens_doubles = "womens_doubles"
+v_2nd = "v_2nd"
+# splitted_input = "splitted_input"
+NOOPs = "NOOPs"
+typ = "typ"
+nin_shortcut = "nin_shortcut"
+UperNetDecoder = "UperNetDecoder"
+subtile = "subtile"
+cudaDevAttrMaxSharedMemoryPerBlockOptin = "cudaDevAttrMaxSharedMemoryPerBlockOptin"
+SFOuput = "SFOuput"
+# huggingface transformers repo uses these words
+depthwise_seperable_out_channel = "depthwise_seperable_out_channel"
+DepthWiseSeperableConv1d = "DepthWiseSeperableConv1d"
+depthwise_seperable_CNN = "depthwise_seperable_CNN"
+
+[tool.typos.default.extend-words]
+iy = "iy"
+tendencias = "tendencias"
+# intel cpu features
+tme = "tme"
+dout = "dout"
+Pn = "Pn"
+arange = "arange"
+
+[tool.typos.type.py]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.py.extend-identifiers]
+arange = "arange"
+NDArray = "NDArray"
+EOFError = "EOFError"
+fo = "fo"
+ba = "ba"
+
+[tool.typos.type.py.extend-words]
+
+[tool.typos.type.cpp]
+extend-glob = ["*.cu"]
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.cpp.extend-identifiers]
+countr_one = "countr_one"
+k_ot = "k_ot"
+ot = "ot"
+
+[tool.typos.type.cpp.extend-words]
+
+[tool.typos.type.rust]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.rust.extend-identifiers]
+flate2 = "flate2"
+
+[tool.typos.type.rust.extend-words]
+ser = "ser"
+
+[tool.typos.type.lock]
+extend-glob = []
+check-file = false
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.lock.extend-identifiers]
+
+[tool.typos.type.lock.extend-words]
+
+[tool.typos.type.jl]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.jl.extend-identifiers]
+
+[tool.typos.type.jl.extend-words]
+modul = "modul"
+egals = "egals"
+usig = "usig"
+egal = "egal"
+
+[tool.typos.type.go]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.go.extend-identifiers]
+flate = "flate"
+
+[tool.typos.type.go.extend-words]
+
+[tool.typos.type.css]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.css.extend-identifiers]
+nd = "nd"
+
+[tool.typos.type.css.extend-words]
+
+[tool.typos.type.man]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.man.extend-identifiers]
+Nd = "Nd"
+
+[tool.typos.type.man.extend-words]
+
+[tool.typos.type.cert]
+extend-glob = []
+check-file = false
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.cert.extend-identifiers]
+
+[tool.typos.type.cert.extend-words]
+
+[tool.typos.type.sh]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.sh.extend-identifiers]
+ot = "ot"
+
+[tool.typos.type.sh.extend-words]
+
+[tool.typos.type.vimscript]
+extend-glob = []
+extend-ignore-identifiers-re = []
+extend-ignore-words-re = []
+extend-ignore-re = []
+
+[tool.typos.type.vimscript.extend-identifiers]
+windo = "windo"
+
+[tool.typos.type.vimscript.extend-words]
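One subtlety in the config above: extend-ignore-identifiers-re takes regular expressions, not globs, so an entry like .*Unc.* suppresses any identifier containing that substring. A rough Python approximation of the matching semantics, for intuition only (typos itself is implemented in Rust and uses its own regex engine):

    import re

    patterns = [r"NVML_*", r".*Unc.*", r".*_thw", r".*UE8M0.*"]

    def is_ignored(identifier: str) -> bool:
        # An identifier is skipped if any configured pattern matches it.
        return any(re.search(p, identifier) for p in patterns)

    print(is_ignored("NVML_SUCCESS"))  # True  (matches NVML_*)
    print(is_ignored("tensor_thw"))    # True  (matches .*_thw)
    print(is_ignored("recieve_buf"))   # False (still flagged as a typo)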

tests/kernels/moe/modular_kernel_tools/common.py

Lines changed: 1 addition & 1 deletion
@@ -416,7 +416,7 @@ def make_hidden_states(
     # We dequant and use that as hidden_states so the tests are stable.
     # quantizing and dequantizing yield slightly different results
     # depending on the hardware. Here we, quantize and dequantize
-    # first - so further quantize and dequantize will yeild the same
+    # first - so further quantize and dequantize will yield the same
     # values.
    if config.is_per_tensor_act_quant:
        a_q, a_scales = ops.scaled_fp8_quant(
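The comment explains why the test quantizes and dequantizes once up front: after one round trip, the values sit exactly on the quantization grid, so any further round trip reproduces them exactly regardless of hardware. A small sketch of that idempotence using plain fp8 casts (the test itself uses vLLM's ops.scaled_fp8_quant):

    import torch

    x = torch.randn(4, 8)
    q1 = x.to(torch.float8_e4m3fn).to(torch.float32)   # first round trip
    q2 = q1.to(torch.float8_e4m3fn).to(torch.float32)  # second round trip
    assert torch.equal(q1, q2)  # stable, even though q1 != x in general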

tests/kernels/moe/test_deepgemm.py

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ def run_single_case(m, n, k, topk, num_experts, block_size):
     topk_weights, topk_ids = torch.topk(router_logits, k=topk, dim=-1)
     topk_weights = torch.nn.functional.softmax(topk_weights, dim=-1)

-    # triton referrence
+    # triton reference
     out_triton = fused_experts(
         hidden_states=tokens_bf16,
         w1=w1,
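For readers unfamiliar with MoE routing, the two context lines above pick each token's top-k experts from the router logits and renormalize just the selected scores. A standalone sketch of that step:

    import torch

    router_logits = torch.randn(2, 8)  # [num_tokens, num_experts]
    topk_weights, topk_ids = torch.topk(router_logits, k=2, dim=-1)
    topk_weights = torch.nn.functional.softmax(topk_weights, dim=-1)
    print(topk_ids)      # chosen expert indices per token
    print(topk_weights)  # per-token routing weights, summing to 1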
