
Commit 0dae55a

[MISC] fix format check error (#654)
This PR makes format.sh work as expected. Signed-off-by: wangxiyuan <wangxiyuan1007@gmail.com>
1 parent 1fce70a commit 0dae55a

17 files changed: +45 / -44 lines

csrc/kernels/pos_encoding_kernels.cpp

Lines changed: 4 additions & 4 deletions
@@ -29,7 +29,7 @@ using vllm_ascend::AccType;
 using vllm_ascend::local_mem_copy;
 template <typename scalar_t, bool isNeox> class RotaryEmbedding {
     // NOTE(ganyi): we use 512B as load stride for pipe, need to find another way to
-    // retrive this size from runtime for more Soc support
+    // retrieve this size from runtime for more Soc support
     static int constexpr loadSize = 512;
     using dst_t = scalar_t;
     using acc_t = typename AccType<scalar_t>::type;
@@ -66,7 +66,7 @@ template <typename scalar_t, bool isNeox> class RotaryEmbedding {
         pipe_->InitBuffer(inQue_, 1 /* buffer_num */, loadSize /* buffer_size */);
         pipe_->InitBuffer(inQueSinCos_, 1 /* buffer_num */, rotDim_ * sizeof(scalar_t) /* buffer_size */);
         pipe_->InitBuffer(outQue_, 1 /* buffer_num */, loadSize /* buffer_size */);
-        // 2 temperary calculation buffer
+        // 2 temporary calculation buffer
         calcTmpBufferOffset_ = 0;
         // 1 upcast buffer for bf16 (headSize)
         upcastInputBufferOffset_ = calcTmpBufferOffset_ + sizeof(acc_t) * embedDim_ * 2;
@@ -75,10 +75,10 @@ template <typename scalar_t, bool isNeox> class RotaryEmbedding {
         // 2 sin cos upcast buffer for bf16
         cosSinUpcastBufferOffset_ = upcastTempBufferOffset_ + sizeof(acc_t) * 2 * embedDim_;
         // 2. bf16 path: needs 2 cos sin upcast buffer size
-        // 3. fp16 path: needs 2 temperary calculation buffer size
+        // 3. fp16 path: needs 2 temporary calculation buffer size
         tempBufferSize_ = cosSinUpcastBufferOffset_ + 2 * embedDim_ * sizeof(acc_t);
         // need to consider upcast the bf16 to fp32, so we might need 4 buffer just in case
-        // 2 temperary buffer, 2 input buffer, 1 cos buffer, 1 sin buffer, 2 scale buffer (headSize), 2 zp
+        // 2 temporary buffer, 2 input buffer, 1 cos buffer, 1 sin buffer, 2 scale buffer (headSize), 2 zp
         // buffer(headSize int8), 1 dst_temp buffer(headSize, int32)
         pipe_->InitBuffer(calcBuf_, tempBufferSize_ /* buffer_size */);
         if constexpr (!std::is_same_v<scalar_t, acc_t>) {

docs/source/faqs.md

Lines changed: 1 addition & 1 deletion
@@ -89,7 +89,7 @@ Currently, w8a8 quantization is already supported by vllm-ascend originally on v
 
 Currently, w8a8 DeepSeek is working in process: [support AscendW8A8 quantization](https://github.com/vllm-project/vllm-ascend/pull/511)
 
-Please run DeepSeek with BF16 now, follwing the [Multi-Node DeepSeek inferencing tutorail](https://vllm-ascend.readthedocs.io/en/main/tutorials/multi_node.html)
+Please run DeepSeek with BF16 now, following the [Multi-Node DeepSeek inferencing tutorail](https://vllm-ascend.readthedocs.io/en/main/tutorials/multi_node.html)
 
 ### 12. There is not output in log when loading models using vllm-ascend, How to solve it?
 

docs/source/installation.md

Lines changed: 1 addition & 1 deletion
@@ -127,7 +127,7 @@ apt update -y
 apt install -y gcc g++ cmake libnuma-dev wget git
 ```
 
-**[Optinal]** Config the extra-index of `pip` if you are working on a **x86** machine, so that the torch with cpu could be found:
+**[Optional]** Config the extra-index of `pip` if you are working on a **x86** machine, so that the torch with cpu could be found:
 
 ```bash
 pip config set global.extra-index-url https://download.pytorch.org/whl/cpu/
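The installation note above points pip at a CPU-only PyTorch wheel index for x86 hosts. As a quick sanity check, the setting can be read back after it is written; this sketch assumes a pip version new enough to ship the `config` subcommand:

```bash
# Set the extra index (as in the doc) and read it back to confirm it took effect.
pip config set global.extra-index-url https://download.pytorch.org/whl/cpu/
pip config get global.extra-index-url   # should print the URL above
```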

examples/disaggregated_prefill_hccl.py

Lines changed: 1 addition & 1 deletion
@@ -84,7 +84,7 @@ def run_decode(prefill_done):
         gpu_memory_utilization=0.8,
         tensor_parallel_size=2)
 
-    # Wait for the producer to start the comsumer
+    # Wait for the producer to start the consumer
     print("Waiting for prefill node to finish...")
     prefill_done.wait()

examples/offline_inference_audio_language.py

Lines changed: 1 addition & 1 deletion
@@ -93,7 +93,7 @@ def main(args):
     inputs = {"prompt": prompt, "multi_modal_data": mm_data}
     if args.num_prompts > 1:
         # Batch inference
-        inputs = [inputs] * args.num_prompts
+        inputs = [inputs] * args.num_prompts  # type: ignore
 
     outputs = llm.generate(inputs, sampling_params=sampling_params)

format.sh

Lines changed: 16 additions & 17 deletions
@@ -116,6 +116,7 @@ format_all() {
     yapf --in-place "${YAPF_FLAGS[@]}" "${YAPF_EXCLUDES[@]}" .
 }
 
+echo 'vllm-ascend yapf:'
 ## This flag formats individual files. --files *must* be the first command line
 ## arg to use this option.
 if [[ "$1" == '--files' ]]; then
@@ -128,12 +129,12 @@ else
     # Format only the files that changed in last commit.
     format_changed
 fi
-echo 'vLLM yapf: Done'
+echo 'vllm-ascend yapf: Done'
 
 # Run mypy
-echo 'vLLM mypy:'
+echo 'vllm-ascend mypy:'
 tools/mypy.sh
-echo 'vLLM mypy: Done'
+echo 'vllm-ascend mypy: Done'
 
 
 # If git diff returns a file that is in the skip list, the file may be checked anyway:
@@ -172,6 +173,7 @@ spell_check_changed() {
     fi
 }
 
+echo 'vllm-ascend codespell:'
 # Run Codespell
 ## This flag runs spell check of individual files. --files *must* be the first command line
 ## arg to use this option.
@@ -185,7 +187,7 @@ else
     # Check spelling only of the files that changed in last commit.
     spell_check_changed
 fi
-echo 'vLLM codespell: Done'
+echo 'vllm-ascend codespell: Done'
 
 
 # Lint specified files
@@ -211,6 +213,7 @@ lint_changed() {
 
 }
 
+echo 'vllm-ascend ruff:'
 # Run Ruff
 ### This flag lints individual files. --files *must* be the first command line
 ### arg to use this option.
@@ -224,7 +227,7 @@ else
     # Format only the files that changed in last commit.
     lint_changed
 fi
-echo 'vLLM ruff: Done'
+echo 'vllm-ascend ruff: Done'
 
 # check spelling of specified files
 isort_check() {
@@ -251,6 +254,7 @@ isort_check_changed() {
     fi
 }
 
+echo 'vllm-ascend isort:'
 # Run Isort
 # This flag runs spell check of individual files. --files *must* be the first command line
 # arg to use this option.
@@ -264,18 +268,13 @@ else
     # Check spelling only of the files that changed in last commit.
     isort_check_changed
 fi
-echo 'vLLM isort: Done'
+echo 'vllm-ascend isort: Done'
 
 # Clang-format section
 # Exclude some files for formatting because they are vendored
 # NOTE: Keep up to date with .github/workflows/clang-format.yml
 CLANG_FORMAT_EXCLUDES=(
-    'csrc/moe/topk_softmax_kernels.cu'
-    'csrc/quantization/gguf/ggml-common.h'
-    'csrc/quantization/gguf/dequantize.cuh'
-    'csrc/quantization/gguf/vecdotq.cuh'
-    'csrc/quantization/gguf/mmq.cuh'
-    'csrc/quantization/gguf/mmvq.cuh'
+    'csrc/kernels/pos_encoding_kernels.cpp'
 )
 
 # Format specified files with clang-format
@@ -315,15 +314,15 @@ elif [[ "$1" == '--all' ]]; then
 else
     clang_format_changed
 fi
-echo 'vLLM clang-format: Done'
+echo 'vllm-ascend clang-format: Done'
 
-echo 'vLLM actionlint:'
+echo 'vllm-ascend actionlint:'
 tools/actionlint.sh -color
-echo 'vLLM actionlint: Done'
+echo 'vllm-ascend actionlint: Done'
 
-echo 'vLLM shellcheck:'
+echo 'vllm-ascend shellcheck:'
 tools/shellcheck.sh
-echo 'vLLM shellcheck: Done'
+echo 'vllm-ascend shellcheck: Done'
 
 echo 'excalidraw png check:'
 tools/png-lint.sh
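The net effect of the format.sh changes is that every tool section now prints a matching 'vllm-ascend <tool>:' / 'vllm-ascend <tool>: Done' banner, and the clang-format exclude list is trimmed to this repo's own kernel file instead of the upstream vLLM CUDA/gguf sources. A minimal sketch of that banner pattern is below; the yapf invocation is only a placeholder for the script's real --files/--all/changed-files dispatch.

```bash
#!/bin/bash
# Sketch of the per-tool banner pattern used throughout format.sh.
# The command between the banners is a stand-in; the real script picks
# format_files / format_all / format_changed based on "$1".
echo 'vllm-ascend yapf:'
yapf --in-place --recursive .   # placeholder invocation
echo 'vllm-ascend yapf: Done'

echo 'vllm-ascend mypy:'
tools/mypy.sh
echo 'vllm-ascend mypy: Done'
```

Typical invocations, matching the `--files`/`--all` branches visible in the hunks above, are `./format.sh` (changed files only), `./format.sh --all`, or `./format.sh --files <path>` with `--files` as the first argument.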

tests/singlecard/spec_decode/e2e/test_medusa_correctness.py

Lines changed: 1 addition & 1 deletion
@@ -236,7 +236,7 @@ def test_medusa_e2e_greedy_logprobs(vllm_runner, common_llm_kwargs,
 
 # TODO: There is a problem with the preemptive scheduling in the current
 # version, which makes this case fail. Please release this case after the
-# preemptive scheduling preblem is solved.
+# preemptive scheduling problem is solved.
 # @pytest.mark.parametrize(
 #     "common_llm_kwargs",
 #     [{

tests/singlecard/spec_decode/e2e/test_mlp_correctness.py

Lines changed: 2 additions & 2 deletions
@@ -296,7 +296,7 @@ def test_mlp_e2e_seeded_correctness(vllm_runner, common_llm_kwargs,
 
 # TODO: There is a problem with the preemptive scheduling in the current
 # version, which makes this case fail. Please release this case after the
-# preemptive scheduling preblem is solved.
+# preemptive scheduling problem is solved.
 # @pytest.mark.parametrize(
 #     "common_llm_kwargs",
 #     [{
@@ -352,7 +352,7 @@ def test_mlp_e2e_seeded_correctness(vllm_runner, common_llm_kwargs,
 
 # TODO: There is a problem with the preemptive scheduling in the current
 # version, which makes this case fail. Please release this case after the
-# preemptive scheduling preblem is solved.
+# preemptive scheduling problem is solved.
 # @pytest.mark.parametrize(
 #     "common_llm_kwargs",
 #     [{

tests/singlecard/spec_decode/e2e/test_ngram_correctness.py

Lines changed: 1 addition & 1 deletion
@@ -175,7 +175,7 @@ def test_ngram_e2e_greedy_logprobs(vllm_runner, common_llm_kwargs,
 
 # TODO: There is a problem with the preemptive scheduling in the current
 # version, which makes this case fail. Please release this case after the
-# preemptive scheduling preblem is solved.
+# preemptive scheduling problem is solved.
 # @pytest.mark.parametrize(
 #     "common_llm_kwargs",
 #     [{

tools/actionlint.sh

Lines changed: 2 additions & 1 deletion
@@ -18,6 +18,7 @@
 # This file is a part of the vllm-ascend project.
 # Adapted from https://github.com/vllm-project/vllm/tree/main/tools
 #
+export SHELLCHECK_OPTS="--exclude=SC2046,SC2006"
 
 if command -v actionlint &> /dev/null; then
     actionlint .github/workflows/*.yml .github/workflows/*.yaml
@@ -29,4 +30,4 @@ fi
 
 # download a binary to the current directory - v1.7.3
 bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/aa0a7be8e566b096e64a5df8ff290ec24fa58fbc/scripts/download-actionlint.bash)
-./actionlint .github/workflows/*.yml .github/workflows/*.yaml
+./actionlint .github/workflows/*.yml .github/workflows/*.yaml
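The added `export SHELLCHECK_OPTS` line relies on shellcheck reading default options from that environment variable, so the shellcheck pass actionlint applies to `run:` steps skips SC2046 (word splitting from unquoted command substitution) and SC2006 (legacy backticks). A small sketch of the same effect when invoking shellcheck directly; the target path is hypothetical, for illustration only:

```bash
# shellcheck picks up default flags from SHELLCHECK_OPTS, so these two
# warning classes are suppressed without per-file directives.
export SHELLCHECK_OPTS="--exclude=SC2046,SC2006"
shellcheck tools/example.sh   # hypothetical script path
```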
