File tree Expand file tree Collapse file tree 4 files changed +12
-8
lines changed Expand file tree Collapse file tree 4 files changed +12
-8
lines changed Original file line number Diff line number Diff line change 88
88
uses : docker/build-push-action@v6
89
89
with :
90
90
platforms : linux/amd64,linux/arm64
91
+ # use the current repo path as the build context, ensure .git is contained
92
+ context : .
91
93
# only trigger when tag, branch/main push
92
94
push : ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }}
93
95
labels : ${{ steps.meta.outputs.labels }}
Original file line number Diff line number Diff line change 88
88
uses : docker/build-push-action@v6
89
89
with :
90
90
platforms : linux/amd64,linux/arm64
91
+ # use the current repo path as the build context, ensure .git is contained
92
+ context : .
91
93
# only trigger when tag, branch/main push
92
94
push : ${{ github.event_name == 'push' && github.repository_owner == 'vllm-project' }}
93
95
labels : ${{ steps.meta.outputs.labels }}
Original file line number Diff line number Diff line change @@ -31,16 +31,16 @@ RUN apt-get update -y && \
31
31
32
32
WORKDIR /workspace
33
33
34
- COPY . /workspace/vllm-ascend/
34
+ COPY . /vllm-workspace/vllm-ascend/
35
35
36
36
RUN pip config set global.index-url ${PIP_INDEX_URL}
37
37
38
38
# Install vLLM
39
39
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
40
40
ARG VLLM_TAG=v0.8.5
41
- RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm
41
+ RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
42
42
# In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
43
- RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
43
+ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
44
44
python3 -m pip uninstall -y triton && \
45
45
python3 -m pip cache purge
46
46
@@ -49,7 +49,7 @@ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -v -e /workspace/vllm/ --e
49
49
RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
50
50
source /usr/local/Ascend/nnal/atb/set_env.sh && \
51
51
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
52
- python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
52
+ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
53
53
python3 -m pip cache purge
54
54
55
55
# Install modelscope (for fast download) and ray (for multinode)
Original file line number Diff line number Diff line change @@ -30,23 +30,23 @@ RUN pip config set global.index-url ${PIP_INDEX_URL}
30
30
31
31
WORKDIR /workspace
32
32
33
- COPY . /workspace/vllm-ascend/
33
+ COPY . /vllm-workspace/vllm-ascend/
34
34
35
35
# Install vLLM
36
36
ARG VLLM_REPO=https://github.com/vllm-project/vllm.git
37
37
ARG VLLM_TAG=v0.8.5
38
38
39
- RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /workspace/vllm
39
+ RUN git clone --depth 1 $VLLM_REPO --branch $VLLM_TAG /vllm-workspace/vllm
40
40
# In x86, triton will be installed by vllm. But in Ascend, triton doesn't work correctly. we need to uninstall it.
41
- RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
41
+ RUN VLLM_TARGET_DEVICE="empty" python3 -m pip install -e /vllm-workspace/vllm/ --extra-index https://download.pytorch.org/whl/cpu/ && \
42
42
python3 -m pip uninstall -y triton && \
43
43
python3 -m pip cache purge
44
44
45
45
# Install vllm-ascend
46
46
RUN source /usr/local/Ascend/ascend-toolkit/set_env.sh && \
47
47
source /usr/local/Ascend/nnal/atb/set_env.sh && \
48
48
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/Ascend/ascend-toolkit/latest/`uname -i`-linux/devlib && \
49
- python3 -m pip install -v -e /workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
49
+ python3 -m pip install -v -e /vllm-workspace/vllm-ascend/ --extra-index https://download.pytorch.org/whl/cpu/ && \
50
50
python3 -m pip cache purge
51
51
52
52
# Install modelscope (for fast download) and ray (for multinode)
You can’t perform that action at this time.
0 commit comments