Commit 8dcc98f

Merge pull request #5 from arthw/cherry-1118
Cherry 1118
2 parents 75a3266 + a979201 · commit 8dcc98f

503 files changed: +129333, -78865 lines


.devops/full-cuda.Dockerfile

Lines changed: 11 additions & 14 deletions
@@ -1,18 +1,16 @@
 ARG UBUNTU_VERSION=22.04
-
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=11.7.1
-
+ARG CUDA_VERSION=12.6.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}

 FROM ${BASE_CUDA_DEV_CONTAINER} AS build

-# Unless otherwise specified, we make a fat build.
-ARG CUDA_DOCKER_ARCH=all
+# CUDA architecture to build for (defaults to all supported archs)
+ARG CUDA_DOCKER_ARCH=default

 RUN apt-get update && \
-    apt-get install -y build-essential python3 python3-pip git libcurl4-openssl-dev libgomp1
+    apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1

 COPY requirements.txt requirements.txt
 COPY requirements requirements
@@ -24,13 +22,12 @@ WORKDIR /app

 COPY . .

-# Set nvcc architecture
-ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
-# Enable CUDA
-ENV GGML_CUDA=1
-# Enable cURL
-ENV LLAMA_CURL=1
-
-RUN make -j$(nproc)
+# Use the default CUDA archs if not specified
+RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
+        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
+    fi && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release -j$(nproc) && \
+    cp build/bin/* .

 ENTRYPOINT ["/app/.devops/tools.sh"]
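Note: the image is now configured at build time through the CUDA_DOCKER_ARCH build argument, which is forwarded to -DCMAKE_CUDA_ARCHITECTURES, instead of the old Makefile environment variables. A minimal usage sketch; the image tags and the single-architecture value are illustrative, not part of this commit:

    # fat build, leaving CUDA_DOCKER_ARCH at "default"
    docker build -f .devops/full-cuda.Dockerfile -t llama-cpp-full-cuda .

    # restrict the build to one architecture, e.g. compute capability 8.6
    docker build -f .devops/full-cuda.Dockerfile \
        --build-arg CUDA_DOCKER_ARCH=86 \
        -t llama-cpp-full-cuda-sm86 .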

.devops/full-musa.Dockerfile

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+ARG UBUNTU_VERSION=22.04
+# This needs to generally match the container host's environment.
+ARG MUSA_VERSION=rc3.1.0
+# Target the MUSA build image
+ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
+
+FROM ${BASE_MUSA_DEV_CONTAINER} AS build
+
+RUN apt-get update && \
+    apt-get install -y build-essential cmake python3 python3-pip git libcurl4-openssl-dev libgomp1
+
+COPY requirements.txt requirements.txt
+COPY requirements requirements
+
+RUN pip install --upgrade pip setuptools wheel \
+    && pip install -r requirements.txt
+
+WORKDIR /app
+
+COPY . .
+
+RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release -j$(nproc) && \
+    cp build/bin/* .
+
+ENTRYPOINT ["/app/.devops/tools.sh"]
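This new Dockerfile mirrors the full CUDA image but targets Moore Threads GPUs through the MUSA SDK; UBUNTU_VERSION and MUSA_VERSION are plain build arguments, so a different base image can be selected with --build-arg rather than by editing the file. A build sketch, with an illustrative tag:

    docker build -f .devops/full-musa.Dockerfile -t llama-cpp-full-musa .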

.devops/full-rocm.Dockerfile

Lines changed: 3 additions & 3 deletions
@@ -11,7 +11,7 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
 # This is mostly tied to rocBLAS supported archs.
-ARG ROCM_DOCKER_ARCH=\
+ARG ROCM_DOCKER_ARCH="\
     gfx803 \
     gfx900 \
     gfx906 \
@@ -21,7 +21,7 @@ ARG ROCM_DOCKER_ARCH=\
     gfx1030 \
     gfx1100 \
     gfx1101 \
-    gfx1102
+    gfx1102"

 COPY requirements.txt requirements.txt
 COPY requirements requirements
@@ -34,7 +34,7 @@ WORKDIR /app
 COPY . .

 # Set nvcc architecture
-ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
+ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
 ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
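The quoting change lets ROCM_DOCKER_ARCH carry a space-separated list, and the build now exports it as AMDGPU_TARGETS, the name the CMake HIP build picks up, instead of GPU_TARGETS. A hedged usage sketch; the tag and the single-architecture value are illustrative:

    # default: fat build for the whole gfx list above
    docker build -f .devops/full-rocm.Dockerfile -t llama-cpp-full-rocm .

    # build for a single GPU, e.g. gfx1100 (RDNA3), to cut build time
    docker build -f .devops/full-rocm.Dockerfile \
        --build-arg ROCM_DOCKER_ARCH=gfx1100 \
        -t llama-cpp-full-rocm-gfx1100 .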

.devops/llama-cli-cann.Dockerfile

Lines changed: 44 additions & 0 deletions
@@ -0,0 +1,44 @@
+ARG ASCEND_VERSION=8.0.rc2.alpha003-910b-openeuler22.03-py3.8
+
+FROM ascendai/cann:$ASCEND_VERSION AS build
+
+WORKDIR /app
+
+COPY . .
+
+RUN yum install -y gcc g++ cmake make
+ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
+ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
+ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
+ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
+ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
+ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
+ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
+ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}
+
+# find libascend_hal.so, because the drive hasn`t been mounted.
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/runtime/lib64/stub:$LD_LIBRARY_PATH
+
+RUN echo "Building with static libs" && \
+    source /usr/local/Ascend/ascend-toolkit/set_env.sh --force && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_CANN=ON -DBUILD_SHARED_LIBS=OFF && \
+    cmake --build build --config Release --target llama-cli
+
+# TODO: use image with NNRT
+FROM ascendai/cann:$ASCEND_VERSION AS runtime
+COPY --from=build /app/build/bin/llama-cli /llama-cli
+
+ENV LC_ALL=C.utf8
+
+ENV ASCEND_TOOLKIT_HOME=/usr/local/Ascend/ascend-toolkit/latest
+ENV LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:$LIBRARY_PATH
+ENV LD_LIBRARY_PATH=${ASCEND_TOOLKIT_HOME}/lib64:${ASCEND_TOOLKIT_HOME}/lib64/plugin/opskernel:${ASCEND_TOOLKIT_HOME}/lib64/plugin/nnengine:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe/op_tiling:${LD_LIBRARY_PATH}
+ENV PYTHONPATH=${ASCEND_TOOLKIT_HOME}/python/site-packages:${ASCEND_TOOLKIT_HOME}/opp/built-in/op_impl/ai_core/tbe:${PYTHONPATH}
+ENV PATH=${ASCEND_TOOLKIT_HOME}/bin:${ASCEND_TOOLKIT_HOME}/compiler/ccec_compiler/bin:${PATH}
+ENV ASCEND_AICPU_PATH=${ASCEND_TOOLKIT_HOME}
+ENV ASCEND_OPP_PATH=${ASCEND_TOOLKIT_HOME}/opp
+ENV TOOLCHAIN_HOME=${ASCEND_TOOLKIT_HOME}/toolkit
+ENV ASCEND_HOME_PATH=${ASCEND_TOOLKIT_HOME}
+
+ENTRYPOINT ["/llama-cli" ]
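This adds a two-stage image for Huawei Ascend NPUs: the build stage compiles llama-cli against the CANN toolkit from the ascendai/cann base image, and the runtime stage carries only the binary plus the toolkit environment. The stub libascend_hal.so is used for linking only, so the real Ascend driver still has to come from the host at run time. A build sketch, with an illustrative tag:

    docker build -f .devops/llama-cli-cann.Dockerfile -t llama-cpp-cli-cann .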

.devops/llama-cli-cuda.Dockerfile

Lines changed: 14 additions & 11 deletions
@@ -1,35 +1,38 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=11.7.1
+ARG CUDA_VERSION=12.6.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

 FROM ${BASE_CUDA_DEV_CONTAINER} AS build

-# Unless otherwise specified, we make a fat build.
-ARG CUDA_DOCKER_ARCH=all
+# CUDA architecture to build for (defaults to all supported archs)
+ARG CUDA_DOCKER_ARCH=default

 RUN apt-get update && \
-    apt-get install -y build-essential git
+    apt-get install -y build-essential git cmake

 WORKDIR /app

 COPY . .

-# Set nvcc architecture
-ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
-# Enable CUDA
-ENV GGML_CUDA=1
-
-RUN make -j$(nproc) llama-cli
+# Use the default CUDA archs if not specified
+RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
+        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
+    fi && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release --target llama-cli -j$(nproc) && \
+    mkdir -p /app/lib && \
+    find build -name "*.so" -exec cp {} /app/lib \;

 FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

 RUN apt-get update && \
     apt-get install -y libgomp1

-COPY --from=build /app/llama-cli /llama-cli
+COPY --from=build /app/lib/ /
+COPY --from=build /app/build/bin/llama-cli /

 ENTRYPOINT [ "/llama-cli" ]
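Because the CMake build produces shared ggml/llama libraries, the runtime stage now copies both the .so files and the binary; the container is still run the same way. A hedged example, assuming the NVIDIA Container Toolkit is installed on the host; the image tag and model path are illustrative:

    docker build -f .devops/llama-cli-cuda.Dockerfile -t llama-cpp-cli-cuda .

    docker run --rm --gpus all \
        -v "$PWD/models:/models" \
        llama-cpp-cli-cuda -m /models/model.gguf -p "Hello" -ngl 99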

.devops/llama-cli-intel.Dockerfile

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
-ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
+ARG ONEAPI_VERSION=2025.0.0-0-devel-ubuntu22.04

 FROM intel/oneapi-basekit:$ONEAPI_VERSION AS build

@@ -15,7 +15,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
     export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
     fi && \
     echo "Building with static libs" && \
-    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \
     ${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \
     cmake --build build --config Release --target llama-cli

.devops/llama-cli-musa.Dockerfile

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+ARG UBUNTU_VERSION=22.04
+# This needs to generally match the container host's environment.
+ARG MUSA_VERSION=rc3.1.0
+# Target the MUSA build image
+ARG BASE_MUSA_DEV_CONTAINER=mthreads/musa:${MUSA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
+# Target the MUSA runtime image
+ARG BASE_MUSA_RUN_CONTAINER=mthreads/musa:${MUSA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}
+
+FROM ${BASE_MUSA_DEV_CONTAINER} AS build
+
+RUN apt-get update && \
+    apt-get install -y build-essential git cmake
+
+WORKDIR /app
+
+COPY . .
+
+RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_MUSA=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release --target llama-cli -j$(nproc) && \
+    mkdir -p /app/lib && \
+    find build -name "*.so" -exec cp {} /app/lib \;
+
+FROM ${BASE_MUSA_RUN_CONTAINER} AS runtime
+
+RUN apt-get update && \
+    apt-get install -y libgomp1
+
+COPY --from=build /app/lib/ /
+COPY --from=build /app/build/bin/llama-cli /llama-cli
+
+ENTRYPOINT [ "/llama-cli" ]

.devops/llama-cli-rocm.Dockerfile

Lines changed: 3 additions & 3 deletions
@@ -11,7 +11,7 @@ FROM ${BASE_ROCM_DEV_CONTAINER} AS build
 # Unless otherwise specified, we make a fat build.
 # List from https://github.com/ggerganov/llama.cpp/pull/1087#issuecomment-1682807878
 # This is mostly tied to rocBLAS supported archs.
-ARG ROCM_DOCKER_ARCH=\
+ARG ROCM_DOCKER_ARCH="\
     gfx803 \
     gfx900 \
     gfx906 \
@@ -21,7 +21,7 @@ ARG ROCM_DOCKER_ARCH=\
     gfx1030 \
     gfx1100 \
     gfx1101 \
-    gfx1102
+    gfx1102"

 COPY requirements.txt requirements.txt
 COPY requirements requirements
@@ -34,7 +34,7 @@ WORKDIR /app
 COPY . .

 # Set nvcc architecture
-ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
+ENV AMDGPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
 ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang

.devops/llama-cli-vulkan.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DGGML_VULKAN=1 && \
+RUN cmake -B build -DGGML_NATIVE=OFF -DGGML_VULKAN=1 && \
     cmake --build build --config Release --target llama-cli

 # Clean up

.devops/llama-server-cuda.Dockerfile

Lines changed: 17 additions & 13 deletions
@@ -1,38 +1,42 @@
 ARG UBUNTU_VERSION=22.04
 # This needs to generally match the container host's environment.
-ARG CUDA_VERSION=11.7.1
+ARG CUDA_VERSION=12.6.0
 # Target the CUDA build image
 ARG BASE_CUDA_DEV_CONTAINER=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu${UBUNTU_VERSION}
 # Target the CUDA runtime image
 ARG BASE_CUDA_RUN_CONTAINER=nvidia/cuda:${CUDA_VERSION}-runtime-ubuntu${UBUNTU_VERSION}

 FROM ${BASE_CUDA_DEV_CONTAINER} AS build

-# Unless otherwise specified, we make a fat build.
-ARG CUDA_DOCKER_ARCH=all
+# CUDA architecture to build for (defaults to all supported archs)
+ARG CUDA_DOCKER_ARCH=default

 RUN apt-get update && \
-    apt-get install -y build-essential git libcurl4-openssl-dev
+    apt-get install -y build-essential git cmake libcurl4-openssl-dev

 WORKDIR /app

 COPY . .

-# Set nvcc architecture
-ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
-# Enable CUDA
-ENV GGML_CUDA=1
-# Enable cURL
-ENV LLAMA_CURL=1
-
-RUN make -j$(nproc) llama-server
+# Use the default CUDA archs if not specified
+RUN if [ "${CUDA_DOCKER_ARCH}" != "default" ]; then \
+        export CMAKE_ARGS="-DCMAKE_CUDA_ARCHITECTURES=${CUDA_DOCKER_ARCH}"; \
+    fi && \
+    cmake -B build -DGGML_NATIVE=OFF -DGGML_CUDA=ON -DLLAMA_CURL=ON ${CMAKE_ARGS} -DCMAKE_EXE_LINKER_FLAGS=-Wl,--allow-shlib-undefined . && \
+    cmake --build build --config Release --target llama-server -j$(nproc) && \
+    mkdir -p /app/lib && \
+    find build -name "*.so" -exec cp {} /app/lib \;

 FROM ${BASE_CUDA_RUN_CONTAINER} AS runtime

 RUN apt-get update && \
     apt-get install -y libcurl4-openssl-dev libgomp1 curl

-COPY --from=build /app/llama-server /llama-server
+COPY --from=build /app/lib/ /
+COPY --from=build /app/build/bin/llama-server /llama-server
+
+# Must be set to 0.0.0.0 so it can listen to requests from host machine
+ENV LLAMA_ARG_HOST=0.0.0.0

 HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]

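The new LLAMA_ARG_HOST=0.0.0.0 default makes the server bind to all interfaces inside the container, so publishing the port is enough to reach it from the host; 8080 is the port the HEALTHCHECK probes. A hedged run sketch, assuming the image's entrypoint is the server binary as in the CLI images; the tag, model path, and GPU flags are illustrative:

    docker build -f .devops/llama-server-cuda.Dockerfile -t llama-cpp-server-cuda .

    docker run --rm --gpus all -p 8080:8080 \
        -v "$PWD/models:/models" \
        llama-cpp-server-cuda -m /models/model.gguf -ngl 99

    # then, from the host:
    curl http://localhost:8080/health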