
Commit 675a741

Merge branch 'master' into convert-bf16-fix
2 parents 5b67a6c + 8748d8a commit 675a741

525 files changed: +155993 additions, -143305 deletions


.devops/cloud-v-pipeline

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ node('x86_runner1'){ // Running on x86 runner containing latest vecto
     stage('Running llama.cpp'){
         sh'''#!/bin/bash
             module load gnu-bin2/0.1 # loading latest versions of vector qemu and vector gcc
-            qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./main -m /home/alitariq/codellama-7b.Q4_K_M.gguf -p "Anything" -n 9 > llama_log.txt # Running llama.cpp on vector qemu-riscv64
+            qemu-riscv64 -L /softwares/gnu-bin2/sysroot -cpu rv64,v=true,vlen=256,elen=64,vext_spec=v1.0 ./llama-cli -m /home/alitariq/codellama-7b.Q4_K_M.gguf -p "Anything" -n 9 > llama_log.txt # Running llama.cpp on vector qemu-riscv64
             cat llama_log.txt # Printing results
         '''
     }

.devops/full-cuda.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1
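
With the build-flag rename, a native Make build uses the GGML_ prefix as well. A minimal sketch of the equivalent host-side build and run (the model path is illustrative):

    # build the CUDA backend and the renamed CLI target
    make -j GGML_CUDA=1 llama-cli
    ./llama-cli -m /models/model.gguf -p "Hello" -n 32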

.devops/full-rocm.Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -36,7 +36,7 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++
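
The HIP flag gets the same prefix change. A sketch of the equivalent native build, mirroring the compilers this Dockerfile sets:

    # ROCm build with the renamed flag, using the ROCm clang toolchain
    CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ make -j GGML_HIPBLAS=1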

.devops/main-cuda.Dockerfile renamed to .devops/llama-cli-cuda.Dockerfile

Lines changed: 4 additions & 4 deletions
@@ -21,15 +21,15 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1

-RUN make -j$(nproc) main
+RUN make -j$(nproc) llama-cli

 FROM ${BASE_CUDA_RUN_CONTAINER} as runtime

 RUN apt-get update && \
     apt-get install -y libgomp1

-COPY --from=build /app/main /main
+COPY --from=build /app/llama-cli /llama-cli

-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/llama-cli" ]
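
Because the entrypoint is now /llama-cli, arguments after the image name pass straight to the binary. A usage sketch (the image tag and model path are hypothetical):

    docker build -t llama-cli-cuda -f .devops/llama-cli-cuda.Dockerfile .
    docker run --gpus all -v /models:/models llama-cli-cuda -m /models/model.gguf -p "Hello" -n 32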

.devops/llama-cli-intel.Dockerfile

Lines changed: 26 additions & 0 deletions
@@ -0,0 +1,26 @@
+ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+
+ARG GGML_SYCL_F16=OFF
+RUN apt-get update && \
+    apt-get install -y git
+
+WORKDIR /app
+
+COPY . .
+
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
+    fi && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+    cmake --build build --config Release --target llama-cli
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+
+COPY --from=build /app/build/bin/llama-cli /llama-cli
+
+ENV LC_ALL=C.utf8
+
+ENTRYPOINT [ "/llama-cli" ]
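
The GGML_SYCL_F16 build argument toggles the FP16 SYCL path at image build time. A hedged build sketch (the tag is hypothetical):

    docker build -t llama-cli-intel --build-arg GGML_SYCL_F16=ON -f .devops/llama-cli-intel.Dockerfile .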

.devops/main-rocm.Dockerfile renamed to .devops/llama-cli-rocm.Dockerfile

Lines changed: 3 additions & 3 deletions
@@ -36,10 +36,10 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++

-RUN make -j$(nproc) main
+RUN make -j$(nproc) llama-cli

-ENTRYPOINT [ "/app/main" ]
+ENTRYPOINT [ "/app/llama-cli" ]

.devops/main-vulkan.Dockerfile renamed to .devops/llama-cli-vulkan.Dockerfile

Lines changed: 4 additions & 4 deletions
@@ -14,14 +14,14 @@ RUN wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key
 # Build it
 WORKDIR /app
 COPY . .
-RUN cmake -B build -DLLAMA_VULKAN=1 && \
-    cmake --build build --config Release --target main
+RUN cmake -B build -DGGML_VULKAN=1 && \
+    cmake --build build --config Release --target llama-cli

 # Clean up
 WORKDIR /
-RUN cp /app/build/bin/main /main && \
+RUN cp /app/build/bin/llama-cli /llama-cli && \
     rm -rf /app

 ENV LC_ALL=C.utf8

-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/llama-cli" ]
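
The renamed GGML_VULKAN flag and llama-cli target work the same way for a local build outside Docker, e.g.:

    cmake -B build -DGGML_VULKAN=1
    cmake --build build --config Release --target llama-cli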

.devops/main.Dockerfile renamed to .devops/llama-cli.Dockerfile

Lines changed: 3 additions & 3 deletions
@@ -9,15 +9,15 @@ WORKDIR /app

 COPY . .

-RUN make -j$(nproc) main
+RUN make -j$(nproc) llama-cli

 FROM ubuntu:$UBUNTU_VERSION as runtime

 RUN apt-get update && \
     apt-get install -y libgomp1

-COPY --from=build /app/main /main
+COPY --from=build /app/llama-cli /llama-cli

 ENV LC_ALL=C.utf8

-ENTRYPOINT [ "/main" ]
+ENTRYPOINT [ "/llama-cli" ]

.devops/llama-cpp-clblast.srpm.spec

Lines changed: 0 additions & 84 deletions
This file was deleted.

.devops/llama-cpp-cuda.srpm.spec

Lines changed: 8 additions & 8 deletions
@@ -32,13 +32,13 @@ CPU inference for Meta's Lllama2 models using default options.
 %setup -n llama.cpp-master

 %build
-make -j LLAMA_CUDA=1
+make -j GGML_CUDA=1

 %install
 mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llamacppcuda
-cp -p server %{buildroot}%{_bindir}/llamacppcudaserver
-cp -p simple %{buildroot}%{_bindir}/llamacppcudasimple
+cp -p llama-cli %{buildroot}%{_bindir}/llama-cuda-cli
+cp -p llama-server %{buildroot}%{_bindir}/llama-cuda-server
+cp -p llama-simple %{buildroot}%{_bindir}/llama-cuda-simple

 mkdir -p %{buildroot}/usr/lib/systemd/system
 %{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llamacuda.service

@@ -49,7 +49,7 @@ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.t
 [Service]
 Type=simple
 EnvironmentFile=/etc/sysconfig/llama
-ExecStart=/usr/bin/llamacppcudaserver $LLAMA_ARGS
+ExecStart=/usr/bin/llama-cuda-server $LLAMA_ARGS
 ExecReload=/bin/kill -s HUP $MAINPID
 Restart=never

@@ -67,9 +67,9 @@ rm -rf %{buildroot}
 rm -rf %{_builddir}/*

 %files
-%{_bindir}/llamacppcuda
-%{_bindir}/llamacppcudaserver
-%{_bindir}/llamacppcudasimple
+%{_bindir}/llama-cuda-cli
+%{_bindir}/llama-cuda-server
+%{_bindir}/llama-cuda-simple
 /usr/lib/systemd/system/llamacuda.service
 %config /etc/sysconfig/llama
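
A hedged sketch of building the package from this spec, assuming rpmbuild is installed and the spec's Source0 tarball is already in place:

    rpmbuild -ba .devops/llama-cpp-cuda.srpm.spec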

.devops/llama-cpp.srpm.spec

Lines changed: 7 additions & 7 deletions
@@ -38,9 +38,9 @@ make -j

 %install
 mkdir -p %{buildroot}%{_bindir}/
-cp -p main %{buildroot}%{_bindir}/llama
-cp -p server %{buildroot}%{_bindir}/llamaserver
-cp -p simple %{buildroot}%{_bindir}/llamasimple
+cp -p llama-cli %{buildroot}%{_bindir}/llama-cli
+cp -p llama-server %{buildroot}%{_bindir}/llama-server
+cp -p llama-simple %{buildroot}%{_bindir}/llama-simple

 mkdir -p %{buildroot}/usr/lib/systemd/system
 %{__cat} <<EOF > %{buildroot}/usr/lib/systemd/system/llama.service

@@ -51,7 +51,7 @@ After=syslog.target network.target local-fs.target remote-fs.target nss-lookup.t
 [Service]
 Type=simple
 EnvironmentFile=/etc/sysconfig/llama
-ExecStart=/usr/bin/llamaserver $LLAMA_ARGS
+ExecStart=/usr/bin/llama-server $LLAMA_ARGS
 ExecReload=/bin/kill -s HUP $MAINPID
 Restart=never

@@ -69,9 +69,9 @@ rm -rf %{buildroot}
 rm -rf %{_builddir}/*

 %files
-%{_bindir}/llama
-%{_bindir}/llamaserver
-%{_bindir}/llamasimple
+%{_bindir}/llama-cli
+%{_bindir}/llama-server
+%{_bindir}/llama-simple
 /usr/lib/systemd/system/llama.service
 %config /etc/sysconfig/llama
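
The installed llama.service reads $LLAMA_ARGS from its EnvironmentFile. A usage sketch (the model path and flags are illustrative, not part of this diff):

    echo 'LLAMA_ARGS="-m /models/model.gguf --host 0.0.0.0 --port 8080"' | sudo tee /etc/sysconfig/llama
    sudo systemctl start llama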

.devops/server-cuda.Dockerfile renamed to .devops/llama-server-cuda.Dockerfile

Lines changed: 7 additions & 5 deletions
@@ -21,17 +21,19 @@ COPY . .
 # Set nvcc architecture
 ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
 # Enable CUDA
-ENV LLAMA_CUDA=1
+ENV GGML_CUDA=1
 # Enable cURL
 ENV LLAMA_CURL=1

-RUN make -j$(nproc) server
+RUN make -j$(nproc) llama-server

 FROM ${BASE_CUDA_RUN_CONTAINER} as runtime

 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev libgomp1
+    apt-get install -y libcurl4-openssl-dev libgomp1 curl

-COPY --from=build /app/server /server
+COPY --from=build /app/llama-server /llama-server

-ENTRYPOINT [ "/server" ]
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
+ENTRYPOINT [ "/llama-server" ]
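
The new HEALTHCHECK polls the server's /health endpoint on its default port 8080, which is why curl is added to the runtime image. A run sketch (the image tag and model path are hypothetical):

    docker run --gpus all -p 8080:8080 -v /models:/models llama-server-cuda -m /models/model.gguf --host 0.0.0.0
    curl http://localhost:8080/health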

.devops/llama-server-intel.Dockerfile

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+ARG ONEAPI_VERSION=2024.1.1-devel-ubuntu22.04
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+
+ARG GGML_SYCL_F16=OFF
+RUN apt-get update && \
+    apt-get install -y git libcurl4-openssl-dev
+
+WORKDIR /app
+
+COPY . .
+
+RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
+        echo "GGML_SYCL_F16 is set" && \
+        export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
+    fi && \
+    cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
+    cmake --build build --config Release --target llama-server
+
+FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+
+RUN apt-get update && \
+    apt-get install -y libcurl4-openssl-dev curl
+
+COPY --from=build /app/build/bin/llama-server /llama-server
+
+ENV LC_ALL=C.utf8
+
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
+ENTRYPOINT [ "/llama-server" ]
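
Using the SYCL backend from a container typically also requires passing the Intel GPU's DRI device through; a hedged run sketch (the image tag and model path are hypothetical):

    docker run --device /dev/dri -p 8080:8080 -v /models:/models llama-server-intel -m /models/model.gguf --host 0.0.0.0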

.devops/server-rocm.Dockerfile renamed to .devops/llama-server-rocm.Dockerfile

Lines changed: 6 additions & 4 deletions
@@ -36,15 +36,17 @@ COPY . .
 # Set nvcc architecture
 ENV GPU_TARGETS=${ROCM_DOCKER_ARCH}
 # Enable ROCm
-ENV LLAMA_HIPBLAS=1
+ENV GGML_HIPBLAS=1
 ENV CC=/opt/rocm/llvm/bin/clang
 ENV CXX=/opt/rocm/llvm/bin/clang++

 # Enable cURL
 ENV LLAMA_CURL=1
 RUN apt-get update && \
-    apt-get install -y libcurl4-openssl-dev
+    apt-get install -y libcurl4-openssl-dev curl

-RUN make -j$(nproc)
+RUN make -j$(nproc) llama-server

-ENTRYPOINT [ "/app/server" ]
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
+ENTRYPOINT [ "/app/llama-server" ]
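
For ROCm, containers generally need both the KFD compute interface and the render nodes; a hedged run sketch (the image tag and model path are hypothetical):

    docker run --device /dev/kfd --device /dev/dri -p 8080:8080 -v /models:/models llama-server-rocm -m /models/model.gguf --host 0.0.0.0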
