We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent a161c69 · commit 313eccb — Copy full SHA for 313eccb
README.md
@@ -13,7 +13,7 @@ used instead.
13
14
```bash
15
make build
16
-make llama-2-13b
+make llama-3-8b
17
make up
18
```
19
docker-compose.yml
@@ -5,7 +5,7 @@ services:
5
environment:
6
- GGML_CUDA_NO_PINNED=1
7
- LLAMA_CTX_SIZE=2048
8
- - LLAMA_MODEL=/models/llama-2-13b-chat.Q5_K_M.gguf
+ - LLAMA_MODEL=/models/Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf
9
- LLAMA_N_GPU_LAYERS=99
10
volumes:
11
- ./models:/models
docker-entrypoint.sh
@@ -62,7 +62,7 @@ set_default_env_vars() {
62
export LLAMA_HOST="0.0.0.0"
63
fi
64
if [ -z ${LLAMA_MODEL+x} ]; then
65
- export LLAMA_MODEL="/models/llama-2-13b-chat.Q5_K_M.gguf"
+ export LLAMA_MODEL="/models/Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf"
66
67
}
68
0 commit comments