We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent ae146e9 commit 747c73e — Copy full SHA for 747c73e
compose/local/django/Dockerfile
@@ -87,7 +87,7 @@ COPY --from=python-build-stage /usr/src/app/wheels /wheels/
87
RUN pip install --no-cache-dir --no-index --find-links=/wheels/ $(find /wheels/ -name "*.whl" ! -name "llama_cpp_python*") \
88
&& rm -rf /wheels/
89
90
-# Install llama-cpp-python with specific CMAKE flags for Kubernetes nodes with our without AVX support
+# Install llama-cpp-python with specific CMAKE flags for Kubernetes nodes with or without AVX support
91
RUN if [ "${DISABLE_AVX}" = "true" ]; then \
92
CMAKE_ARGS='-DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DLLAMA_F16C=OFF -DLLAMA_OPENMP=ON' pip install llama-cpp-python==${LLAMA_VERSION} --force-reinstall --no-cache-dir; \
93
else \
0 commit comments