1 parent e712cff commit cafa33e
CMakeLists.txt
@@ -75,7 +75,21 @@ if (LLAMA_BUILD)
     add_subdirectory(vendor/llama.cpp)
     llama_cpp_python_install_target(llama)
     llama_cpp_python_install_target(ggml)
-
+    llama_cpp_python_install_target(ggml-cpu)
+    llama_cpp_python_install_target(ggml-base)
+
+    if (GGML_METAL)
+        llama_cpp_python_install_target(ggml-metal)
+    endif()
+
+    if (GGML_CUDA)
+        llama_cpp_python_install_target(ggml-cuda)
+    endif()
+
+    if (GGML_VULKAN)
+        llama_cpp_python_install_target(ggml-vulkan)
+    endif()
+
     # Workaround for Windows + CUDA https://github.com/abetlen/llama-cpp-python/issues/563
     if (WIN32)
         install(
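Recent llama.cpp builds ggml as several shared libraries (ggml-base, ggml-cpu, plus one library per enabled GPU backend), so each one must be installed into the wheel explicitly; the new guards above install a backend's library only when the matching GGML_* option is enabled. For orientation, llama_cpp_python_install_target is a helper defined elsewhere in this CMakeLists.txt; a minimal sketch of what such a helper could look like, assuming a llama_cpp/lib destination inside the package (the repo's actual definition may differ):

# Hypothetical sketch, not the repo's actual helper: install a vendored
# target's shared library into the Python package's lib directory.
function(llama_cpp_python_install_target target)
    if (NOT TARGET ${target})
        return()  # skip targets the current configuration did not build
    endif()
    install(
        TARGETS ${target}
        LIBRARY DESTINATION llama_cpp/lib   # .so / .dylib
        RUNTIME DESTINATION llama_cpp/lib   # Windows .dll
    )
endfunction()

At build time these GGML_* options are flipped through pip's documented CMAKE_ARGS mechanism, e.g. CMAKE_ARGS="-DGGML_CUDA=on" pip install llama-cpp-python.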
vendor/llama.cpp