
Commit cabe97c

Test out ggml-org/llama.cpp#4605 with my local config
1 parent 32933a4

File tree

4 files changed: +31 −13 lines

flake.lock

Lines changed: 26 additions & 10 deletions
Generated file; diff not rendered by default.

flake.nix

Lines changed: 1 addition & 0 deletions
@@ -39,6 +39,7 @@
     overlays = builtins.attrValues overlays;
     config.allowUnfree = true;
     config.hostPlatform = system;
+    config.cudaSupport = true;
   };
   x86_64-linux = import nixpkgs (mkConfig "x86_64-linux");
   aarch64-darwin = import nixpkgs (mkConfig "aarch64-darwin");
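
Setting `config.cudaSupport = true` flips the nixpkgs-wide CUDA toggle. By convention, CUDA-capable derivations default their own `cudaSupport` argument from this value, so the single flag cascades to every package that honors it; `config.allowUnfree = true` (already set above) is also required, since the CUDA toolkit is unfree. A simplified sketch of that convention follows (illustrative only, not code from this repository; the exact argument and flag names vary per package, and `-DWITH_CUDA=ON` is a placeholder):

{ lib
, stdenv
, config
, cudaSupport ? config.cudaSupport or false
, cudaPackages ? { }
}:

stdenv.mkDerivation {
  pname = "example";
  version = "1.0";
  src = ./.;
  # The global config.cudaSupport value arrives here as the default,
  # enabling the CUDA backend and toolchain without a per-package override.
  cmakeFlags = lib.optionals cudaSupport [ "-DWITH_CUDA=ON" ];
  buildInputs = lib.optionals cudaSupport [ cudaPackages.cudatoolkit ];
}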

overlays.nix

Lines changed: 3 additions & 0 deletions
@@ -38,6 +38,9 @@ in
   });
   });

+  # On zebul, we use CUDA 12.3
+  cudaPackages = final.cudaPackages_12_3;
+
   # Work in progress: build wpa_supplicant from source
   #wpa_supplicant = prev.wpa_supplicant.overrideAttrs (prevAttrs: {
   #  src = prev.fetchgit {
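
For context, an overlay is a function of `final` and `prev` that returns an attribute set of replacements. Pinning `cudaPackages` to `final.cudaPackages_12_3` means every package that takes `cudaPackages` as an input now resolves to the CUDA 12.3 package set. Reduced to its essence, the change above amounts to this (a standalone sketch; the real overlays.nix carries more entries around it):

final: prev: {
  # Redirect the default `cudaPackages` alias to the 12.3 release,
  # so all consumers (llama-cpp's CUDA backend included) pick it up.
  cudaPackages = final.cudaPackages_12_3;
}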

programs.nix

Lines changed: 1 addition & 3 deletions
@@ -160,9 +160,7 @@
     nurl

     # `llama-cpp` is a set of programs for running LLMs locally
-    (llama-cpp-cuda.override {
-      cudaPackages = pkgs.cudaPackages_12_3;
-    })
+    llama-cpp
   ];

   users.users.philip.packages = with pkgs; [
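
The net effect of the two earlier changes: with `config.cudaSupport = true` set globally and `cudaPackages` pinned by the overlay, the hand-rolled override is redundant, and plain `llama-cpp` already builds against CUDA 12.3. Side by side (the removed `llama-cpp-cuda` attribute presumably came from the llama.cpp flake under test):

# Before: pick the CUDA variant and pin its package set per package
(llama-cpp-cuda.override {
  cudaPackages = pkgs.cudaPackages_12_3;
})

# After: the global toggle plus the overlay do both implicitly
llama-cpp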
