Skip to content

Commit c47c88f

Browse files
committed
Export lora A matrix pre-transposed
1 parent 661a163 commit c47c88f

File tree

2 files changed

+12
-2
lines changed

2 files changed

+12
-2
lines changed

convert-lora-to-ggml.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,8 @@ def write_tensor_header(self, name: str, shape: Sequence[int], data_type: int) ->
9494
# since ggml doesn't always support other types for the second operand,
9595
# the tensors are always converted and exported as f32
9696
t = v.float().numpy()
97+
if "lora_A" in k:
98+
t = t.T
9799
print(f"{k} => {translate_tensor_name(k)} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
98100
write_tensor_header(fout, translate_tensor_name(k), t.shape, t.dtype)
99101
t.tofile(fout)

llama.cpp

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1768,8 +1768,12 @@ int llama_model_quantize(
17681768

17691769
int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, int n_threads) {
17701770
// TODO: refactor all of this after PR #801
1771+
fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
1772+
17711773
auto & model = ctx->model;
17721774

1775+
const int64_t t_start_lora_us = ggml_time_us();
1776+
17731777
auto fin = std::ifstream(path_lora, std::ios::binary);
17741778
if (!fin) {
17751779
fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
@@ -1882,7 +1886,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
18821886
lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
18831887

18841888
ggml_tensor * tensor = model.tensors[base_name];
1885-
ggml_tensor * loraA = ggml_transpose(lora_ctx, lora_tensors[base_name + ".loraA"]);
1889+
ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
18861890
ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
18871891

18881892
if (tensor->ne[0] != loraA->ne[1]) {
@@ -1909,7 +1913,11 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
19091913
fprintf(stderr, ".");
19101914
}
19111915
}
1912-
fprintf(stderr, " done\n");
1916+
1917+
ggml_free(lora_ctx);
1918+
1919+
const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
1920+
fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
19131921

19141922
return 0;
19151923
}

0 commit comments

Comments
 (0)