
Commit 82a0c6d

Export lora A matrix pre-transposed
1 parent 7231eec

File tree: convert-lora-to-ggml.py, llama.cpp

2 files changed, 12 insertions(+), 2 deletions(-)


convert-lora-to-ggml.py

Lines changed: 2 additions & 0 deletions
@@ -94,6 +94,8 @@ def write_tensor_header(self, name: str, shape: Sequence[int], data_type: 1) ->
     # since ggml doesn't always support other types for the second operand,
     # the tensors are always converted and exported as f32
     t = v.float().numpy()
+    if "lora_A" in k:
+        t = t.T
     print(f"{k} => {translate_tensor_name(k)} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
     write_tensor_header(fout, translate_tensor_name(k), t.shape, t.dtype)
     t.tofile(fout)
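
For reference, a minimal numpy sketch (illustrative shapes, not part of the converter) of why exporting lora_A pre-transposed is safe, assuming the usual LoRA factorization delta_W = B @ A and ggml's convention that ggml_mul_mat contracts both operands over their innermost dimension:

import numpy as np

d_out, d_in, r = 8, 16, 4
A = np.random.randn(r, d_in).astype(np.float32)   # lora_A as trained: (r, d_in)
B = np.random.randn(d_out, r).astype(np.float32)  # lora_B as trained: (d_out, r)

A_pre = A.T  # what the converter now writes: (d_in, r), r is the innermost dim

# ggml_mul_mat(a, b) computes out[m, n] = sum_k a[n, k] * b[m, k]; with
# a = A_pre and b = B the contraction runs over r without a runtime transpose:
delta = np.einsum('nk,mk->mn', A_pre, B)
assert np.allclose(delta, B @ A, atol=1e-5)  # same LoRA delta as B @ A

Doing the transpose once in numpy at conversion time is what lets the loader below drop its per-load ggml_transpose call.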

llama.cpp

Lines changed: 10 additions & 2 deletions
@@ -1752,8 +1752,12 @@ int llama_model_quantize(
 
 int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lora, int n_threads) {
     // TODO: refactor all of this after PR #801
+    fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora);
+
     auto & model = ctx->model;
 
+    const int64_t t_start_lora_us = ggml_time_us();
+
     auto fin = std::ifstream(path_lora, std::ios::binary);
     if (!fin) {
         fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora);
@@ -1866,7 +1870,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
             lora_tensors.find(base_name + ".loraB") != lora_tensors.end()) {
 
             ggml_tensor * tensor = model.tensors[base_name];
-            ggml_tensor * loraA = ggml_transpose(lora_ctx, lora_tensors[base_name + ".loraA"]);
+            ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
             ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
 
             if (tensor->ne[0] != loraA->ne[1]) {
@@ -1893,7 +1897,11 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
             fprintf(stderr, ".");
         }
     }
-    fprintf(stderr, " done\n");
+
+    ggml_free(lora_ctx);
+
+    const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
+    fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
 
     return 0;
 }
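
The loader's shape check (tensor->ne[0] != loraA->ne[1]) still holds under the new layout: assuming the converter writes shapes in ggml's reversed order (ne[0] = innermost dimension), the pre-transposed tensor arrives with the same ne as the transposed view that ggml_transpose used to produce. A quick sketch under the same illustrative shapes as above:

import numpy as np

d_out, d_in, r = 8, 16, 4
A_pre = np.zeros((d_in, r), dtype=np.float32)  # as written by the converter

# numpy shape (d_in, r) row-major means r varies fastest in memory,
# which ggml describes as ne = [r, d_in]; ne[1] is what the loader checks:
ne = (A_pre.shape[1], A_pre.shape[0])
assert ne[1] == d_in  # matches tensor->ne[0] of the (d_out, d_in) base weight

The remaining hunks only add a progress message and bracket the whole apply step with ggml_time_us(), so the final "done" line now reports elapsed milliseconds.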
