Commit 14858ba

Show warning when using a quantized base model
1 parent fc89916 commit 14858ba
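
Background (a gloss, not part of the original commit message): applying a LoRA adapter merges a low-rank update into each affected base tensor, roughly W' = W + scaling * (loraB x loraA). When W is stored as Q4_0 or Q4_1, the merged result has to be requantized, so the adapter's small deltas can be partly swallowed by quantization error. Hence the new warning, which suggests pointing --lora-base at an f16/f32 copy of the model instead.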


llama.cpp

Lines changed: 10 additions & 3 deletions
@@ -1843,9 +1843,8 @@ int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char *
         model_loader->mapping.reset(new llama_mmap(&model_loader->file_loaders.at(0)->file, false));
     }
 
-    fprintf(stderr, "%s: ", __func__);
-
     // read tensors and apply
+    bool warned = false;
     int n_tensors = 0;
     while (true) {
         int32_t n_dims;
@@ -1938,6 +1937,14 @@ int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char *
             base_t = dest_t;
         }
 
+        if (base_t->type == GGML_TYPE_Q4_0 || base_t->type == GGML_TYPE_Q4_1) {
+            if (!warned) {
+                fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
+                                "use a f16 or f32 base model with --lora-base\n", __func__);
+                warned = true;
+            }
+        }
+
         ggml_tensor * loraA = lora_tensors[base_name + ".loraA"];
         ggml_tensor * loraB = lora_tensors[base_name + ".loraB"];
 
@@ -1974,7 +1981,7 @@ int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char *
             lora_tensors.clear();
 
             n_tensors++;
-            if (n_tensors % 8 == 0)
+            if (n_tensors % 4 == 0)
                 fprintf(stderr, ".");
         }
     }
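
Following the warning's advice, an invocation might look like this (a sketch: file names are illustrative; -m, --lora, and --lora-base are the flags the warning message refers to):

    ./main -m ggml-model-q4_0.bin \
        --lora ggml-adapter-model.bin \
        --lora-base ggml-model-f16.bin

Here the quantized model is still the one used for inference, while the f16 model serves only as the base onto which the adapter is applied.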
