
Commit 2f39cd7

compilade and CISC committed
model : remove unnecessary prefix for tensor loading constants
Co-authored-by: Sigbjørn Skjæret <sigbjorn.skjaeret@scala.com>
1 parent db5ff0c commit 2f39cd7
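
Context for the change: the qualified `llama_model_loader::` prefix at each call site is redundant when the constants are already visible under their bare names inside `load_tensors`. A minimal sketch of one way this works, assuming the nested constants are hoisted into function-local aliases (the alias pattern and the flag values below are illustrative assumptions, not the file's actual code):

// Minimal sketch (assumption): constants nested in the loader are hoisted
// into local aliases, so later call sites can drop the qualified prefix.
#include <cstdio>

struct llama_model_loader {
    // flag values are illustrative, not llama.cpp's real ones
    static constexpr int TENSOR_NOT_REQUIRED = 1 << 0;
    static constexpr int TENSOR_DUPLICATED   = 1 << 1;
};

bool load_tensors() {
    // once aliased, the bare names are valid for the rest of the function,
    // which is what makes llama_model_loader:: at each call site redundant
    const auto TENSOR_NOT_REQUIRED = llama_model_loader::TENSOR_NOT_REQUIRED;
    const auto TENSOR_DUPLICATED   = llama_model_loader::TENSOR_DUPLICATED;

    std::printf("flags: %d %d\n", TENSOR_NOT_REQUIRED, TENSOR_DUPLICATED);
    return true;
}

int main() { return load_tensors() ? 0 : 1; }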

File tree

1 file changed: +5 -5 lines


src/llama-model.cpp

Lines changed: 5 additions & 5 deletions
@@ -3220,10 +3220,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     {
         output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);

-        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
         // if output is NULL, init from the input tok embed, duplicated to allow offloading
         if (output == NULL) {
-            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
+            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
         }
     }

@@ -3266,10 +3266,10 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     {
         output_norm = create_tensor(tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, 0);

-        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+        output = create_tensor(tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, TENSOR_NOT_REQUIRED);
         // if output is NULL, init from the input tok embed, duplicated to allow offloading
         if (output == NULL) {
-            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
+            output = create_tensor(tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, TENSOR_DUPLICATED);
         }
     }

@@ -3316,7 +3316,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {

         layer.ffn_norm = create_tensor(tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, 0);

-        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, llama_model_loader::TENSOR_NOT_REQUIRED);
+        layer.ffn_gate_inp = create_tensor(tn(LLM_TENSOR_FFN_GATE_INP, "weight", i), {n_embd, n_expert}, TENSOR_NOT_REQUIRED);

         if (layer.ffn_gate_inp) {
             // MoE

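The flags themselves explain the surrounding logic in these hunks: `TENSOR_NOT_REQUIRED` makes a missing tensor come back as NULL rather than a load error, and `TENSOR_DUPLICATED` copies the token embeddings so the output head can be offloaded independently, as the diff's own comment notes. A self-contained sketch of that fallback, with a simplified stand-in for `create_tensor` (its real signature, the tensor type, and the flag values differ in llama.cpp):

// Sketch of the optional-output fallback from the hunks above; the loader,
// flag values, and tensor type are simplified stand-ins.
#include <cstdio>
#include <map>
#include <string>

enum tensor_flags {
    TENSOR_NOT_REQUIRED = 1 << 0, // absent tensor -> nullptr, not an error
    TENSOR_DUPLICATED   = 1 << 1, // copy the data so it can be offloaded separately
};

struct tensor { std::string name; };

// tensors "present in the file"; output.weight is deliberately missing
// so the fallback path below actually runs
static std::map<std::string, tensor> g_file = {
    {"token_embd.weight", {"token_embd.weight"}},
};

static tensor * create_tensor(const std::string & name, int flags) {
    auto it = g_file.find(name);
    if (it == g_file.end()) {
        return nullptr; // tolerated: the caller passed TENSOR_NOT_REQUIRED
    }
    (void) flags; // a real loader would honor TENSOR_DUPLICATED here
    return &it->second;
}

int main() {
    tensor * output = create_tensor("output.weight", TENSOR_NOT_REQUIRED);

    // if output is NULL, init from the input tok embed, duplicated to allow offloading
    if (output == NULL) {
        output = create_tensor("token_embd.weight", TENSOR_DUPLICATED);
    }
    std::printf("output head backed by: %s\n", output->name.c_str());
    return 0;
}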