Skip to content

Commit 5b44f4e

Browse files
committed
fix: Remove unused ssm_in_b
Branch: GraniteFour — Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
1 parent dcf51e0 commit 5b44f4e

File tree

2 files changed

+0
-2
lines changed

2 files changed

+0
-2
lines changed

src/llama-model.cpp

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -3454,7 +3454,6 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
34543454
if (hparams.is_recurrent(i)) {
34553455
// ssm layers
34563456
layer.ssm_in = create_tensor(tn(LLM_TENSOR_SSM_IN, "weight", i), {n_embd, d_in_proj}, 0);
3457-
layer.ssm_in_b = create_tensor(tn(LLM_TENSOR_SSM_IN, "bias", i), {n_embd, d_in_proj}, TENSOR_NOT_REQUIRED);
34583457

34593458
layer.ssm_conv1d = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "weight", i), {d_conv, d_inner + 2*n_group*d_state}, 0);
34603459
layer.ssm_conv1d_b = create_tensor(tn(LLM_TENSOR_SSM_CONV1D, "bias", i), {d_inner + 2*n_group*d_state}, TENSOR_NOT_REQUIRED);

src/llama-model.h

Lines changed: 0 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -261,7 +261,6 @@ struct llama_layer {
261261
// mamba bias
262262
struct ggml_tensor * ssm_conv1d_b = nullptr;
263263
struct ggml_tensor * ssm_dt_b = nullptr;
264-
struct ggml_tensor * ssm_in_b = nullptr;
265264

266265
// rwkv
267266
struct ggml_tensor * time_mix_w1 = nullptr;

0 commit comments

Comments (0)