
Commit cb9178f

llama : remove llm_graph_input_one (#14603)
1 parent 4a5686d commit cb9178f

3 files changed: +1 -29 lines


src/llama-graph.cpp

Lines changed: 0 additions & 7 deletions
@@ -340,13 +340,6 @@ void llm_graph_input_mem_hybrid::set_input(const llama_ubatch * ubatch) {
     inp_rs->set_input(ubatch);
 }
 
-void llm_graph_input_one::set_input(const llama_ubatch * ubatch) {
-    GGML_UNUSED(ubatch);
-    GGML_ASSERT(one && ggml_nelements(one) == 1);
-    float f_one = 1.0f;
-    ggml_backend_tensor_set(one, &f_one, 0, sizeof(float));
-}
-
 //
 // llm_graph_context
 //

src/llama-graph.h

Lines changed: 0 additions & 11 deletions
@@ -341,17 +341,6 @@ class llm_graph_input_mem_hybrid : public llm_graph_input_i {
     const llama_memory_hybrid_context * mctx;
 };
 
-// TODO: remove this when ggml_scale_add is implemented
-class llm_graph_input_one : public llm_graph_input_i {
-public:
-    llm_graph_input_one() {}
-    virtual ~llm_graph_input_one() = default;
-
-    void set_input(const llama_ubatch * ubatch) override;
-
-    ggml_tensor * one = nullptr; // F32
-};
-
 //
 // llm_graph_result
 //

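For context, the class removed above existed only to feed a constant into the computation graph through the per-ubatch input mechanism: every time a ubatch was evaluated, set_input() wrote the literal 1.0f into a one-element F32 tensor on the backend. The sketch below mirrors that pattern with simplified stand-in types (not the real llama.cpp or ggml interfaces) to show how much machinery a single compile-time constant was routed through.

    // Simplified, self-contained illustration of the graph-input pattern the
    // removed class implemented. All types here are stand-ins, not the real
    // llama.cpp/ggml interfaces.
    #include <cassert>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct ubatch_stub {};                        // stand-in for llama_ubatch

    // stand-in for llm_graph_input_i: hook called once per ubatch to fill input data
    struct graph_input_i {
        virtual ~graph_input_i() = default;
        virtual void set_input(const ubatch_stub * ub) = 0;
    };

    // stand-in for the removed llm_graph_input_one: its only job was to write
    // the constant 1.0f into a single-element tensor before every compute
    struct graph_input_one : graph_input_i {
        float * one = nullptr;                    // stand-in for the 1-element F32 tensor
        void set_input(const ubatch_stub * /*ub*/) override {
            assert(one != nullptr);
            *one = 1.0f;                          // the value never changes
        }
    };

    int main() {
        float one_storage = 0.0f;
        std::vector<std::unique_ptr<graph_input_i>> inputs;

        auto inp = std::make_unique<graph_input_one>();
        inp->one = &one_storage;
        inputs.push_back(std::move(inp));         // analogous to res->add_input(...)

        ubatch_stub ub;
        for (auto & in : inputs) {
            in->set_input(&ub);                   // repeated for every ubatch
        }
        std::printf("one = %.1f\n", one_storage); // prints: one = 1.0
        return 0;
    }

With the constant folded into the graph op itself (see the llama-model.cpp change below), this round trip through a dedicated graph input disappears.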
src/llama-model.cpp

Lines changed: 1 addition & 11 deletions
@@ -9485,8 +9485,6 @@ struct llm_build_gemma3n_iswa : public llm_graph_context {
     const int n_layer_sparsity = 10; // number of layers using activation sparsity
     const float f_sparsity_std_mul = 1.6448533535003662f; // std_multiplier = normal_dist.icdf(0.95)
 
-    ggml_tensor * one; // containing single element 1.0f
-
     llm_build_gemma3n_iswa(const llama_model & model, const llm_graph_params & params, ggml_cgraph * gf)
         : llm_graph_context(params),
           model(model),
@@ -9498,14 +9496,6 @@ struct llm_build_gemma3n_iswa : public llm_graph_context {
         ggml_tensor * cur;
         ggml_tensor * inpL;
 
-        // TODO: remove this when ggml_scale_add is implemented
-        one = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
-        {
-            auto inp = std::make_unique<llm_graph_input_one>();
-            inp->one = one;
-            res->add_input(std::move(inp));
-        }
-
         inpL = build_inp_embd(model.tok_embd);
 
         // important: do not normalize weights for raw embeddings input (i.e. encoded image emdeddings)
@@ -9895,7 +9885,7 @@ struct llm_build_gemma3n_iswa : public llm_graph_context {
         cb(innovation, "innovation", il);
 
         ggml_tensor * all_coefs = build_lora_mm(model.layers[il].altup_correct_coef, modalities); // [n_altup, n_tokens]
-        all_coefs = ggml_add(ctx0, all_coefs, one);
+        all_coefs = ggml_scale_bias(ctx0, all_coefs, 1.0f, 1.0f); // + 1.0
         cb(all_coefs, "all_coefs", il);
         all_coefs = ggml_cont(ctx0, ggml_transpose(ctx0, all_coefs)); // [n_tokens, n_altup]
         all_coefs = ggml_reshape_3d(ctx0, all_coefs, 1, n_tokens, n_altup); // [1, n_tokens, n_altup]

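The functional change is the last hunk: instead of materialising a one-element tensor that always holds 1.0f and adding it, the builder now calls ggml_scale_bias, which (per its name and the "+ 1.0" comment at the call site) applies a per-element scale and bias, so scale 1.0 and bias 1.0 is simply "add one" with no extra graph input to allocate or fill. A scalar-level sketch of that equivalence follows; it is plain C++ arithmetic, not a ggml graph, and scale_bias() here only models the per-element math s*x + b.

    // Scalar-level check of the rewrite: the old path added a one-element
    // tensor holding 1.0f, the new path uses a scale/bias of 1.0/1.0.
    #include <cassert>
    #include <cstdio>

    // models the per-element operation understood to underlie ggml_scale_bias
    static float scale_bias(float x, float s, float b) { return s * x + b; }

    int main() {
        const float coefs[4] = { -0.5f, 0.0f, 0.25f, 2.0f };
        const float one = 1.0f;                                 // the removed constant tensor

        for (int i = 0; i < 4; ++i) {
            float old_path = coefs[i] + one;                    // ggml_add(ctx0, all_coefs, one)
            float new_path = scale_bias(coefs[i], 1.0f, 1.0f);  // ggml_scale_bias(ctx0, all_coefs, 1.0f, 1.0f)
            assert(old_path == new_path);
            std::printf("%+.2f -> %+.2f\n", coefs[i], new_path);
        }
        return 0;
    }

The net effect is one fewer graph input to set up per ubatch while the AltUp correction coefficients still receive the same +1.0 offset.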