@@ -4195,9 +4195,6 @@ static bool llm_load_tensors(
                 auto & layer = model.layers[i];
 
-                // TODO: what's the difference between ctx_layer and ctx_split?
-                // A: It seems that ctx_split is for matrices (2d???) while ctx_layer is for other things (like 1D bias and norms, probably.)
-
                 // norm
                 layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});
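The TODO removed above asked how ctx_layer differs from ctx_split. Judging from the surrounding llm_load_tensors code (an inference, not something this diff states), ctx_split is the context whose buffer type can be row-split across devices and is used for 2-D weight matrices, while ctx_layer holds the remaining per-layer tensors such as 1-D norms and biases. A minimal sketch of that convention; the second tensor (LLM_TENSOR_ATTN_Q / layer.wq) is an illustrative example from other architectures, not something this diff touches:

    // 1-D tensors (norms, biases) go in the per-layer context:
    layer.attn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd});

    // 2-D weight matrices go in the split context, whose buffer type can be
    // row-split across devices in multi-GPU setups (illustrative example):
    layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});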
@@ -6912,7 +6909,6 @@ struct llm_build_context {
         const int32_t n_tok = batch.n_tokens;
 
-        // hopefully the compiler does constant folding
         const int64_t d_model = n_embd;
         const int64_t d_inner = n_head;
         GGML_ASSERT(2 * d_model == d_inner);
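The assert at the end of this hunk pins down the sizing convention the two aliases encode: the inner SSM dimension is a multiple of the model dimension, d_inner = expand * d_model, with expand = 2 as Mamba's default (the name expand is an assumption from the Mamba convention; what the diff itself shows is that d_inner is carried in through the repurposed n_head hyperparameter). An equivalent form:

    const int64_t expand = 2;                 // Mamba's default expansion factor (assumed)
    GGML_ASSERT(expand * d_model == d_inner); // same check as the assert above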
@@ -6959,8 +6955,8 @@ struct llm_build_context {
         // The following tensor is too big in order to avoid an assertion error when making an overlapping view.
         // TODO: in ggml_new_tensor_impl, handle overlapping data range in data size calculation
-        // This could then be a tensor with ne[] = {(d_conv-1)+n_tok, d_inner}
-        // which is around (d_conv-1) times as small as its current size.
+        // This could then be a tensor with ne[] = {(d_conv-1)+n_tok, d_inner},
+        // but the size difference is not that big (d_conv is usually 4).
         struct ggml_tensor * conv_x = ggml_new_tensor_1d(ctx0, conv_state->type, d_conv*d_inner*n_tok);
         const size_t conv_x_nb1 = (d_conv - 1 + n_tok) * ggml_element_size(conv_x);
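To make the rewritten comment concrete, here is a standalone sketch of the size arithmetic; d_conv = 4 comes from the comment itself, while d_inner and n_tok are arbitrary illustrative values:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t d_conv  = 4;    // conv window width; "usually 4" per the comment
        const int64_t d_inner = 1536; // illustrative inner dimension (assumed)
        const int64_t n_tok   = 512;  // illustrative batch size in tokens (assumed)

        // Current allocation: a full d_conv-wide window per token.
        const int64_t current = d_conv * d_inner * n_tok;
        // Hypothetical size once overlapping views are handled:
        // (d_conv - 1) carried-over columns plus one column per token.
        const int64_t smaller = (d_conv - 1 + n_tok) * d_inner;

        printf("current: %lld elements\n", (long long) current);
        printf("smaller: %lld elements\n", (long long) smaller);
        printf("ratio:   %.2fx\n", (double) current / (double) smaller);
        // Prints a ratio just under d_conv (about 3.98x here), so the
        // over-allocation stays bounded by the small d_conv, which is
        // the point of the softened wording in the comment.
        return 0;
    }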