Skip to content

Commit a5fd538

Browse files
committed
added cb_eval & cb_eval_user_data to context_params.
1 parent 8ddc126 commit a5fd538

File tree

3 files changed

+18
-8
lines changed

3 files changed

+18
-8
lines changed

llama-cpp-2/examples/simple.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ fn main() -> Result<()> {
6262
..LlamaContextParams::default()
6363
};
6464

65-
let mut ctx = model.new_context(&backend, &ctx_params)
65+
let mut ctx = model.new_context(&backend, ctx_params)
6666
.with_context(|| "unable to create the llama_context")?;
6767

6868
// tokenize the prompt

llama-cpp-2/src/context/params.rs

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,7 +43,7 @@ impl From<RopeScalingType> for i8 {
4343
}
4444

4545
/// A safe wrapper around `llama_context_params`.
46-
#[derive(Debug, Clone, Copy, PartialEq)]
46+
#[derive(Debug, PartialEq)]
4747
#[allow(
4848
missing_docs,
4949
clippy::struct_excessive_bools,
@@ -71,6 +71,8 @@ pub struct LlamaContextParams {
7171
pub logits_all: bool,
7272
pub embedding: bool,
7373
pub offload_kqv: bool,
74+
pub cb_eval: llama_cpp_sys_2::ggml_backend_sched_eval_callback,
75+
pub cb_eval_user_data: *mut std::ffi::c_void,
7476
}
7577

7678
/// Default parameters for `LlamaContext`. (as defined in llama.cpp by `llama_context_default_params`)
@@ -97,6 +99,8 @@ impl From<llama_context_params> for LlamaContextParams {
9799
n_threads_batch,
98100
rope_freq_base,
99101
rope_freq_scale,
102+
cb_eval,
103+
cb_eval_user_data,
100104
type_k,
101105
type_v,
102106
mul_mat_q,
@@ -131,6 +135,8 @@ impl From<llama_context_params> for LlamaContextParams {
131135
yarn_beta_slow,
132136
yarn_orig_ctx,
133137
offload_kqv,
138+
cb_eval,
139+
cb_eval_user_data,
134140
}
135141
}
136142
}
@@ -157,6 +163,8 @@ impl From<LlamaContextParams> for llama_context_params {
157163
yarn_beta_slow,
158164
yarn_orig_ctx,
159165
offload_kqv,
166+
cb_eval,
167+
cb_eval_user_data,
160168
}: LlamaContextParams,
161169
) -> Self {
162170
llama_context_params {
@@ -179,6 +187,8 @@ impl From<LlamaContextParams> for llama_context_params {
179187
yarn_beta_slow,
180188
yarn_orig_ctx,
181189
offload_kqv,
190+
cb_eval,
191+
cb_eval_user_data,
182192
}
183193
}
184-
}
194+
}

llama-cpp-2/src/model.rs

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -309,12 +309,12 @@ impl LlamaModel {
309309
/// # Errors
310310
///
311311
/// There are many ways this can fail. See [`LlamaContextLoadError`] for more information.
312-
pub fn new_context<'a>(
313-
&'a self,
312+
pub fn new_context(
313+
&self,
314314
_: &LlamaBackend,
315-
params: &LlamaContextParams,
316-
) -> Result<LlamaContext<'a>, LlamaContextLoadError> {
317-
let context_params = llama_context_params::from(*params);
315+
params: LlamaContextParams,
316+
) -> Result<LlamaContext, LlamaContextLoadError> {
317+
let context_params = llama_context_params::from(params);
318318
let context = unsafe {
319319
llama_cpp_sys_2::llama_new_context_with_model(self.model.as_ptr(), context_params)
320320
};

0 commit comments

Comments
 (0)