Skip to content

Commit d454a74

Browse files
authored
Merge pull request #530 from babichjacob/fix-use-c-char-instead-of-i8
fix: implicitly use `c_char` type instead of hardcoding `i8`
2 parents 08d7495 + 92bef3a commit d454a74

File tree

1 file changed

+2
-2
lines changed

1 file changed

+2
-2
lines changed

llama-cpp-2/src/model.rs

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -523,7 +523,7 @@ impl LlamaModel {
523523
let message_length = chat.iter().fold(0, |acc, c| {
524524
acc + c.role.to_bytes().len() + c.content.to_bytes().len()
525525
});
526-
let mut buff: Vec<i8> = vec![0_i8; message_length * 4];
526+
let mut buff = vec![0; message_length * 4];
527527

528528
// Build our llama_cpp_sys_2 chat messages
529529
let chat: Vec<llama_cpp_sys_2::llama_chat_message> = chat
@@ -548,7 +548,7 @@ impl LlamaModel {
548548
chat.as_ptr(),
549549
chat.len(),
550550
add_ass,
551-
buff.as_mut_ptr().cast::<std::os::raw::c_char>(),
551+
buff.as_mut_ptr(),
552552
buff.len() as i32,
553553
);
554554
// A buffer twice the size should be sufficient for all models, if this is not the case for a new model, we can increase it

0 commit comments

Comments (0)