
Commit 1f6c8a9

ChatON:WIP:Add c api wrapper for chat_template_apply
Initial skeletons. Updated the existing logic to support this. The in-between helper also had an awkward signature with respect to returning status and data; that is fixed as well.
1 parent 9cad4af commit 1f6c8a9
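
For reference, a minimal sketch of the reworked calling convention (based on the diff below; the variable names are illustrative only):

std::string tagged;
// old form: std::string tagged = chaton_tmpl_apply(tmpl, msgs, alertAssistantAtEnd);
// new form: a status/length is returned, the tagged prompt comes back through an out-parameter
int32_t taggedLength = chaton_tmpl_apply(tmpl, msgs, alertAssistantAtEnd, tagged);
if (taggedLength < 0) {
    // unknown template, or applying it failed
}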

File tree: 1 file changed (+29 / -5 lines)


common/chaton.hpp

Lines changed: 29 additions & 5 deletions
@@ -306,6 +306,9 @@ inline bool chaton_tmpl_apply_ex(
     std::vector<int> &lens,
     bool alertAssistantAtEnd
     ) {
+    if (!chaton_tmpl_exists(tmpl)) {
+        return false;
+    }
     ChatParts cp = {};
     std::stringstream ss;
     std::string globalBegin = chaton_tmpl_role_kv(tmpl, K_GLOBAL, {K_BEGIN});
@@ -390,16 +393,37 @@ inline bool chaton_tmpl_apply_ex(
 // global-begin + [[role-begin] + [role-prefix] + msg + role-suffix] + global-end
 // if there is a combination of system-user messages,
 // then 1st user message will have user-prefix only if systemuser-1st-user-has-prefix is true
-inline std::string chaton_tmpl_apply(
+inline int32_t chaton_tmpl_apply(
     const std::string &tmpl,
     const std::vector<llama_chat_message> &msgs,
-    bool alertAssistantAtEnd
+    bool alertAssistantAtEnd,
+    std::string &tagged
     ) {
-    std::string tagged;
     std::string types;
     std::vector<int> lens;
-    chaton_tmpl_apply_ex(tmpl, msgs, tagged, types, lens, alertAssistantAtEnd);
-    return tagged;
+    if (!chaton_tmpl_apply_ex(tmpl, msgs, tagged, types, lens, alertAssistantAtEnd)) {
+        return -1;
+    }
+    return tagged.size();
+}
+
+inline int32_t chaton_tmpl_apply_capi(
+    const char *tmpl,
+    const struct llama_chat_message *msgs,
+    const size_t numMsgs,
+    bool alertAssistantAtEnd,
+    char *dest,
+    int32_t destLength
+    ) {
+    if ((tmpl == nullptr) || (dest == nullptr)) {
+        return -1;
+    }
+    std::vector<llama_chat_message> vMsgs;
+    for(size_t i=0; i<numMsgs; i++) {
+        vMsgs.push_back(msgs[i]);
+    }
+    std::string taggedMsgs;
+    int32_t taggedLength = chaton_tmpl_apply(tmpl, vMsgs, alertAssistantAtEnd, taggedMsgs);
 }
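
The chaton_tmpl_apply_capi wrapper added here is still a WIP skeleton: it builds the tagged prompt but does not yet copy it into dest or return a value. Below is a minimal sketch of how the remaining step could look, assuming dest is a caller-owned buffer of destLength bytes that should receive a NUL-terminated copy; the truncation policy and return convention are assumptions, not part of this commit.

inline int32_t chaton_tmpl_apply_capi(
    const char *tmpl,
    const struct llama_chat_message *msgs,
    const size_t numMsgs,
    bool alertAssistantAtEnd,
    char *dest,
    int32_t destLength
    ) {
    if ((tmpl == nullptr) || (dest == nullptr) || (destLength <= 0)) {
        return -1;
    }
    // Collect the C array into the vector form expected by chaton_tmpl_apply
    std::vector<llama_chat_message> vMsgs;
    for(size_t i=0; i<numMsgs; i++) {
        vMsgs.push_back(msgs[i]);
    }
    std::string taggedMsgs;
    int32_t taggedLength = chaton_tmpl_apply(tmpl, vMsgs, alertAssistantAtEnd, taggedMsgs);
    if (taggedLength < 0) {
        return -1;
    }
    if (taggedLength >= destLength) {
        // Not enough room for the tagged prompt plus the terminating NUL
        return -1;
    }
    taggedMsgs.copy(dest, taggedLength);   // std::string::copy does not NUL-terminate
    dest[taggedLength] = '\0';
    return taggedLength;
}

A C caller would then pass a llama_chat_message array and a char buffer, treat a negative return value as an error, and otherwise read the tagged prompt from the buffer.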