Skip to content

Commit e6addfc

Browse files
nsarrazin and gary149
authored
Conversation trees (#223) (#807)
* work on branching * branching * work on input field * wip * wip * helper stuff * pass tests * add type guards and clean up type requirements * fixed shared conv type * more helpers stuff * clean up code * addChildren helper * addSibling * add test for addSibling & refacto addChildren tests * backend work pt. 1 * backend done * add children property to messages for easier frontend rendering * fix message id type rail * front-end work on simple linear conversation * fix title generation * convert conversations on post * server side retry works with branching * clean up buttons * fix retry feature backend * Send new messages in any subtree * make edit previous prompts feature work * fix padding * revert unneeded changes * bring back pending message * fix front-end streaming * fix initial message * Revert "fix initial message" This reverts commit 6257fe8. * Fix bug subtree state refresh * fix continue feature on shared conversations * fix websearch updates * Fix first message streaming * fix fornt-end websearch updates * fix loading icon * fix bottom padding * move children nav to below the message * Show current message in continue & retry * children nav styling * you can now edit the first message * bottom padding on assistant message * fix test * lint * use <form> * tree navigation * misc * mobile: hide download link * forgot to implem continue feature lol * fix continue feature on llama 70b * fix edit mode * disable submit button & nav when loading * fix bug when interrupting * hide arrows in edit mode * forgot to reset edit mode when submitting retry * reset editing when switching conversations --------- Co-authored-by: Victor Mustar <victor.mustar@gmail.com>
1 parent 89ae59b commit e6addfc

36 files changed

+1451
-568
lines changed

.env.template

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -33,10 +33,6 @@ MODELS=`[
3333
"name": "meta-llama/Llama-2-70b-chat-hf",
3434
"description": "The latest and biggest model from Meta, fine-tuned for chat.",
3535
"websiteUrl": "https://ai.meta.com/llama/",
36-
"userMessageToken": "",
37-
"userMessageEndToken": " [/INST] ",
38-
"assistantMessageToken": "",
39-
"assistantMessageEndToken": " </s><s>[INST] ",
4036
"preprompt": " ",
4137
"chatPromptTemplate" : "<s>[INST] <<SYS>>\n{{preprompt}}\n<</SYS>>\n\n{{#each messages}}{{#ifUser}}{{content}} [/INST] {{/ifUser}}{{#ifAssistant}}{{content}} </s><s>[INST] {{/ifAssistant}}{{/each}}",
4238
"promptExamples": [
@@ -58,7 +54,7 @@ MODELS=`[
5854
"top_k": 50,
5955
"truncate": 3072,
6056
"max_new_tokens": 1024,
61-
"stop" : ["</s>", " </s><s>[INST] "]
57+
"stop" : ["</s>", "</s><s>[INST]"]
6258
}
6359
},
6460
{

src/lib/buildPrompt.ts

Lines changed: 19 additions & 82 deletions
Original file line numberDiff line numberDiff line change
@@ -1,94 +1,31 @@
1+
import type { EndpointParameters } from "./server/endpoints/endpoints";
12
import type { BackendModel } from "./server/models";
2-
import type { Message } from "./types/Message";
3-
import { format } from "date-fns";
4-
import type { WebSearch } from "./types/WebSearch";
5-
import { downloadFile } from "./server/files/downloadFile";
6-
import type { Conversation } from "./types/Conversation";
73

8-
interface buildPromptOptions {
9-
messages: Pick<Message, "from" | "content" | "files">[];
10-
id?: Conversation["_id"];
4+
type buildPromptOptions = Pick<EndpointParameters, "messages" | "preprompt" | "continueMessage"> & {
115
model: BackendModel;
12-
locals?: App.Locals;
13-
webSearch?: WebSearch;
14-
preprompt?: string;
15-
files?: File[];
16-
continue?: boolean;
17-
}
6+
};
187

198
export async function buildPrompt({
209
messages,
2110
model,
22-
webSearch,
2311
preprompt,
24-
id,
12+
continueMessage,
2513
}: buildPromptOptions): Promise<string> {
26-
let modifiedMessages = [...messages];
27-
28-
if (webSearch && webSearch.context) {
29-
// find index of the last user message
30-
const lastUsrMsgIndex = modifiedMessages.map((el) => el.from).lastIndexOf("user");
31-
32-
// combine all the other previous questions into one string
33-
const previousUserMessages = modifiedMessages.filter((el) => el.from === "user").slice(0, -1);
34-
const previousQuestions =
35-
previousUserMessages.length > 0
36-
? `Previous questions: \n${previousUserMessages
37-
.map(({ content }) => `- ${content}`)
38-
.join("\n")}`
39-
: "";
40-
41-
const currentDate = format(new Date(), "MMMM d, yyyy");
42-
43-
// update the last user message directly (that way if the last message is an assistant partial answer, we keep the beginning of that answer)
44-
modifiedMessages[lastUsrMsgIndex] = {
45-
from: "user",
46-
content: `I searched the web using the query: ${webSearch.searchQuery}. Today is ${currentDate} and here are the results:
47-
=====================
48-
${webSearch.context}
49-
=====================
50-
${previousQuestions}
51-
Answer the question: ${messages[lastUsrMsgIndex].content}`,
52-
};
53-
}
54-
// section to handle potential files input
55-
if (model.multimodal) {
56-
modifiedMessages = await Promise.all(
57-
modifiedMessages.map(async (el) => {
58-
let content = el.content;
59-
60-
if (el.from === "user") {
61-
if (el?.files && el.files.length > 0 && id) {
62-
const markdowns = await Promise.all(
63-
el.files.map(async (hash) => {
64-
try {
65-
const { content: image, mime } = await downloadFile(hash, id);
66-
const b64 = image.toString("base64");
67-
return `![](data:${mime};base64,${b64})})`;
68-
} catch (e) {
69-
console.error(e);
70-
}
71-
})
72-
);
73-
content += markdowns.join("\n ");
74-
} else {
75-
// if no image, append an empty white image
76-
content +=
77-
"\n![](data:image/png;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAAQABADAREAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD+/igAoAKACgD/2Q==)";
78-
}
79-
}
80-
81-
return { ...el, content };
82-
})
83-
);
14+
let prompt = model
15+
.chatPromptRender({ messages, preprompt })
16+
// Not super precise, but it's truncated in the model's backend anyway
17+
.split(" ")
18+
.slice(-(model.parameters?.truncate ?? 0))
19+
.join(" ");
20+
21+
if (continueMessage && model.parameters?.stop) {
22+
prompt = model.parameters.stop.reduce((acc: string, curr: string) => {
23+
if (acc.endsWith(curr)) {
24+
return acc.slice(0, acc.length - curr.length);
25+
}
26+
return acc;
27+
}, prompt.trimEnd());
8428
}
8529

86-
return (
87-
model
88-
.chatPromptRender({ messages: modifiedMessages, preprompt })
89-
// Not super precise, but it's truncated in the model's backend anyway
90-
.split(" ")
91-
.slice(-(model.parameters?.truncate ?? 0))
92-
.join(" ")
93-
);
30+
return prompt;
9431
}

0 commit comments

Comments
 (0)