---
sidebar_position: 8
title: LLM Conversation
description: llm conversation chatbot example
keywords: [react, chat, chatbot, chatbotify]
---

# LLM Conversation

The following is an example showing how to integrate in-browser models (e.g. via [**WebLlm**](https://webllm.mlc.ai/)/[**Wllama**](https://www.npmjs.com/package/@wllama/wllama)) into React ChatBotify. It leverages the [**LLM Connector Plugin**](https://www.npmjs.com/package/@rcb-plugins/llm-connector), which is maintained separately under the [**React ChatBotify Plugins**](https://github.com/orgs/React-ChatBotify-Plugins) organization. This example also uses the [**WebLlmProvider**](https://github.com/React-ChatBotify-Plugins/llm-connnector/blob/main/docs/providers/WebLlm.md) and [**WllamaProvider**](https://github.com/React-ChatBotify-Plugins/llm-connnector/blob/main/docs/providers/Wllama.md), both of which ship by default with the LLM Connector Plugin. If you require support with the plugin, please reach out on the [**plugins discord**](https://discord.gg/J6pA4v3AMW) instead.
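
In your own project, the components used in the live example further below must be imported first. A minimal sketch is shown here, assuming (as the plugin's documentation suggests) that `WebLlmProvider` and `WllamaProvider` are named exports of the plugin package:

```jsx
// assumed imports for a standalone project (provider export names taken from the plugin docs)
import ChatBot from "react-chatbotify";
import LlmConnector, { WebLlmProvider, WllamaProvider } from "@rcb-plugins/llm-connector";
```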

:::tip

The plugin also comes with other default providers, which you can try out in the [**OpenAI Integration Example**](/docs/examples/openai_integration.md) and [**Gemini Integration Example**](/docs/examples/gemini_integration.md).

:::

:::tip

If you expect your LLM responses to contain markdown, consider using the [**Markdown Renderer Plugin**](https://www.npmjs.com/package/@rcb-plugins/markdown-renderer) as well!

:::
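
Plugins compose by being passed into the same `plugins` array. A minimal sketch, assuming the Markdown Renderer Plugin exposes a default `MarkdownRenderer` export:

```jsx
import LlmConnector from "@rcb-plugins/llm-connector";
import MarkdownRenderer from "@rcb-plugins/markdown-renderer";

// both plugins go into the same array passed to the chatbot's plugins prop
const plugins = [LlmConnector(), MarkdownRenderer()];
```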

:::caution

Running models in the browser can be sluggish (especially if a large model is chosen). In production, you should pick a reasonably sized model or look to proxy your requests to a backend. A lightweight demo project for an LLM proxy can be found [**here**](https://github.com/tjtanjin/llm-proxy). You may also refer to [**this article**](https://tjtanjin.medium.com/how-to-build-and-integrate-a-react-chatbot-with-llms-a-react-chatbotify-guide-part-4-b40cd59fd6e6) for more details.

:::
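
With the proxy approach, the browser only talks to your own backend, which holds the API key and forwards requests to the LLM provider. The sketch below illustrates the client side of the idea; the `/api/llm` endpoint and its request/response shape are hypothetical:

```jsx
// hypothetical call to a backend LLM proxy (endpoint and JSON shape are illustrative only)
const callLlmProxy = async (userInput) => {
  const response = await fetch("/api/llm", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ prompt: userInput }),
  });
  if (!response.ok) {
    throw new Error(`Proxy request failed: ${response.status}`);
  }
  const data = await response.json();
  return data.reply; // field name is illustrative
};
```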

```jsx live noInline title=MyChatBot.js
const MyChatBot = () => {
  // initialize the plugin
  const plugins = [LlmConnector()];

  // checks user message stop condition to end llm conversation
  const onUserMessageCheck = async (message) => {
    if (
      typeof message.content === 'string' &&
      message.content.toUpperCase() === 'RESTART'
    ) {
      return 'start';
    }
    return null;
  };

  // checks key down stop condition to end llm conversation
  const onKeyDownCheck = async (event) => {
    if (event.key === 'Escape') {
      return 'start';
    }
    return null;
  };

  // example flow for testing
  const flow = {
    start: {
      message: "Hello, pick a model runtime to get started!",
      options: ["WebLlm", "Wllama"],
      chatDisabled: true,
      path: async (params) => {
        await params.simulateStreamMessage("Type 'RESTART' or hit 'ESC' to pick another runtime!");
        await params.simulateStreamMessage("Ask away!");
        return params.userInput.toLowerCase();
      },
    },
    webllm: {
      llmConnector: {
        // provider configuration guide:
        // https://github.com/React-ChatBotify-Plugins/llm-connnector/blob/main/docs/providers/WebLlm.md
        provider: new WebLlmProvider({
          model: 'Qwen2-0.5B-Instruct-q4f16_1-MLC',
        }),
        outputType: 'character',
        stopConditions: {
          onUserMessage: onUserMessageCheck,
          onKeyDown: onKeyDownCheck,
        },
      },
    },
    wllama: {
      llmConnector: {
        // provider configuration guide:
        // https://github.com/React-ChatBotify-Plugins/llm-connnector/blob/main/docs/providers/Wllama.md
        provider: new WllamaProvider({
          modelUrl: 'https://huggingface.co/HuggingFaceTB/SmolLM2-360M-Instruct-GGUF/resolve/main/smollm2-360m-instruct-q8_0.gguf',
          loadModelConfig: {
            n_ctx: 8192,
          },
        }),
        outputType: 'character',
        stopConditions: {
          onUserMessage: onUserMessageCheck,
          onKeyDown: onKeyDownCheck,
        },
      },
    },
  };

  return (
    <ChatBot
      settings={{general: {embedded: true}, chatHistory: {storageKey: "example_llm_conversation"}}}
      plugins={plugins}
      flow={flow}
    />
  );
};

render(<MyChatBot/>);
```