diff --git a/.gitignore b/.gitignore index 3f29b4e..a67e677 100644 --- a/.gitignore +++ b/.gitignore @@ -7,7 +7,6 @@ # production /build -/dist *.tgz # misc diff --git a/dist/App.d.ts b/dist/App.d.ts new file mode 100644 index 0000000..c28a238 --- /dev/null +++ b/dist/App.d.ts @@ -0,0 +1,3 @@ +declare const App: () => import("react/jsx-runtime").JSX.Element; +export default App; +//# sourceMappingURL=App.d.ts.map \ No newline at end of file diff --git a/dist/App.d.ts.map b/dist/App.d.ts.map new file mode 100644 index 0000000..05a5b83 --- /dev/null +++ b/dist/App.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"App.d.ts","sourceRoot":"","sources":["../src/App.tsx"],"names":[],"mappings":"AAaA,QAAA,MAAM,GAAG,+CAuHR,CAAC;AAEF,eAAe,GAAG,CAAC"} \ No newline at end of file diff --git a/dist/constants/DefaultPluginConfig.d.ts b/dist/constants/DefaultPluginConfig.d.ts new file mode 100644 index 0000000..a7a4ed8 --- /dev/null +++ b/dist/constants/DefaultPluginConfig.d.ts @@ -0,0 +1,6 @@ +import { PluginConfig } from '../types/PluginConfig'; +/** + * Default values for plugin config. + */ +export declare const DefaultPluginConfig: PluginConfig; +//# sourceMappingURL=DefaultPluginConfig.d.ts.map \ No newline at end of file diff --git a/dist/constants/DefaultPluginConfig.d.ts.map b/dist/constants/DefaultPluginConfig.d.ts.map new file mode 100644 index 0000000..337e4ef --- /dev/null +++ b/dist/constants/DefaultPluginConfig.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"DefaultPluginConfig.d.ts","sourceRoot":"","sources":["../../src/constants/DefaultPluginConfig.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAErD;;GAEG;AACH,eAAO,MAAM,mBAAmB,EAAE,YAEjC,CAAC"} \ No newline at end of file diff --git a/dist/core/useRcbPlugin.d.ts b/dist/core/useRcbPlugin.d.ts new file mode 100644 index 0000000..8452738 --- /dev/null +++ b/dist/core/useRcbPlugin.d.ts @@ -0,0 +1,10 @@ +import { Plugin } from 'react-chatbotify'; +import { PluginConfig } from '../types/PluginConfig'; +/** + * Plugin hook that handles all the core logic. 
+ * + * @param pluginConfig configurations for the plugin + */ +declare const useRcbPlugin: (pluginConfig?: PluginConfig) => ReturnType; +export default useRcbPlugin; +//# sourceMappingURL=useRcbPlugin.d.ts.map \ No newline at end of file diff --git a/dist/core/useRcbPlugin.d.ts.map b/dist/core/useRcbPlugin.d.ts.map new file mode 100644 index 0000000..4aa0bc1 --- /dev/null +++ b/dist/core/useRcbPlugin.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"useRcbPlugin.d.ts","sourceRoot":"","sources":["../../src/core/useRcbPlugin.tsx"],"names":[],"mappings":"AACA,OAAO,EAEN,MAAM,EAON,MAAM,kBAAkB,CAAC;AAE1B,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAOrD;;;;GAIG;AACH,QAAA,MAAM,YAAY,GAAI,eAAe,YAAY,KAAG,UAAU,CAAC,MAAM,CAsFpE,CAAC;AAEF,eAAe,YAAY,CAAC"} \ No newline at end of file diff --git a/dist/development.d.ts b/dist/development.d.ts new file mode 100644 index 0000000..de2c7d6 --- /dev/null +++ b/dist/development.d.ts @@ -0,0 +1,2 @@ +export {}; +//# sourceMappingURL=development.d.ts.map \ No newline at end of file diff --git a/dist/development.d.ts.map b/dist/development.d.ts.map new file mode 100644 index 0000000..dd3b27b --- /dev/null +++ b/dist/development.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"development.d.ts","sourceRoot":"","sources":["../src/development.tsx"],"names":[],"mappings":""} \ No newline at end of file diff --git a/dist/factory/RcbPluginFactory.d.ts b/dist/factory/RcbPluginFactory.d.ts new file mode 100644 index 0000000..0887cb6 --- /dev/null +++ b/dist/factory/RcbPluginFactory.d.ts @@ -0,0 +1,13 @@ +import { PluginConfig } from '../types/PluginConfig'; +/** + * Factory that prepares the plugin hook to be consumed by the core library. + * + * @param pluginConfig configurations for the plugin + */ +declare const RcbPluginFactory: (pluginConfig?: PluginConfig) => () => { + name: string; + settings?: import("react-chatbotify").Settings; + styles?: import("react-chatbotify").Styles; +}; +export default RcbPluginFactory; +//# sourceMappingURL=RcbPluginFactory.d.ts.map \ No newline at end of file diff --git a/dist/factory/RcbPluginFactory.d.ts.map b/dist/factory/RcbPluginFactory.d.ts.map new file mode 100644 index 0000000..530d07f --- /dev/null +++ b/dist/factory/RcbPluginFactory.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"RcbPluginFactory.d.ts","sourceRoot":"","sources":["../../src/factory/RcbPluginFactory.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,YAAY,EAAE,MAAM,uBAAuB,CAAC;AAErD;;;;GAIG;AACH,QAAA,MAAM,gBAAgB,GAAI,eAAe,YAAY;;;;CAOpD,CAAC;AAEF,eAAe,gBAAgB,CAAC"} \ No newline at end of file diff --git a/dist/hooks/useChangePath.d.ts b/dist/hooks/useChangePath.d.ts new file mode 100644 index 0000000..6c4e47d --- /dev/null +++ b/dist/hooks/useChangePath.d.ts @@ -0,0 +1,11 @@ +import { Flow } from 'react-chatbotify'; +import { LlmConnectorBlock } from '../types/LlmConnectorBlock'; +/** + * Handles changing of conversation path (block). 
+ * + * @param getFlow flow of the chatbot + * @param setConnectorBlockFields sets all fields required for llm connector block + */ +declare const useChangePath: (getFlow: () => Flow, setConnectorBlockFields: (block: LlmConnectorBlock) => void) => void; +export { useChangePath }; +//# sourceMappingURL=useChangePath.d.ts.map \ No newline at end of file diff --git a/dist/hooks/useChangePath.d.ts.map b/dist/hooks/useChangePath.d.ts.map new file mode 100644 index 0000000..b38f5d5 --- /dev/null +++ b/dist/hooks/useChangePath.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"useChangePath.d.ts","sourceRoot":"","sources":["../../src/hooks/useChangePath.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,IAAI,EAA+C,MAAM,kBAAkB,CAAC;AAErF,OAAO,EAAE,iBAAiB,EAAE,MAAM,4BAA4B,CAAC;AAE/D;;;;;GAKG;AACH,QAAA,MAAM,aAAa,GAAI,SAAS,MAAM,IAAI,EAAE,yBAAyB,CAAC,KAAK,EAAE,iBAAiB,KAAK,IAAI,SAoBtG,CAAC;AAEF,OAAO,EAAE,aAAa,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/hooks/useMessageHandler.d.ts b/dist/hooks/useMessageHandler.d.ts new file mode 100644 index 0000000..051bffc --- /dev/null +++ b/dist/hooks/useMessageHandler.d.ts @@ -0,0 +1,32 @@ +import { Message } from 'react-chatbotify'; +import { Provider } from '../types/Provider'; +/** + * Handles message events. + * + * @param refs object containing relevant refs + * @param actions object containing relevant actions + */ +declare const useMessageHandler: (refs: { + providerRef: React.MutableRefObject; + messagesRef: React.MutableRefObject; + outputTypeRef: React.MutableRefObject<"character" | "chunk" | "full">; + outputSpeedRef: React.MutableRefObject; + historySizeRef: React.MutableRefObject; + initialMessageRef: React.MutableRefObject; + errorMessageRef: React.MutableRefObject; + onUserMessageRef: React.MutableRefObject<((msg: Message) => Promise) | null>; + onKeyDownRef: React.MutableRefObject<((e: KeyboardEvent) => Promise) | null>; +}, actions: { + speakAudio: (text: string) => void; + injectMessage: (content: string | JSX.Element, sender?: string) => Promise; + simulateStreamMessage: (content: string, sender?: string) => Promise; + streamMessage: (msg: string) => void; + endStreamMessage: () => void; + toggleTextAreaDisabled: (active?: boolean) => void; + toggleIsBotTyping: (active?: boolean) => void; + focusTextArea: () => void; + goToPath: (path: string) => void; + getIsChatBotVisible: () => boolean; +}) => void; +export { useMessageHandler }; +//# sourceMappingURL=useMessageHandler.d.ts.map \ No newline at end of file diff --git a/dist/hooks/useMessageHandler.d.ts.map b/dist/hooks/useMessageHandler.d.ts.map new file mode 100644 index 0000000..1166604 --- /dev/null +++ b/dist/hooks/useMessageHandler.d.ts.map @@ -0,0 +1 @@ 
+{"version":3,"file":"useMessageHandler.d.ts","sourceRoot":"","sources":["../../src/hooks/useMessageHandler.ts"],"names":[],"mappings":"AACA,OAAO,EAIN,OAAO,EAGP,MAAM,kBAAkB,CAAC;AAE1B,OAAO,EAAE,QAAQ,EAAE,MAAM,mBAAmB,CAAC;AAM7C;;;;;GAKG;AACH,QAAA,MAAM,iBAAiB,GACtB,MAAM;IACL,WAAW,EAAE,KAAK,CAAC,gBAAgB,CAAC,QAAQ,GAAG,IAAI,CAAC,CAAC;IACrD,WAAW,EAAE,KAAK,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC;IAC/C,aAAa,EAAE,KAAK,CAAC,gBAAgB,CAAC,WAAW,GAAG,OAAO,GAAG,MAAM,CAAC,CAAC;IACtE,cAAc,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAC/C,cAAc,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAC/C,iBAAiB,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAClD,eAAe,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAChD,gBAAgB,EAAE,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,GAAG,EAAE,OAAO,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;IAC5F,YAAY,EAAE,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,CAAC,EAAE,aAAa,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;CAC5F,EACD,SAAS;IACR,UAAU,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;IACnC,aAAa,EAAE,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,CAAC,OAAO,EAAE,MAAM,CAAC,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAAC;IAC3F,qBAAqB,EAAE,CAAC,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAAC;IACrF,aAAa,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,CAAC;IACrC,gBAAgB,EAAE,MAAM,IAAI,CAAC;IAC7B,sBAAsB,EAAE,CAAC,MAAM,CAAC,EAAE,OAAO,KAAK,IAAI,CAAC;IACnD,iBAAiB,EAAE,CAAC,MAAM,CAAC,EAAE,OAAO,KAAK,IAAI,CAAC;IAC9C,aAAa,EAAE,MAAM,IAAI,CAAC;IAC1B,QAAQ,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;IACjC,mBAAmB,EAAE,MAAM,OAAO,CAAC;CACnC,SA8FD,CAAC;AAEF,OAAO,EAAE,iBAAiB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/hooks/useProcessBlock.d.ts b/dist/hooks/useProcessBlock.d.ts new file mode 100644 index 0000000..be109e1 --- /dev/null +++ b/dist/hooks/useProcessBlock.d.ts @@ -0,0 +1,32 @@ +import { Message } from 'react-chatbotify'; +import { Provider } from '../types/Provider'; +/** + * Handles pre-processing and post-processing of blocks. 
+ * + * @param refs object containing relevant refs + * @param actions object containing relevant actions + */ +declare const useProcessBlock: (refs: { + providerRef: React.MutableRefObject; + messagesRef: React.MutableRefObject; + outputTypeRef: React.MutableRefObject<"character" | "chunk" | "full">; + outputSpeedRef: React.MutableRefObject; + historySizeRef: React.MutableRefObject; + initialMessageRef: React.MutableRefObject; + errorMessageRef: React.MutableRefObject; + onUserMessageRef: React.MutableRefObject<((msg: Message) => Promise) | null>; + onKeyDownRef: React.MutableRefObject<((e: KeyboardEvent) => Promise) | null>; +}, actions: { + speakAudio: (text: string) => void; + injectMessage: (content: string | JSX.Element, sender?: string) => Promise; + simulateStreamMessage: (content: string, sender?: string) => Promise; + streamMessage: (msg: string) => void; + endStreamMessage: () => void; + toggleTextAreaDisabled: (active?: boolean) => void; + toggleIsBotTyping: (active?: boolean) => void; + focusTextArea: () => void; + goToPath: (path: string) => void; + getIsChatBotVisible: () => boolean; +}) => void; +export { useProcessBlock }; +//# sourceMappingURL=useProcessBlock.d.ts.map \ No newline at end of file diff --git a/dist/hooks/useProcessBlock.d.ts.map b/dist/hooks/useProcessBlock.d.ts.map new file mode 100644 index 0000000..b06eb7b --- /dev/null +++ b/dist/hooks/useProcessBlock.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"useProcessBlock.d.ts","sourceRoot":"","sources":["../../src/hooks/useProcessBlock.ts"],"names":[],"mappings":"AACA,OAAO,EAAqD,OAAO,EAA2B,MAAM,kBAAkB,CAAC;AAEvH,OAAO,EAAE,QAAQ,EAAE,MAAM,mBAAmB,CAAC;AAE7C;;;;;GAKG;AACH,QAAA,MAAM,eAAe,GACpB,MAAM;IACL,WAAW,EAAE,KAAK,CAAC,gBAAgB,CAAC,QAAQ,GAAG,IAAI,CAAC,CAAC;IACrD,WAAW,EAAE,KAAK,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC;IAC/C,aAAa,EAAE,KAAK,CAAC,gBAAgB,CAAC,WAAW,GAAG,OAAO,GAAG,MAAM,CAAC,CAAC;IACtE,cAAc,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAC/C,cAAc,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAC/C,iBAAiB,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAClD,eAAe,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAChD,gBAAgB,EAAE,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,GAAG,EAAE,OAAO,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;IAC5F,YAAY,EAAE,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,CAAC,EAAE,aAAa,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;CAC5F,EACD,SAAS;IACR,UAAU,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;IACnC,aAAa,EAAE,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,CAAC,OAAO,EAAE,MAAM,CAAC,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAAC;IAC3F,qBAAqB,EAAE,CAAC,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAAC;IACrF,aAAa,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,CAAC;IACrC,gBAAgB,EAAE,MAAM,IAAI,CAAC;IAC7B,sBAAsB,EAAE,CAAC,MAAM,CAAC,EAAE,OAAO,KAAK,IAAI,CAAC;IACnD,iBAAiB,EAAE,CAAC,MAAM,CAAC,EAAE,OAAO,KAAK,IAAI,CAAC;IAC9C,aAAa,EAAE,MAAM,IAAI,CAAC;IAC1B,QAAQ,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;IACjC,mBAAmB,EAAE,MAAM,OAAO,CAAC;CACnC,SAuDD,CAAC;AACF,OAAO,EAAE,eAAe,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/index.cjs b/dist/index.cjs new file mode 100644 index 0000000..722d057 --- /dev/null +++ b/dist/index.cjs @@ -0,0 +1,2 @@ +"use strict";var _=Object.create;var $=Object.defineProperty;var z=Object.getOwnPropertyDescriptor;var G=Object.getOwnPropertyNames;var L=Object.getPrototypeOf,N=Object.prototype.hasOwnProperty;var K=(a,e,t,s)=>{if(e&&typeof e=="object"||typeof e=="function")for(let n of G(e))!N.call(a,n)&&n!==t&&$(a,n,{get:()=>e[n],enumerable:!(s=z(e,n))||s.enumerable});return a};var 
J=(a,e,t)=>(t=a!=null?_(L(a)):{},K(e||!a||!a.__esModule?$(t,"default",{value:a,enumerable:!0}):t,a));Object.defineProperties(exports,{__esModule:{value:!0},[Symbol.toStringTag]:{value:"Module"}});const g=require("react"),u=require("react-chatbotify"),Y={autoConfig:!0},q=(a,e)=>{const t=g.useCallback(s=>{const r=a()[s.data.nextPath];e(r)},[a,e]);u.useOnRcbEvent(u.RcbEvent.CHANGE_PATH,t)},H=(a,e)=>{const{outputTypeRef:t}=a,{toggleTextAreaDisabled:s,toggleIsBotTyping:n,focusTextArea:r,injectMessage:o,simulateStreamMessage:i,getIsChatBotVisible:d}=e,c=g.useCallback(l=>{var m;const p=l.data.block;p.llmConnector&&(l.preventDefault(),l.type==="rcb-pre-process-block"&&((m=p.llmConnector)!=null&&m.initialMessage&&(t.current==="full"?o(a.initialMessageRef.current):i(a.initialMessageRef.current)),n(!1),s(!1),setTimeout(()=>{d()&&r()})))},[n,s,r,d]);u.useOnRcbEvent(u.RcbEvent.PRE_PROCESS_BLOCK,c),u.useOnRcbEvent(u.RcbEvent.POST_PROCESS_BLOCK,c)},V=async function*(a,e){for await(const t of a)for(const s of t)yield s,await new Promise(n=>setTimeout(n,e))},Q=async function*(a,e){for await(const t of a)yield t,await new Promise(s=>setTimeout(s,e))},X=async function*(a,e,t){e==="character"?yield*V(a,t):yield*Q(a,t)},Z=async function*(a,e){for await(const t of a)e(t),yield t},ee=async(a,e,t,s={})=>{var R,M;if(!e.providerRef.current)return;const{speakAudio:n,toggleIsBotTyping:r,toggleTextAreaDisabled:o,focusTextArea:i,injectMessage:d,streamMessage:c,endStreamMessage:l,getIsChatBotVisible:p}=t,m=e.providerRef.current.sendMessages(a),b=e.outputTypeRef.current,y=e.outputSpeedRef.current;if(b==="full"){let h="";for await(const f of m){if((R=s.signal)!=null&&R.aborted)break;h+=f}r(!1),d(h),setTimeout(()=>{o(!1),p()&&i()})}else{const h=X(Z(m,n),b,y);let f="",S=!1;for await(const C of h){if((M=s.signal)!=null&&M.aborted)break;S||(r(!1),S=!0),f+=C,c(f)}l(),setTimeout(()=>{o(!1),p()&&i()})}},te=500,se=(a,e)=>{const{messagesRef:t,outputTypeRef:s,onUserMessageRef:n,onKeyDownRef:r,errorMessageRef:o}=a,{injectMessage:i,simulateStreamMessage:d,toggleTextAreaDisabled:c,toggleIsBotTyping:l,goToPath:p,focusTextArea:m,getIsChatBotVisible:b}=e,y=g.useRef(null),R=g.useCallback(M=>{if(!a.providerRef.current)return;const h=M.data.message,f=h.sender.toUpperCase();h.tags=h.tags??[],h.tags.push(`rcb-llm-connector-plugin:${f}`),f==="USER"&&(l(!0),c(!0),setTimeout(async()=>{var v;if(n.current){const P=await n.current(h);if(P)return(v=y.current)==null||v.abort(),y.current=null,p(P)}const S=a.historySizeRef.current,C=t.current,T=S?[...C.slice(-(S-1)),h]:[h],E=new AbortController;y.current=E,ee(T,a,e,{signal:E.signal}).catch(P=>{l(!1),c(!1),setTimeout(()=>{b()&&m()}),console.error("LLM prompt failed",P),s.current==="full"?i(o.current):d(o.current)})},te))},[a,e]);u.useOnRcbEvent(u.RcbEvent.POST_INJECT_MESSAGE,R),u.useOnRcbEvent(u.RcbEvent.STOP_SIMULATE_STREAM_MESSAGE,R),u.useOnRcbEvent(u.RcbEvent.STOP_STREAM_MESSAGE,R),g.useEffect(()=>{const M=async h=>{var f;if(r.current){const S=await r.current(h);S&&((f=y.current)==null||f.abort(),y.current=null,p(S))}};return window.addEventListener("keydown",M),()=>window.removeEventListener("keydown",M)},[])},re=a=>{const e=g.useRef([]),t=g.useRef(null),s=g.useRef("chunk"),n=g.useRef(30),r=g.useRef(0),o=g.useRef(""),i=g.useRef("Unable to get response, please try 
again."),d=g.useRef(null),c=g.useRef(null),{getFlow:l}=u.useFlow(),{speakAudio:p}=u.useAudio(),{messages:m,injectMessage:b,simulateStreamMessage:y,streamMessage:R,endStreamMessage:M}=u.useMessages(),{goToPath:h}=u.usePaths(),{toggleTextAreaDisabled:f,focusTextArea:S}=u.useTextArea(),{toggleIsBotTyping:C,getIsChatBotVisible:T}=u.useChatWindow(),E={...Y,...a??{}};g.useEffect(()=>{e.current=m},[m]),q(l,w=>{var x,A,k,B,U,F,I,W,j,D;t.current=((x=w.llmConnector)==null?void 0:x.provider)??null,s.current=((A=w.llmConnector)==null?void 0:A.outputType)??"chunk",n.current=((k=w.llmConnector)==null?void 0:k.outputSpeed)??30,r.current=((B=w.llmConnector)==null?void 0:B.historySize)??0,o.current=((U=w.llmConnector)==null?void 0:U.initialMessage)??"",i.current=((F=w.llmConnector)==null?void 0:F.errorMessage)??"Unable to get response, please try again.",d.current=((W=(I=w.llmConnector)==null?void 0:I.stopConditions)==null?void 0:W.onUserMessage)??null,c.current=((D=(j=w.llmConnector)==null?void 0:j.stopConditions)==null?void 0:D.onKeyDown)??null});const v={providerRef:t,messagesRef:e,outputTypeRef:s,outputSpeedRef:n,historySizeRef:r,initialMessageRef:o,errorMessageRef:i,onUserMessageRef:d,onKeyDownRef:c},P={speakAudio:p,injectMessage:b,simulateStreamMessage:y,streamMessage:R,endStreamMessage:M,toggleTextAreaDisabled:f,toggleIsBotTyping:C,focusTextArea:S,goToPath:h,getIsChatBotVisible:T};H(v,P),se(v,P);const O={name:"@rcb-plugins/llm-connector"};return E!=null&&E.autoConfig&&(O.settings={event:{rcbChangePath:!0,rcbPostInjectMessage:!0,rcbStopSimulateStreamMessage:!0,rcbStopStreamMessage:!0,rcbPreProcessBlock:!0,rcbPostProcessBlock:!0}}),O},oe=a=>()=>re(a);class ne{constructor(e){this.debug=!1,this.roleMap=s=>{switch(s){case"USER":return"user";default:return"model"}},this.constructBodyWithMessages=s=>{let n;return this.messageParser?n=this.messageParser(s):n=s.filter(o=>typeof o.content=="string"&&o.sender.toUpperCase()!=="SYSTEM").map(o=>{const i=this.roleMap(o.sender.toUpperCase()),d=o.content;return{role:i,parts:[{text:d}]}}),this.systemMessage&&(n=[{role:"user",parts:[{text:this.systemMessage}]},...n]),{contents:n,...this.body}},this.handleStreamResponse=async function*(s){var o,i,d,c,l;const n=new TextDecoder("utf-8");let r="";for(;;){const{value:p,done:m}=await s.read();if(m)break;r+=n.decode(p,{stream:!0});const b=r.split(` +`);r=b.pop();for(const y of b){const R=y.trim();if(!R.startsWith("data: "))continue;const M=R.slice(6);try{const f=(l=(c=(d=(i=(o=JSON.parse(M).candidates)==null?void 0:o[0])==null?void 0:i.content)==null?void 0:d.parts)==null?void 0:c[0])==null?void 0:l.text;f&&(yield f)}catch(h){console.error("SSE JSON parse error:",M,h)}}}},this.method=e.method??"POST",this.body=e.body??{},this.systemMessage=e.systemMessage,this.responseFormat=e.responseFormat??"stream",this.messageParser=e.messageParser,this.debug=e.debug??!1,this.headers={"Content-Type":"application/json",Accept:this.responseFormat==="stream"?"text/event-stream":"application/json",...e.headers};const t=e.baseUrl??"https://generativelanguage.googleapis.com/v1beta";if(e.mode==="direct")this.endpoint=this.responseFormat==="stream"?`${t}/models/${e.model}:streamGenerateContent?alt=sse&key=${e.apiKey||""}`:`${t}/models/${e.model}:generateContent?key=${e.apiKey||""}`;else if(e.mode==="proxy")this.endpoint=`${t}/${e.model}`;else throw Error("Invalid mode specified for Gemini provider ('direct' or 'proxy').")}async*sendMessages(e){var s,n,r,o,i;if(this.debug){const 
d=this.endpoint.replace(/\?key=([^&]+)/,"?key=[REDACTED]"),c={...this.headers};console.log("[GeminiProvider] Request:",{method:this.method,endpoint:d,headers:c,body:this.constructBodyWithMessages(e)})}const t=await fetch(this.endpoint,{method:this.method,headers:this.headers,body:JSON.stringify(this.constructBodyWithMessages(e))});if(this.debug&&console.log("[GeminiProvider] Response status:",t.status),!t.ok)throw new Error(`Gemini API error ${t.status}: ${await t.text()}`);if(this.responseFormat==="stream"){if(!t.body)throw new Error("Response body is empty – cannot stream");const d=t.body.getReader();for await(const c of this.handleStreamResponse(d))yield c}else{const d=await t.json();this.debug&&console.log("[GeminiProvider] Response body:",d);const c=(i=(o=(r=(n=(s=d.candidates)==null?void 0:s[0])==null?void 0:n.content)==null?void 0:r.parts)==null?void 0:o[0])==null?void 0:i.text;if(typeof c=="string")yield c;else throw new Error("Unexpected response shape – no text candidate")}}}class ae{constructor(e){if(this.debug=!1,this.roleMap=t=>{switch(t){case"USER":return"user";case"SYSTEM":return"system";default:return"assistant"}},this.constructBodyWithMessages=t=>{let s;return this.messageParser?s=this.messageParser(t):s=t.filter(r=>typeof r.content=="string"&&r.sender.toUpperCase()!=="SYSTEM").map(r=>{const o=this.roleMap(r.sender.toUpperCase()),i=r.content;return{role:o,content:i}}),this.systemMessage&&(s=[{role:"system",content:this.systemMessage},...s]),{messages:s,...this.body}},this.handleStreamResponse=async function*(t){var r,o,i;const s=new TextDecoder("utf-8");let n="";for(;;){const{value:d,done:c}=await t.read();if(c)break;n+=s.decode(d,{stream:!0});const l=n.split(/\r?\n/);n=l.pop();for(const p of l){if(!p.startsWith("data: "))continue;const m=p.slice(6).trim();if(m==="[DONE]")return;try{const y=(i=(o=(r=JSON.parse(m).choices)==null?void 0:r[0])==null?void 0:o.delta)==null?void 0:i.content;y&&(yield y)}catch(b){console.error("Stream parse error",b)}}}},this.method=e.method??"POST",this.endpoint=e.baseUrl??"https://api.openai.com/v1/chat/completions",this.systemMessage=e.systemMessage,this.responseFormat=e.responseFormat??"stream",this.messageParser=e.messageParser,this.debug=e.debug??!1,this.headers={"Content-Type":"application/json",Accept:this.responseFormat==="stream"?"text/event-stream":"application/json",...e.headers},this.body={model:e.model,stream:this.responseFormat==="stream",...e.body},e.mode==="direct"){this.headers={...this.headers,Authorization:`Bearer ${e.apiKey}`};return}if(e.mode!=="proxy")throw Error("Invalid mode specified for OpenAI provider ('direct' or 'proxy').")}async*sendMessages(e){var s,n,r;if(this.debug){const o={...this.headers};delete o.Authorization,console.log("[OpenaiProvider] Request:",{method:this.method,endpoint:this.endpoint,headers:o,body:this.constructBodyWithMessages(e)})}const t=await fetch(this.endpoint,{method:this.method,headers:this.headers,body:JSON.stringify(this.constructBodyWithMessages(e))});if(this.debug&&console.log("[OpenaiProvider] Response status:",t.status),!t.ok)throw new Error(`Openai API error ${t.status}: ${await t.text()}`);if(this.responseFormat==="stream"){if(!t.body)throw new Error("Response body is empty – cannot stream");const o=t.body.getReader();for await(const i of this.handleStreamResponse(o))yield i}else{const o=await t.json();this.debug&&console.log("[OpenaiProvider] Response body:",o);const i=(r=(n=(s=o.choices)==null?void 0:s[0])==null?void 0:n.message)==null?void 0:r.content;if(typeof i=="string")yield 
i;else throw new Error("Unexpected response shape – no text candidate")}}}class ie{constructor(e){this.debug=!1,this.roleMap=t=>{switch(t){case"USER":return"user";case"SYSTEM":return"system";default:return"assistant"}},this.constructBodyWithMessages=t=>{let s;return this.messageParser?s=this.messageParser(t):s=t.filter(r=>typeof r.content=="string"&&r.sender.toUpperCase()!=="SYSTEM").map(r=>{const o=this.roleMap(r.sender.toUpperCase()),i=r.content;return{role:o,content:i}}),this.systemMessage&&(s=[{role:"system",content:this.systemMessage},...s]),{messages:s,stream:this.responseFormat==="stream",...this.chatCompletionOptions}},this.model=e.model,this.systemMessage=e.systemMessage,this.responseFormat=e.responseFormat??"stream",this.messageParser=e.messageParser,this.engineConfig=e.engineConfig??{},this.chatCompletionOptions=e.chatCompletionOptions??{},this.debug=e.debug??!1,this.createEngine()}async createEngine(){const{CreateMLCEngine:e}=await import("@mlc-ai/web-llm");this.engine=await e(this.model,{...this.engineConfig})}async*sendMessages(e){var s,n,r,o,i,d;this.engine||await this.createEngine(),this.debug&&console.log("[WebLlmProvider] Request:",{model:this.model,systemMessage:this.systemMessage,responseFormat:this.responseFormat,engineConfig:this.engineConfig,chatCompletionOptions:this.chatCompletionOptions,messages:this.constructBodyWithMessages(e).messages});const t=await((s=this.engine)==null?void 0:s.chat.completions.create(this.constructBodyWithMessages(e)));if(this.debug&&console.log("[WebLlmProvider] Response:",t),t&&Symbol.asyncIterator in t)for await(const c of t){const l=(r=(n=c.choices[0])==null?void 0:n.delta)==null?void 0:r.content;l&&(yield l)}else(d=(i=(o=t==null?void 0:t.choices)==null?void 0:o[0])==null?void 0:i.message)!=null&&d.content&&(yield t.choices[0].message.content)}}class ce{constructor(e){if(this.debug=!1,this.roleMap=t=>{switch(t){case"USER":return"user";case"SYSTEM":return"system";default:return"assistant"}},this.constructBodyWithMessages=t=>{let s;return this.messageParser?s=this.messageParser(t):s=t.filter(r=>typeof r.content=="string"&&r.sender.toUpperCase()!=="SYSTEM").map(r=>{const o=this.roleMap(r.sender.toUpperCase()),i=r.content;return{role:o,content:i}}),this.systemMessage&&(s=[{role:"system",content:this.systemMessage},...s]),{model:this.body.model,messages:s}},this.handleStreamResponse=async function*(t){const s=new TextDecoder("utf-8");let n="";for(;;){const{value:r,done:o}=await t.read();if(o)break;n+=s.decode(r,{stream:!0});const i=n.split(/\r?\n/);n=i.pop();for(const d of i)try{const c=JSON.parse(d);if(c.done===!0)return;c.message&&typeof c.message.content=="string"&&(yield c.message.content)}catch(c){console.error("Stream parse error",c)}}},this.method=e.method??"POST",this.endpoint=e.baseUrl??"http://localhost:11434/api/chat",this.systemMessage=e.systemMessage,this.responseFormat=e.responseFormat??"stream",this.messageParser=e.messageParser,this.debug=e.debug??!1,this.headers={"Content-Type":"application/json",Accept:this.responseFormat==="stream"?"text/event-stream":"application/json",...e.headers},this.body={model:e.model,stream:this.responseFormat==="stream",...e.body},e.mode==="direct"){this.headers={...this.headers,Authorization:`Bearer ${e.apiKey}`};return}if(e.mode!=="proxy")throw Error("Invalid mode specified for Ollama provider ('direct' or 'proxy').")}async*sendMessages(e){var s,n,r;if(this.debug){const o={...this.headers};delete o.Authorization,console.log("[OllamaProvider] 
Request:",{method:this.method,endpoint:this.endpoint,headers:o,body:this.constructBodyWithMessages(e)})}const t=await fetch(this.endpoint,{method:this.method,headers:this.headers,body:JSON.stringify(this.constructBodyWithMessages(e))});if(this.debug&&console.log("[OllamaProvider] Response status:",t.status),!t.ok)throw new Error(`Ollama API error ${t.status}: ${await t.text()}`);if(this.responseFormat==="stream"){if(!t.body)throw new Error("Response body is empty – cannot stream");const o=t.body.getReader();for await(const i of this.handleStreamResponse(o))yield i}else{const o=await t.json();this.debug&&console.log("[OllamaProvider] Response body:",o);const i=(r=(n=(s=o.choices)==null?void 0:s[0])==null?void 0:n.message)==null?void 0:r.content;if(typeof i=="string")yield i;else throw new Error("Unexpected response shape – no text candidate")}}}exports.GeminiProvider=ne;exports.OllamaProvider=ce;exports.OpenaiProvider=ae;exports.WebLlmProvider=ie;exports.default=oe; diff --git a/dist/index.d.ts b/dist/index.d.ts new file mode 100644 index 0000000..e0dde96 --- /dev/null +++ b/dist/index.d.ts @@ -0,0 +1,12 @@ +import LlmConnector from './factory/RcbPluginFactory'; +import GeminiProvider from './providers/GeminiProvider'; +import OpenaiProvider from './providers/OpenaiProvider'; +import WebLlmProvider from './providers/WebLlmProvider'; +import OllamaProvider from './providers/OllamaProvider'; +import { LlmConnectorBlock } from './types/LlmConnectorBlock'; +import { PluginConfig } from './types/PluginConfig'; +import { Provider } from './types/Provider'; +export { GeminiProvider, OpenaiProvider, WebLlmProvider, OllamaProvider }; +export type { LlmConnectorBlock, PluginConfig, Provider }; +export default LlmConnector; +//# sourceMappingURL=index.d.ts.map \ No newline at end of file diff --git a/dist/index.d.ts.map b/dist/index.d.ts.map new file mode 100644 index 0000000..ea86f11 --- /dev/null +++ b/dist/index.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.tsx"],"names":[],"mappings":"AACA,OAAO,YAAY,MAAM,4BAA4B,CAAC;AAGtD,OAAO,cAAc,MAAM,4BAA4B,CAAC;AACxD,OAAO,cAAc,MAAM,4BAA4B,CAAC;AACxD,OAAO,cAAc,MAAM,4BAA4B,CAAC;AACxD,OAAO,cAAc,MAAM,4BAA4B,CAAC;AAGxD,OAAO,EAAE,iBAAiB,EAAE,MAAM,2BAA2B,CAAC;AAC9D,OAAO,EAAE,YAAY,EAAE,MAAM,sBAAsB,CAAC;AACpD,OAAO,EAAE,QAAQ,EAAE,MAAM,kBAAkB,CAAC;AAG5C,OAAO,EAAE,cAAc,EAAE,cAAc,EAAE,cAAc,EAAE,cAAc,EAAE,CAAC;AAG1E,YAAY,EAAE,iBAAiB,EAAE,YAAY,EAAE,QAAQ,EAAE,CAAC;AAG1D,eAAe,YAAY,CAAC"} \ No newline at end of file diff --git a/dist/index.js b/dist/index.js new file mode 100644 index 0000000..5116245 --- /dev/null +++ b/dist/index.js @@ -0,0 +1,561 @@ +import { useCallback as O, useRef as S, useEffect as z } from "react"; +import { useOnRcbEvent as C, RcbEvent as T, useFlow as G, useAudio as L, useMessages as N, usePaths as K, useTextArea as J, useChatWindow as Y } from "react-chatbotify"; +const H = { + autoConfig: !0 +}, q = (i, e) => { + const t = O( + (s) => { + const r = i()[s.data.nextPath]; + e(r); + }, + [i, e] + ); + C(T.CHANGE_PATH, t); +}, V = (i, e) => { + const { outputTypeRef: t } = i, { + toggleTextAreaDisabled: s, + toggleIsBotTyping: n, + focusTextArea: r, + injectMessage: o, + simulateStreamMessage: a, + getIsChatBotVisible: d + } = e, c = O( + (l) => { + var p; + const u = l.data.block; + u.llmConnector && (l.preventDefault(), l.type === "rcb-pre-process-block" && ((p = u.llmConnector) != null && p.initialMessage && (t.current === "full" ? 
o(i.initialMessageRef.current) : a(i.initialMessageRef.current)), n(!1), s(!1), setTimeout(() => { + d() && r(); + }))); + }, + [n, s, r, d] + ); + C(T.PRE_PROCESS_BLOCK, c), C(T.POST_PROCESS_BLOCK, c); +}, Q = async function* (i, e) { + for await (const t of i) + for (const s of t) + yield s, await new Promise((n) => setTimeout(n, e)); +}, X = async function* (i, e) { + for await (const t of i) + yield t, await new Promise((s) => setTimeout(s, e)); +}, Z = async function* (i, e, t) { + e === "character" ? yield* Q(i, t) : yield* X(i, t); +}, ee = async function* (i, e) { + for await (const t of i) + e(t), yield t; +}, te = async (i, e, t, s = {}) => { + var M, y; + if (!e.providerRef.current) + return; + const { + speakAudio: n, + toggleIsBotTyping: r, + toggleTextAreaDisabled: o, + focusTextArea: a, + injectMessage: d, + streamMessage: c, + endStreamMessage: l, + getIsChatBotVisible: u + } = t, p = e.providerRef.current.sendMessages(i), f = e.outputTypeRef.current, g = e.outputSpeedRef.current; + if (f === "full") { + let h = ""; + for await (const m of p) { + if ((M = s.signal) != null && M.aborted) break; + h += m; + } + r(!1), d(h), setTimeout(() => { + o(!1), u() && a(); + }); + } else { + const h = Z(ee(p, n), f, g); + let m = "", b = !1; + for await (const E of h) { + if ((y = s.signal) != null && y.aborted) + break; + b || (r(!1), b = !0), m += E, c(m); + } + l(), setTimeout(() => { + o(!1), u() && a(); + }); + } +}, se = 500, re = (i, e) => { + const { messagesRef: t, outputTypeRef: s, onUserMessageRef: n, onKeyDownRef: r, errorMessageRef: o } = i, { + injectMessage: a, + simulateStreamMessage: d, + toggleTextAreaDisabled: c, + toggleIsBotTyping: l, + goToPath: u, + focusTextArea: p, + getIsChatBotVisible: f + } = e, g = S(null), M = O( + (y) => { + if (!i.providerRef.current) + return; + const h = y.data.message, m = h.sender.toUpperCase(); + h.tags = h.tags ?? [], h.tags.push(`rcb-llm-connector-plugin:${m}`), m === "USER" && (l(!0), c(!0), setTimeout(async () => { + var v; + if (n.current) { + const R = await n.current(h); + if (R) + return (v = g.current) == null || v.abort(), g.current = null, u(R); + } + const b = i.historySizeRef.current, E = t.current, x = b ? [...E.slice(-(b - 1)), h] : [h], P = new AbortController(); + g.current = P, te(x, i, e, { signal: P.signal }).catch((R) => { + l(!1), c(!1), setTimeout(() => { + f() && p(); + }), console.error("LLM prompt failed", R), s.current === "full" ? a(o.current) : d(o.current); + }); + }, se)); + }, + [i, e] + ); + C(T.POST_INJECT_MESSAGE, M), C(T.STOP_SIMULATE_STREAM_MESSAGE, M), C(T.STOP_STREAM_MESSAGE, M), z(() => { + const y = async (h) => { + var m; + if (r.current) { + const b = await r.current(h); + b && ((m = g.current) == null || m.abort(), g.current = null, u(b)); + } + }; + return window.addEventListener("keydown", y), () => window.removeEventListener("keydown", y); + }, []); +}, oe = (i) => { + const e = S([]), t = S(null), s = S("chunk"), n = S(30), r = S(0), o = S(""), a = S("Unable to get response, please try again."), d = S(null), c = S(null), { getFlow: l } = G(), { speakAudio: u } = L(), { messages: p, injectMessage: f, simulateStreamMessage: g, streamMessage: M, endStreamMessage: y } = N(), { goToPath: h } = K(), { toggleTextAreaDisabled: m, focusTextArea: b } = J(), { toggleIsBotTyping: E, getIsChatBotVisible: x } = Y(), P = { ...H, ...i ?? {} }; + z(() => { + e.current = p; + }, [p]), q(l, (w) => { + var k, B, U, F, I, W, D, $, j, _; + t.current = ((k = w.llmConnector) == null ? void 0 : k.provider) ?? 
null, s.current = ((B = w.llmConnector) == null ? void 0 : B.outputType) ?? "chunk", n.current = ((U = w.llmConnector) == null ? void 0 : U.outputSpeed) ?? 30, r.current = ((F = w.llmConnector) == null ? void 0 : F.historySize) ?? 0, o.current = ((I = w.llmConnector) == null ? void 0 : I.initialMessage) ?? "", a.current = ((W = w.llmConnector) == null ? void 0 : W.errorMessage) ?? "Unable to get response, please try again.", d.current = (($ = (D = w.llmConnector) == null ? void 0 : D.stopConditions) == null ? void 0 : $.onUserMessage) ?? null, c.current = ((_ = (j = w.llmConnector) == null ? void 0 : j.stopConditions) == null ? void 0 : _.onKeyDown) ?? null; + }); + const v = { + providerRef: t, + messagesRef: e, + outputTypeRef: s, + outputSpeedRef: n, + historySizeRef: r, + initialMessageRef: o, + errorMessageRef: a, + onUserMessageRef: d, + onKeyDownRef: c + }, R = { + speakAudio: u, + injectMessage: f, + simulateStreamMessage: g, + streamMessage: M, + endStreamMessage: y, + toggleTextAreaDisabled: m, + toggleIsBotTyping: E, + focusTextArea: b, + goToPath: h, + getIsChatBotVisible: x + }; + V(v, R), re(v, R); + const A = { name: "@rcb-plugins/llm-connector" }; + return P != null && P.autoConfig && (A.settings = { + event: { + rcbChangePath: !0, + rcbPostInjectMessage: !0, + rcbStopSimulateStreamMessage: !0, + rcbStopStreamMessage: !0, + rcbPreProcessBlock: !0, + rcbPostProcessBlock: !0 + } + }), A; +}, ie = (i) => () => oe(i); +class ce { + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/Gemini.md + * + * @param config configuration for setup + */ + constructor(e) { + this.debug = !1, this.roleMap = (s) => { + switch (s) { + case "USER": + return "user"; + default: + return "model"; + } + }, this.constructBodyWithMessages = (s) => { + let n; + return this.messageParser ? n = this.messageParser(s) : n = s.filter( + (o) => typeof o.content == "string" && o.sender.toUpperCase() !== "SYSTEM" + ).map((o) => { + const a = this.roleMap(o.sender.toUpperCase()), d = o.content; + return { + role: a, + parts: [{ text: d }] + }; + }), this.systemMessage && (n = [{ role: "user", parts: [{ text: this.systemMessage }] }, ...n]), { + contents: n, + ...this.body + }; + }, this.handleStreamResponse = async function* (s) { + var o, a, d, c, l; + const n = new TextDecoder("utf-8"); + let r = ""; + for (; ; ) { + const { value: u, done: p } = await s.read(); + if (p) break; + r += n.decode(u, { stream: !0 }); + const f = r.split(` +`); + r = f.pop(); + for (const g of f) { + const M = g.trim(); + if (!M.startsWith("data: ")) continue; + const y = M.slice(6); + try { + const m = (l = (c = (d = (a = (o = JSON.parse(y).candidates) == null ? void 0 : o[0]) == null ? void 0 : a.content) == null ? void 0 : d.parts) == null ? void 0 : c[0]) == null ? void 0 : l.text; + m && (yield m); + } catch (h) { + console.error("SSE JSON parse error:", y, h); + } + } + } + }, this.method = e.method ?? "POST", this.body = e.body ?? {}, this.systemMessage = e.systemMessage, this.responseFormat = e.responseFormat ?? "stream", this.messageParser = e.messageParser, this.debug = e.debug ?? !1, this.headers = { + "Content-Type": "application/json", + Accept: this.responseFormat === "stream" ? "text/event-stream" : "application/json", + ...e.headers + }; + const t = e.baseUrl ?? 
"https://generativelanguage.googleapis.com/v1beta"; + if (e.mode === "direct") + this.endpoint = this.responseFormat === "stream" ? `${t}/models/${e.model}:streamGenerateContent?alt=sse&key=${e.apiKey || ""}` : `${t}/models/${e.model}:generateContent?key=${e.apiKey || ""}`; + else if (e.mode === "proxy") + this.endpoint = `${t}/${e.model}`; + else + throw Error("Invalid mode specified for Gemini provider ('direct' or 'proxy')."); + } + /** + * Calls Gemini and yields each chunk (or the full text). + * + * @param messages messages to include in the request + */ + async *sendMessages(e) { + var s, n, r, o, a; + if (this.debug) { + const d = this.endpoint.replace(/\?key=([^&]+)/, "?key=[REDACTED]"), c = { ...this.headers }; + console.log("[GeminiProvider] Request:", { + method: this.method, + endpoint: d, + headers: c, + body: this.constructBodyWithMessages(e) + }); + } + const t = await fetch(this.endpoint, { + method: this.method, + headers: this.headers, + body: JSON.stringify(this.constructBodyWithMessages(e)) + }); + if (this.debug && console.log("[GeminiProvider] Response status:", t.status), !t.ok) + throw new Error(`Gemini API error ${t.status}: ${await t.text()}`); + if (this.responseFormat === "stream") { + if (!t.body) + throw new Error("Response body is empty – cannot stream"); + const d = t.body.getReader(); + for await (const c of this.handleStreamResponse(d)) + yield c; + } else { + const d = await t.json(); + this.debug && console.log("[GeminiProvider] Response body:", d); + const c = (a = (o = (r = (n = (s = d.candidates) == null ? void 0 : s[0]) == null ? void 0 : n.content) == null ? void 0 : r.parts) == null ? void 0 : o[0]) == null ? void 0 : a.text; + if (typeof c == "string") + yield c; + else + throw new Error("Unexpected response shape – no text candidate"); + } + } +} +class de { + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/OpenAI.md + * + * @param config configuration for setup + */ + constructor(e) { + if (this.debug = !1, this.roleMap = (t) => { + switch (t) { + case "USER": + return "user"; + case "SYSTEM": + return "system"; + default: + return "assistant"; + } + }, this.constructBodyWithMessages = (t) => { + let s; + return this.messageParser ? s = this.messageParser(t) : s = t.filter( + (r) => typeof r.content == "string" && r.sender.toUpperCase() !== "SYSTEM" + ).map((r) => { + const o = this.roleMap(r.sender.toUpperCase()), a = r.content; + return { + role: o, + content: a + }; + }), this.systemMessage && (s = [{ role: "system", content: this.systemMessage }, ...s]), { + messages: s, + ...this.body + }; + }, this.handleStreamResponse = async function* (t) { + var r, o, a; + const s = new TextDecoder("utf-8"); + let n = ""; + for (; ; ) { + const { value: d, done: c } = await t.read(); + if (c) break; + n += s.decode(d, { stream: !0 }); + const l = n.split(/\r?\n/); + n = l.pop(); + for (const u of l) { + if (!u.startsWith("data: ")) continue; + const p = u.slice(6).trim(); + if (p === "[DONE]") return; + try { + const g = (a = (o = (r = JSON.parse(p).choices) == null ? void 0 : r[0]) == null ? void 0 : o.delta) == null ? void 0 : a.content; + g && (yield g); + } catch (f) { + console.error("Stream parse error", f); + } + } + } + }, this.method = e.method ?? "POST", this.endpoint = e.baseUrl ?? 
"https://api.openai.com/v1/chat/completions", this.systemMessage = e.systemMessage, this.responseFormat = e.responseFormat ?? "stream", this.messageParser = e.messageParser, this.debug = e.debug ?? !1, this.headers = { + "Content-Type": "application/json", + Accept: this.responseFormat === "stream" ? "text/event-stream" : "application/json", + ...e.headers + }, this.body = { + model: e.model, + stream: this.responseFormat === "stream", + ...e.body + }, e.mode === "direct") { + this.headers = { ...this.headers, Authorization: `Bearer ${e.apiKey}` }; + return; + } + if (e.mode !== "proxy") + throw Error("Invalid mode specified for OpenAI provider ('direct' or 'proxy')."); + } + /** + * Calls Openai and yields each chunk (or the full text). + * + * @param messages messages to include in the request + */ + async *sendMessages(e) { + var s, n, r; + if (this.debug) { + const o = { ...this.headers }; + delete o.Authorization, console.log("[OpenaiProvider] Request:", { + method: this.method, + endpoint: this.endpoint, + headers: o, + body: this.constructBodyWithMessages(e) + }); + } + const t = await fetch(this.endpoint, { + method: this.method, + headers: this.headers, + body: JSON.stringify(this.constructBodyWithMessages(e)) + }); + if (this.debug && console.log("[OpenaiProvider] Response status:", t.status), !t.ok) + throw new Error(`Openai API error ${t.status}: ${await t.text()}`); + if (this.responseFormat === "stream") { + if (!t.body) + throw new Error("Response body is empty – cannot stream"); + const o = t.body.getReader(); + for await (const a of this.handleStreamResponse(o)) + yield a; + } else { + const o = await t.json(); + this.debug && console.log("[OpenaiProvider] Response body:", o); + const a = (r = (n = (s = o.choices) == null ? void 0 : s[0]) == null ? void 0 : n.message) == null ? void 0 : r.content; + if (typeof a == "string") + yield a; + else + throw new Error("Unexpected response shape – no text candidate"); + } + } +} +class le { + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/WebLlm.md + * + * @param config configuration for setup + */ + constructor(e) { + this.debug = !1, this.roleMap = (t) => { + switch (t) { + case "USER": + return "user"; + case "SYSTEM": + return "system"; + default: + return "assistant"; + } + }, this.constructBodyWithMessages = (t) => { + let s; + return this.messageParser ? s = this.messageParser(t) : s = t.filter( + (r) => typeof r.content == "string" && r.sender.toUpperCase() !== "SYSTEM" + ).map((r) => { + const o = this.roleMap(r.sender.toUpperCase()), a = r.content; + return { + role: o, + content: a + }; + }), this.systemMessage && (s = [ + { + role: "system", + content: this.systemMessage + }, + ...s + ]), { + messages: s, + stream: this.responseFormat === "stream", + ...this.chatCompletionOptions + }; + }, this.model = e.model, this.systemMessage = e.systemMessage, this.responseFormat = e.responseFormat ?? "stream", this.messageParser = e.messageParser, this.engineConfig = e.engineConfig ?? {}, this.chatCompletionOptions = e.chatCompletionOptions ?? {}, this.debug = e.debug ?? !1, this.createEngine(); + } + /** + * Creates MLC Engine for inferencing. + */ + async createEngine() { + const { CreateMLCEngine: e } = await import("@mlc-ai/web-llm"); + this.engine = await e(this.model, { + ...this.engineConfig + }); + } + /** + * Calls WebLlm and yields each chunk (or the full text). 
+ * + * @param messages messages to include in the request + */ + async *sendMessages(e) { + var s, n, r, o, a, d; + this.engine || await this.createEngine(), this.debug && console.log("[WebLlmProvider] Request:", { + model: this.model, + systemMessage: this.systemMessage, + responseFormat: this.responseFormat, + engineConfig: this.engineConfig, + chatCompletionOptions: this.chatCompletionOptions, + messages: this.constructBodyWithMessages(e).messages + // Log messages being sent + }); + const t = await ((s = this.engine) == null ? void 0 : s.chat.completions.create(this.constructBodyWithMessages(e))); + if (this.debug && console.log("[WebLlmProvider] Response:", t), t && Symbol.asyncIterator in t) + for await (const c of t) { + const l = (r = (n = c.choices[0]) == null ? void 0 : n.delta) == null ? void 0 : r.content; + l && (yield l); + } + else (d = (a = (o = t == null ? void 0 : t.choices) == null ? void 0 : o[0]) == null ? void 0 : a.message) != null && d.content && (yield t.choices[0].message.content); + } +} +class he { + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/OpenAI.md + * + * @param config configuration for setup + */ + constructor(e) { + if (this.debug = !1, this.roleMap = (t) => { + switch (t) { + case "USER": + return "user"; + case "SYSTEM": + return "system"; + default: + return "assistant"; + } + }, this.constructBodyWithMessages = (t) => { + let s; + return this.messageParser ? s = this.messageParser(t) : s = t.filter( + (r) => typeof r.content == "string" && r.sender.toUpperCase() !== "SYSTEM" + ).map((r) => { + const o = this.roleMap(r.sender.toUpperCase()), a = r.content; + return { + role: o, + content: a + }; + }), this.systemMessage && (s = [{ role: "system", content: this.systemMessage }, ...s]), { + model: this.body.model, + messages: s + }; + }, this.handleStreamResponse = async function* (t) { + const s = new TextDecoder("utf-8"); + let n = ""; + for (; ; ) { + const { value: r, done: o } = await t.read(); + if (o) break; + n += s.decode(r, { stream: !0 }); + const a = n.split(/\r?\n/); + n = a.pop(); + for (const d of a) + try { + const c = JSON.parse(d); + if (c.done === !0) return; + c.message && typeof c.message.content == "string" && (yield c.message.content); + } catch (c) { + console.error("Stream parse error", c); + } + } + }, this.method = e.method ?? "POST", this.endpoint = e.baseUrl ?? "http://localhost:11434/api/chat", this.systemMessage = e.systemMessage, this.responseFormat = e.responseFormat ?? "stream", this.messageParser = e.messageParser, this.debug = e.debug ?? !1, this.headers = { + "Content-Type": "application/json", + Accept: this.responseFormat === "stream" ? "text/event-stream" : "application/json", + ...e.headers + }, this.body = { + model: e.model, + stream: this.responseFormat === "stream", + ...e.body + }, e.mode === "direct") { + this.headers = { ...this.headers, Authorization: `Bearer ${e.apiKey}` }; + return; + } + if (e.mode !== "proxy") + throw Error("Invalid mode specified for Ollama provider ('direct' or 'proxy')."); + } + /** + * Calls Ollama and yields each chunk (or the full text). 
+ * + * @param messages messages to include in the request + */ + async *sendMessages(e) { + var s, n, r; + if (this.debug) { + const o = { ...this.headers }; + delete o.Authorization, console.log("[OllamaProvider] Request:", { + method: this.method, + endpoint: this.endpoint, + headers: o, + body: this.constructBodyWithMessages(e) + }); + } + const t = await fetch(this.endpoint, { + method: this.method, + headers: this.headers, + body: JSON.stringify(this.constructBodyWithMessages(e)) + }); + if (this.debug && console.log("[OllamaProvider] Response status:", t.status), !t.ok) + throw new Error(`Ollama API error ${t.status}: ${await t.text()}`); + if (this.responseFormat === "stream") { + if (!t.body) + throw new Error("Response body is empty – cannot stream"); + const o = t.body.getReader(); + for await (const a of this.handleStreamResponse(o)) + yield a; + } else { + const o = await t.json(); + this.debug && console.log("[OllamaProvider] Response body:", o); + const a = (r = (n = (s = o.choices) == null ? void 0 : s[0]) == null ? void 0 : n.message) == null ? void 0 : r.content; + if (typeof a == "string") + yield a; + else + throw new Error("Unexpected response shape – no text candidate"); + } + } +} +export { + ce as GeminiProvider, + he as OllamaProvider, + de as OpenaiProvider, + le as WebLlmProvider, + ie as default +}; diff --git a/dist/providers/GeminiProvider.d.ts b/dist/providers/GeminiProvider.d.ts new file mode 100644 index 0000000..f48f102 --- /dev/null +++ b/dist/providers/GeminiProvider.d.ts @@ -0,0 +1,49 @@ +import { GeminiProviderConfig } from '../types/provider-config/GeminiProviderConfig'; +import { Provider } from '../types/Provider'; +import { Message } from 'react-chatbotify'; +/** + * Provider for Gemini’s API, supports both direct and proxy modes. + */ +declare class GeminiProvider implements Provider { + private method; + private endpoint; + private headers; + private body; + private systemMessage?; + private responseFormat; + private messageParser?; + private debug; + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/Gemini.md + * + * @param config configuration for setup + */ + constructor(config: GeminiProviderConfig); + /** + * Calls Gemini and yields each chunk (or the full text). + * + * @param messages messages to include in the request + */ + sendMessages(messages: Message[]): AsyncGenerator; + /** + * Maps the chatbot message sender to the provider message sender. + * + * @param sender sender from the chatbot + */ + private roleMap; + /** + * Builds the full request body. + * + * @param messages messages to parse + */ + private constructBodyWithMessages; + /** + * Consumes an SSE/text stream Response and yield each text chunk. 
+ * + * @reader request body reader + */ + private handleStreamResponse; +} +export default GeminiProvider; +//# sourceMappingURL=GeminiProvider.d.ts.map \ No newline at end of file diff --git a/dist/providers/GeminiProvider.d.ts.map b/dist/providers/GeminiProvider.d.ts.map new file mode 100644 index 0000000..ee32d6d --- /dev/null +++ b/dist/providers/GeminiProvider.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"GeminiProvider.d.ts","sourceRoot":"","sources":["../../src/providers/GeminiProvider.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,MAAM,+CAA+C,CAAC;AACrF,OAAO,EAAE,QAAQ,EAAE,MAAM,mBAAmB,CAAC;AAC7C,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAG3C;;GAEG;AACH,cAAM,cAAe,YAAW,QAAQ;IACvC,OAAO,CAAC,MAAM,CAAU;IACxB,OAAO,CAAC,QAAQ,CAAU;IAC1B,OAAO,CAAC,OAAO,CAA2B;IAC1C,OAAO,CAAC,IAAI,CAA2B;IACvC,OAAO,CAAC,aAAa,CAAC,CAAS;IAC/B,OAAO,CAAC,cAAc,CAAqB;IAC3C,OAAO,CAAC,aAAa,CAAC,CAAmD;IACzE,OAAO,CAAC,KAAK,CAAkB;IAE/B;;;;;OAKG;gBACgB,MAAM,EAAE,oBAAoB;IA0B/C;;;;OAIG;IACW,YAAY,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,cAAc,CAAC,MAAM,CAAC;IAmDvE;;;;OAIG;IACH,OAAO,CAAC,OAAO,CAOb;IAEF;;;;OAIG;IACH,OAAO,CAAC,yBAAyB,CA6B/B;IAEF;;;;OAIG;IACH,OAAO,CAAC,oBAAoB,CA4B1B;CACF;AAED,eAAe,cAAc,CAAC"} \ No newline at end of file diff --git a/dist/providers/OllamaProvider.d.ts b/dist/providers/OllamaProvider.d.ts new file mode 100644 index 0000000..26726a0 --- /dev/null +++ b/dist/providers/OllamaProvider.d.ts @@ -0,0 +1,49 @@ +import { Provider } from '../types/Provider'; +import { Message } from 'react-chatbotify'; +import { OpenaiProviderConfig } from '../types/provider-config/OpenaiProviderConfig'; +/** + * Provider for Ollama’s API, supports both direct and proxy modes. + */ +declare class OllamaProvider implements Provider { + private method; + private endpoint; + private headers; + private body; + private systemMessage?; + private responseFormat; + private messageParser?; + private debug; + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/OpenAI.md + * + * @param config configuration for setup + */ + constructor(config: OpenaiProviderConfig); + /** + * Calls Ollama and yields each chunk (or the full text). + * + * @param messages messages to include in the request + */ + sendMessages(messages: Message[]): AsyncGenerator; + /** + * Maps the chatbot message sender to the provider message sender. + * + * @param sender sender from the chatbot + */ + private roleMap; + /** + * Builds the full request body. + * + * @param messages messages to parse + */ + private constructBodyWithMessages; + /** + * Consumes an SSE/text stream Response and yield each text chunk. 
+ * + * @reader request body reader + */ + private handleStreamResponse; +} +export default OllamaProvider; +//# sourceMappingURL=OllamaProvider.d.ts.map \ No newline at end of file diff --git a/dist/providers/OllamaProvider.d.ts.map b/dist/providers/OllamaProvider.d.ts.map new file mode 100644 index 0000000..9785e68 --- /dev/null +++ b/dist/providers/OllamaProvider.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"OllamaProvider.d.ts","sourceRoot":"","sources":["../../src/providers/OllamaProvider.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,mBAAmB,CAAC;AAC7C,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAC3C,OAAO,EAAE,oBAAoB,EAAE,MAAM,+CAA+C,CAAC;AAGrF;;GAEG;AACH,cAAM,cAAe,YAAW,QAAQ;IACvC,OAAO,CAAC,MAAM,CAAU;IACxB,OAAO,CAAC,QAAQ,CAAU;IAC1B,OAAO,CAAC,OAAO,CAA2B;IAC1C,OAAO,CAAC,IAAI,CAA2B;IACvC,OAAO,CAAC,aAAa,CAAC,CAAS;IAC/B,OAAO,CAAC,cAAc,CAAqB;IAC3C,OAAO,CAAC,aAAa,CAAC,CAAmD;IACzE,OAAO,CAAC,KAAK,CAAkB;IAE/B;;;;;OAKG;gBACgB,MAAM,EAAE,oBAAoB;IA4B/C;;;;OAIG;IACW,YAAY,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,cAAc,CAAC,MAAM,CAAC;IA+CvE;;;;OAIG;IACH,OAAO,CAAC,OAAO,CASb;IAEF;;;;OAIG;IACH,OAAO,CAAC,yBAAyB,CA4B/B;IAEF;;;;OAIG;IACH,OAAO,CAAC,oBAAoB,CA0B1B;CACF;AAED,eAAe,cAAc,CAAC"} \ No newline at end of file diff --git a/dist/providers/OpenaiProvider.d.ts b/dist/providers/OpenaiProvider.d.ts new file mode 100644 index 0000000..039b1e7 --- /dev/null +++ b/dist/providers/OpenaiProvider.d.ts @@ -0,0 +1,49 @@ +import { Provider } from '../types/Provider'; +import { Message } from 'react-chatbotify'; +import { OpenaiProviderConfig } from '../types/provider-config/OpenaiProviderConfig'; +/** + * Provider for Openai’s API, supports both direct and proxy modes. + */ +declare class OpenaiProvider implements Provider { + private method; + private endpoint; + private headers; + private body; + private systemMessage?; + private responseFormat; + private messageParser?; + private debug; + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/OpenAI.md + * + * @param config configuration for setup + */ + constructor(config: OpenaiProviderConfig); + /** + * Calls Openai and yields each chunk (or the full text). + * + * @param messages messages to include in the request + */ + sendMessages(messages: Message[]): AsyncGenerator; + /** + * Maps the chatbot message sender to the provider message sender. + * + * @param sender sender from the chatbot + */ + private roleMap; + /** + * Builds the full request body. + * + * @param messages messages to parse + */ + private constructBodyWithMessages; + /** + * Consumes an SSE/text stream Response and yield each text chunk. 
+ * + * @reader request body reader + */ + private handleStreamResponse; +} +export default OpenaiProvider; +//# sourceMappingURL=OpenaiProvider.d.ts.map \ No newline at end of file diff --git a/dist/providers/OpenaiProvider.d.ts.map b/dist/providers/OpenaiProvider.d.ts.map new file mode 100644 index 0000000..b527a9a --- /dev/null +++ b/dist/providers/OpenaiProvider.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"OpenaiProvider.d.ts","sourceRoot":"","sources":["../../src/providers/OpenaiProvider.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,QAAQ,EAAE,MAAM,mBAAmB,CAAC;AAC7C,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAC3C,OAAO,EAAE,oBAAoB,EAAE,MAAM,+CAA+C,CAAC;AAGrF;;GAEG;AACH,cAAM,cAAe,YAAW,QAAQ;IACvC,OAAO,CAAC,MAAM,CAAU;IACxB,OAAO,CAAC,QAAQ,CAAU;IAC1B,OAAO,CAAC,OAAO,CAA2B;IAC1C,OAAO,CAAC,IAAI,CAA2B;IACvC,OAAO,CAAC,aAAa,CAAC,CAAS;IAC/B,OAAO,CAAC,cAAc,CAAqB;IAC3C,OAAO,CAAC,aAAa,CAAC,CAAmD;IACzE,OAAO,CAAC,KAAK,CAAkB;IAE/B;;;;;OAKG;gBACgB,MAAM,EAAE,oBAAoB;IA4B/C;;;;OAIG;IACW,YAAY,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,cAAc,CAAC,MAAM,CAAC;IA+CvE;;;;OAIG;IACH,OAAO,CAAC,OAAO,CASb;IAEF;;;;OAIG;IACH,OAAO,CAAC,yBAAyB,CA6B/B;IAEF;;;;OAIG;IACH,OAAO,CAAC,oBAAoB,CA2B1B;CACF;AAED,eAAe,cAAc,CAAC"} \ No newline at end of file diff --git a/dist/providers/WebLlmProvider.d.ts b/dist/providers/WebLlmProvider.d.ts new file mode 100644 index 0000000..dd6642f --- /dev/null +++ b/dist/providers/WebLlmProvider.d.ts @@ -0,0 +1,47 @@ +import { WebLlmProviderConfig } from '../types/provider-config/WebLlmProviderConfig'; +import { Provider } from '../types/Provider'; +import { Message } from 'react-chatbotify'; +/** + * Provider for MLC’s WebLLM runtime, for running models in the browser. + */ +declare class WebLlmProvider implements Provider { + private model; + private systemMessage?; + private responseFormat; + private engineConfig; + private chatCompletionOptions; + private messageParser?; + private engine?; + private debug; + /** + * Sets default values for the provider based on given configuration. Configuration guide here: + * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/WebLlm.md + * + * @param config configuration for setup + */ + constructor(config: WebLlmProviderConfig); + /** + * Creates MLC Engine for inferencing. + */ + private createEngine; + /** + * Calls WebLlm and yields each chunk (or the full text). + * + * @param messages messages to include in the request + */ + sendMessages(messages: Message[]): AsyncGenerator; + /** + * Maps the chatbot message sender to the provider message sender. + * + * @param sender sender from the chatbot + */ + private roleMap; + /** + * Builds the full request body. 
+ * + * @param messages messages to parse + */ + private constructBodyWithMessages; +} +export default WebLlmProvider; +//# sourceMappingURL=WebLlmProvider.d.ts.map \ No newline at end of file diff --git a/dist/providers/WebLlmProvider.d.ts.map b/dist/providers/WebLlmProvider.d.ts.map new file mode 100644 index 0000000..bdaeef9 --- /dev/null +++ b/dist/providers/WebLlmProvider.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"WebLlmProvider.d.ts","sourceRoot":"","sources":["../../src/providers/WebLlmProvider.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,oBAAoB,EAAE,MAAM,+CAA+C,CAAC;AACrF,OAAO,EAAE,QAAQ,EAAE,MAAM,mBAAmB,CAAC;AAC7C,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAI3C;;GAEG;AACH,cAAM,cAAe,YAAW,QAAQ;IACvC,OAAO,CAAC,KAAK,CAAU;IACvB,OAAO,CAAC,aAAa,CAAC,CAAS;IAC/B,OAAO,CAAC,cAAc,CAAqB;IAC3C,OAAO,CAAC,YAAY,CAAkB;IACtC,OAAO,CAAC,qBAAqB,CAA0B;IACvD,OAAO,CAAC,aAAa,CAAC,CAAmD;IACzE,OAAO,CAAC,MAAM,CAAC,CAAY;IAC3B,OAAO,CAAC,KAAK,CAAkB;IAE/B;;;;;OAKG;gBACS,MAAM,EAAE,oBAAoB;IAWxC;;OAEG;YACW,YAAY;IAO1B;;;;OAIG;IACW,YAAY,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,cAAc,CAAC,MAAM,CAAC;IAkCvE;;;;OAIG;IACH,OAAO,CAAC,OAAO,CASb;IAEF;;;;OAIG;IACH,OAAO,CAAC,yBAAyB,CAoC/B;CACF;AAED,eAAe,cAAc,CAAC"} \ No newline at end of file diff --git a/dist/tsconfig.tsbuildinfo b/dist/tsconfig.tsbuildinfo new file mode 100644 index 0000000..12d7934 --- /dev/null +++ b/dist/tsconfig.tsbuildinfo @@ -0,0 +1 @@ +{"root":["../src/app.tsx","../src/development.tsx","../src/index.tsx","../src/vite-env.d.ts","../src/constants/defaultpluginconfig.ts","../src/core/usercbplugin.tsx","../src/factory/rcbpluginfactory.ts","../src/hooks/usechangepath.ts","../src/hooks/usemessagehandler.ts","../src/hooks/useprocessblock.ts","../src/providers/geminiprovider.ts","../src/providers/ollamaprovider.ts","../src/providers/openaiprovider.ts","../src/providers/webllmprovider.ts","../src/types/llmconnectorblock.ts","../src/types/pluginconfig.ts","../src/types/provider.ts","../src/types/provider-config/geminiproviderconfig.ts","../src/types/provider-config/ollamaproviderconfig.ts","../src/types/provider-config/openaiproviderconfig.ts","../src/types/provider-config/webllmproviderconfig.ts","../src/types/provider-message/geminiprovidermessage.ts","../src/types/provider-message/ollamaprovidermessage.ts","../src/types/provider-message/openaiprovidermessage.ts","../src/types/provider-message/webllmprovidermessage.ts","../src/utils/prompthandler.tsx","../src/utils/streamcontroller.ts"],"version":"5.8.3"} \ No newline at end of file diff --git a/dist/types/LlmConnectorBlock.d.ts b/dist/types/LlmConnectorBlock.d.ts new file mode 100644 index 0000000..ee904b7 --- /dev/null +++ b/dist/types/LlmConnectorBlock.d.ts @@ -0,0 +1,20 @@ +import { Block, Message } from 'react-chatbotify'; +import { Provider } from './Provider'; +/** + * Extends the Block from React ChatBotify to support the llm connector attribute and its properties. 
+ */ +export type LlmConnectorBlock = Block & { + llmConnector: { + provider: Provider; + outputType?: 'character' | 'chunk' | 'full'; + outputSpeed?: number; + historySize?: number; + initialMessage?: string; + errorMessage?: string; + stopConditions?: { + onUserMessage?: (message: Message) => Promise; + onKeyDown?: (event: KeyboardEvent) => Promise; + }; + }; +}; +//# sourceMappingURL=LlmConnectorBlock.d.ts.map \ No newline at end of file diff --git a/dist/types/LlmConnectorBlock.d.ts.map b/dist/types/LlmConnectorBlock.d.ts.map new file mode 100644 index 0000000..9c642d1 --- /dev/null +++ b/dist/types/LlmConnectorBlock.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"LlmConnectorBlock.d.ts","sourceRoot":"","sources":["../../src/types/LlmConnectorBlock.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,KAAK,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAClD,OAAO,EAAE,QAAQ,EAAE,MAAM,YAAY,CAAC;AAEtC;;GAEG;AACH,MAAM,MAAM,iBAAiB,GAAG,KAAK,GAAG;IACvC,YAAY,EAAE;QACb,QAAQ,EAAE,QAAQ,CAAC;QACnB,UAAU,CAAC,EAAE,WAAW,GAAG,OAAO,GAAG,MAAM,CAAC;QAC5C,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB,WAAW,CAAC,EAAE,MAAM,CAAC;QACrB,cAAc,CAAC,EAAE,MAAM,CAAC;QACxB,YAAY,CAAC,EAAE,MAAM,CAAC;QACtB,cAAc,CAAC,EAAE;YAChB,aAAa,CAAC,EAAE,CAAC,OAAO,EAAE,OAAO,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC;YAC7D,SAAS,CAAC,EAAE,CAAC,KAAK,EAAE,aAAa,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC;SAC7D,CAAC;KACF,CAAC;CACF,CAAC"} \ No newline at end of file diff --git a/dist/types/PluginConfig.d.ts b/dist/types/PluginConfig.d.ts new file mode 100644 index 0000000..99fc076 --- /dev/null +++ b/dist/types/PluginConfig.d.ts @@ -0,0 +1,8 @@ +/** + * Shared plugin-level settings. + */ +type PluginConfig = { + autoConfig?: boolean; +}; +export type { PluginConfig }; +//# sourceMappingURL=PluginConfig.d.ts.map \ No newline at end of file diff --git a/dist/types/PluginConfig.d.ts.map b/dist/types/PluginConfig.d.ts.map new file mode 100644 index 0000000..e75dd5d --- /dev/null +++ b/dist/types/PluginConfig.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"PluginConfig.d.ts","sourceRoot":"","sources":["../../src/types/PluginConfig.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,KAAK,YAAY,GAAG;IACnB,UAAU,CAAC,EAAE,OAAO,CAAC;CACrB,CAAC;AAEF,YAAY,EAAE,YAAY,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/Provider.d.ts b/dist/types/Provider.d.ts new file mode 100644 index 0000000..820fa26 --- /dev/null +++ b/dist/types/Provider.d.ts @@ -0,0 +1,13 @@ +import { Message } from 'react-chatbotify'; +/** + * Interface that all LLM providers must implement. + */ +export type Provider = { + /** + * Sends a series of messages to the LLM to get a reply. 
+ * + * @param messages messages to send + */ + sendMessages(messages: Message[]): AsyncGenerator; +}; +//# sourceMappingURL=Provider.d.ts.map \ No newline at end of file diff --git a/dist/types/Provider.d.ts.map b/dist/types/Provider.d.ts.map new file mode 100644 index 0000000..3b8627a --- /dev/null +++ b/dist/types/Provider.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"Provider.d.ts","sourceRoot":"","sources":["../../src/types/Provider.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAE3C;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;;;OAIG;IACH,YAAY,CAAC,QAAQ,EAAE,OAAO,EAAE,GAAG,cAAc,CAAC,MAAM,CAAC,CAAC;CAC1D,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-config/GeminiProviderConfig.d.ts b/dist/types/provider-config/GeminiProviderConfig.d.ts new file mode 100644 index 0000000..82a5ed4 --- /dev/null +++ b/dist/types/provider-config/GeminiProviderConfig.d.ts @@ -0,0 +1,39 @@ +import { Message } from 'react-chatbotify'; +import { GeminiProviderMessage } from '../provider-message/GeminiProviderMessage'; +/** + * Configurations for GeminiProvider in direct mode. + */ +type DirectConfig = { + mode: 'direct'; + model: string; + apiKey: string; + systemMessage?: string; + responseFormat?: 'stream' | 'json'; + baseUrl?: string; + method?: string; + headers?: Record; + body?: Record; + messageParser?: (messages: Message[]) => GeminiProviderMessage[]; + debug?: boolean; +}; +/** + * Configurations for GeminiProvider in proxy mode. + */ +type ProxyConfig = { + mode: 'proxy'; + model: string; + baseUrl: string; + systemMessage?: string; + responseFormat?: 'stream' | 'json'; + method?: string; + headers?: Record; + body?: Record; + messageParser?: (messages: Message[]) => GeminiProviderMessage[]; + debug?: boolean; +}; +/** + * Combined gemini provider configurations. 
+ */ +type GeminiProviderConfig = DirectConfig | ProxyConfig; +export type { GeminiProviderConfig }; +//# sourceMappingURL=GeminiProviderConfig.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-config/GeminiProviderConfig.d.ts.map b/dist/types/provider-config/GeminiProviderConfig.d.ts.map new file mode 100644 index 0000000..7205577 --- /dev/null +++ b/dist/types/provider-config/GeminiProviderConfig.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"GeminiProviderConfig.d.ts","sourceRoot":"","sources":["../../../src/types/provider-config/GeminiProviderConfig.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAC3C,OAAO,EAAE,qBAAqB,EAAE,MAAM,2CAA2C,CAAC;AAElF;;GAEG;AACH,KAAK,YAAY,GAAG;IACnB,IAAI,EAAE,QAAQ,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,cAAc,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACnC,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,aAAa,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,qBAAqB,EAAE,CAAC;IACjE,KAAK,CAAC,EAAE,OAAO,CAAC;CAChB,CAAC;AAEF;;GAEG;AACH,KAAK,WAAW,GAAG;IAClB,IAAI,EAAE,OAAO,CAAC;IACd,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,cAAc,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACnC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,aAAa,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,qBAAqB,EAAE,CAAC;IACjE,KAAK,CAAC,EAAE,OAAO,CAAC;CAChB,CAAC;AAEF;;GAEG;AACH,KAAK,oBAAoB,GAAG,YAAY,GAAG,WAAW,CAAC;AAEvD,YAAY,EAAE,oBAAoB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-config/OllamaProviderConfig.d.ts b/dist/types/provider-config/OllamaProviderConfig.d.ts new file mode 100644 index 0000000..0520ad2 --- /dev/null +++ b/dist/types/provider-config/OllamaProviderConfig.d.ts @@ -0,0 +1,39 @@ +import { Message } from 'react-chatbotify'; +import { OllamaProviderMessage } from '../provider-message/OllamaProviderMessage'; +/** + * Configurations for OllamaProvider in direct mode. + */ +type DirectConfig = { + mode: 'direct'; + model: string; + apiKey: string; + systemMessage?: string; + responseFormat?: 'stream' | 'json'; + baseUrl?: string; + method?: string; + headers?: Record; + body?: Record; + messageParser?: (messages: Message[]) => OllamaProviderMessage[]; + debug?: boolean; +}; +/** + * Configurations for OllamaProvider in proxy mode. + */ +type ProxyConfig = { + mode: 'proxy'; + model: string; + baseUrl: string; + systemMessage?: string; + responseFormat?: 'stream' | 'json'; + method?: string; + headers?: Record; + body?: Record; + messageParser?: (messages: Message[]) => OllamaProviderMessage[]; + debug?: boolean; +}; +/** + * Combined openai provider configurations. 
+ */ +type OllamaProviderConfig = DirectConfig | ProxyConfig; +export type { OllamaProviderConfig }; +//# sourceMappingURL=OllamaProviderConfig.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-config/OllamaProviderConfig.d.ts.map b/dist/types/provider-config/OllamaProviderConfig.d.ts.map new file mode 100644 index 0000000..2da5729 --- /dev/null +++ b/dist/types/provider-config/OllamaProviderConfig.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"OllamaProviderConfig.d.ts","sourceRoot":"","sources":["../../../src/types/provider-config/OllamaProviderConfig.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAE3C,OAAO,EAAE,qBAAqB,EAAE,MAAM,2CAA2C,CAAC;AAElF;;GAEG;AACH,KAAK,YAAY,GAAG;IACnB,IAAI,EAAE,QAAQ,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,cAAc,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACnC,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,aAAa,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,qBAAqB,EAAE,CAAC;IACjE,KAAK,CAAC,EAAE,OAAO,CAAC;CAChB,CAAC;AAEF;;GAEG;AACH,KAAK,WAAW,GAAG;IAClB,IAAI,EAAE,OAAO,CAAC;IACd,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,cAAc,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACnC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,aAAa,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,qBAAqB,EAAE,CAAC;IACjE,KAAK,CAAC,EAAE,OAAO,CAAC;CAChB,CAAC;AAEF;;GAEG;AACH,KAAK,oBAAoB,GAAG,YAAY,GAAG,WAAW,CAAC;AAEvD,YAAY,EAAE,oBAAoB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-config/OpenaiProviderConfig.d.ts b/dist/types/provider-config/OpenaiProviderConfig.d.ts new file mode 100644 index 0000000..4cc734f --- /dev/null +++ b/dist/types/provider-config/OpenaiProviderConfig.d.ts @@ -0,0 +1,39 @@ +import { Message } from 'react-chatbotify'; +import { OpenaiProviderMessage } from '../provider-message/OpenaiProviderMessage'; +/** + * Configurations for OpenaiProvider in direct mode. + */ +type DirectConfig = { + mode: 'direct'; + model: string; + apiKey: string; + systemMessage?: string; + responseFormat?: 'stream' | 'json'; + baseUrl?: string; + method?: string; + headers?: Record; + body?: Record; + messageParser?: (messages: Message[]) => OpenaiProviderMessage[]; + debug?: boolean; +}; +/** + * Configurations for OpenaiProvider in proxy mode. + */ +type ProxyConfig = { + mode: 'proxy'; + model: string; + baseUrl: string; + systemMessage?: string; + responseFormat?: 'stream' | 'json'; + method?: string; + headers?: Record; + body?: Record; + messageParser?: (messages: Message[]) => OpenaiProviderMessage[]; + debug?: boolean; +}; +/** + * Combined openai provider configurations. 
+ */ +type OpenaiProviderConfig = DirectConfig | ProxyConfig; +export type { OpenaiProviderConfig }; +//# sourceMappingURL=OpenaiProviderConfig.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-config/OpenaiProviderConfig.d.ts.map b/dist/types/provider-config/OpenaiProviderConfig.d.ts.map new file mode 100644 index 0000000..ad31887 --- /dev/null +++ b/dist/types/provider-config/OpenaiProviderConfig.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"OpenaiProviderConfig.d.ts","sourceRoot":"","sources":["../../../src/types/provider-config/OpenaiProviderConfig.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAC3C,OAAO,EAAE,qBAAqB,EAAE,MAAM,2CAA2C,CAAC;AAElF;;GAEG;AACH,KAAK,YAAY,GAAG;IACnB,IAAI,EAAE,QAAQ,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,MAAM,EAAE,MAAM,CAAC;IACf,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,cAAc,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACnC,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,aAAa,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,qBAAqB,EAAE,CAAC;IACjE,KAAK,CAAC,EAAE,OAAO,CAAC;CAChB,CAAC;AAEF;;GAEG;AACH,KAAK,WAAW,GAAG;IAClB,IAAI,EAAE,OAAO,CAAC;IACd,KAAK,EAAE,MAAM,CAAC;IACd,OAAO,EAAE,MAAM,CAAC;IAChB,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,cAAc,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACnC,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,OAAO,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IACjC,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,CAAC;IAC9B,aAAa,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,qBAAqB,EAAE,CAAC;IACjE,KAAK,CAAC,EAAE,OAAO,CAAC;CAChB,CAAC;AAEF;;GAEG;AACH,KAAK,oBAAoB,GAAG,YAAY,GAAG,WAAW,CAAC;AAEvD,YAAY,EAAE,oBAAoB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-config/WebLlmProviderConfig.d.ts b/dist/types/provider-config/WebLlmProviderConfig.d.ts new file mode 100644 index 0000000..8331712 --- /dev/null +++ b/dist/types/provider-config/WebLlmProviderConfig.d.ts @@ -0,0 +1,17 @@ +import { Message } from 'react-chatbotify'; +import { WebLlmProviderMessage } from '../provider-message/WebLlmProviderMessage'; +import { MLCEngineConfig } from '@mlc-ai/web-llm'; +/** + * Configurations for WebLlmProvider. 
+ */ +type WebLlmProviderConfig = { + model: string; + systemMessage?: string; + responseFormat?: 'stream' | 'json'; + engineConfig?: MLCEngineConfig; + chatCompletionOptions?: Record; + messageParser?: (messages: Message[]) => WebLlmProviderMessage[]; + debug?: boolean; +}; +export type { WebLlmProviderConfig }; +//# sourceMappingURL=WebLlmProviderConfig.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-config/WebLlmProviderConfig.d.ts.map b/dist/types/provider-config/WebLlmProviderConfig.d.ts.map new file mode 100644 index 0000000..8d6378a --- /dev/null +++ b/dist/types/provider-config/WebLlmProviderConfig.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"WebLlmProviderConfig.d.ts","sourceRoot":"","sources":["../../../src/types/provider-config/WebLlmProviderConfig.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAC3C,OAAO,EAAE,qBAAqB,EAAE,MAAM,2CAA2C,CAAC;AAClF,OAAO,EAAE,eAAe,EAAE,MAAM,iBAAiB,CAAC;AAElD;;GAEG;AACH,KAAK,oBAAoB,GAAG;IAC3B,KAAK,EAAE,MAAM,CAAC;IACd,aAAa,CAAC,EAAE,MAAM,CAAC;IACvB,cAAc,CAAC,EAAE,QAAQ,GAAG,MAAM,CAAC;IACnC,YAAY,CAAC,EAAE,eAAe,CAAC;IAC/B,qBAAqB,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;IAChD,aAAa,CAAC,EAAE,CAAC,QAAQ,EAAE,OAAO,EAAE,KAAK,qBAAqB,EAAE,CAAC;IACjE,KAAK,CAAC,EAAE,OAAO,CAAC;CAChB,CAAC;AAEF,YAAY,EAAE,oBAAoB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-message/GeminiProviderMessage.d.ts b/dist/types/provider-message/GeminiProviderMessage.d.ts new file mode 100644 index 0000000..5d1deb5 --- /dev/null +++ b/dist/types/provider-message/GeminiProviderMessage.d.ts @@ -0,0 +1,9 @@ +/** + * Message format for Google Gemini. + */ +type GeminiProviderMessage = { + role: 'user' | 'model'; + content: string; +}; +export type { GeminiProviderMessage }; +//# sourceMappingURL=GeminiProviderMessage.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-message/GeminiProviderMessage.d.ts.map b/dist/types/provider-message/GeminiProviderMessage.d.ts.map new file mode 100644 index 0000000..1e92727 --- /dev/null +++ b/dist/types/provider-message/GeminiProviderMessage.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"GeminiProviderMessage.d.ts","sourceRoot":"","sources":["../../../src/types/provider-message/GeminiProviderMessage.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,KAAK,qBAAqB,GAAG;IAC5B,IAAI,EAAE,MAAM,GAAG,OAAO,CAAC;IACvB,OAAO,EAAE,MAAM,CAAC;CAChB,CAAC;AAEF,YAAY,EAAE,qBAAqB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-message/OllamaProviderMessage.d.ts b/dist/types/provider-message/OllamaProviderMessage.d.ts new file mode 100644 index 0000000..8e0b286 --- /dev/null +++ b/dist/types/provider-message/OllamaProviderMessage.d.ts @@ -0,0 +1,9 @@ +/** + * Message format for OpenAI. 
+ */ +type OllamaProviderMessage = { + role: 'user' | 'assistant' | 'system'; + content: string; +}; +export type { OllamaProviderMessage }; +//# sourceMappingURL=OllamaProviderMessage.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-message/OllamaProviderMessage.d.ts.map b/dist/types/provider-message/OllamaProviderMessage.d.ts.map new file mode 100644 index 0000000..159ad2f --- /dev/null +++ b/dist/types/provider-message/OllamaProviderMessage.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"OllamaProviderMessage.d.ts","sourceRoot":"","sources":["../../../src/types/provider-message/OllamaProviderMessage.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,KAAK,qBAAqB,GAAG;IAC5B,IAAI,EAAE,MAAM,GAAG,WAAW,GAAG,QAAQ,CAAC;IACtC,OAAO,EAAE,MAAM,CAAC;CAChB,CAAC;AAEF,YAAY,EAAE,qBAAqB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-message/OpenaiProviderMessage.d.ts b/dist/types/provider-message/OpenaiProviderMessage.d.ts new file mode 100644 index 0000000..00c9a95 --- /dev/null +++ b/dist/types/provider-message/OpenaiProviderMessage.d.ts @@ -0,0 +1,9 @@ +/** + * Message format for OpenAI. + */ +type OpenaiProviderMessage = { + role: 'user' | 'assistant' | 'system'; + content: string; +}; +export type { OpenaiProviderMessage }; +//# sourceMappingURL=OpenaiProviderMessage.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-message/OpenaiProviderMessage.d.ts.map b/dist/types/provider-message/OpenaiProviderMessage.d.ts.map new file mode 100644 index 0000000..76ba5df --- /dev/null +++ b/dist/types/provider-message/OpenaiProviderMessage.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"OpenaiProviderMessage.d.ts","sourceRoot":"","sources":["../../../src/types/provider-message/OpenaiProviderMessage.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,KAAK,qBAAqB,GAAG;IAC5B,IAAI,EAAE,MAAM,GAAG,WAAW,GAAG,QAAQ,CAAC;IACtC,OAAO,EAAE,MAAM,CAAC;CAChB,CAAC;AAEF,YAAY,EAAE,qBAAqB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/types/provider-message/WebLlmProviderMessage.d.ts b/dist/types/provider-message/WebLlmProviderMessage.d.ts new file mode 100644 index 0000000..4ab2d28 --- /dev/null +++ b/dist/types/provider-message/WebLlmProviderMessage.d.ts @@ -0,0 +1,9 @@ +/** + * Message format for web-llm. + */ +type WebLlmProviderMessage = { + role: 'user' | 'assistant' | 'system'; + content: string; +}; +export type { WebLlmProviderMessage }; +//# sourceMappingURL=WebLlmProviderMessage.d.ts.map \ No newline at end of file diff --git a/dist/types/provider-message/WebLlmProviderMessage.d.ts.map b/dist/types/provider-message/WebLlmProviderMessage.d.ts.map new file mode 100644 index 0000000..1265039 --- /dev/null +++ b/dist/types/provider-message/WebLlmProviderMessage.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"WebLlmProviderMessage.d.ts","sourceRoot":"","sources":["../../../src/types/provider-message/WebLlmProviderMessage.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,KAAK,qBAAqB,GAAG;IAC5B,IAAI,EAAE,MAAM,GAAG,WAAW,GAAG,QAAQ,CAAC;IACtC,OAAO,EAAE,MAAM,CAAC;CAChB,CAAC;AAEF,YAAY,EAAE,qBAAqB,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/utils/promptHandler.d.ts b/dist/utils/promptHandler.d.ts new file mode 100644 index 0000000..01d00ba --- /dev/null +++ b/dist/utils/promptHandler.d.ts @@ -0,0 +1,36 @@ +import { Provider } from '../types/Provider'; +import { Message } from 'react-chatbotify'; +/** + * Processes the prompt using the provided model connector. 
+ * + * @param messages messages to send to the LLM + * @param refs object containing relevant refs + * @param actions object containing relevant actions + * @param opts optional AbortSignal + */ +declare const handlePrompt: (messages: Message[], refs: { + providerRef: React.MutableRefObject; + messagesRef: React.MutableRefObject; + outputTypeRef: React.MutableRefObject<"character" | "chunk" | "full">; + outputSpeedRef: React.MutableRefObject; + historySizeRef: React.MutableRefObject; + initialMessageRef: React.MutableRefObject; + errorMessageRef: React.MutableRefObject; + onUserMessageRef: React.MutableRefObject<((msg: Message) => Promise) | null>; + onKeyDownRef: React.MutableRefObject<((e: KeyboardEvent) => Promise) | null>; +}, actions: { + speakAudio: (text: string) => void; + injectMessage: (content: string | JSX.Element, sender?: string) => Promise; + simulateStreamMessage: (content: string, sender?: string) => Promise; + streamMessage: (msg: string) => void; + endStreamMessage: () => void; + toggleTextAreaDisabled: (active?: boolean) => void; + toggleIsBotTyping: (active?: boolean) => void; + focusTextArea: () => void; + goToPath: (path: string) => void; + getIsChatBotVisible: () => boolean; +}, opts?: { + signal?: AbortSignal; +}) => Promise; +export { handlePrompt }; +//# sourceMappingURL=promptHandler.d.ts.map \ No newline at end of file diff --git a/dist/utils/promptHandler.d.ts.map b/dist/utils/promptHandler.d.ts.map new file mode 100644 index 0000000..ff6e848 --- /dev/null +++ b/dist/utils/promptHandler.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"promptHandler.d.ts","sourceRoot":"","sources":["../../src/utils/promptHandler.tsx"],"names":[],"mappings":"AACA,OAAO,EAAE,QAAQ,EAAE,MAAM,mBAAmB,CAAC;AAC7C,OAAO,EAAE,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAe3C;;;;;;;GAOG;AACH,QAAA,MAAM,YAAY,GACjB,UAAU,OAAO,EAAE,EACnB,MAAM;IACL,WAAW,EAAE,KAAK,CAAC,gBAAgB,CAAC,QAAQ,GAAG,IAAI,CAAC,CAAC;IACrD,WAAW,EAAE,KAAK,CAAC,gBAAgB,CAAC,OAAO,EAAE,CAAC,CAAC;IAC/C,aAAa,EAAE,KAAK,CAAC,gBAAgB,CAAC,WAAW,GAAG,OAAO,GAAG,MAAM,CAAC,CAAC;IACtE,cAAc,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAC/C,cAAc,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAC/C,iBAAiB,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAClD,eAAe,EAAE,KAAK,CAAC,gBAAgB,CAAC,MAAM,CAAC,CAAC;IAChD,gBAAgB,EAAE,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,GAAG,EAAE,OAAO,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;IAC5F,YAAY,EAAE,KAAK,CAAC,gBAAgB,CAAC,CAAC,CAAC,CAAC,EAAE,aAAa,KAAK,OAAO,CAAC,MAAM,GAAG,IAAI,CAAC,CAAC,GAAG,IAAI,CAAC,CAAC;CAC5F,EACD,SAAS;IACR,UAAU,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;IACnC,aAAa,EAAE,CAAC,OAAO,EAAE,MAAM,GAAG,GAAG,CAAC,OAAO,EAAE,MAAM,CAAC,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAAC;IAC3F,qBAAqB,EAAE,CAAC,OAAO,EAAE,MAAM,EAAE,MAAM,CAAC,EAAE,MAAM,KAAK,OAAO,CAAC,OAAO,GAAG,IAAI,CAAC,CAAC;IACrF,aAAa,EAAE,CAAC,GAAG,EAAE,MAAM,KAAK,IAAI,CAAC;IACrC,gBAAgB,EAAE,MAAM,IAAI,CAAC;IAC7B,sBAAsB,EAAE,CAAC,MAAM,CAAC,EAAE,OAAO,KAAK,IAAI,CAAC;IACnD,iBAAiB,EAAE,CAAC,MAAM,CAAC,EAAE,OAAO,KAAK,IAAI,CAAC;IAC9C,aAAa,EAAE,MAAM,IAAI,CAAC;IAC1B,QAAQ,EAAE,CAAC,IAAI,EAAE,MAAM,KAAK,IAAI,CAAC;IACjC,mBAAmB,EAAE,MAAM,OAAO,CAAC;CACnC,EACD,OAAM;IAAE,MAAM,CAAC,EAAE,WAAW,CAAA;CAAO,KACjC,OAAO,CAAC,IAAI,CA+Dd,CAAC;AAEF,OAAO,EAAE,YAAY,EAAE,CAAC"} \ No newline at end of file diff --git a/dist/utils/streamController.d.ts b/dist/utils/streamController.d.ts new file mode 100644 index 0000000..449f42e --- /dev/null +++ b/dist/utils/streamController.d.ts @@ -0,0 +1,10 @@ +/** + * Formats a raw stream according to the specified mode. 
+ * + * @param stream raw async iterable stream of strings. + * @param outputType 'character' for per-character output, 'chunk' for as-is. + * @param outputSpeed speed in milliseconds to stream response + */ +declare const formatStream: (stream: AsyncGenerator, outputType: "chunk" | "character" | "full", outputSpeed: number) => AsyncGenerator; +export { formatStream }; +//# sourceMappingURL=streamController.d.ts.map \ No newline at end of file diff --git a/dist/utils/streamController.d.ts.map b/dist/utils/streamController.d.ts.map new file mode 100644 index 0000000..6463739 --- /dev/null +++ b/dist/utils/streamController.d.ts.map @@ -0,0 +1 @@ +{"version":3,"file":"streamController.d.ts","sourceRoot":"","sources":["../../src/utils/streamController.ts"],"names":[],"mappings":"AAgCA;;;;;;GAMG;AACH,QAAA,MAAM,YAAY,GACjB,QAAQ,cAAc,CAAC,MAAM,CAAC,EAC9B,YAAY,OAAO,GAAG,WAAW,GAAG,MAAM,EAC1C,aAAa,MAAM,KACjB,cAAc,CAAC,MAAM,CAMvB,CAAC;AAEF,OAAO,EAAE,YAAY,EAAE,CAAC"} \ No newline at end of file diff --git a/src/App.tsx b/src/App.tsx index 14f2641..6059188 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -5,6 +5,7 @@ import { LlmConnectorBlock } from './types/LlmConnectorBlock'; import GeminiProvider from './providers/GeminiProvider'; import OpenaiProvider from './providers/OpenaiProvider'; import WebLlmProvider from './providers/WebLlmProvider'; +import OllamaProvider from './providers/OllamaProvider'; // fill in your api keys below if you wish to explore/develop const geminiApiKey = ''; @@ -40,7 +41,7 @@ const App = () => { } return 'Pick another model to try!'; }, - options: ['WebLlm', 'Gemini', 'OpenAI'], + options: ['WebLlm', 'Gemini', 'OpenAI', 'Ollama'], chatDisabled: true, path: async (params: Params) => { // if browser model chosen, give a gentle warning about performance @@ -110,6 +111,22 @@ const App = () => { }, }, } as LlmConnectorBlock, + ollama: { + llmConnector: { + provider: new OllamaProvider({ + baseUrl: 'http://localhost:11434/api/chat', + mode: 'direct', + model: 'robot', + apiKey: '', + debug: true, + }), + outputType: 'character', + stopConditions: { + onUserMessage: onUserMessageCheck, + onKeyDown: onKeyDownCheck, + }, + }, + } as LlmConnectorBlock, }; return ; diff --git a/src/index.tsx b/src/index.tsx index 83fb37d..6659aa7 100644 --- a/src/index.tsx +++ b/src/index.tsx @@ -5,6 +5,7 @@ import LlmConnector from './factory/RcbPluginFactory'; import GeminiProvider from './providers/GeminiProvider'; import OpenaiProvider from './providers/OpenaiProvider'; import WebLlmProvider from './providers/WebLlmProvider'; +import OllamaProvider from './providers/OllamaProvider'; // type imports import { LlmConnectorBlock } from './types/LlmConnectorBlock'; @@ -12,7 +13,7 @@ import { PluginConfig } from './types/PluginConfig'; import { Provider } from './types/Provider'; // default provider exports -export { GeminiProvider, OpenaiProvider, WebLlmProvider }; +export { GeminiProvider, OpenaiProvider, WebLlmProvider, OllamaProvider }; // type exports export type { LlmConnectorBlock, PluginConfig, Provider }; diff --git a/src/providers/OllamaProvider.ts b/src/providers/OllamaProvider.ts new file mode 100644 index 0000000..cfb4ff9 --- /dev/null +++ b/src/providers/OllamaProvider.ts @@ -0,0 +1,190 @@ +import { Provider } from '../types/Provider'; +import { Message } from 'react-chatbotify'; +import { OpenaiProviderConfig } from '../types/provider-config/OpenaiProviderConfig'; +import { OllamaProviderMessage } from '../types/provider-message/OllamaProviderMessage'; + +/** + * Provider for Ollama’s API, 
supports both direct and proxy modes.
+ */
+class OllamaProvider implements Provider {
+	private method!: string;
+	private endpoint!: string;
+	private headers!: Record<string, string>;
+	private body!: Record<string, unknown>;
+	private systemMessage?: string;
+	private responseFormat!: 'stream' | 'json';
+	private messageParser?: (messages: Message[]) => OllamaProviderMessage[];
+	private debug: boolean = false;
+
+	/**
+	 * Sets default values for the provider based on given configuration. Configuration guide here:
+	 * https://github.com/React-ChatBotify-Plugins/llm-connector/blob/main/docs/providers/OpenAI.md
+	 *
+	 * @param config configuration for setup
+	 */
+	public constructor(config: OpenaiProviderConfig) {
+		this.method = config.method ?? 'POST';
+		this.endpoint = config.baseUrl ?? 'http://localhost:11434/api/chat';
+		this.systemMessage = config.systemMessage;
+		this.responseFormat = config.responseFormat ?? 'stream';
+		this.messageParser = config.messageParser;
+		this.debug = config.debug ?? false;
+		this.headers = {
+			'Content-Type': 'application/json',
+			Accept: this.responseFormat === 'stream' ? 'text/event-stream' : 'application/json',
+			...config.headers,
+		};
+		this.body = {
+			model: config.model,
+			stream: this.responseFormat === 'stream',
+			...config.body,
+		};
+
+		if (config.mode === 'direct') {
+			this.headers = { ...this.headers, Authorization: `Bearer ${config.apiKey}` };
+			return;
+		}
+
+		if (config.mode !== 'proxy') {
+			throw Error("Invalid mode specified for Ollama provider ('direct' or 'proxy').");
+		}
+	}
+
+	/**
+	 * Calls Ollama and yields each chunk (or the full text).
+	 *
+	 * @param messages messages to include in the request
+	 */
+	public async *sendMessages(messages: Message[]): AsyncGenerator<string> {
+		if (this.debug) {
+			const sanitizedHeaders = { ...this.headers };
+			delete sanitizedHeaders['Authorization'];
+			console.log('[OllamaProvider] Request:', {
+				method: this.method,
+				endpoint: this.endpoint,
+				headers: sanitizedHeaders,
+				body: this.constructBodyWithMessages(messages),
+			});
+		}
+		const res = await fetch(this.endpoint, {
+			method: this.method,
+			headers: this.headers as HeadersInit,
+			body: JSON.stringify(this.constructBodyWithMessages(messages)),
+		});
+
+		if (this.debug) {
+			console.log('[OllamaProvider] Response status:', res.status);
+		}
+
+		if (!res.ok) {
+			throw new Error(`Ollama API error ${res.status}: ${await res.text()}`);
+		}
+
+		if (this.responseFormat === 'stream') {
+			if (!res.body) {
+				throw new Error('Response body is empty – cannot stream');
+			}
+			const reader = res.body.getReader();
+			for await (const chunk of this.handleStreamResponse(reader)) {
+				yield chunk;
+			}
+		} else {
+			const payload = await res.json();
+			if (this.debug) {
+				console.log('[OllamaProvider] Response body:', payload);
+			}
+			const text = payload.message?.content; // Ollama's non-streaming /api/chat reply sits under message.content
+			if (typeof text === 'string') {
+				yield text;
+			} else {
+				throw new Error('Unexpected response shape – no text candidate');
+			}
+		}
+	}
+
+	/**
+	 * Maps the chatbot message sender to the provider message sender.
+	 *
+	 * @param sender sender from the chatbot
+	 */
+	private roleMap = (sender: string): 'system' | 'user' | 'assistant' => {
+		switch (sender) {
+			case 'USER':
+				return 'user';
+			case 'SYSTEM':
+				return 'system';
+			default:
+				return 'assistant';
+		}
+	};
+
+	/**
+	 * Builds the full request body.
+	 *
+	 * @param messages messages to parse
+	 */
+	private constructBodyWithMessages = (messages: Message[]) => {
+		let parsedMessages;
+		if (this.messageParser) {
+			parsedMessages = this.messageParser(messages);
+		} else {
+			const filteredMessages = messages.filter(
+				(message) => typeof message.content === 'string' && message.sender.toUpperCase() !== 'SYSTEM'
+			);
+			parsedMessages = filteredMessages.map((message) => {
+				const role = this.roleMap(message.sender.toUpperCase());
+				const text = message.content;
+				return {
+					role,
+					content: text,
+				};
+			});
+		}
+
+		// prepend system message if specified
+		if (this.systemMessage) {
+			parsedMessages = [{ role: 'system', content: this.systemMessage }, ...parsedMessages];
+		}
+
+		// Only include model and messages for Ollama
+		return {
+			model: this.body.model,
+			messages: parsedMessages,
+		};
+	};
+
+	/**
+	 * Consumes a streaming response of newline-delimited JSON and yields each text chunk.
+	 *
+	 * @param reader request body reader
+	 */
+	private handleStreamResponse = async function* (
+		reader: ReadableStreamDefaultReader<Uint8Array>
+	): AsyncGenerator<string> {
+		const decoder = new TextDecoder('utf-8');
+		let buffer = '';
+
+		while (true) {
+			const { value, done } = await reader.read();
+			if (done) break;
+
+			buffer += decoder.decode(value, { stream: true });
+			const parts = buffer.split(/\r?\n/);
+			buffer = parts.pop()!;
+
+			for (const line of parts) {
+				try {
+					const event = JSON.parse(line);
+					if (event.done === true) return;
+					if (event.message && typeof event.message.content === 'string') {
+						yield event.message.content;
+					}
+				} catch (err) {
+					console.error('Stream parse error', err);
+				}
+			}
+		}
+	};
+}
+
+export default OllamaProvider;
diff --git a/src/types/provider-config/OllamaProviderConfig.ts b/src/types/provider-config/OllamaProviderConfig.ts
new file mode 100644
index 0000000..7558194
--- /dev/null
+++ b/src/types/provider-config/OllamaProviderConfig.ts
@@ -0,0 +1,42 @@
+import { Message } from 'react-chatbotify';
+import { OllamaProviderMessage } from '../provider-message/OllamaProviderMessage';
+
+/**
+ * Configurations for OllamaProvider in direct mode.
+ */
+type DirectConfig = {
+	mode: 'direct';
+	model: string;
+	apiKey: string;
+	systemMessage?: string;
+	responseFormat?: 'stream' | 'json';
+	baseUrl?: string;
+	method?: string;
+	headers?: Record<string, string>;
+	body?: Record<string, unknown>;
+	messageParser?: (messages: Message[]) => OllamaProviderMessage[];
+	debug?: boolean;
+};
+
+/**
+ * Configurations for OllamaProvider in proxy mode.
+ */
+type ProxyConfig = {
+	mode: 'proxy';
+	model: string;
+	baseUrl: string;
+	systemMessage?: string;
+	responseFormat?: 'stream' | 'json';
+	method?: string;
+	headers?: Record<string, string>;
+	body?: Record<string, unknown>;
+	messageParser?: (messages: Message[]) => OllamaProviderMessage[];
+	debug?: boolean;
+};
+
+/**
+ * Combined Ollama provider configurations.
+ */
+type OllamaProviderConfig = DirectConfig | ProxyConfig;
+
+export type { OllamaProviderConfig };
diff --git a/src/types/provider-message/OllamaProviderMessage.ts b/src/types/provider-message/OllamaProviderMessage.ts
new file mode 100644
index 0000000..bb5d1b1
--- /dev/null
+++ b/src/types/provider-message/OllamaProviderMessage.ts
@@ -0,0 +1,9 @@
+/**
+ * Message format for Ollama.
+ */
+type OllamaProviderMessage = {
+	role: 'user' | 'assistant' | 'system';
+	content: string;
+};
+
+export type { OllamaProviderMessage };
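
Note: the handleStreamResponse parser in OllamaProvider.ts above assumes Ollama's streaming /api/chat output, which arrives as newline-delimited JSON rather than SSE. Each line is a standalone JSON object roughly of the following shape (field values illustrative, not taken from this change), which is why the reader splits its buffer on newlines, yields message.content from each object, and stops once a line reports done: true.

{"model":"llama3","message":{"role":"assistant","content":"Hel"},"done":false}
{"model":"llama3","message":{"role":"assistant","content":"lo"},"done":false}
{"model":"llama3","message":{"role":"assistant","content":""},"done":true}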