diff --git a/actions/ai/generate-completion.ts b/actions/ai/generate-completion.ts
new file mode 100644
index 0000000..45ba015
--- /dev/null
+++ b/actions/ai/generate-completion.ts
@@ -0,0 +1,76 @@
+'use server';
+
+import { ResponseCreateParamsBase } from 'openai/resources/responses/responses.mjs';
+
+import { isOwner } from '@/lib/owner';
+import { AIProvider } from '@/server/ai-provider';
+
+import { getCurrentUser } from '../auth/get-current-user';
+import { getAppConfig } from '../configs/get-app-config';
+
+type GenerateCompletion = Omit<ResponseCreateParamsBase, 'model'> & {
+  model?: string;
+};
+
+export const generateCompletion = async ({
+  input,
+  instructions,
+  model,
+  stream = false,
+}: GenerateCompletion) => {
+  const user = await getCurrentUser();
+  const config = await getAppConfig();
+
+  const aiModel = model ?? config?.ai?.['text-models']?.[0].value ?? '';
+  const provider = AIProvider(config?.ai?.provider);
+
+  const TEXT_MODELS = config?.ai?.['text-models'] ?? [];
+  const models = (isOwner(user?.userId) ? TEXT_MODELS : TEXT_MODELS.slice(0, 2)).map(
+    ({ value }) => value,
+  );
+
+  if (!models.includes(aiModel)) {
+    return { completion: null, model: aiModel };
+  }
+
+  const completion = await provider.responses.create({
+    input,
+    instructions,
+    model: aiModel,
+    stream,
+  });
+
+  if (stream) {
+    const encoder = new TextEncoder();
+    const stream_response = new TransformStream();
+
+    (async () => {
+      const writer = stream_response.writable.getWriter();
+
+      try {
+        for await (const event of completion as any) {
+          if (event.type === 'response.output_text.delta') {
+            const data = {
+              item_id: event.item_id,
+              output_index: event.output_index,
+              content_index: event.content_index,
+              delta: event.delta,
+            };
+            await writer.write(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
+          }
+        }
+      } catch (error) {
+        console.error('Stream processing error:', error);
+        await writer.write(
+          encoder.encode(`data: ${JSON.stringify({ error: 'Stream processing error' })}\n\n`),
+        );
+      } finally {
+        await writer.close();
+      }
+    })();
+
+    return { completion: stream_response.readable, model: aiModel };
+  }
+
+  return { completion, model: aiModel };
+};
diff --git a/actions/ai/generate-image.ts b/actions/ai/generate-image.ts
new file mode 100644
index 0000000..e2382c9
--- /dev/null
+++ b/actions/ai/generate-image.ts
@@ -0,0 +1,42 @@
+'use server';
+
+import { ImageGenerateParams } from 'openai/resources/images.mjs';
+
+import { AIProvider } from '@/server/ai-provider';
+
+import { getAppConfig } from '../configs/get-app-config';
+
+type GenerateImage = Omit<ImageGenerateParams, 'model'> & {
+  model?: string;
+};
+
+export const generateImage = async ({ model, prompt }: GenerateImage) => {
+  const config = await getAppConfig();
+
+  const aiModel = model ?? config?.ai?.['image-models']?.[0].value ?? '';
+  const provider = AIProvider(config?.ai?.provider);
+
+  const IMAGE_MODELS = config?.ai?.['image-models'] ?? [];
+  const models = IMAGE_MODELS.map(({ value }) => value);
+
+  if (!models.includes(aiModel)) {
+    return {
+      image: null,
+      model: aiModel,
+    };
+  }
+
+  const response = await provider.images.generate({
+    model: aiModel,
+    n: 1,
+    prompt,
+    quality: 'hd',
+    response_format: 'b64_json',
+    size: '1024x1024',
+  });
+
+  return {
+    image: response,
+    model: null,
+  };
+};
diff --git a/actions/auth/get-login-quote.ts b/actions/auth/get-login-quote.ts
index f0fc745..f45f5e3 100644
--- a/actions/auth/get-login-quote.ts
+++ b/actions/auth/get-login-quote.ts
@@ -6,36 +6,25 @@ import { ChatCompletionUserMessageParam } from 'openai/resources/index.mjs';
 import { ChatCompletionRole } from '@/constants/ai';
 import { TEN_MINUTE_SEC } from '@/constants/common';
 import { fetchCachedData } from '@/lib/cache';
-import { AIProvider } from '@/server/ai-provider';
 
-import { getAppConfig } from '../configs/get-app-config';
+import { generateCompletion } from '../ai/generate-completion';
 
 export const getLoginQuote = async () => {
   const locale = await getLocale();
-  const config = await getAppConfig();
-
-  const provider = AIProvider(config?.ai?.provider as string);
-
-  const DEFAULT_MODEL = config?.ai?.['text-models']?.[0].value;
 
   try {
     const response = await fetchCachedData(
       `login-quote-[${locale}]`,
       async () => {
-        const response = await provider.chat.completions.create({
-          messages: [
-            {
-              role: 'system',
-              content:
-                'You are a machine that only returns JSON object format without unnecessary symbols.',
-            },
+        const response = await generateCompletion({
+          instructions:
+            'You are a machine that only returns JSON object format without unnecessary symbols.',
+          input: [
             {
               content: `Generate a quote from a famous philosopher. Language code is ${locale}. Write it down in JSON format - {"quote": "Quote", "author": "Quote the author"}`,
               role: ChatCompletionRole.USER as unknown as ChatCompletionUserMessageParam['role'],
             },
           ],
-          model: DEFAULT_MODEL,
-          temperature: 0.8,
         });
 
         return response;
@@ -43,11 +32,12 @@ export const getLoginQuote = async () => {
       TEN_MINUTE_SEC,
     );
 
-    const generatedQuote = JSON.parse(response.choices[0].message.content || '{}');
+    const generatedQuote = JSON.parse(response.completion.output_text ?? '{}');
+    const model = response.model ?? '';
 
     return {
       author: generatedQuote?.author ?? '',
-      model: DEFAULT_MODEL,
+      model,
       quote: generatedQuote?.quote ?? '',
     };
   } catch (error) {
@@ -55,7 +45,7 @@ export const getLoginQuote = async () => {
 
     return {
       author: '',
-      model: DEFAULT_MODEL,
+      model: '',
       quote: '',
     };
   }
diff --git a/actions/chat/get-chat-initial.ts b/actions/chat/get-chat-initial.ts
index ad4f67d..497ac40 100644
--- a/actions/chat/get-chat-initial.ts
+++ b/actions/chat/get-chat-initial.ts
@@ -6,44 +6,33 @@ import { ChatCompletionUserMessageParam } from 'openai/resources/index.mjs';
 import { ChatCompletionRole } from '@/constants/ai';
 import { ONE_DAY_SEC } from '@/constants/common';
 import { fetchCachedData } from '@/lib/cache';
-import { AIProvider } from '@/server/ai-provider';
 
-import { getAppConfig } from '../configs/get-app-config';
+import { generateCompletion } from '../ai/generate-completion';
 
 export const getChatInitial = async () => {
   const locale = await getLocale();
-  const config = await getAppConfig();
-
-  const provider = AIProvider(config?.ai?.provider as string);
-
-  const DEFAULT_MODEL = config?.ai?.['text-models']?.[0].value;
 
   try {
     const introMessages = await fetchCachedData(
       `chat-initial-[${locale}]`,
       async () => {
-        const response = await provider.chat.completions.create({
-          messages: [
-            {
-              role: 'system',
-              content: 'You are a machine that only returns array format.',
-            },
+        const response = await generateCompletion({
+          instructions: 'You are a machine that only returns array format.',
+          input: [
             {
               content: `Generate 4 questions ranging from 120 to 150 characters long for an intelligent chat on the topic of programming. Language code is ${locale}. Write the result to an array.`,
               role: ChatCompletionRole.USER as unknown as ChatCompletionUserMessageParam['role'],
             },
           ],
-          model: DEFAULT_MODEL,
-          temperature: 0.8,
         });
 
-        return response;
+        return response.completion;
       },
       ONE_DAY_SEC,
     );
 
     return {
-      introMessages: JSON.parse(introMessages.choices[0].message.content || '[]'),
+      introMessages: JSON.parse(introMessages.output_text ?? '[]'),
     };
   } catch (error) {
     console.error('[GET_CHAT_INITIAL_ACTION]', error);
diff --git a/app/(chat)/(routes)/chat/[[...slug]]/_components/chat-main/chat-input-footer.tsx b/app/(chat)/(routes)/chat/[[...slug]]/_components/chat-main/chat-input-footer.tsx
index e3eef37..599b50e 100644
--- a/app/(chat)/(routes)/chat/[[...slug]]/_components/chat-main/chat-input-footer.tsx
+++ b/app/(chat)/(routes)/chat/[[...slug]]/_components/chat-main/chat-input-footer.tsx
@@ -1,6 +1,6 @@
 'use client';
 
-import { ImageIcon, Paperclip, SendHorizonal, StopCircle } from 'lucide-react';
+import { ImageIcon, SendHorizonal, StopCircle } from 'lucide-react';
 import { useTranslations } from 'next-intl';
 
 import { Badge, Button, Separator } from '@/components/ui';
@@ -54,9 +54,9 @@ export const ChatInputFooter = ({ )} /> - + */}