Skip to content

chore: updated ai libs #66

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits into from
Apr 18, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 76 additions & 0 deletions actions/ai/generate-completion.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
'use server';

import { ResponseCreateParamsBase } from 'openai/resources/responses/responses.mjs';

import { isOwner } from '@/lib/owner';
import { AIProvider } from '@/server/ai-provider';

import { getCurrentUser } from '../auth/get-current-user';
import { getAppConfig } from '../configs/get-app-config';

type GenerateCompletion = Omit<ResponseCreateParamsBase, 'model'> & {
  model?: string;
};

/**
 * Server action: generates a text completion through the configured AI provider.
 *
 * Resolves the model to use (explicit `model`, else the first configured text
 * model), enforces the per-role model allow-list (non-owners are limited to
 * the first two configured models), and either returns the raw completion or,
 * when `stream` is true, a ReadableStream of SSE `data:` frames carrying
 * `response.output_text.delta` events.
 *
 * @returns `{ completion, model }` — `completion` is `null` when the requested
 *          model is not allowed, a ReadableStream when streaming, otherwise
 *          the provider response object.
 */
export const generateCompletion = async ({
  input,
  instructions,
  model,
  stream = false,
}: GenerateCompletion) => {
  const user = await getCurrentUser();
  const config = await getAppConfig();

  // Optional-chain the index access too: with `'text-models': []` the old
  // expression `?.[0].value` threw a TypeError instead of falling back to ''.
  const aiModel = model ?? config?.ai?.['text-models']?.[0]?.value ?? '';
  const provider = AIProvider(config?.ai?.provider);

  const TEXT_MODELS = config?.ai?.['text-models'] ?? [];
  // Non-owners may only use the first two configured models.
  const allowedModels = (isOwner(user?.userId) ? TEXT_MODELS : TEXT_MODELS.slice(0, 2)).map(
    ({ value }) => value,
  );

  if (!allowedModels.includes(aiModel)) {
    return { completion: null, model: aiModel };
  }

  const completion = await provider.responses.create({
    input,
    instructions,
    model: aiModel,
    stream,
  });

  if (stream) {
    const encoder = new TextEncoder();
    const streamResponse = new TransformStream();

    // Pump provider events into the writable side as SSE frames. This runs
    // detached so the readable side can be returned immediately; errors are
    // reported in-band as an SSE error frame rather than rejecting.
    (async () => {
      const writer = streamResponse.writable.getWriter();

      try {
        // When `stream: true`, the Responses API returns an async iterable of
        // events; narrow the cast to that instead of a bare `any`.
        for await (const event of completion as unknown as AsyncIterable<{
          type: string;
          item_id?: string;
          output_index?: number;
          content_index?: number;
          delta?: string;
        }>) {
          if (event.type === 'response.output_text.delta') {
            const data = {
              item_id: event.item_id,
              output_index: event.output_index,
              content_index: event.content_index,
              delta: event.delta,
            };
            await writer.write(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
          }
        }
      } catch (error) {
        console.error('Stream processing error:', error);
        await writer.write(
          encoder.encode(`data: ${JSON.stringify({ error: 'Stream processing error' })}\n\n`),
        );
      } finally {
        await writer.close();
      }
    })();

    return { completion: streamResponse.readable, model: aiModel };
  }

  return { completion, model: aiModel };
};
42 changes: 42 additions & 0 deletions actions/ai/generate-image.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
'use server';

import { ImageGenerateParams } from 'openai/resources/images.mjs';

import { AIProvider } from '@/server/ai-provider';

import { getAppConfig } from '../configs/get-app-config';

type GenerateImage = Omit<ImageGenerateParams, 'model'> & {
  model?: string;
};

/**
 * Server action: generates a single 1024x1024 HD image (base64) through the
 * configured AI provider.
 *
 * Resolves the model to use (explicit `model`, else the first configured
 * image model) and rejects models not present in the configured list.
 *
 * @returns `{ image, model }` — `image` is `null` when the requested model is
 *          not configured; `model` is the resolved model name in both cases.
 */
export const generateImage = async ({ model, prompt }: GenerateImage) => {
  const config = await getAppConfig();

  // Optional-chain the index access too: with `'image-models': []` the old
  // expression `?.[0].value` threw a TypeError instead of falling back to ''.
  const aiModel = model ?? config?.ai?.['image-models']?.[0]?.value ?? '';
  const provider = AIProvider(config?.ai?.provider);

  const IMAGE_MODELS = config?.ai?.['image-models'] ?? [];
  const allowedModels = IMAGE_MODELS.map(({ value }) => value);

  if (!allowedModels.includes(aiModel)) {
    return {
      image: null,
      model: aiModel,
    };
  }

  const response = await provider.images.generate({
    model: aiModel,
    n: 1,
    prompt,
    quality: 'hd',
    response_format: 'b64_json',
    size: '1024x1024',
  });

  return {
    image: response,
    // Report the model actually used. The original returned `model: null`
    // here while the rejection branch returned the model name — inconsistent
    // with generateCompletion's `{ completion, model: aiModel }` contract.
    model: aiModel,
  };
};
28 changes: 9 additions & 19 deletions actions/auth/get-login-quote.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,56 +6,46 @@ import { ChatCompletionUserMessageParam } from 'openai/resources/index.mjs';
import { ChatCompletionRole } from '@/constants/ai';
import { TEN_MINUTE_SEC } from '@/constants/common';
import { fetchCachedData } from '@/lib/cache';
import { AIProvider } from '@/server/ai-provider';

import { getAppConfig } from '../configs/get-app-config';
import { generateCompletion } from '../ai/generate-completion';

export const getLoginQuote = async () => {
const locale = await getLocale();
const config = await getAppConfig();

const provider = AIProvider(config?.ai?.provider as string);

const DEFAULT_MODEL = config?.ai?.['text-models']?.[0].value;

try {
const response = await fetchCachedData(
`login-quote-[${locale}]`,
async () => {
const response = await provider.chat.completions.create({
messages: [
{
role: 'system',
content:
'You are a machine that only returns JSON object format without unnecessary symbols.',
},
const response = await generateCompletion({
instructions:
'You are a machine that only returns JSON object format without unnecessary symbols.',
input: [
{
content: `Generate a quote from a famous philosopher. Language code is ${locale}. Write it down in JSON format - {"quote": "Quote", "author": "Quote the author"}`,
role: ChatCompletionRole.USER as unknown as ChatCompletionUserMessageParam['role'],
},
],
model: DEFAULT_MODEL,
temperature: 0.8,
});

return response;
},
TEN_MINUTE_SEC,
);

const generatedQuote = JSON.parse(response.choices[0].message.content || '{}');
const generatedQuote = JSON.parse(response.completion.output_text ?? '{}');
const model = response.model ?? '';

return {
author: generatedQuote?.author ?? '',
model: DEFAULT_MODEL,
model,
quote: generatedQuote?.quote ?? '',
};
} catch (error) {
console.error('[GET_LOGIN_CITE_ACTION]', error);

return {
author: '',
model: DEFAULT_MODEL,
model: '',
quote: '',
};
}
Expand Down
23 changes: 6 additions & 17 deletions actions/chat/get-chat-initial.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,44 +6,33 @@ import { ChatCompletionUserMessageParam } from 'openai/resources/index.mjs';
import { ChatCompletionRole } from '@/constants/ai';
import { ONE_DAY_SEC } from '@/constants/common';
import { fetchCachedData } from '@/lib/cache';
import { AIProvider } from '@/server/ai-provider';

import { getAppConfig } from '../configs/get-app-config';
import { generateCompletion } from '../ai/generate-completion';

export const getChatInitial = async () => {
const locale = await getLocale();
const config = await getAppConfig();

const provider = AIProvider(config?.ai?.provider as string);

const DEFAULT_MODEL = config?.ai?.['text-models']?.[0].value;

try {
const introMessages = await fetchCachedData(
`chat-initial-[${locale}]`,
async () => {
const response = await provider.chat.completions.create({
messages: [
{
role: 'system',
content: 'You are a machine that only returns array format.',
},
const response = await generateCompletion({
instructions: 'You are a machine that only returns array format.',
input: [
{
content: `Generate 4 questions ranging from 120 to 150 characters long for an intelligent chat on the topic of programming. Language code is ${locale}. Write the result to an array.`,
role: ChatCompletionRole.USER as unknown as ChatCompletionUserMessageParam['role'],
},
],
model: DEFAULT_MODEL,
temperature: 0.8,
});

return response;
return response.completion;
},
ONE_DAY_SEC,
);

return {
introMessages: JSON.parse(introMessages.choices[0].message.content || '[]'),
introMessages: JSON.parse(introMessages.output_text ?? '[]'),
};
} catch (error) {
console.error('[GET_CHAT_INITIAL_ACTION]', error);
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
'use client';

import { ImageIcon, Paperclip, SendHorizonal, StopCircle } from 'lucide-react';
import { ImageIcon, SendHorizonal, StopCircle } from 'lucide-react';
import { useTranslations } from 'next-intl';

import { Badge, Button, Separator } from '@/components/ui';
Expand Down Expand Up @@ -54,9 +54,9 @@ export const ChatInputFooter = ({
)}
/>
</button>
<button type="button" disabled={isSubmitting}>
{/* <button type="button" disabled={isSubmitting}>
<Paperclip className="w-4 h-4 text-muted-foreground" />
</button>
</button> */}
<Separator orientation="vertical" className="mr-4 ml-2 h-6" />
<Button
className={cn(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,13 +138,14 @@ export const Chat = ({ conversations = [], initialData, isEmbed, isShared }: Cha

const completionStream = await fetcher.post('/api/ai/completions', {
body: {
messages: [...messages, ...(options?.regenerate ? [] : messagesForApi)].map(
input: [...messages, ...(options?.regenerate ? [] : messagesForApi)].map(
({ content, role }) => ({
content,
role,
}),
),
model: currentModel,
stream: true,
},
cache: 'no-cache',
headers: {
Expand All @@ -170,9 +171,16 @@ export const Chat = ({ conversations = [], initialData, isEmbed, isShared }: Cha
}

const chunk = decoder.decode(value);
streamAssistMessage += chunk;
const lines = chunk.split('\n').filter((line) => line.trim());

setAssistantMessage((prev) => prev + chunk);
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = JSON.parse(line.slice(6));

streamAssistMessage += data.delta;
setAssistantMessage((prev) => prev + data.delta);
}
}
}
}
} catch (error: any) {
Expand Down
41 changes: 19 additions & 22 deletions app/api/ai/completions/route.ts
Original file line number Diff line number Diff line change
@@ -1,50 +1,47 @@
import { OpenAIStream, StreamingTextResponse } from 'ai';
import { ReasonPhrases, StatusCodes } from 'http-status-codes';
import { NextRequest, NextResponse } from 'next/server';

import { generateCompletion } from '@/actions/ai/generate-completion';
import { getCurrentUser } from '@/actions/auth/get-current-user';
import { getAppConfig } from '@/actions/configs/get-app-config';
import { isOwner } from '@/lib/owner';
import { AIProvider } from '@/server/ai-provider';

export const maxDuration = 60;

export const POST = async (req: NextRequest) => {
const user = await getCurrentUser();
const config = await getAppConfig();

const provider = AIProvider(config?.ai?.provider as string);

try {
const { messages, model, system } = await req.json();
const { input, instructions, model, stream } = await req.json();

if (!user) {
return new NextResponse(ReasonPhrases.UNAUTHORIZED, { status: StatusCodes.UNAUTHORIZED });
}

const TEXT_MODELS = config?.ai?.['text-models'] ?? [];
const models = (isOwner(user?.userId) ? TEXT_MODELS : TEXT_MODELS.slice(0, 2)).map(
({ value }) => value,
);
const response = await generateCompletion({
input,
instructions,
model,
stream,
});

if (!models.includes(model)) {
if (!response.completion) {
console.error('[OPEN_AI_FORBIDDEN_MODEL]', user);

return new NextResponse(ReasonPhrases.FORBIDDEN, {
status: StatusCodes.FORBIDDEN,
});
}

const completion = await provider.chat.completions.create({
messages: [...(system ? [system] : []), ...messages],
model,
top_p: 0.5,
stream: true,
});

const stream = OpenAIStream(completion);
if (stream) {
return new NextResponse(response.completion as any, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
},
});
}

return new StreamingTextResponse(stream);
return NextResponse.json({ completion: response.completion });
} catch (error) {
console.error('[OPEN_AI_COMPLETIONS]', error);

Expand Down
Loading