
Commit 313a0b3

chore: updated ai libs (#66)
* updated ai libs
* updated deps
* code refactor
* sonar
1 parent e7b606f · commit 313a0b3

File tree

11 files changed: +252 -306 lines

actions/ai/generate-completion.ts

Lines changed: 76 additions & 0 deletions
@@ -0,0 +1,76 @@
+'use server';
+
+import { ResponseCreateParamsBase } from 'openai/resources/responses/responses.mjs';
+
+import { isOwner } from '@/lib/owner';
+import { AIProvider } from '@/server/ai-provider';
+
+import { getCurrentUser } from '../auth/get-current-user';
+import { getAppConfig } from '../configs/get-app-config';
+
+type GenerateCompletion = Omit<ResponseCreateParamsBase, 'model'> & {
+  model?: string;
+};
+
+export const generateCompletion = async ({
+  input,
+  instructions,
+  model,
+  stream = false,
+}: GenerateCompletion) => {
+  const user = await getCurrentUser();
+  const config = await getAppConfig();
+
+  const aiModel = model ?? config?.ai?.['text-models']?.[0].value ?? '';
+  const provider = AIProvider(config?.ai?.provider);
+
+  const TEXT_MODELS = config?.ai?.['text-models'] ?? [];
+  const models = (isOwner(user?.userId) ? TEXT_MODELS : TEXT_MODELS.slice(0, 2)).map(
+    ({ value }) => value,
+  );
+
+  if (!models.includes(aiModel)) {
+    return { completion: null, model: aiModel };
+  }
+
+  const completion = await provider.responses.create({
+    input,
+    instructions,
+    model: aiModel,
+    stream,
+  });
+
+  if (stream) {
+    const encoder = new TextEncoder();
+    const stream_response = new TransformStream();
+
+    (async () => {
+      const writer = stream_response.writable.getWriter();
+
+      try {
+        for await (const event of completion as any) {
+          if (event.type === 'response.output_text.delta') {
+            const data = {
+              item_id: event.item_id,
+              output_index: event.output_index,
+              content_index: event.content_index,
+              delta: event.delta,
+            };
+            await writer.write(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
+          }
+        }
+      } catch (error) {
+        console.error('Stream processing error:', error);
+        await writer.write(
+          encoder.encode(`data: ${JSON.stringify({ error: 'Stream processing error' })}\n\n`),
+        );
+      } finally {
+        await writer.close();
+      }
+    })();
+
+    return { completion: stream_response.readable, model: aiModel };
+  }
+
+  return { completion, model: aiModel };
+};
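Note: a minimal sketch of calling the new action without streaming, for orientation. The prompt is illustrative, and the cast is only there because completion is typed as a union of stream and non-stream results; omitting model falls back to the first configured text model.

import { generateCompletion } from '@/actions/ai/generate-completion';

const askOnce = async () => {
  const { completion, model } = await generateCompletion({
    input: [{ role: 'user', content: 'Summarize SSE in one sentence.' }], // hypothetical prompt
    stream: false,
  });

  // completion is null when the resolved model is not in the caller's allowed list;
  // otherwise it is the raw Responses API result and output_text holds the text.
  return { text: (completion as any)?.output_text ?? '', model };
};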

actions/ai/generate-image.ts

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+'use server';
+
+import { ImageGenerateParams } from 'openai/resources/images.mjs';
+
+import { AIProvider } from '@/server/ai-provider';
+
+import { getAppConfig } from '../configs/get-app-config';
+
+type GenerateImage = Omit<ImageGenerateParams, 'model'> & {
+  model?: string;
+};
+
+export const generateImage = async ({ model, prompt }: GenerateImage) => {
+  const config = await getAppConfig();
+
+  const aiModel = model ?? config?.ai?.['image-models']?.[0].value ?? '';
+  const provider = AIProvider(config?.ai?.provider);
+
+  const IMAGE_MODELS = config?.ai?.['image-models'] ?? [];
+  const models = IMAGE_MODELS.map(({ value }) => value);
+
+  if (!models.includes(aiModel)) {
+    return {
+      image: null,
+      model: aiModel,
+    };
+  }
+
+  const response = await provider.images.generate({
+    model: aiModel,
+    n: 1,
+    prompt,
+    quality: 'hd',
+    response_format: 'b64_json',
+    size: '1024x1024',
+  });
+
+  return {
+    image: response,
+    model: null,
+  };
+};
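Note: because the action requests response_format: 'b64_json', callers receive base64 rather than a hosted URL. A sketch of decoding it, assuming a Node runtime; the destination path and function name are hypothetical.

import { writeFile } from 'node:fs/promises';

import { generateImage } from '@/actions/ai/generate-image';

const saveGeneratedImage = async (prompt: string) => {
  const { image } = await generateImage({ prompt });

  // data[0].b64_json is the base64 payload requested above.
  const b64 = image?.data?.[0]?.b64_json;
  if (!b64) return null;

  const path = '/tmp/generated.png'; // hypothetical destination
  await writeFile(path, Buffer.from(b64, 'base64'));

  return path;
};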

actions/auth/get-login-quote.ts

Lines changed: 9 additions & 19 deletions
@@ -6,56 +6,46 @@ import { ChatCompletionUserMessageParam } from 'openai/resources/index.mjs';
 import { ChatCompletionRole } from '@/constants/ai';
 import { TEN_MINUTE_SEC } from '@/constants/common';
 import { fetchCachedData } from '@/lib/cache';
-import { AIProvider } from '@/server/ai-provider';
 
-import { getAppConfig } from '../configs/get-app-config';
+import { generateCompletion } from '../ai/generate-completion';
 
 export const getLoginQuote = async () => {
   const locale = await getLocale();
-  const config = await getAppConfig();
-
-  const provider = AIProvider(config?.ai?.provider as string);
-
-  const DEFAULT_MODEL = config?.ai?.['text-models']?.[0].value;
 
   try {
     const response = await fetchCachedData(
       `login-quote-[${locale}]`,
       async () => {
-        const response = await provider.chat.completions.create({
-          messages: [
-            {
-              role: 'system',
-              content:
-                'You are a machine that only returns JSON object format without unnecessary symbols.',
-            },
+        const response = await generateCompletion({
+          instructions:
+            'You are a machine that only returns JSON object format without unnecessary symbols.',
+          input: [
             {
               content: `Generate a quote from a famous philosopher. Language code is ${locale}. Write it down in JSON format - {"quote": "Quote", "author": "Quote the author"}`,
               role: ChatCompletionRole.USER as unknown as ChatCompletionUserMessageParam['role'],
             },
           ],
-          model: DEFAULT_MODEL,
-          temperature: 0.8,
         });
 
         return response;
       },
       TEN_MINUTE_SEC,
     );
 
-    const generatedQuote = JSON.parse(response.choices[0].message.content || '{}');
+    const generatedQuote = JSON.parse(response.completion.output_text ?? '{}');
+    const model = response.model ?? '';
 
     return {
       author: generatedQuote?.author ?? '',
-      model: DEFAULT_MODEL,
+      model,
       quote: generatedQuote?.quote ?? '',
     };
   } catch (error) {
     console.error('[GET_LOGIN_CITE_ACTION]', error);
 
     return {
       author: '',
-      model: DEFAULT_MODEL,
+      model: '',
       quote: '',
     };
   }
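Note: JSON.parse here relies on the model honoring the "JSON only" instruction; models sometimes wrap output in markdown fences, in which case the parse throws and the catch branch returns empty fields. A defensive sketch, where safeParseJson is a hypothetical helper and not part of this commit:

// Strip markdown fences before parsing model output.
const safeParseJson = <T>(raw: string, fallback: T): T => {
  const cleaned = raw.replace(/^```(?:json)?\s*|\s*```$/g, '').trim();

  try {
    return JSON.parse(cleaned) as T;
  } catch {
    return fallback;
  }
};

const generatedQuote = safeParseJson(response.completion.output_text ?? '', {
  author: '',
  quote: '',
});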

actions/chat/get-chat-initial.ts

Lines changed: 6 additions & 17 deletions
@@ -6,44 +6,33 @@ import { ChatCompletionUserMessageParam } from 'openai/resources/index.mjs';
 import { ChatCompletionRole } from '@/constants/ai';
 import { ONE_DAY_SEC } from '@/constants/common';
 import { fetchCachedData } from '@/lib/cache';
-import { AIProvider } from '@/server/ai-provider';
 
-import { getAppConfig } from '../configs/get-app-config';
+import { generateCompletion } from '../ai/generate-completion';
 
 export const getChatInitial = async () => {
   const locale = await getLocale();
-  const config = await getAppConfig();
-
-  const provider = AIProvider(config?.ai?.provider as string);
-
-  const DEFAULT_MODEL = config?.ai?.['text-models']?.[0].value;
 
   try {
     const introMessages = await fetchCachedData(
       `chat-initial-[${locale}]`,
       async () => {
-        const response = await provider.chat.completions.create({
-          messages: [
-            {
-              role: 'system',
-              content: 'You are a machine that only returns array format.',
-            },
+        const response = await generateCompletion({
+          instructions: 'You are a machine that only returns array format.',
+          input: [
             {
               content: `Generate 4 questions ranging from 120 to 150 characters long for an intelligent chat on the topic of programming. Language code is ${locale}. Write the result to an array.`,
               role: ChatCompletionRole.USER as unknown as ChatCompletionUserMessageParam['role'],
             },
           ],
-          model: DEFAULT_MODEL,
-          temperature: 0.8,
         });
 
-        return response;
+        return response.completion;
       },
       ONE_DAY_SEC,
     );
 
     return {
-      introMessages: JSON.parse(introMessages.choices[0].message.content || '[]'),
+      introMessages: JSON.parse(introMessages.output_text ?? '[]'),
     };
   } catch (error) {
     console.error('[GET_CHAT_INITIAL_ACTION]', error);
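Note: unlike get-login-quote, this action caches response.completion itself. output_text is an SDK convenience on the Responses object, so if the cache layer ever JSON round trips the value in a way that drops it, the text would need to be rebuilt from the raw output array. A sketch under that assumption, following the public Responses API shape; readOutputText is hypothetical:

// Rebuild the aggregated text from a (possibly deserialized) Responses API object.
const readOutputText = (completion: any): string =>
  (completion?.output ?? [])
    .filter((item: any) => item.type === 'message')
    .flatMap((item: any) => item.content ?? [])
    .filter((part: any) => part.type === 'output_text')
    .map((part: any) => part.text)
    .join('');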

app/(chat)/(routes)/chat/[[...slug]]/_components/chat-main/chat-input-footer.tsx

Lines changed: 3 additions & 3 deletions
@@ -1,6 +1,6 @@
 'use client';
 
-import { ImageIcon, Paperclip, SendHorizonal, StopCircle } from 'lucide-react';
+import { ImageIcon, SendHorizonal, StopCircle } from 'lucide-react';
 import { useTranslations } from 'next-intl';
 
 import { Badge, Button, Separator } from '@/components/ui';
@@ -54,9 +54,9 @@ export const ChatInputFooter = ({
           )}
         />
       </button>
-      <button type="button" disabled={isSubmitting}>
+      {/* <button type="button" disabled={isSubmitting}>
         <Paperclip className="w-4 h-4 text-muted-foreground" />
-      </button>
+      </button> */}
       <Separator orientation="vertical" className="mr-4 ml-2 h-6" />
       <Button
         className={cn(

app/(chat)/(routes)/chat/[[...slug]]/_components/chat-main/chat.tsx

Lines changed: 11 additions & 3 deletions
@@ -138,13 +138,14 @@ export const Chat = ({ conversations = [], initialData, isEmbed, isShared }: Cha
 
       const completionStream = await fetcher.post('/api/ai/completions', {
         body: {
-          messages: [...messages, ...(options?.regenerate ? [] : messagesForApi)].map(
+          input: [...messages, ...(options?.regenerate ? [] : messagesForApi)].map(
            ({ content, role }) => ({
              content,
              role,
            }),
          ),
          model: currentModel,
+          stream: true,
        },
        cache: 'no-cache',
        headers: {
@@ -170,9 +171,16 @@ export const Chat = ({ conversations = [], initialData, isEmbed, isShared }: Cha
          }
 
          const chunk = decoder.decode(value);
-          streamAssistMessage += chunk;
+          const lines = chunk.split('\n').filter((line) => line.trim());
 
-          setAssistantMessage((prev) => prev + chunk);
+          for (const line of lines) {
+            if (line.startsWith('data: ')) {
+              const data = JSON.parse(line.slice(6));
+
+              streamAssistMessage += data.delta;
+              setAssistantMessage((prev) => prev + data.delta);
+            }
+          }
        }
      }
    } catch (error: any) {
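Note: splitting each decoded chunk on '\n' assumes every read delivers whole "data: ...\n\n" events; a network read can cut an event in half, which would make JSON.parse throw. A buffered variant is a common hardening (parseSseChunk is a hypothetical helper, not part of this commit); calling decoder.decode(value, { stream: true }) would similarly guard against split multi-byte characters.

// Carry a partial SSE event across reads instead of parsing each chunk in isolation.
let buffer = '';

const parseSseChunk = (chunk: string): string[] => {
  buffer += chunk;

  const events = buffer.split('\n\n');
  buffer = events.pop() ?? ''; // keep the possibly incomplete tail for the next read

  return events
    .filter((event) => event.startsWith('data: '))
    .map((event) => JSON.parse(event.slice(6)).delta as string);
};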

app/api/ai/completions/route.ts

Lines changed: 19 additions & 22 deletions
@@ -1,50 +1,47 @@
-import { OpenAIStream, StreamingTextResponse } from 'ai';
 import { ReasonPhrases, StatusCodes } from 'http-status-codes';
 import { NextRequest, NextResponse } from 'next/server';
 
+import { generateCompletion } from '@/actions/ai/generate-completion';
 import { getCurrentUser } from '@/actions/auth/get-current-user';
-import { getAppConfig } from '@/actions/configs/get-app-config';
-import { isOwner } from '@/lib/owner';
-import { AIProvider } from '@/server/ai-provider';
 
 export const maxDuration = 60;
 
 export const POST = async (req: NextRequest) => {
   const user = await getCurrentUser();
-  const config = await getAppConfig();
-
-  const provider = AIProvider(config?.ai?.provider as string);
 
   try {
-    const { messages, model, system } = await req.json();
+    const { input, instructions, model, stream } = await req.json();
 
     if (!user) {
       return new NextResponse(ReasonPhrases.UNAUTHORIZED, { status: StatusCodes.UNAUTHORIZED });
     }
 
-    const TEXT_MODELS = config?.ai?.['text-models'] ?? [];
-    const models = (isOwner(user?.userId) ? TEXT_MODELS : TEXT_MODELS.slice(0, 2)).map(
-      ({ value }) => value,
-    );
+    const response = await generateCompletion({
+      input,
+      instructions,
+      model,
+      stream,
+    });
 
-    if (!models.includes(model)) {
+    if (!response.completion) {
      console.error('[OPEN_AI_FORBIDDEN_MODEL]', user);
 
      return new NextResponse(ReasonPhrases.FORBIDDEN, {
        status: StatusCodes.FORBIDDEN,
      });
    }
 
-    const completion = await provider.chat.completions.create({
-      messages: [...(system ? [system] : []), ...messages],
-      model,
-      top_p: 0.5,
-      stream: true,
-    });
-
-    const stream = OpenAIStream(completion);
+    if (stream) {
+      return new NextResponse(response.completion as any, {
+        headers: {
+          'Content-Type': 'text/event-stream',
+          'Cache-Control': 'no-cache',
+          Connection: 'keep-alive',
+        },
+      });
+    }
 
-    return new StreamingTextResponse(stream);
+    return NextResponse.json({ completion: response.completion });
   } catch (error) {
     console.error('[OPEN_AI_COMPLETIONS]', error);
 
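Note: for completeness, a sketch of exercising the refactored route without streaming, using plain fetch rather than the app's fetcher helper; the prompt is illustrative.

const res = await fetch('/api/ai/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    input: [{ role: 'user', content: 'Hello' }], // hypothetical prompt
    instructions: 'Answer briefly.',
    stream: false,
  }),
});

// With stream: false the route answers with NextResponse.json({ completion }).
const { completion } = await res.json();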