
Commit 06b9e01

chore: updated chat (#69)
* deepseek ai
* code refactored
* code refactored
1 parent a0ae6ae commit 06b9e01

File tree: 13 files changed, +299 −223 lines


.env.example

Lines changed: 2 additions & 2 deletions
```diff
@@ -27,9 +27,9 @@ YANDEX_CLIENT_ID=
 YANDEX_CLIENT_SECRET=
 
 # Copilot secrets
-DEEP_SEEK_TOKEN=
+DEEPSEEK_API_KEY=
 OLLAMA_BASE_URL=
-OPEN_AI_TOKEN=
+OPENAI_API_KEY=
 
 # Databases
 POSTGRES_PRISMA_URL=
```

actions/ai/generate-completion.ts

Lines changed: 13 additions & 18 deletions
```diff
@@ -4,11 +4,9 @@ import { ChatCompletionMessageParam } from 'openai/resources/index.mjs';
 import { ResponseCreateParamsBase } from 'openai/resources/responses/responses.mjs';
 
 import { AI_PROVIDER, ChatCompletionRole } from '@/constants/ai';
-import { isOwner } from '@/lib/owner';
-import { AIProvider } from '@/server/ai-provider';
 
 import { getCurrentUser } from '../auth/get-current-user';
-import { getAppConfig } from '../configs/get-app-config';
+import { getTargetProvider } from './get-target-provider';
 
 type GenerateCompletion = Omit<ResponseCreateParamsBase, 'model'> & {
   model?: string;
@@ -21,34 +19,27 @@ export const generateCompletion = async ({
   stream = false,
 }: GenerateCompletion) => {
   const user = await getCurrentUser();
-  const config = await getAppConfig();
 
-  const aiModel = model || config?.ai?.['text-models']?.[0].value || '';
-  const provider = AIProvider(config?.ai?.provider);
+  const { provider, providerName, targetTextModel } = await getTargetProvider(model);
 
-  const TEXT_MODELS = config?.ai?.['text-models'] ?? [];
-  const models = (isOwner(user?.userId) ? TEXT_MODELS : TEXT_MODELS.slice(0, 2)).map(
-    ({ value }) => value,
-  );
-
-  if (!models.includes(aiModel)) {
-    return { completion: null, model: aiModel };
+  if (!user?.hasSubscription && targetTextModel.isSubscription) {
+    return { completion: null, model: targetTextModel.value };
   }
 
   const completion =
-    config?.ai?.provider === AI_PROVIDER.openai
+    providerName === AI_PROVIDER.openai
       ? await provider.responses.create({
           input,
           instructions,
-          model: aiModel,
+          model: targetTextModel.value,
           stream,
         })
       : await provider.chat.completions.create({
          messages: [
            ...(instructions ? [{ role: ChatCompletionRole.SYSTEM, content: instructions }] : []),
            ...input,
          ] as ChatCompletionMessageParam[],
-          model: aiModel,
+          model: targetTextModel.value,
          stream,
        });
 
@@ -68,11 +59,15 @@ export const generateCompletion = async ({
             content_index: event.content_index,
             delta: event.delta,
           };
+
           await writer.write(encoder.encode(`data: ${JSON.stringify(data)}\n\n`));
+        } else if (event.choices[0].finish_reason !== 'stop') {
+          await writer.write(encoder.encode(event.choices[0].delta.content ?? ''));
         }
       }
     } catch (error) {
       console.error('Stream processing error:', error);
+
       await writer.write(
         encoder.encode(`data: ${JSON.stringify({ error: 'Stream processing error' })}\n\n`),
       );
@@ -81,8 +76,8 @@ export const generateCompletion = async ({
       }
     })();
 
-    return { completion: stream_response.readable, model: aiModel };
+    return { completion: stream_response.readable, model: targetTextModel.value };
   }
 
-  return { completion, model: aiModel };
+  return { completion, model: targetTextModel.value };
 };
```
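
A minimal usage sketch of the refactored action, assuming the `@/` import alias and an illustrative model id (actual ids come from the app config):

```ts
import { generateCompletion } from '@/actions/ai/generate-completion';

// Illustrative call: if the resolved model is subscription-only and the
// current user has no subscription, `completion` comes back as null.
const { completion, model } = await generateCompletion({
  input: [{ role: 'user', content: 'Hello!' }],
  instructions: 'You are a helpful assistant.',
  model: 'deepseek-chat', // assumed model id from the app config
  stream: false,
});
```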

actions/ai/generate-image.ts

Lines changed: 8 additions & 13 deletions
```diff
@@ -2,32 +2,27 @@
 
 import { ImageGenerateParams } from 'openai/resources/images.mjs';
 
-import { AIProvider } from '@/server/ai-provider';
-
-import { getAppConfig } from '../configs/get-app-config';
+import { getCurrentUser } from '../auth/get-current-user';
+import { getTargetProvider } from './get-target-provider';
 
 type GenerateImage = Omit<ImageGenerateParams, 'model'> & {
   model?: string;
 };
 
 export const generateImage = async ({ model, prompt }: GenerateImage) => {
-  const config = await getAppConfig();
-
-  const aiModel = model || config?.ai?.['image-models']?.[0].value || '';
-  const provider = AIProvider(config?.ai?.provider);
+  const user = await getCurrentUser();
 
-  const IMAGE_MODELS = config?.ai?.['image-models'] ?? [];
-  const models = IMAGE_MODELS.map(({ value }) => value);
+  const { provider, targetImageModel } = await getTargetProvider(model);
 
-  if (!models.includes(aiModel)) {
+  if (!user?.hasSubscription && targetImageModel.isSubscription) {
     return {
       image: null,
-      model: aiModel,
+      model: targetImageModel.value,
     };
   }
 
   const response = await provider.images.generate({
-    model: aiModel,
+    model: targetImageModel.value,
     n: 1,
     prompt,
     quality: 'hd',
@@ -37,6 +32,6 @@ export const generateImage = async ({ model, prompt }: GenerateImage) => {
 
   return {
     image: response,
-    model: null,
+    model: targetImageModel.value,
   };
 };
```

actions/ai/get-target-provider.ts

Lines changed: 67 additions & 0 deletions
```diff
@@ -0,0 +1,67 @@
+'use server';
+
+import OpenAI from 'openai';
+
+import { AI_PROVIDER } from '@/constants/ai';
+
+import { getAppConfig } from '../configs/get-app-config';
+
+const AIProvider = (provider: string) => {
+  let options = {};
+
+  switch (provider) {
+    case AI_PROVIDER.deepseek:
+      options = {
+        apiKey: process.env.DEEPSEEK_API_KEY,
+        baseURL: 'https://api.deepseek.com',
+      };
+      break;
+    case AI_PROVIDER.openai:
+      options = {
+        apiKey: process.env.OPENAI_API_KEY,
+      };
+      break;
+    default:
+      options = {
+        apiKey: 'ollama',
+        baseURL: process.env.OLLAMA_BASE_URL,
+      };
+  }
+
+  return new OpenAI(options);
+};
+
+export const getTargetProvider = async (model: string | undefined) => {
+  const config = await getAppConfig();
+
+  const targetProvider = config.ai.find((ai) => {
+    const allModels = [...ai['image-models'], ...ai['text-models']].map((model) => model.value);
+
+    return allModels.includes(String(model));
+  });
+
+  if (!targetProvider) {
+    const { provider, 'text-models': textModels, 'image-models': imageModels } = config.ai[0];
+
+    return {
+      provider: AIProvider(provider),
+      providerName: provider,
+      targetImageModel: imageModels[0],
+      targetTextModel: textModels[0],
+    };
+  }
+
+  const { provider, 'text-models': textModels, 'image-models': imageModels } = targetProvider;
+
+  const targetTextModel =
+    textModels.find((textModel) => textModel.value === model) ?? textModels[0];
+  const targetImageModel =
+    imageModels.find((imageModel) => imageModel.value === model) ?? imageModels[0];
+
+  return {
+    provider: AIProvider(provider),
+    providerName: provider,
+    targetImageModel,
+    targetTextModel,
+  };
+};
```
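
A minimal sketch of how the new helper is consumed, mirroring the call sites in generate-completion.ts and generate-image.ts above (the model id shown is only an assumption):

```ts
import { getTargetProvider } from '@/actions/ai/get-target-provider';

// Resolves whichever configured provider owns the requested model id,
// falling back to the first provider and its first models when nothing matches.
const { provider, providerName, targetTextModel, targetImageModel } =
  await getTargetProvider('deepseek-chat'); // assumed model id

// `provider` is an OpenAI-compatible client pointed at the matched provider;
// `targetTextModel.value` / `targetImageModel.value` are the resolved model ids.
```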

actions/configs/get-app-config.ts

Lines changed: 10 additions & 8 deletions
```diff
@@ -6,10 +6,10 @@ import { getGithubContents } from '../github/get-contents';
 
 export type GetAppConfig = {
   ai: {
-    'image-models': { value: string; label: string }[];
-    'text-models': { value: string; label: string }[];
+    'image-models': { value: string; label: string; owner: string; isSubscription: boolean }[];
+    'text-models': { value: string; label: string; owner: string; isSubscription: boolean }[];
     provider: string;
-  };
+  }[];
   auth: { isBlockedNewLogin: true; providers: Record<string, boolean> };
   features: { christmas: boolean };
 };
@@ -26,11 +26,13 @@ export const getAppConfig = async (): Promise<GetAppConfig> => {
     console.error('[GET_APP_CONFIG_ACTION]', error);
 
     return {
-      ai: {
-        'image-models': [],
-        'text-models': [],
-        provider: 'ollama',
-      },
+      ai: [
+        {
+          'image-models': [],
+          'text-models': [],
+          provider: 'ollama',
+        },
+      ],
       auth: {
         isBlockedNewLogin: true,
         providers: {
```
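
With `ai` now an array of providers, a hypothetical config matching the updated `GetAppConfig` type could look like the sketch below; provider names, model ids, and flags are assumptions, not values from the real remote config:

```ts
import type { GetAppConfig } from '@/actions/configs/get-app-config';

// Illustrative only: each entry describes one provider plus the text and
// image models it exposes, with per-model subscription gating.
const aiConfig: GetAppConfig['ai'] = [
  {
    provider: 'openai',
    'text-models': [
      { value: 'gpt-4o-mini', label: 'GPT-4o mini', owner: 'openai', isSubscription: false },
      { value: 'gpt-4o', label: 'GPT-4o', owner: 'openai', isSubscription: true },
    ],
    'image-models': [
      { value: 'dall-e-3', label: 'DALL-E 3', owner: 'openai', isSubscription: true },
    ],
  },
  {
    provider: 'deepseek',
    'text-models': [
      { value: 'deepseek-chat', label: 'DeepSeek Chat', owner: 'deepseek', isSubscription: false },
    ],
    'image-models': [],
  },
];
```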

app/(chat)/(routes)/chat/[[...slug]]/_components/chat-main/chat-input-footer.tsx

Lines changed: 1 addition & 4 deletions
```diff
@@ -29,7 +29,7 @@ export const ChatInputFooter = ({
     config: state.config,
   }));
 
-  const IMAGE_MODELS = appConfig?.ai?.['image-models'] ?? [];
+  const IMAGE_MODELS = appConfig?.ai.flatMap((ai) => ai['image-models']) ?? [];
 
   return (
     <div className="flex justify-between px-2 py-2 items-center">
@@ -54,9 +54,6 @@
           )}
         />
       </button>
-      {/* <button type="button" disabled={isSubmitting}>
-        <Paperclip className="w-4 h-4 text-muted-foreground" />
-      </button> */}
       <Separator orientation="vertical" className="mr-4 ml-2 h-6" />
       <Button
         className={cn(
```
