Skip to content

Commit 3449032

Browse files
author
Kerwin
committed
Merge remote-tracking branch 'github/main'
# Conflicts:
#   README.md
#   service/.env.example
#   service/src/chatgpt/index.ts
#   service/src/index.ts
#   src/api/index.ts
#   src/components/common/Setting/index.vue
#   src/locales/en-US.ts
#   src/locales/zh-CN.ts
#   src/locales/zh-TW.ts
2 parents 1540ce9 + 5fee113 commit 3449032

File tree

31 files changed

+378
-74
lines changed

31 files changed

+378
-74
lines changed

.env

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,3 +5,6 @@ VITE_APP_API_BASE_URL=http://localhost:3002/
55

66
# Whether long replies are supported, which may result in higher API fees
77
VITE_GLOB_OPEN_LONG_REPLY=false
8+
9+
# When you want to use PWA
10+
VITE_GLOB_APP_PWA=false

.vscode/launch.json

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
{
2+
// Use IntelliSense to learn about possible attributes.
3+
// Hover to view descriptions of existing attributes.
4+
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5+
"version": "0.2.0",
6+
"configurations": [
7+
{
8+
"type": "chrome",
9+
"request": "launch",
10+
"name": "Launch Web App",
11+
"url": "http://localhost:1002",
12+
"webRoot": "${workspaceFolder}"
13+
},
14+
{
15+
"type": "node",
16+
"request": "launch",
17+
"name": "Launch Service Server",
18+
"runtimeExecutable": "${workspaceFolder}/service/node_modules/.bin/esno",
19+
"skipFiles": ["<node_internals>/**"],
20+
"program": "${workspaceFolder}/service/src/index.ts",
21+
"outFiles": ["${workspaceFolder}/service/**/*.js"],
22+
"envFile": "${workspaceFolder}/service/.env"
23+
}
24+
]
25+
}

README.md

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -179,6 +179,7 @@ pnpm dev
179179
通用:
180180

181181
- `AUTH_SECRET_KEY` 访问权限密钥,可选
182+
- `MAX_REQUEST_PER_HOUR` 每小时最大请求次数,可选,默认无限
182183
- `TIMEOUT_MS` 超时,单位毫秒,可选
183184
- `SOCKS_PROXY_HOST``SOCKS_PROXY_PORT` 一起时生效,可选
184185
- `SOCKS_PROXY_PORT``SOCKS_PROXY_HOST` 一起时生效,可选
@@ -234,6 +235,8 @@ services:
234235
OPENAI_API_MODEL: xxx
235236
# 反向代理,可选
236237
API_REVERSE_PROXY: xxx
238+
# 每小时最大请求次数,可选,默认无限
239+
MAX_REQUEST_PER_HOUR: 0
237240
# 超时,单位毫秒,可选
238241
TIMEOUT_MS: 60000
239242
# Socks代理,可选,和 SOCKS_PROXY_PORT 一起时生效
@@ -296,6 +299,7 @@ volumes:
296299
| --------------------- | ---------------------- | -------------------------------------------------------------------------------------------------- |
297300
| `PORT` | 必填 | 默认 `3002`
298301
| `AUTH_SECRET_KEY` | 可选 | 访问权限密钥 |
302+
| `MAX_REQUEST_PER_HOUR` | 可选 | 每小时最大请求次数,可选,默认无限 |
299303
| `TIMEOUT_MS` | 可选 | 超时时间,单位毫秒 |
300304
| `OPENAI_API_KEY` | `OpenAI API` 二选一 | 使用 `OpenAI API` 所需的 `apiKey` [(获取 apiKey)](https://platform.openai.com/overview) |
301305
| `OPENAI_ACCESS_TOKEN` | `Web API` 二选一 | 使用 `Web API` 所需的 `accessToken` [(获取 accessToken)](https://chat.openai.com/api/auth/session) |

docker-compose/docker-compose.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,8 @@ services:
2020
API_REVERSE_PROXY: xxx
2121
# 访问jwt加密参数,可选 不为空则允许登录 同时需要设置 MONGODB_URL
2222
AUTH_SECRET_KEY: xxx
23+
# 每小时最大请求次数,可选,默认无限
24+
MAX_REQUEST_PER_HOUR: 0
2325
# 超时,单位毫秒,可选
2426
TIMEOUT_MS: 60000
2527
# Socks代理,可选,和 SOCKS_PROXY_PORT 一起时生效

service/.env.example

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,9 @@ API_REVERSE_PROXY=
1616
# timeout
1717
TIMEOUT_MS=100000
1818

19+
# Rate Limit
20+
MAX_REQUEST_PER_HOUR=
21+
1922
# Socks Proxy Host
2023
SOCKS_PROXY_HOST=
2124

@@ -62,4 +65,6 @@ SMTP_TSL=true
6265
SMTP_USERNAME=yourname@example.com
6366
SMTP_PASSWORD=yourpassword
6467

65-
# ----- Only valid after setting AUTH_SECRET_KEY end ----
68+
# ----- Only valid after setting AUTH_SECRET_KEY end ----
69+
=======
70+
>>>>>>> github/main

> **Review note:** the two added lines above (69–70 of `service/.env.example`) are unresolved merge-conflict markers (`=======`, `>>>>>>> github/main`) that were committed by mistake during this merge; they should be removed in a follow-up commit so the example env file parses cleanly.

service/package.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -25,10 +25,11 @@
2525
},
2626
"dependencies": {
2727
"axios": "^1.3.4",
28-
"chatgpt": "^5.0.10",
28+
"chatgpt": "^5.1.2",
2929
"dotenv": "^16.0.3",
3030
"esno": "^0.16.3",
3131
"express": "^4.18.2",
32+
"express-rate-limit": "^6.7.0",
3233
"https-proxy-agent": "^5.0.1",
3334
"isomorphic-fetch": "^3.0.0",
3435
"mongodb": "^5.1.0",

service/pnpm-lock.yaml

Lines changed: 18 additions & 7 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

service/src/chatgpt/index.ts

Lines changed: 33 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,9 @@ import { getCacheConfig, getOriginConfig } from '../storage/config'
1010
import { sendResponse } from '../utils'
1111
import { isNotEmptyString } from '../utils/is'
1212
import type { ApiModel, ChatContext, ChatGPTUnofficialProxyAPIOptions, ModelConfig } from '../types'
13+
import type { RequestOptions } from './types'
14+
15+
dotenv.config()
1316

1417
const ErrorCodeMessage: Record<string, string> = {
1518
401: '[OpenAI] 提供错误的API密钥 | Incorrect API key provided',
@@ -20,10 +23,7 @@ const ErrorCodeMessage: Record<string, string> = {
2023
500: '[OpenAI] 服务器繁忙,请稍后再试 | Internal Server Error',
2124
}
2225

23-
dotenv.config()
24-
2526
let apiModel: ApiModel
26-
2727
let api: ChatGPTAPI | ChatGPTUnofficialProxyAPI
2828

2929
export async function initApi() {
@@ -33,29 +33,45 @@ export async function initApi() {
3333
if (!config.apiKey && !config.accessToken)
3434
throw new Error('Missing OPENAI_API_KEY or OPENAI_ACCESS_TOKEN environment variable')
3535

36-
if (config.apiKey) {
37-
const OPENAI_API_MODEL = config.apiModel
36+
if (isNotEmptyString(config.apiKey)) {
37+
const OPENAI_API_BASE_URL = process.env.OPENAI_API_BASE_URL
38+
const OPENAI_API_MODEL = process.env.OPENAI_API_MODEL
3839
const model = isNotEmptyString(OPENAI_API_MODEL) ? OPENAI_API_MODEL : 'gpt-3.5-turbo'
3940

4041
const options: ChatGPTAPIOptions = {
4142
apiKey: config.apiKey,
4243
completionParams: { model },
4344
debug: true,
4445
}
46+
// increase max token limit if use gpt-4
47+
if (model.toLowerCase().includes('gpt-4')) {
48+
// if use 32k model
49+
if (model.toLowerCase().includes('32k')) {
50+
options.maxModelTokens = 32768
51+
options.maxResponseTokens = 8192
52+
}
53+
else {
54+
options.maxModelTokens = 8192
55+
options.maxResponseTokens = 2048
56+
}
57+
}
4558

46-
if (isNotEmptyString(config.apiBaseUrl))
47-
options.apiBaseUrl = config.apiBaseUrl
59+
if (isNotEmptyString(OPENAI_API_BASE_URL))
60+
options.apiBaseUrl = `${OPENAI_API_BASE_URL}/v1`
4861

4962
await setupProxy(options)
5063

5164
api = new ChatGPTAPI({ ...options })
5265
apiModel = 'ChatGPTAPI'
5366
}
5467
else {
68+
const OPENAI_API_MODEL = process.env.OPENAI_API_MODEL
5569
const options: ChatGPTUnofficialProxyAPIOptions = {
5670
accessToken: config.accessToken,
5771
debug: true,
5872
}
73+
if (isNotEmptyString(OPENAI_API_MODEL))
74+
options.model = OPENAI_API_MODEL
5975

6076
if (isNotEmptyString(config.reverseProxy))
6177
options.apiReverseProxyUrl = config.reverseProxy
@@ -67,18 +83,20 @@ export async function initApi() {
6783
}
6884
}
6985

70-
async function chatReplyProcess(
71-
message: string,
72-
lastContext?: { conversationId?: string; parentMessageId?: string },
73-
process?: (chat: ChatMessage) => void,
74-
) {
75-
const timeoutMs = (await getCacheConfig()).timeoutMs
86+
async function chatReplyProcess(options: RequestOptions) {
87+
const { message, lastContext, process, systemMessage } = options
7688
try {
89+
const timeoutMs = (await getCacheConfig()).timeoutMs
7790
let options: SendMessageOptions = { timeoutMs }
7891

79-
if (lastContext) {
92+
if (apiModel === 'ChatGPTAPI') {
93+
if (isNotEmptyString(systemMessage))
94+
options.systemMessage = systemMessage
95+
}
96+
97+
if (lastContext != null) {
8098
if (apiModel === 'ChatGPTAPI')
81-
options = { parentMessageId: lastContext.parentMessageId }
99+
options.parentMessageId = lastContext.parentMessageId
82100
else
83101
options = { ...lastContext }
84102
}

service/src/chatgpt/types.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
import type { ChatMessage } from 'chatgpt'
2+
3+
export interface RequestOptions {
4+
message: string
5+
lastContext?: { conversationId?: string; parentMessageId?: string }
6+
process?: (chat: ChatMessage) => void
7+
systemMessage?: string
8+
}

service/src/index.ts

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,15 @@
11
import express from 'express'
22
import jwt from 'jsonwebtoken'
33
import { ObjectId } from 'mongodb'
4+
import type { RequestProps } from './types'
45
import type { ChatContext, ChatMessage } from './chatgpt'
56
import { chatConfig, chatReplyProcess, currentModel, initApi } from './chatgpt'
67
import { auth } from './middleware/auth'
78
import { clearConfigCache, getCacheConfig, getOriginConfig } from './storage/config'
89
import type { ChatOptions, Config, MailConfig, SiteConfig, UserInfo } from './storage/model'
910
import { Status } from './storage/model'
1011
import { clearChat, createChatRoom, createUser, deleteChat, deleteChatRoom, existsChatRoom, getChat, getChatRooms, getChats, getUser, getUserById, insertChat, renameChatRoom, updateChat, updateConfig, updateUserInfo, verifyUser } from './storage/mongo'
12+
import { limiter } from './middleware/limiter'
1113
import { isNotEmptyString } from './utils/is'
1214
import { sendMail } from './utils/mail'
1315
import { checkUserVerify, getUserVerifyUrl, md5 } from './utils/security'
@@ -150,19 +152,23 @@ router.post('/chat', auth, async (req, res) => {
150152
}
151153
})
152154

153-
router.post('/chat-process', auth, async (req, res) => {
155+
router.post('/chat-process', [auth, limiter], async (req, res) => {
154156
res.setHeader('Content-type', 'application/octet-stream')
155157

156158
try {
157-
const { roomId, uuid, regenerate, prompt, options = {} } = req.body as
158-
{ roomId: number; uuid: number; regenerate: boolean; prompt: string; options?: ChatContext }
159+
const { roomId, uuid, regenerate, prompt, options = {}, systemMessage } = req.body as RequestProps
159160
const message = regenerate
160161
? await getChat(roomId, uuid)
161162
: await insertChat(uuid, prompt, roomId, options as ChatOptions)
162163
let firstChunk = true
163-
const result = await chatReplyProcess(prompt, options, (chat: ChatMessage) => {
164-
res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`)
165-
firstChunk = false
164+
const result = await chatReplyProcess({
165+
message: prompt,
166+
lastContext: options,
167+
process: (chat: ChatMessage) => {
168+
res.write(firstChunk ? JSON.stringify(chat) : `\n${JSON.stringify(chat)}`)
169+
firstChunk = false
170+
},
171+
systemMessage,
166172
})
167173
if (result.status === 'Success')
168174
await updateChat(message._id, result.data.text, result.data.id)

service/src/middleware/limiter.ts

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import { rateLimit } from 'express-rate-limit'
2+
import { isNotEmptyString } from '../utils/is'
3+
4+
const MAX_REQUEST_PER_HOUR = process.env.MAX_REQUEST_PER_HOUR
5+
6+
const maxCount = (isNotEmptyString(MAX_REQUEST_PER_HOUR) && !isNaN(Number(MAX_REQUEST_PER_HOUR)))
7+
? parseInt(MAX_REQUEST_PER_HOUR)
8+
: 0 // 0 means unlimited
9+
10+
const limiter = rateLimit({
11+
windowMs: 60 * 60 * 1000, // Maximum number of accesses within an hour
12+
max: maxCount,
13+
statusCode: 200, // 200 means success,but the message is 'Too many request from this IP in 1 hour'
14+
message: async (req, res) => {
15+
res.send({ status: 'Fail', message: 'Too many request from this IP in 1 hour', data: null })
16+
},
17+
})
18+
19+
export { limiter }

service/src/types.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,14 @@
11
import type { FetchFn } from 'chatgpt'
22

3+
export interface RequestProps {
4+
roomId: number
5+
uuid: number
6+
regenerate: boolean
7+
prompt: string
8+
options?: ChatContext
9+
systemMessage: string
10+
}
11+
312
export interface ChatContext {
413
conversationId?: string
514
parentMessageId?: string

src/api/index.ts

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import type { AxiosProgressEvent, GenericAbortSignal } from 'axios'
22
import { get, post } from '@/utils/request'
33
import type { ConfigState, MailConfig, SiteConfig } from '@/components/common/Setting/model'
4+
import { useSettingStore } from '@/store'
45

56
export function fetchChatAPI<T = any>(
67
prompt: string,
@@ -30,9 +31,11 @@ export function fetchChatAPIProcess<T = any>(
3031
signal?: GenericAbortSignal
3132
onDownloadProgress?: (progressEvent: AxiosProgressEvent) => void },
3233
) {
34+
const settingStore = useSettingStore()
35+
3336
return post<T>({
3437
url: '/chat-process',
35-
data: { roomId: params.roomId, uuid: params.uuid, regenerate: params.regenerate || false, prompt: params.prompt, options: params.options },
38+
data: { roomId: params.roomId, uuid: params.uuid, regenerate: params.regenerate || false, prompt: params.prompt, options: params.options, systemMessage: settingStore.systemMessage },
3639
signal: params.signal,
3740
onDownloadProgress: params.onDownloadProgress,
3841
})

0 commit comments

Comments (0)