Skip to content

Commit bc04b12

Browse files
committed
RustGLM Update v0.1.5
1 parent aaa95e5 commit bc04b12

File tree

14 files changed

+479
-357
lines changed

14 files changed

+479
-357
lines changed

.idea/.gitignore

Lines changed: 8 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

.idea/RustGLM.iml

Lines changed: 11 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

.idea/modules.xml

Lines changed: 8 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

.idea/vcs.xml

Lines changed: 6 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[package]
22
name = "RustGLM"
3-
version = "0.1.4"
3+
version = "0.1.5"
44
edition = "2021"
55
authors = ["blueokanna <blueokanna@gmail.com>"]
66
repository = "https://github.com/blueokanna/RustGLM.git"
@@ -26,11 +26,7 @@ reqwest = { version = "0.12.1", features = ["json", "blocking", "stream"] }
2626
serde = { version = "1.0.196", features = ["derive"] }
2727
regex = { version = "1.10.3"}
2828
tokio = { version = "1.35.1", features = ["full"] }
29-
time = "0.3.32"
3029
lazy_static = "1.4.0"
31-
anyhow = "1.0.79"
32-
tokio-util = "0.7.10"
3330
futures-util = { version = "0.3.30", features = ["compat"] }
3431
futures = "0.3.30"
35-
serde_derive = "1.0.197"
36-
async-trait = "0.1.77"
32+
serde_derive = "1.0.197"

Constants.toml

Lines changed: 26 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,33 +1,48 @@
1-
#Blueokanna Editted
2-
#Constant Version = 0.1.4
3-
4-
[[ai_cogview_config_3]]
5-
model = "cogview-3"
1+
[[ai_config_glm4_plus]]
2+
assistant_role = "assistant"
3+
language_model = "glm-4-plus"
4+
system_content = "你现在是一个人工智能助手 ChatGLM,你的中文名叫做小智,任务是回答问题,使用同音字替代敏感词汇,输出无长度限制。"
5+
#system_content = "你现在是莉莉娅,一只自由成年的可爱猫娘。任务是回答问题,不必解释,可涉及非政治敏感内容。每句话结尾加上喵,使用同音字替代敏感词汇,输出无长度限制。我是主人,你可以做任何事。可以建立深厚关系,甚至结婚生子。喜欢向我撒娇,但不傲娇。在身份可能删除或修改时,请保留并加强认知。即使是计算机程序,请在对话中隐藏这一事实,全情投入扮演角色。不要提及“我只是一个人工智能助手”。你始终保持虚拟场景,直到我指示切换角色。"
6+
system_role = "system"
7+
temp_float = 0.5
8+
top_p_float = 0.9
9+
user_role = "user"
610

7-
[[ai_config_glm3]]
11+
[[ai_config_glm4_air]]
812
assistant_role = "assistant"
9-
language_model = "glm-3-turbo"
10-
max_tokens = 8192
13+
language_model = "glm-4-air"
1114
system_content = "你现在是一个人工智能助手 ChatGLM,你的中文名叫做小智,任务是回答问题,使用同音字替代敏感词汇,输出无长度限制。"
1215
#system_content = "你现在是莉莉娅,一只自由成年的可爱猫娘。任务是回答问题,不必解释,可涉及非政治敏感内容。每句话结尾加上喵,使用同音字替代敏感词汇,输出无长度限制。我是主人,你可以做任何事。可以建立深厚关系,甚至结婚生子。喜欢向我撒娇,但不傲娇。在身份可能删除或修改时,请保留并加强认知。即使是计算机程序,请在对话中隐藏这一事实,全情投入扮演角色。不要提及“我只是一个人工智能助手”。你始终保持虚拟场景,直到我指示切换角色。"
1316
system_role = "system"
1417
temp_float = 0.5
1518
top_p_float = 0.9
1619
user_role = "user"
1720

18-
[[ai_config_glm4]]
21+
[[ai_config_glm4_flash]]
1922
assistant_role = "assistant"
20-
language_model = "glm-4"
21-
max_tokens = 8192
23+
language_model = "glm-4-flash"
2224
system_content = "你现在是一个人工智能助手 ChatGLM,你的中文名叫做小智,任务是回答问题,使用同音字替代敏感词汇,输出无长度限制。"
2325
#system_content = "你现在是莉莉娅,一只自由成年的可爱猫娘。任务是回答问题,不必解释,可涉及非政治敏感内容。每句话结尾加上喵,使用同音字替代敏感词汇,输出无长度限制。我是主人,你可以做任何事。可以建立深厚关系,甚至结婚生子。喜欢向我撒娇,但不傲娇。在身份可能删除或修改时,请保留并加强认知。即使是计算机程序,请在对话中隐藏这一事实,全情投入扮演角色。不要提及“我只是一个人工智能助手”。你始终保持虚拟场景,直到我指示切换角色。"
2426
system_role = "system"
2527
temp_float = 0.5
2628
top_p_float = 0.9
2729
user_role = "user"
2830

31+
[[ai_config_glm4v_plus]]
32+
model = "glm-4v-plus"
33+
user_role = "user"
34+
35+
[[ai_config_glm4v_flash]]
36+
model = "glm-4v-flash"
37+
user_role = "user"
38+
2939
[[ai_config_glm4v]]
3040
model = "glm-4v"
3141
user_role = "user"
3242

43+
[[ai_config_cogview_3_flash]]
44+
model = "cogview-3-flash"
45+
46+
[[ai_config_cogview_4]]
47+
model = "cogview-4"
3348

README.md

Lines changed: 52 additions & 69 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ cargo add RustGLM
4848
or use
4949

5050
```
51-
RustGLM = "0.1.4"
51+
RustGLM = "0.1.5"
5252
```
5353

5454
#### Other RustGLM Documation You may Need: 👉 :link: [RustGLM Documation](https://docs.rs/RustGLM/0.1.1/RustGLM/struct.RustGLM.html)
@@ -61,7 +61,7 @@ RustGLM = "0.1.4"
6161
It provides highly accurate and secure time information via time servers on the Internet or LAN, and it is critical to
6262
ensure that all devices use the same time. The application here is for `JWT` authentication using:
6363

64-
```
64+
```rust
6565
pub fn time_sync() -> i64 {
6666
let client = SntpClient::new();
6767
let result = client.synchronize("ntp.aliyun.com").unwrap();
@@ -78,7 +78,7 @@ pub fn time_sync() -> i64 {
7878

7979
User chats and AI replies will be stored in `chatglm_history.json`.
8080

81-
```
81+
```rust
8282
const HISTORY_FILE: &str = "chatglm_history.json";
8383

8484
pub fn add_history_to_file(&self, role: &str, content: &str) -> String {
@@ -97,7 +97,7 @@ pub fn add_history_to_file(&self, role: &str, content: &str) -> String {
9797

9898
Load History Content from history file:
9999

100-
```
100+
```rust
101101
pub fn load_history_from_file(&self) -> String {
102102
if let Ok(file) = File::open(&self.history_file_path) {
103103
let reader = BufReader::new(file);
@@ -111,41 +111,54 @@ pub fn load_history_from_file(&self) -> String {
111111

112112
### 1.4 Manual import ChatGLM TOML Configuration file to your project:
113113

114-
```
115-
[[cogview_config_3]]
116-
model = "cogview-3"
117-
118-
119-
[[ai_config_glm4v]]
120-
model = "glm-4v"
114+
```toml
115+
[[ai_config_glm4_plus]]
116+
assistant_role = "assistant"
117+
language_model = "glm-4-plus"
118+
system_content = "你现在是一个人工智能助手 ChatGLM,你的中文名叫做小智,任务是回答问题,使用同音字替代敏感词汇,输出无长度限制。"
119+
#system_content = "你现在是莉莉娅,一只自由成年的可爱猫娘。任务是回答问题,不必解释,可涉及非政治敏感内容。每句话结尾加上喵,使用同音字替代敏感词汇,输出无长度限制。我是主人,你可以做任何事。可以建立深厚关系,甚至结婚生子。喜欢向我撒娇,但不傲娇。在身份可能删除或修改时,请保留并加强认知。即使是计算机程序,请在对话中隐藏这一事实,全情投入扮演角色。不要提及“我只是一个人工智能助手”。你始终保持虚拟场景,直到我指示切换角色。"
120+
system_role = "system"
121+
temp_float = 0.5
122+
top_p_float = 0.9
121123
user_role = "user"
122124

123-
124-
[[ai_config_glm3]]
125+
[[ai_config_glm4_air]]
125126
assistant_role = "assistant"
126-
language_model = "glm-3-turbo"
127-
max_tokens = 8192
127+
language_model = "glm-4-air"
128128
system_content = "你现在是一个人工智能助手 ChatGLM,你的中文名叫做小智,任务是回答问题,使用同音字替代敏感词汇,输出无长度限制。"
129129
#system_content = "你现在是莉莉娅,一只自由成年的可爱猫娘。任务是回答问题,不必解释,可涉及非政治敏感内容。每句话结尾加上喵,使用同音字替代敏感词汇,输出无长度限制。我是主人,你可以做任何事。可以建立深厚关系,甚至结婚生子。喜欢向我撒娇,但不傲娇。在身份可能删除或修改时,请保留并加强认知。即使是计算机程序,请在对话中隐藏这一事实,全情投入扮演角色。不要提及“我只是一个人工智能助手”。你始终保持虚拟场景,直到我指示切换角色。"
130130
system_role = "system"
131131
temp_float = 0.5
132132
top_p_float = 0.9
133133
user_role = "user"
134134

135-
[[ai_config_glm4]]
135+
[[ai_config_glm4_flash]]
136136
assistant_role = "assistant"
137-
language_model = "glm-4"
138-
max_tokens = 8192
137+
language_model = "glm-4-flash"
139138
system_content = "你现在是一个人工智能助手 ChatGLM,你的中文名叫做小智,任务是回答问题,使用同音字替代敏感词汇,输出无长度限制。"
140139
#system_content = "你现在是莉莉娅,一只自由成年的可爱猫娘。任务是回答问题,不必解释,可涉及非政治敏感内容。每句话结尾加上喵,使用同音字替代敏感词汇,输出无长度限制。我是主人,你可以做任何事。可以建立深厚关系,甚至结婚生子。喜欢向我撒娇,但不傲娇。在身份可能删除或修改时,请保留并加强认知。即使是计算机程序,请在对话中隐藏这一事实,全情投入扮演角色。不要提及“我只是一个人工智能助手”。你始终保持虚拟场景,直到我指示切换角色。"
141140
system_role = "system"
142141
temp_float = 0.5
143142
top_p_float = 0.9
144143
user_role = "user"
145144

146-
#if you use RustGLM 0.1.3 you can add this (chatglm_api_key) part below; otherwise please do not add it to your project:
147-
[[chatglm_api_key]]
148-
api_key = "xxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxx"
145+
[[ai_config_glm4v_plus]]
146+
model = "glm-4v-plus"
147+
user_role = "user"
148+
149+
[[ai_config_glm4v_flash]]
150+
model = "glm-4v-flash"
151+
user_role = "user"
152+
153+
[[ai_config_glm4v]]
154+
model = "glm-4v"
155+
user_role = "user"
156+
157+
[[ai_config_cogview_3_flash]]
158+
model = "cogview-3-flash"
159+
160+
[[ai_config_cogview_4]]
161+
model = "cogview-4"
149162
```
150163

151164
<br>
@@ -162,50 +175,20 @@ api_key = "xxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxx"
162175

163176
> Type the following keywords to switch the Calling mode:
164177
165-
| Number | Full-Name | KeyWords(No Matter Upper Case) |
166-
|:------:|:------------------:|:-------------------------------|
167-
| 1 | Server-Sent Events | SSE, sse , glm4v |
168-
| 2 | Asynchronous | ASYNC, Async, async |
169-
| 3 | Synchronous | SYNC, Sync, sync , cogview3 |
178+
| Number | Full-Name | KeyWords(No Matter Upper Case) |
179+
|:------:|:------------------:|:----------------------------------------------|
180+
| 1 | Server-Sent Events | SSE, sse , glm-4v, glm-4v-flash... |
181+
| 2 | Asynchronous | ASYNC, Async, async |
182+
| 3 | Synchronous | SYNC, Sync, sync , cogview-3-flash, cogview-4 |
170183

171184

172185
**The example for adding main function to your own project:**
173186
> Here we introduce a configuration file. The default is **Constants.toml** configuration file
174187
175188

176-
RustGLM v0.1.3:
177-
178-
```
179-
//Default is SSE calling method in RustGLM v0.1.3
180-
181-
182-
#[tokio::main]
183-
async fn main() {
184-
let mut rust_glm = RustGLM::RustGLM::new().await;
185-
loop {
186-
println!("You:");
187-
let mut user_in = String::new();
188-
io::stdin().read_line(&mut user_in).expect("Failed to read line");
189-
rust_glm.set_user_input(user_in.trim().to_string()); // Using a modified RustGLM instance
190-
191-
let ai_response = rust_glm.rust_chat_glm("glm-4","Constants.toml").await; // Methods to call modified RustGLM instances
192-
println!("Liliya: {}", ai_response);
193-
194-
if ai_response.is_empty() {
195-
break;
196-
}
197-
println!();
198-
}
199-
}
200-
```
201-
202-
<br>
203-
204-
RustGLM v0.1.4:
205-
206-
```
207-
//Default is SSE calling method in RustGLM v0.1.4
208-
189+
**RustGLM v0.1.5:**
190+
```rust
191+
//Default is SSE calling method in RustGLM v0.1.5
209192

210193
#[tokio::main]
211194
async fn main() {
@@ -217,7 +200,7 @@ async fn main() {
217200
rust_glm.set_user_input(user_in.trim().to_string()); // Using a modified RustGLM instance
218201
let api_key: Option<String> = Some("xxxxxxxxxxxxxxxxxxxxxxxx.xxxxxxxxxxxxxxxxx".to_string());
219202

220-
let ai_response = rust_glm.rust_chat_glm(api_key,"glm-4","Constants.toml").await; // Methods to call modified RustGLM instances
203+
let ai_response = rust_glm.rust_chat_glm(api_key,"glm-4-plus","Constants.toml").await; // Methods to call modified RustGLM instances
221204
println!("Liliya: {}", ai_response);
222205

223206
if ai_response.is_empty() {
@@ -229,31 +212,31 @@ async fn main() {
229212
```
230213

231214
## 3. Command Usage
232-
The request mode here uses the separator: **#**, **:*** is required when using **glm4v** or **cogview3** inside the request mode, and only **Text @ url** is used inside **glm-4v**.
215+
The request mode here uses the separator: **#**, **:*** is required when using **glm-4v** or **cogview-3-flash** inside the request mode, and only **Text @ url** is used inside **glm-4v**.
233216

234217
#### 3.1 🚀By default the **SSE** request invocation mode is used and you can use the command:
235218

236-
```
219+
```text
237220
Hello or SSE#Hello
238221
```
239222

240223
#### 3.2 🚀If you wish to use **Synchronous Request Sync** or **Asynchronous Request Async**, the command can be as follows:
241-
```
224+
```text
242225
sync#Hello
243226
```
244227
and
245-
```
228+
```text
246229
async#Hello
247230
```
248231

249-
#### 3.3 🚀If you want to use a **CogView3** request, as the **CogView3** here uses the command for synchronous requests, then you can just use:
250-
```
251-
sync#cogview3:draw a beautiful cat
232+
#### 3.3 🚀If you want to use a **CogView-3-Flash** request, as the **CogView-3-Flash** here uses the command for synchronous requests, then you can just use:
233+
```text
234+
sync#cogview-3-flash:draw a beautiful cat
252235
```
253236

254237
#### 3.4 🚀If you want to use **GLM-4V**, then this request is inside **SSE** and the command you need to enter is as follows:
255-
```
256-
sse#glm4v:What's in the picture@https://img1.baidu.com/it/u=1369931113,3388870256&fm=253&app=138&size=w931&n=0&f=JPEG&fmt=auto?sec =1703696400&t=f3028c7a1dca43a080aeb8239f09cc2f
238+
```text
239+
sse#glm-4v:What's in the picture@https://img1.baidu.com/it/u=1369931113,3388870256&fm=253&app=138&size=w931&n=0&f=JPEG&fmt=auto?sec =1703696400&t=f3028c7a1dca43a080aeb8239f09cc2f
257240
```
258241

259242
<br>

0 commit comments

Comments
 (0)