Skip to content

Commit 4f1f92d

Browse files
committed
Refactoring to reduce the use of global variables in functions
1 parent 60eb98d commit 4f1f92d

File tree

1 file changed

+38
-52
lines changed

1 file changed

+38
-52
lines changed

chatgpt.sh

Lines changed: 38 additions & 52 deletions
Original file line numberDiff line numberDiff line change
@@ -63,24 +63,24 @@ handle_error() {
6363
# request to OpenAI API completions endpoint function
6464
# $1 should be the request prompt
6565
request_to_completions() {
66-
request_prompt="$1"
66+
local prompt="$1"
6767

68-
response=$(curl https://api.openai.com/v1/completions \
68+
curl https://api.openai.com/v1/completions \
6969
-sS \
7070
-H 'Content-Type: application/json' \
7171
-H "Authorization: Bearer $OPENAI_KEY" \
7272
-d '{
7373
"model": "'"$MODEL"'",
74-
"prompt": "'"${request_prompt}"'",
74+
"prompt": "'"$prompt"'",
7575
"max_tokens": '$MAX_TOKENS',
7676
"temperature": '$TEMPERATURE'
77-
}')
77+
}'
7878
}
7979

8080
# request to OpenAI API image generations endpoint function
8181
# $1 should be the prompt
8282
request_to_image() {
83-
prompt="$1"
83+
local prompt="$1"
8484
image_response=$(curl https://api.openai.com/v1/images/generations \
8585
-sS \
8686
-H 'Content-Type: application/json' \
@@ -95,8 +95,8 @@ request_to_image() {
9595
# request to OpenAI API chat completions endpoint function
9696
# $1 should be the message(s) formatted with role and content
9797
request_to_chat() {
98-
message="$1"
99-
response=$(curl https://api.openai.com/v1/chat/completions \
98+
local message="$1"
99+
curl https://api.openai.com/v1/chat/completions \
100100
-sS \
101101
-H 'Content-Type: application/json' \
102102
-H "Authorization: Bearer $OPENAI_KEY" \
@@ -108,20 +108,19 @@ request_to_chat() {
108108
],
109109
"max_tokens": '$MAX_TOKENS',
110110
"temperature": '$TEMPERATURE'
111-
}')
111+
}'
112112
}
113113

114114
# build chat context before each request for /completions (all models except
115115
# gpt turbo and gpt 4)
116-
# $1 should be the chat context
117-
# $2 should be the escaped prompt
116+
# $1 should be the escaped request prompt,
117+
# it extends $chat_context
118118
build_chat_context() {
119-
chat_context="$1"
120-
escaped_prompt="$2"
119+
local escaped_request_prompt="$1"
121120
if [ -z "$chat_context" ]; then
122-
chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_prompt"
121+
chat_context="$CHAT_INIT_PROMPT\nQ: $escaped_request_prompt"
123122
else
124-
chat_context="$chat_context\nQ: $escaped_prompt"
123+
chat_context="$chat_context\nQ: $escaped_request_prompt"
125124
fi
126125
request_prompt="${chat_context//$'\n'/\\n}"
127126
}
@@ -130,13 +129,12 @@ build_chat_context() {
130129
# gpt turbo and gpt 4)
131130
# builds chat context from response,
132131
# keeps chat context length under max token limit
133-
# $1 should be the chat context
134-
# $2 should be the response data (only the text)
132+
# * $1 should be the escaped response data
133+
# * it extends $chat_context
135134
maintain_chat_context() {
136-
chat_context="$1"
137-
response_data="$2"
135+
local escaped_response_data="$1"
138136
# add response to chat context as answer
139-
chat_context="$chat_context${chat_context:+\n}\nA: ${response_data//$'\n'/\\n}"
137+
chat_context="$chat_context${chat_context:+\n}\nA: $escaped_response_data"
140138
# check prompt length, 1 word =~ 1.3 tokens
141139
# reserving 100 tokens for next user prompt
142140
while (($(echo "$chat_context" | wc -c) * 1, 3 > (MAX_TOKENS - 100))); do
@@ -149,36 +147,29 @@ maintain_chat_context() {
149147

150148
# build user chat message function for /chat/completions (gpt models)
151149
# builds chat message before request,
152-
# $1 should be the chat message
153-
# $2 should be the escaped prompt
150+
# $1 should be the escaped request prompt,
151+
# it extends $chat_message
154152
build_user_chat_message() {
155-
chat_message="$1"
156-
escaped_prompt="$2"
153+
local escaped_request_prompt="$1"
157154
if [ -z "$chat_message" ]; then
158-
chat_message="{\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
155+
chat_message="{\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
159156
else
160-
chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_prompt\"}"
157+
chat_message="$chat_message, {\"role\": \"user\", \"content\": \"$escaped_request_prompt\"}"
161158
fi
162-
163-
request_prompt="$chat_message"
164159
}
165160

166161
# adds the assistant response to the message in (chatml) format
167162
# for /chat/completions (gpt models)
168163
# keeps messages length under max token limit
169-
# $1 should be the chat message
170-
# $2 should be the response data (only the text)
164+
# * $1 should be the escaped response data
165+
# * it extends and potentially shrinks $chat_message
171166
add_assistant_response_to_chat_message() {
172-
chat_message="$1"
173-
local local_response_data="$2"
174-
175-
# replace new line characters from response with space
176-
local_response_data=$(echo "$local_response_data" | tr '\n' ' ')
167+
local escaped_response_data="$1"
177168
# add response to chat context as answer
178-
chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$local_response_data\"}"
169+
chat_message="$chat_message, {\"role\": \"assistant\", \"content\": \"$escaped_response_data\"}"
179170

180171
# transform to json array to parse with jq
181-
chat_message_json="[ $chat_message ]"
172+
local chat_message_json="[ $chat_message ]"
182173
# check prompt length, 1 word =~ 1.3 tokens
183174
# reserving 100 tokens for next user prompt
184175
while (($(echo "$chat_message" | wc -c) * 1, 3 > (MAX_TOKENS - 100))); do
@@ -334,15 +325,12 @@ while $running; do
334325
echo -e "$OVERWRITE_PROCESSING_LINE"
335326
echo -e "${CHATGPT_CYAN_LABEL}Complete details for model: ${prompt#*model:}\n ${model_data}"
336327
elif [[ "$prompt" =~ ^command: ]]; then
337-
# escape quotation marks
328+
# escape quotation marks, new lines, backslashes...
338329
escaped_prompt=$(echo "$prompt" | sed 's/"/\\"/g')
339-
# escape new lines
340-
if [[ "$prompt" =~ ^command: ]]; then
341-
escaped_prompt=${prompt#command:}
342-
request_prompt=$COMMAND_GENERATION_PROMPT${escaped_prompt//$'\n'/' '}
343-
fi
344-
build_user_chat_message "$chat_message" "$request_prompt"
345-
request_to_chat "$request_prompt"
330+
escaped_prompt=${escaped_prompt#command:}
331+
request_prompt=$COMMAND_GENERATION_PROMPT$escaped_prompt
332+
build_user_chat_message "$request_prompt"
333+
response=$(request_to_chat "$chat_message")
346334
handle_error "$response"
347335
response_data=$(echo $response | jq -r '.choices[].message.content')
348336

@@ -363,8 +351,7 @@ while $running; do
363351
eval $response_data
364352
fi
365353
fi
366-
escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
367-
add_assistant_response_to_chat_message "$chat_message" "$escaped_response_data"
354+
add_assistant_response_to_chat_message "$(echo "$response_data" | tr '\n' ' ')"
368355

369356
timestamp=$(date +"%d/%m/%Y %H:%M")
370357
echo -e "$timestamp $prompt \n$response_data \n" >>~/.chatgpt_history
@@ -375,8 +362,8 @@ while $running; do
375362
# escape new lines
376363
request_prompt=${escaped_prompt//$'\n'/' '}
377364

378-
build_user_chat_message "$chat_message" "$request_prompt"
379-
request_to_chat "$request_prompt"
365+
build_user_chat_message "$request_prompt"
366+
response=$(request_to_chat "$chat_message")
380367
handle_error "$response"
381368
response_data=$(echo "$response" | jq -r '.choices[].message.content')
382369

@@ -401,10 +388,10 @@ while $running; do
401388
request_prompt=${escaped_prompt//$'\n'/' '}
402389

403390
if [ "$CONTEXT" = true ]; then
404-
build_chat_context "$chat_context" "$escaped_prompt"
391+
build_chat_context "$request_prompt"
405392
fi
406393

407-
request_to_completions "$request_prompt"
394+
response=$(request_to_completions "$request_prompt")
408395
handle_error "$response"
409396
response_data=$(echo "$response" | jq -r '.choices[].text')
410397

@@ -420,8 +407,7 @@ while $running; do
420407
fi
421408

422409
if [ "$CONTEXT" = true ]; then
423-
escaped_response_data=$(echo "$response_data" | sed 's/"/\\"/g')
424-
maintain_chat_context "$chat_context" "$escaped_response_data"
410+
maintain_chat_context "$escaped_response_data"
425411
fi
426412

427413
timestamp=$(date +"%d/%m/%Y %H:%M")

0 commit comments

Comments (0)