Skip to content

Support OpenAI API #368

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Apr 21, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
72 changes: 72 additions & 0 deletions script/openai-call/customize.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
from mlc import utils
import os
import subprocess
import json


import yaml


def write_openai_yaml(model, system_prompt, user_prompt,
                      filename='openai-prompt.yaml',
                      max_tokens=200, temperature=0.7):
    """Serialize an OpenAI chat-completion request payload to a YAML file.

    Args:
        model: Model identifier to place in the request (e.g. 'gpt-4o').
        system_prompt: Content of the 'system' role message.
        user_prompt: Content of the 'user' role message.
        filename: Path of the YAML file to write.
        max_tokens: Completion token limit written into the payload
            (default 200, matching the previous hard-coded value).
        temperature: Sampling temperature written into the payload
            (default 0.7, matching the previous hard-coded value).
    """
    data = {
        'model': model,
        'messages': [
            {
                'role': 'system',
                'content': system_prompt
            },
            {
                'role': 'user',
                'content': user_prompt
            }
        ],
        'max_tokens': max_tokens,
        'temperature': temperature
    }

    # sort_keys=False keeps the insertion order above; allow_unicode keeps
    # non-ASCII prompt text readable instead of escaped.
    with open(filename, 'w', encoding='utf-8') as f:
        yaml.dump(data, f, sort_keys=False, allow_unicode=True)


def preprocess(i):
    """Prepare the run command and, if needed, generate a prompt config.

    If MLC_OPENAI_CONFIG_PATH is unset or does not point to an existing
    file, and the automation state carries a 'user_prompt', a temporary
    prompt YAML is written and MLC_OPENAI_CONFIG_PATH is redirected to it.
    Finally MLC_RUN_CMD is set to invoke openai_call.py with the resolved
    Python interpreter.

    Args:
        i: MLC script-automation input; the 'env' and 'state' dicts are
           read and 'env' is mutated in place.

    Returns:
        {'return': 0} per the MLC automation convention.
    """
    env = i['env']
    state = i['state']

    # Only synthesize a config when none was supplied (or the supplied
    # path is missing) and the caller actually provided a prompt.
    if 'MLC_OPENAI_CONFIG_PATH' not in env or not os.path.exists(
            env['MLC_OPENAI_CONFIG_PATH']):
        if 'user_prompt' in state:
            model = env.get('MLC_OPENAI_MODEL', 'gpt-4o')
            user_prompt = state['user_prompt']
            system_prompt = state.get(
                'system_prompt',
                'You are an AI agent expected to answer questions correctly')
            write_openai_yaml(
                model,
                system_prompt,
                user_prompt,
                'tmp-openai-prompt.yaml')
            env['MLC_OPENAI_CONFIG_PATH'] = 'tmp-openai-prompt.yaml'

    env['MLC_RUN_CMD'] = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(env['MLC_TMP_CURRENT_SCRIPT_PATH'], 'openai_call.py')} """

    return {'return': 0}


def postprocess(i):
    """Collect the API response written by openai_call.py into state.

    Reads 'tmp-openai-results.json' (produced by the run command) and
    stores its 'content' field under state['MLC_OPENAI_RESPONSE'], the
    key declared in meta.yaml's new_state_keys.

    Args:
        i: MLC script-automation input; the 'state' dict is mutated.

    Returns:
        {'return': 0} per the MLC automation convention.

    Raises:
        FileNotFoundError: if the results file was never produced.
        KeyError: if the results file lacks a 'content' field.
    """
    state = i['state']

    filename = 'tmp-openai-results.json'
    with open(filename, 'r', encoding='utf-8') as f:
        data = json.load(f)

    state['MLC_OPENAI_RESPONSE'] = data['content']

    return {'return': 0}
35 changes: 35 additions & 0 deletions script/openai-call/meta.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# MLC script metadata for the openai-call script (queries an
# OpenAI-compatible chat-completions endpoint via openai_call.py).
alias: openai-call
automation_alias: script
automation_uid: 5b4e0237da074764
category: MLC Script Template
# Resolve a Python 3 interpreter before this script runs
# (exposed to customize.py as MLC_PYTHON_BIN_WITH_PATH).
deps:
  - tags: get,python3
    names:
      - python
      - python3
new_env_keys: []
# State key published by postprocess() in customize.py.
new_state_keys:
  - MLC_OPENAI_RESPONSE
post_deps: []
posthook_deps: []
prehook_deps: []
# Tags used to select this script from the MLC CLI.
tags:
  - query
  - openai
  - openai-call
  - call
# CLI flag -> environment variable mapping.
input_mapping:
  api_key: MLC_OPENAI_API_KEY
  config_path: MLC_OPENAI_CONFIG_PATH
  system_prompt: MLC_OPENAI_SYSTEM_PROMPT
  user_prompt: MLC_OPENAI_USER_PROMPT
  model: MLC_OPENAI_MODEL
tests:
  run_inputs: []
uid: 8d341b0a14a64d94
variations:
  # Default (and currently only) API provider.
  openai:
    group: api_provider
    default: true
    env:
      MLC_OPENAI_API_URL: 'https://api.openai.com/v1/chat/completions'
73 changes: 73 additions & 0 deletions script/openai-call/openai_call.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
import requests
import yaml
import os
import json


def openai_call(message=None):
    """Send a chat-completion request to an OpenAI-compatible API.

    Reads MLC_OPENAI_API_KEY and MLC_OPENAI_API_URL from the environment.
    The request payload is loaded from the YAML file named by
    MLC_OPENAI_CONFIG_PATH when that file exists; otherwise it is built
    from MLC_OPENAI_MODEL / MLC_OPENAI_SYSTEM_PROMPT /
    MLC_OPENAI_USER_PROMPT. When MLC_OPENAI_CONFIG_MODIFY == 'yes', the
    literal '{{ MESSAGE }}' placeholder in the config's user message is
    replaced with *message*.

    On success the completion text is also written to
    'tmp-openai-results.json' for postprocess() in customize.py.

    Args:
        message: Optional user message; substituted into the config in
            modify mode, or used as the user prompt when no config exists.

    Returns:
        {'content': <completion text>} on success, or
        {'error': <description>} on any failure (this function does not
        raise).
    """
    try:
        api_key = os.environ['MLC_OPENAI_API_KEY']
        url = os.environ['MLC_OPENAI_API_URL']
        config_path = os.environ.get('MLC_OPENAI_CONFIG_PATH')

        data = None

        # Load the payload from the config file if one was provided.
        if config_path and os.path.exists(config_path):
            try:
                with open(config_path, 'r') as file:
                    data = yaml.safe_load(file)
            except Exception as e:
                return {"error": f"Error reading config file: {str(e)}"}

            if os.environ.get('MLC_OPENAI_CONFIG_MODIFY', '') == 'yes':
                try:
                    data['messages'][1]['content'] = data['messages'][1]['content'].replace(
                        "{{ MESSAGE }}", message or "")
                except Exception as e:
                    return {"error": f"Config format issue: {str(e)}"}

        # Build the payload from env vars only when no config was loaded.
        # (Previously this fallback ran whenever MLC_OPENAI_CONFIG_MODIFY
        # != 'yes', silently discarding a loaded config, and 'data' was
        # left undefined when modify mode was requested without a config.)
        if data is None:
            system_prompt = os.environ.get(
                'MLC_OPENAI_SYSTEM_PROMPT',
                'You are an AI agent expected to correctly answer the asked question')
            user_prompt = message or os.environ.get(
                'MLC_OPENAI_USER_PROMPT', '')
            data = {
                "model": os.environ.get('MLC_OPENAI_MODEL', 'gpt-4.1'),
                "messages": [
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ]
            }

        headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {api_key}'
        }

        # A timeout keeps the harness from hanging forever on a stuck
        # connection; a timeout surfaces as a RequestException below.
        response = requests.post(url, json=data, headers=headers,
                                 timeout=120)
        response.raise_for_status()
        result = response.json()
        content = result['choices'][0]['message']['content']

        # Persist the result for the postprocess step of the MLC script.
        with open('tmp-openai-results.json', 'w', encoding='utf-8') as f:
            json.dump({'content': content}, f, ensure_ascii=False, indent=2)

        return {"content": content}

    except requests.exceptions.RequestException as e:
        return {"error": f"Request error: {str(e)}"}

    except KeyError as e:
        return {"error": f"Missing key in response: {str(e)}"}

    except Exception as e:
        return {"error": f"Unexpected error: {str(e)}"}


def main():
    """CLI entry point: perform the API call and fail loudly on error.

    openai_call() reports failures by returning an 'error' key rather
    than raising, so convert that into an exception here to give the
    calling process a non-zero exit status.
    """
    outcome = openai_call()
    error_message = outcome.get('error')
    if error_message is not None:
        raise Exception(error_message)


if __name__ == '__main__':
    main()
Loading