
Fix imports for open orca #374


Closed · wants to merge 11 commits
3 changes: 2 additions & 1 deletion script/get-dataset-waymo-calibration/meta.yaml
@@ -39,7 +39,7 @@ variations:
     env:
       MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_WAYMO_CALIBRATION_PATH
       MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_WAYMO_CALIBRATION_PATH
-      MLC_DOWNLOAD_URL: mlc-waymo:waymo_preprocessed_dataset/kitti_format/testing
+      MLC_DOWNLOAD_URL: mlc_waymo:waymo_preprocessed_dataset/kitti_format/testing
     extra_cache_tags: waymo,dataset
     force_cache: true
     names:
@@ -60,6 +60,7 @@ variations:
     group: run-mode
     env:
       MLC_DOWNLOAD_MODE: dry
+      MLC_BYPASS_RCLONE_AUTH: True
   dry-run,rclone:
     env:
       MLC_DOWNLOAD_EXTRA_OPTIONS: --dry-run
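Together with the remote rename, the dry-run variation now also bypasses rclone's interactive auth step. A rough sketch of what the combined env amounts to; build_download_cmd and the target directory below are illustrative assumptions, not code from this repo:

# Sketch only: shows how the dry-run variation's env values combine.
# The MLC_* names come from the diff above; build_download_cmd is hypothetical.
def build_download_cmd(env):
    # With MLC_DOWNLOAD_MODE=dry, the rclone variation appends --dry-run,
    # so rclone lists the pending transfers without copying anything.
    return (f"rclone copy {env['MLC_DOWNLOAD_URL']} ./waymo_calibration "
            f"{env.get('MLC_DOWNLOAD_EXTRA_OPTIONS', '')}").strip()

env = {
    'MLC_DOWNLOAD_MODE': 'dry',
    'MLC_BYPASS_RCLONE_AUTH': 'True',
    'MLC_DOWNLOAD_EXTRA_OPTIONS': '--dry-run',
    'MLC_DOWNLOAD_URL': 'mlc_waymo:waymo_preprocessed_dataset/kitti_format/testing',
}
print(build_download_cmd(env))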
2 changes: 1 addition & 1 deletion script/get-dataset-waymo/meta.yaml
@@ -36,7 +36,7 @@ variations:
     env:
       MLC_DOWNLOAD_FINAL_ENV_NAME: MLC_DATASET_WAYMO_PATH
       MLC_EXTRACT_FINAL_ENV_NAME: MLC_DATASET_WAYMO_PATH
-      MLC_DOWNLOAD_URL: mlc-waymo:waymo_preprocessed_dataset/kitti_format
+      MLC_DOWNLOAD_URL: mlc_waymo:waymo_preprocessed_dataset/kitti_format
     extra_cache_tags: waymo,dataset
     force_cache: true
     names:
2 changes: 1 addition & 1 deletion script/get-preprocessed-dataset-openorca/customize.py
@@ -1,5 +1,5 @@
 from mlc import utils
-from mlc.utils import *
+from utils import *
 import os
 import shutil

3 changes: 2 additions & 1 deletion script/get-rclone-config/customize.py
@@ -19,7 +19,8 @@ def preprocess(i):
     if env.get('MLC_RCLONE_CONFIG_CMD', '') != '':
         run_cmds.append(env['MLC_RCLONE_CONFIG_CMD'])

-    if env.get('MLC_RCLONE_CONNECT_CMD', '') != '':
+    if env.get('MLC_RCLONE_CONNECT_CMD', '') != '' and not is_true(
+            env.get('MLC_BYPASS_RCLONE_AUTH', '')):
         run_cmds.append(env['MLC_RCLONE_CONNECT_CMD'])

     env['MLC_RUN_CMD'] = ' && '.join(run_cmds)
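The effect of the new guard: when MLC_BYPASS_RCLONE_AUTH is truthy, the reconnect command never enters MLC_RUN_CMD, so dry runs no longer trigger an interactive OAuth prompt. A self-contained sketch of that behavior; the is_true stand-in below only approximates the helper the script imports from the repo's utils:

def is_true(value):
    # Stand-in for the repo's is_true helper (assumed semantics).
    return str(value).strip().lower() in ('true', 'yes', '1')

env = {
    'MLC_RCLONE_CONNECT_CMD': 'rclone config reconnect mlc_waymo:',
    'MLC_BYPASS_RCLONE_AUTH': 'True',
}

run_cmds = []
if env.get('MLC_RCLONE_CONNECT_CMD', '') != '' and not is_true(
        env.get('MLC_BYPASS_RCLONE_AUTH', '')):
    run_cmds.append(env['MLC_RCLONE_CONNECT_CMD'])

print(run_cmds)  # [] -- the reconnect step is skipped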
4 changes: 2 additions & 2 deletions script/get-rclone-config/meta.yaml
@@ -30,8 +30,8 @@ variations:
       MLC_RCLONE_CONNECT_CMD: 'rclone config reconnect mlc-llama3-1:'
   waymo:
     env:
-      MLC_RCLONE_CONFIG_CMD: 'rclone config create mlc-waymo drive config_is_local=false scope=<<<MLC_RCLONE_DRIVE_SCOPE>>> root_folder_id=1xbfnaUurFeXliFFl1i1gj48eRU2NDiH5'
-      MLC_RCLONE_CONNECT_CMD: 'rclone config reconnect mlc-waymo:'
+      MLC_RCLONE_CONFIG_CMD: 'rclone config create mlc_waymo drive config_is_local=false scope=<<<MLC_RCLONE_DRIVE_SCOPE>>> root_folder_id=1xbfnaUurFeXliFFl1i1gj48eRU2NDiH5'
+      MLC_RCLONE_CONNECT_CMD: 'rclone config reconnect mlc_waymo:'
   config-name.#:
     env:
       MLC_RCLONE_CONFIG_CMD: 'rclone config create # drive config_is_local=false scope=<<<MLC_RCLONE_DRIVE_SCOPE>>> root_folder_id=<<<MLC_RCLONE_DRIVE_FOLDER_ID>>>'
72 changes: 72 additions & 0 deletions script/openai-call/customize.py
@@ -0,0 +1,72 @@
+from mlc import utils
+import os
+import subprocess
+import json
+
+
+import yaml
+
+
+def write_openai_yaml(model, system_prompt, user_prompt,
+                      filename='openai-prompt.yaml'):
+    data = {
+        'model': model,
+        'messages': [
+            {
+                'role': 'system',
+                'content': system_prompt
+            },
+            {
+                'role': 'user',
+                'content': user_prompt
+            }
+        ],
+        'max_tokens': 200,
+        'temperature': 0.7
+    }
+
+    with open(filename, 'w', encoding='utf-8') as f:
+        yaml.dump(data, f, sort_keys=False, allow_unicode=True)
+
+
+def preprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    if 'MLC_OPENAI_CONFIG_PATH' not in env or not os.path.exists(
+            env['MLC_OPENAI_CONFIG_PATH']):
+        if 'user_prompt' in state:
+            model = env.get('MLC_OPENAI_MODEL', 'gpt-4o')
+            user_prompt = state['user_prompt']
+            system_prompt = state.get(
+                'system_prompt',
+                'You are an AI agent expected to answer questions correctly')
+            write_openai_yaml(
+                model,
+                system_prompt,
+                user_prompt,
+                'tmp-openai-prompt.yaml')
+            env['MLC_OPENAI_CONFIG_PATH'] = 'tmp-openai-prompt.yaml'
+
+    os_info = i['os_info']
+
+    env['MLC_RUN_CMD'] = f"""{env['MLC_PYTHON_BIN_WITH_PATH']} {os.path.join(env['MLC_TMP_CURRENT_SCRIPT_PATH'], 'openai_call.py')} """
+
+    return {'return': 0}
+
+
+def postprocess(i):
+
+    env = i['env']
+    state = i['state']
+
+    filename = 'tmp-openai-results.json'
+    with open(filename, 'r', encoding='utf-8') as f:
+        data = json.load(f)
+
+    state['MLC_OPENAI_RESPONSE'] = data['content']
+
+    os_info = i['os_info']
+
+    return {'return': 0}
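For reference, the YAML that write_openai_yaml emits has the shape below. This is a self-contained mirror of the function's yaml.dump call, not a run of the script itself, and the prompt text is arbitrary:

# Self-contained check of the YAML shape write_openai_yaml produces
# (mirrors the data dict and dump options in the function above).
import yaml

data = {
    'model': 'gpt-4o',
    'messages': [
        {'role': 'system',
         'content': 'You are an AI agent expected to answer questions correctly'},
        {'role': 'user', 'content': 'What is MLPerf?'},
    ],
    'max_tokens': 200,
    'temperature': 0.7,
}
print(yaml.dump(data, sort_keys=False, allow_unicode=True))
# model: gpt-4o
# messages:
# - role: system
#   content: You are an AI agent expected to answer questions correctly
# - role: user
#   content: What is MLPerf?
# max_tokens: 200
# temperature: 0.7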
35 changes: 35 additions & 0 deletions script/openai-call/meta.yaml
@@ -0,0 +1,35 @@
+alias: openai-call
+automation_alias: script
+automation_uid: 5b4e0237da074764
+category: MLC Script Template
+deps:
+  - tags: get,python3
+    names:
+      - python
+      - python3
+new_env_keys: []
+new_state_keys:
+  - MLC_OPENAI_RESPONSE
+post_deps: []
+posthook_deps: []
+prehook_deps: []
+tags:
+  - query
+  - openai
+  - openai-call
+  - call
+input_mapping:
+  api_key: MLC_OPENAI_API_KEY
+  config_path: MLC_OPENAI_CONFIG_PATH
+  system_prompt: MLC_OPENAI_SYSTEM_PROMPT
+  user_prompt: MLC_OPENAI_USER_PROMPT
+  model: MLC_OPENAI_MODEL
+tests:
+  run_inputs: []
+uid: 8d341b0a14a64d94
+variations:
+  openai:
+    group: api_provider
+    default: true
+    env:
+      MLC_OPENAI_API_URL: 'https://api.openai.com/v1/chat/completions'
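The input_mapping block is what turns script inputs into MLC_* environment variables for customize.py and openai_call.py. A small sketch of that translation; the key names are taken from the file above, while the invocation mechanics are deliberately simplified:

# Illustration of input_mapping: script inputs become MLC_* env keys.
input_mapping = {
    'api_key': 'MLC_OPENAI_API_KEY',
    'config_path': 'MLC_OPENAI_CONFIG_PATH',
    'system_prompt': 'MLC_OPENAI_SYSTEM_PROMPT',
    'user_prompt': 'MLC_OPENAI_USER_PROMPT',
    'model': 'MLC_OPENAI_MODEL',
}

inputs = {'user_prompt': 'What is MLPerf?', 'model': 'gpt-4o'}  # example inputs
env = {input_mapping[k]: v for k, v in inputs.items()}
print(env)
# {'MLC_OPENAI_USER_PROMPT': 'What is MLPerf?', 'MLC_OPENAI_MODEL': 'gpt-4o'}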
73 changes: 73 additions & 0 deletions script/openai-call/openai_call.py
@@ -0,0 +1,73 @@
+import requests
+import yaml
+import os
+import json
+
+
+def openai_call(message=None):
+    try:
+        api_key = os.environ['MLC_OPENAI_API_KEY']
+        url = os.environ['MLC_OPENAI_API_URL']
+        config_path = os.environ.get('MLC_OPENAI_CONFIG_PATH')
+
+        # Load config if it exists
+        if config_path and os.path.exists(config_path):
+            try:
+                with open(config_path, 'r') as file:
+                    data = yaml.safe_load(file)
+            except Exception as e:
+                return {"error": f"Error reading config file: {str(e)}"}
+
+            if os.environ.get('MLC_OPENAI_CONFIG_MODIFY', '') == 'yes':
+                try:
+                    data['messages'][1]['content'] = data['messages'][1]['content'].replace(
+                        "{{ MESSAGE }}", message or "")
+                except Exception as e:
+                    return {"error": f"Config format issue: {str(e)}"}
+        else:
+            system_prompt = os.environ.get(
+                'MLC_OPENAI_SYSTEM_PROMPT',
+                'You are an AI agent expected to correctly answer the asked question')
+            user_prompt = message or os.environ.get(
+                'MLC_OPENAI_USER_PROMPT', '')
+            data = {
+                "model": os.environ.get('MLC_OPENAI_MODEL', 'gpt-4.1'),
+                "messages": [
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": user_prompt}
+                ]
+            }
+
+        headers = {
+            'Content-Type': 'application/json',
+            'Authorization': f'Bearer {api_key}'
+        }
+
+        response = requests.post(url, json=data, headers=headers)
+        response.raise_for_status()
+        result = response.json()
+        content = result['choices'][0]['message']['content']
+
+        with open('tmp-openai-results.json', 'w', encoding='utf-8') as f:
+            json.dump({'content': content}, f, ensure_ascii=False, indent=2)
+
+        return {"content": content}
+
+    except requests.exceptions.RequestException as e:
+        return {"error": f"Request error: {str(e)}"}
+
+    except KeyError as e:
+        return {"error": f"Missing key in response: {str(e)}"}
+
+    except Exception as e:
+        return {"error": f"Unexpected error: {str(e)}"}
+
+
+def main():
+    result = openai_call()
+    if 'error' in result:
+        raise Exception(result['error'])
+
+
+if __name__ == '__main__':
+    main()
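A quick smoke test for openai_call; this assumes openai_call.py is importable from the working directory and that a real key replaces the placeholder (with the placeholder, the call returns the error dict instead of content):

import os
from openai_call import openai_call  # assumes the file above is on the path

os.environ['MLC_OPENAI_API_URL'] = 'https://api.openai.com/v1/chat/completions'
os.environ.setdefault('MLC_OPENAI_API_KEY', '<your-key>')  # placeholder

result = openai_call('What is MLPerf?')
print(result.get('content') or result.get('error'))
# On success, the same content is also written to tmp-openai-results.json,
# which postprocess() in customize.py reads back into state.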
23 changes: 23 additions & 0 deletions script/openai-call/run.bat
@@ -0,0 +1,23 @@
+@echo off
+setlocal enabledelayedexpansion
+
+:: Add your run commands here... (subroutines are defined below)
+call :run "%MLC_RUN_CMD%"
+exit /b %ERRORLEVEL%
+
+:: Subroutine: exit if the last command failed
+:exit_if_error
+if %ERRORLEVEL% NEQ 0 exit /b %ERRORLEVEL%
+exit /b 0
+
+:: Subroutine: run a command
+:run
+echo Running:
+echo %1
+echo.
+
+if /I "%MLC_FAKE_RUN%" NEQ "yes" (
+  call %1
+  call :exit_if_error
+)
+exit /b 0
17 changes: 17 additions & 0 deletions script/openai-call/run.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+function exit_if_error() {
+    # Capture the caller's status before test overwrites $?
+    local status=$?; test ${status} -eq 0 || exit ${status}
+}
+
+function run() {
+    echo "Running: "
+    echo "$1"
+    echo ""
+    if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
+        eval "$1"
+        exit_if_error
+    fi
+}
+
+# Add your run commands here...
+run "$MLC_RUN_CMD"