Contest 2025 gemini call #403

Merged (4 commits, May 13, 2025)
63 changes: 63 additions & 0 deletions script/gemini_call/customize.py
@@ -0,0 +1,63 @@
from mlc import utils
import os
import json
import yaml


def write_gemini_yaml(model, system_prompt, user_prompt,
                      filename='gemini-prompt.yaml'):
    data = {
        'model': model,
        'contents': [
            {
                'role': 'user',
                'parts': [
                    {'text': f"{system_prompt}\n\n{user_prompt}"}
                ]
            }
        ],
        'generationConfig': {
            'temperature': 0.7,
            'maxOutputTokens': 200
        }
    }

    with open(filename, 'w', encoding='utf-8') as f:
        yaml.dump(data, f, sort_keys=False, allow_unicode=True)


def preprocess(i):
    env = i['env']
    state = i['state']

    if 'MLC_GEMINI_CONFIG_PATH' not in env or not os.path.exists(
            env['MLC_GEMINI_CONFIG_PATH']):
        if 'user_prompt' in state:
            model = env.get('MLC_GEMINI_MODEL', 'gemini-2.0-flash')
            user_prompt = state['user_prompt']
            system_prompt = state.get(
                'system_prompt',
                'You are an AI agent expected to answer questions correctly')
            write_gemini_yaml(
                model,
                system_prompt,
                user_prompt,
                'tmp-gemini-prompt.yaml')
            env['MLC_GEMINI_CONFIG_PATH'] = 'tmp-gemini-prompt.yaml'

    env['MLC_RUN_CMD'] = f'{env["MLC_PYTHON_BIN_WITH_PATH"]} "{os.path.join(env["MLC_TMP_CURRENT_SCRIPT_PATH"], "gemini_call.py")}"'

    return {'return': 0}


def postprocess(i):
    env = i['env']
    state = i['state']

    filename = 'tmp-gemini-results.json'
    with open(filename, 'r', encoding='utf-8') as f:
        data = json.load(f)

    state['MLC_GEMINI_RESPONSE'] = data['content']
    os_info = i['os_info']
    return {'return': 0}
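
For reference, a minimal sketch (not part of the PR) of the payload structure that write_gemini_yaml() writes to tmp-gemini-prompt.yaml; the prompt text below is illustrative only.

# Illustrative only: mirrors the structure produced by write_gemini_yaml();
# the real prompt text comes from state['user_prompt'] / state['system_prompt'].
import yaml

example = {
    'model': 'gemini-2.0-flash',
    'contents': [
        {'role': 'user',
         'parts': [{'text': 'You are an AI agent expected to answer questions correctly\n\nWhat is 2 + 2?'}]}
    ],
    'generationConfig': {'temperature': 0.7, 'maxOutputTokens': 200}
}

print(yaml.dump(example, sort_keys=False, allow_unicode=True))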
95 changes: 95 additions & 0 deletions script/gemini_call/gemini_call.py
@@ -0,0 +1,95 @@
import requests
import os
import json
import yaml


def extract_prompts(yaml_path):
    with open(yaml_path, 'r', encoding='utf-8') as f:
        data = yaml.safe_load(f)

    full_text = data['contents'][0]['parts'][0]['text']

    # Split at "Question Text:"
    if "Question Text:" not in full_text:
        raise ValueError("Expected 'Question Text:' marker not found.")

    system_prompt, question_part = full_text.split("Question Text:", 1)

    # Trim whitespace
    system_prompt = system_prompt.strip()
    user_prompt = question_part.strip()

    return system_prompt, user_prompt


def gemini_call(message=None):
    try:
        api_key = os.environ['MLC_GEMINI_API_KEY']
        url = f"https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key={api_key}"
        config_path = os.environ.get('MLC_GEMINI_CONFIG_PATH')

        # Load config if it exists
        if config_path and os.path.exists(config_path):
            try:
                with open(config_path, 'r', encoding="utf-8") as file:
                    data = yaml.safe_load(file)
            except Exception as e:
                return {"error": f"Error reading config file: {str(e)}"}

            if os.environ.get('MLC_GEMINI_CONFIG_MODIFY', '') == 'yes':
                try:
                    data['messages'][1]['content'] = data['messages'][1]['content'].replace(
                        "{{ MESSAGE }}", message or "")
                except Exception as e:
                    return {"error": f"Config format issue: {str(e)}"}

        # Load prompts
        system_prompt, user_prompt = extract_prompts(config_path)

        # Combine both in first message
        full_prompt = f"{system_prompt}\n\n{user_prompt}"

        data = {
            "contents": [
                {
                    "role": "user",
                    "parts": [
                        {"text": full_prompt}
                    ]
                }
            ]
        }

        headers = {
            'Content-Type': 'application/json'
        }

        response = requests.post(url, json=data, headers=headers)
        response.raise_for_status()
        result = response.json()

        content = result['candidates'][0]['content']['parts'][0]['text']

        with open('tmp-gemini-results.json', 'w', encoding='utf-8') as f:
            json.dump({'content': content}, f, ensure_ascii=False, indent=2)

        return {"content": content}

    except requests.exceptions.RequestException as e:
        return {"error": f"Request error: {str(e)}"}

    except KeyError as e:
        return {"error": f"Missing key in response: {str(e)}"}

    except Exception as e:
        return {"error": f"Unexpected error: {str(e)}"}


def main():
    result = gemini_call()
    if 'error' in result:
        raise Exception(result['error'])


if __name__ == '__main__':
    main()
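
A hedged usage sketch (not part of the PR): calling gemini_call() directly, outside the MLC framework. The API key and config path are placeholders; note that extract_prompts() requires the prompt text in the config YAML to contain the literal marker "Question Text:".

# Standalone usage sketch; assumes gemini_call.py is importable from the current directory.
import os
from gemini_call import gemini_call

os.environ['MLC_GEMINI_API_KEY'] = '<your-api-key>'              # placeholder, never commit real keys
os.environ['MLC_GEMINI_CONFIG_PATH'] = 'tmp-gemini-prompt.yaml'  # prompt text must include "Question Text:"

result = gemini_call()
if 'error' in result:
    print('Call failed:', result['error'])
else:
    print(result['content'])  # the same text is written to tmp-gemini-results.json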
36 changes: 36 additions & 0 deletions script/gemini_call/meta.yaml
@@ -0,0 +1,36 @@
alias: gemini_call
automation_alias: script
automation_uid: 5b4e0237da074764
category: MLC Script Template
deps:
  - tags: get,python3
    names:
      - python
      - python3
new_env_keys: []
new_state_keys:
  - MLC_GEMINI_RESPONSE
post_deps: []
posthook_deps: []
prehook_deps: []
tags:
  - gemini-call
  - query
input_mapping:
  api_key: MLC_GEMINI_API_KEY
  config_path: MLC_GEMINI_CONFIG_PATH
  system_prompt: MLC_GEMINI_SYSTEM_PROMPT
  user_prompt: MLC_GEMINI_USER_PROMPT
  model: MLC_GEMINI_MODEL
tests:
  run_inputs: []
uid: 1cfe3d0658364a2b
variations:
  gemini:
    group: api_provider
    default: true
    env:
      MLC_GEMINI_API_URL: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.0-flash:generateContent?key='
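
For context, a simplified illustration (not part of the PR) of the key-to-environment-variable correspondence that input_mapping declares; the actual mapping is performed by the MLC automation framework, and the example inputs are made up.

# Simplified illustration of meta.yaml's input_mapping; example inputs are hypothetical.
input_mapping = {
    'api_key': 'MLC_GEMINI_API_KEY',
    'config_path': 'MLC_GEMINI_CONFIG_PATH',
    'system_prompt': 'MLC_GEMINI_SYSTEM_PROMPT',
    'user_prompt': 'MLC_GEMINI_USER_PROMPT',
    'model': 'MLC_GEMINI_MODEL',
}

cli_inputs = {'user_prompt': 'What is 2 + 2?', 'model': 'gemini-2.0-flash'}
env = {input_mapping[name]: value for name, value in cli_inputs.items()}
# env -> {'MLC_GEMINI_USER_PROMPT': 'What is 2 + 2?', 'MLC_GEMINI_MODEL': 'gemini-2.0-flash'}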



1 change: 1 addition & 0 deletions script/gemini_call/run.bat
@@ -0,0 +1 @@
%MLC_RUN_CMD%
17 changes: 17 additions & 0 deletions script/gemini_call/run.sh
@@ -0,0 +1,17 @@
#!/bin/bash
function exit_if_error() {
  test $? -eq 0 || exit $?
}

function run() {
  echo "Running: "
  echo "$1"
  echo ""
  if [[ ${MLC_FAKE_RUN} != 'yes' ]]; then
    eval "$1"
    exit_if_error
  fi
}

# Add your run commands here...
run "$MLC_RUN_CMD"