Skip to content

Commit e98a02a

Browse files
committed
Add Gradio application components for image analysis and configuration
1 parent 1678967 commit e98a02a

File tree

17 files changed

+1639
-105
lines changed

17 files changed

+1639
-105
lines changed

.github/workflows/deploy-to-huggingface.yml

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,13 +14,13 @@ jobs:
1414
with:
1515
fetch-depth: 0
1616
lfs: true
17-
17+
1818
- name: Setup Python
1919
uses: actions/setup-python@v4
2020
with:
21-
python-version: '3.11'
22-
cache: 'pip'
23-
21+
python-version: "3.11"
22+
cache: "pip"
23+
2424
- name: Config git
2525
run: |
2626
git config --global credential.helper store
@@ -43,8 +43,8 @@ jobs:
4343
SPACE_NAME: ${{ secrets.SPACE_NAME }}
4444
run: |
4545
# Create repository URL
46-
REPO_URL="https://huggingface.co/spaces/$HF_USERNAME/template-python"
47-
46+
REPO_URL="https://huggingface.co/spaces/$HF_USERNAME/llm-image"
47+
4848
# Add Hugging Face as a remote and push
4949
git remote add space $REPO_URL || git remote set-url space $REPO_URL
50-
git push -f space main
50+
git push -f space main

Dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ ENTRYPOINT []
3232

3333
RUN python setup.py
3434

35-
EXPOSE 8080
35+
EXPOSE 7860
3636
# Run the FastAPI application by default
3737
# Uses `fastapi dev` to enable hot-reloading when the `watch` sync occurs
3838
# Uses `--host 0.0.0.0` to allow access from outside the container

data/image.jpg

180 KB
Loading

main.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ def main() -> None:
1010
"""Main application entry point."""
1111
try:
1212
print("Start app...")
13-
import src.modules.api
13+
import src.modules.gradio_app
1414

1515
except Exception as e:
1616
logging.error(f"Application failed to start: {e}", exc_info=True)

notebooks/notes.ipynb

Lines changed: 211 additions & 0 deletions
Large diffs are not rendered by default.

pyproject.toml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,16 @@
11
[project]
2-
name = "template-python"
2+
name = "llm-image"
33
version = "0.1.0"
44
description = "Add your description here"
55
readme = "README.md"
66
requires-python = ">=3.11"
77
dependencies = [
88
"fastapi[standard]>=0.115.8",
9+
"gradio>=5.3.0",
910
"ipykernel>=6.29.5",
11+
"litellm>=1.61.13",
1012
"numpy>=2.2.3",
13+
"openai>=1.63.2",
1114
"pytest>=8.3.4",
1215
]
1316

src/modules/gradio_app/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
import src.modules.gradio_app.index
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
"""Components for the Gradio application UI"""
2+
3+
from .config_panel import create_config_panel
4+
from .prompt_panel import create_prompt_panel
5+
from .image_panel import create_image_panel
6+
7+
__all__ = ['create_config_panel', 'create_prompt_panel', 'create_image_panel']
Lines changed: 48 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,48 @@
1+
import gradio as gr
2+
from typing import Tuple
3+
4+
from src.modules.gradio_app.utils.api import update_model_list
5+
6+
def create_config_panel() -> Tuple[gr.Textbox, gr.Textbox, gr.Dropdown]:
    """Build the connection-settings panel: endpoint URL, API key, model picker.

    Returns:
        The (endpoint_url, api_key, model_dropdown) components so the caller
        can wire them into the analysis callback.
    """
    with gr.Column():
        endpoint_url = gr.Textbox(
            label="LiteLLM Endpoint URL",
            placeholder="Enter your LiteLLM endpoint URL",
        )
        api_key = gr.Textbox(
            label="API Key",
            placeholder="Enter your API key",
            type="password",
        )
        refresh_models = gr.Button("Refresh Models")
        model_dropdown = gr.Dropdown(
            label="Select Model",
            choices=[],
            interactive=True,
        )

        # Re-query the endpoint and replace the dropdown's choices in place.
        def update_models(endpoint_url, api_key):
            return gr.Dropdown(choices=update_model_list(endpoint_url, api_key))

        # The model list refreshes on explicit request and also whenever
        # either credential field changes, so the dropdown tracks the
        # currently configured endpoint.
        wiring = dict(
            fn=update_models,
            inputs=[endpoint_url, api_key],
            outputs=model_dropdown,
        )
        refresh_models.click(**wiring)
        endpoint_url.change(**wiring)
        api_key.change(**wiring)

    return endpoint_url, api_key, model_dropdown
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
import gradio as gr
2+
from typing import Tuple
3+
4+
def create_image_panel() -> Tuple[gr.Image, gr.Button, gr.Textbox]:
    """Lay out the image upload / analysis-result panel.

    Left column holds a PIL image uploader and the trigger button; the
    right column holds a 10-line textbox that receives the analysis text.

    Returns:
        The (image_input, analyze_button, output) components for wiring.
    """
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(label="Upload Image", type="pil")
            analyze_button = gr.Button("Analyze Image")

        with gr.Column():
            output = gr.Textbox(label="Analysis Result", lines=10)

    return image_input, analyze_button, output
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
import gradio as gr
2+
from typing import Tuple
3+
4+
def create_prompt_panel() -> Tuple[gr.Textbox, gr.Textbox, gr.Slider]:
    """Build the prompt-entry panel with hideable advanced options.

    The system-prompt box and temperature slider start hidden and are
    toggled together by the "Show Advanced Options" checkbox.

    Returns:
        The (prompt_input, system_prompt, temperature) components.
    """
    with gr.Column():
        prompt_input = gr.Textbox(
            label="Custom Prompt",
            placeholder="Enter your prompt for image analysis...",
            lines=3,
            value="Please analyze this image in detail.",
        )
        system_prompt = gr.Textbox(
            label="System Prompt (Optional)",
            placeholder="Enter a system prompt to guide the model...",
            lines=2,
            visible=False,  # revealed by the advanced-options toggle
        )
        with gr.Row():
            advanced_checkbox = gr.Checkbox(
                label="Show Advanced Options",
                value=False,
            )
            temperature = gr.Slider(
                minimum=0.0,
                maximum=2.0,
                value=1.0,
                step=0.1,
                label="Temperature",
                visible=False,  # revealed by the advanced-options toggle
            )

        # Show or hide both advanced widgets as one unit.
        def toggle_advanced(show_advanced):
            return {
                system_prompt: gr.update(visible=show_advanced),
                temperature: gr.update(visible=show_advanced),
            }

        advanced_checkbox.change(
            fn=toggle_advanced,
            inputs=[advanced_checkbox],
            outputs=[system_prompt, temperature],
        )

    return prompt_input, system_prompt, temperature

src/modules/gradio_app/index.py

Lines changed: 59 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,59 @@
1+
import gradio as gr
2+
3+
from src.modules.gradio_app.components.config_panel import create_config_panel
4+
from src.modules.gradio_app.components.prompt_panel import create_prompt_panel
5+
from src.modules.gradio_app.components.image_panel import create_image_panel
6+
from src.modules.gradio_app.utils.analysis import analyze_image
7+
8+
def create_gradio_app():
    """Assemble the full Gradio Blocks app from the panel components.

    Returns:
        The constructed (but not yet launched) gr.Blocks application.
    """
    with gr.Blocks() as app:
        gr.Markdown("# Image Analysis with LLM")

        # Image upload + result panel
        image_input, analyze_button, output = create_image_panel()

        # Endpoint / API key / model configuration panel
        with gr.Row():
            endpoint_url, api_key, model_dropdown = create_config_panel()

        # Prompt and advanced-options panel
        with gr.Row():
            prompt_input, system_prompt, temperature = create_prompt_panel()

        # Every UI control feeds into analyze_image so the request always
        # reflects the current state of the form.
        analysis_inputs = [
            image_input,
            endpoint_url,
            api_key,
            model_dropdown,
            prompt_input,
            system_prompt,
            temperature,
        ]
        analyze_button.click(
            fn=analyze_image,
            inputs=analysis_inputs,
            outputs=output,
        )

    return app
40+
41+
app = create_gradio_app()

# Server settings for Docker deployment
server_port = 7860  # Standard Gradio port (matches the Dockerfile EXPOSE)
server_name = "0.0.0.0"  # Bind all interfaces so the container is reachable


def main():
    """Launch the Gradio application with Docker-friendly settings."""
    launch_options = {
        "server_name": server_name,
        "server_port": server_port,
        "share": False,  # Sharing is unnecessary inside Docker
        "auth": None,  # Can be configured if authentication is needed
        "ssl_verify": False,  # Skip SSL verification on the internal Docker network
        "show_error": True,
        "favicon_path": None,
    }
    app.launch(**launch_options)


# Launching at import time is intentional: main.py starts the app simply
# by importing this module.
main()
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
"""Utility functions for the Gradio application"""
2+
3+
from .api import get_available_models, update_model_list
4+
from .image import encode_image_to_base64
5+
from .analysis import analyze_image
6+
7+
__all__ = [
8+
'get_available_models',
9+
'update_model_list',
10+
'encode_image_to_base64',
11+
'analyze_image'
12+
]
Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
from typing import Optional
2+
import openai
3+
from .image import encode_image_to_base64
4+
5+
def analyze_image(
    image,
    endpoint_url: str,
    api_key: str,
    selected_model: str,
    prompt: str,
    system_prompt: Optional[str] = None,
    temperature: float = 1.0
):
    """Analyze an image via an OpenAI-compatible (LiteLLM) chat endpoint.

    Args:
        image: PIL image to analyze (presumably from gr.Image(type="pil")).
        endpoint_url: Base URL of the OpenAI-compatible endpoint.
        api_key: Bearer token for the endpoint.
        selected_model: Model id for the chat completion.
        prompt: User prompt sent alongside the image.
        system_prompt: Optional system message prepended to the conversation.
        temperature: Sampling temperature forwarded to the model.

    Returns:
        The model's text response, or a human-readable error string —
        errors are returned (not raised) so they render in the Gradio
        output textbox.
    """
    if not endpoint_url or not api_key:
        return "Please configure both the endpoint URL and API key"

    if not selected_model:
        return "Please select a model"

    # Guard against clicking "Analyze" before an image has been uploaded;
    # otherwise encode_image_to_base64(None) would raise.
    if image is None:
        return "Please upload an image"

    print(f"Analyzing image with model: {selected_model}")

    try:
        client = openai.OpenAI(
            api_key=api_key,
            base_url=endpoint_url
        )

        # Convert the image to a base64 PNG data URL
        base64_image = encode_image_to_base64(image)

        messages = []

        # Add system prompt if provided
        if system_prompt:
            messages.append({
                "role": "system",
                "content": system_prompt
            })

        # The OpenAI vision API requires "image_url" to be an object with
        # a "url" key, not a bare string.
        messages.append({
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url", "image_url": {"url": base64_image}}
            ]
        })

        # Call the endpoint with the assembled conversation
        response = client.chat.completions.create(
            model=selected_model,
            messages=messages,
            temperature=temperature
        )

        return str(response.choices[0].message.content)

    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"Error analyzing image: {str(e)}"

src/modules/gradio_app/utils/api.py

Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
from typing import List
2+
import requests
3+
4+
def get_available_models(endpoint_url: str, api_key: str) -> List[str]:
    """Fetch the model ids exposed by a LiteLLM ``/models`` endpoint.

    Returns an empty list on any HTTP or network failure so the UI
    degrades gracefully instead of raising.
    """
    try:
        response = requests.get(
            f"{endpoint_url.rstrip('/')}/models",
            headers={"Authorization": f"Bearer {api_key}"},
            # Without a timeout an unreachable endpoint would hang the
            # Gradio callback indefinitely.
            timeout=10,
        )
        if response.status_code == 200:
            models = response.json()
            return [model["id"] for model in models["data"]]
        return []
    except Exception as e:
        print(f"Error fetching models: {e}")
        return []


def update_model_list(endpoint_url: str, api_key: str) -> List[str]:
    """Refresh the model list; returns [] until both credentials are set."""
    if not endpoint_url or not api_key:
        return []
    return get_available_models(endpoint_url, api_key)

src/modules/gradio_app/utils/image.py

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
import base64
2+
from io import BytesIO
3+
from PIL import Image
4+
5+
def encode_image_to_base64(image: Image.Image) -> str:
    """Serialize a PIL image as a base64-encoded PNG data URL."""
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    encoded = base64.b64encode(buffer.getvalue()).decode()
    return "data:image/png;base64," + encoded

0 commit comments

Comments
 (0)