Commit 43bc819

Merge pull request #164 from NPC-Worldwide/chris/alicanto_knowledge_graph_upgrades
some bug fixes in serve to support npc studio, and upstream in compi…
2 parents f7935db + 11b85b7 commit 43bc819

File tree

5 files changed: +406 -74 lines
examples/get_llm_response_examples.py

Lines changed: 156 additions & 0 deletions
@@ -0,0 +1,156 @@
from npcpy.llm_funcs import get_llm_response

response = get_llm_response(
    prompt="What is machine learning?",
    model="llama3.2",
    provider="ollama"
)

print("Response:", response['response'])
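
# Note on the return shape: get_llm_response returns a dict. Besides
# response['response'] (the generated text), the later examples in this
# file also read an updated response['messages'] chat history from it.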

from npcpy.llm_funcs import get_llm_response

response = get_llm_response(
    prompt="Describe what you see in this image.",
    model="gemma3:4b",
    provider="ollama",
    images=["test_data/markov_chain.png"]
)

print("Response:", response['response'])

from npcpy.llm_funcs import get_llm_response

response = get_llm_response(
    prompt="Summarize the key points in this document.",
    model="llava:latest",
    provider="ollama",
    attachments=["test_data/yuan2004.pdf"]
)

print("Response:", response['response'])

from npcpy.llm_funcs import get_llm_response

response = get_llm_response(
    prompt="Extract data from these files and highlight the most important information.",
    model="llava:7b",
    provider="ollama",
    attachments=["test_data/yuan2004.pdf", "test_data/markov_chain.png", "test_data/sample_data.csv"]
)

print("Response:", response['response'])

from npcpy.llm_funcs import get_llm_response
from npcpy.npc_compiler import NPC

# Create a simple NPC with a custom system message
npc = NPC(
    name="OCR_Assistant",
    description="An assistant specialized in document processing and OCR.",
    model="llava:7b",
    provider="ollama",
    directive="You are an expert at analyzing documents and extracting valuable information."
)

response = get_llm_response(
    prompt="What do you see in this diagram?",
    images=["test_data/markov_chain.png"],
    npc=npc
)

print("Response:", response['response'])
print("System message used:", response['messages'][0]['content'] if response['messages'] else "No system message")
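
# Design note (not in the original file): the NPC above carries its own model
# and provider, which is presumably why the get_llm_response call omits them;
# the npc argument supplies persona, model, and provider in one object.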

from npcpy.llm_funcs import get_llm_response

# Create a conversation with history
messages = [
    {"role": "system", "content": "You are a document analysis assistant."},
    {"role": "user", "content": "I have some engineering diagrams I need to analyze."},
    {"role": "assistant", "content": "I'd be happy to help analyze your engineering diagrams. Please share them with me."}
]

response = get_llm_response(
    prompt="Here's the diagram I mentioned earlier.",
    model="llava:7b",
    provider="ollama",
    messages=messages,
    attachments=["test_data/markov_chain.png"]
)

print("Response:", response['response'])
print("Updated message history length:", len(response['messages']))
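
# A minimal multi-turn sketch (not in the original file): since the call
# above returns the updated history in response['messages'], the conversation
# can presumably be continued by passing that list back in on the next call.
# The follow-up prompt here is illustrative.
followup = get_llm_response(
    prompt="Which transitions in the diagram look most likely?",
    model="llava:7b",
    provider="ollama",
    messages=response['messages']
)

print("Follow-up response:", followup['response'])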

from npcpy.llm_funcs import get_llm_response

response = get_llm_response(
    prompt="Analyze this image and give a detailed explanation.",
    model="llava:7b",
    provider="ollama",
    images=["test_data/markov_chain.png"],
    stream=True
)

# For streaming responses, you'd typically iterate through them
print("Streaming response object type:", type(response['response']))
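
# A hedged sketch of consuming the stream (not in the original file): with
# stream=True, response['response'] is assumed to be an iterable of chunks.
# The chunk shape depends on the provider; an ollama-style dict of the form
# {'message': {'content': ...}} is assumed here, with a plain-string fallback.
for chunk in response['response']:
    if isinstance(chunk, dict):
        piece = chunk.get('message', {}).get('content', '')
    else:
        piece = str(chunk)
    print(piece, end='', flush=True)
print()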

from npcpy.llm_funcs import get_llm_response

# Example paths
pdf_path = 'test_data/yuan2004.pdf'
image_path = 'test_data/markov_chain.png'
csv_path = 'test_data/sample_data.csv'

# Method 1: simple attachment-based approach, letting get_llm_response
# load the files itself
response = get_llm_response(
    'Extract and analyze all text and images from these files. What are the key concepts presented?',
    model='llava:7b',
    provider='ollama',
    attachments=[pdf_path, image_path, csv_path]
)

print("\nResponse from attachment-based approach:")
print(response['response'])
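
# Method 2, a hedged manual-loading sketch (not in the original file): load
# the tabular data yourself with pandas and inline it in the prompt instead
# of relying on attachments. Only the CSV is handled here; pandas is assumed
# to be available, and the prompt wording is illustrative.
import pandas as pd

df = pd.read_csv(csv_path)
manual_prompt = (
    "Here are the first rows of a dataset:\n"
    + df.head().to_string()
    + "\nWhat patterns stand out?"
)

manual_response = get_llm_response(
    manual_prompt,
    model='llava:7b',
    provider='ollama'
)

print("\nResponse from manual-loading approach:")
print(manual_response['response'])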
