Commit 69a4b67

Added IBM watsonx model support
1 parent e092920 commit 69a4b67

5 files changed: +36 -2 lines changed

.env.example

Lines changed: 4 additions & 0 deletions
@@ -30,6 +30,10 @@ UNBOUND_API_KEY=
 SiliconFLOW_ENDPOINT=https://api.siliconflow.cn/v1/
 SiliconFLOW_API_KEY=
 
+IBM_ENDPOINT=https://us-south.ml.cloud.ibm.com
+IBM_API_KEY=
+IBM_PROJECT_ID=
+
 # Set to false to disable anonymized telemetry
 ANONYMIZED_TELEMETRY=false

docker-compose.yml

Lines changed: 3 additions & 0 deletions
@@ -28,6 +28,9 @@ services:
 - ALIBABA_API_KEY=${ALIBABA_API_KEY:-}
 - MOONSHOT_ENDPOINT=${MOONSHOT_ENDPOINT:-https://api.moonshot.cn/v1}
 - MOONSHOT_API_KEY=${MOONSHOT_API_KEY:-}
+- IBM_API_KEY=${IBM_API_KEY:-}
+- IBM_ENDPOINT=${IBM_ENDPOINT:-https://us-south.ml.cloud.ibm.com}
+- IBM_PROJECT_ID=${IBM_PROJECT_ID:-}
 - BROWSER_USE_LOGGING_LEVEL=${BROWSER_USE_LOGGING_LEVEL:-info}
 - ANONYMIZED_TELEMETRY=${ANONYMIZED_TELEMETRY:-false}
 - CHROME_PATH=/usr/bin/google-chrome

requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -5,3 +5,4 @@ json-repair
 langchain-mistralai==0.2.4
 langchain-google-genai==2.0.8
 MainContentExtractor==0.0.4
+langchain-ibm==0.3.10
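
The pinned langchain-ibm package provides the ChatWatsonx chat model that the src/utils/utils.py change below imports. A minimal standalone sketch of the same call, assuming the IBM_* variables from .env.example are set and using one of the model ids this commit registers:

import os

from langchain_ibm import ChatWatsonx  # provided by langchain-ibm==0.3.10

# Endpoint, project id and API key are read from the IBM_* variables added to .env.example above.
llm = ChatWatsonx(
    model_id="meta-llama/llama-3-2-90b-vision-instruct",
    url=os.getenv("IBM_ENDPOINT", "https://us-south.ml.cloud.ibm.com"),
    project_id=os.getenv("IBM_PROJECT_ID"),
    apikey=os.getenv("IBM_API_KEY"),
    params={"temperature": 0.0, "max_tokens": 32000},
)

print(llm.invoke("Reply with one word: hello").content)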

src/utils/utils.py

Lines changed: 21 additions & 1 deletion
@@ -13,6 +13,7 @@
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_ollama import ChatOllama
 from langchain_openai import AzureChatOpenAI, ChatOpenAI
+from langchain_ibm import ChatWatsonx
 
 from .llm import DeepSeekR1ChatOpenAI, DeepSeekR1ChatOllama
 
@@ -24,7 +25,8 @@
     "google": "Google",
     "alibaba": "Alibaba",
     "moonshot": "MoonShot",
-    "unbound": "Unbound AI"
+    "unbound": "Unbound AI",
+    "ibm": "IBM"
 }
 
@@ -154,6 +156,23 @@ def get_llm_model(provider: str, **kwargs):
             base_url=base_url,
             api_key=api_key,
         )
+    elif provider == "ibm":
+        parameters = {
+            "temperature": kwargs.get("temperature", 0.0),
+            "max_tokens": kwargs.get("num_ctx", 32000)
+        }
+        if not kwargs.get("base_url", ""):
+            base_url = os.getenv("IBM_ENDPOINT", "https://us-south.ml.cloud.ibm.com")
+        else:
+            base_url = kwargs.get("base_url")
+
+        return ChatWatsonx(
+            model_id=kwargs.get("model_name", "ibm/granite-vision-3.1-2b-preview"),
+            url=base_url,
+            project_id=os.getenv("IBM_PROJECT_ID"),
+            apikey=os.getenv("IBM_API_KEY"),
+            params=parameters
+        )
     elif provider == "moonshot":
         return ChatOpenAI(
             model=kwargs.get("model_name", "moonshot-v1-32k-vision-preview"),
@@ -234,6 +253,7 @@ def get_llm_model(provider: str, **kwargs):
            "Pro/THUDM/chatglm3-6b",
            "Pro/THUDM/glm-4-9b-chat",
        ],
+        "ibm": ["meta-llama/llama-4-maverick-17b-128e-instruct-fp8","meta-llama/llama-3-2-90b-vision-instruct"]
     }

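With the branch above in place, the IBM provider is reached through the existing get_llm_model factory like every other provider. A usage sketch, assuming the repository's src/utils/utils.py module path; model_name and num_ctx mirror the defaults and mappings shown in the diff:

from src.utils.utils import get_llm_model  # import path assumed from this repository's layout

# get_llm_model falls back to IBM_ENDPOINT, IBM_PROJECT_ID and IBM_API_KEY from the
# environment when no base_url or credentials are passed explicitly.
llm = get_llm_model(
    provider="ibm",
    model_name="meta-llama/llama-4-maverick-17b-128e-instruct-fp8",
    temperature=0.0,
    num_ctx=32000,  # forwarded to watsonx as max_tokens
)

print(llm.invoke("Summarize IBM watsonx in one sentence.").content)
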
tests/test_llm_api.py

Lines changed: 7 additions & 1 deletion
@@ -41,6 +41,7 @@ def get_env_value(key, provider):
         "mistral": {"api_key": "MISTRAL_API_KEY", "base_url": "MISTRAL_ENDPOINT"},
         "alibaba": {"api_key": "ALIBABA_API_KEY", "base_url": "ALIBABA_ENDPOINT"},
         "moonshot":{"api_key": "MOONSHOT_API_KEY", "base_url": "MOONSHOT_ENDPOINT"},
+        "ibm": {"api_key": "IBM_API_KEY", "base_url": "IBM_ENDPOINT"}
     }
 
     if provider in env_mappings and key in env_mappings[provider]:
@@ -126,12 +127,17 @@ def test_moonshot_model():
     config = LLMConfig(provider="moonshot", model_name="moonshot-v1-32k-vision-preview")
     test_llm(config, "Describe this image", "assets/examples/test.png")
 
+def test_ibm_model():
+    config = LLMConfig(provider="ibm", model_name="meta-llama/llama-4-maverick-17b-128e-instruct-fp8")
+    test_llm(config, "Describe this image", "assets/examples/test.png")
+
 if __name__ == "__main__":
     # test_openai_model()
     # test_google_model()
     # test_azure_openai_model()
     #test_deepseek_model()
     # test_ollama_model()
-    test_deepseek_r1_model()
+    # test_deepseek_r1_model()
     # test_deepseek_r1_ollama_model()
     # test_mistral_model()
+    test_ibm_model()
