@@ -31,7 +31,7 @@ By default, the integration uses the same authentication method configured with
31
31
ads.set_auth(auth="resource_principal")
32
32
33
33
llm = ChatOCIModelDeployment(
34
- model="odsc-llm",
34
+ model="odsc-llm", # default model name if deployed on AQUA
35
35
endpoint= f"https://modeldeployment.oci.customer-oci.com/<OCID>/predict",
36
36
# Optionally you can specify additional keyword arguments for the model, e.g. temperature and default_headers.
37
37
temperature=0.1,
@@ -46,7 +46,7 @@ Alternatively, you may use specific authentication for the model:
46
46
from ads.llm import ChatOCIModelDeployment
47
47
48
48
llm = ChatOCIModelDeployment(
49
- model="odsc-llm",
49
+ model="odsc-llm", # default model name if deployed on AQUA
50
50
endpoint= f"https://modeldeployment.oci.customer-oci.com/<OCID>/predict",
51
51
# Use security token authentication for the model
52
52
auth=ads.auth.security_token(profile="my_profile"),
@@ -65,7 +65,7 @@ Completion models take a text string as input and return a string with completions
65
65
from ads.llm import OCIModelDeploymentLLM
66
66
67
67
llm = OCIModelDeploymentLLM(
68
- model="odsc-llm",
68
+ model="odsc-llm", # default model name if deployed on AQUA
69
69
endpoint= f"https://modeldeployment.oci.customer-oci.com/<OCID>/predict",
70
70
# Optionally you can specify additional keyword arguments for the model.
71
71
max_tokens=32,
@@ -98,7 +98,7 @@ Chat models take `chat messages <https://python.langchain.com/docs/concepts/#me
98
98
from ads.llm import ChatOCIModelDeployment
99
99
100
100
llm = ChatOCIModelDeployment(
101
- model="odsc-llm",
101
+ model="odsc-llm", # default model name if deployed on AQUA
102
102
endpoint=f"<oci_model_deployment_url>/predict",
103
103
# Optionally you can specify additional keyword arguments for the model.
104
104
max_tokens=32,
@@ -137,7 +137,7 @@ The vLLM container support `tool/function calling <https://docs.vllm.ai/en/lates
137
137
from ads.llm import ChatOCIModelDeploymentVLLM, ChatTemplates
138
138
139
139
llm = ChatOCIModelDeploymentVLLM(
140
- model="odsc-llm",
140
+ model="odsc-llm", # default model name if deployed on AQUA
141
141
endpoint= f"https://modeldeployment.oci.customer-oci.com/<OCID>/predict",
142
142
# Set tool_choice to "auto" to enable tool/function calling.
143
143
tool_choice="auto",