8 files changed: +18 / -18 lines changed

File 1 of 8 (ChatBedrockAnthropic):

@@ -13,7 +13,7 @@
 # them in a file named `.env`. The `python-dotenv` package will load `.env` as
 # environment variables which can be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatBedrockAnthropic(
+chat_client = ChatBedrockAnthropic(
     model="anthropic.claude-3-sonnet-20240229-v1:0",
 )
@@ -32,5 +32,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
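As an aside, the pattern the comments above describe is unchanged by the rename: secrets live in a `.env` file, `load_dotenv()` loads them into the process environment, and `os.getenv()` reads them back. A minimal sketch follows; the key name and value are placeholders, not taken from this diff.

# .env (kept out of version control)
# ANTHROPIC_API_KEY=sk-placeholder

import os

from dotenv import load_dotenv  # provided by the python-dotenv package

load_dotenv()  # reads .env and exports its entries as environment variables
api_key = os.getenv("ANTHROPIC_API_KEY")  # returns None if the key is not set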
File 2 of 8 (ChatAzureOpenAI):

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAzureOpenAI.html
 load_dotenv()
-chat_model = ChatAzureOpenAI(
+chat_client = ChatAzureOpenAI(
     api_key=os.getenv("AZURE_OPENAI_API_KEY"),
     endpoint="https://my-endpoint.openai.azure.com",
     deployment_id="gpt-4o-mini",
@@ -37,5 +37,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
File 3 of 8 (ChatAnthropic):

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatAnthropic.html
 load_dotenv()
-chat_model = ChatAnthropic(
+chat_client = ChatAnthropic(
     api_key=os.environ.get("ANTHROPIC_API_KEY"),
     model="claude-3-7-sonnet-latest",
     system_prompt="You are a helpful assistant.",
@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
File 4 of 8 (ChatGoogle):

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatGoogle.html
 load_dotenv()
-chat_model = ChatGoogle(
+chat_client = ChatGoogle(
     api_key=os.environ.get("GOOGLE_API_KEY"),
     system_prompt="You are a helpful assistant.",
     model="gemini-2.0-flash",
@@ -33,5 +33,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
File 5 of 8 (ChatOpenAI):

@@ -15,7 +15,7 @@
 # app, or set them in a file named `.env`. The `python-dotenv` package will load `.env`
 # as environment variables which can later be read by `os.getenv()`.
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
 )
@@ -38,5 +38,5 @@
 # Define a callback to run when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
File 6 of 8 (ChatOllama):

@@ -9,7 +9,7 @@
 # ChatOllama() requires an Ollama model server to be running locally.
 # See the docs for more information on how to set up a local Ollama server.
 # https://posit-dev.github.io/chatlas/reference/ChatOllama.html
-chat_model = ChatOllama(model="llama3.2")
+chat_client = ChatOllama(model="llama3.2")

 # Set some Shiny page options
 ui.page_opts(
@@ -29,5 +29,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
File 7 of 8 (ChatOpenAI):

@@ -12,7 +12,7 @@
 # See the docs for more information on how to obtain one.
 # https://posit-dev.github.io/chatlas/reference/ChatOpenAI.html
 load_dotenv()
-chat_model = ChatOpenAI(
+chat_client = ChatOpenAI(
     api_key=os.environ.get("OPENAI_API_KEY"),
     model="gpt-4o",
     system_prompt="You are a helpful assistant.",
@@ -37,5 +37,5 @@
 # Generate a response when the user submits a message
 @chat.on_user_submit
 async def handle_user_input(user_input: str):
-    response = await chat_model.stream_async(user_input)
+    response = await chat_client.stream_async(user_input)
     await chat.append_message_stream(response)
File 8 of 8 (multi-provider get_model()):

@@ -63,15 +63,15 @@ def get_model():
     }

     if input.model() in models["openai"]:
-        chat_model = ctl.ChatOpenAI(**model_params)
+        chat_client = ctl.ChatOpenAI(**model_params)
     elif input.model() in models["claude"]:
-        chat_model = ctl.ChatAnthropic(**model_params)
+        chat_client = ctl.ChatAnthropic(**model_params)
     elif input.model() in models["google"]:
-        chat_model = ctl.ChatGoogle(**model_params)
+        chat_client = ctl.ChatGoogle(**model_params)
     else:
         raise ValueError(f"Invalid model: {input.model()}")

-    return chat_model
+    return chat_client


 @reactive.calc
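Taken together, the renamed pieces fit into a Shiny Express chat app roughly as sketched below. This is not any one of the eight files above: the imports, the ui.Chat(id="chat") construction, the chat.ui() call, and the page title are assumptions filled in around what the diff shows, so treat them as illustrative rather than exact.

import os

from chatlas import ChatOpenAI
from dotenv import load_dotenv
from shiny.express import ui

load_dotenv()  # pull OPENAI_API_KEY (and friends) from a local .env file

# The renamed object: a chatlas chat client rather than a "model"
chat_client = ChatOpenAI(
    api_key=os.environ.get("OPENAI_API_KEY"),
    model="gpt-4o",
    system_prompt="You are a helpful assistant.",
)

# Set some Shiny page options (title is a placeholder)
ui.page_opts(title="Chat example", fillable=True)

# Shiny's chat UI component
chat = ui.Chat(id="chat")
chat.ui()

# Stream the client's reply back into the chat component on each submission
@chat.on_user_submit
async def handle_user_input(user_input: str):
    response = await chat_client.stream_async(user_input)
    await chat.append_message_stream(response)

The other seven templates differ only in which chatlas constructor (ChatAnthropic, ChatGoogle, ChatOllama, and so on) builds chat_client; the on_user_submit callback is identical across them.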