
Commit b24e6ea

Update Cerebras examples to use Llama 3.3 70b (#17301)
1 parent c8c5b03 commit b24e6ea

4 files changed: +5 -5 lines changed

docs/docs/examples/llm/cerebras.ipynb

Lines changed: 1 addition & 1 deletion
@@ -103,7 +103,7 @@
 " \"Enter your Cerebras API key: \"\n",
 ")\n",
 "\n",
-"llm = Cerebras(model=\"llama3.1-70b\", api_key=os.environ[\"CEREBRAS_API_KEY\"])"
+"llm = Cerebras(model=\"llama-3.3-70b\", api_key=os.environ[\"CEREBRAS_API_KEY\"])"
 ]
 },
 {
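For readers who want to try the updated notebook cell outside Jupyter, a minimal standalone sketch follows. Only the Cerebras(...) line is taken from the diff; the getpass prompt and the environment-variable guard are assumptions about what the surrounding notebook cell does.

import os
from getpass import getpass

from llama_index.llms.cerebras import Cerebras

# Prompt for the key only if it is not already set (assumed setup;
# the notebook's preceding lines do something along these lines).
if "CEREBRAS_API_KEY" not in os.environ:
    os.environ["CEREBRAS_API_KEY"] = getpass("Enter your Cerebras API key: ")

# Model name updated by this commit: llama3.1-70b -> llama-3.3-70b
llm = Cerebras(model="llama-3.3-70b", api_key=os.environ["CEREBRAS_API_KEY"])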

llama-index-integrations/llms/llama-index-llms-cerebras/README.md

Lines changed: 2 additions & 2 deletions
@@ -44,7 +44,7 @@ import os
 from llama_index.core.llms import ChatMessage
 from llama_index.llms.cerebras import Cerebras

-llm = Cerebras(model="llama3.1-70b", api_key=os.environ["CEREBRAS_API_KEY"])
+llm = Cerebras(model="llama-3.3-70b", api_key=os.environ["CEREBRAS_API_KEY"])

 messages = [
     ChatMessage(
@@ -63,7 +63,7 @@ import os

 from llama_index.llms.cerebras import Cerebras

-llm = Cerebras(model="llama3.1-70b", api_key=os.environ["CEREBRAS_API_KEY"])
+llm = Cerebras(model="llama-3.3-70b", api_key=os.environ["CEREBRAS_API_KEY"])

 response = llm.stream_complete("What is Generative AI?")
 for r in response:
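Pieced together, the two README hunks above amount to roughly the following self-contained sketch. The ChatMessage roles and contents and the r.delta printing are illustrative assumptions, since the diff only shows the opening lines of each snippet; the Cerebras(...) constructor calls are taken verbatim from the change.

import os

from llama_index.core.llms import ChatMessage
from llama_index.llms.cerebras import Cerebras

llm = Cerebras(model="llama-3.3-70b", api_key=os.environ["CEREBRAS_API_KEY"])

# Chat example (message contents are illustrative; the diff only shows
# the start of the `messages` list).
messages = [
    ChatMessage(role="system", content="You are a helpful assistant."),
    ChatMessage(role="user", content="What is Generative AI?"),
]
print(llm.chat(messages))

# Streaming example, matching the second hunk.
response = llm.stream_complete("What is Generative AI?")
for r in response:
    print(r.delta, end="")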

llama-index-integrations/llms/llama-index-llms-cerebras/llama_index/llms/cerebras/base.py

Lines changed: 1 addition & 1 deletion
@@ -15,7 +15,7 @@ class Cerebras(OpenAILike):
     from llama_index.llms.cerebras import Cerebras

     # Set up the Cerebras class with the required model and API key
-    llm = Cerebras(model="llama3.1-70b", api_key="your_api_key")
+    llm = Cerebras(model="llama-3.3-70b", api_key="your_api_key")

     # Call the complete method with a query
     response = llm.complete("Why is fast inference important?")

llama-index-integrations/llms/llama-index-llms-cerebras/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -30,7 +30,7 @@ license = "MIT"
 name = "llama-index-llms-cerebras"
 packages = [{include = "llama_index/"}]
 readme = "README.md"
-version = "0.2.1"
+version = "0.2.2"

 [tool.poetry.dependencies]
 python = ">=3.9,<4.0"
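Once the bumped release is published, a quick way to confirm you have picked it up (assuming the PyPI distribution name matches the name field above) is:

from importlib.metadata import version

# Expected to print "0.2.2" after upgrading, e.g. with
# pip install -U llama-index-llms-cerebras
print(version("llama-index-llms-cerebras"))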

0 commit comments