
Commit 7e60a0b

fix the imports
1 parent f9163f7 commit 7e60a0b


docs/source/user_guide/large_language_model/retrieval.rst

Lines changed: 22 additions & 10 deletions
@@ -19,7 +19,9 @@ The following code snippet shows how to use the Generative AI Embedding Models:
 .. code-block:: python3

+    from ads.llm import GenerativeAIEmbeddings
     import ads
+
     ads.set_auth("resource_principal")

     oci_embedings = GenerativeAIEmbeddings(
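For reference, the embedding snippet as it reads after this change, assembled into one runnable sketch; the compartment OCID and service endpoint are the documentation's own placeholders, the example texts are made up, and `oci_embedings` keeps the spelling used in the source file:

    from ads.llm import GenerativeAIEmbeddings
    import ads

    # Authenticate with the resource principal, as in the documented snippet.
    ads.set_auth("resource_principal")

    oci_embedings = GenerativeAIEmbeddings(
        compartment_id="ocid1.compartment.####",
        client_kwargs=dict(service_endpoint="https://generativeai.aiservice.us-chicago-1.oci.oraclecloud.com"),
    )

    # embed_documents() takes a list of texts and returns one embedding vector per text.
    vectors = oci_embedings.embed_documents(["Oracle Cloud Infrastructure", "Generative AI"])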
@@ -42,8 +44,10 @@ With the OCI OpenSearch and OCI Generative Embedding, you can do semantic search
 .. code-block:: python3

+    from langchain.vectorstores import OpenSearchVectorSearch
     import os
-    os.environ['OCI_OPENSEARCH_USERNAME'] = "username"
+    # Saving credentials as environment variables is not recommended. In production, store them in Vault instead.
+    os.environ['OCI_OPENSEARCH_USERNAME'] = "username"
     os.environ['OCI_OPENSEARCH_PASSWORD'] = "password"
     os.environ['OCI_OPENSEARCH_VERIFY_CERTS'] = "False"
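A minimal sketch of how these credentials feed the OpenSearch vector store used later in this file; the cluster URL and index name are hypothetical, and the constructor keywords assume LangChain's standard `OpenSearchVectorSearch` pass-through to the OpenSearch client:

    from langchain.vectorstores import OpenSearchVectorSearch
    import os

    # For illustration only; in production read these from Vault instead.
    os.environ['OCI_OPENSEARCH_USERNAME'] = "username"
    os.environ['OCI_OPENSEARCH_PASSWORD'] = "password"
    os.environ['OCI_OPENSEARCH_VERIFY_CERTS'] = "False"

    opensearch_vector_search = OpenSearchVectorSearch(
        opensearch_url="https://<opensearch-host>:9200",   # hypothetical cluster endpoint
        index_name="<index_name>",                         # hypothetical index
        embedding_function=oci_embedings,                  # the GenerativeAIEmbeddings object from above
        http_auth=(os.environ["OCI_OPENSEARCH_USERNAME"],
                   os.environ["OCI_OPENSEARCH_PASSWORD"]),
        verify_certs=os.environ["OCI_OPENSEARCH_VERIFY_CERTS"],
    )

    # Semantic search over the "embeds"/"text" fields that the retriever below also uses.
    results = opensearch_vector_search.similarity_search(
        "What is a compartment?", k=3, vector_field="embeds", text_field="text"
    )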
@@ -132,6 +136,7 @@ Similarly, you can use FAISS Vector Store as a retriever to build a retrieval QA
     from langchain.chains import RetrievalQA
     from ads.llm import GenerativeAI
+    import ads

     ads.set_auth("resource_principal")
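These imports typically come together as a retrieval QA chain; a hedged sketch under two assumptions: that `GenerativeAI` takes the same compartment/endpoint arguments as `GenerativeAIEmbeddings` shown elsewhere in this diff, and that the FAISS index is built the same way as in the FAISS example further down:

    from ads.llm import GenerativeAI, GenerativeAIEmbeddings
    from langchain.chains import RetrievalQA
    from langchain.vectorstores import FAISS
    import ads

    ads.set_auth("resource_principal")

    # Placeholder compartment OCID and service endpoint.
    kwargs = dict(
        compartment_id="ocid1.compartment.####",
        client_kwargs=dict(service_endpoint="https://generativeai.aiservice.us-chicago-1.oci.oraclecloud.com"),
    )
    oci_embedings = GenerativeAIEmbeddings(**kwargs)
    oci_llm = GenerativeAI(**kwargs)  # assumed to accept the same arguments

    # Toy corpus, embedded and indexed the same way as the FAISS example below.
    texts = ["OCI Data Science integrates with LangChain."]
    text_embedding_pairs = list(zip(texts, oci_embedings.embed_documents(texts)))
    db = FAISS.from_embeddings(text_embedding_pairs, oci_embedings)

    qa = RetrievalQA.from_chain_type(
        llm=oci_llm,
        chain_type="stuff",
        retriever=db.as_retriever(),
        chain_type_kwargs={"verbose": True},
    )
    print(qa.run("What does OCI Data Science integrate with?"))  # hypothetical question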
@@ -171,6 +176,9 @@ The following code snippet shows how to use ``OpenSearchVectorSearch`` with envi
 .. code-block:: python3

+    from langchain.vectorstores import OpenSearchVectorSearch
+    import os
+
     os.environ['OCI_OPENSEARCH_USERNAME'] = "username"
     os.environ['OCI_OPENSEARCH_PASSWORD'] = "password"
     os.environ['OCI_OPENSEARCH_VERIFY_CERTS'] = "False"
@@ -188,7 +196,7 @@ The following code snippet shows how to use ``OpenSearchVectorSearch`` with envi
 .. admonition:: Deployment
     :class: note

-    During deployment, it is very important that you remember to pass in those environment variables as well:
+    During deployment, it is very important that you pass in those environment variables as well, or, preferably, retrieve them from the Vault in score.py, which is more secure:

     .. code-block:: python3
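The added wording recommends reading credentials from the Vault inside score.py rather than baking them into the deployment as environment variables. That pattern is not shown in this file; one possible sketch using the plain OCI Python SDK, with placeholder secret OCIDs:

    import base64
    import os

    import oci

    def read_vault_secret(secret_ocid: str) -> str:
        """Fetch and base64-decode a secret from OCI Vault using resource principal auth."""
        signer = oci.auth.signers.get_resource_principals_signer()
        client = oci.secrets.SecretsClient(config={}, signer=signer)
        bundle = client.get_secret_bundle(secret_id=secret_ocid).data
        return base64.b64decode(bundle.secret_bundle_content.content).decode("utf-8")

    # In score.py, populate the variables the chain expects before it is loaded.
    os.environ["OCI_OPENSEARCH_USERNAME"] = read_vault_secret("ocid1.vaultsecret.####")  # placeholder OCID
    os.environ["OCI_OPENSEARCH_PASSWORD"] = read_vault_secret("ocid1.vaultsecret.####")  # placeholder OCID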
@@ -206,9 +214,13 @@ Here is an example code snippet for deployment of Retrieval QA using OpenSearch
 .. code-block:: python3

-    from langchain.vectorstores import OpenSearchVectorSearch
     from ads.llm import GenerativeAIEmbeddings, GenerativeAI
+    from ads.llm.deploy import ChainDeployment
+    from langchain.chains import RetrievalQA
+    from langchain.vectorstores import OpenSearchVectorSearch
+
     import ads
+    import os

     ads.set_auth("resource_principal")
@@ -221,8 +233,7 @@ Here is an example code snippet for deployment of Retrieval QA using OpenSearch
         compartment_id="ocid1.compartment.####",
         client_kwargs=dict(service_endpoint="https://generativeai.aiservice.us-chicago-1.oci.oraclecloud.com") # this can be omitted after Generative AI service is GA.
     )
-
-    import os
+    # Saving credentials as environment variables is not recommended. In production, store them in Vault instead.
     os.environ['OCI_OPENSEARCH_USERNAME'] = "username"
     os.environ['OCI_OPENSEARCH_PASSWORD'] = "password"
     os.environ['OCI_OPENSEARCH_VERIFY_CERTS'] = "True" # make sure this is capitalized.
@@ -238,7 +249,7 @@ Here is an example code snippet for deployment of Retrieval QA using OpenSearch
         verify_certs=os.environ["OCI_OPENSEARCH_VERIFY_CERTS"],
         ca_certs=os.environ["OCI_OPENSEARCH_CA_CERTS"],
     )
-    from langchain.chains import RetrievalQA
+
     retriever = opensearch_vector_search.as_retriever(search_kwargs={"vector_field": "embeds",
                                                                      "text_field": "text",
                                                                      "k": 3,
@@ -251,7 +262,7 @@ Here is an example code snippet for deployment of Retrieval QA using OpenSearch
             "verbose": True
         }
     )
-    from ads.llm.deploy import ChainDeployment
+
     model = ChainDeployment(qa)
     model.prepare(force_overwrite=True,
                   inference_conda_env="<custom_conda_environment_uri>",
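The diff stops at `model.prepare(...)`. The remaining lifecycle steps are not part of this commit; a hedged sketch, assuming `ChainDeployment` follows the usual ADS model workflow of prepare, save, deploy, predict:

    # Continues from the `model = ChainDeployment(qa)` / `model.prepare(...)` lines above.
    model.save()    # save the chain artifact to the model catalog
    model.deploy()  # create the model deployment

    # Input format depends on the chain; a RetrievalQA-style chain takes a question.
    print(model.predict("What is OCI OpenSearch?"))  # hypothetical question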
@@ -278,12 +289,14 @@ Here is an example code snippet for deployment of Retrieval QA using FAISS as a
 .. code-block:: python3

-    import ads
     from ads.llm import GenerativeAIEmbeddings, GenerativeAI
+    from ads.llm.deploy import ChainDeployment
     from langchain.document_loaders import TextLoader
     from langchain.text_splitter import CharacterTextSplitter
     from langchain.vectorstores import FAISS
     from langchain.chains import RetrievalQA
+
+    import ads

     ads.set_auth("resource_principal")
     oci_embedings = GenerativeAIEmbeddings(
@@ -308,7 +321,7 @@ Here is an example code snippet for deployment of Retrieval QA using FAISS as a
         embeddings.extend(oci_embedings.embed_documents(subdocs))

     texts = [item.page_content for item in docs]
-    text_embedding_pairs = [(text, embed) for text, embed in zip(texts, embeddings)]
+    text_embedding_pairs = [(text, embed) for text, embed in zip(texts, embeddings)]
     db = FAISS.from_embeddings(text_embedding_pairs, oci_embedings)

     retriever = db.as_retriever()
@@ -321,7 +334,6 @@ Here is an example code snippet for deployment of Retrieval QA using FAISS as a
         }
     )

-    from ads.llm.deploy import ChainDeployment
     model = ChainDeployment(qa)
     model.prepare(force_overwrite=True,
                   inference_conda_env="<custom_conda_environment_uri>",
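One practical wrinkle when deploying a FAISS-backed chain is that the in-memory index must be available to score.py. A hedged sketch of persisting and reloading it with LangChain's FAISS helpers; the folder name is a placeholder, and depending on the LangChain version `load_local` may require an extra `allow_dangerous_deserialization` flag:

    # Persist the index next to the model artifact so score.py can reload it.
    db.save_local("faiss_index")

    # Later, for example inside score.py:
    reloaded_db = FAISS.load_local("faiss_index", oci_embedings)
    retriever = reloaded_db.as_retriever()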
