.. _vector_store:

########################
Vector Store Integration
########################

.. versionadded:: 2.9.1

The current version of LangChain does not support serialization of any vector stores. This is a problem when you want to deploy a LangChain application that uses a vector store as one of its components with the Data Science Model Deployment service. To solve this problem, we extended serialization support to the following vector stores:

- ``OpenSearchVectorSearch``
- ``FAISS``

OpenSearchVectorSearch Serialization
------------------------------------

LangChain does not automatically support serialization of ``OpenSearchVectorSearch``. However, ADS provides a way to serialize it. To serialize ``OpenSearchVectorSearch``, you need to pass the credentials in through environment variables. The following constructor parameters can be set through the corresponding environment variables:

- ``http_auth``: (``OCI_OPENSEARCH_USERNAME``, ``OCI_OPENSEARCH_PASSWORD``)
- ``verify_certs``: ``OCI_OPENSEARCH_VERIFY_CERTS``
- ``ca_certs``: ``OCI_OPENSEARCH_CA_CERTS``

The following code snippet shows how to use ``OpenSearchVectorSearch`` with environment variables:

.. code-block:: python3

    import os

    from langchain.vectorstores import OpenSearchVectorSearch

    os.environ['OCI_OPENSEARCH_USERNAME'] = "username"
    os.environ['OCI_OPENSEARCH_PASSWORD'] = "password"
    os.environ['OCI_OPENSEARCH_VERIFY_CERTS'] = "False"

    # ``oci_embeddings`` is an embedding model, created as shown in the
    # deployment example below.
    INDEX_NAME = "your_index_name"
    opensearch_vector_search = OpenSearchVectorSearch(
        "https://localhost:9200",
        embedding_function=oci_embeddings,
        index_name=INDEX_NAME,
        engine="lucene",
        http_auth=(os.environ["OCI_OPENSEARCH_USERNAME"], os.environ["OCI_OPENSEARCH_PASSWORD"]),
        verify_certs=os.environ["OCI_OPENSEARCH_VERIFY_CERTS"],
    )

.. admonition:: Deployment
   :class: note

   During deployment, it is very important that you remember to pass in those environment variables as well, so that the restored vector store can pick up its credentials:

   .. code-block:: python3

       # ``model`` is the ChainDeployment instance created in the
       # deployment example below.
       model.deploy(deployment_log_group_id="ocid1.loggroup.####",
                    deployment_access_log_id="ocid1.log.####",
                    deployment_predict_log_id="ocid1.log.####",
                    environment_variables={"OCI_OPENSEARCH_USERNAME": "<oci_opensearch_username>",
                                           "OCI_OPENSEARCH_PASSWORD": "<oci_opensearch_password>",
                                           "OCI_OPENSEARCH_VERIFY_CERTS": "<oci_opensearch_verify_certs>"})

OpenSearchVectorSearch Deployment
---------------------------------

Here is an example code snippet for OpenSearchVectorSearch deployment:

.. code-block:: python3

    import os

    import ads
    from ads.llm import GenerativeAIEmbeddings, GenerativeAI
    from ads.llm.deploy import ChainDeployment
    from langchain.chains import RetrievalQA
    from langchain.vectorstores import OpenSearchVectorSearch

    ads.set_auth("resource_principal")

    oci_embeddings = GenerativeAIEmbeddings(
        compartment_id="ocid1.compartment.oc1..aaaaaaaapvb3hearqum6wjvlcpzm5ptfxqa7xfftpth4h72xx46ygavkqteq",
        client_kwargs=dict(service_endpoint="https://generativeai.aiservice.us-chicago-1.oci.oraclecloud.com"),  # this can be omitted after the Generative AI service is GA.
    )

    oci_llm = GenerativeAI(
        compartment_id="ocid1.compartment.oc1..aaaaaaaapvb3hearqum6wjvlcpzm5ptfxqa7xfftpth4h72xx46ygavkqteq",
        client_kwargs=dict(service_endpoint="https://generativeai.aiservice.us-chicago-1.oci.oraclecloud.com"),  # this can be omitted after the Generative AI service is GA.
    )

    os.environ['OCI_OPENSEARCH_USERNAME'] = "username"
    os.environ['OCI_OPENSEARCH_PASSWORD'] = "password"
    os.environ['OCI_OPENSEARCH_VERIFY_CERTS'] = "True"  # make sure the value is capitalized ("True"/"False").
    os.environ['OCI_OPENSEARCH_CA_CERTS'] = "path/to/oci_opensearch_ca.pem"

    INDEX_NAME = "your_index_name"
    opensearch_vector_search = OpenSearchVectorSearch(
        "https://localhost:9200",  # your cluster endpoint
        embedding_function=oci_embeddings,
        index_name=INDEX_NAME,
        engine="lucene",
        http_auth=(os.environ["OCI_OPENSEARCH_USERNAME"], os.environ["OCI_OPENSEARCH_PASSWORD"]),
        verify_certs=os.environ["OCI_OPENSEARCH_VERIFY_CERTS"],
        ca_certs=os.environ["OCI_OPENSEARCH_CA_CERTS"],
    )

    # Point the retriever at the index fields that hold the embeddings
    # and the raw text, returning the top 3 matches.
    retriever = opensearch_vector_search.as_retriever(
        search_kwargs={
            "vector_field": "embeds",
            "text_field": "text",
            "k": 3,
            "size": 3,
        }
    )
    qa = RetrievalQA.from_chain_type(
        llm=oci_llm,
        chain_type="stuff",
        retriever=retriever,
        chain_type_kwargs={
            "verbose": True
        },
    )

    model = ChainDeployment(qa)
    model.prepare(
        force_overwrite=True,
        inference_conda_env="your_conda_pack",
    )

    model.save()
    res = model.verify("your prompt")
    model.deploy(
        deployment_log_group_id="ocid1.loggroup.####",
        deployment_access_log_id="ocid1.log.####",
        deployment_predict_log_id="ocid1.log.####",
        environment_variables={
            "OCI_OPENSEARCH_USERNAME": "<oci_opensearch_username>",
            "OCI_OPENSEARCH_PASSWORD": "<oci_opensearch_password>",
            "OCI_OPENSEARCH_VERIFY_CERTS": "<oci_opensearch_verify_certs>",
            "OCI_OPENSEARCH_CA_CERTS": "<oci_opensearch_ca_certs>",
        },
    )

    model.predict("your prompt")

FAISS Serialization
-------------------

If your documents are not too large and you don't have an OCI OpenSearch cluster, you can use ``FAISS`` as your in-memory vector store, which can also perform similarity search very efficiently. With ``FAISS``, you can simply use it and deploy it as it is, as shown in the sketch below.
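
Since ``FAISS`` runs in memory, you can query it directly before deploying anything. The following is a minimal sketch, not part of the deployment flow; it assumes ``oci_embeddings`` is the ``GenerativeAIEmbeddings`` instance created in the deployment example below, and the sample texts and query are placeholders:

.. code-block:: python3

    from langchain.vectorstores import FAISS

    # Build a small in-memory FAISS store from raw texts; the embeddings
    # are computed with the assumed ``oci_embeddings`` model.
    db = FAISS.from_texts(
        [
            "OCI Data Science supports model deployment.",
            "FAISS performs efficient in-memory similarity search.",
        ],
        oci_embeddings,
    )

    # Retrieve the document most similar to the query.
    results = db.similarity_search("How do I deploy a model?", k=1)
    print(results[0].page_content)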

FAISS Deployment
----------------

Here is an example code snippet for FAISS deployment:

.. code-block:: python3

    import ads
    from ads.llm import GenerativeAIEmbeddings, GenerativeAI
    from ads.llm.deploy import ChainDeployment
    from langchain.chains import RetrievalQA
    from langchain.document_loaders import TextLoader
    from langchain.text_splitter import CharacterTextSplitter
    from langchain.vectorstores import FAISS

    ads.set_auth("resource_principal")
    oci_embeddings = GenerativeAIEmbeddings(
        compartment_id="ocid1.compartment.####",
        client_kwargs=dict(service_endpoint="https://generativeai.aiservice.us-chicago-1.oci.oraclecloud.com"),  # this can be omitted after the Generative AI service is GA.
    )

    oci_llm = GenerativeAI(
        compartment_id="ocid1.compartment.####",
        client_kwargs=dict(service_endpoint="https://generativeai.aiservice.us-chicago-1.oci.oraclecloud.com"),  # this can be omitted after the Generative AI service is GA.
    )

    # Load the source document and split it into chunks.
    loader = TextLoader("your.txt")
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
    docs = text_splitter.split_documents(documents)

    # Embed the chunks in batches of 16, since the embedding endpoint
    # accepts a limited number of documents per request.
    batch_size = 16
    embeddings = []
    for i in range(0, len(docs), batch_size):
        batch = [doc.page_content for doc in docs[i:i + batch_size]]
        embeddings.extend(oci_embeddings.embed_documents(batch))

    # Build the FAISS store from the precomputed (text, embedding) pairs.
    texts = [doc.page_content for doc in docs]
    text_embedding_pairs = list(zip(texts, embeddings))
    db = FAISS.from_embeddings(text_embedding_pairs, oci_embeddings)

    retriever = db.as_retriever()
    qa = RetrievalQA.from_chain_type(
        llm=oci_llm,
        chain_type="stuff",
        retriever=retriever,
        chain_type_kwargs={
            "verbose": True
        },
    )

    model = ChainDeployment(qa)
    model.prepare(
        force_overwrite=True,
        inference_conda_env="your_conda_pack",
    )

    model.save()
    res = model.verify("your prompt")
    model.deploy(
        deployment_log_group_id="ocid1.loggroup.####",
        deployment_access_log_id="ocid1.log.####",
        deployment_predict_log_id="ocid1.log.####",
    )

    model.predict("your prompt")