From 314b412a9e1f03fbe0fc5827b96331e086c48d0e Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 8 Nov 2024 14:07:36 -0800 Subject: [PATCH 01/12] Use tsp gen code --- .../search/documents/_generated/__init__.py | 11 +- .../search/documents/_generated/_client.py | 148 + .../documents/_generated/_configuration.py | 49 +- .../documents/_generated/_model_base.py | 1159 ++ .../_generated/_search_index_client.py | 100 - .../documents/_generated/_validation.py | 50 + .../search/documents/_generated/_vendor.py | 57 + .../search/documents/_generated/_version.py | 9 + .../documents/_generated/aio/__init__.py | 8 +- .../documents/_generated/aio/_client.py | 155 + .../_generated/aio/_configuration.py | 51 +- .../_generated/aio/_search_index_client.py | 102 - .../documents/_generated/aio/_vendor.py | 57 + .../_generated/aio/operations/__init__.py | 22 +- .../aio/operations/_documents_operations.py | 1103 -- .../_generated/aio/operations/_operations.py | 5435 +++++++ .../documents/_generated/models/__init__.py | 492 +- .../models/_enums.py} | 853 +- .../documents/_generated/models/_models.py | 12173 ++++++++++++++++ .../_generated/models/_models_py3.py | 2978 ---- .../models/_search_index_client_enums.py | 424 - .../_generated/operations/__init__.py | 22 +- .../operations/_documents_operations.py | 1475 -- .../_generated/operations/_operations.py | 6861 +++++++++ .../documents/_index_documents_batch.py | 10 +- .../azure/search/documents/_paging.py | 16 +- .../azure/search/documents/_search_client.py | 74 +- .../_search_indexing_buffered_sender.py | 50 +- .../azure/search/documents/_utils.py | 14 - .../aio/_index_documents_batch_async.py | 10 +- .../azure/search/documents/aio/_paging.py | 11 +- .../documents/aio/_search_client_async.py | 76 +- .../_search_indexing_buffered_sender_async.py | 50 +- .../documents/indexes/_generated/__init__.py | 27 - .../indexes/_generated/_configuration.py | 48 - .../documents/indexes/_generated/_patch.py | 20 - .../_generated/_search_service_client.py | 121 - .../indexes/_generated/_serialization.py | 2114 --- .../documents/indexes/_generated/_vendor.py | 23 - .../indexes/_generated/aio/__init__.py | 27 - .../indexes/_generated/aio/_configuration.py | 48 - .../indexes/_generated/aio/_patch.py | 20 - .../_generated/aio/_search_service_client.py | 123 - .../indexes/_generated/aio/_vendor.py | 23 - .../_generated/aio/operations/__init__.py | 35 - .../aio/operations/_aliases_operations.py | 611 - .../operations/_data_sources_operations.py | 592 - .../aio/operations/_indexers_operations.py | 953 -- .../aio/operations/_indexes_operations.py | 849 -- .../_generated/aio/operations/_patch.py | 20 - .../_search_service_client_operations.py | 94 - .../aio/operations/_skillsets_operations.py | 748 - .../operations/_synonym_maps_operations.py | 579 - .../indexes/_generated/models/__init__.py | 516 - .../indexes/_generated/models/_models_py3.py | 11729 --------------- .../indexes/_generated/models/_patch.py | 20 - .../indexes/_generated/operations/__init__.py | 35 - .../operations/_aliases_operations.py | 756 - .../operations/_data_sources_operations.py | 750 - .../operations/_indexers_operations.py | 1226 -- .../operations/_indexes_operations.py | 1059 -- .../indexes/_generated/operations/_patch.py | 20 - .../_search_service_client_operations.py | 118 - .../operations/_skillsets_operations.py | 941 -- .../operations/_synonym_maps_operations.py | 732 - .../documents/indexes/_generated/py.typed | 1 - .../documents/indexes/_search_index_client.py | 140 +- 
.../indexes/_search_indexer_client.py | 130 +- .../azure/search/documents/indexes/_utils.py | 36 +- .../indexes/aio/_search_index_client.py | 140 +- .../indexes/aio/_search_indexer_client.py | 136 +- .../documents/indexes/models/__init__.py | 4 +- .../search/documents/indexes/models/_index.py | 134 +- .../documents/indexes/models/_models.py | 513 +- .../azure/search/documents/models/__init__.py | 4 + 75 files changed, 28087 insertions(+), 32233 deletions(-) create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/_validation.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/_version.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_vendor.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py rename sdk/search/azure-search-documents/azure/search/documents/{indexes/_generated/models/_search_service_client_enums.py => _generated/models/_enums.py} (72%) create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py create mode 100644 sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_patch.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_patch.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py delete mode 100644 
sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_vendor.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_patch.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_patch.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_patch.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py delete mode 100644 sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/py.typed diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py index a19da823d02d..63ab1b900961 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -10,7 +12,10 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._search_index_client import SearchIndexClient # type: ignore +from ._client import SearchClient # type: ignore +from ._version import VERSION + +__version__ = VERSION try: from ._patch import __all__ as _patch_all @@ -20,7 +25,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "SearchIndexClient", + "SearchClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py new file mode 100644 index 000000000000..2cb95a31223c --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py @@ -0,0 +1,148 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, TYPE_CHECKING, Union +from typing_extensions import Self + +from azure.core import PipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import HttpRequest, HttpResponse + +from ._configuration import SearchClientConfiguration +from ._serialization import Deserializer, Serializer +from .operations import ( + AliasesOperationsOperations, + DataSourcesOperationsOperations, + DocumentsOperationsOperations, + IndexersOperationsOperations, + IndexesOperationsOperations, + SearchClientOperationsMixin, + SkillsetsOperationsOperations, + SynonymMapsOperationsOperations, +) + +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential + + +class SearchClient(SearchClientOperationsMixin): # pylint: disable=too-many-instance-attributes + """Client that can be used to manage and query indexes and documents, as well as + manage other resources, on a search service. 
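For reviewers tracing the rename above: the generated sub-package now re-exports `SearchClient` (instead of `SearchIndexClient`) together with a `__version__` taken from the new `_version.py`. A minimal, illustrative import sketch, not part of the patch itself:

```python
# Illustrative only: consumers of the private generated package would now import
# the renamed client; the hand-written wrappers in azure.search.documents are
# expected to absorb this change.
from azure.search.documents._generated import SearchClient, __version__

print(__version__)          # VERSION defined in _generated/_version.py
client_cls = SearchClient   # replaces the former SearchIndexClient export
```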
+ + :ivar data_sources_operations: DataSourcesOperationsOperations operations + :vartype data_sources_operations: + azure.search.documents.operations.DataSourcesOperationsOperations + :ivar indexers_operations: IndexersOperationsOperations operations + :vartype indexers_operations: azure.search.documents.operations.IndexersOperationsOperations + :ivar skillsets_operations: SkillsetsOperationsOperations operations + :vartype skillsets_operations: azure.search.documents.operations.SkillsetsOperationsOperations + :ivar synonym_maps_operations: SynonymMapsOperationsOperations operations + :vartype synonym_maps_operations: + azure.search.documents.operations.SynonymMapsOperationsOperations + :ivar indexes_operations: IndexesOperationsOperations operations + :vartype indexes_operations: azure.search.documents.operations.IndexesOperationsOperations + :ivar aliases_operations: AliasesOperationsOperations operations + :vartype aliases_operations: azure.search.documents.operations.AliasesOperationsOperations + :ivar documents_operations: DocumentsOperationsOperations operations + :vartype documents_operations: azure.search.documents.operations.DocumentsOperationsOperations + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-11-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: + _endpoint = "{endpoint}" + self._config = SearchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.data_sources_operations = DataSourcesOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.indexers_operations = IndexersOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.skillsets_operations = SkillsetsOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.synonym_maps_operations = SynonymMapsOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.indexes_operations = IndexesOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.aliases_operations = AliasesOperationsOperations( + self._client, self._config, self._serialize, 
self._deserialize + ) + self.documents_operations = DocumentsOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + + def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.HttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + def close(self) -> None: + self._client.close() + + def __enter__(self) -> Self: + self._client.__enter__() + return self + + def __exit__(self, *exc_details: Any) -> None: + self._client.__exit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py index 8eadf8655228..945878c06c91 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py @@ -1,46 +1,63 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any +from typing import Any, TYPE_CHECKING, Union +from azure.core.credentials import AzureKeyCredential from azure.core.pipeline import policies -VERSION = "unknown" +from ._version import VERSION +if TYPE_CHECKING: + from azure.core.credentials import TokenCredential -class SearchIndexClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for SearchIndexClient. + +class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for SearchClient. Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: The endpoint URL of the search service. Required. + :param endpoint: Service host. Required. :type endpoint: str - :param index_name: The name of the index. Required. - :type index_name: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. 
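The `send_request` method added above is the standard azure-core escape hatch for issuing raw requests through the client's pipeline. A hedged sketch of how it would be called; the endpoint, key, and `/servicestats` path are placeholders chosen for illustration, not values taken from this patch:

```python
from azure.core.credentials import AzureKeyCredential
from azure.core.rest import HttpRequest
from azure.search.documents._generated import SearchClient

client = SearchClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))

# Relative URLs are resolved against the client's formatted endpoint.
request = HttpRequest("GET", "/servicestats?api-version=2024-11-01-preview")
response = client.send_request(request)   # azure.core.rest.HttpResponse
response.raise_for_status()
print(response.json())
```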
+ :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials.TokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-11-01-preview". Note that overriding this default value may result in unsupported + behavior. :paramtype api_version: str """ - def __init__(self, endpoint: str, index_name: str, **kwargs: Any) -> None: + def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: api_version: str = kwargs.pop("api_version", "2024-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if index_name is None: - raise ValueError("Parameter 'index_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint - self.index_name = index_name + self.credential = credential self.api_version = api_version - kwargs.setdefault("sdk_moniker", "searchindexclient/{}".format(VERSION)) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://search.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "search-documents/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "api-key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) @@ -51,3 +68,5 @@ def _configure(self, **kwargs: Any) -> None: self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py new file mode 100644 index 000000000000..e6a2730f9276 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py @@ -0,0 +1,1159 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
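A sketch of the credential inference added in `SearchClientConfiguration` above: an `AzureKeyCredential` is mapped to an `api-key` header policy, while any credential exposing `get_token` is mapped to a bearer-token policy with the `https://search.azure.com/.default` scope. The endpoint and key values are placeholders, and `DefaultAzureCredential` (from azure-identity) is used only as a convenient token credential:

```python
from azure.core.credentials import AzureKeyCredential
from azure.core.pipeline.policies import AzureKeyCredentialPolicy, BearerTokenCredentialPolicy
from azure.identity import DefaultAzureCredential
from azure.search.documents._generated._configuration import SearchClientConfiguration

key_config = SearchClientConfiguration(
    "https://<service>.search.windows.net", AzureKeyCredential("<api-key>")
)
assert isinstance(key_config.authentication_policy, AzureKeyCredentialPolicy)

aad_config = SearchClientConfiguration(
    "https://<service>.search.windows.net", DefaultAzureCredential()
)
assert isinstance(aad_config.authentication_policy, BearerTokenCredentialPolicy)
assert aad_config.credential_scopes == ["https://search.azure.com/.default"]
```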
+# -------------------------------------------------------------------------- +# pylint: disable=protected-access, broad-except + +import copy +import calendar +import decimal +import functools +import sys +import logging +import base64 +import re +import typing +import enum +import email.utils +from datetime import datetime, date, time, timedelta, timezone +from json import JSONEncoder +import xml.etree.ElementTree as ET +from typing_extensions import Self +import isodate +from azure.core.exceptions import DeserializationError +from azure.core import CaseInsensitiveEnumMeta +from azure.core.pipeline import PipelineResponse +from azure.core.serialization import _Null + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping + +_LOGGER = logging.getLogger(__name__) + +__all__ = ["SdkJSONEncoder", "Model", "rest_field", "rest_discriminator"] + +TZ_UTC = timezone.utc +_T = typing.TypeVar("_T") + + +def _timedelta_as_isostr(td: timedelta) -> str: + """Converts a datetime.timedelta object into an ISO 8601 formatted string, e.g. 'P4DT12H30M05S' + + Function adapted from the Tin Can Python project: https://github.com/RusticiSoftware/TinCanPython + + :param timedelta td: The timedelta to convert + :rtype: str + :return: ISO8601 version of this timedelta + """ + + # Split seconds to larger units + seconds = td.total_seconds() + minutes, seconds = divmod(seconds, 60) + hours, minutes = divmod(minutes, 60) + days, hours = divmod(hours, 24) + + days, hours, minutes = list(map(int, (days, hours, minutes))) + seconds = round(seconds, 6) + + # Build date + date_str = "" + if days: + date_str = "%sD" % days + + if hours or minutes or seconds: + # Build time + time_str = "T" + + # Hours + bigger_exists = date_str or hours + if bigger_exists: + time_str += "{:02}H".format(hours) + + # Minutes + bigger_exists = bigger_exists or minutes + if bigger_exists: + time_str += "{:02}M".format(minutes) + + # Seconds + try: + if seconds.is_integer(): + seconds_string = "{:02}".format(int(seconds)) + else: + # 9 chars long w/ leading 0, 6 digits after decimal + seconds_string = "%09.6f" % seconds + # Remove trailing zeros + seconds_string = seconds_string.rstrip("0") + except AttributeError: # int.is_integer() raises + seconds_string = "{:02}".format(seconds) + + time_str += "{}S".format(seconds_string) + else: + time_str = "" + + return "P" + date_str + time_str + + +def _serialize_bytes(o, format: typing.Optional[str] = None) -> str: + encoded = base64.b64encode(o).decode() + if format == "base64url": + return encoded.strip("=").replace("+", "-").replace("/", "_") + return encoded + + +def _serialize_datetime(o, format: typing.Optional[str] = None): + if hasattr(o, "year") and hasattr(o, "hour"): + if format == "rfc7231": + return email.utils.format_datetime(o, usegmt=True) + if format == "unix-timestamp": + return int(calendar.timegm(o.utctimetuple())) + + # astimezone() fails for naive times in Python 2.7, so make make sure o is aware (tzinfo is set) + if not o.tzinfo: + iso_formatted = o.replace(tzinfo=TZ_UTC).isoformat() + else: + iso_formatted = o.astimezone(TZ_UTC).isoformat() + # Replace the trailing "+00:00" UTC offset with "Z" (RFC 3339: https://www.ietf.org/rfc/rfc3339.txt) + return iso_formatted.replace("+00:00", "Z") + # Next try datetime.date or datetime.time + return o.isoformat() + + +def _is_readonly(p): + try: + return p._visibility == ["read"] + except AttributeError: + return False + + +class SdkJSONEncoder(JSONEncoder): + """A JSON 
encoder that's capable of serializing datetime objects and bytes.""" + + def __init__(self, *args, exclude_readonly: bool = False, format: typing.Optional[str] = None, **kwargs): + super().__init__(*args, **kwargs) + self.exclude_readonly = exclude_readonly + self.format = format + + def default(self, o): # pylint: disable=too-many-return-statements + if _is_model(o): + if self.exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + return {k: v for k, v in o.items() if k not in readonly_props} + return dict(o.items()) + try: + return super(SdkJSONEncoder, self).default(o) + except TypeError: + if isinstance(o, _Null): + return None + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, self.format) + try: + # First try datetime.datetime + return _serialize_datetime(o, self.format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return super(SdkJSONEncoder, self).default(o) + + +_VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}" + r"\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") +_VALID_RFC7231 = re.compile( + r"(Mon|Tue|Wed|Thu|Fri|Sat|Sun),\s\d{2}\s" + r"(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s\d{2}:\d{2}:\d{2}\sGMT" +) + + +def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize ISO-8601 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + attr = attr.upper() + match = _VALID_DATE.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + check_decimal = attr.split(".") + if len(check_decimal) > 1: + decimal_str = "" + for digit in check_decimal[1]: + if digit.isdigit(): + decimal_str += digit + else: + break + if len(decimal_str) > 6: + attr = attr.replace(decimal_str, decimal_str[0:6]) + + date_obj = isodate.parse_datetime(attr) + test_utc = date_obj.utctimetuple() + if test_utc.tm_year > 9999 or test_utc.tm_year < 1: + raise OverflowError("Hit max or min date") + return date_obj + + +def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: + """Deserialize RFC7231 formatted string into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + match = _VALID_RFC7231.match(attr) + if not match: + raise ValueError("Invalid datetime string: " + attr) + + return email.utils.parsedate_to_datetime(attr) + + +def _deserialize_datetime_unix_timestamp(attr: typing.Union[float, datetime]) -> datetime: + """Deserialize unix timestamp into Datetime object. + + :param str attr: response string to be deserialized. + :rtype: ~datetime.datetime + :returns: The datetime object from that input + """ + if isinstance(attr, datetime): + # i'm already deserialized + return attr + return datetime.fromtimestamp(attr, TZ_UTC) + + +def _deserialize_date(attr: typing.Union[str, date]) -> date: + """Deserialize ISO-8601 formatted string into Date object. + :param str attr: response string to be deserialized. 
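To make the encoder above concrete, a small sketch that serializes the value types its `default` hook handles: aware datetimes become RFC 3339 strings with a `Z` suffix, timedeltas become ISO 8601 durations, and bytes become base64. The payload values are invented:

```python
import json
from datetime import datetime, timedelta, timezone

from azure.search.documents._generated._model_base import SdkJSONEncoder

payload = {
    "timestamp": datetime(2024, 11, 8, 22, 7, 36, tzinfo=timezone.utc),
    "window": timedelta(hours=1, minutes=30, seconds=5),
    "token": b"\x01\x02",
}
print(json.dumps(payload, cls=SdkJSONEncoder))
# {"timestamp": "2024-11-08T22:07:36Z", "window": "PT01H30M05S", "token": "AQI="}
```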
+ :rtype: date + :returns: The date object from that input + """ + # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. + if isinstance(attr, date): + return attr + return isodate.parse_date(attr, defaultmonth=None, defaultday=None) # type: ignore + + +def _deserialize_time(attr: typing.Union[str, time]) -> time: + """Deserialize ISO-8601 formatted string into time object. + + :param str attr: response string to be deserialized. + :rtype: datetime.time + :returns: The time object from that input + """ + if isinstance(attr, time): + return attr + return isodate.parse_time(attr) + + +def _deserialize_bytes(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + return bytes(base64.b64decode(attr)) + + +def _deserialize_bytes_base64(attr): + if isinstance(attr, (bytes, bytearray)): + return attr + padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore + attr = attr + padding # type: ignore + encoded = attr.replace("-", "+").replace("_", "/") + return bytes(base64.b64decode(encoded)) + + +def _deserialize_duration(attr): + if isinstance(attr, timedelta): + return attr + return isodate.parse_duration(attr) + + +def _deserialize_decimal(attr): + if isinstance(attr, decimal.Decimal): + return attr + return decimal.Decimal(str(attr)) + + +def _deserialize_int_as_str(attr): + if isinstance(attr, int): + return attr + return int(attr) + + +_DESERIALIZE_MAPPING = { + datetime: _deserialize_datetime, + date: _deserialize_date, + time: _deserialize_time, + bytes: _deserialize_bytes, + bytearray: _deserialize_bytes, + timedelta: _deserialize_duration, + typing.Any: lambda x: x, + decimal.Decimal: _deserialize_decimal, +} + +_DESERIALIZE_MAPPING_WITHFORMAT = { + "rfc3339": _deserialize_datetime, + "rfc7231": _deserialize_datetime_rfc7231, + "unix-timestamp": _deserialize_datetime_unix_timestamp, + "base64": _deserialize_bytes, + "base64url": _deserialize_bytes_base64, +} + + +def get_deserializer(annotation: typing.Any, rf: typing.Optional["_RestField"] = None): + if annotation is int and rf and rf._format == "str": + return _deserialize_int_as_str + if rf and rf._format: + return _DESERIALIZE_MAPPING_WITHFORMAT.get(rf._format) + return _DESERIALIZE_MAPPING.get(annotation) # pyright: ignore + + +def _get_type_alias_type(module_name: str, alias_name: str): + types = { + k: v + for k, v in sys.modules[module_name].__dict__.items() + if isinstance(v, typing._GenericAlias) # type: ignore + } + if alias_name not in types: + return alias_name + return types[alias_name] + + +def _get_model(module_name: str, model_name: str): + models = {k: v for k, v in sys.modules[module_name].__dict__.items() if isinstance(v, type)} + module_end = module_name.rsplit(".", 1)[0] + models.update({k: v for k, v in sys.modules[module_end].__dict__.items() if isinstance(v, type)}) + if isinstance(model_name, str): + model_name = model_name.split(".")[-1] + if model_name not in models: + return model_name + return models[model_name] + + +_UNSET = object() + + +class _MyMutableMapping(MutableMapping[str, typing.Any]): # pylint: disable=unsubscriptable-object + def __init__(self, data: typing.Dict[str, typing.Any]) -> None: + self._data = data + + def __contains__(self, key: typing.Any) -> bool: + return key in self._data + + def __getitem__(self, key: str) -> typing.Any: + return self._data.__getitem__(key) + + def __setitem__(self, key: str, value: typing.Any) -> None: + self._data.__setitem__(key, value) + + def __delitem__(self, key: str) -> None: + self._data.__delitem__(key) + + 
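The three datetime deserializers shown earlier each accept a different wire encoding of the same instant. They are internal helpers, imported below purely to illustrate the formats; the sample values are made up (and happen to mirror the patch date):

```python
from datetime import datetime, timezone

from azure.search.documents._generated._model_base import (
    _deserialize_datetime,
    _deserialize_datetime_rfc7231,
    _deserialize_datetime_unix_timestamp,
)

iso = _deserialize_datetime("2024-11-08T22:07:36Z")                    # RFC 3339
http = _deserialize_datetime_rfc7231("Fri, 08 Nov 2024 22:07:36 GMT")  # RFC 7231
unix = _deserialize_datetime_unix_timestamp(1731103656)                # Unix seconds
assert iso == http == unix == datetime(2024, 11, 8, 22, 7, 36, tzinfo=timezone.utc)
```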
def __iter__(self) -> typing.Iterator[typing.Any]: + return self._data.__iter__() + + def __len__(self) -> int: + return self._data.__len__() + + def __ne__(self, other: typing.Any) -> bool: + return not self.__eq__(other) + + def keys(self) -> typing.KeysView[str]: + return self._data.keys() + + def values(self) -> typing.ValuesView[typing.Any]: + return self._data.values() + + def items(self) -> typing.ItemsView[str, typing.Any]: + return self._data.items() + + def get(self, key: str, default: typing.Any = None) -> typing.Any: + try: + return self[key] + except KeyError: + return default + + @typing.overload + def pop(self, key: str) -> typing.Any: ... + + @typing.overload + def pop(self, key: str, default: _T) -> _T: ... + + @typing.overload + def pop(self, key: str, default: typing.Any) -> typing.Any: ... + + def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.pop(key) + return self._data.pop(key, default) + + def popitem(self) -> typing.Tuple[str, typing.Any]: + return self._data.popitem() + + def clear(self) -> None: + self._data.clear() + + def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + self._data.update(*args, **kwargs) + + @typing.overload + def setdefault(self, key: str, default: None = None) -> None: ... + + @typing.overload + def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... + + def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + if default is _UNSET: + return self._data.setdefault(key) + return self._data.setdefault(key, default) + + def __eq__(self, other: typing.Any) -> bool: + try: + other_model = self.__class__(other) + except Exception: + return False + return self._data == other_model._data + + def __repr__(self) -> str: + return str(self._data) + + +def _is_model(obj: typing.Any) -> bool: + return getattr(obj, "_is_model", False) + + +def _serialize(o, format: typing.Optional[str] = None): # pylint: disable=too-many-return-statements + if isinstance(o, list): + return [_serialize(x, format) for x in o] + if isinstance(o, dict): + return {k: _serialize(v, format) for k, v in o.items()} + if isinstance(o, set): + return {_serialize(x, format) for x in o} + if isinstance(o, tuple): + return tuple(_serialize(x, format) for x in o) + if isinstance(o, (bytes, bytearray)): + return _serialize_bytes(o, format) + if isinstance(o, decimal.Decimal): + return float(o) + if isinstance(o, enum.Enum): + return o.value + if isinstance(o, int): + if format == "str": + return str(o) + return o + try: + # First try datetime.datetime + return _serialize_datetime(o, format) + except AttributeError: + pass + # Last, try datetime.timedelta + try: + return _timedelta_as_isostr(o) + except AttributeError: + # This will be raised when it hits value.total_seconds in the method above + pass + return o + + +def _get_rest_field( + attr_to_rest_field: typing.Dict[str, "_RestField"], rest_name: str +) -> typing.Optional["_RestField"]: + try: + return next(rf for rf in attr_to_rest_field.values() if rf._rest_name == rest_name) + except StopIteration: + return None + + +def _create_value(rf: typing.Optional["_RestField"], value: typing.Any) -> typing.Any: + if not rf: + return _serialize(value, None) + if rf._is_multipart_file_input: + return value + if rf._is_model: + return _deserialize(rf._type, value) + if isinstance(value, ET.Element): + value = _deserialize(rf._type, value) + return _serialize(value, rf._format) + + +class Model(_MyMutableMapping): + _is_model = True + 
# label whether current class's _attr_to_rest_field has been calculated + # could not see _attr_to_rest_field directly because subclass inherits it from parent class + _calculated: typing.Set[str] = set() + + def __init__(self, *args: typing.Any, **kwargs: typing.Any) -> None: + class_name = self.__class__.__name__ + if len(args) > 1: + raise TypeError(f"{class_name}.__init__() takes 2 positional arguments but {len(args) + 1} were given") + dict_to_pass = { + rest_field._rest_name: rest_field._default + for rest_field in self._attr_to_rest_field.values() + if rest_field._default is not _UNSET + } + if args: # pylint: disable=too-many-nested-blocks + if isinstance(args[0], ET.Element): + existed_attr_keys = [] + model_meta = getattr(self, "_xml", {}) + + for rf in self._attr_to_rest_field.values(): + prop_meta = getattr(rf, "_xml", {}) + xml_name = prop_meta.get("name", rf._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + # attribute + if prop_meta.get("attribute", False) and args[0].get(xml_name) is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].get(xml_name)) + continue + + # unwrapped element is array + if prop_meta.get("unwrapped", False): + # unwrapped array could either use prop items meta/prop meta + if prop_meta.get("itemsName"): + xml_name = prop_meta.get("itemsName") + xml_ns = prop_meta.get("itemNs") + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + items = args[0].findall(xml_name) # pyright: ignore + if len(items) > 0: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, items) + continue + + # text element is primitive type + if prop_meta.get("text", False): + if args[0].text is not None: + dict_to_pass[rf._rest_name] = _deserialize(rf._type, args[0].text) + continue + + # wrapped element could be normal property or array, it should only have one element + item = args[0].find(xml_name) + if item is not None: + existed_attr_keys.append(xml_name) + dict_to_pass[rf._rest_name] = _deserialize(rf._type, item) + + # rest thing is additional properties + for e in args[0]: + if e.tag not in existed_attr_keys: + dict_to_pass[e.tag] = _convert_element(e) + else: + dict_to_pass.update( + {k: _create_value(_get_rest_field(self._attr_to_rest_field, k), v) for k, v in args[0].items()} + ) + else: + non_attr_kwargs = [k for k in kwargs if k not in self._attr_to_rest_field] + if non_attr_kwargs: + # actual type errors only throw the first wrong keyword arg they see, so following that. 
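The `Model` base class above stores data as a mutable mapping keyed by REST wire names while exposing attribute access through `rest_field` descriptors (defined further down in this file). A hypothetical model, not taken from the patch, showing keyword-only construction, construction from a raw wire dict, and the dual access paths:

```python
from typing import Optional

from azure.search.documents._generated._model_base import Model, rest_field


# Hypothetical model for illustration only; real generated models live in models/_models.py.
class Hotel(Model):
    hotel_name: Optional[str] = rest_field(name="hotelName")
    rating: Optional[float] = rest_field()


h = Hotel(hotel_name="Sea View", rating=4.5)
assert h["hotelName"] == h.hotel_name == "Sea View"   # stored under the wire name
assert dict(h) == {"hotelName": "Sea View", "rating": 4.5}

raw = Hotel({"hotelName": "Sea View"})                 # also accepts a wire-format dict
assert raw.rating is None
```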
+ raise TypeError(f"{class_name}.__init__() got an unexpected keyword argument '{non_attr_kwargs[0]}'") + dict_to_pass.update( + { + self._attr_to_rest_field[k]._rest_name: _create_value(self._attr_to_rest_field[k], v) + for k, v in kwargs.items() + if v is not None + } + ) + super().__init__(dict_to_pass) + + def copy(self) -> "Model": + return Model(self.__dict__) + + def __new__(cls, *args: typing.Any, **kwargs: typing.Any) -> Self: + if f"{cls.__module__}.{cls.__qualname__}" not in cls._calculated: + # we know the last nine classes in mro are going to be 'Model', '_MyMutableMapping', 'MutableMapping', + # 'Mapping', 'Collection', 'Sized', 'Iterable', 'Container' and 'object' + mros = cls.__mro__[:-9][::-1] # ignore parents, and reverse the mro order + attr_to_rest_field: typing.Dict[str, _RestField] = { # map attribute name to rest_field property + k: v for mro_class in mros for k, v in mro_class.__dict__.items() if k[0] != "_" and hasattr(v, "_type") + } + annotations = { + k: v + for mro_class in mros + if hasattr(mro_class, "__annotations__") + for k, v in mro_class.__annotations__.items() + } + for attr, rf in attr_to_rest_field.items(): + rf._module = cls.__module__ + if not rf._type: + rf._type = rf._get_deserialize_callable_from_annotation(annotations.get(attr, None)) + if not rf._rest_name_input: + rf._rest_name_input = attr + cls._attr_to_rest_field: typing.Dict[str, _RestField] = dict(attr_to_rest_field.items()) + cls._calculated.add(f"{cls.__module__}.{cls.__qualname__}") + + return super().__new__(cls) # pylint: disable=no-value-for-parameter + + def __init_subclass__(cls, discriminator: typing.Optional[str] = None) -> None: + for base in cls.__bases__: + if hasattr(base, "__mapping__"): + base.__mapping__[discriminator or cls.__name__] = cls # type: ignore + + @classmethod + def _get_discriminator(cls, exist_discriminators) -> typing.Optional["_RestField"]: + for v in cls.__dict__.values(): + if isinstance(v, _RestField) and v._is_discriminator and v._rest_name not in exist_discriminators: + return v + return None + + @classmethod + def _deserialize(cls, data, exist_discriminators): + if not hasattr(cls, "__mapping__"): + return cls(data) + discriminator = cls._get_discriminator(exist_discriminators) + if discriminator is None: + return cls(data) + exist_discriminators.append(discriminator._rest_name) + if isinstance(data, ET.Element): + model_meta = getattr(cls, "_xml", {}) + prop_meta = getattr(discriminator, "_xml", {}) + xml_name = prop_meta.get("name", discriminator._rest_name) + xml_ns = prop_meta.get("ns", model_meta.get("ns", None)) + if xml_ns: + xml_name = "{" + xml_ns + "}" + xml_name + + if data.get(xml_name) is not None: + discriminator_value = data.get(xml_name) + else: + discriminator_value = data.find(xml_name).text # pyright: ignore + else: + discriminator_value = data.get(discriminator._rest_name) + mapped_cls = cls.__mapping__.get(discriminator_value, cls) # pyright: ignore + return mapped_cls._deserialize(data, exist_discriminators) + + def as_dict(self, *, exclude_readonly: bool = False) -> typing.Dict[str, typing.Any]: + """Return a dict that can be turned into json using json.dump. + + :keyword bool exclude_readonly: Whether to remove the readonly properties. 
+ :returns: A dict JSON compatible object + :rtype: dict + """ + + result = {} + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in self._attr_to_rest_field.values() if _is_readonly(p)] + for k, v in self.items(): + if exclude_readonly and k in readonly_props: # pyright: ignore + continue + is_multipart_file_input = False + try: + is_multipart_file_input = next( + rf for rf in self._attr_to_rest_field.values() if rf._rest_name == k + )._is_multipart_file_input + except StopIteration: + pass + result[k] = v if is_multipart_file_input else Model._as_dict_value(v, exclude_readonly=exclude_readonly) + return result + + @staticmethod + def _as_dict_value(v: typing.Any, exclude_readonly: bool = False) -> typing.Any: + if v is None or isinstance(v, _Null): + return None + if isinstance(v, (list, tuple, set)): + return type(v)(Model._as_dict_value(x, exclude_readonly=exclude_readonly) for x in v) + if isinstance(v, dict): + return {dk: Model._as_dict_value(dv, exclude_readonly=exclude_readonly) for dk, dv in v.items()} + return v.as_dict(exclude_readonly=exclude_readonly) if hasattr(v, "as_dict") else v + + +def _deserialize_model(model_deserializer: typing.Optional[typing.Callable], obj): + if _is_model(obj): + return obj + return _deserialize(model_deserializer, obj) + + +def _deserialize_with_optional(if_obj_deserializer: typing.Optional[typing.Callable], obj): + if obj is None: + return obj + return _deserialize_with_callable(if_obj_deserializer, obj) + + +def _deserialize_with_union(deserializers, obj): + for deserializer in deserializers: + try: + return _deserialize(deserializer, obj) + except DeserializationError: + pass + raise DeserializationError() + + +def _deserialize_dict( + value_deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj: typing.Dict[typing.Any, typing.Any], +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = {child.tag: child for child in obj} + return {k: _deserialize(value_deserializer, v, module) for k, v in obj.items()} + + +def _deserialize_multiple_sequence( + entry_deserializers: typing.List[typing.Optional[typing.Callable]], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + return type(obj)(_deserialize(deserializer, entry, module) for entry, deserializer in zip(obj, entry_deserializers)) + + +def _deserialize_sequence( + deserializer: typing.Optional[typing.Callable], + module: typing.Optional[str], + obj, +): + if obj is None: + return obj + if isinstance(obj, ET.Element): + obj = list(obj) + return type(obj)(_deserialize(deserializer, entry, module) for entry in obj) + + +def _sorted_annotations(types: typing.List[typing.Any]) -> typing.List[typing.Any]: + return sorted( + types, + key=lambda x: hasattr(x, "__name__") and x.__name__.lower() in ("str", "float", "int", "bool"), + ) + + +def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-return-statements, too-many-branches + annotation: typing.Any, + module: typing.Optional[str], + rf: typing.Optional["_RestField"] = None, +) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + if not annotation: + return None + + # is it a type alias? + if isinstance(annotation, str): + if module is not None: + annotation = _get_type_alias_type(module, annotation) + + # is it a forward ref / in quotes? 
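Reusing the hypothetical `Hotel` model from the earlier sketch: `as_dict` (defined just above) returns a plain JSON-compatible dict, and its output matches what `SdkJSONEncoder` emits for the same model, with `exclude_readonly=True` available on both paths to drop read-only properties:

```python
import json

from azure.search.documents._generated._model_base import SdkJSONEncoder

h = Hotel(hotel_name="Sea View", rating=4.5)
assert h.as_dict() == {"hotelName": "Sea View", "rating": 4.5}
assert json.loads(json.dumps(h, cls=SdkJSONEncoder)) == h.as_dict()
```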
+ if isinstance(annotation, (str, typing.ForwardRef)): + try: + model_name = annotation.__forward_arg__ # type: ignore + except AttributeError: + model_name = annotation + if module is not None: + annotation = _get_model(module, model_name) + + try: + if module and _is_model(annotation): + if rf: + rf._is_model = True + + return functools.partial(_deserialize_model, annotation) # pyright: ignore + except Exception: + pass + + # is it a literal? + try: + if annotation.__origin__ is typing.Literal: # pyright: ignore + return None + except AttributeError: + pass + + # is it optional? + try: + if any(a for a in annotation.__args__ if a == type(None)): # pyright: ignore + if len(annotation.__args__) <= 2: # pyright: ignore + if_obj_deserializer = _get_deserialize_callable_from_annotation( + next(a for a in annotation.__args__ if a != type(None)), module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_with_optional, if_obj_deserializer) + # the type is Optional[Union[...]], we need to remove the None type from the Union + annotation_copy = copy.copy(annotation) + annotation_copy.__args__ = [a for a in annotation_copy.__args__ if a != type(None)] # pyright: ignore + return _get_deserialize_callable_from_annotation(annotation_copy, module, rf) + except AttributeError: + pass + + # is it union? + if getattr(annotation, "__origin__", None) is typing.Union: + # initial ordering is we make `string` the last deserialization option, because it is often them most generic + deserializers = [ + _get_deserialize_callable_from_annotation(arg, module, rf) + for arg in _sorted_annotations(annotation.__args__) # pyright: ignore + ] + + return functools.partial(_deserialize_with_union, deserializers) + + try: + if annotation._name == "Dict": # pyright: ignore + value_deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[1], module, rf # pyright: ignore + ) + + return functools.partial( + _deserialize_dict, + value_deserializer, + module, + ) + except (AttributeError, IndexError): + pass + try: + if annotation._name in ["List", "Set", "Tuple", "Sequence"]: # pyright: ignore + if len(annotation.__args__) > 1: # pyright: ignore + entry_deserializers = [ + _get_deserialize_callable_from_annotation(dt, module, rf) + for dt in annotation.__args__ # pyright: ignore + ] + return functools.partial(_deserialize_multiple_sequence, entry_deserializers, module) + deserializer = _get_deserialize_callable_from_annotation( + annotation.__args__[0], module, rf # pyright: ignore + ) + + return functools.partial(_deserialize_sequence, deserializer, module) + except (TypeError, IndexError, AttributeError, SyntaxError): + pass + + def _deserialize_default( + deserializer, + obj, + ): + if obj is None: + return obj + try: + return _deserialize_with_callable(deserializer, obj) + except Exception: + pass + return obj + + if get_deserializer(annotation, rf): + return functools.partial(_deserialize_default, get_deserializer(annotation, rf)) + + return functools.partial(_deserialize_default, annotation) + + +def _deserialize_with_callable( + deserializer: typing.Optional[typing.Callable[[typing.Any], typing.Any]], + value: typing.Any, +): # pylint: disable=too-many-return-statements + try: + if value is None or isinstance(value, _Null): + return None + if isinstance(value, ET.Element): + if deserializer is str: + return value.text or "" + if deserializer is int: + return int(value.text) if value.text else None + if deserializer is float: + return float(value.text) if value.text else None + if 
deserializer is bool: + return value.text == "true" if value.text else None + if deserializer is None: + return value + if deserializer in [int, float, bool]: + return deserializer(value) + if isinstance(deserializer, CaseInsensitiveEnumMeta): + try: + return deserializer(value) + except ValueError: + # for unknown value, return raw value + return value + if isinstance(deserializer, type) and issubclass(deserializer, Model): + return deserializer._deserialize(value, []) + return typing.cast(typing.Callable[[typing.Any], typing.Any], deserializer)(value) + except Exception as e: + raise DeserializationError() from e + + +def _deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + if isinstance(value, PipelineResponse): + value = value.http_response.json() + if rf is None and format: + rf = _RestField(format=format) + if not isinstance(deserializer, functools.partial): + deserializer = _get_deserialize_callable_from_annotation(deserializer, module, rf) + return _deserialize_with_callable(deserializer, value) + + +class _RestField: + def __init__( + self, + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + is_discriminator: bool = False, + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, + ): + self._type = type + self._rest_name_input = name + self._module: typing.Optional[str] = None + self._is_discriminator = is_discriminator + self._visibility = visibility + self._is_model = False + self._default = default + self._format = format + self._is_multipart_file_input = is_multipart_file_input + self._xml = xml if xml is not None else {} + + @property + def _class_type(self) -> typing.Any: + return getattr(self._type, "args", [None])[0] + + @property + def _rest_name(self) -> str: + if self._rest_name_input is None: + raise ValueError("Rest name was never set") + return self._rest_name_input + + def __get__(self, obj: Model, type=None): # pylint: disable=redefined-builtin + # by this point, type and rest_name will have a value bc we default + # them in __new__ of the Model class + item = obj.get(self._rest_name) + if item is None: + return item + if self._is_model: + return item + return _deserialize(self._type, _serialize(item, self._format), rf=self) + + def __set__(self, obj: Model, value) -> None: + if value is None: + # we want to wipe out entries if users set attr to None + try: + obj.__delitem__(self._rest_name) + except KeyError: + pass + return + if self._is_model: + if not _is_model(value): + value = _deserialize(self._type, value) + obj.__setitem__(self._rest_name, value) + return + obj.__setitem__(self._rest_name, _serialize(value, self._format)) + + def _get_deserialize_callable_from_annotation( + self, annotation: typing.Any + ) -> typing.Optional[typing.Callable[[typing.Any], typing.Any]]: + return _get_deserialize_callable_from_annotation(annotation, self._module, self) + + +def rest_field( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + default: typing.Any = _UNSET, + format: typing.Optional[str] = None, + is_multipart_file_input: bool = False, + 
xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField( + name=name, + type=type, + visibility=visibility, + default=default, + format=format, + is_multipart_file_input=is_multipart_file_input, + xml=xml, + ) + + +def rest_discriminator( + *, + name: typing.Optional[str] = None, + type: typing.Optional[typing.Callable] = None, # pylint: disable=redefined-builtin + visibility: typing.Optional[typing.List[str]] = None, + xml: typing.Optional[typing.Dict[str, typing.Any]] = None, +) -> typing.Any: + return _RestField(name=name, type=type, is_discriminator=True, visibility=visibility, xml=xml) + + +def serialize_xml(model: Model, exclude_readonly: bool = False) -> str: + """Serialize a model to XML. + + :param Model model: The model to serialize. + :param bool exclude_readonly: Whether to exclude readonly properties. + :returns: The XML representation of the model. + :rtype: str + """ + return ET.tostring(_get_element(model, exclude_readonly), encoding="unicode") # type: ignore + + +def _get_element( + o: typing.Any, + exclude_readonly: bool = False, + parent_meta: typing.Optional[typing.Dict[str, typing.Any]] = None, + wrapped_element: typing.Optional[ET.Element] = None, +) -> typing.Union[ET.Element, typing.List[ET.Element]]: + if _is_model(o): + model_meta = getattr(o, "_xml", {}) + + # if prop is a model, then use the prop element directly, else generate a wrapper of model + if wrapped_element is None: + wrapped_element = _create_xml_element( + model_meta.get("name", o.__class__.__name__), + model_meta.get("prefix"), + model_meta.get("ns"), + ) + + readonly_props = [] + if exclude_readonly: + readonly_props = [p._rest_name for p in o._attr_to_rest_field.values() if _is_readonly(p)] + + for k, v in o.items(): + # do not serialize readonly properties + if exclude_readonly and k in readonly_props: + continue + + prop_rest_field = _get_rest_field(o._attr_to_rest_field, k) + if prop_rest_field: + prop_meta = getattr(prop_rest_field, "_xml").copy() + # use the wire name as xml name if no specific name is set + if prop_meta.get("name") is None: + prop_meta["name"] = k + else: + # additional properties will not have rest field, use the wire name as xml name + prop_meta = {"name": k} + + # if no ns for prop, use model's + if prop_meta.get("ns") is None and model_meta.get("ns"): + prop_meta["ns"] = model_meta.get("ns") + prop_meta["prefix"] = model_meta.get("prefix") + + if prop_meta.get("unwrapped", False): + # unwrapped could only set on array + wrapped_element.extend(_get_element(v, exclude_readonly, prop_meta)) + elif prop_meta.get("text", False): + # text could only set on primitive type + wrapped_element.text = _get_primitive_type_value(v) + elif prop_meta.get("attribute", False): + xml_name = prop_meta.get("name", k) + if prop_meta.get("ns"): + ET.register_namespace(prop_meta.get("prefix"), prop_meta.get("ns")) # pyright: ignore + xml_name = "{" + prop_meta.get("ns") + "}" + xml_name # pyright: ignore + # attribute should be primitive type + wrapped_element.set(xml_name, _get_primitive_type_value(v)) + else: + # other wrapped prop element + wrapped_element.append(_get_wrapped_element(v, exclude_readonly, prop_meta)) + return wrapped_element + if isinstance(o, list): + return [_get_element(x, exclude_readonly, parent_meta) for x in o] # type: ignore + if isinstance(o, dict): + result = [] + for k, v in o.items(): + result.append( + _get_wrapped_element( + v, + exclude_readonly, + { + "name": k, + "ns": parent_meta.get("ns") if parent_meta else None, + 
"prefix": parent_meta.get("prefix") if parent_meta else None, + }, + ) + ) + return result + + # primitive case need to create element based on parent_meta + if parent_meta: + return _get_wrapped_element( + o, + exclude_readonly, + { + "name": parent_meta.get("itemsName", parent_meta.get("name")), + "prefix": parent_meta.get("itemsPrefix", parent_meta.get("prefix")), + "ns": parent_meta.get("itemsNs", parent_meta.get("ns")), + }, + ) + + raise ValueError("Could not serialize value into xml: " + o) + + +def _get_wrapped_element( + v: typing.Any, + exclude_readonly: bool, + meta: typing.Optional[typing.Dict[str, typing.Any]], +) -> ET.Element: + wrapped_element = _create_xml_element( + meta.get("name") if meta else None, meta.get("prefix") if meta else None, meta.get("ns") if meta else None + ) + if isinstance(v, (dict, list)): + wrapped_element.extend(_get_element(v, exclude_readonly, meta)) + elif _is_model(v): + _get_element(v, exclude_readonly, meta, wrapped_element) + else: + wrapped_element.text = _get_primitive_type_value(v) + return wrapped_element + + +def _get_primitive_type_value(v) -> str: + if v is True: + return "true" + if v is False: + return "false" + if isinstance(v, _Null): + return "" + return str(v) + + +def _create_xml_element(tag, prefix=None, ns=None): + if prefix and ns: + ET.register_namespace(prefix, ns) + if ns: + return ET.Element("{" + ns + "}" + tag) + return ET.Element(tag) + + +def _deserialize_xml( + deserializer: typing.Any, + value: str, +) -> typing.Any: + element = ET.fromstring(value) # nosec + return _deserialize(deserializer, element) + + +def _convert_element(e: ET.Element): + # dict case + if len(e.attrib) > 0 or len({child.tag for child in e}) > 1: + dict_result: typing.Dict[str, typing.Any] = {} + for child in e: + if dict_result.get(child.tag) is not None: + if isinstance(dict_result[child.tag], list): + dict_result[child.tag].append(_convert_element(child)) + else: + dict_result[child.tag] = [dict_result[child.tag], _convert_element(child)] + else: + dict_result[child.tag] = _convert_element(child) + dict_result.update(e.attrib) + return dict_result + # array case + if len(e) > 0: + array_result: typing.List[typing.Any] = [] + for child in e: + array_result.append(_convert_element(child)) + return array_result + # primitive case + return e.text diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py deleted file mode 100644 index 7b3bc7b0fd37..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_search_index_client.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any -from typing_extensions import Self - -from azure.core import PipelineClient -from azure.core.pipeline import policies -from azure.core.rest import HttpRequest, HttpResponse - -from . 
import models as _models -from ._configuration import SearchIndexClientConfiguration -from ._serialization import Deserializer, Serializer -from .operations import DocumentsOperations - - -class SearchIndexClient: - """Client that can be used to query an index and upload, merge, or delete documents. - - :ivar documents: DocumentsOperations operations - :vartype documents: azure.search.documents.operations.DocumentsOperations - :param endpoint: The endpoint URL of the search service. Required. - :type endpoint: str - :param index_name: The name of the index. Required. - :type index_name: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, endpoint: str, index_name: str, **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/indexes('{indexName}')" - self._config = SearchIndexClientConfiguration(endpoint=endpoint, index_name=index_name, **kwargs) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.documents = DocumentsOperations(self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> Self: - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_validation.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_validation.py new file mode 100644 index 000000000000..752b2822f9d3 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_validation.py @@ -0,0 +1,50 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +import functools + + +def api_version_validation(**kwargs): + params_added_on = kwargs.pop("params_added_on", {}) + method_added_on = kwargs.pop("method_added_on", "") + + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + # this assumes the client has an _api_version attribute + client = args[0] + client_api_version = client._config.api_version # pylint: disable=protected-access + except AttributeError: + return func(*args, **kwargs) + + if method_added_on > client_api_version: + raise ValueError( + f"'{func.__name__}' is not available in API version " + f"{client_api_version}. Pass service API version {method_added_on} or newer to your client." + ) + + unsupported = { + parameter: api_version + for api_version, parameters in params_added_on.items() + for parameter in parameters + if parameter in kwargs and api_version > client_api_version + } + if unsupported: + raise ValueError( + "".join( + [ + f"'{param}' is not available in API version {client_api_version}. " + f"Use service API version {version} or newer.\n" + for param, version in unsupported.items() + ] + ) + ) + return func(*args, **kwargs) + + return wrapper + + return decorator diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py new file mode 100644 index 000000000000..9acb4ec12700 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_vendor.py @@ -0,0 +1,57 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
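# ------------------------------------------------------------------
# Illustration only: a minimal sketch of how the api_version_validation
# decorator added above guards an operation. The _Config/_Client classes
# and the version strings are hypothetical stand-ins, not SDK types; the
# import path is the module added in this patch.
from azure.search.documents._generated._validation import api_version_validation

class _Config:
    api_version = "2024-07-01"

class _Client:
    _config = _Config()

    @api_version_validation(method_added_on="2024-11-01-preview")
    def new_operation(self, **kwargs):
        return "ok"

_Client().new_operation()
# ValueError: 'new_operation' is not available in API version 2024-07-01.
# Pass service API version 2024-11-01-preview or newer to your client.
# ------------------------------------------------------------------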
+# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Optional, TYPE_CHECKING + +from azure.core import MatchConditions + +from ._configuration import SearchClientConfiguration + +if TYPE_CHECKING: + from azure.core import PipelineClient + + from ._serialization import Deserializer, Serializer + + +class SearchClientMixinABC(ABC): + """DO NOT use this class. It is for internal typing use only.""" + + _client: "PipelineClient" + _config: SearchClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +def quote_etag(etag: Optional[str]) -> Optional[str]: + if not etag or etag == "*": + return etag + if etag.startswith("W/"): + return etag + if etag.startswith('"') and etag.endswith('"'): + return etag + if etag.startswith("'") and etag.endswith("'"): + return etag + return '"' + etag + '"' + + +def prep_if_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfNotModified: + if_match = quote_etag(etag) if etag else None + return if_match + if match_condition == MatchConditions.IfPresent: + return "*" + return None + + +def prep_if_none_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfModified: + if_none_match = quote_etag(etag) if etag else None + return if_none_match + if match_condition == MatchConditions.IfMissing: + return "*" + return None diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_version.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_version.py new file mode 100644 index 000000000000..be71c81bd282 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_version.py @@ -0,0 +1,9 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +VERSION = "1.0.0b1" diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py index a19da823d02d..79294ce3d7df 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
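# ------------------------------------------------------------------
# Illustration only: expected behaviour of the etag helpers defined in
# the _vendor.py module added above (etag values are samples).
from azure.core import MatchConditions
from azure.search.documents._generated._vendor import (
    prep_if_match,
    prep_if_none_match,
    quote_etag,
)

quote_etag("0x1234")                                      # '"0x1234"'
quote_etag('W/"0x1234"')                                  # weak etag -> unchanged
prep_if_match("0x1234", MatchConditions.IfNotModified)    # '"0x1234"'
prep_if_match(None, MatchConditions.IfPresent)            # '*'
prep_if_none_match("0x1234", MatchConditions.IfModified)  # '"0x1234"'
prep_if_none_match(None, MatchConditions.IfMissing)       # '*'
# ------------------------------------------------------------------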
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -10,7 +12,7 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._search_index_client import SearchIndexClient # type: ignore +from ._client import SearchClient # type: ignore try: from ._patch import __all__ as _patch_all @@ -20,7 +22,7 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "SearchIndexClient", + "SearchClient", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py new file mode 100644 index 000000000000..b91435886003 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py @@ -0,0 +1,155 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from copy import deepcopy +from typing import Any, Awaitable, TYPE_CHECKING, Union +from typing_extensions import Self + +from azure.core import AsyncPipelineClient +from azure.core.credentials import AzureKeyCredential +from azure.core.pipeline import policies +from azure.core.rest import AsyncHttpResponse, HttpRequest + +from .._serialization import Deserializer, Serializer +from ._configuration import SearchClientConfiguration +from .operations import ( + AliasesOperationsOperations, + DataSourcesOperationsOperations, + DocumentsOperationsOperations, + IndexersOperationsOperations, + IndexesOperationsOperations, + SearchClientOperationsMixin, + SkillsetsOperationsOperations, + SynonymMapsOperationsOperations, +) + +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential + + +class SearchClient(SearchClientOperationsMixin): # pylint: disable=too-many-instance-attributes + """Client that can be used to manage and query indexes and documents, as well as + manage other resources, on a search service. 
+ + :ivar data_sources_operations: DataSourcesOperationsOperations operations + :vartype data_sources_operations: + azure.search.documents.aio.operations.DataSourcesOperationsOperations + :ivar indexers_operations: IndexersOperationsOperations operations + :vartype indexers_operations: + azure.search.documents.aio.operations.IndexersOperationsOperations + :ivar skillsets_operations: SkillsetsOperationsOperations operations + :vartype skillsets_operations: + azure.search.documents.aio.operations.SkillsetsOperationsOperations + :ivar synonym_maps_operations: SynonymMapsOperationsOperations operations + :vartype synonym_maps_operations: + azure.search.documents.aio.operations.SynonymMapsOperationsOperations + :ivar indexes_operations: IndexesOperationsOperations operations + :vartype indexes_operations: azure.search.documents.aio.operations.IndexesOperationsOperations + :ivar aliases_operations: AliasesOperationsOperations operations + :vartype aliases_operations: azure.search.documents.aio.operations.AliasesOperationsOperations + :ivar documents_operations: DocumentsOperationsOperations operations + :vartype documents_operations: + azure.search.documents.aio.operations.DocumentsOperationsOperations + :param endpoint: Service host. Required. + :type endpoint: str + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-11-01-preview". Note that overriding this default value may result in unsupported + behavior. + :paramtype api_version: str + """ + + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: + _endpoint = "{endpoint}" + self._config = SearchClientConfiguration(endpoint=endpoint, credential=credential, **kwargs) + _policies = kwargs.pop("policies", None) + if _policies is None: + _policies = [ + policies.RequestIdPolicy(**kwargs), + self._config.headers_policy, + self._config.user_agent_policy, + self._config.proxy_policy, + policies.ContentDecodePolicy(**kwargs), + self._config.redirect_policy, + self._config.retry_policy, + self._config.authentication_policy, + self._config.custom_hook_policy, + self._config.logging_policy, + policies.DistributedTracingPolicy(**kwargs), + policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, + self._config.http_logging_policy, + ] + self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) + + self._serialize = Serializer() + self._deserialize = Deserializer() + self._serialize.client_side_validation = False + self.data_sources_operations = DataSourcesOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.indexers_operations = IndexersOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.skillsets_operations = SkillsetsOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.synonym_maps_operations = SynonymMapsOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.indexes_operations = IndexesOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.aliases_operations = 
AliasesOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + self.documents_operations = DocumentsOperationsOperations( + self._client, self._config, self._serialize, self._deserialize + ) + + def send_request( + self, request: HttpRequest, *, stream: bool = False, **kwargs: Any + ) -> Awaitable[AsyncHttpResponse]: + """Runs the network request through the client's chained policies. + + >>> from azure.core.rest import HttpRequest + >>> request = HttpRequest("GET", "https://www.example.org/") + + >>> response = await client.send_request(request) + + + For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request + + :param request: The network request you want to make. Required. + :type request: ~azure.core.rest.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to False. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.rest.AsyncHttpResponse + """ + + request_copy = deepcopy(request) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + + request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) + return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore + + async def close(self) -> None: + await self._client.close() + + async def __aenter__(self) -> Self: + await self._client.__aenter__() + return self + + async def __aexit__(self, *exc_details: Any) -> None: + await self._client.__aexit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py index c67d3bc7a87d..6d9f5b83d222 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py @@ -1,46 +1,65 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any +from typing import Any, TYPE_CHECKING, Union +from azure.core.credentials import AzureKeyCredential from azure.core.pipeline import policies -VERSION = "unknown" +from .._version import VERSION +if TYPE_CHECKING: + from azure.core.credentials_async import AsyncTokenCredential -class SearchIndexClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for SearchIndexClient. + +class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes + """Configuration for SearchClient. Note that all parameters used to create this instance are saved as instance attributes. - :param endpoint: The endpoint URL of the search service. Required. + :param endpoint: Service host. Required. :type endpoint: str - :param index_name: The name of the index. Required. 
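# ------------------------------------------------------------------
# Illustration only: a minimal sketch of constructing the consolidated
# async client defined above with key-based auth. Endpoint and key
# values are placeholders; most callers go through the hand-written
# public clients rather than this generated one.
import asyncio
from azure.core.credentials import AzureKeyCredential
from azure.search.documents._generated.aio import SearchClient

async def main():
    async with SearchClient(
        endpoint="https://<service>.search.windows.net",
        credential=AzureKeyCredential("<api-key>"),
        api_version="2024-11-01-preview",
    ) as client:
        # operation groups hang off the client, e.g.
        # client.indexes_operations, client.documents_operations, ...
        pass

asyncio.run(main())
# ------------------------------------------------------------------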
- :type index_name: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. + :param credential: Credential used to authenticate requests to the service. Is either a + AzureKeyCredential type or a TokenCredential type. Required. + :type credential: ~azure.core.credentials.AzureKeyCredential or + ~azure.core.credentials_async.AsyncTokenCredential + :keyword api_version: The API version to use for this operation. Default value is + "2024-11-01-preview". Note that overriding this default value may result in unsupported + behavior. :paramtype api_version: str """ - def __init__(self, endpoint: str, index_name: str, **kwargs: Any) -> None: + def __init__( + self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any + ) -> None: api_version: str = kwargs.pop("api_version", "2024-11-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") - if index_name is None: - raise ValueError("Parameter 'index_name' must not be None.") + if credential is None: + raise ValueError("Parameter 'credential' must not be None.") self.endpoint = endpoint - self.index_name = index_name + self.credential = credential self.api_version = api_version - kwargs.setdefault("sdk_moniker", "searchindexclient/{}".format(VERSION)) + self.credential_scopes = kwargs.pop("credential_scopes", ["https://search.azure.com/.default"]) + kwargs.setdefault("sdk_moniker", "search-documents/{}".format(VERSION)) self.polling_interval = kwargs.get("polling_interval", 30) self._configure(**kwargs) + def _infer_policy(self, **kwargs): + if isinstance(self.credential, AzureKeyCredential): + return policies.AzureKeyCredentialPolicy(self.credential, "api-key", **kwargs) + if hasattr(self.credential, "get_token"): + return policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs) + raise TypeError(f"Unsupported credential: {self.credential}") + def _configure(self, **kwargs: Any) -> None: self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) @@ -51,3 +70,5 @@ def _configure(self, **kwargs: Any) -> None: self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) self.authentication_policy = kwargs.get("authentication_policy") + if self.credential and not self.authentication_policy: + self.authentication_policy = self._infer_policy(**kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py deleted file mode 100644 index ebd2f4414142..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_search_index_client.py +++ /dev/null @@ -1,102 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
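# ------------------------------------------------------------------
# Illustration only: with a token credential, the configuration above
# applies AsyncBearerTokenCredentialPolicy using the
# "https://search.azure.com/.default" scope. DefaultAzureCredential
# comes from the separate azure-identity package; the endpoint is a
# placeholder.
from azure.identity.aio import DefaultAzureCredential
from azure.search.documents._generated.aio import SearchClient

client = SearchClient(
    endpoint="https://<service>.search.windows.net",
    credential=DefaultAzureCredential(),
)
# ------------------------------------------------------------------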
-# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, Awaitable -from typing_extensions import Self - -from azure.core import AsyncPipelineClient -from azure.core.pipeline import policies -from azure.core.rest import AsyncHttpResponse, HttpRequest - -from .. import models as _models -from .._serialization import Deserializer, Serializer -from ._configuration import SearchIndexClientConfiguration -from .operations import DocumentsOperations - - -class SearchIndexClient: - """Client that can be used to query an index and upload, merge, or delete documents. - - :ivar documents: DocumentsOperations operations - :vartype documents: azure.search.documents.aio.operations.DocumentsOperations - :param endpoint: The endpoint URL of the search service. Required. - :type endpoint: str - :param index_name: The name of the index. Required. - :type index_name: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, endpoint: str, index_name: str, **kwargs: Any - ) -> None: - _endpoint = "{endpoint}/indexes('{indexName}')" - self._config = SearchIndexClientConfiguration(endpoint=endpoint, index_name=index_name, **kwargs) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.documents = DocumentsOperations(self._client, self._config, self._serialize, self._deserialize) - - def _send_request( - self, request: HttpRequest, *, stream: bool = False, **kwargs: Any - ) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.AsyncHttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> Self: - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client.__aexit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_vendor.py new file mode 100644 index 000000000000..ac21cbde82f2 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_vendor.py @@ -0,0 +1,57 @@ +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- + +from abc import ABC +from typing import Optional, TYPE_CHECKING + +from azure.core import MatchConditions + +from ._configuration import SearchClientConfiguration + +if TYPE_CHECKING: + from azure.core import AsyncPipelineClient + + from .._serialization import Deserializer, Serializer + + +class SearchClientMixinABC(ABC): + """DO NOT use this class. 
It is for internal typing use only.""" + + _client: "AsyncPipelineClient" + _config: SearchClientConfiguration + _serialize: "Serializer" + _deserialize: "Deserializer" + + +def quote_etag(etag: Optional[str]) -> Optional[str]: + if not etag or etag == "*": + return etag + if etag.startswith("W/"): + return etag + if etag.startswith('"') and etag.endswith('"'): + return etag + if etag.startswith("'") and etag.endswith("'"): + return etag + return '"' + etag + '"' + + +def prep_if_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfNotModified: + if_match = quote_etag(etag) if etag else None + return if_match + if match_condition == MatchConditions.IfPresent: + return "*" + return None + + +def prep_if_none_match(etag: Optional[str], match_condition: Optional[MatchConditions]) -> Optional[str]: + if match_condition == MatchConditions.IfModified: + if_none_match = quote_etag(etag) if etag else None + return if_none_match + if match_condition == MatchConditions.IfMissing: + return "*" + return None diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py index c7d64959c608..cad45d7952dd 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -10,14 +12,28 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._documents_operations import DocumentsOperations # type: ignore +from ._operations import DataSourcesOperationsOperations # type: ignore +from ._operations import IndexersOperationsOperations # type: ignore +from ._operations import SkillsetsOperationsOperations # type: ignore +from ._operations import SynonymMapsOperationsOperations # type: ignore +from ._operations import IndexesOperationsOperations # type: ignore +from ._operations import AliasesOperationsOperations # type: ignore +from ._operations import DocumentsOperationsOperations # type: ignore +from ._operations import SearchClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ - "DocumentsOperations", + "DataSourcesOperationsOperations", + "IndexersOperationsOperations", + "SkillsetsOperationsOperations", + "SynonymMapsOperationsOperations", + "IndexesOperationsOperations", + "AliasesOperationsOperations", + "DocumentsOperationsOperations", + "SearchClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py deleted file mode 100644 index 3c7142a2512c..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_documents_operations.py +++ /dev/null @@ -1,1103 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ...operations._documents_operations import ( - build_autocomplete_get_request, - build_autocomplete_post_request, - build_count_request, - build_get_request, - build_index_request, - build_search_get_request, - build_search_post_request, - build_suggest_get_request, - build_suggest_post_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class DocumentsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. 
- - Instead, you should access the following operations through - :class:`~azure.search.documents.aio.SearchIndexClient`'s - :attr:`documents` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def count(self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any) -> int: - """Queries the number of documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Count-Documents - - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: int or the result of cls(response) - :rtype: int - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[int] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_count_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("int", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def search_get( - self, - search_text: Optional[str] = None, - search_options: Optional[_models.SearchOptions] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_text: A full-text search query expression; Use "*" or omit this parameter to - match all documents. Default value is None. - :type search_text: str - :param search_options: Parameter group. Default value is None. 
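# ------------------------------------------------------------------
# Illustration only: at the public-API level, the Count-Documents
# operation above is exposed by the hand-written client as
# SearchClient.get_document_count (synchronous sketch; endpoint, index
# name, and key are placeholders).
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient(
    endpoint="https://<service>.search.windows.net",
    index_name="hotels-sample-index",
    credential=AzureKeyCredential("<api-key>"),
)
count = client.get_document_count()
# ------------------------------------------------------------------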
- :type search_options: ~azure.search.documents.models.SearchOptions - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) - - _include_total_result_count = None - _facets = None - _filter = None - _highlight_fields = None - _highlight_post_tag = None - _highlight_pre_tag = None - _minimum_coverage = None - _order_by = None - _query_type = None - _scoring_parameters = None - _scoring_profile = None - _search_fields = None - _search_mode = None - _scoring_statistics = None - _session_id = None - _select = None - _skip = None - _top = None - _x_ms_client_request_id = None - _semantic_configuration = None - _semantic_error_handling = None - _semantic_max_wait_in_milliseconds = None - _answers = None - _captions = None - _semantic_query = None - _query_rewrites = None - _debug = None - _query_language = None - _speller = None - _semantic_fields = None - if search_options is not None: - _answers = search_options.answers - _captions = search_options.captions - _debug = search_options.debug - _facets = search_options.facets - _filter = search_options.filter - _highlight_fields = search_options.highlight_fields - _highlight_post_tag = search_options.highlight_post_tag - _highlight_pre_tag = search_options.highlight_pre_tag - _include_total_result_count = search_options.include_total_result_count - _minimum_coverage = search_options.minimum_coverage - _order_by = search_options.order_by - _query_language = search_options.query_language - _query_rewrites = search_options.query_rewrites - _query_type = search_options.query_type - _scoring_parameters = search_options.scoring_parameters - _scoring_profile = search_options.scoring_profile - _scoring_statistics = search_options.scoring_statistics - _search_fields = search_options.search_fields - _search_mode = search_options.search_mode - _select = search_options.select - _semantic_configuration = search_options.semantic_configuration - _semantic_error_handling = search_options.semantic_error_handling - _semantic_fields = search_options.semantic_fields - _semantic_max_wait_in_milliseconds = search_options.semantic_max_wait_in_milliseconds - _semantic_query = search_options.semantic_query - _session_id = search_options.session_id - _skip = search_options.skip - _speller = search_options.speller - _top = search_options.top - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_search_get_request( - search_text=search_text, - include_total_result_count=_include_total_result_count, - facets=_facets, - filter=_filter, - highlight_fields=_highlight_fields, - highlight_post_tag=_highlight_post_tag, - highlight_pre_tag=_highlight_pre_tag, - minimum_coverage=_minimum_coverage, - order_by=_order_by, - query_type=_query_type, - 
scoring_parameters=_scoring_parameters, - scoring_profile=_scoring_profile, - search_fields=_search_fields, - search_mode=_search_mode, - scoring_statistics=_scoring_statistics, - session_id=_session_id, - select=_select, - skip=_skip, - top=_top, - x_ms_client_request_id=_x_ms_client_request_id, - semantic_configuration=_semantic_configuration, - semantic_error_handling=_semantic_error_handling, - semantic_max_wait_in_milliseconds=_semantic_max_wait_in_milliseconds, - answers=_answers, - captions=_captions, - semantic_query=_semantic_query, - query_rewrites=_query_rewrites, - debug=_debug, - query_language=_query_language, - speller=_speller, - semantic_fields=_semantic_fields, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def search_post( - self, - search_request: _models.SearchRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_request: The definition of the Search request. Required. - :type search_request: ~azure.search.documents.models.SearchRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def search_post( - self, - search_request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_request: The definition of the Search request. Required. - :type search_request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
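# ------------------------------------------------------------------
# Illustration only: the Search-Documents operation above backs
# SearchClient.search in the hand-written client. Field names are
# placeholders; `client` is the instance constructed in the
# get_document_count sketch above.
results = client.search(
    search_text="luxury",
    filter="rating ge 4",
    select=["hotelName", "rating"],
    top=5,
)
for doc in results:
    print(doc["hotelName"], doc["rating"])
# ------------------------------------------------------------------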
- :paramtype content_type: str - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def search_post( - self, - search_request: Union[_models.SearchRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_request: The definition of the Search request. Is either a SearchRequest type or - a IO[bytes] type. Required. - :type search_request: ~azure.search.documents.models.SearchRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(search_request, (IOBase, bytes)): - _content = search_request - else: - _json = self._serialize.body(search_request, "SearchRequest") - - _request = build_search_post_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get( - self, - key: str, - selected_fields: Optional[List[str]] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> 
Dict[str, Any]: - """Retrieves a document from the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/lookup-document - - :param key: The key of the document to retrieve. Required. - :type key: str - :param selected_fields: List of field names to retrieve for the document; Any field not - retrieved will be missing from the returned document. Default value is None. - :type selected_fields: list[str] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: dict mapping str to any or the result of cls(response) - :rtype: dict[str, any] - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[Dict[str, Any]] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - key=key, - selected_fields=selected_fields, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("{object}", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def suggest_get( - self, - search_text: str, - suggester_name: str, - suggest_options: Optional[_models.SuggestOptions] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param search_text: The search text to use to suggest documents. Must be at least 1 character, - and no more than 100 characters. Required. - :type search_text: str - :param suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :type suggester_name: str - :param suggest_options: Parameter group. Default value is None. - :type suggest_options: ~azure.search.documents.models.SuggestOptions - :param request_options: Parameter group. Default value is None. 
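# ------------------------------------------------------------------
# Illustration only: the Lookup-Document operation above corresponds to
# SearchClient.get_document in the hand-written client. Key and field
# names are placeholders; `client` as in the earlier sketch.
doc = client.get_document(key="1", selected_fields=["hotelName", "rating"])
# ------------------------------------------------------------------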
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) - - _filter = None - _use_fuzzy_matching = None - _highlight_post_tag = None - _highlight_pre_tag = None - _minimum_coverage = None - _order_by = None - _search_fields = None - _select = None - _top = None - _x_ms_client_request_id = None - if suggest_options is not None: - _filter = suggest_options.filter - _highlight_post_tag = suggest_options.highlight_post_tag - _highlight_pre_tag = suggest_options.highlight_pre_tag - _minimum_coverage = suggest_options.minimum_coverage - _order_by = suggest_options.order_by - _search_fields = suggest_options.search_fields - _select = suggest_options.select - _top = suggest_options.top - _use_fuzzy_matching = suggest_options.use_fuzzy_matching - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_suggest_get_request( - search_text=search_text, - suggester_name=suggester_name, - filter=_filter, - use_fuzzy_matching=_use_fuzzy_matching, - highlight_post_tag=_highlight_post_tag, - highlight_pre_tag=_highlight_pre_tag, - minimum_coverage=_minimum_coverage, - order_by=_order_by, - search_fields=_search_fields, - select=_select, - top=_top, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def suggest_post( - self, - suggest_request: _models.SuggestRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param suggest_request: The Suggest request. Required. 
- :type suggest_request: ~azure.search.documents.models.SuggestRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def suggest_post( - self, - suggest_request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param suggest_request: The Suggest request. Required. - :type suggest_request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def suggest_post( - self, - suggest_request: Union[_models.SuggestRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param suggest_request: The Suggest request. Is either a SuggestRequest type or a IO[bytes] - type. Required. - :type suggest_request: ~azure.search.documents.models.SuggestRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(suggest_request, (IOBase, bytes)): - _content = suggest_request - else: - _json = self._serialize.body(suggest_request, "SuggestRequest") - - _request = build_suggest_post_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def index( - self, - batch: _models.IndexBatch, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.IndexDocumentsResult: - """Sends a batch of document write actions to the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/addupdate-or-delete-documents - - :param batch: The batch of index actions. Required. - :type batch: ~azure.search.documents.models.IndexBatch - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :return: IndexDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.IndexDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def index( - self, - batch: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.IndexDocumentsResult: - """Sends a batch of document write actions to the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/addupdate-or-delete-documents - - :param batch: The batch of index actions. Required. - :type batch: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: IndexDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.IndexDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def index( - self, - batch: Union[_models.IndexBatch, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.IndexDocumentsResult: - """Sends a batch of document write actions to the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/addupdate-or-delete-documents - - :param batch: The batch of index actions. Is either a IndexBatch type or a IO[bytes] type. - Required. - :type batch: ~azure.search.documents.models.IndexBatch or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: IndexDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.IndexDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.IndexDocumentsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(batch, (IOBase, bytes)): - _content = batch - else: - _json = self._serialize.body(batch, "IndexBatch") - - _request = build_index_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 207]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("IndexDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def autocomplete_get( - self, - search_text: str, - suggester_name: str, - request_options: Optional[_models.RequestOptions] = None, - autocomplete_options: Optional[_models.AutocompleteOptions] = None, - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param search_text: The incomplete term which should be auto-completed. Required. - :type search_text: str - :param suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :type suggester_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :param autocomplete_options: Parameter group. Default value is None. 
- :type autocomplete_options: ~azure.search.documents.models.AutocompleteOptions - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - _autocomplete_mode = None - _filter = None - _use_fuzzy_matching = None - _highlight_post_tag = None - _highlight_pre_tag = None - _minimum_coverage = None - _search_fields = None - _top = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - if autocomplete_options is not None: - _autocomplete_mode = autocomplete_options.autocomplete_mode - _filter = autocomplete_options.filter - _highlight_post_tag = autocomplete_options.highlight_post_tag - _highlight_pre_tag = autocomplete_options.highlight_pre_tag - _minimum_coverage = autocomplete_options.minimum_coverage - _search_fields = autocomplete_options.search_fields - _top = autocomplete_options.top - _use_fuzzy_matching = autocomplete_options.use_fuzzy_matching - - _request = build_autocomplete_get_request( - search_text=search_text, - suggester_name=suggester_name, - x_ms_client_request_id=_x_ms_client_request_id, - autocomplete_mode=_autocomplete_mode, - filter=_filter, - use_fuzzy_matching=_use_fuzzy_matching, - highlight_post_tag=_highlight_post_tag, - highlight_pre_tag=_highlight_pre_tag, - minimum_coverage=_minimum_coverage, - search_fields=_search_fields, - top=_top, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def autocomplete_post( - self, - autocomplete_request: _models.AutocompleteRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param autocomplete_request: The definition of the Autocomplete request. Required. - :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def autocomplete_post( - self, - autocomplete_request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param autocomplete_request: The definition of the Autocomplete request. Required. - :type autocomplete_request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def autocomplete_post( - self, - autocomplete_request: Union[_models.AutocompleteRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param autocomplete_request: The definition of the Autocomplete request. Is either a - AutocompleteRequest type or a IO[bytes] type. Required. - :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(autocomplete_request, (IOBase, bytes)): - _content = autocomplete_request - else: - _json = self._serialize.body(autocomplete_request, "AutocompleteRequest") - - _request = build_autocomplete_post_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py new file mode 100644 index 000000000000..a6af0faa4564 --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py @@ -0,0 +1,5435 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
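For orientation, the async document operations being removed in the hunk above (get, suggest, index, autocomplete) are normally reached through the public azure.search.documents.aio.SearchClient rather than this generated layer. A minimal usage sketch, assuming a hypothetical service endpoint, API key, index name, and suggester name (none of these values come from the patch itself):

    import asyncio

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents.aio import SearchClient

    async def main() -> None:
        # Hypothetical endpoint, key, index, and suggester, used purely for illustration.
        client = SearchClient(
            endpoint="https://<service>.search.windows.net",
            index_name="hotels-sample-index",
            credential=AzureKeyCredential("<api-key>"),
        )
        async with client:
            # Look up a single document by key, restricting the fields returned.
            doc = await client.get_document(key="1", selected_fields=["hotelName"])
            # Suggestions and autocomplete both require a suggester defined on the index.
            suggestions = await client.suggest(search_text="lux", suggester_name="sg")
            completions = await client.autocomplete(search_text="lux", suggester_name="sg")
            # Send a batch of write actions (upload in this case) to the index.
            results = await client.upload_documents(
                documents=[{"hotelId": "1", "hotelName": "Example Hotel"}]
            )
            print(doc, suggestions, completions, results[0].succeeded)

    asyncio.run(main())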
+# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, AsyncIterable, Callable, Dict, IO, List, Literal, Optional, TypeVar, Union, overload +import urllib.parse + +from azure.core import MatchConditions +from azure.core.async_paging import AsyncItemPaged, AsyncList +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceModifiedError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.pipeline import PipelineResponse +from azure.core.rest import AsyncHttpResponse, HttpRequest +from azure.core.tracing.decorator import distributed_trace +from azure.core.tracing.decorator_async import distributed_trace_async +from azure.core.utils import case_insensitive_dict + +from ... import models as _models +from ..._model_base import SdkJSONEncoder, _deserialize +from ..._validation import api_version_validation +from ...operations._operations import ( + build_aliases_operations_create_or_update_request, + build_aliases_operations_create_request, + build_aliases_operations_delete_request, + build_aliases_operations_get_request, + build_aliases_operations_list_request, + build_data_sources_operations_create_or_update_request, + build_data_sources_operations_create_request, + build_data_sources_operations_delete_request, + build_data_sources_operations_get_request, + build_data_sources_operations_list_request, + build_documents_operations_autocomplete_get_request, + build_documents_operations_autocomplete_post_request, + build_documents_operations_count_request, + build_documents_operations_get_request, + build_documents_operations_index_request, + build_documents_operations_search_get_request, + build_documents_operations_search_post_request, + build_documents_operations_suggest_get_request, + build_documents_operations_suggest_post_request, + build_indexers_operations_create_or_update_request, + build_indexers_operations_create_request, + build_indexers_operations_delete_request, + build_indexers_operations_get_request, + build_indexers_operations_get_status_request, + build_indexers_operations_list_request, + build_indexers_operations_reset_docs_request, + build_indexers_operations_reset_request, + build_indexers_operations_run_request, + build_indexes_operations_analyze_request, + build_indexes_operations_create_or_update_request, + build_indexes_operations_create_request, + build_indexes_operations_delete_request, + build_indexes_operations_get_request, + build_indexes_operations_get_statistics_request, + build_indexes_operations_list_request, + build_search_get_service_statistics_request, + build_skillsets_operations_create_or_update_request, + build_skillsets_operations_create_request, + build_skillsets_operations_delete_request, + build_skillsets_operations_get_request, + build_skillsets_operations_list_request, + build_skillsets_operations_reset_skills_request, + build_synonym_maps_operations_create_or_update_request, + build_synonym_maps_operations_create_request, + build_synonym_maps_operations_delete_request, + build_synonym_maps_operations_get_request, + build_synonym_maps_operations_list_request, +) +from .._vendor import SearchClientMixinABC + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +T = 
TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] + + +class DataSourcesOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.aio.SearchClient`'s + :attr:`data_sources_operations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create_or_update( + self, + data_source_name: str, + data_source: _models.SearchIndexerDataSource, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + data_source_name: str, + data_source: JSON, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Required. + :type data_source: JSON + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + data_source_name: str, + data_source: IO[bytes], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Required. + :type data_source: IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + params_added_on={"2024-11-01-preview": ["skip_indexer_reset_requirement_for_cache"]}, + ) + async def create_or_update( + self, + data_source_name: str, + data_source: Union[_models.SearchIndexerDataSource, JSON, IO[bytes]], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Is one of the + following types: SearchIndexerDataSource, JSON, IO[bytes] Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource or JSON or IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. 
The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(data_source, (IOBase, bytes)): + _content = data_source + else: + _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_data_sources_operations_create_or_update_request( + data_source_name=data_source_name, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerDataSource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete( + self, + data_source_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a datasource. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_data_sources_operations_delete_request( + data_source_name=data_source_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndexerDataSource: + """Retrieves a datasource definition. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :return: SearchIndexerDataSource. 
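The create_or_update and delete operations above translate the caller's etag/match_condition pair into a per-status error map so that a failed precondition (HTTP 412) surfaces as the most specific azure.core exception. A distilled sketch of that mapping, pulled out here only for illustration:

    from azure.core import MatchConditions
    from azure.core.exceptions import (
        ResourceExistsError,
        ResourceModifiedError,
        ResourceNotFoundError,
    )

    def precondition_error_map(match_condition):
        # Mirrors the branching used by the generated operations: HTTP 412 is
        # reported differently depending on which precondition was requested.
        error_map = {}
        if match_condition == MatchConditions.IfNotModified:
            error_map[412] = ResourceModifiedError   # If-Match failed: resource was changed
        elif match_condition == MatchConditions.IfPresent:
            error_map[412] = ResourceNotFoundError   # required resource does not exist
        elif match_condition == MatchConditions.IfMissing:
            error_map[412] = ResourceExistsError     # resource unexpectedly already exists
        return error_map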
The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) + + _request = build_data_sources_operations_get_request( + data_source_name=data_source_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerDataSource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListDataSourcesResult: + """Lists all datasources available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListDataSourcesResult. 
The ListDataSourcesResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListDataSourcesResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListDataSourcesResult] = kwargs.pop("cls", None) + + _request = build_data_sources_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListDataSourcesResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, data_source: _models.SearchIndexerDataSource, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, data_source: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Required. + :type data_source: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, data_source: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Required. 
+ :type data_source: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create( + self, data_source: Union[_models.SearchIndexerDataSource, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Is one of the following types: + SearchIndexerDataSource, JSON, IO[bytes] Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource or JSON or IO[bytes] + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(data_source, (IOBase, bytes)): + _content = data_source + else: + _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_data_sources_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerDataSource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class IndexersOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.aio.SearchClient`'s + :attr:`indexers_operations` attribute. 
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def reset(self, indexer_name: str, **kwargs: Any) -> None: + """Resets the change tracking state associated with an indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexers_operations_reset_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + async def reset_docs( + self, + indexer_name: str, + keys_or_ids: Optional[_models.DocumentKeysOrIds] = None, + *, + overwrite: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Default value is None. + :type keys_or_ids: ~azure.search.documents.models.DocumentKeysOrIds + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. + :paramtype overwrite: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def reset_docs( + self, + indexer_name: str, + keys_or_ids: Optional[JSON] = None, + *, + overwrite: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. 
+ + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Default value is None. + :type keys_or_ids: JSON + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. + :paramtype overwrite: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def reset_docs( + self, + indexer_name: str, + keys_or_ids: Optional[IO[bytes]] = None, + *, + overwrite: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Default value is None. + :type keys_or_ids: IO[bytes] + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. + :paramtype overwrite: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": [ + "api_version", + "overwrite", + "client_request_id", + "indexer_name", + "content_type", + "accept", + ] + }, + ) + async def reset_docs( + self, + indexer_name: str, + keys_or_ids: Optional[Union[_models.DocumentKeysOrIds, JSON, IO[bytes]]] = None, + *, + overwrite: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Is one of the following types: DocumentKeysOrIds, + JSON, IO[bytes] Default value is None. + :type keys_or_ids: ~azure.search.documents.models.DocumentKeysOrIds or JSON or IO[bytes] + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. 
+ :paramtype overwrite: bool + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(keys_or_ids, (IOBase, bytes)): + _content = keys_or_ids + else: + if keys_or_ids is not None: + _content = json.dumps(keys_or_ids, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_indexers_operations_reset_docs_request( + indexer_name=indexer_name, + overwrite=overwrite, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def run(self, indexer_name: str, **kwargs: Any) -> None: + """Runs an indexer on-demand. + + :param indexer_name: The name of the indexer. Required. 
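The reset_docs body above also shows the request-body convention used throughout the regenerated operations: stream or bytes payloads pass through untouched, while typed models and plain dicts are JSON-encoded with the SDK encoder, and an absent optional body is simply omitted. A simplified sketch of that branch, assuming SdkJSONEncoder is imported from the generated _model_base module added by this patch:

    import json
    from io import IOBase

    # Assumption: SdkJSONEncoder comes from the generated module introduced in this change.
    from azure.search.documents._generated._model_base import SdkJSONEncoder

    def prepare_request_content(payload):
        # Streams and raw bytes are sent as-is; anything else is serialized to JSON,
        # dropping read-only properties; a None body stays None (no content sent).
        if isinstance(payload, (IOBase, bytes)):
            return payload
        if payload is None:
            return None
        return json.dumps(payload, cls=SdkJSONEncoder, exclude_readonly=True)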
+ :type indexer_name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexers_operations_run_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + async def create_or_update( + self, + indexer_name: str, + indexer: _models.SearchIndexer, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Required. + :type indexer: ~azure.search.documents.models.SearchIndexer + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. 
The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + indexer_name: str, + indexer: JSON, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Required. + :type indexer: JSON + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + indexer_name: str, + indexer: IO[bytes], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Required. + :type indexer: IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. 
The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + params_added_on={ + "2024-11-01-preview": [ + "skip_indexer_reset_requirement_for_cache", + "disable_cache_reprocessing_change_detection", + ] + }, + ) + async def create_or_update( + self, + indexer_name: str, + indexer: Union[_models.SearchIndexer, JSON, IO[bytes]], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Is one of the following + types: SearchIndexer, JSON, IO[bytes] Required. + :type indexer: ~azure.search.documents.models.SearchIndexer or JSON or IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. 
The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(indexer, (IOBase, bytes)): + _content = indexer + else: + _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexers_operations_create_or_update_request( + indexer_name=indexer_name, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, + disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexer, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete( + self, + indexer_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes an indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
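# ---------------------------------------------------------------------------
# Editor's note: illustrative usage sketch only, not part of the generated
# patch. The create_or_update implementation above maps 412 responses to
# ResourceModifiedError when MatchConditions.IfNotModified is supplied, and it
# pops a required ``prefer`` keyword typed Literal["return=representation"].
# The sketch assumes the generated async client exposes this operation group
# as an ``indexers_operations`` attribute (attribute name assumed here,
# mirroring the ``skillsets_operations`` docstring later in this file).
from azure.core import MatchConditions
from azure.core.exceptions import ResourceModifiedError


async def update_indexer_if_unchanged(client, indexer: dict):
    # Only overwrite the server-side definition if our copy is still current.
    try:
        return await client.indexers_operations.create_or_update(
            indexer["name"],
            indexer,  # SearchIndexer model, JSON dict, or IO[bytes] per the overloads above
            etag=indexer.get("@odata.etag"),
            match_condition=MatchConditions.IfNotModified,
            prefer="return=representation",  # popped as a required keyword by the implementation
        )
    except ResourceModifiedError:
        # Another writer changed the indexer since it was fetched.
        raise
# ---------------------------------------------------------------------------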
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexers_operations_delete_request( + indexer_name=indexer_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: + """Retrieves an indexer definition. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :return: SearchIndexer. 
The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) + + _request = build_indexers_operations_get_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexer, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListIndexersResult: + """Lists all indexers available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListIndexersResult. 
The ListIndexersResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListIndexersResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListIndexersResult] = kwargs.pop("cls", None) + + _request = build_indexers_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListIndexersResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, indexer: _models.SearchIndexer, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Required. + :type indexer: ~azure.search.documents.models.SearchIndexer + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, indexer: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Required. + :type indexer: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, indexer: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Required. + :type indexer: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
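# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the generated patch. As the
# ``create`` overloads above show, the body may be a SearchIndexer model, a
# plain JSON-compatible dict, or a binary stream; non-stream bodies are
# serialized with SdkJSONEncoder(exclude_readonly=True), so a dict can be
# passed directly. Client/attribute access is assumed as in the earlier note;
# the REST property names below follow the service wire format.
async def create_indexer_from_dict(client):
    indexer_definition = {
        "name": "hotels-indexer",
        "dataSourceName": "hotels-datasource",
        "targetIndexName": "hotels-index",
    }
    return await client.indexers_operations.create(indexer_definition)
# ---------------------------------------------------------------------------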
+ :paramtype content_type: str + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create( + self, indexer: Union[_models.SearchIndexer, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Is one of the following types: + SearchIndexer, JSON, IO[bytes] Required. + :type indexer: ~azure.search.documents.models.SearchIndexer or JSON or IO[bytes] + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(indexer, (IOBase, bytes)): + _content = indexer + else: + _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexers_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexer, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexerStatus: + """Returns the current status and execution history of an indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :return: SearchIndexerStatus. 
The SearchIndexerStatus is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexerStatus] = kwargs.pop("cls", None) + + _request = build_indexers_operations_get_status_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class SkillsetsOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.aio.SearchClient`'s + :attr:`skillsets_operations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create_or_update( + self, + skillset_name: str, + skillset: _models.SearchIndexerSkillset, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. 
+ :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + skillset_name: str, + skillset: JSON, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Required. + :type skillset: JSON + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + skillset_name: str, + skillset: IO[bytes], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Required. + :type skillset: IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. 
+ :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + params_added_on={ + "2024-11-01-preview": [ + "skip_indexer_reset_requirement_for_cache", + "disable_cache_reprocessing_change_detection", + ] + }, + ) + async def create_or_update( + self, + skillset_name: str, + skillset: Union[_models.SearchIndexerSkillset, JSON, IO[bytes]], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Is one of the following types: SearchIndexerSkillset, JSON, IO[bytes] Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset or JSON or IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. 
The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(skillset, (IOBase, bytes)): + _content = skillset + else: + _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_skillsets_operations_create_or_update_request( + skillset_name=skillset_name, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, + disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerSkillset, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete( + self, + skillset_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
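# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the generated patch. The
# ``api_version_validation`` decorator above only permits
# ``skip_indexer_reset_requirement_for_cache`` and
# ``disable_cache_reprocessing_change_detection`` on api-versions that include
# them (2024-11-01-preview per params_added_on). Assuming the same client
# conventions as the earlier notes, with ``skillsets_operations`` taken from
# the class docstring above:
async def update_skillset_preserving_cache(client, skillset: dict):
    # Update the skillset definition without forcing an enrichment-cache reset
    # or triggering cache reprocessing change detection.
    return await client.skillsets_operations.create_or_update(
        skillset["name"],
        skillset,
        skip_indexer_reset_requirement_for_cache=True,
        disable_cache_reprocessing_change_detection=True,
        prefer="return=representation",  # popped as a required keyword by the implementation
    )
# ---------------------------------------------------------------------------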
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_skillsets_operations_delete_request( + skillset_name=skillset_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerSkillset: + """Retrieves a skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :return: SearchIndexerSkillset. 
The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) + + _request = build_skillsets_operations_get_request( + skillset_name=skillset_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerSkillset, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSkillsetsResult: + """List all skillsets in a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListSkillsetsResult. 
The ListSkillsetsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListSkillsetsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListSkillsetsResult] = kwargs.pop("cls", None) + + _request = build_skillsets_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListSkillsetsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, skillset: _models.SearchIndexerSkillset, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. + + :param skillset: The skillset containing one or more skills to create in a search service. + Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, skillset: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. + + :param skillset: The skillset containing one or more skills to create in a search service. + Required. + :type skillset: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, skillset: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. 
+ + :param skillset: The skillset containing one or more skills to create in a search service. + Required. + :type skillset: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create( + self, skillset: Union[_models.SearchIndexerSkillset, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. + + :param skillset: The skillset containing one or more skills to create in a search service. Is + one of the following types: SearchIndexerSkillset, JSON, IO[bytes] Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset or JSON or IO[bytes] + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(skillset, (IOBase, bytes)): + _content = skillset + else: + _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_skillsets_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerSkillset, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def reset_skills( + self, + skillset_name: str, + skill_names: _models.SkillNames, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. 
Required. + :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Required. + :type skill_names: ~azure.search.documents.models.SkillNames + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def reset_skills( + self, skillset_name: str, skill_names: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Required. + :type skill_names: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def reset_skills( + self, skillset_name: str, skill_names: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Required. + :type skill_names: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": ["api_version", "client_request_id", "skillset_name", "content_type", "accept"] + }, + ) + async def reset_skills( + self, skillset_name: str, skill_names: Union[_models.SkillNames, JSON, IO[bytes]], **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Is one of the following types: SkillNames, JSON, IO[bytes] Required. 
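# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the generated patch.
# ``reset_skills`` is gated by api_version_validation to api-versions that
# include it (method_added_on="2024-11-01-preview"), and the body may be a
# SkillNames model, a JSON dict, or a stream. A dict form is shown here; the
# wire property name "skillNames" and the skill names themselves are
# assumptions for illustration.
async def rerun_selected_skills(client):
    await client.skillsets_operations.reset_skills(
        "hotels-skillset",
        {"skillNames": ["ocr-skill", "key-phrase-skill"]},
    )
# ---------------------------------------------------------------------------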
+ :type skill_names: ~azure.search.documents.models.SkillNames or JSON or IO[bytes] + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(skill_names, (IOBase, bytes)): + _content = skill_names + else: + _content = json.dumps(skill_names, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_skillsets_operations_reset_skills_request( + skillset_name=skillset_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + +class SynonymMapsOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.aio.SearchClient`'s + :attr:`synonym_maps_operations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create_or_update( + self, + synonym_map_name: str, + synonym_map: _models.SynonymMap, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + synonym_map_name: str, + synonym_map: JSON, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Required. + :type synonym_map: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + synonym_map_name: str, + synonym_map: IO[bytes], + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Required. + :type synonym_map: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_update( + self, + synonym_map_name: str, + synonym_map: Union[_models.SynonymMap, JSON, IO[bytes]], + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Is one of the + following types: SynonymMap, JSON, IO[bytes] Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap or JSON or IO[bytes] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(synonym_map, (IOBase, bytes)): + _content = synonym_map + else: + _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_synonym_maps_operations_create_or_update_request( + synonym_map_name=synonym_map_name, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SynonymMap, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete( + self, + synonym_map_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a synonym map. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
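# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch, not part of the generated patch. Like
# the other create_or_update methods, the implementation above serializes a
# non-stream body with SdkJSONEncoder and pops a required
# ``prefer="return=representation"`` keyword. A dict body in the service's
# "solr" synonym format is shown (wire property names assumed), using the
# ``synonym_maps_operations`` attribute named in the class docstring above.
async def upsert_synonym_map(client):
    synonym_map = {
        "name": "hotel-synonyms",
        "format": "solr",
        "synonyms": "usa, united states, united states of america\nhotel, motel",
    }
    return await client.synonym_maps_operations.create_or_update(
        synonym_map["name"],
        synonym_map,
        prefer="return=representation",
    )
# ---------------------------------------------------------------------------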
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_synonym_maps_operations_delete_request( + synonym_map_name=synonym_map_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: + """Retrieves a synonym map definition. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :return: SynonymMap. 
The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) + + _request = build_synonym_maps_operations_get_request( + synonym_map_name=synonym_map_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SynonymMap, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSynonymMapsResult: + """Lists all synonym maps available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListSynonymMapsResult. 
The ListSynonymMapsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListSynonymMapsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListSynonymMapsResult] = kwargs.pop("cls", None) + + _request = build_synonym_maps_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListSynonymMapsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def create( + self, synonym_map: _models.SynonymMap, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, synonym_map: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Required. + :type synonym_map: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, synonym_map: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Required. + :type synonym_map: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create( + self, synonym_map: Union[_models.SynonymMap, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Is one of the following types: + SynonymMap, JSON, IO[bytes] Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap or JSON or IO[bytes] + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(synonym_map, (IOBase, bytes)): + _content = synonym_map + else: + _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_synonym_maps_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SynonymMap, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class IndexesOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.aio.SearchClient`'s + :attr:`indexes_operations` attribute. 
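+
+    For example, a minimal sketch (assuming an already-constructed async
+    ``SearchClient`` named ``client``; the index name is a placeholder)::
+
+        index = await client.indexes_operations.get("my-index")
+        print(index["name"])
+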
+ """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create( + self, index: _models.SearchIndex, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Required. + :type index: ~azure.search.documents.models.SearchIndex + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, index: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Required. + :type index: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, index: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Required. + :type index: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwargs: Any) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Is one of the following types: + SearchIndex, JSON, IO[bytes] Required. + :type index: ~azure.search.documents.models.SearchIndex or JSON or IO[bytes] + :return: SearchIndex. 
The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(index, (IOBase, bytes)): + _content = index + else: + _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexes_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndex, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.SearchIndex"]: + """Lists all indexes available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. 
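+
+        A minimal paging sketch (assuming an already-constructed async
+        ``SearchClient`` named ``client``)::
+
+            async for index in client.indexes_operations.list(_select="name"):
+                print(index["name"])
+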
+ :paramtype _select: str + :return: An iterator like instance of SearchIndex + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.models.SearchIndex] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.SearchIndex]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_indexes_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.SearchIndex], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @overload + async def create_or_update( + self, + index_name: str, + index: _models.SearchIndex, + *, + allow_index_downtime: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Required. + :type index: ~azure.search.documents.models.SearchIndex + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. 
Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + index_name: str, + index: JSON, + *, + allow_index_downtime: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Required. + :type index: JSON + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + index_name: str, + index: IO[bytes], + *, + allow_index_downtime: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Required. + :type index: IO[bytes] + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def create_or_update( + self, + index_name: str, + index: Union[_models.SearchIndex, JSON, IO[bytes]], + *, + allow_index_downtime: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Is one of the following types: + SearchIndex, JSON, IO[bytes] Required. + :type index: ~azure.search.documents.models.SearchIndex or JSON or IO[bytes] + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. 
The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(index, (IOBase, bytes)): + _content = index + else: + _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexes_operations_create_or_update_request( + index_name=index_name, + allow_index_downtime=allow_index_downtime, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndex, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def delete( + self, + index_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a search index and all the documents it contains. This operation is + permanent, with no recovery option. Make sure you have a master copy of your + index definition, data ingestion code, and a backup of the primary data source + in case you need to re-build the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
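+
+        A minimal sketch of a conditional delete (assuming an already-constructed
+        async ``SearchClient`` named ``client`` and an ``etag`` captured from a
+        previously retrieved index)::
+
+            from azure.core import MatchConditions
+
+            await client.indexes_operations.delete(
+                "my-index", etag=etag, match_condition=MatchConditions.IfNotModified
+            )
+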
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexes_operations_delete_request( + index_name=index_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + async def get(self, index_name: str, **kwargs: Any) -> _models.SearchIndex: + """Retrieves an index definition. + + :param index_name: The name of the index. Required. + :type index_name: str + :return: SearchIndex. 
The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) + + _request = build_indexes_operations_get_request( + index_name=index_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndex, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get_statistics(self, index_name: str, **kwargs: Any) -> _models.GetIndexStatisticsResult: + """Returns statistics for the given index, including a document count and storage + usage. + + :param index_name: The name of the index. Required. + :type index_name: str + :return: GetIndexStatisticsResult. 
The GetIndexStatisticsResult is compatible with + MutableMapping + :rtype: ~azure.search.documents.models.GetIndexStatisticsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.GetIndexStatisticsResult] = kwargs.pop("cls", None) + + _request = build_indexes_operations_get_statistics_request( + index_name=index_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.GetIndexStatisticsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def analyze( + self, index_name: str, request: _models.AnalyzeRequest, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Required. + :type request: ~azure.search.documents.models.AnalyzeRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeResult. The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def analyze( + self, index_name: str, request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Required. + :type request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeResult. 
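+
+        A minimal sketch using the JSON form of the body (assuming an
+        already-constructed async ``SearchClient`` named ``client``; the request
+        and response field names shown here are assumptions about the analyze
+        payload)::
+
+            result = await client.indexes_operations.analyze(
+                "my-index", {"text": "quick brown fox", "analyzer": "standard.lucene"}
+            )
+            for token_info in result["tokens"]:
+                print(token_info["token"])
+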
The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def analyze( + self, index_name: str, request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Required. + :type request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeResult. The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def analyze( + self, index_name: str, request: Union[_models.AnalyzeRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Is one of the following + types: AnalyzeRequest, JSON, IO[bytes] Required. + :type request: ~azure.search.documents.models.AnalyzeRequest or JSON or IO[bytes] + :return: AnalyzeResult. The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(request, (IOBase, bytes)): + _content = request + else: + _content = json.dumps(request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexes_operations_analyze_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = 
_deserialize(_models.AnalyzeResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class AliasesOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.aio.SearchClient`'s + :attr:`aliases_operations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + async def create( + self, alias: _models.SearchAlias, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Required. + :type alias: ~azure.search.documents.models.SearchAlias + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, alias: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Required. + :type alias: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create( + self, alias: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Required. + :type alias: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "content_type", "accept"]}, + ) + async def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwargs: Any) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Is one of the following types: + SearchAlias, JSON, IO[bytes] Required. + :type alias: ~azure.search.documents.models.SearchAlias or JSON or IO[bytes] + :return: SearchAlias. 
The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(alias, (IOBase, bytes)): + _content = alias + else: + _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_aliases_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchAlias, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "accept"]}, + ) + def list(self, **kwargs: Any) -> AsyncIterable["_models.SearchAlias"]: + """Lists all aliases available for a search service. 
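+
+        A minimal paging sketch (assuming an already-constructed async
+        ``SearchClient`` named ``client``)::
+
+            async for alias in client.aliases_operations.list():
+                print(alias["name"], alias["indexes"])
+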
+ + :return: An iterator like instance of SearchAlias + :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.models.SearchAlias] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.SearchAlias]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_aliases_operations_list_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.SearchAlias], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, AsyncList(list_of_elem) + + async def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) + + @overload + async def create_or_update( + self, + alias_name: str, + alias: _models.SearchAlias, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Required. + :type alias: ~azure.search.documents.models.SearchAlias + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. 
+ :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + alias_name: str, + alias: JSON, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Required. + :type alias: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def create_or_update( + self, + alias_name: str, + alias: IO[bytes], + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Required. + :type alias: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": [ + "api_version", + "prefer", + "client_request_id", + "alias_name", + "content_type", + "accept", + "etag", + "match_condition", + ] + }, + ) + async def create_or_update( + self, + alias_name: str, + alias: Union[_models.SearchAlias, JSON, IO[bytes]], + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Is one of the following types: + SearchAlias, JSON, IO[bytes] Required. 
+ :type alias: ~azure.search.documents.models.SearchAlias or JSON or IO[bytes] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(alias, (IOBase, bytes)): + _content = alias + else: + _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_aliases_operations_create_or_update_request( + alias_name=alias_name, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchAlias, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": [ + "api_version", + "client_request_id", + "alias_name", + "accept", + "etag", + "match_condition", + ] + }, + ) + async def delete( + self, + alias_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a search alias and its associated mapping to an index. This operation + is permanent, with no recovery option. 
The mapped index is untouched by this + operation. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_aliases_operations_delete_request( + alias_name=alias_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace_async + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "alias_name", "accept"]}, + ) + async def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: + """Retrieves an alias definition. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :return: SearchAlias. 
The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) + + _request = build_aliases_operations_get_request( + alias_name=alias_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchAlias, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class DocumentsOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.aio.SearchClient`'s + :attr:`documents_operations` attribute. + """ + + def __init__(self, *args, **kwargs) -> None: + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace_async + async def count(self, index_name: str, **kwargs: Any) -> int: + """Queries the number of documents in the index. + + :param index_name: The name of the index. Required. 
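+
+        A minimal usage sketch (assuming an already-constructed async
+        ``SearchClient`` named ``client``; the index name is a placeholder)::
+
+            document_count = await client.documents_operations.count("hotels-sample-index")
+            print(document_count)
+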
+ :type index_name: str + :return: int + :rtype: int + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[int] = kwargs.pop("cls", None) + + _request = build_documents_operations_count_request( + index_name=index_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(int, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + @api_version_validation( + params_added_on={ + "2024-11-01-preview": ["query_rewrites", "debug", "query_language", "speller", "semantic_fields"] + }, + ) + async def search_get( + self, + index_name: str, + *, + search_text: Optional[str] = None, + include_total_result_count: Optional[bool] = None, + facets: Optional[List[str]] = None, + _filter: Optional[str] = None, + highlight_fields: Optional[List[str]] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[List[str]] = None, + query_type: Optional[Union[str, _models.QueryType]] = None, + scoring_parameters: Optional[List[str]] = None, + scoring_profile: Optional[str] = None, + search_fields: Optional[List[str]] = None, + search_mode: Optional[Union[str, _models.SearchMode]] = None, + scoring_statistics: Optional[Union[str, _models.ScoringStatistics]] = None, + session_id: Optional[str] = None, + _select: Optional[List[str]] = None, + _skip: Optional[int] = None, + _top: Optional[int] = None, + semantic_configuration: Optional[str] = None, + semantic_error_handling: Optional[Union[str, _models.SemanticErrorMode]] = None, + semantic_max_wait_in_milliseconds: Optional[int] = None, + answers: Optional[Union[str, _models.QueryAnswerType]] = None, + captions: Optional[Union[str, _models.QueryCaptionType]] = None, + semantic_query: Optional[str] = None, + query_rewrites: Optional[Union[str, _models.QueryRewritesType]] = None, + debug: Optional[Union[str, _models.QueryDebugMode]] = None, + query_language: Optional[Union[str, _models.QueryLanguage]] = None, + speller: Optional[Union[str, _models.QuerySpellerType]] = None, + semantic_fields: Optional[List[str]] = None, + **kwargs: Any + ) -> _models.SearchDocumentsResult: + 
"""Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to + match + all documents. Default value is None. + :paramtype search_text: str + :keyword include_total_result_count: A value that specifies whether to fetch the total count of + results. Default is + false. Setting this value to true may have a performance impact. Note that the + count returned is an approximation. Default value is None. + :paramtype include_total_result_count: bool + :keyword facets: The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list + of name:value pairs. Default value is None. + :paramtype facets: list[str] + :keyword _filter: The OData $filter expression to apply to the search query. Default value is + None. + :paramtype _filter: str + :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable + fields can + be used for hit highlighting. Default value is None. + :paramtype highlight_fields: list[str] + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>. Default value is None. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>. Default value is None. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be + covered by a search query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services + with only one replica. The default is 100. Default value is None. + :paramtype minimum_coverage: float + :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + expression can be either a field name or a call to either the geo.distance() or + the search.score() functions. Each expression can be followed by asc to + indicate ascending, and desc to indicate descending. The default is ascending + order. Ties will be broken by the match scores of documents. If no OrderBy is + specified, the default sort order is descending by document match score. There + can be at most 32 $orderby clauses. Default value is None. + :paramtype order_by: list[str] + :keyword query_type: A value that specifies the syntax of the search query. The default is + 'simple'. + Use 'full' if your query uses the Lucene query syntax. Known values are: "simple", "full", and + "semantic". Default value is None. + :paramtype query_type: str or ~azure.search.documents.models.QueryType + :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + example, + referencePointParameter) using the format name-values. For example, if the + scoring profile defines a function with a parameter called 'mylocation' the + parameter string would be "mylocation--122.2,44.8" (without the quotes). Default value is + None. + :paramtype scoring_parameters: list[str] + :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + documents + in order to sort the results. Default value is None. + :paramtype scoring_profile: str + :keyword search_fields: The list of field names to which to scope the full-text search. 
When + using + fielded search (fieldName:searchExpression) in a full Lucene query, the field + names of each fielded search expression take precedence over any field names + listed in this parameter. Default value is None. + :paramtype search_fields: list[str] + :keyword search_mode: A value that specifies whether any or all of the search terms must be + matched + in order to count the document as a match. Known values are: "any" and "all". Default value is + None. + :paramtype search_mode: str or ~azure.search.documents.models.SearchMode + :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + statistics (such as + document frequency) globally for more consistent scoring, or locally, for lower + latency. Known values are: "local" and "global". Default value is None. + :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :keyword session_id: A value to be used to create a sticky session, which can help to get more + consistent results. As long as the same sessionId is used, a best-effort + attempt will be made to target the same replica set. Be wary that reusing the + same sessionID values repeatedly can interfere with the load balancing of the + requests across replicas and adversely affect the performance of the search + service. The value used as sessionId cannot start with a '_' character. Default value is None. + :paramtype session_id: str + :keyword _select: The list of fields to retrieve. If unspecified, all fields marked as + retrievable in the schema are included. Default value is None. + :paramtype _select: list[str] + :keyword _skip: The number of search results to skip. This value cannot be greater than + 100,000. If you need to scan documents in sequence, but cannot use $skip due to + this limitation, consider using $orderby on a totally-ordered key and $filter + with a range query instead. Default value is None. + :paramtype _skip: int + :keyword _top: The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are + truncated due to server-side paging, the response will include a continuation + token that can be used to issue another Search request for the next page of + results. Default value is None. + :paramtype _top: int + :keyword semantic_configuration: The name of the semantic configuration that lists which fields + should be used + for semantic ranking, captions, highlights, and answers. Default value is None. + :paramtype semantic_configuration: str + :keyword semantic_error_handling: Allows the user to choose whether a semantic call should fail + completely, or to + return partial results (default). Known values are: "partial" and "fail". Default value is + None. + :paramtype semantic_error_handling: str or ~azure.search.documents.models.SemanticErrorMode + :keyword semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount + of time it takes for + semantic enrichment to finish processing before the request fails. Default value is None. + :paramtype semantic_max_wait_in_milliseconds: int + :keyword answers: This parameter is only valid if the query type is ``semantic``. If set, the + query + returns answers extracted from key passages in the highest ranked documents. + The number of answers returned can be configured by appending the pipe + character ``|`` followed by the ``count-`` option after the + answers parameter value, such as ``extractive|count-3``. Default count is 1. 
The + confidence threshold can be configured by appending the pipe character ``|`` + followed by the ``threshold-`` option after the answers + parameter value, such as ``extractive|threshold-0.9``. Default threshold is 0.7. + The maximum character length of answers can be configured by appending the pipe + character '|' followed by the 'maxcharlength-' option, + such as 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". Default + value is None. + :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType + :keyword captions: This parameter is only valid if the query type is ``semantic``. If set, the + query + returns captions extracted from key passages in the highest ranked documents. + When Captions is set to ``extractive``\\ , highlighting is enabled by default, and + can be configured by appending the pipe character ``|`` followed by the + ``highlight-`` option, such as ``extractive|highlight-true``. Defaults + to ``None``. The maximum character length of captions can be configured by + appending the pipe character '|' followed by the 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. Known values are: "none" and + "extractive". Default value is None. + :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType + :keyword semantic_query: Allows setting a separate search query that will be solely used for + semantic + reranking, semantic captions and semantic answers. Is useful for scenarios + where there is a need to use different queries between the base retrieval and + ranking phase, and the L2 semantic phase. Default value is None. + :paramtype semantic_query: str + :keyword query_rewrites: When QueryRewrites is set to ``generative``\\ , the query terms are + sent to a + generative model which will produce 10 (default) rewrites to help increase the + recall of the request. The requested count can be configured by appending the + pipe character ``|`` followed by the ``count-`` option, such as + ``generative|count-3``. Defaults to ``None``. This parameter is only valid if the + query type is ``semantic``. Known values are: "none" and "generative". Default value is None. + :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType + :keyword debug: Enables a debugging tool that can be used to further explore your search + results. Known values are: "disabled", "semantic", "vector", "queryRewrites", and "all". + Default value is None. + :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode + :keyword query_language: The language of the query. Known values are: "none", "en-us", "en-gb", + "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", "zh-tw", + "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", "hu-hu", + "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", "da-dk", + "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", "el-gr", + "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", "fi-fi", + "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", "gu-in", + "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", and "ur-pk". Default + value is None. + :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage + :keyword speller: Improve search recall by spell-correcting individual search query terms. + Known values are: "none" and "lexicon". Default value is None.
+ :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType + :keyword semantic_fields: The list of field names used for semantic ranking. Default value is + None. + :paramtype semantic_fields: list[str] + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) + + _request = build_documents_operations_search_get_request( + index_name=index_name, + search_text=search_text, + include_total_result_count=include_total_result_count, + facets=facets, + _filter=_filter, + highlight_fields=highlight_fields, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + order_by=order_by, + query_type=query_type, + scoring_parameters=scoring_parameters, + scoring_profile=scoring_profile, + search_fields=search_fields, + search_mode=search_mode, + scoring_statistics=scoring_statistics, + session_id=session_id, + _select=_select, + _skip=_skip, + _top=_top, + semantic_configuration=semantic_configuration, + semantic_error_handling=semantic_error_handling, + semantic_max_wait_in_milliseconds=semantic_max_wait_in_milliseconds, + answers=answers, + captions=captions, + semantic_query=semantic_query, + query_rewrites=query_rewrites, + debug=debug, + query_language=query_language, + speller=speller, + semantic_fields=semantic_fields, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def search_post( + self, + index_name: str, + search_request: _models.SearchRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Required. 
+ :type search_request: ~azure.search.documents.models.SearchRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def search_post( + self, index_name: str, search_request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Required. + :type search_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def search_post( + self, index_name: str, search_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Required. + :type search_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def search_post( + self, index_name: str, search_request: Union[_models.SearchRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Is one of the following types: + SearchRequest, JSON, IO[bytes] Required. + :type search_request: ~azure.search.documents.models.SearchRequest or JSON or IO[bytes] + :return: SearchDocumentsResult. 
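Example (an illustrative sketch, not part of the generated client; ``client`` is assumed to be an already-authenticated instance of the async client that exposes this operation group as ``documents_operations``, and "hotels" is a placeholder index name):

.. code-block:: python

    async def run_search(client) -> None:
        # search_post also accepts a raw JSON body; the field names below
        # ("search", "top", "count") follow the Search Documents REST API.
        result = await client.documents_operations.search_post(
            "hotels",
            {"search": "luxury", "top": 5, "count": True},
        )
        # SearchDocumentsResult is MutableMapping-compatible; matching documents
        # are returned under the REST "value" field.
        for doc in result["value"]:
            print(doc)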
The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(search_request, (IOBase, bytes)): + _content = search_request + else: + _content = json.dumps(search_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_search_post_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def get( + self, key: str, index_name: str, *, selected_fields: Optional[List[str]] = None, **kwargs: Any + ) -> Dict[str, Any]: + """Retrieves a document from the index. + + :param key: The key of the document to retrieve. Required. + :type key: str + :param index_name: The name of the index. Required. + :type index_name: str + :keyword selected_fields: List of field names to retrieve for the document; Any field not + retrieved will + be missing from the returned document. Default value is None. 
+ :paramtype selected_fields: list[str] + :return: dict mapping str to any + :rtype: dict[str, any] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Dict[str, Any]] = kwargs.pop("cls", None) + + _request = build_documents_operations_get_request( + key=key, + index_name=index_name, + selected_fields=selected_fields, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(Dict[str, Any], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def suggest_get( + self, + index_name: str, + *, + search_text: str, + suggester_name: str, + _filter: Optional[str] = None, + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[List[str]] = None, + search_fields: Optional[List[str]] = None, + _select: Optional[List[str]] = None, + _top: Optional[int] = None, + **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword search_text: The search text to use to suggest documents. Must be at least 1 + character, and + no more than 100 characters. Required. + :paramtype search_text: str + :keyword suggester_name: The name of the suggester as specified in the suggesters collection + that's part + of the index definition. Required. + :paramtype suggester_name: str + :keyword _filter: An OData expression that filters the documents considered for suggestions. + Default value is None. + :paramtype _filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + suggestions query. + Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a + better experience in some scenarios, it comes at a performance cost as fuzzy + suggestions queries are slower and consume more resources. Default value is None. 
+ :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting of suggestions is disabled. Default value is + None. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting of suggestions is disabled. Default value is + None. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be + covered by a suggestions query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for + services with only one replica. The default is 80. Default value is None. + :paramtype minimum_coverage: float + :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + expression can be either a field name or a call to either the geo.distance() or + the search.score() functions. Each expression can be followed by asc to + indicate ascending, or desc to indicate descending. The default is ascending + order. Ties will be broken by the match scores of documents. If no $orderby is + specified, the default sort order is descending by document match score. There + can be at most 32 $orderby clauses. Default value is None. + :paramtype order_by: list[str] + :keyword search_fields: The list of field names to search for the specified search text. Target + fields + must be included in the specified suggester. Default value is None. + :paramtype search_fields: list[str] + :keyword _select: The list of fields to retrieve. If unspecified, only the key field will be + included in the results. Default value is None. + :paramtype _select: list[str] + :keyword _top: The number of suggestions to retrieve. The value must be a number between 1 and + 100. The default is 5. Default value is None. + :paramtype _top: int + :return: SuggestDocumentsResult.
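Example (an illustrative sketch, not part of the generated client; ``client``, the "hotels" index name, and the "sg" suggester name are assumptions):

.. code-block:: python

    async def run_suggest(client) -> None:
        result = await client.documents_operations.suggest_get(
            "hotels", search_text="lux", suggester_name="sg", _top=5
        )
        # Suggestions are returned under the REST "value" field; each entry
        # carries the matched text in "@search.text".
        for suggestion in result["value"]:
            print(suggestion["@search.text"])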
The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) + + _request = build_documents_operations_suggest_get_request( + index_name=index_name, + search_text=search_text, + suggester_name=suggester_name, + _filter=_filter, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + order_by=order_by, + search_fields=search_fields, + _select=_select, + _top=_top, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SuggestDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def suggest_post( + self, + index_name: str, + suggest_request: _models.SuggestRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Required. + :type suggest_request: ~azure.search.documents.models.SuggestRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def suggest_post( + self, index_name: str, suggest_request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Required. + :type suggest_request: JSON + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def suggest_post( + self, index_name: str, suggest_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Required. + :type suggest_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def suggest_post( + self, index_name: str, suggest_request: Union[_models.SuggestRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Is one of the following types: SuggestRequest, + JSON, IO[bytes] Required. + :type suggest_request: ~azure.search.documents.models.SuggestRequest or JSON or IO[bytes] + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(suggest_request, (IOBase, bytes)): + _content = suggest_request + else: + _content = json.dumps(suggest_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_suggest_post_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): 
+ pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SuggestDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def index( + self, index_name: str, batch: _models.IndexBatch, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Required. + :type batch: ~azure.search.documents.models.IndexBatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: IndexDocumentsResult. The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def index( + self, index_name: str, batch: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Required. + :type batch: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: IndexDocumentsResult. The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def index( + self, index_name: str, batch: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Required. + :type batch: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: IndexDocumentsResult. The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def index( + self, index_name: str, batch: Union[_models.IndexBatch, JSON, IO[bytes]], **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Is one of the following types: IndexBatch, JSON, + IO[bytes] Required. + :type batch: ~azure.search.documents.models.IndexBatch or JSON or IO[bytes] + :return: IndexDocumentsResult. 
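Example (an illustrative sketch, not part of the generated client; ``client``, the "hotels" index name, and the document fields are assumptions):

.. code-block:: python

    async def upload_documents(client) -> None:
        # A raw JSON batch is accepted as well; "@search.action" selects the
        # write action for each document, per the Search Documents REST API.
        batch = {
            "value": [
                {"@search.action": "mergeOrUpload", "hotelId": "1", "rating": 5},
            ]
        }
        result = await client.documents_operations.index("hotels", batch)
        # Per-document outcomes are reported under "value" (key, status, statusCode).
        for item in result["value"]:
            print(item["key"], item["status"], item["statusCode"])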
The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.IndexDocumentsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(batch, (IOBase, bytes)): + _content = batch + else: + _content = json.dumps(batch, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_index_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 207]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.IndexDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace_async + async def autocomplete_get( + self, + index_name: str, + *, + search_text: str, + suggester_name: str, + autocomplete_mode: Optional[Union[str, _models.AutocompleteMode]] = None, + _filter: Optional[str] = None, + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + search_fields: Optional[List[str]] = None, + _top: Optional[int] = None, + **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword search_text: The incomplete term which should be auto-completed. Required. + :paramtype search_text: str + :keyword suggester_name: The name of the suggester as specified in the suggesters collection + that's part + of the index definition. Required. + :paramtype suggester_name: str + :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + 'twoTerms' + to get shingles and 'oneTermWithContext' to use the current context while + producing auto-completed terms. Known values are: "oneTerm", "twoTerms", and + "oneTermWithContext". 
Default value is None. + :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :keyword _filter: An OData expression that filters the documents used to produce completed + terms + for the Autocomplete result. Default value is None. + :paramtype _filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + autocomplete query. + Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a + better experience in some scenarios, it comes at a performance cost as fuzzy + autocomplete queries are slower and consume more resources. Default value is None. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled. Default value is None. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled. Default value is None. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be + covered by an autocomplete query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for + services with only one replica. The default is 80. Default value is None. + :paramtype minimum_coverage: float + :keyword search_fields: The list of field names to consider when querying for auto-completed + terms. + Target fields must be included in the specified suggester. Default value is None. + :paramtype search_fields: list[str] + :keyword _top: The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. The default is 5. Default value is None. + :paramtype _top: int + :return: AutocompleteResult. 
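Example (an illustrative sketch, not part of the generated client; ``client``, the "hotels" index name, and the "sg" suggester name are assumptions):

.. code-block:: python

    async def run_autocomplete(client) -> None:
        result = await client.documents_operations.autocomplete_get(
            "hotels", search_text="lux", suggester_name="sg", autocomplete_mode="twoTerms"
        )
        # Completed terms are returned under the REST "value" field, each with
        # "text" and "queryPlusText".
        for item in result["value"]:
            print(item["text"], item["queryPlusText"])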
The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) + + _request = build_documents_operations_autocomplete_get_request( + index_name=index_name, + search_text=search_text, + suggester_name=suggester_name, + autocomplete_mode=autocomplete_mode, + _filter=_filter, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + search_fields=search_fields, + _top=_top, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AutocompleteResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + async def autocomplete_post( + self, + index_name: str, + autocomplete_request: _models.AutocompleteRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Required. + :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def autocomplete_post( + self, index_name: str, autocomplete_request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Required. 
+ :type autocomplete_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + async def autocomplete_post( + self, index_name: str, autocomplete_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Required. + :type autocomplete_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace_async + async def autocomplete_post( + self, index_name: str, autocomplete_request: Union[_models.AutocompleteRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Is one of the + following types: AutocompleteRequest, JSON, IO[bytes] Required. + :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest or JSON or + IO[bytes] + :return: AutocompleteResult. 
The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(autocomplete_request, (IOBase, bytes)): + _content = autocomplete_request + else: + _content = json.dumps(autocomplete_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_autocomplete_post_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AutocompleteResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class SearchClientOperationsMixin(SearchClientMixinABC): + + @distributed_trace_async + async def get_service_statistics(self, **kwargs: Any) -> _models.SearchServiceStatistics: + """Gets service level statistics for a search service. + + :return: SearchServiceStatistics. 
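Example (an illustrative sketch, not part of the generated client; ``client`` is assumed to be an already-authenticated instance of the async client that carries this mixin):

.. code-block:: python

    async def show_statistics(client) -> None:
        stats = await client.get_service_statistics()
        # SearchServiceStatistics is MutableMapping-compatible; "counters" and
        # "limits" mirror the REST response shape.
        print(stats["counters"]["documentCount"])
        print(stats["limits"])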
The SearchServiceStatistics is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchServiceStatistics + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchServiceStatistics] = kwargs.pop("cls", None) + + _request = build_search_get_service_statistics_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + await response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchServiceStatistics, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py index 7d8cc33248c2..b88c6249b5aa 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -11,22 +13,125 @@ from ._patch import * # pylint: disable=unused-wildcard-import -from ._models_py3 import ( # type: ignore +from ._models import ( # type: ignore + AIServicesAccountIdentity, + AIServicesAccountKey, + AIServicesVisionParameters, + AIServicesVisionVectorizer, + AnalyzeRequest, + AnalyzeResult, + AnalyzedTokenInfo, + AsciiFoldingTokenFilter, AutocompleteItem, - AutocompleteOptions, AutocompleteRequest, AutocompleteResult, + AzureActiveDirectoryApplicationCredentials, + AzureMachineLearningParameters, + AzureMachineLearningSkill, + AzureMachineLearningVectorizer, + AzureOpenAIEmbeddingSkill, + AzureOpenAITokenizerParameters, + AzureOpenAIVectorizer, + AzureOpenAIVectorizerParameters, + BM25SimilarityAlgorithm, + BinaryQuantizationCompression, + CharFilter, + CjkBigramTokenFilter, + ClassicSimilarityAlgorithm, + ClassicTokenizer, + CognitiveServicesAccount, + CognitiveServicesAccountKey, + CommonGramTokenFilter, + ConditionalSkill, + CorsOptions, + CustomAnalyzer, + CustomEntity, + CustomEntityAlias, + CustomEntityLookupSkill, + CustomNormalizer, + DataChangeDetectionPolicy, + DataDeletionDetectionPolicy, + DataSourceCredentials, DebugInfo, + DefaultCognitiveServicesAccount, + DictionaryDecompounderTokenFilter, + DistanceScoringFunction, + DistanceScoringParameters, DocumentDebugInfo, + DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, + DocumentKeysOrIds, + EdgeNGramTokenFilter, + EdgeNGramTokenFilterV2, + EdgeNGramTokenizer, + ElisionTokenFilter, + EntityLinkingSkill, + EntityRecognitionSkill, + EntityRecognitionSkillV3, ErrorAdditionalInfo, ErrorDetail, ErrorResponse, + ExhaustiveKnnAlgorithmConfiguration, + ExhaustiveKnnParameters, FacetResult, + FieldMapping, + FieldMappingFunction, + FreshnessScoringFunction, + FreshnessScoringParameters, + GetIndexStatisticsResult, + HighWaterMarkChangeDetectionPolicy, + HnswAlgorithmConfiguration, + HnswParameters, HybridSearch, + ImageAnalysisSkill, IndexAction, IndexBatch, IndexDocumentsResult, + IndexerCurrentState, + IndexerExecutionResult, + IndexingParameters, + IndexingParametersConfiguration, IndexingResult, + IndexingSchedule, + InputFieldMappingEntry, + KeepTokenFilter, + KeyPhraseExtractionSkill, + KeywordMarkerTokenFilter, + KeywordTokenizer, + KeywordTokenizerV2, + LanguageDetectionSkill, + LengthTokenFilter, + LexicalAnalyzer, + LexicalNormalizer, + LexicalTokenizer, + LimitTokenFilter, + ListDataSourcesResult, + ListIndexersResult, + ListSkillsetsResult, + ListSynonymMapsResult, + LuceneStandardAnalyzer, + LuceneStandardTokenizer, + LuceneStandardTokenizerV2, + MagnitudeScoringFunction, + MagnitudeScoringParameters, + MappingCharFilter, + MergeSkill, + MicrosoftLanguageStemmingTokenizer, + MicrosoftLanguageTokenizer, + NGramTokenFilter, + NGramTokenFilterV2, + NGramTokenizer, + NativeBlobSoftDeleteDeletionDetectionPolicy, + OcrSkill, + OutputFieldMappingEntry, + PIIDetectionSkill, + PathHierarchyTokenizerV2, + PatternAnalyzer, + PatternCaptureTokenFilter, + PatternReplaceCharFilter, + PatternReplaceTokenFilter, + PatternTokenizer, + PhoneticTokenFilter, QueryAnswerResult, QueryCaptionResult, QueryResultDocumentRerankerInput, @@ -34,20 +139,88 @@ QueryResultDocumentSubscores, QueryRewritesDebugInfo, QueryRewritesValuesDebugInfo, - RequestOptions, + RescoringOptions, + ResourceCounter, + ScalarQuantizationCompression, + ScalarQuantizationParameters, + ScoringFunction, + ScoringProfile, + 
SearchAlias, SearchDocumentsResult, - SearchOptions, + SearchField, + SearchIndex, + SearchIndexer, + SearchIndexerCache, + SearchIndexerDataContainer, + SearchIndexerDataIdentity, + SearchIndexerDataNoneIdentity, + SearchIndexerDataSource, + SearchIndexerDataUserAssignedIdentity, + SearchIndexerError, + SearchIndexerIndexProjection, + SearchIndexerIndexProjectionSelector, + SearchIndexerIndexProjectionsParameters, + SearchIndexerKnowledgeStore, + SearchIndexerKnowledgeStoreBlobProjectionSelector, + SearchIndexerKnowledgeStoreFileProjectionSelector, + SearchIndexerKnowledgeStoreObjectProjectionSelector, + SearchIndexerKnowledgeStoreParameters, + SearchIndexerKnowledgeStoreProjection, + SearchIndexerKnowledgeStoreProjectionSelector, + SearchIndexerKnowledgeStoreTableProjectionSelector, + SearchIndexerLimits, + SearchIndexerSkill, + SearchIndexerSkillset, + SearchIndexerStatus, + SearchIndexerWarning, SearchRequest, + SearchResourceEncryptionKey, SearchResult, SearchScoreThreshold, + SearchServiceCounters, + SearchServiceLimits, + SearchServiceStatistics, + SearchSuggester, + SemanticConfiguration, SemanticDebugInfo, + SemanticField, + SemanticPrioritizedFields, + SemanticSearch, + SentimentSkill, + SentimentSkillV3, + ShaperSkill, + ShingleTokenFilter, + SimilarityAlgorithm, SingleVectorFieldResult, + SkillNames, + SnowballTokenFilter, + SoftDeleteColumnDeletionDetectionPolicy, + SplitSkill, + SqlIntegratedChangeTrackingPolicy, + StemmerOverrideTokenFilter, + StemmerTokenFilter, + StopAnalyzer, + StopwordsTokenFilter, SuggestDocumentsResult, - SuggestOptions, SuggestRequest, SuggestResult, + SynonymMap, + SynonymTokenFilter, + TagScoringFunction, + TagScoringParameters, TextResult, + TextTranslationSkill, + TextWeights, + TokenFilter, + TruncateTokenFilter, + UaxUrlEmailTokenizer, + UniqueTokenFilter, VectorQuery, + VectorSearch, + VectorSearchAlgorithmConfiguration, + VectorSearchCompression, + VectorSearchProfile, + VectorSearchVectorizer, VectorSimilarityThreshold, VectorThreshold, VectorizableImageBinaryQuery, @@ -55,12 +228,51 @@ VectorizableTextQuery, VectorizedQuery, VectorsDebugInfo, + VisionVectorizeSkill, + WebApiSkill, + WebApiVectorizer, + WebApiVectorizerParameters, + WordDelimiterTokenFilter, ) -from ._search_index_client_enums import ( # type: ignore +from ._enums import ( # type: ignore + AIStudioModelCatalogName, AutocompleteMode, + AzureOpenAIModelName, + BlobIndexerDataToExtract, + BlobIndexerImageAction, + BlobIndexerPDFTextRotationAlgorithm, + BlobIndexerParsingMode, + CharFilterName, + CjkBigramTokenFilterScripts, + CustomEntityLookupSkillLanguage, + DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, + DocumentIntelligenceLayoutSkillOutputMode, + EdgeNGramTokenFilterSide, + EntityCategory, + EntityRecognitionSkillLanguage, HybridCountAndFacetMode, + ImageAnalysisSkillLanguage, + ImageDetail, IndexActionType, + IndexProjectionMode, + IndexerExecutionEnvironment, + IndexerExecutionStatus, + IndexerExecutionStatusDetail, + IndexerStatus, + IndexingMode, + KeyPhraseExtractionSkillLanguage, + LexicalAnalyzerName, + LexicalNormalizerName, + LexicalTokenizerName, + MarkdownHeaderDepth, + MarkdownParsingSubmode, + MicrosoftStemmingTokenizerLanguage, + MicrosoftTokenizerLanguage, + OcrLineEnding, + OcrSkillLanguage, + PIIDetectionSkillMaskingMode, + PhoneticEncoder, QueryAnswerType, QueryCaptionType, QueryDebugMode, @@ -68,37 +280,164 @@ QueryRewritesType, QuerySpellerType, QueryType, + RegexFlags, + ScoringFunctionAggregation, + ScoringFunctionInterpolation, 
ScoringStatistics, + SearchFieldDataType, + SearchIndexerDataSourceType, SearchMode, SemanticErrorMode, SemanticErrorReason, SemanticFieldState, SemanticQueryRewritesResultType, SemanticSearchResultsType, + SentimentSkillLanguage, + SnowballTokenFilterLanguage, + SplitSkillEncoderModelName, + SplitSkillLanguage, + SplitSkillUnit, + StemmerTokenFilterLanguage, + StopwordsList, + TextSplitMode, + TextTranslationSkillLanguage, + TokenCharacterKind, + TokenFilterName, + VectorEncodingFormat, VectorFilterMode, VectorQueryKind, + VectorSearchAlgorithmKind, + VectorSearchAlgorithmMetric, + VectorSearchCompressionKind, + VectorSearchCompressionRescoreStorageMethod, + VectorSearchCompressionTarget, + VectorSearchVectorizerKind, VectorThresholdKind, + VisualFeature, ) from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ + "AIServicesAccountIdentity", + "AIServicesAccountKey", + "AIServicesVisionParameters", + "AIServicesVisionVectorizer", + "AnalyzeRequest", + "AnalyzeResult", + "AnalyzedTokenInfo", + "AsciiFoldingTokenFilter", "AutocompleteItem", - "AutocompleteOptions", "AutocompleteRequest", "AutocompleteResult", + "AzureActiveDirectoryApplicationCredentials", + "AzureMachineLearningParameters", + "AzureMachineLearningSkill", + "AzureMachineLearningVectorizer", + "AzureOpenAIEmbeddingSkill", + "AzureOpenAITokenizerParameters", + "AzureOpenAIVectorizer", + "AzureOpenAIVectorizerParameters", + "BM25SimilarityAlgorithm", + "BinaryQuantizationCompression", + "CharFilter", + "CjkBigramTokenFilter", + "ClassicSimilarityAlgorithm", + "ClassicTokenizer", + "CognitiveServicesAccount", + "CognitiveServicesAccountKey", + "CommonGramTokenFilter", + "ConditionalSkill", + "CorsOptions", + "CustomAnalyzer", + "CustomEntity", + "CustomEntityAlias", + "CustomEntityLookupSkill", + "CustomNormalizer", + "DataChangeDetectionPolicy", + "DataDeletionDetectionPolicy", + "DataSourceCredentials", "DebugInfo", + "DefaultCognitiveServicesAccount", + "DictionaryDecompounderTokenFilter", + "DistanceScoringFunction", + "DistanceScoringParameters", "DocumentDebugInfo", + "DocumentExtractionSkill", + "DocumentIntelligenceLayoutSkill", + "DocumentKeysOrIds", + "EdgeNGramTokenFilter", + "EdgeNGramTokenFilterV2", + "EdgeNGramTokenizer", + "ElisionTokenFilter", + "EntityLinkingSkill", + "EntityRecognitionSkill", + "EntityRecognitionSkillV3", "ErrorAdditionalInfo", "ErrorDetail", "ErrorResponse", + "ExhaustiveKnnAlgorithmConfiguration", + "ExhaustiveKnnParameters", "FacetResult", + "FieldMapping", + "FieldMappingFunction", + "FreshnessScoringFunction", + "FreshnessScoringParameters", + "GetIndexStatisticsResult", + "HighWaterMarkChangeDetectionPolicy", + "HnswAlgorithmConfiguration", + "HnswParameters", "HybridSearch", + "ImageAnalysisSkill", "IndexAction", "IndexBatch", "IndexDocumentsResult", + "IndexerCurrentState", + "IndexerExecutionResult", + "IndexingParameters", + "IndexingParametersConfiguration", "IndexingResult", + "IndexingSchedule", + "InputFieldMappingEntry", + "KeepTokenFilter", + "KeyPhraseExtractionSkill", + "KeywordMarkerTokenFilter", + "KeywordTokenizer", + "KeywordTokenizerV2", + "LanguageDetectionSkill", + "LengthTokenFilter", + "LexicalAnalyzer", + "LexicalNormalizer", + "LexicalTokenizer", + "LimitTokenFilter", + "ListDataSourcesResult", + "ListIndexersResult", + "ListSkillsetsResult", + "ListSynonymMapsResult", + "LuceneStandardAnalyzer", + "LuceneStandardTokenizer", + "LuceneStandardTokenizerV2", + "MagnitudeScoringFunction", + 
"MagnitudeScoringParameters", + "MappingCharFilter", + "MergeSkill", + "MicrosoftLanguageStemmingTokenizer", + "MicrosoftLanguageTokenizer", + "NGramTokenFilter", + "NGramTokenFilterV2", + "NGramTokenizer", + "NativeBlobSoftDeleteDeletionDetectionPolicy", + "OcrSkill", + "OutputFieldMappingEntry", + "PIIDetectionSkill", + "PathHierarchyTokenizerV2", + "PatternAnalyzer", + "PatternCaptureTokenFilter", + "PatternReplaceCharFilter", + "PatternReplaceTokenFilter", + "PatternTokenizer", + "PhoneticTokenFilter", "QueryAnswerResult", "QueryCaptionResult", "QueryResultDocumentRerankerInput", @@ -106,20 +445,88 @@ "QueryResultDocumentSubscores", "QueryRewritesDebugInfo", "QueryRewritesValuesDebugInfo", - "RequestOptions", + "RescoringOptions", + "ResourceCounter", + "ScalarQuantizationCompression", + "ScalarQuantizationParameters", + "ScoringFunction", + "ScoringProfile", + "SearchAlias", "SearchDocumentsResult", - "SearchOptions", + "SearchField", + "SearchIndex", + "SearchIndexer", + "SearchIndexerCache", + "SearchIndexerDataContainer", + "SearchIndexerDataIdentity", + "SearchIndexerDataNoneIdentity", + "SearchIndexerDataSource", + "SearchIndexerDataUserAssignedIdentity", + "SearchIndexerError", + "SearchIndexerIndexProjection", + "SearchIndexerIndexProjectionSelector", + "SearchIndexerIndexProjectionsParameters", + "SearchIndexerKnowledgeStore", + "SearchIndexerKnowledgeStoreBlobProjectionSelector", + "SearchIndexerKnowledgeStoreFileProjectionSelector", + "SearchIndexerKnowledgeStoreObjectProjectionSelector", + "SearchIndexerKnowledgeStoreParameters", + "SearchIndexerKnowledgeStoreProjection", + "SearchIndexerKnowledgeStoreProjectionSelector", + "SearchIndexerKnowledgeStoreTableProjectionSelector", + "SearchIndexerLimits", + "SearchIndexerSkill", + "SearchIndexerSkillset", + "SearchIndexerStatus", + "SearchIndexerWarning", "SearchRequest", + "SearchResourceEncryptionKey", "SearchResult", "SearchScoreThreshold", + "SearchServiceCounters", + "SearchServiceLimits", + "SearchServiceStatistics", + "SearchSuggester", + "SemanticConfiguration", "SemanticDebugInfo", + "SemanticField", + "SemanticPrioritizedFields", + "SemanticSearch", + "SentimentSkill", + "SentimentSkillV3", + "ShaperSkill", + "ShingleTokenFilter", + "SimilarityAlgorithm", "SingleVectorFieldResult", + "SkillNames", + "SnowballTokenFilter", + "SoftDeleteColumnDeletionDetectionPolicy", + "SplitSkill", + "SqlIntegratedChangeTrackingPolicy", + "StemmerOverrideTokenFilter", + "StemmerTokenFilter", + "StopAnalyzer", + "StopwordsTokenFilter", "SuggestDocumentsResult", - "SuggestOptions", "SuggestRequest", "SuggestResult", + "SynonymMap", + "SynonymTokenFilter", + "TagScoringFunction", + "TagScoringParameters", "TextResult", + "TextTranslationSkill", + "TextWeights", + "TokenFilter", + "TruncateTokenFilter", + "UaxUrlEmailTokenizer", + "UniqueTokenFilter", "VectorQuery", + "VectorSearch", + "VectorSearchAlgorithmConfiguration", + "VectorSearchCompression", + "VectorSearchProfile", + "VectorSearchVectorizer", "VectorSimilarityThreshold", "VectorThreshold", "VectorizableImageBinaryQuery", @@ -127,9 +534,48 @@ "VectorizableTextQuery", "VectorizedQuery", "VectorsDebugInfo", + "VisionVectorizeSkill", + "WebApiSkill", + "WebApiVectorizer", + "WebApiVectorizerParameters", + "WordDelimiterTokenFilter", + "AIStudioModelCatalogName", "AutocompleteMode", + "AzureOpenAIModelName", + "BlobIndexerDataToExtract", + "BlobIndexerImageAction", + "BlobIndexerPDFTextRotationAlgorithm", + "BlobIndexerParsingMode", + "CharFilterName", + 
"CjkBigramTokenFilterScripts", + "CustomEntityLookupSkillLanguage", + "DocumentIntelligenceLayoutSkillMarkdownHeaderDepth", + "DocumentIntelligenceLayoutSkillOutputMode", + "EdgeNGramTokenFilterSide", + "EntityCategory", + "EntityRecognitionSkillLanguage", "HybridCountAndFacetMode", + "ImageAnalysisSkillLanguage", + "ImageDetail", "IndexActionType", + "IndexProjectionMode", + "IndexerExecutionEnvironment", + "IndexerExecutionStatus", + "IndexerExecutionStatusDetail", + "IndexerStatus", + "IndexingMode", + "KeyPhraseExtractionSkillLanguage", + "LexicalAnalyzerName", + "LexicalNormalizerName", + "LexicalTokenizerName", + "MarkdownHeaderDepth", + "MarkdownParsingSubmode", + "MicrosoftStemmingTokenizerLanguage", + "MicrosoftTokenizerLanguage", + "OcrLineEnding", + "OcrSkillLanguage", + "PIIDetectionSkillMaskingMode", + "PhoneticEncoder", "QueryAnswerType", "QueryCaptionType", "QueryDebugMode", @@ -137,16 +583,40 @@ "QueryRewritesType", "QuerySpellerType", "QueryType", + "RegexFlags", + "ScoringFunctionAggregation", + "ScoringFunctionInterpolation", "ScoringStatistics", + "SearchFieldDataType", + "SearchIndexerDataSourceType", "SearchMode", "SemanticErrorMode", "SemanticErrorReason", "SemanticFieldState", "SemanticQueryRewritesResultType", "SemanticSearchResultsType", + "SentimentSkillLanguage", + "SnowballTokenFilterLanguage", + "SplitSkillEncoderModelName", + "SplitSkillLanguage", + "SplitSkillUnit", + "StemmerTokenFilterLanguage", + "StopwordsList", + "TextSplitMode", + "TextTranslationSkillLanguage", + "TokenCharacterKind", + "TokenFilterName", + "VectorEncodingFormat", "VectorFilterMode", "VectorQueryKind", + "VectorSearchAlgorithmKind", + "VectorSearchAlgorithmMetric", + "VectorSearchCompressionKind", + "VectorSearchCompressionRescoreStorageMethod", + "VectorSearchCompressionTarget", + "VectorSearchVectorizerKind", "VectorThresholdKind", + "VisualFeature", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_search_service_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py similarity index 72% rename from sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_search_service_client_enums.py rename to sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py index 2143ef0a9f25..5051c9e624e4 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_search_service_client_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py @@ -1,7 +1,9 @@ # pylint: disable=too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- @@ -10,61 +12,94 @@ class AIStudioModelCatalogName(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The name of the embedding model from the Azure AI Studio Catalog that will be called.""" + """The name of the embedding model from the Azure AI Studio Catalog that will be + called. + """ - OPEN_AI_CLIP_IMAGE_TEXT_EMBEDDINGS_VIT_BASE_PATCH32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32" - OPEN_AI_CLIP_IMAGE_TEXT_EMBEDDINGS_VI_T_LARGE_PATCH14_336 = ( - "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336" - ) + OPEN_AICLIP_IMAGE_TEXT_EMBEDDINGS_VIT_BASE_PATCH32 = "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32" + """OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32""" + OPEN_AICLIP_IMAGE_TEXT_EMBEDDINGS_VI_T_LARGE_PATCH14336 = "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336" + """OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336""" FACEBOOK_DINO_V2_IMAGE_EMBEDDINGS_VI_T_BASE = "Facebook-DinoV2-Image-Embeddings-ViT-Base" + """Facebook-DinoV2-Image-Embeddings-ViT-Base""" FACEBOOK_DINO_V2_IMAGE_EMBEDDINGS_VI_T_GIANT = "Facebook-DinoV2-Image-Embeddings-ViT-Giant" + """Facebook-DinoV2-Image-Embeddings-ViT-Giant""" COHERE_EMBED_V3_ENGLISH = "Cohere-embed-v3-english" + """Cohere-embed-v3-english""" COHERE_EMBED_V3_MULTILINGUAL = "Cohere-embed-v3-multilingual" + """Cohere-embed-v3-multilingual""" + + +class AutocompleteMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' + to get shingles and 'oneTermWithContext' to use the current context in + producing autocomplete terms. + """ + + ONE_TERM = "oneTerm" + """Only one term is suggested. If the query has two terms, only the last term is + completed. For example, if the input is 'washington medic', the suggested terms + could include 'medicaid', 'medicare', and 'medicine'.""" + TWO_TERMS = "twoTerms" + """Matching two-term phrases in the index will be suggested. For example, if the + input is 'medic', the suggested terms could include 'medicare coverage' and + 'medical assistant'.""" + ONE_TERM_WITH_CONTEXT = "oneTermWithContext" + """Completes the last term in a query with two or more terms, where the last two + terms are a phrase that exists in the index. For example, if the input is + 'washington medic', the suggested terms could include 'washington medicaid' and + 'washington medical'.""" class AzureOpenAIModelName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The Azure Open AI model name that will be called.""" TEXT_EMBEDDING_ADA002 = "text-embedding-ada-002" + """TextEmbeddingAda002 model.""" TEXT_EMBEDDING3_LARGE = "text-embedding-3-large" + """TextEmbedding3Large model.""" TEXT_EMBEDDING3_SMALL = "text-embedding-3-small" + """TextEmbedding3Small model.""" class BlobIndexerDataToExtract(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the data to extract from Azure blob storage and tells the indexer which data to - extract from image content when "imageAction" is set to a value other than "none". This - applies to embedded image content in a .PDF or other application, or image files such as .jpg - and .png, in Azure blobs. + """Specifies the data to extract from Azure blob storage and tells the indexer + which data to extract from image content when "imageAction" is set to a value + other than "none". This applies to embedded image content in a .PDF or other + application, or image files such as .jpg and .png, in Azure blobs. 
""" STORAGE_METADATA = "storageMetadata" """Indexes just the standard blob properties and user-specified metadata.""" ALL_METADATA = "allMetadata" - """Extracts metadata provided by the Azure blob storage subsystem and the content-type specific - metadata (for example, metadata unique to just .png files are indexed).""" + """Extracts metadata provided by the Azure blob storage subsystem and the + content-type specific metadata (for example, metadata unique to just .png files + are indexed).""" CONTENT_AND_METADATA = "contentAndMetadata" """Extracts all metadata and textual content from each blob.""" class BlobIndexerImageAction(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Determines how to process embedded images and image files in Azure blob storage. Setting the - "imageAction" configuration to any value other than "none" requires that a skillset also be - attached to that indexer. + """Determines how to process embedded images and image files in Azure blob + storage. Setting the "imageAction" configuration to any value other than + "none" requires that a skillset also be attached to that indexer. """ NONE = "none" """Ignores embedded images or image files in the data set. This is the default.""" GENERATE_NORMALIZED_IMAGES = "generateNormalizedImages" - """Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds - it into the content field. This action requires that "dataToExtract" is set to - "contentAndMetadata". A normalized image refers to additional processing resulting in uniform - image output, sized and rotated to promote consistent rendering when you include images in - visual search results. This information is generated for each image when you use this option.""" + """Extracts text from images (for example, the word "STOP" from a traffic stop + sign), and embeds it into the content field. This action requires that + "dataToExtract" is set to "contentAndMetadata". A normalized image refers to + additional processing resulting in uniform image output, sized and rotated to + promote consistent rendering when you include images in visual search results. + This information is generated for each image when you use this option.""" GENERATE_NORMALIZED_IMAGE_PER_PAGE = "generateNormalizedImagePerPage" - """Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds - it into the content field, but treats PDF files differently in that each page will be rendered - as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file - types will be treated the same as if "generateNormalizedImages" was set.""" + """Extracts text from images (for example, the word "STOP" from a traffic stop + sign), and embeds it into the content field, but treats PDF files differently + in that each page will be rendered as an image and normalized accordingly, + instead of extracting embedded images. 
Non-PDF file types will be treated the + same as if "generateNormalizedImages" was set.""" class BlobIndexerParsingMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -79,10 +114,11 @@ class BlobIndexerParsingMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): JSON = "json" """Set to json to extract structured content from JSON files.""" JSON_ARRAY = "jsonArray" - """Set to jsonArray to extract individual elements of a JSON array as separate documents.""" - JSON_LINES = "jsonLines" - """Set to jsonLines to extract individual JSON entities, separated by a new line, as separate + """Set to jsonArray to extract individual elements of a JSON array as separate documents.""" + JSON_LINES = "jsonLines" + """Set to jsonLines to extract individual JSON entities, separated by a new line, + as separate documents.""" MARKDOWN = "markdown" """Set to markdown to extract content from markdown files.""" @@ -93,10 +129,11 @@ class BlobIndexerPDFTextRotationAlgorithm(str, Enum, metaclass=CaseInsensitiveEn NONE = "none" """Leverages normal text extraction. This is the default.""" DETECT_ANGLES = "detectAngles" - """May produce better and more readable text extraction from PDF files that have rotated text - within them. Note that there may be a small performance speed impact when this parameter is - used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the - rotated text appears within an embedded image in the PDF, this parameter does not apply.""" + """May produce better and more readable text extraction from PDF files that have + rotated text within them. Note that there may be a small performance speed + impact when this parameter is used. This parameter only applies to PDF files, + and only to PDFs with embedded text. If the rotated text appears within an + embedded image in the PDF, this parameter does not apply.""" class CharFilterName(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -161,7 +198,9 @@ class DocumentIntelligenceLayoutSkillMarkdownHeaderDepth(str, Enum, metaclass=Ca class DocumentIntelligenceLayoutSkillOutputMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Controls the cardinality of the output produced by the skill. Default is 'oneToMany'.""" + """Controls the cardinality of the output produced by the skill. Default is + 'oneToMany'. + """ ONE_TO_MANY = "oneToMany" """Specify the deepest markdown header section to parse.""" @@ -196,7 +235,9 @@ class EntityCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta): class EntityRecognitionSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Deprecated. The language codes supported for input text by EntityRecognitionSkill.""" + """Deprecated. The language codes supported for input text by + EntityRecognitionSkill. + """ AR = "ar" """Arabic""" @@ -246,10 +287,19 @@ class EntityRecognitionSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMet """Turkish""" -class Enum0(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Enum0.""" +class HybridCountAndFacetMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Determines whether the count and facets should includes all documents that + matched the search query, or only the documents that are retrieved within the + 'maxTextRecallSize' window. The default value is 'countAllResults'. 
+ """ - RETURN_REPRESENTATION = "return=representation" + COUNT_RETRIEVABLE_RESULTS = "countRetrievableResults" + """Only include documents that were matched within the 'maxTextRecallSize' + retrieval window when computing 'count' and 'facets'.""" + COUNT_ALL_RESULTS = "countAllResults" + """Include all documents that were matched by the search query when computing + 'count' and 'facets', regardless of whether or not those documents are within + the 'maxTextRecallSize' retrieval window.""" class ImageAnalysisSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -370,24 +420,48 @@ class ImageDetail(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Details recognized as landmarks.""" +class IndexActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The operation to perform on a document in an indexing batch.""" + + UPLOAD = "upload" + """Inserts the document into the index if it is new and updates it if it exists. + All fields are replaced in the update case.""" + MERGE = "merge" + """Merges the specified field values with an existing document. If the document + does not exist, the merge will fail. Any field you specify in a merge will + replace the existing field in the document. This also applies to collections of + primitive and complex types.""" + MERGE_OR_UPLOAD = "mergeOrUpload" + """Behaves like merge if a document with the given key already exists in the + index. If the document does not exist, it behaves like upload with a new + document.""" + DELETE = "delete" + """Removes the specified document from the index. Any field you specify in a + delete operation other than the key field will be ignored. If you want to + remove an individual field from a document, use merge instead and set the field + explicitly to null.""" + + class IndexerExecutionEnvironment(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Specifies the environment in which the indexer should execute.""" STANDARD = "standard" - """Indicates that the search service can determine where the indexer should execute. This is the - default environment when nothing is specified and is the recommended value.""" + """Indicates that the search service can determine where the indexer should + execute. This is the default environment when nothing is specified and is the + recommended value.""" PRIVATE = "private" - """Indicates that the indexer should run with the environment provisioned specifically for the - search service. This should only be specified as the execution environment if the indexer needs - to access resources securely over shared private link resources.""" + """Indicates that the indexer should run with the environment provisioned + specifically for the search service. This should only be specified as the + execution environment if the indexer needs to access resources securely over + shared private link resources.""" class IndexerExecutionStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Represents the status of an individual indexer execution.""" TRANSIENT_FAILURE = "transientFailure" - """An indexer invocation has failed, but the failure may be transient. Indexer invocations will - continue per schedule.""" + """An indexer invocation has failed, but the failure may be transient. 
Indexer + invocations will continue per schedule.""" SUCCESS = "success" """Indexer execution completed successfully.""" IN_PROGRESS = "inProgress" @@ -409,8 +483,8 @@ class IndexerStatus(str, Enum, metaclass=CaseInsensitiveEnumMeta): UNKNOWN = "unknown" """Indicates that the indexer is in an unknown state.""" ERROR = "error" - """Indicates that the indexer experienced an error that cannot be corrected without human - intervention.""" + """Indicates that the indexer experienced an error that cannot be corrected + without human intervention.""" RUNNING = "running" """Indicates that the indexer is running normally.""" @@ -421,18 +495,21 @@ class IndexingMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): INDEXING_ALL_DOCS = "indexingAllDocs" """The indexer is indexing all documents in the datasource.""" INDEXING_RESET_DOCS = "indexingResetDocs" - """The indexer is indexing selective, reset documents in the datasource. The documents being - indexed are defined on indexer status.""" + """The indexer is indexing selective, reset documents in the datasource. The + documents being indexed are defined on indexer status.""" class IndexProjectionMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Defines behavior of the index projections in relation to the rest of the indexer.""" + """Defines behavior of the index projections in relation to the rest of the + indexer. + """ SKIP_INDEXING_PARENT_DOCUMENTS = "skipIndexingParentDocuments" - """The source document will be skipped from writing into the indexer's target index.""" + """The source document will be skipped from writing into the indexer's target + index.""" INCLUDE_INDEXING_PARENT_DOCUMENTS = "includeIndexingParentDocuments" - """The source document will be written into the indexer's target index. This is the default - pattern.""" + """The source document will be written into the indexer's target index. This is + the default pattern.""" class KeyPhraseExtractionSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -653,8 +730,8 @@ class LexicalAnalyzerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Standard ASCII Folding Lucene analyzer. See https://learn.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#Analyzers""" KEYWORD = "keyword" - """Treats the entire content of a field as a single token. This is useful for data like zip codes, - ids, and some product names. See + """Treats the entire content of a field as a single token. This is useful for data + like zip codes, ids, and some product names. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/KeywordAnalyzer.html""" PATTERN = "pattern" """Flexibly separates text into terms via a regular expression pattern. See @@ -663,7 +740,8 @@ class LexicalAnalyzerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Divides text at non-letters and converts them to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/SimpleAnalyzer.html""" STOP = "stop" - """Divides text at non-letters; Applies the lowercase and stopword token filters. See + """Divides text at non-letters; Applies the lowercase and stopword token filters. + See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html""" WHITESPACE = "whitespace" """An analyzer that uses the whitespace tokenizer. 
See @@ -674,12 +752,13 @@ class LexicalNormalizerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Defines the names of all text normalizers supported by the search engine.""" ASCII_FOLDING = "asciifolding" - """Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 - ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such - equivalents exist. See + """Converts alphabetic, numeric, and symbolic Unicode characters which are not in + the first 127 ASCII characters (the "Basic Latin" Unicode block) into their + ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html""" ELISION = "elision" - """Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See + """Removes elisions. For example, "l'avion" (the plane) will be converted to + "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html""" LOWERCASE = "lowercase" """Normalizes token text to lowercase. See @@ -696,7 +775,8 @@ class LexicalTokenizerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Defines the names of all tokenizers supported by the search engine.""" CLASSIC = "classic" - """Grammar-based tokenizer that is suitable for processing most European-language documents. See + """Grammar-based tokenizer that is suitable for processing most European-language + documents. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicTokenizer.html""" EDGE_N_GRAM = "edgeNGram" """Tokenizes the input from an edge into n-grams of the given size(s). See @@ -713,7 +793,8 @@ class LexicalTokenizerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): MICROSOFT_LANGUAGE_TOKENIZER = "microsoft_language_tokenizer" """Divides text using language-specific rules.""" MICROSOFT_LANGUAGE_STEMMING_TOKENIZER = "microsoft_language_stemming_tokenizer" - """Divides text using language-specific rules and reduces words to their base forms.""" + """Divides text using language-specific rules and reduces words to their base + forms.""" N_GRAM = "nGram" """Tokenizes the input into n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenizer.html""" @@ -724,8 +805,8 @@ class LexicalTokenizerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Tokenizer that uses regex pattern matching to construct distinct tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/pattern/PatternTokenizer.html""" STANDARD = "standard_v2" - """Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. - See + """Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter + and stop filter. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/StandardTokenizer.html""" UAX_URL_EMAIL = "uax_url_email" """Tokenizes urls and emails as one token. See @@ -736,34 +817,41 @@ class LexicalTokenizerName(str, Enum, metaclass=CaseInsensitiveEnumMeta): class MarkdownHeaderDepth(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the max header depth that will be considered while grouping markdown content. Default - is ``h6``. + """Specifies the max header depth that will be considered while grouping markdown + content. Default is ``h6``. 
""" H1 = "h1" - """Indicates that headers up to a level of h1 will be considered while grouping markdown content.""" + """Indicates that headers up to a level of h1 will be considered while grouping + markdown content.""" H2 = "h2" - """Indicates that headers up to a level of h2 will be considered while grouping markdown content.""" + """Indicates that headers up to a level of h2 will be considered while grouping + markdown content.""" H3 = "h3" - """Indicates that headers up to a level of h3 will be considered while grouping markdown content.""" + """Indicates that headers up to a level of h3 will be considered while grouping + markdown content.""" H4 = "h4" - """Indicates that headers up to a level of h4 will be considered while grouping markdown content.""" + """Indicates that headers up to a level of h4 will be considered while grouping + markdown content.""" H5 = "h5" - """Indicates that headers up to a level of h5 will be considered while grouping markdown content.""" + """Indicates that headers up to a level of h5 will be considered while grouping + markdown content.""" H6 = "h6" - """Indicates that headers up to a level of h6 will be considered while grouping markdown content. - This is the default.""" + """Indicates that headers up to a level of h6 will be considered while grouping + markdown content. This is the default.""" class MarkdownParsingSubmode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the submode that will determine whether a markdown file will be parsed into exactly - one search document or multiple search documents. Default is ``oneToMany``. + """Specifies the submode that will determine whether a markdown file will be + parsed into exactly one search document or multiple search documents. Default + is ``oneToMany``. """ ONE_TO_MANY = "oneToMany" - """Indicates that each section of the markdown file (up to a specified depth) will be parsed into - individual search documents. This can result in a single markdown file producing multiple - search documents. This is the default sub-mode.""" + """Indicates that each section of the markdown file (up to a specified depth) will + be parsed into individual search documents. This can result in a single + markdown file producing multiple search documents. This is the default + sub-mode.""" ONE_TO_ONE = "oneToOne" """Indicates that each markdown file will be parsed into a single search document.""" @@ -953,8 +1041,8 @@ class MicrosoftTokenizerLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): class OcrLineEnding(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Defines the sequence of characters to use between the lines of text recognized by the OCR - skill. The default value is "space". + """Defines the sequence of characters to use between the lines of text recognized + by the OCR skill. The default value is "space". """ SPACE = "space" @@ -1310,8 +1398,6 @@ class OcrSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Zulu""" UNK = "unk" """Unknown (All)""" - IS_ENUM = "is" - """Icelandic""" class PhoneticEncoder(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1342,21 +1428,275 @@ class PhoneticEncoder(str, Enum, metaclass=CaseInsensitiveEnumMeta): class PIIDetectionSkillMaskingMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A string indicating what maskingMode to use to mask the personal information detected in the - input text. + """A string indicating what maskingMode to use to mask the personal information + detected in the input text. 
""" NONE = "none" """No masking occurs and the maskedText output will not be returned.""" REPLACE = "replace" - """Replaces the detected entities with the character given in the maskingCharacter parameter. The - character will be repeated to the length of the detected entity so that the offsets will - correctly correspond to both the input text as well as the output maskedText.""" + """Replaces the detected entities with the character given in the maskingCharacter + parameter. The character will be repeated to the length of the detected entity + so that the offsets will correctly correspond to both the input text as well as + the output maskedText.""" + + +class QueryAnswerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """This parameter is only valid if the query type is ``semantic``. If set, the query + returns answers extracted from key passages in the highest ranked documents. + The number of answers returned can be configured by appending the pipe + character ``|`` followed by the ``count-`` option after the + answers parameter value, such as ``extractive|count-3``. Default count is 1. The + confidence threshold can be configured by appending the pipe character ``|`` + followed by the ``threshold-`` option after the answers + parameter value, such as ``extractive|threshold-0.9``. Default threshold is 0.7. + The maximum character length of answers can be configured by appending the pipe + character '|' followed by the 'count-:code:``', + such as 'extractive|maxcharlength-600'. + """ + + NONE = "none" + """Do not return answers for the query.""" + EXTRACTIVE = "extractive" + """Extracts answer candidates from the contents of the documents returned in + response to a query expressed as a question in natural language.""" + + +class QueryCaptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """This parameter is only valid if the query type is ``semantic``. If set, the query + returns captions extracted from key passages in the highest ranked documents. + When Captions is set to ``extractive``\\ , highlighting is enabled by default, and + can be configured by appending the pipe character ``|`` followed by the + ``highlight-`` option, such as ``extractive|highlight-true``. Defaults + to ``None``. The maximum character length of captions can be configured by + appending the pipe character '|' followed by the 'count-:code:``', such as 'extractive|maxcharlength-600'. + """ + + NONE = "none" + """Do not return captions for the query.""" + EXTRACTIVE = "extractive" + """Extracts captions from the matching documents that contain passages relevant to + the search query.""" + + +class QueryDebugMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Enables a debugging tool that can be used to further explore your search + results. You can enable multiple debug modes simultaneously by separating them + with a | character, for example: semantic|queryRewrites. 
+ """ + + DISABLED = "disabled" + """No query debugging information will be returned.""" + SEMANTIC = "semantic" + """Allows the user to further explore their reranked results.""" + VECTOR = "vector" + """Allows the user to further explore their hybrid and vector query results.""" + QUERY_REWRITES = "queryRewrites" + """Allows the user to explore the list of query rewrites generated for their + search request.""" + ALL = "all" + """Turn on all debug options.""" + + +class QueryLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The language of the query.""" + + NONE = "none" + """Query language not specified.""" + EN_US = "en-us" + """Query language value for English (United States).""" + EN_GB = "en-gb" + """Query language value for English (Great Britain).""" + EN_IN = "en-in" + """Query language value for English (India).""" + EN_CA = "en-ca" + """Query language value for English (Canada).""" + EN_AU = "en-au" + """Query language value for English (Australia).""" + FR_FR = "fr-fr" + """Query language value for French (France).""" + FR_CA = "fr-ca" + """Query language value for French (Canada).""" + DE_DE = "de-de" + """Query language value for German (Germany).""" + ES_ES = "es-es" + """Query language value for Spanish (Spain).""" + ES_MX = "es-mx" + """Query language value for Spanish (Mexico).""" + ZH_CN = "zh-cn" + """Query language value for Chinese (China).""" + ZH_TW = "zh-tw" + """Query language value for Chinese (Taiwan).""" + PT_BR = "pt-br" + """Query language value for Portuguese (Brazil).""" + PT_PT = "pt-pt" + """Query language value for Portuguese (Portugal).""" + IT_IT = "it-it" + """Query language value for Italian (Italy).""" + JA_JP = "ja-jp" + """Query language value for Japanese (Japan).""" + KO_KR = "ko-kr" + """Query language value for Korean (Korea).""" + RU_RU = "ru-ru" + """Query language value for Russian (Russia).""" + CS_CZ = "cs-cz" + """Query language value for Czech (Czech Republic).""" + NL_BE = "nl-be" + """Query language value for Dutch (Belgium).""" + NL_NL = "nl-nl" + """Query language value for Dutch (Netherlands).""" + HU_HU = "hu-hu" + """Query language value for Hungarian (Hungary).""" + PL_PL = "pl-pl" + """Query language value for Polish (Poland).""" + SV_SE = "sv-se" + """Query language value for Swedish (Sweden).""" + TR_TR = "tr-tr" + """Query language value for Turkish (Turkey).""" + HI_IN = "hi-in" + """Query language value for Hindi (India).""" + AR_SA = "ar-sa" + """Query language value for Arabic (Saudi Arabia).""" + AR_EG = "ar-eg" + """Query language value for Arabic (Egypt).""" + AR_MA = "ar-ma" + """Query language value for Arabic (Morocco).""" + AR_KW = "ar-kw" + """Query language value for Arabic (Kuwait).""" + AR_JO = "ar-jo" + """Query language value for Arabic (Jordan).""" + DA_DK = "da-dk" + """Query language value for Danish (Denmark).""" + NO_NO = "no-no" + """Query language value for Norwegian (Norway).""" + BG_BG = "bg-bg" + """Query language value for Bulgarian (Bulgaria).""" + HR_HR = "hr-hr" + """Query language value for Croatian (Croatia).""" + HR_BA = "hr-ba" + """Query language value for Croatian (Bosnia and Herzegovina).""" + MS_MY = "ms-my" + """Query language value for Malay (Malaysia).""" + MS_BN = "ms-bn" + """Query language value for Malay (Brunei Darussalam).""" + SL_SL = "sl-sl" + """Query language value for Slovenian (Slovenia).""" + TA_IN = "ta-in" + """Query language value for Tamil (India).""" + VI_VN = "vi-vn" + """Query language value for Vietnamese (Viet Nam).""" + EL_GR = "el-gr" + """Query 
language value for Greek (Greece).""" + RO_RO = "ro-ro" + """Query language value for Romanian (Romania).""" + IS_IS = "is-is" + """Query language value for Icelandic (Iceland).""" + ID_ID = "id-id" + """Query language value for Indonesian (Indonesia).""" + TH_TH = "th-th" + """Query language value for Thai (Thailand).""" + LT_LT = "lt-lt" + """Query language value for Lithuanian (Lithuania).""" + UK_UA = "uk-ua" + """Query language value for Ukrainian (Ukraine).""" + LV_LV = "lv-lv" + """Query language value for Latvian (Latvia).""" + ET_EE = "et-ee" + """Query language value for Estonian (Estonia).""" + CA_ES = "ca-es" + """Query language value for Catalan.""" + FI_FI = "fi-fi" + """Query language value for Finnish (Finland).""" + SR_BA = "sr-ba" + """Query language value for Serbian (Bosnia and Herzegovina).""" + SR_ME = "sr-me" + """Query language value for Serbian (Montenegro).""" + SR_RS = "sr-rs" + """Query language value for Serbian (Serbia).""" + SK_SK = "sk-sk" + """Query language value for Slovak (Slovakia).""" + NB_NO = "nb-no" + """Query language value for Norwegian (Norway).""" + HY_AM = "hy-am" + """Query language value for Armenian (Armenia).""" + BN_IN = "bn-in" + """Query language value for Bengali (India).""" + EU_ES = "eu-es" + """Query language value for Basque.""" + GL_ES = "gl-es" + """Query language value for Galician.""" + GU_IN = "gu-in" + """Query language value for Gujarati (India).""" + HE_IL = "he-il" + """Query language value for Hebrew (Israel).""" + GA_IE = "ga-ie" + """Query language value for Irish (Ireland).""" + KN_IN = "kn-in" + """Query language value for Kannada (India).""" + ML_IN = "ml-in" + """Query language value for Malayalam (India).""" + MR_IN = "mr-in" + """Query language value for Marathi (India).""" + FA_AE = "fa-ae" + """Query language value for Persian (U.A.E.).""" + PA_IN = "pa-in" + """Query language value for Punjabi (India).""" + TE_IN = "te-in" + """Query language value for Telugu (India).""" + UR_PK = "ur-pk" + """Query language value for Urdu (Pakistan).""" + + +class QueryRewritesType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """This parameter is only valid if the query type is ``semantic``. When + QueryRewrites is set to ``generative``\\ , the query terms are sent to a generate + model which will produce 10 (default) rewrites to help increase the recall of + the request. The requested count can be configured by appending the pipe + character ``|`` followed by the ``count-`` option, such as + ``generative|count-3``. Defaults to ``None``. + """ + + NONE = "none" + """Do not generate additional query rewrites for this query.""" + GENERATIVE = "generative" + """Generate alternative query terms to increase the recall of a search request.""" + + +class QuerySpellerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Improve search recall by spell-correcting individual search query terms.""" + + NONE = "none" + """Speller not enabled.""" + LEXICON = "lexicon" + """Speller corrects individual query terms using a static lexicon for the language + specified by the queryLanguage parameter.""" + + +class QueryType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies the syntax of the search query. The default is 'simple'. Use 'full' + if your query uses the Lucene query syntax and 'semantic' if query syntax is + not needed. + """ + + SIMPLE = "simple" + """Uses the simple query syntax for searches. Search text is interpreted using a + simple query language that allows for symbols such as +, * and "". 
Queries are + evaluated across all searchable fields by default, unless the searchFields + parameter is specified.""" + FULL = "full" + """Uses the full Lucene query syntax for searches. Search text is interpreted + using the Lucene query language which allows field-specific and weighted + searches, as well as other advanced features.""" + SEMANTIC = "semantic" + """Best suited for queries expressed in natural language as opposed to keywords. + Improves precision of search results by re-ranking the top search results using + a ranking model trained on the Web corpus.""" class RegexFlags(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Defines flags that can be combined to control how regular expressions are used in the pattern - analyzer and pattern tokenizer. + """Defines flags that can be combined to control how regular expressions are used + in the pattern analyzer and pattern tokenizer. """ CANON_EQ = "CANON_EQ" @@ -1378,8 +1718,8 @@ class RegexFlags(str, Enum, metaclass=CaseInsensitiveEnumMeta): class ScoringFunctionAggregation(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Defines the aggregation function used to combine the results of all the scoring functions in a - scoring profile. + """Defines the aggregation function used to combine the results of all the scoring + functions in a scoring profile. """ SUM = "sum" @@ -1395,21 +1735,37 @@ class ScoringFunctionAggregation(str, Enum, metaclass=CaseInsensitiveEnumMeta): class ScoringFunctionInterpolation(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Defines the function used to interpolate score boosting across a range of documents.""" + """Defines the function used to interpolate score boosting across a range of + documents. + """ LINEAR = "linear" - """Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring - functions.""" + """Boosts scores by a linearly decreasing amount. This is the default + interpolation for scoring functions.""" CONSTANT = "constant" """Boosts scores by a constant factor.""" QUADRATIC = "quadratic" - """Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher - scores, and more quickly as the scores decrease. This interpolation option is not allowed in - tag scoring functions.""" + """Boosts scores by an amount that decreases quadratically. Boosts decrease slowly + for higher scores, and more quickly as the scores decrease. This interpolation + option is not allowed in tag scoring functions.""" LOGARITHMIC = "logarithmic" - """Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher - scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag - scoring functions.""" + """Boosts scores by an amount that decreases logarithmically. Boosts decrease + quickly for higher scores, and more slowly as the scores decrease. This + interpolation option is not allowed in tag scoring functions.""" + + +class ScoringStatistics(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """A value that specifies whether we want to calculate scoring statistics (such as + document frequency) globally for more consistent scoring, or locally, for lower + latency. The default is 'local'. Use 'global' to aggregate scoring statistics + globally before scoring. Using global scoring statistics can increase latency + of search queries. 
+ """ + + LOCAL = "local" + """The scoring statistics will be calculated locally for lower latency.""" + GLOBAL = "global" + """The scoring statistics will be calculated globally for more consistent scoring.""" class SearchFieldDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1426,27 +1782,29 @@ class SearchFieldDataType(str, Enum, metaclass=CaseInsensitiveEnumMeta): BOOLEAN = "Edm.Boolean" """Indicates that a field contains a Boolean value (true or false).""" DATE_TIME_OFFSET = "Edm.DateTimeOffset" - """Indicates that a field contains a date/time value, including timezone information.""" + """Indicates that a field contains a date/time value, including timezone + information.""" GEOGRAPHY_POINT = "Edm.GeographyPoint" - """Indicates that a field contains a geo-location in terms of longitude and latitude.""" + """Indicates that a field contains a geo-location in terms of longitude and + latitude.""" COMPLEX = "Edm.ComplexType" - """Indicates that a field contains one or more complex objects that in turn have sub-fields of - other types.""" + """Indicates that a field contains one or more complex objects that in turn have + sub-fields of other types.""" SINGLE = "Edm.Single" - """Indicates that a field contains a single-precision floating point number. This is only valid - when used with Collection(Edm.Single).""" + """Indicates that a field contains a single-precision floating point number. This + is only valid when used with Collection(Edm.Single).""" HALF = "Edm.Half" - """Indicates that a field contains a half-precision floating point number. This is only valid when - used with Collection(Edm.Half).""" + """Indicates that a field contains a half-precision floating point number. This is + only valid when used with Collection(Edm.Half).""" INT16 = "Edm.Int16" - """Indicates that a field contains a 16-bit signed integer. This is only valid when used with - Collection(Edm.Int16).""" + """Indicates that a field contains a 16-bit signed integer. This is only valid + when used with Collection(Edm.Int16).""" S_BYTE = "Edm.SByte" - """Indicates that a field contains a 8-bit signed integer. This is only valid when used with - Collection(Edm.SByte).""" + """Indicates that a field contains a 8-bit signed integer. This is only valid when + used with Collection(Edm.SByte).""" BYTE = "Edm.Byte" - """Indicates that a field contains a 8-bit unsigned integer. This is only valid when used with - Collection(Edm.Byte).""" + """Indicates that a field contains a 8-bit unsigned integer. This is only valid + when used with Collection(Edm.Byte).""" class SearchIndexerDataSourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1468,6 +1826,75 @@ class SearchIndexerDataSourceType(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Indicates a Microsoft Fabric OneLake datasource.""" +class SearchMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Specifies whether any or all of the search terms must be matched in order to + count the document as a match. + """ + + ANY = "any" + """Any of the search terms must be matched in order to count the document as a + match.""" + ALL = "all" + """All of the search terms must be matched in order to count the document as a + match.""" + + +class SemanticErrorMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Allows the user to choose whether a semantic call should fail completely, or to + return partial results. + """ + + PARTIAL = "partial" + """If the semantic processing fails, partial results still return. 
The definition + of partial results depends on what semantic step failed and what was the reason + for failure.""" + FAIL = "fail" + """If there is an exception during the semantic processing step, the query will + fail and return the appropriate HTTP code depending on the error.""" + + +class SemanticErrorReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Reason that a partial response was returned for a semantic ranking request.""" + + MAX_WAIT_EXCEEDED = "maxWaitExceeded" + """If ``semanticMaxWaitInMilliseconds`` was set and the semantic processing duration + exceeded that value. Only the base results were returned.""" + CAPACITY_OVERLOADED = "capacityOverloaded" + """The request was throttled. Only the base results were returned.""" + TRANSIENT = "transient" + """At least one step of the semantic process failed.""" + + +class SemanticFieldState(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The way the field was used for the semantic enrichment process.""" + + USED = "used" + """The field was fully used for semantic enrichment.""" + UNUSED = "unused" + """The field was not used for semantic enrichment.""" + PARTIAL = "partial" + """The field was partially used for semantic enrichment.""" + + +class SemanticQueryRewritesResultType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of query rewrite that was used for this request.""" + + ORIGINAL_QUERY_ONLY = "originalQueryOnly" + """Query rewrites were not successfully generated for this request. Only the + original query was used to retrieve the results.""" + + +class SemanticSearchResultsType(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Type of partial response that was returned for a semantic ranking request.""" + + BASE_RESULTS = "baseResults" + """Results without any semantic enrichment or reranking.""" + RERANKED_RESULTS = "rerankedResults" + """Results have been reranked with the reranker model and will include semantic + captions. They will not include any answers, answers highlights or caption + highlights.""" + + class SentimentSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Deprecated. 
The language codes supported for input text by SentimentSkill.""" @@ -1525,22 +1952,23 @@ class SnowballTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): GERMAN = "german" """Selects the Lucene Snowball stemming tokenizer for German.""" GERMAN2 = "german2" - """Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm.""" + """Selects the Lucene Snowball stemming tokenizer that uses the German variant + algorithm.""" HUNGARIAN = "hungarian" """Selects the Lucene Snowball stemming tokenizer for Hungarian.""" ITALIAN = "italian" """Selects the Lucene Snowball stemming tokenizer for Italian.""" KP = "kp" - """Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming - algorithm.""" + """Selects the Lucene Snowball stemming tokenizer for Dutch that uses the + Kraaij-Pohlmann stemming algorithm.""" LOVINS = "lovins" - """Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming - algorithm.""" + """Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins + stemming algorithm.""" NORWEGIAN = "norwegian" """Selects the Lucene Snowball stemming tokenizer for Norwegian.""" PORTER = "porter" - """Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming - algorithm.""" + """Selects the Lucene Snowball stemming tokenizer for English that uses the Porter + stemming algorithm.""" PORTUGUESE = "portuguese" """Selects the Lucene Snowball stemming tokenizer for Portuguese.""" ROMANIAN = "romanian" @@ -1559,13 +1987,13 @@ class SplitSkillEncoderModelName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """A value indicating which tokenizer to use.""" R50_K_BASE = "r50k_base" - """Refers to a base model trained with a 50,000 token vocabulary, often used in general natural - language processing tasks.""" + """Refers to a base model trained with a 50,000 token vocabulary, often used in + general natural language processing tasks.""" P50_K_BASE = "p50k_base" """A base model with a 50,000 token vocabulary, optimized for prompt-based tasks.""" P50_K_EDIT = "p50k_edit" - """Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token - vocabulary.""" + """Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a + 50,000 token vocabulary.""" CL100_K_BASE = "cl100k_base" """A base model with a 100,000 token vocabulary.""" @@ -1639,8 +2067,6 @@ class SplitSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Urdu""" ZH = "zh" """Chinese (Simplified)""" - IS_ENUM = "is" - """Icelandic""" class SplitSkillUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1649,7 +2075,8 @@ class SplitSkillUnit(str, Enum, metaclass=CaseInsensitiveEnumMeta): CHARACTERS = "characters" """The length will be measured by character.""" AZURE_OPEN_AI_TOKENS = "azureOpenAITokens" - """The length will be measured by an AzureOpenAI tokenizer from the tiktoken library.""" + """The length will be measured by an AzureOpenAI tokenizer from the tiktoken + library.""" class StemmerTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -1674,8 +2101,8 @@ class StemmerTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): DUTCH = "dutch" """Selects the Lucene stemming tokenizer for Dutch.""" DUTCH_KP = "dutchKp" - """Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming - algorithm.""" + """Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann + 
stemming algorithm.""" ENGLISH = "english" """Selects the Lucene stemming tokenizer for English.""" LIGHT_ENGLISH = "lightEnglish" @@ -1683,11 +2110,14 @@ class StemmerTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): MINIMAL_ENGLISH = "minimalEnglish" """Selects the Lucene stemming tokenizer for English that does minimal stemming.""" POSSESSIVE_ENGLISH = "possessiveEnglish" - """Selects the Lucene stemming tokenizer for English that removes trailing possessives from words.""" + """Selects the Lucene stemming tokenizer for English that removes trailing + possessives from words.""" PORTER2 = "porter2" - """Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm.""" + """Selects the Lucene stemming tokenizer for English that uses the Porter2 + stemming algorithm.""" LOVINS = "lovins" - """Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm.""" + """Selects the Lucene stemming tokenizer for English that uses the Lovins stemming + algorithm.""" FINNISH = "finnish" """Selects the Lucene stemming tokenizer for Finnish.""" LIGHT_FINNISH = "lightFinnish" @@ -1733,13 +2163,17 @@ class StemmerTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): NORWEGIAN = "norwegian" """Selects the Lucene stemming tokenizer for Norwegian (Bokmål).""" LIGHT_NORWEGIAN = "lightNorwegian" - """Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming.""" + """Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light + stemming.""" MINIMAL_NORWEGIAN = "minimalNorwegian" - """Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming.""" + """Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal + stemming.""" LIGHT_NYNORSK = "lightNynorsk" - """Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming.""" + """Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light + stemming.""" MINIMAL_NYNORSK = "minimalNynorsk" - """Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming.""" + """Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal + stemming.""" PORTUGUESE = "portuguese" """Selects the Lucene stemming tokenizer for Portuguese.""" LIGHT_PORTUGUESE = "lightPortuguese" @@ -1747,7 +2181,8 @@ class StemmerTokenFilterLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): MINIMAL_PORTUGUESE = "minimalPortuguese" """Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming.""" PORTUGUESE_RSLP = "portugueseRslp" - """Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm.""" + """Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP + stemming algorithm.""" ROMANIAN = "romanian" """Selects the Lucene stemming tokenizer for Romanian.""" RUSSIAN = "russian" @@ -1989,8 +2424,6 @@ class TextTranslationSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta) """Malayalam""" PA = "pa" """Punjabi""" - IS_ENUM = "is" - """Icelandic""" class TokenCharacterKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -2012,39 +2445,44 @@ class TokenFilterName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Defines the names of all token filters supported by the search engine.""" ARABIC_NORMALIZATION = "arabic_normalization" - """A token filter that applies the Arabic normalizer to normalize the orthography. 
See + """A token filter that applies the Arabic normalizer to normalize the orthography. + See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html""" APOSTROPHE = "apostrophe" - """Strips all characters after an apostrophe (including the apostrophe itself). See + """Strips all characters after an apostrophe (including the apostrophe itself). + See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html""" ASCII_FOLDING = "asciifolding" - """Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 - ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such - equivalents exist. See + """Converts alphabetic, numeric, and symbolic Unicode characters which are not in + the first 127 ASCII characters (the "Basic Latin" Unicode block) into their + ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html""" CJK_BIGRAM = "cjk_bigram" """Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html""" CJK_WIDTH = "cjk_width" - """Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic - Latin, and half-width Katakana variants into the equivalent Kana. See + """Normalizes CJK width differences. Folds fullwidth ASCII variants into the + equivalent basic Latin, and half-width Katakana variants into the equivalent + Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html""" CLASSIC = "classic" """Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html""" COMMON_GRAM = "common_grams" - """Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed - too, with bigrams overlaid. See + """Construct bigrams for frequently occurring terms while indexing. Single terms + are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html""" EDGE_N_GRAM = "edgeNGram_v2" - """Generates n-grams of the given size(s) starting from the front or the back of an input token. - See + """Generates n-grams of the given size(s) starting from the front or the back of + an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html""" ELISION = "elision" - """Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See + """Removes elisions. For example, "l'avion" (the plane) will be converted to + "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html""" GERMAN_NORMALIZATION = "german_normalization" - """Normalizes German characters according to the heuristics of the German2 snowball algorithm. See + """Normalizes German characters according to the heuristics of the German2 + snowball algorithm. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html""" HINDI_NORMALIZATION = "hindi_normalization" """Normalizes text in Hindi to remove some differences in spelling variations. See @@ -2086,8 +2524,9 @@ class TokenFilterName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html""" SCANDINAVIAN_FOLDING_NORMALIZATION = "scandinavian_folding" - """Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of - double vowels aa, ae, ao, oe and oo, leaving just the first one. See + """Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also + discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just + the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html""" SHINGLE = "shingle" """Creates combinations of tokens as a single token. See @@ -2117,7 +2556,8 @@ class TokenFilterName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Normalizes token text to upper case. See https://lucene.apache.org/core/6_6_1/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html""" WORD_DELIMITER = "word_delimiter" - """Splits words into subwords and performs optional transformations on subword groups.""" + """Splits words into subwords and performs optional transformations on subword + groups.""" class VectorEncodingFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -2127,66 +2567,99 @@ class VectorEncodingFormat(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Encoding format representing bits packed into a wider data type.""" +class VectorFilterMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """Determines whether or not filters are applied before or after the vector search + is performed. + """ + + POST_FILTER = "postFilter" + """The filter will be applied after the candidate set of vector results is + returned. Depending on the filter selectivity, this can result in fewer results + than requested by the parameter 'k'.""" + PRE_FILTER = "preFilter" + """The filter will be applied before the search query.""" + + +class VectorQueryKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of vector query being performed.""" + + VECTOR = "vector" + """Vector query where a raw vector value is provided.""" + TEXT = "text" + """Vector query where a text value that needs to be vectorized is provided.""" + IMAGE_URL = "imageUrl" + """Vector query where an url that represents an image value that needs to be + vectorized is provided.""" + IMAGE_BINARY = "imageBinary" + """Vector query where a base 64 encoded binary of an image that needs to be + vectorized is provided.""" + + class VectorSearchAlgorithmKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The algorithm used for indexing and querying.""" HNSW = "hnsw" - """HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm.""" + """HNSW (Hierarchical Navigable Small World), a type of approximate nearest + neighbors algorithm.""" EXHAUSTIVE_KNN = "exhaustiveKnn" """Exhaustive KNN algorithm which will perform brute-force search.""" class VectorSearchAlgorithmMetric(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The similarity metric to use for vector comparisons. 
It is recommended to choose the same - similarity metric as the embedding model was trained on. + """The similarity metric to use for vector comparisons. It is recommended to + choose the same similarity metric as the embedding model was trained on. """ COSINE = "cosine" - """Measures the angle between vectors to quantify their similarity, disregarding magnitude. The - smaller the angle, the closer the similarity.""" + """Measures the angle between vectors to quantify their similarity, disregarding + magnitude. The smaller the angle, the closer the similarity.""" EUCLIDEAN = "euclidean" - """Computes the straight-line distance between vectors in a multi-dimensional space. The smaller - the distance, the closer the similarity.""" + """Computes the straight-line distance between vectors in a multi-dimensional + space. The smaller the distance, the closer the similarity.""" DOT_PRODUCT = "dotProduct" - """Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The - larger and more positive, the closer the similarity.""" + """Calculates the sum of element-wise products to gauge alignment and magnitude + similarity. The larger and more positive, the closer the similarity.""" HAMMING = "hamming" - """Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing - positions in binary vectors. The fewer differences, the closer the similarity.""" + """Only applicable to bit-packed binary data types. Determines dissimilarity by + counting differing positions in binary vectors. The fewer differences, the + closer the similarity.""" class VectorSearchCompressionKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The compression method used for indexing and querying.""" SCALAR_QUANTIZATION = "scalarQuantization" - """Scalar Quantization, a type of compression method. In scalar quantization, the original vectors - values are compressed to a narrower type by discretizing and representing each component of a - vector using a reduced set of quantized values, thereby reducing the overall data size.""" + """Scalar Quantization, a type of compression method. In scalar quantization, the + original vectors values are compressed to a narrower type by discretizing and + representing each component of a vector using a reduced set of quantized + values, thereby reducing the overall data size.""" BINARY_QUANTIZATION = "binaryQuantization" - """Binary Quantization, a type of compression method. In binary quantization, the original vectors - values are compressed to the narrower binary type by discretizing and representing each - component of a vector using binary values, thereby reducing the overall data size.""" + """Binary Quantization, a type of compression method. In binary quantization, the + original vectors values are compressed to the narrower binary type by + discretizing and representing each component of a vector using binary values, + thereby reducing the overall data size.""" class VectorSearchCompressionRescoreStorageMethod(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The storage method for the original full-precision vectors used for rescoring and internal - index operations. + """The storage method for the original full-precision vectors used for rescoring + and internal index operations. """ PRESERVE_ORIGINALS = "preserveOriginals" - """This option preserves the original full-precision vectors. Choose this option for maximum - flexibility and highest quality of compressed search results. 
This consumes more storage but - allows for rescoring and oversampling.""" + """This option preserves the original full-precision vectors. Choose this option + for maximum flexibility and highest quality of compressed search results. This + consumes more storage but allows for rescoring and oversampling.""" DISCARD_ORIGINALS = "discardOriginals" - """This option discards the original full-precision vectors. Choose this option for maximum - storage savings. Since this option does not allow for rescoring and oversampling, it will often - cause slight to moderate reductions in quality.""" + """This option discards the original full-precision vectors. Choose this option + for maximum storage savings. Since this option does not allow for rescoring and + oversampling, it will often cause slight to moderate reductions in quality.""" class VectorSearchCompressionTarget(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The quantized data type of compressed vector values.""" INT8 = "int8" + """8-bit signed integer.""" class VectorSearchVectorizerKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -2197,11 +2670,25 @@ class VectorSearchVectorizerKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): CUSTOM_WEB_API = "customWebApi" """Generate embeddings using a custom web endpoint at query time.""" AI_SERVICES_VISION = "aiServicesVision" - """Generate embeddings for an image or text input at query time using the Azure AI Services Vision - Vectorize API.""" + """Generate embeddings for an image or text input at query time using the Azure AI + Services Vision Vectorize API.""" AML = "aml" - """Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Studio - Model Catalog at query time.""" + """Generate embeddings using an Azure Machine Learning endpoint deployed via the + Azure AI Studio Model Catalog at query time.""" + + +class VectorThresholdKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): + """The kind of vector query being performed.""" + + VECTOR_SIMILARITY = "vectorSimilarity" + """The results of the vector query will be filtered based on the vector similarity + metric. Note this is the canonical definition of similarity metric, not the + 'distance' version. The threshold direction (larger or smaller) will be chosen + automatically according to the metric used by the field.""" + SEARCH_SCORE = "searchScore" + """The results of the vector query will filter based on the '@search.score' value. + Note this is the @search.score returned as part of the search response. The + threshold direction will be chosen for higher @search.score.""" class VisualFeature(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py new file mode 100644 index 000000000000..5dd05a7fcdde --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -0,0 +1,12173 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+# -------------------------------------------------------------------------- +# pylint: disable=useless-super-delegation + +import datetime +from typing import Any, Dict, List, Literal, Mapping, Optional, TYPE_CHECKING, Union, overload + +from .. import _model_base +from .._model_base import rest_discriminator, rest_field +from ._enums import ( + VectorQueryKind, + VectorSearchAlgorithmKind, + VectorSearchCompressionKind, + VectorSearchVectorizerKind, + VectorThresholdKind, +) + +if TYPE_CHECKING: + from .. import models as _models + + +class CognitiveServicesAccount(_model_base.Model): + """Base type for describing any Azure AI service resource attached to a skillset. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AIServicesAccountIdentity, AIServicesAccountKey, CognitiveServicesAccountKey, + DefaultCognitiveServicesAccount + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + :ivar description: Description of the Azure AI service resource attached to a skillset. + :vartype description: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + description: Optional[str] = rest_field() + """Description of the Azure AI service resource attached to a skillset.""" + + @overload + def __init__( + self, + *, + odata_type: str, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AIServicesAccountIdentity(CognitiveServicesAccount, discriminator="#Microsoft.Azure.Search.AIServicesByIdentity"): + """The multi-region account of an Azure AI service resource that's attached to a + skillset. + + + :ivar description: Description of the Azure AI service resource attached to a skillset. + :vartype description: str + :ivar identity: The user-assigned managed identity used for connections to AI Service. If not + specified, the system-assigned managed identity is used. On updates to the + skillset, if the identity is unspecified, the value remains unchanged. If set + to "none", the value of this property is cleared. Required. + :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity + :ivar subdomain_url: The subdomain url for the corresponding AI Service. Required. + :vartype subdomain_url: str + :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a + skillset. Required. Default value is "#Microsoft.Azure.Search.AIServicesByIdentity". + :vartype odata_type: str + """ + + identity: "_models.SearchIndexerDataIdentity" = rest_field() + """The user-assigned managed identity used for connections to AI Service. If not + specified, the system-assigned managed identity is used. On updates to the + skillset, if the identity is unspecified, the value remains unchanged. If set + to \"none\", the value of this property is cleared. Required.""" + subdomain_url: str = rest_field(name="subdomainUrl") + """The subdomain url for the corresponding AI Service. 
Required.""" + odata_type: Literal["#Microsoft.Azure.Search.AIServicesByIdentity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of Azure AI service resource attached to a + skillset. Required. Default value is \"#Microsoft.Azure.Search.AIServicesByIdentity\".""" + + @overload + def __init__( + self, + *, + identity: "_models.SearchIndexerDataIdentity", + subdomain_url: str, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.AIServicesByIdentity", **kwargs) + + +class AIServicesAccountKey(CognitiveServicesAccount, discriminator="#Microsoft.Azure.Search.AIServicesByKey"): + """The account key of an Azure AI service resource that's attached to a skillset, + to be used with the resource's subdomain. + + + :ivar description: Description of the Azure AI service resource attached to a skillset. + :vartype description: str + :ivar key: The key used to provision the Azure AI service resource attached to a skillset. + Required. + :vartype key: str + :ivar subdomain_url: The subdomain url for the corresponding AI Service. Required. + :vartype subdomain_url: str + :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a + skillset. Required. Default value is "#Microsoft.Azure.Search.AIServicesByKey". + :vartype odata_type: str + """ + + key: str = rest_field() + """The key used to provision the Azure AI service resource attached to a skillset. Required.""" + subdomain_url: str = rest_field(name="subdomainUrl") + """The subdomain url for the corresponding AI Service. Required.""" + odata_type: Literal["#Microsoft.Azure.Search.AIServicesByKey"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of Azure AI service resource attached to a + skillset. Required. Default value is \"#Microsoft.Azure.Search.AIServicesByKey\".""" + + @overload + def __init__( + self, + *, + key: str, + subdomain_url: str, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.AIServicesByKey", **kwargs) + + +class AIServicesVisionParameters(_model_base.Model): + """Specifies the AI Services Vision parameters for vectorizing a query image or + text. + + + :ivar model_version: The version of the model to use when calling the AI Services Vision + service. It + will default to the latest available when not specified. Required. + :vartype model_version: str + :ivar resource_uri: The resource URI of the AI Services resource. Required. + :vartype resource_uri: str + :ivar api_key: API key of the designated AI Services resource. + :vartype api_key: str + :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an + authResourceId is provided and it's not specified, the system-assigned managed + identity is used. On updates to the index, if the identity is unspecified, the + value remains unchanged. 
If set to "none", the value of this property is + cleared. + :vartype auth_identity: ~azure.search.documents.models.SearchIndexerDataIdentity + """ + + model_version: str = rest_field(name="modelVersion") + """The version of the model to use when calling the AI Services Vision service. It + will default to the latest available when not specified. Required.""" + resource_uri: str = rest_field(name="resourceUri") + """The resource URI of the AI Services resource. Required.""" + api_key: Optional[str] = rest_field(name="apiKey") + """API key of the designated AI Services resource.""" + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + """The user-assigned managed identity used for outbound connections. If an + authResourceId is provided and it's not specified, the system-assigned managed + identity is used. On updates to the index, if the identity is unspecified, the + value remains unchanged. If set to \"none\", the value of this property is + cleared.""" + + @overload + def __init__( + self, + *, + model_version: str, + resource_uri: str, + api_key: Optional[str] = None, + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorSearchVectorizer(_model_base.Model): + """Specifies the vectorization method to be used during query time. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AIServicesVisionVectorizer, AzureMachineLearningVectorizer, AzureOpenAIVectorizer, + WebApiVectorizer + + + :ivar vectorizer_name: The name to associate with this particular vectorization method. + Required. + :vartype vectorizer_name: str + :ivar kind: Type of VectorSearchVectorizer. Required. Known values are: "azureOpenAI", + "customWebApi", "aiServicesVision", and "aml". + :vartype kind: str or ~azure.search.documents.models.VectorSearchVectorizerKind + """ + + __mapping__: Dict[str, _model_base.Model] = {} + vectorizer_name: str = rest_field(name="name") + """The name to associate with this particular vectorization method. Required.""" + kind: str = rest_discriminator(name="kind") + """Type of VectorSearchVectorizer. Required. Known values are: \"azureOpenAI\", \"customWebApi\", + \"aiServicesVision\", and \"aml\".""" + + @overload + def __init__( + self, + *, + vectorizer_name: str, + kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AIServicesVisionVectorizer(VectorSearchVectorizer, discriminator="aiServicesVision"): + """Specifies the AI Services Vision parameters for vectorizing a query image or + text. + + + :ivar vectorizer_name: The name to associate with this particular vectorization method. + Required. + :vartype vectorizer_name: str + :ivar ai_services_vision_parameters: Contains the parameters specific to AI Services Vision + embedding vectorization. 
+ :vartype ai_services_vision_parameters: + ~azure.search.documents.models.AIServicesVisionParameters + :ivar kind: The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings for an image or text input at query time using + the Azure AI + Services Vision Vectorize API. + :vartype kind: str or ~azure.search.documents.models.AI_SERVICES_VISION + """ + + ai_services_vision_parameters: Optional["_models.AIServicesVisionParameters"] = rest_field( + name="AIServicesVisionParameters" + ) + """Contains the parameters specific to AI Services Vision embedding vectorization.""" + kind: Literal[VectorSearchVectorizerKind.AI_SERVICES_VISION] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings for an image or text input at query time using the + Azure AI + Services Vision Vectorize API.""" + + @overload + def __init__( + self, + *, + vectorizer_name: str, + ai_services_vision_parameters: Optional["_models.AIServicesVisionParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchVectorizerKind.AI_SERVICES_VISION, **kwargs) + + +class AnalyzedTokenInfo(_model_base.Model): + """Information about a token returned by an analyzer. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar token: The token returned by the analyzer. Required. + :vartype token: str + :ivar start_offset: The index of the first character of the token in the input text. Required. + :vartype start_offset: int + :ivar end_offset: The index of the last character of the token in the input text. Required. + :vartype end_offset: int + :ivar position: The position of the token in the input text relative to other tokens. The first + token in the input text has position 0, the next has position 1, and so on. + Depending on the analyzer used, some tokens might have the same position, for + example if they are synonyms of each other. Required. + :vartype position: int + """ + + token: str = rest_field(visibility=["read"]) + """The token returned by the analyzer. Required.""" + start_offset: int = rest_field(name="startOffset", visibility=["read"]) + """The index of the first character of the token in the input text. Required.""" + end_offset: int = rest_field(name="endOffset", visibility=["read"]) + """The index of the last character of the token in the input text. Required.""" + position: int = rest_field(visibility=["read"]) + """The position of the token in the input text relative to other tokens. The first + token in the input text has position 0, the next has position 1, and so on. + Depending on the analyzer used, some tokens might have the same position, for + example if they are synonyms of each other. Required.""" + + +class AnalyzeRequest(_model_base.Model): + """Specifies some text and analysis components used to break that text into tokens. + + All required parameters must be populated in order to send to server. + + :ivar text: The text to break into tokens. Required. + :vartype text: str + :ivar analyzer: The name of the analyzer to use to break the given text. If this parameter is + not specified, you must specify a tokenizer instead. 
The tokenizer and analyzer + parameters are mutually exclusive. Known values are: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", + "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", + "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", and "whitespace". + :vartype analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :ivar tokenizer: The name of the tokenizer to use to break the given text. If this parameter is + not specified, you must specify an analyzer instead. The tokenizer and analyzer + parameters are mutually exclusive. Known values are: "classic", "edgeNGram", "keyword_v2", + "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", + "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", and "whitespace". + :vartype tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName + :ivar normalizer: The name of the normalizer to use to normalize the given text. Known values + are: "asciifolding", "elision", "lowercase", "standard", and "uppercase". + :vartype normalizer: str or ~azure.search.documents.models.LexicalNormalizerName + :ivar token_filters: An optional list of token filters to use when breaking the given text. + This + parameter can only be set when using the tokenizer parameter. + :vartype token_filters: list[str or ~azure.search.documents.models.TokenFilterName] + :ivar char_filters: An optional list of character filters to use when breaking the given text. + This + parameter can only be set when using the tokenizer parameter. + :vartype char_filters: list[str or ~azure.search.documents.models.CharFilterName] + """ + + text: str = rest_field() + """The text to break into tokens. Required.""" + analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field() + """The name of the analyzer to use to break the given text. If this parameter is + not specified, you must specify a tokenizer instead. The tokenizer and analyzer + parameters are mutually exclusive. 
Known values are: \"ar.microsoft\", \"ar.lucene\", + \"hy.lucene\", \"bn.microsoft\", \"eu.lucene\", \"bg.microsoft\", \"bg.lucene\", + \"ca.microsoft\", \"ca.lucene\", \"zh-Hans.microsoft\", \"zh-Hans.lucene\", + \"zh-Hant.microsoft\", \"zh-Hant.lucene\", \"hr.microsoft\", \"cs.microsoft\", \"cs.lucene\", + \"da.microsoft\", \"da.lucene\", \"nl.microsoft\", \"nl.lucene\", \"en.microsoft\", + \"en.lucene\", \"et.microsoft\", \"fi.microsoft\", \"fi.lucene\", \"fr.microsoft\", + \"fr.lucene\", \"gl.lucene\", \"de.microsoft\", \"de.lucene\", \"el.microsoft\", \"el.lucene\", + \"gu.microsoft\", \"he.microsoft\", \"hi.microsoft\", \"hi.lucene\", \"hu.microsoft\", + \"hu.lucene\", \"is.microsoft\", \"id.microsoft\", \"id.lucene\", \"ga.lucene\", + \"it.microsoft\", \"it.lucene\", \"ja.microsoft\", \"ja.lucene\", \"kn.microsoft\", + \"ko.microsoft\", \"ko.lucene\", \"lv.microsoft\", \"lv.lucene\", \"lt.microsoft\", + \"ml.microsoft\", \"ms.microsoft\", \"mr.microsoft\", \"nb.microsoft\", \"no.lucene\", + \"fa.lucene\", \"pl.microsoft\", \"pl.lucene\", \"pt-BR.microsoft\", \"pt-BR.lucene\", + \"pt-PT.microsoft\", \"pt-PT.lucene\", \"pa.microsoft\", \"ro.microsoft\", \"ro.lucene\", + \"ru.microsoft\", \"ru.lucene\", \"sr-cyrillic.microsoft\", \"sr-latin.microsoft\", + \"sk.microsoft\", \"sl.microsoft\", \"es.microsoft\", \"es.lucene\", \"sv.microsoft\", + \"sv.lucene\", \"ta.microsoft\", \"te.microsoft\", \"th.microsoft\", \"th.lucene\", + \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", \"vi.microsoft\", + \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", \"pattern\", \"simple\", + \"stop\", and \"whitespace\".""" + tokenizer: Optional[Union[str, "_models.LexicalTokenizerName"]] = rest_field() + """The name of the tokenizer to use to break the given text. If this parameter is + not specified, you must specify an analyzer instead. The tokenizer and analyzer + parameters are mutually exclusive. Known values are: \"classic\", \"edgeNGram\", + \"keyword_v2\", \"letter\", \"lowercase\", \"microsoft_language_tokenizer\", + \"microsoft_language_stemming_tokenizer\", \"nGram\", \"path_hierarchy_v2\", \"pattern\", + \"standard_v2\", \"uax_url_email\", and \"whitespace\".""" + normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = rest_field() + """The name of the normalizer to use to normalize the given text. Known values are: + \"asciifolding\", \"elision\", \"lowercase\", \"standard\", and \"uppercase\".""" + token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field(name="tokenFilters") + """An optional list of token filters to use when breaking the given text. This + parameter can only be set when using the tokenizer parameter.""" + char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field(name="charFilters") + """An optional list of character filters to use when breaking the given text. This + parameter can only be set when using the tokenizer parameter.""" + + @overload + def __init__( + self, + *, + text: str, + analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, + tokenizer: Optional[Union[str, "_models.LexicalTokenizerName"]] = None, + normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = None, + token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = None, + char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = None, + ) -> None: ... 
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AnalyzeResult(_model_base.Model): + """The result of testing an analyzer on text. + + + :ivar tokens: The list of tokens returned by the analyzer specified in the request. Required. + :vartype tokens: list[~azure.search.documents.models.AnalyzedTokenInfo] + """ + + tokens: List["_models.AnalyzedTokenInfo"] = rest_field() + """The list of tokens returned by the analyzer specified in the request. Required.""" + + @overload + def __init__( + self, + *, + tokens: List["_models.AnalyzedTokenInfo"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TokenFilter(_model_base.Model): + """Base type for token filters. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AsciiFoldingTokenFilter, CjkBigramTokenFilter, CommonGramTokenFilter, + DictionaryDecompounderTokenFilter, EdgeNGramTokenFilter, EdgeNGramTokenFilterV2, + ElisionTokenFilter, KeepTokenFilter, KeywordMarkerTokenFilter, LengthTokenFilter, + LimitTokenFilter, NGramTokenFilter, NGramTokenFilterV2, PatternCaptureTokenFilter, + PatternReplaceTokenFilter, PhoneticTokenFilter, ShingleTokenFilter, SnowballTokenFilter, + StemmerOverrideTokenFilter, StemmerTokenFilter, StopwordsTokenFilter, SynonymTokenFilter, + TruncateTokenFilter, UniqueTokenFilter, WordDelimiterTokenFilter + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + name: str = rest_field() + """The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required.""" + + @overload + def __init__( + self, + *, + odata_type: str, + name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AsciiFoldingTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.AsciiFoldingTokenFilter"): + """Converts alphabetic, numeric, and symbolic Unicode characters which are not in + the first 127 ASCII characters (the "Basic Latin" Unicode block) into their + ASCII equivalents, if such equivalents exist. This token filter is implemented + using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. 
+ :vartype name: str + :ivar preserve_original: A value indicating whether the original token will be kept. Default is + false. + :vartype preserve_original: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.AsciiFoldingTokenFilter". + :vartype odata_type: str + """ + + preserve_original: Optional[bool] = rest_field(name="preserveOriginal") + """A value indicating whether the original token will be kept. Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.AsciiFoldingTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.AsciiFoldingTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + preserve_original: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.AsciiFoldingTokenFilter", **kwargs) + + +class AutocompleteItem(_model_base.Model): + """The result of Autocomplete requests. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar text: The completed term. Required. + :vartype text: str + :ivar query_plus_text: The query along with the completed term. Required. + :vartype query_plus_text: str + """ + + text: str = rest_field(visibility=["read"]) + """The completed term. Required.""" + query_plus_text: str = rest_field(name="queryPlusText", visibility=["read"]) + """The query along with the completed term. Required.""" + + +class AutocompleteRequest(_model_base.Model): + """Parameters for fuzzy matching, and other autocomplete query behaviors. + + All required parameters must be populated in order to send to server. + + :ivar search_text: The search text on which to base autocomplete results. Required. + :vartype search_text: str + :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + 'twoTerms' + to get shingles and 'oneTermWithContext' to use the current context while + producing auto-completed terms. Known values are: "oneTerm", "twoTerms", and + "oneTermWithContext". + :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :ivar filter: An OData expression that filters the documents used to produce completed terms + for the Autocomplete result. + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete + query. + Default is false. When set to true, the query will autocomplete terms even if + there's a substituted or missing character in the search text. While this + provides a better experience in some scenarios, it comes at a performance cost + as fuzzy autocomplete queries are slower and consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled. + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled. 
+ :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be + covered by an autocomplete query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for + services with only one replica. The default is 80. + :vartype minimum_coverage: float + :ivar search_fields: The comma-separated list of field names to consider when querying for + auto-completed terms. Target fields must be included in the specified + suggester. + :vartype search_fields: str + :ivar suggester_name: The name of the suggester as specified in the suggesters collection + that's part + of the index definition. Required. + :vartype suggester_name: str + :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. The default is 5. + :vartype top: int + """ + + search_text: str = rest_field(name="search") + """The search text on which to base autocomplete results. Required.""" + autocomplete_mode: Optional[Union[str, "_models.AutocompleteMode"]] = rest_field(name="autocompleteMode") + """Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' + to get shingles and 'oneTermWithContext' to use the current context while + producing auto-completed terms. Known values are: \"oneTerm\", \"twoTerms\", and + \"oneTermWithContext\".""" + filter: Optional[str] = rest_field() + """An OData expression that filters the documents used to produce completed terms + for the Autocomplete result.""" + use_fuzzy_matching: Optional[bool] = rest_field(name="fuzzy") + """A value indicating whether to use fuzzy matching for the autocomplete query. + Default is false. When set to true, the query will autocomplete terms even if + there's a substituted or missing character in the search text. While this + provides a better experience in some scenarios, it comes at a performance cost + as fuzzy autocomplete queries are slower and consume more resources.""" + highlight_post_tag: Optional[str] = rest_field(name="highlightPostTag") + """A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled.""" + highlight_pre_tag: Optional[str] = rest_field(name="highlightPreTag") + """A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled.""" + minimum_coverage: Optional[float] = rest_field(name="minimumCoverage") + """A number between 0 and 100 indicating the percentage of the index that must be + covered by an autocomplete query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for + services with only one replica. The default is 80.""" + search_fields: Optional[str] = rest_field(name="searchFields") + """The comma-separated list of field names to consider when querying for + auto-completed terms. Target fields must be included in the specified + suggester.""" + suggester_name: str = rest_field(name="suggesterName") + """The name of the suggester as specified in the suggesters collection that's part + of the index definition. Required.""" + top: Optional[int] = rest_field() + """The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. 
The default is 5.""" + + @overload + def __init__( + self, + *, + search_text: str, + suggester_name: str, + autocomplete_mode: Optional[Union[str, "_models.AutocompleteMode"]] = None, + filter: Optional[str] = None, # pylint: disable=redefined-builtin + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + search_fields: Optional[str] = None, + top: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AutocompleteResult(_model_base.Model): + """The result of Autocomplete query. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar coverage: A value indicating the percentage of the index that was considered by the + autocomplete request, or null if minimumCoverage was not specified in the + request. + :vartype coverage: float + :ivar results: The list of returned Autocompleted items. Required. + :vartype results: list[~azure.search.documents.models.AutocompleteItem] + """ + + coverage: Optional[float] = rest_field(name="@search.coverage", visibility=["read"]) + """A value indicating the percentage of the index that was considered by the + autocomplete request, or null if minimumCoverage was not specified in the + request.""" + results: List["_models.AutocompleteItem"] = rest_field(name="value", visibility=["read"]) + """The list of returned Autocompleted items. Required.""" + + +class AzureActiveDirectoryApplicationCredentials(_model_base.Model): # pylint: disable=name-too-long + """Credentials of a registered application created for your search service, used + for authenticated access to the encryption keys stored in Azure Key Vault. + + + :ivar application_id: An AAD Application ID that was granted the required access permissions to + the + Azure Key Vault that is to be used when encrypting your data at rest. The + Application ID should not be confused with the Object ID for your AAD + Application. Required. + :vartype application_id: str + :ivar application_secret: The authentication key of the specified AAD application. + :vartype application_secret: str + """ + + application_id: str = rest_field(name="applicationId") + """An AAD Application ID that was granted the required access permissions to the + Azure Key Vault that is to be used when encrypting your data at rest. The + Application ID should not be confused with the Object ID for your AAD + Application. Required.""" + application_secret: Optional[str] = rest_field(name="applicationSecret") + """The authentication key of the specified AAD application.""" + + @overload + def __init__( + self, + *, + application_id: str, + application_secret: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureMachineLearningParameters(_model_base.Model): + """Specifies the properties for connecting to an AML vectorizer. 
+ + + :ivar scoring_uri: (Required for no authentication or key authentication) The scoring URI of + the + AML service to which the JSON payload will be sent. Only the https URI scheme + is allowed. Required. + :vartype scoring_uri: str + :ivar authentication_key: (Required for key authentication) The key for the AML service. + :vartype authentication_key: str + :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID + of + the AML service. It should be in the format + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long + :vartype resource_id: str + :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the + API call. + :vartype timeout: ~datetime.timedelta + :ivar region: (Optional for token authentication). The region the AML service is deployed in. + :vartype region: str + :ivar model_name: The name of the embedding model from the Azure AI Studio Catalog that is + deployed at the provided endpoint. Known values are: + "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", + "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", + "Facebook-DinoV2-Image-Embeddings-ViT-Base", "Facebook-DinoV2-Image-Embeddings-ViT-Giant", + "Cohere-embed-v3-english", and "Cohere-embed-v3-multilingual". + :vartype model_name: str or ~azure.search.documents.models.AIStudioModelCatalogName + """ + + scoring_uri: str = rest_field(name="uri") + """(Required for no authentication or key authentication) The scoring URI of the + AML service to which the JSON payload will be sent. Only the https URI scheme + is allowed. Required.""" + authentication_key: Optional[str] = rest_field(name="key") + """(Required for key authentication) The key for the AML service.""" + resource_id: Optional[str] = rest_field(name="resourceId") + """(Required for token authentication). The Azure Resource Manager resource ID of + the AML service. It should be in the format + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long""" + timeout: Optional[datetime.timedelta] = rest_field() + """(Optional) When specified, indicates the timeout for the http client making the + API call.""" + region: Optional[str] = rest_field() + """(Optional for token authentication). The region the AML service is deployed in.""" + model_name: Optional[Union[str, "_models.AIStudioModelCatalogName"]] = rest_field(name="modelName") + """The name of the embedding model from the Azure AI Studio Catalog that is + deployed at the provided endpoint. Known values are: + \"OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32\", + \"OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336\", + \"Facebook-DinoV2-Image-Embeddings-ViT-Base\", \"Facebook-DinoV2-Image-Embeddings-ViT-Giant\", + \"Cohere-embed-v3-english\", and \"Cohere-embed-v3-multilingual\".""" + + @overload + def __init__( + self, + *, + scoring_uri: str, + authentication_key: Optional[str] = None, + resource_id: Optional[str] = None, + timeout: Optional[datetime.timedelta] = None, + region: Optional[str] = None, + model_name: Optional[Union[str, "_models.AIStudioModelCatalogName"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerSkill(_model_base.Model): + """Base type for skills. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + AzureMachineLearningSkill, WebApiSkill, AzureOpenAIEmbeddingSkill, CustomEntityLookupSkill, + EntityRecognitionSkill, KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, + PIIDetectionSkill, SentimentSkill, SplitSkill, TextTranslationSkill, EntityLinkingSkill, + EntityRecognitionSkillV3, SentimentSkillV3, ConditionalSkill, DocumentExtractionSkill, + DocumentIntelligenceLayoutSkill, ShaperSkill, ImageAnalysisSkill, OcrSkill, + VisionVectorizeSkill + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + name: Optional[str] = rest_field() + """The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'.""" + description: Optional[str] = rest_field() + """The description of the skill which describes the inputs, outputs, and usage of + the skill.""" + context: Optional[str] = rest_field() + """Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document.""" + inputs: List["_models.InputFieldMappingEntry"] = rest_field() + """Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required.""" + outputs: List["_models.OutputFieldMappingEntry"] = rest_field() + """The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required.""" + + @overload + def __init__( + self, + *, + odata_type: str, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureMachineLearningSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Custom.AmlSkill"): + """The AML skill allows you to extend AI enrichment with a custom Azure Machine + Learning (AML) model. Once an AML model is trained and deployed, an AML skill + integrates it into AI enrichment. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar scoring_uri: (Required for no authentication or key authentication) The scoring URI of + the + AML service to which the JSON payload will be sent. Only the https URI scheme + is allowed. + :vartype scoring_uri: str + :ivar authentication_key: (Required for key authentication) The key for the AML service. + :vartype authentication_key: str + :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID + of + the AML service. It should be in the format + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long + :vartype resource_id: str + :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the + API call. + :vartype timeout: ~datetime.timedelta + :ivar region: (Optional for token authentication). The region the AML service is deployed in. + :vartype region: str + :ivar degree_of_parallelism: (Optional) When specified, indicates the number of calls the + indexer will make + in parallel to the endpoint you have provided. You can decrease this value if + your endpoint is failing under too high of a request load, or raise it if your + endpoint is able to accept more requests and you would like an increase in the + performance of the indexer. If not set, a default value of 5 is used. The + degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. + :vartype degree_of_parallelism: int + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Custom.AmlSkill". + :vartype odata_type: str + """ + + scoring_uri: Optional[str] = rest_field(name="uri") + """(Required for no authentication or key authentication) The scoring URI of the + AML service to which the JSON payload will be sent. 
Only the https URI scheme + is allowed.""" + authentication_key: Optional[str] = rest_field(name="key") + """(Required for key authentication) The key for the AML service.""" + resource_id: Optional[str] = rest_field(name="resourceId") + """(Required for token authentication). The Azure Resource Manager resource ID of + the AML service. It should be in the format + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long""" + timeout: Optional[datetime.timedelta] = rest_field() + """(Optional) When specified, indicates the timeout for the http client making the + API call.""" + region: Optional[str] = rest_field() + """(Optional for token authentication). The region the AML service is deployed in.""" + degree_of_parallelism: Optional[int] = rest_field(name="degreeOfParallelism") + """(Optional) When specified, indicates the number of calls the indexer will make + in parallel to the endpoint you have provided. You can decrease this value if + your endpoint is failing under too high of a request load, or raise it if your + endpoint is able to accept more requests and you would like an increase in the + performance of the indexer. If not set, a default value of 5 is used. The + degreeOfParallelism can be set to a maximum of 10 and a minimum of 1.""" + odata_type: Literal["#Microsoft.Skills.Custom.AmlSkill"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Custom.AmlSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + scoring_uri: Optional[str] = None, + authentication_key: Optional[str] = None, + resource_id: Optional[str] = None, + timeout: Optional[datetime.timedelta] = None, + region: Optional[str] = None, + degree_of_parallelism: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Custom.AmlSkill", **kwargs) + + +class AzureMachineLearningVectorizer(VectorSearchVectorizer, discriminator="aml"): + """Specifies an Azure Machine Learning endpoint deployed via the Azure AI Studio + Model Catalog for generating the vector embedding of a query string. + + + :ivar vectorizer_name: The name to associate with this particular vectorization method. + Required. + :vartype vectorizer_name: str + :ivar aml_parameters: Specifies the properties of the AML vectorizer. + :vartype aml_parameters: ~azure.search.documents.models.AzureMachineLearningParameters + :ivar kind: The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings using an Azure Machine Learning endpoint deployed + via the + Azure AI Studio Model Catalog at query time. 
+ :vartype kind: str or ~azure.search.documents.models.AML + """ + + aml_parameters: Optional["_models.AzureMachineLearningParameters"] = rest_field(name="AMLParameters") + """Specifies the properties of the AML vectorizer.""" + kind: Literal[VectorSearchVectorizerKind.AML] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings using an Azure Machine Learning endpoint deployed + via the + Azure AI Studio Model Catalog at query time.""" + + @overload + def __init__( + self, + *, + vectorizer_name: str, + aml_parameters: Optional["_models.AzureMachineLearningParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchVectorizerKind.AML, **kwargs) + + +class AzureOpenAIEmbeddingSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"): + """Allows you to generate a vector embedding for a given text input using the + Azure OpenAI resource. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar resource_url: The resource URI of the Azure OpenAI resource. + :vartype resource_url: str + :ivar deployment_name: ID of the Azure OpenAI model deployment on the designated resource. + :vartype deployment_name: str + :ivar api_key: API key of the designated Azure OpenAI resource. + :vartype api_key: str + :ivar auth_identity: The user-assigned managed identity used for outbound connections. + :vartype auth_identity: ~azure.search.documents.models.SearchIndexerDataIdentity + :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId + path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", and + "text-embedding-3-small". + :vartype model_name: str or ~azure.search.documents.models.AzureOpenAIModelName + :ivar dimensions: The number of dimensions the resulting output embeddings should have. Only + supported in text-embedding-3 and later models. + :vartype dimensions: int + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill". 
+ :vartype odata_type: str + """ + + resource_url: Optional[str] = rest_field(name="resourceUri") + """The resource URI of the Azure OpenAI resource.""" + deployment_name: Optional[str] = rest_field(name="deploymentId") + """ID of the Azure OpenAI model deployment on the designated resource.""" + api_key: Optional[str] = rest_field(name="apiKey") + """API key of the designated Azure OpenAI resource.""" + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + """The user-assigned managed identity used for outbound connections.""" + model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = rest_field(name="modelName") + """The name of the embedding model that is deployed at the provided deploymentId + path. Known values are: \"text-embedding-ada-002\", \"text-embedding-3-large\", and + \"text-embedding-3-small\".""" + dimensions: Optional[int] = rest_field() + """The number of dimensions the resulting output embeddings should have. Only + supported in text-embedding-3 and later models.""" + odata_type: Literal["#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + resource_url: Optional[str] = None, + deployment_name: Optional[str] = None, + api_key: Optional[str] = None, + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, + model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = None, + dimensions: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill", **kwargs) + + +class AzureOpenAITokenizerParameters(_model_base.Model): + """Azure OpenAI Tokenizer parameters. + + :ivar encoder_model_name: Only applies if the unit is set to azureOpenAITokens. Options include + 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is + 'CL100k_base'. Known values are: "r50k_base", "p50k_base", "p50k_edit", and "cl100k_base". + :vartype encoder_model_name: str or ~azure.search.documents.models.SplitSkillEncoderModelName + :ivar allowed_special_tokens: (Optional) Only applies if the unit is set to azureOpenAITokens. + This parameter + defines a collection of special tokens that are permitted within the + tokenization process. + :vartype allowed_special_tokens: list[str] + """ + + encoder_model_name: Optional[Union[str, "_models.SplitSkillEncoderModelName"]] = rest_field(name="encoderModelName") + """Only applies if the unit is set to azureOpenAITokens. Options include + 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is + 'CL100k_base'. Known values are: \"r50k_base\", \"p50k_base\", \"p50k_edit\", and + \"cl100k_base\".""" + allowed_special_tokens: Optional[List[str]] = rest_field(name="allowedSpecialTokens") + """(Optional) Only applies if the unit is set to azureOpenAITokens. 
This parameter + defines a collection of special tokens that are permitted within the + tokenization process.""" + + @overload + def __init__( + self, + *, + encoder_model_name: Optional[Union[str, "_models.SplitSkillEncoderModelName"]] = None, + allowed_special_tokens: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class AzureOpenAIVectorizer(VectorSearchVectorizer, discriminator="azureOpenAI"): + """Specifies the Azure OpenAI resource used to vectorize a query string. + + + :ivar vectorizer_name: The name to associate with this particular vectorization method. + Required. + :vartype vectorizer_name: str + :ivar parameters: Contains the parameters specific to Azure OpenAI embedding vectorization. + :vartype parameters: ~azure.search.documents.models.AzureOpenAIVectorizerParameters + :ivar kind: The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings using an Azure OpenAI resource at query time. + :vartype kind: str or ~azure.search.documents.models.AZURE_OPEN_AI + """ + + parameters: Optional["_models.AzureOpenAIVectorizerParameters"] = rest_field(name="azureOpenAIParameters") + """Contains the parameters specific to Azure OpenAI embedding vectorization.""" + kind: Literal[VectorSearchVectorizerKind.AZURE_OPEN_AI] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings using an Azure OpenAI resource at query time.""" + + @overload + def __init__( + self, + *, + vectorizer_name: str, + parameters: Optional["_models.AzureOpenAIVectorizerParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchVectorizerKind.AZURE_OPEN_AI, **kwargs) + + +class AzureOpenAIVectorizerParameters(_model_base.Model): + """Specifies the parameters for connecting to the Azure OpenAI resource. + + :ivar resource_url: The resource URI of the Azure OpenAI resource. + :vartype resource_url: str + :ivar deployment_name: ID of the Azure OpenAI model deployment on the designated resource. + :vartype deployment_name: str + :ivar api_key: API key of the designated Azure OpenAI resource. + :vartype api_key: str + :ivar auth_identity: The user-assigned managed identity used for outbound connections. + :vartype auth_identity: ~azure.search.documents.models.SearchIndexerDataIdentity + :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId + path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", and + "text-embedding-3-small". 
+ :vartype model_name: str or ~azure.search.documents.models.AzureOpenAIModelName + """ + + resource_url: Optional[str] = rest_field(name="resourceUri") + """The resource URI of the Azure OpenAI resource.""" + deployment_name: Optional[str] = rest_field(name="deploymentId") + """ID of the Azure OpenAI model deployment on the designated resource.""" + api_key: Optional[str] = rest_field(name="apiKey") + """API key of the designated Azure OpenAI resource.""" + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + """The user-assigned managed identity used for outbound connections.""" + model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = rest_field(name="modelName") + """The name of the embedding model that is deployed at the provided deploymentId + path. Known values are: \"text-embedding-ada-002\", \"text-embedding-3-large\", and + \"text-embedding-3-small\".""" + + @overload + def __init__( + self, + *, + resource_url: Optional[str] = None, + deployment_name: Optional[str] = None, + api_key: Optional[str] = None, + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, + model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorSearchCompression(_model_base.Model): + """Contains configuration options specific to the compression method used during + indexing or querying. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + BinaryQuantizationCompression, ScalarQuantizationCompression + + + :ivar compression_name: The name to associate with this particular configuration. Required. + :vartype compression_name: str + :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated + using compressed + vectors are obtained, they will be reranked again by recalculating the + full-precision similarity scores. This will improve recall at the expense of + latency. + :vartype rerank_with_original_vectors: bool + :ivar default_oversampling: Default oversampling factor. Oversampling will internally request + more + documents (specified by this multiplier) in the initial search. This increases + the set of results that will be reranked using recomputed similarity scores + from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). + This parameter can only be set when rerankWithOriginalVectors is true. Higher + values improve recall at the expense of latency. + :vartype default_oversampling: float + :ivar rescoring_options: Contains the options for rescoring. + :vartype rescoring_options: ~azure.search.documents.models.RescoringOptions + :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the + vectors + reduces the size of the vectors and the amount of data that needs to be + transferred during search. This can save storage cost and improve search + performance at the expense of recall. It should be only used for embeddings + trained with Matryoshka Representation Learning (MRL) such as OpenAI + text-embedding-3-large (small). The default value is null, which means no + truncation. + :vartype truncation_dimension: int + :ivar kind: Type of VectorSearchCompression. Required. 
Known values are: "scalarQuantization" + and "binaryQuantization". + :vartype kind: str or ~azure.search.documents.models.VectorSearchCompressionKind + """ + + __mapping__: Dict[str, _model_base.Model] = {} + compression_name: str = rest_field(name="name") + """The name to associate with this particular configuration. Required.""" + rerank_with_original_vectors: Optional[bool] = rest_field(name="rerankWithOriginalVectors") + """If set to true, once the ordered set of results calculated using compressed + vectors are obtained, they will be reranked again by recalculating the + full-precision similarity scores. This will improve recall at the expense of + latency.""" + default_oversampling: Optional[float] = rest_field(name="defaultOversampling") + """Default oversampling factor. Oversampling will internally request more + documents (specified by this multiplier) in the initial search. This increases + the set of results that will be reranked using recomputed similarity scores + from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). + This parameter can only be set when rerankWithOriginalVectors is true. Higher + values improve recall at the expense of latency.""" + rescoring_options: Optional["_models.RescoringOptions"] = rest_field(name="rescoringOptions") + """Contains the options for rescoring.""" + truncation_dimension: Optional[int] = rest_field(name="truncationDimension") + """The number of dimensions to truncate the vectors to. Truncating the vectors + reduces the size of the vectors and the amount of data that needs to be + transferred during search. This can save storage cost and improve search + performance at the expense of recall. It should be only used for embeddings + trained with Matryoshka Representation Learning (MRL) such as OpenAI + text-embedding-3-large (small). The default value is null, which means no + truncation.""" + kind: str = rest_discriminator(name="kind") + """Type of VectorSearchCompression. Required. Known values are: \"scalarQuantization\" and + \"binaryQuantization\".""" + + @overload + def __init__( + self, + *, + compression_name: str, + kind: str, + rerank_with_original_vectors: Optional[bool] = None, + default_oversampling: Optional[float] = None, + rescoring_options: Optional["_models.RescoringOptions"] = None, + truncation_dimension: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BinaryQuantizationCompression(VectorSearchCompression, discriminator="binaryQuantization"): + """Contains configuration options specific to the binary quantization compression + method used during indexing and querying. + + + :ivar compression_name: The name to associate with this particular configuration. Required. + :vartype compression_name: str + :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated + using compressed + vectors are obtained, they will be reranked again by recalculating the + full-precision similarity scores. This will improve recall at the expense of + latency. + :vartype rerank_with_original_vectors: bool + :ivar default_oversampling: Default oversampling factor. Oversampling will internally request + more + documents (specified by this multiplier) in the initial search. 
This increases + the set of results that will be reranked using recomputed similarity scores + from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). + This parameter can only be set when rerankWithOriginalVectors is true. Higher + values improve recall at the expense of latency. + :vartype default_oversampling: float + :ivar rescoring_options: Contains the options for rescoring. + :vartype rescoring_options: ~azure.search.documents.models.RescoringOptions + :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the + vectors + reduces the size of the vectors and the amount of data that needs to be + transferred during search. This can save storage cost and improve search + performance at the expense of recall. It should be only used for embeddings + trained with Matryoshka Representation Learning (MRL) such as OpenAI + text-embedding-3-large (small). The default value is null, which means no + truncation. + :vartype truncation_dimension: int + :ivar kind: The name of the kind of compression method being configured for use with vector + search. Required. Binary Quantization, a type of compression method. In binary quantization, + the + original vectors values are compressed to the narrower binary type by + discretizing and representing each component of a vector using binary values, + thereby reducing the overall data size. + :vartype kind: str or ~azure.search.documents.models.BINARY_QUANTIZATION + """ + + kind: Literal[VectorSearchCompressionKind.BINARY_QUANTIZATION] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of compression method being configured for use with vector + search. Required. Binary Quantization, a type of compression method. In binary quantization, + the + original vectors values are compressed to the narrower binary type by + discretizing and representing each component of a vector using binary values, + thereby reducing the overall data size.""" + + @overload + def __init__( + self, + *, + compression_name: str, + rerank_with_original_vectors: Optional[bool] = None, + default_oversampling: Optional[float] = None, + rescoring_options: Optional["_models.RescoringOptions"] = None, + truncation_dimension: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchCompressionKind.BINARY_QUANTIZATION, **kwargs) + + +class SimilarityAlgorithm(_model_base.Model): + """Base type for similarity algorithms. Similarity algorithms are used to + calculate scores that tie queries to documents. The higher the score, the more + relevant the document is to that specific query. Those scores are used to rank + the search results. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + BM25SimilarityAlgorithm, ClassicSimilarityAlgorithm + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + odata_type: str, + ) -> None: ... 
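# A minimal usage sketch of the compression options documented above; it assumes these
# generated models are re-exported through azure.search.documents.indexes.models, as in
# earlier releases of the package (an assumption, not shown in this diff).
from azure.search.documents.indexes.models import BinaryQuantizationCompression

bq_compression = BinaryQuantizationCompression(
    compression_name="my-binary-quantization",
    rerank_with_original_vectors=True,   # rescore candidates with full-precision vectors
    default_oversampling=4.0,            # fetch 4x candidates before reranking
    truncation_dimension=1024,           # only meaningful for MRL-trained embeddings
)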
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class BM25SimilarityAlgorithm(SimilarityAlgorithm, discriminator="#Microsoft.Azure.Search.BM25Similarity"): + """Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a + TF-IDF-like algorithm that includes length normalization (controlled by the 'b' + parameter) as well as term frequency saturation (controlled by the 'k1' + parameter). + + + :ivar k1: This property controls the scaling function between the term frequency of each + matching terms and the final relevance score of a document-query pair. By + default, a value of 1.2 is used. A value of 0.0 means the score does not scale + with an increase in term frequency. + :vartype k1: float + :ivar b: This property controls how the length of a document affects the relevance + score. By default, a value of 0.75 is used. A value of 0.0 means no length + normalization is applied, while a value of 1.0 means the score is fully + normalized by the length of the document. + :vartype b: float + :ivar odata_type: Required. Default value is "#Microsoft.Azure.Search.BM25Similarity". + :vartype odata_type: str + """ + + k1: Optional[float] = rest_field() + """This property controls the scaling function between the term frequency of each + matching terms and the final relevance score of a document-query pair. By + default, a value of 1.2 is used. A value of 0.0 means the score does not scale + with an increase in term frequency.""" + b: Optional[float] = rest_field() + """This property controls how the length of a document affects the relevance + score. By default, a value of 0.75 is used. A value of 0.0 means no length + normalization is applied, while a value of 1.0 means the score is fully + normalized by the length of the document.""" + odata_type: Literal["#Microsoft.Azure.Search.BM25Similarity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """Required. Default value is \"#Microsoft.Azure.Search.BM25Similarity\".""" + + @overload + def __init__( + self, + *, + k1: Optional[float] = None, + b: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.BM25Similarity", **kwargs) + + +class CharFilter(_model_base.Model): + """Base type for character filters. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + MappingCharFilter, PatternReplaceCharFilter + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + :ivar name: The name of the char filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + name: str = rest_field() + """The name of the char filter. 
It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required.""" + + @overload + def __init__( + self, + *, + odata_type: str, + name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CjkBigramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.CjkBigramTokenFilter"): + """Forms bigrams of CJK terms that are generated from the standard tokenizer. This + token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar ignore_scripts: The scripts to ignore. + :vartype ignore_scripts: list[str or + ~azure.search.documents.models.CjkBigramTokenFilterScripts] + :ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if + true), or + just bigrams (if false). Default is false. + :vartype output_unigrams: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.CjkBigramTokenFilter". + :vartype odata_type: str + """ + + ignore_scripts: Optional[List[Union[str, "_models.CjkBigramTokenFilterScripts"]]] = rest_field(name="ignoreScripts") + """The scripts to ignore.""" + output_unigrams: Optional[bool] = rest_field(name="outputUnigrams") + """A value indicating whether to output both unigrams and bigrams (if true), or + just bigrams (if false). Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.CjkBigramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.CjkBigramTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + ignore_scripts: Optional[List[Union[str, "_models.CjkBigramTokenFilterScripts"]]] = None, + output_unigrams: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.CjkBigramTokenFilter", **kwargs) + + +class ClassicSimilarityAlgorithm(SimilarityAlgorithm, discriminator="#Microsoft.Azure.Search.ClassicSimilarity"): + """Legacy similarity algorithm which uses the Lucene TFIDFSimilarity + implementation of TF-IDF. This variation of TF-IDF introduces static document + length normalization as well as coordinating factors that penalize documents + that only partially match the searched queries. + + + :ivar odata_type: Required. Default value is "#Microsoft.Azure.Search.ClassicSimilarity". + :vartype odata_type: str + """ + + odata_type: Literal["#Microsoft.Azure.Search.ClassicSimilarity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """Required. Default value is \"#Microsoft.Azure.Search.ClassicSimilarity\".""" + + @overload + def __init__( + self, + ) -> None: ... 
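# A minimal sketch of tuning the BM25 parameters documented above, assuming
# BM25SimilarityAlgorithm is re-exported through azure.search.documents.indexes.models
# (an assumption, not shown in this diff).
from azure.search.documents.indexes.models import BM25SimilarityAlgorithm

# k1 above the 1.2 default weights repeated query terms more heavily; b below the 0.75
# default reduces how strongly long documents are penalized.
bm25 = BM25SimilarityAlgorithm(k1=1.5, b=0.5)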
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.ClassicSimilarity", **kwargs) + + +class LexicalTokenizer(_model_base.Model): + """Base type for tokenizers. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, + MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, + PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, + UaxUrlEmailTokenizer + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + name: str = rest_field() + """The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required.""" + + @overload + def __init__( + self, + *, + odata_type: str, + name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ClassicTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.ClassicTokenizer"): + """Grammar-based tokenizer that is suitable for processing most European-language + documents. This tokenizer is implemented using Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length + are split. The maximum token length that can be used is 300 characters. + :vartype max_token_length: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.ClassicTokenizer". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Default is 255. Tokens longer than the maximum length + are split. The maximum token length that can be used is 300 characters.""" + odata_type: Literal["#Microsoft.Azure.Search.ClassicTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.ClassicTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + ) -> None: ... 
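# A small sketch of the tokenizer setting described above, assuming ClassicTokenizer is
# re-exported through azure.search.documents.indexes.models (an assumption).
from azure.search.documents.indexes.models import ClassicTokenizer

# Tokens longer than max_token_length are split; the service accepts at most 300.
tokenizer = ClassicTokenizer(name="my_classic_tokenizer", max_token_length=300)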
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.ClassicTokenizer", **kwargs) + + +class CognitiveServicesAccountKey( + CognitiveServicesAccount, discriminator="#Microsoft.Azure.Search.CognitiveServicesByKey" +): + """The multi-region account key of an Azure AI service resource that's attached to + a skillset. + + + :ivar description: Description of the Azure AI service resource attached to a skillset. + :vartype description: str + :ivar key: The key used to provision the Azure AI service resource attached to a skillset. + Required. + :vartype key: str + :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a + skillset. Required. Default value is "#Microsoft.Azure.Search.CognitiveServicesByKey". + :vartype odata_type: str + """ + + key: str = rest_field() + """The key used to provision the Azure AI service resource attached to a skillset. Required.""" + odata_type: Literal["#Microsoft.Azure.Search.CognitiveServicesByKey"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of Azure AI service resource attached to a + skillset. Required. Default value is \"#Microsoft.Azure.Search.CognitiveServicesByKey\".""" + + @overload + def __init__( + self, + *, + key: str, + description: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.CognitiveServicesByKey", **kwargs) + + +class CommonGramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.CommonGramTokenFilter"): + """Construct bigrams for frequently occurring terms while indexing. Single terms + are still indexed too, with bigrams overlaid. This token filter is implemented + using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar common_words: The set of common words. Required. + :vartype common_words: list[str] + :ivar ignore_case: A value indicating whether common words matching will be case insensitive. + Default is false. + :vartype ignore_case: bool + :ivar use_query_mode: A value that indicates whether the token filter is in query mode. When in + query + mode, the token filter generates bigrams and then removes common words and + single terms followed by a common word. Default is false. + :vartype use_query_mode: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.CommonGramTokenFilter". + :vartype odata_type: str + """ + + common_words: List[str] = rest_field(name="commonWords") + """The set of common words. Required.""" + ignore_case: Optional[bool] = rest_field(name="ignoreCase") + """A value indicating whether common words matching will be case insensitive. 
+ Default is false.""" + use_query_mode: Optional[bool] = rest_field(name="queryMode") + """A value that indicates whether the token filter is in query mode. When in query + mode, the token filter generates bigrams and then removes common words and + single terms followed by a common word. Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.CommonGramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.CommonGramTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + common_words: List[str], + ignore_case: Optional[bool] = None, + use_query_mode: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.CommonGramTokenFilter", **kwargs) + + +class ConditionalSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Util.ConditionalSkill"): + """A skill that enables scenarios that require a Boolean operation to determine + the data to assign to an output. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Util.ConditionalSkill". + :vartype odata_type: str + """ + + odata_type: Literal["#Microsoft.Skills.Util.ConditionalSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Util.ConditionalSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Util.ConditionalSkill", **kwargs) + + +class CorsOptions(_model_base.Model): + """Defines options to control Cross-Origin Resource Sharing (CORS) for an index. 
+ + + :ivar allowed_origins: The list of origins from which JavaScript code will be granted access to + your + index. Can contain a list of hosts of the form + {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow + all origins (not recommended). Required. + :vartype allowed_origins: list[str] + :ivar max_age_in_seconds: The duration for which browsers should cache CORS preflight + responses. Defaults + to 5 minutes. + :vartype max_age_in_seconds: int + """ + + allowed_origins: List[str] = rest_field(name="allowedOrigins") + """The list of origins from which JavaScript code will be granted access to your + index. Can contain a list of hosts of the form + {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow + all origins (not recommended). Required.""" + max_age_in_seconds: Optional[int] = rest_field(name="maxAgeInSeconds") + """The duration for which browsers should cache CORS preflight responses. Defaults + to 5 minutes.""" + + @overload + def __init__( + self, + *, + allowed_origins: List[str], + max_age_in_seconds: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class LexicalAnalyzer(_model_base.Model): + """Base type for analyzers. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + CustomAnalyzer, PatternAnalyzer, LuceneStandardAnalyzer, StopAnalyzer + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + name: str = rest_field() + """The name of the analyzer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required.""" + + @overload + def __init__( + self, + *, + odata_type: str, + name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class CustomAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.CustomAnalyzer"): + """Allows you to take control over the process of converting text into + indexable/searchable tokens. It's a user-defined configuration consisting of a + single predefined tokenizer and one or more filters. The tokenizer is + responsible for breaking text into tokens, and the filters for modifying tokens + emitted by the tokenizer. + + + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. 
+ :vartype name: str + :ivar tokenizer: The name of the tokenizer to use to divide continuous text into a sequence of + tokens, such as breaking a sentence into words. Required. Known values are: "classic", + "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", + "standard_v2", "uax_url_email", and "whitespace". + :vartype tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName + :ivar token_filters: A list of token filters used to filter out or modify the tokens generated + by a + tokenizer. For example, you can specify a lowercase filter that converts all + characters to lowercase. The filters are run in the order in which they are + listed. + :vartype token_filters: list[str or ~azure.search.documents.models.TokenFilterName] + :ivar char_filters: A list of character filters used to prepare input text before it is + processed + by the tokenizer. For instance, they can replace certain characters or symbols. + The filters are run in the order in which they are listed. + :vartype char_filters: list[str or ~azure.search.documents.models.CharFilterName] + :ivar odata_type: A URI fragment specifying the type of analyzer. Required. Default value is + "#Microsoft.Azure.Search.CustomAnalyzer". + :vartype odata_type: str + """ + + tokenizer: Union[str, "_models.LexicalTokenizerName"] = rest_field() + """The name of the tokenizer to use to divide continuous text into a sequence of + tokens, such as breaking a sentence into words. Required. Known values are: \"classic\", + \"edgeNGram\", \"keyword_v2\", \"letter\", \"lowercase\", \"microsoft_language_tokenizer\", + \"microsoft_language_stemming_tokenizer\", \"nGram\", \"path_hierarchy_v2\", \"pattern\", + \"standard_v2\", \"uax_url_email\", and \"whitespace\".""" + token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field(name="tokenFilters") + """A list of token filters used to filter out or modify the tokens generated by a + tokenizer. For example, you can specify a lowercase filter that converts all + characters to lowercase. The filters are run in the order in which they are + listed.""" + char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field(name="charFilters") + """A list of character filters used to prepare input text before it is processed + by the tokenizer. For instance, they can replace certain characters or symbols. + The filters are run in the order in which they are listed.""" + odata_type: Literal["#Microsoft.Azure.Search.CustomAnalyzer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of analyzer. Required. Default value is + \"#Microsoft.Azure.Search.CustomAnalyzer\".""" + + @overload + def __init__( + self, + *, + name: str, + tokenizer: Union[str, "_models.LexicalTokenizerName"], + token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = None, + char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.CustomAnalyzer", **kwargs) + + +class CustomEntity(_model_base.Model): + """An object that contains information about the matches that were found, and + related metadata. + + + :ivar name: The top-level entity descriptor. Matches in the skill output will be grouped by + this name, and it should represent the "normalized" form of the text being + found. Required. + :vartype name: str + :ivar description: This field can be used as a passthrough for custom metadata about the + matched + text(s). The value of this field will appear with every match of its entity in + the skill output. + :vartype description: str + :ivar type: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in + the skill output. + :vartype type: str + :ivar subtype: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in + the skill output. + :vartype subtype: str + :ivar id: This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in + the skill output. + :vartype id: str + :ivar case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + entity + name should be sensitive to character casing. Sample case insensitive matches + of "Microsoft" could be: microsoft, microSoft, MICROSOFT. + :vartype case_sensitive: bool + :ivar accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with the + entity + name should be sensitive to accent. + :vartype accent_sensitive: bool + :ivar fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number of + divergent + characters that would still constitute a match with the entity name. The + smallest possible fuzziness for any given match is returned. For instance, if + the edit distance is set to 3, "Windows10" would still match "Windows", + "Windows10" and "Windows 7". When case sensitivity is set to false, case + differences do NOT count towards fuzziness tolerance, but otherwise do. + :vartype fuzzy_edit_distance: int + :ivar default_case_sensitive: Changes the default case sensitivity value for this entity. It can be + used to + change the default value of all aliases caseSensitive values. + :vartype default_case_sensitive: bool + :ivar default_accent_sensitive: Changes the default accent sensitivity value for this entity. + It can be used to + change the default value of all aliases accentSensitive values. + :vartype default_accent_sensitive: bool + :ivar default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this + entity. It can be used + to change the default value of all aliases fuzzyEditDistance values. + :vartype default_fuzzy_edit_distance: int + :ivar aliases: An array of complex objects that can be used to specify alternative spellings + or synonyms to the root entity name. + :vartype aliases: list[~azure.search.documents.models.CustomEntityAlias] + """ + + name: str = rest_field() + """The top-level entity descriptor. Matches in the skill output will be grouped by + this name, and it should represent the \"normalized\" form of the text being + found.
Required.""" + description: Optional[str] = rest_field() + """This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in + the skill output.""" + type: Optional[str] = rest_field() + """This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in + the skill output.""" + subtype: Optional[str] = rest_field() + """This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in + the skill output.""" + id: Optional[str] = rest_field() + """This field can be used as a passthrough for custom metadata about the matched + text(s). The value of this field will appear with every match of its entity in + the skill output.""" + case_sensitive: Optional[bool] = rest_field(name="caseSensitive") + """Defaults to false. Boolean value denoting whether comparisons with the entity + name should be sensitive to character casing. Sample case insensitive matches + of \"Microsoft\" could be: microsoft, microSoft, MICROSOFT.""" + accent_sensitive: Optional[bool] = rest_field(name="accentSensitive") + """Defaults to false. Boolean value denoting whether comparisons with the entity + name should be sensitive to accent.""" + fuzzy_edit_distance: Optional[int] = rest_field(name="fuzzyEditDistance") + """Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent + characters that would still constitute a match with the entity name. The + smallest possible fuzziness for any given match is returned. For instance, if + the edit distance is set to 3, \"Windows10\" would still match \"Windows\", + \"Windows10\" and \"Windows 7\". When case sensitivity is set to false, case + differences do NOT count towards fuzziness tolerance, but otherwise do.""" + default_case_sensitive: Optional[bool] = rest_field(name="defaultCaseSensitive") + """Changes the default case sensitivity value for this entity. It can be used to + change the default value of all aliases caseSensitive values.""" + default_accent_sensitive: Optional[bool] = rest_field(name="defaultAccentSensitive") + """Changes the default accent sensitivity value for this entity. It can be used to + change the default value of all aliases accentSensitive values.""" + default_fuzzy_edit_distance: Optional[int] = rest_field(name="defaultFuzzyEditDistance") + """Changes the default fuzzy edit distance value for this entity. It can be used + to change the default value of all aliases fuzzyEditDistance values.""" + aliases: Optional[List["_models.CustomEntityAlias"]] = rest_field() + """An array of complex objects that can be used to specify alternative spellings + or synonyms to the root entity name.""" + + @overload + def __init__( + self, + *, + name: str, + description: Optional[str] = None, + type: Optional[str] = None, + subtype: Optional[str] = None, + id: Optional[str] = None, # pylint: disable=redefined-builtin + case_sensitive: Optional[bool] = None, + accent_sensitive: Optional[bool] = None, + fuzzy_edit_distance: Optional[int] = None, + default_case_sensitive: Optional[bool] = None, + default_accent_sensitive: Optional[bool] = None, + default_fuzzy_edit_distance: Optional[int] = None, + aliases: Optional[List["_models.CustomEntityAlias"]] = None, + ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CustomEntityAlias(_model_base.Model):
+ """A complex object that can be used to specify alternative spellings or synonyms
+ to the root entity name.
+
+
+ :ivar text: The text of the alias. Required.
+ :vartype text: str
+ :ivar case_sensitive: Determine if the alias is case sensitive.
+ :vartype case_sensitive: bool
+ :ivar accent_sensitive: Determine if the alias is accent sensitive.
+ :vartype accent_sensitive: bool
+ :ivar fuzzy_edit_distance: Determine the fuzzy edit distance of the alias.
+ :vartype fuzzy_edit_distance: int
+ """
+
+ text: str = rest_field()
+ """The text of the alias. Required."""
+ case_sensitive: Optional[bool] = rest_field(name="caseSensitive")
+ """Determine if the alias is case sensitive."""
+ accent_sensitive: Optional[bool] = rest_field(name="accentSensitive")
+ """Determine if the alias is accent sensitive."""
+ fuzzy_edit_distance: Optional[int] = rest_field(name="fuzzyEditDistance")
+ """Determine the fuzzy edit distance of the alias."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ text: str,
+ case_sensitive: Optional[bool] = None,
+ accent_sensitive: Optional[bool] = None,
+ fuzzy_edit_distance: Optional[int] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CustomEntityLookupSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.CustomEntityLookupSkill"):
+ """A skill that looks for text from a custom, user-defined list of words and phrases.
+
+
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the
+ skills array, prefixed with the character '#'.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of
+ the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default
+ is /document.
+ :vartype context: str
+ :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of
+ an upstream skill. Required.
+ :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry]
+ :ivar outputs: The output of a skill is either a field in a search index, or a value that can
+ be consumed as an input by another skill. Required.
+ :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is ``en``.
+ Known values are: "da", "de", "en", "es", "fi", "fr", "it", "ko", and "pt".
+ :vartype default_language_code: str or
+ ~azure.search.documents.models.CustomEntityLookupSkillLanguage
+ :ivar entities_definition_uri: Path to a JSON or CSV file containing all the target text to
+ match against.
+ This entity definition is read at the beginning of an indexer run. Any updates
+ to this file during an indexer run will not take effect until subsequent runs.
+ This config must be accessible over HTTPS. + :vartype entities_definition_uri: str + :ivar inline_entities_definition: The inline CustomEntity definition. + :vartype inline_entities_definition: list[~azure.search.documents.models.CustomEntity] + :ivar global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is not + set in CustomEntity, + this value will be the default value. + :vartype global_default_case_sensitive: bool + :ivar global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive is + not set in + CustomEntity, this value will be the default value. + :vartype global_default_accent_sensitive: bool + :ivar global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If + FuzzyEditDistance is not set in + CustomEntity, this value will be the default value. + :vartype global_default_fuzzy_edit_distance: int + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.CustomEntityLookupSkill". + :vartype odata_type: str + """ + + default_language_code: Optional[Union[str, "_models.CustomEntityLookupSkillLanguage"]] = rest_field( + name="defaultLanguageCode" + ) + """A value indicating which language code to use. Default is ``en``. Known values are: \"da\", + \"de\", \"en\", \"es\", \"fi\", \"fr\", \"it\", \"ko\", and \"pt\".""" + entities_definition_uri: Optional[str] = rest_field(name="entitiesDefinitionUri") + """Path to a JSON or CSV file containing all the target text to match against. + This entity definition is read at the beginning of an indexer run. Any updates + to this file during an indexer run will not take effect until subsequent runs. + This config must be accessible over HTTPS.""" + inline_entities_definition: Optional[List["_models.CustomEntity"]] = rest_field(name="inlineEntitiesDefinition") + """The inline CustomEntity definition.""" + global_default_case_sensitive: Optional[bool] = rest_field(name="globalDefaultCaseSensitive") + """A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, + this value will be the default value.""" + global_default_accent_sensitive: Optional[bool] = rest_field(name="globalDefaultAccentSensitive") + """A global flag for AccentSensitive. If AccentSensitive is not set in + CustomEntity, this value will be the default value.""" + global_default_fuzzy_edit_distance: Optional[int] = rest_field(name="globalDefaultFuzzyEditDistance") + """A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in + CustomEntity, this value will be the default value.""" + odata_type: Literal["#Microsoft.Skills.Text.CustomEntityLookupSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.CustomEntityLookupSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "_models.CustomEntityLookupSkillLanguage"]] = None, + entities_definition_uri: Optional[str] = None, + inline_entities_definition: Optional[List["_models.CustomEntity"]] = None, + global_default_case_sensitive: Optional[bool] = None, + global_default_accent_sensitive: Optional[bool] = None, + global_default_fuzzy_edit_distance: Optional[int] = None, + ) -> None: ... 
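# Editor's note: illustrative sketch only, not part of the generated file in this diff.
# It assumes these models are re-exported from azure.search.documents.models (as the
# :vartype references above suggest) and that InputFieldMappingEntry /
# OutputFieldMappingEntry keep their usual keyword parameters (name/source and
# name/target_name); treat those parameter names as assumptions.
from azure.search.documents.models import (
    CustomEntity,
    CustomEntityLookupSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

# Supplies the word/phrase list inline via inline_entities_definition rather than
# pointing entities_definition_uri at a JSON/CSV file hosted over HTTPS.
skill = CustomEntityLookupSkill(
    name="my-entity-lookup",
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="entities", target_name="matchedEntities")],
    inline_entities_definition=[CustomEntity(name="Windows", fuzzy_edit_distance=1)],
)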
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, odata_type="#Microsoft.Skills.Text.CustomEntityLookupSkill", **kwargs)
+
+
+class LexicalNormalizer(_model_base.Model):
+ """Base type for normalizers.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ CustomNormalizer
+
+
+ :ivar odata_type: The discriminator for derived types. Required. Default value is None.
+ :vartype odata_type: str
+ :ivar name: The name of the normalizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and
+ is limited to 128 characters. Required.
+ :vartype name: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ odata_type: str = rest_discriminator(name="@odata.type")
+ """The discriminator for derived types. Required. Default value is None."""
+ name: str = rest_field()
+ """The name of the normalizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and
+ is limited to 128 characters. Required."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ odata_type: str,
+ name: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class CustomNormalizer(LexicalNormalizer, discriminator="#Microsoft.Azure.Search.CustomNormalizer"):
+ """Allows you to configure normalization for filterable, sortable, and facetable
+ fields, which by default operate with strict matching. This is a user-defined
+ configuration consisting of at least one or more filters, which modify the
+ token that is stored.
+
+
+ :ivar name: The name of the normalizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and
+ is limited to 128 characters. Required.
+ :vartype name: str
+ :ivar token_filters: A list of token filters used to filter out or modify the input token. For
+ example, you can specify a lowercase filter that converts all characters to
+ lowercase. The filters are run in the order in which they are listed.
+ :vartype token_filters: list[str or ~azure.search.documents.models.TokenFilterName]
+ :ivar char_filters: A list of character filters used to prepare input text before it is
+ processed.
+ For instance, they can replace certain characters or symbols. The filters are
+ run in the order in which they are listed.
+ :vartype char_filters: list[str or ~azure.search.documents.models.CharFilterName]
+ :ivar odata_type: A URI fragment specifying the type of normalizer. Required. Default value is
+ "#Microsoft.Azure.Search.CustomNormalizer".
+ :vartype odata_type: str
+ """
+
+ token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field(name="tokenFilters")
+ """A list of token filters used to filter out or modify the input token. For
+ example, you can specify a lowercase filter that converts all characters to
+ lowercase. The filters are run in the order in which they are listed."""
+ char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field(name="charFilters")
+ """A list of character filters used to prepare input text before it is processed.
+ For instance, they can replace certain characters or symbols. The filters are
+ run in the order in which they are listed."""
+ odata_type: Literal["#Microsoft.Azure.Search.CustomNormalizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long
+ """A URI fragment specifying the type of normalizer. Required. Default value is
+ \"#Microsoft.Azure.Search.CustomNormalizer\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = None,
+ char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, odata_type="#Microsoft.Azure.Search.CustomNormalizer", **kwargs)
+
+
+class DataChangeDetectionPolicy(_model_base.Model):
+ """Base type for data change detection policies.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ HighWaterMarkChangeDetectionPolicy, SqlIntegratedChangeTrackingPolicy
+
+
+ :ivar odata_type: The discriminator for derived types. Required. Default value is None.
+ :vartype odata_type: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ odata_type: str = rest_discriminator(name="@odata.type")
+ """The discriminator for derived types. Required. Default value is None."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ odata_type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class DataDeletionDetectionPolicy(_model_base.Model):
+ """Base type for data deletion detection policies.
+
+ You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+ NativeBlobSoftDeleteDeletionDetectionPolicy, SoftDeleteColumnDeletionDetectionPolicy
+
+
+ :ivar odata_type: The discriminator for derived types. Required. Default value is None.
+ :vartype odata_type: str
+ """
+
+ __mapping__: Dict[str, _model_base.Model] = {}
+ odata_type: str = rest_discriminator(name="@odata.type")
+ """The discriminator for derived types. Required. Default value is None."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ odata_type: str,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class DataSourceCredentials(_model_base.Model):
+ """Represents credentials that can be used to connect to a datasource.
+
+ :ivar connection_string: The connection string for the datasource. Set to ``<unchanged>`` (with
+ brackets)
+ if you don't want the connection string updated. Set to ``<redacted>`` if you
+ want to remove the connection string value from the datasource.
+ :vartype connection_string: str
+ """
+
+ connection_string: Optional[str] = rest_field(name="connectionString")
+ """The connection string for the datasource. Set to ``<unchanged>`` (with brackets)
+ if you don't want the connection string updated. Set to ``<redacted>`` if you
+ want to remove the connection string value from the datasource."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ connection_string: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class DebugInfo(_model_base.Model):
+ """Contains debugging information that can be used to further explore your search
+ results.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar query_rewrites: Contains debugging information specific to query rewrites.
+ :vartype query_rewrites: ~azure.search.documents.models.QueryRewritesDebugInfo
+ """
+
+ query_rewrites: Optional["_models.QueryRewritesDebugInfo"] = rest_field(name="queryRewrites", visibility=["read"])
+ """Contains debugging information specific to query rewrites."""
+
+
+class DefaultCognitiveServicesAccount(
+ CognitiveServicesAccount, discriminator="#Microsoft.Azure.Search.DefaultCognitiveServices"
+):
+ """An empty object that represents the default Azure AI service resource for a
+ skillset.
+
+
+ :ivar description: Description of the Azure AI service resource attached to a skillset.
+ :vartype description: str
+ :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a
+ skillset. Required. Default value is "#Microsoft.Azure.Search.DefaultCognitiveServices".
+ :vartype odata_type: str
+ """
+
+ odata_type: Literal["#Microsoft.Azure.Search.DefaultCognitiveServices"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long
+ """A URI fragment specifying the type of Azure AI service resource attached to a
+ skillset. Required. Default value is \"#Microsoft.Azure.Search.DefaultCognitiveServices\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ description: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, odata_type="#Microsoft.Azure.Search.DefaultCognitiveServices", **kwargs)
+
+
+class DictionaryDecompounderTokenFilter(
+ TokenFilter, discriminator="#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"
+):
+ """Decomposes compound words found in many Germanic languages. This token filter
+ is implemented using Apache Lucene.
+
+
+ :ivar name: The name of the token filter. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and
+ is limited to 128 characters. Required.
+ :vartype name: str
+ :ivar word_list: The list of words to match against. Required.
+ :vartype word_list: list[str]
+ :ivar min_word_size: The minimum word size. Only words longer than this get processed. Default
+ is 5.
+ Maximum is 300.
+ :vartype min_word_size: int
+ :ivar min_subword_size: The minimum subword size. Only subwords longer than this are outputted.
+ Default
+ is 2. Maximum is 300.
+ :vartype min_subword_size: int + :ivar max_subword_size: The maximum subword size. Only subwords shorter than this are + outputted. + Default is 15. Maximum is 300. + :vartype max_subword_size: int + :ivar only_longest_match: A value indicating whether to add only the longest matching subword + to the + output. Default is false. + :vartype only_longest_match: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter". + :vartype odata_type: str + """ + + word_list: List[str] = rest_field(name="wordList") + """The list of words to match against. Required.""" + min_word_size: Optional[int] = rest_field(name="minWordSize") + """The minimum word size. Only words longer than this get processed. Default is 5. + Maximum is 300.""" + min_subword_size: Optional[int] = rest_field(name="minSubwordSize") + """The minimum subword size. Only subwords longer than this are outputted. Default + is 2. Maximum is 300.""" + max_subword_size: Optional[int] = rest_field(name="maxSubwordSize") + """The maximum subword size. Only subwords shorter than this are outputted. + Default is 15. Maximum is 300.""" + only_longest_match: Optional[bool] = rest_field(name="onlyLongestMatch") + """A value indicating whether to add only the longest matching subword to the + output. Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + word_list: List[str], + min_word_size: Optional[int] = None, + min_subword_size: Optional[int] = None, + max_subword_size: Optional[int] = None, + only_longest_match: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter", **kwargs) + + +class ScoringFunction(_model_base.Model): + """Base type for functions that can modify document scores during ranking. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + DistanceScoringFunction, FreshnessScoringFunction, MagnitudeScoringFunction, TagScoringFunction + + + :ivar field_name: The name of the field used as input to the scoring function. Required. + :vartype field_name: str + :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. + Required. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document + scores; + defaults to "Linear". Known values are: "linear", "constant", "quadratic", and "logarithmic". + :vartype interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation + :ivar type: Type of ScoringFunction. Required. Default value is None. + :vartype type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + field_name: str = rest_field(name="fieldName") + """The name of the field used as input to the scoring function. Required.""" + boost: float = rest_field() + """A multiplier for the raw score. Must be a positive number not equal to 1.0. 
Required.""" + interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = rest_field() + """A value indicating how boosting will be interpolated across document scores; + defaults to \"Linear\". Known values are: \"linear\", \"constant\", \"quadratic\", and + \"logarithmic\".""" + type: str = rest_discriminator(name="type") + """Type of ScoringFunction. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + field_name: str, + boost: float, + type: str, + interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DistanceScoringFunction(ScoringFunction, discriminator="distance"): + """Defines a function that boosts scores based on distance from a geographic + location. + + + :ivar field_name: The name of the field used as input to the scoring function. Required. + :vartype field_name: str + :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. + Required. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document + scores; + defaults to "Linear". Known values are: "linear", "constant", "quadratic", and "logarithmic". + :vartype interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation + :ivar parameters: Parameter values for the distance scoring function. Required. + :vartype parameters: ~azure.search.documents.models.DistanceScoringParameters + :ivar type: Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + "distance". + :vartype type: str + """ + + parameters: "_models.DistanceScoringParameters" = rest_field(name="distance") + """Parameter values for the distance scoring function. Required.""" + type: Literal["distance"] = rest_discriminator(name="type") # type: ignore + """Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + \"distance\".""" + + @overload + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "_models.DistanceScoringParameters", + interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="distance", **kwargs) + + +class DistanceScoringParameters(_model_base.Model): + """Provides parameter values to a distance scoring function. + + + :ivar reference_point_parameter: The name of the parameter passed in search queries to specify + the reference + location. Required. + :vartype reference_point_parameter: str + :ivar boosting_distance: The distance in kilometers from the reference location where the + boosting range + ends. Required. + :vartype boosting_distance: float + """ + + reference_point_parameter: str = rest_field(name="referencePointParameter") + """The name of the parameter passed in search queries to specify the reference + location. 
Required.""" + boosting_distance: float = rest_field(name="boostingDistance") + """The distance in kilometers from the reference location where the boosting range + ends. Required.""" + + @overload + def __init__( + self, + *, + reference_point_parameter: str, + boosting_distance: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class DocumentDebugInfo(_model_base.Model): + """Contains debugging information that can be used to further explore your search + results. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar semantic: Contains debugging information specific to semantic ranking requests. + :vartype semantic: ~azure.search.documents.models.SemanticDebugInfo + :ivar vectors: Contains debugging information specific to vector and hybrid search. + :vartype vectors: ~azure.search.documents.models.VectorsDebugInfo + """ + + semantic: Optional["_models.SemanticDebugInfo"] = rest_field(visibility=["read"]) + """Contains debugging information specific to semantic ranking requests.""" + vectors: Optional["_models.VectorsDebugInfo"] = rest_field(visibility=["read"]) + """Contains debugging information specific to vector and hybrid search.""" + + +class DocumentExtractionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Util.DocumentExtractionSkill"): + """A skill that extracts content from a file within the enrichment pipeline. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. + :vartype parsing_mode: str + :ivar data_to_extract: The type of data to be extracted for the skill. Will be set to + 'contentAndMetadata' if not defined. + :vartype data_to_extract: str + :ivar configuration: A dictionary of configurations for the skill. + :vartype configuration: dict[str, any] + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Util.DocumentExtractionSkill". + :vartype odata_type: str + """ + + parsing_mode: Optional[str] = rest_field(name="parsingMode") + """The parsingMode for the skill. Will be set to 'default' if not defined.""" + data_to_extract: Optional[str] = rest_field(name="dataToExtract") + """The type of data to be extracted for the skill. 
Will be set to + 'contentAndMetadata' if not defined.""" + configuration: Optional[Dict[str, Any]] = rest_field() + """A dictionary of configurations for the skill.""" + odata_type: Literal["#Microsoft.Skills.Util.DocumentExtractionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Util.DocumentExtractionSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + parsing_mode: Optional[str] = None, + data_to_extract: Optional[str] = None, + configuration: Optional[Dict[str, Any]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Util.DocumentExtractionSkill", **kwargs) + + +class DocumentIntelligenceLayoutSkill( + SearchIndexerSkill, discriminator="#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" +): + """A skill that extracts content and layout information (as markdown), via Azure + AI Services, from files within the enrichment pipeline. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar output_mode: Controls the cardinality of the output produced by the skill. Default is + 'oneToMany'. "oneToMany" + :vartype output_mode: str or + ~azure.search.documents.models.DocumentIntelligenceLayoutSkillOutputMode + :ivar markdown_header_depth: The depth of headers in the markdown output. Default is h6. Known + values are: "h1", "h2", "h3", "h4", "h5", and "h6". + :vartype markdown_header_depth: str or + ~azure.search.documents.models.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill". + :vartype odata_type: str + """ + + output_mode: Optional[Union[str, "_models.DocumentIntelligenceLayoutSkillOutputMode"]] = rest_field( + name="outputMode" + ) + """Controls the cardinality of the output produced by the skill. Default is + 'oneToMany'. 
\"oneToMany\"""" + markdown_header_depth: Optional[Union[str, "_models.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth"]] = ( + rest_field(name="markdownHeaderDepth") + ) + """The depth of headers in the markdown output. Default is h6. Known values are: \"h1\", \"h2\", + \"h3\", \"h4\", \"h5\", and \"h6\".""" + odata_type: Literal["#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + output_mode: Optional[Union[str, "_models.DocumentIntelligenceLayoutSkillOutputMode"]] = None, + markdown_header_depth: Optional[ + Union[str, "_models.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth"] + ] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill", **kwargs) + + +class DocumentKeysOrIds(_model_base.Model): + """The type of the keysOrIds. + + :ivar document_keys: document keys to be reset. + :vartype document_keys: list[str] + :ivar datasource_document_ids: datasource document identifiers to be reset. + :vartype datasource_document_ids: list[str] + """ + + document_keys: Optional[List[str]] = rest_field(name="documentKeys") + """document keys to be reset.""" + datasource_document_ids: Optional[List[str]] = rest_field(name="datasourceDocumentIds") + """datasource document identifiers to be reset.""" + + @overload + def __init__( + self, + *, + document_keys: Optional[List[str]] = None, + datasource_document_ids: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class EdgeNGramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.EdgeNGramTokenFilter"): + """Generates n-grams of the given size(s) starting from the front or the back of + an input token. This token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. + :vartype max_gram: int + :ivar side: Specifies which side of the input the n-gram should be generated from. Default + is "front". Known values are: "front" and "back". + :vartype side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.EdgeNGramTokenFilter". 
+ :vartype odata_type: str + """ + + min_gram: Optional[int] = rest_field(name="minGram") + """The minimum n-gram length. Default is 1. Must be less than the value of maxGram.""" + max_gram: Optional[int] = rest_field(name="maxGram") + """The maximum n-gram length. Default is 2.""" + side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = rest_field() + """Specifies which side of the input the n-gram should be generated from. Default + is \"front\". Known values are: \"front\" and \"back\".""" + odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = None, + max_gram: Optional[int] = None, + side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.EdgeNGramTokenFilter", **kwargs) + + +class EdgeNGramTokenFilterV2(TokenFilter, discriminator="#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"): + """Generates n-grams of the given size(s) starting from the front or the back of + an input token. This token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar side: Specifies which side of the input the n-gram should be generated from. Default + is "front". Known values are: "front" and "back". + :vartype side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2". + :vartype odata_type: str + """ + + min_gram: Optional[int] = rest_field(name="minGram") + """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram.""" + max_gram: Optional[int] = rest_field(name="maxGram") + """The maximum n-gram length. Default is 2. Maximum is 300.""" + side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = rest_field() + """Specifies which side of the input the n-gram should be generated from. Default + is \"front\". Known values are: \"front\" and \"back\".""" + odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\".""" + + @overload + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = None, + max_gram: Optional[int] = None, + side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = None, + ) -> None: ... 
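# Editor's note: illustrative sketch only, not part of the generated file in this diff.
# It assumes EdgeNGramTokenFilterV2 is re-exported from azure.search.documents.models,
# as the :vartype references above suggest; "front" is one of the known values listed
# for EdgeNGramTokenFilterSide.
from azure.search.documents.models import EdgeNGramTokenFilterV2

# Emits leading-edge n-grams of 2 to 10 characters per token, e.g. "search" becomes
# "se", "sea", "sear", "searc", "search". min_gram/max_gram serialize to the
# minGram/maxGram wire names via rest_field(name=...).
edge_ngram = EdgeNGramTokenFilterV2(
    name="my_edge_ngram",
    min_gram=2,
    max_gram=10,
    side="front",
)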
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.EdgeNGramTokenFilterV2", **kwargs) + + +class EdgeNGramTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.EdgeNGramTokenizer"): + """Tokenizes the input from an edge into n-grams of the given size(s). This + tokenizer is implemented using Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar token_chars: Character classes to keep in the tokens. + :vartype token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.EdgeNGramTokenizer". + :vartype odata_type: str + """ + + min_gram: Optional[int] = rest_field(name="minGram") + """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram.""" + max_gram: Optional[int] = rest_field(name="maxGram") + """The maximum n-gram length. Default is 2. Maximum is 300.""" + token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = rest_field(name="tokenChars") + """Character classes to keep in the tokens.""" + odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.EdgeNGramTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = None, + max_gram: Optional[int] = None, + token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.EdgeNGramTokenizer", **kwargs) + + +class ElisionTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.ElisionTokenFilter"): + """Removes elisions. For example, "l'avion" (the plane) will be converted to + "avion" (plane). This token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar articles: The set of articles to remove. + :vartype articles: list[str] + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.ElisionTokenFilter". 
+ :vartype odata_type: str
+ """
+
+ articles: Optional[List[str]] = rest_field()
+ """The set of articles to remove."""
+ odata_type: Literal["#Microsoft.Azure.Search.ElisionTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long
+ """A URI fragment specifying the type of token filter. Required. Default value is
+ \"#Microsoft.Azure.Search.ElisionTokenFilter\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ name: str,
+ articles: Optional[List[str]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, odata_type="#Microsoft.Azure.Search.ElisionTokenFilter", **kwargs)
+
+
+class EntityLinkingSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.V3.EntityLinkingSkill"):
+ """Using the Text Analytics API, extracts linked entities from text.
+
+
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the
+ skills array, prefixed with the character '#'.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of
+ the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default
+ is /document.
+ :vartype context: str
+ :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of
+ an upstream skill. Required.
+ :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry]
+ :ivar outputs: The output of a skill is either a field in a search index, or a value that can
+ be consumed as an input by another skill. Required.
+ :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is ``en``.
+ :vartype default_language_code: str
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence
+ score is greater than the value specified. If not set (default), or if
+ explicitly set to null, all entities will be included.
+ :vartype minimum_precision: float
+ :ivar model_version: The version of the model to use when calling the Text Analytics service.
+ It
+ will default to the latest available when not specified. We recommend you do
+ not specify this value unless absolutely necessary.
+ :vartype model_version: str
+ :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is
+ "#Microsoft.Skills.Text.V3.EntityLinkingSkill".
+ :vartype odata_type: str
+ """
+
+ default_language_code: Optional[str] = rest_field(name="defaultLanguageCode")
+ """A value indicating which language code to use. Default is ``en``."""
+ minimum_precision: Optional[float] = rest_field(name="minimumPrecision")
+ """A value between 0 and 1 that can be used to only include entities whose confidence
+ score is greater than the value specified. If not set (default), or if
+ explicitly set to null, all entities will be included."""
+ model_version: Optional[str] = rest_field(name="modelVersion")
+ """The version of the model to use when calling the Text Analytics service. It
+ will default to the latest available when not specified. We recommend you do
+ not specify this value unless absolutely necessary."""
+ odata_type: Literal["#Microsoft.Skills.Text.V3.EntityLinkingSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long
+ """A URI fragment specifying the type of skill. Required. Default value is
+ \"#Microsoft.Skills.Text.V3.EntityLinkingSkill\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ inputs: List["_models.InputFieldMappingEntry"],
+ outputs: List["_models.OutputFieldMappingEntry"],
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ context: Optional[str] = None,
+ default_language_code: Optional[str] = None,
+ minimum_precision: Optional[float] = None,
+ model_version: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, odata_type="#Microsoft.Skills.Text.V3.EntityLinkingSkill", **kwargs)
+
+
+class EntityRecognitionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.EntityRecognitionSkill"):
+ """This skill is deprecated. Use the V3.EntityRecognitionSkill instead.
+
+
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the
+ skills array, prefixed with the character '#'.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of
+ the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default
+ is /document.
+ :vartype context: str
+ :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of
+ an upstream skill. Required.
+ :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry]
+ :ivar outputs: The output of a skill is either a field in a search index, or a value that can
+ be consumed as an input by another skill. Required.
+ :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry]
+ :ivar categories: A list of entity categories that should be extracted.
+ :vartype categories: list[str or ~azure.search.documents.models.EntityCategory]
+ :ivar default_language_code: A value indicating which language code to use. Default is ``en``.
+ Known values are: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el",
+ "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", and "tr".
+ :vartype default_language_code: str or
+ ~azure.search.documents.models.EntityRecognitionSkillLanguage
+ :ivar include_typeless_entities: Determines whether or not to include entities which are well
+ known but don't
+ conform to a pre-defined type. If this configuration is not set (default), set
+ to null or set to false, entities which don't conform to one of the pre-defined
+ types will not be surfaced.
+ :vartype include_typeless_entities: bool
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence
+ score is greater than the value specified. If not set (default), or if
+ explicitly set to null, all entities will be included.
+ :vartype minimum_precision: float
+ :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is
+ "#Microsoft.Skills.Text.EntityRecognitionSkill".
+ :vartype odata_type: str
+ """
+
+ categories: Optional[List[Union[str, "_models.EntityCategory"]]] = rest_field()
+ """A list of entity categories that should be extracted."""
+ default_language_code: Optional[Union[str, "_models.EntityRecognitionSkillLanguage"]] = rest_field(
+ name="defaultLanguageCode"
+ )
+ """A value indicating which language code to use. Default is ``en``. Known values are: \"ar\",
+ \"cs\", \"zh-Hans\", \"zh-Hant\", \"da\", \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"el\",
+ \"hu\", \"it\", \"ja\", \"ko\", \"no\", \"pl\", \"pt-PT\", \"pt-BR\", \"ru\", \"es\", \"sv\",
+ and \"tr\"."""
+ include_typeless_entities: Optional[bool] = rest_field(name="includeTypelessEntities")
+ """Determines whether or not to include entities which are well known but don't
+ conform to a pre-defined type. If this configuration is not set (default), set
+ to null or set to false, entities which don't conform to one of the pre-defined
+ types will not be surfaced."""
+ minimum_precision: Optional[float] = rest_field(name="minimumPrecision")
+ """A value between 0 and 1 that can be used to only include entities whose confidence
+ score is greater than the value specified. If not set (default), or if
+ explicitly set to null, all entities will be included."""
+ odata_type: Literal["#Microsoft.Skills.Text.EntityRecognitionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long
+ """A URI fragment specifying the type of skill. Required. Default value is
+ \"#Microsoft.Skills.Text.EntityRecognitionSkill\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ inputs: List["_models.InputFieldMappingEntry"],
+ outputs: List["_models.OutputFieldMappingEntry"],
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ context: Optional[str] = None,
+ categories: Optional[List[Union[str, "_models.EntityCategory"]]] = None,
+ default_language_code: Optional[Union[str, "_models.EntityRecognitionSkillLanguage"]] = None,
+ include_typeless_entities: Optional[bool] = None,
+ minimum_precision: Optional[float] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, odata_type="#Microsoft.Skills.Text.EntityRecognitionSkill", **kwargs)
+
+
+class EntityRecognitionSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.V3.EntityRecognitionSkill"):
+ """Using the Text Analytics API, extracts entities of different types from text.
+
+
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the
+ skills array, prefixed with the character '#'.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of
+ the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default
+ is /document.
+ :vartype context: str
+ :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of
+ an upstream skill. Required.
+ :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry]
+ :ivar outputs: The output of a skill is either a field in a search index, or a value that can
+ be consumed as an input by another skill. Required.
+ :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry]
+ :ivar categories: A list of entity categories that should be extracted.
+ :vartype categories: list[str]
+ :ivar default_language_code: A value indicating which language code to use. Default is ``en``.
+ :vartype default_language_code: str
+ :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+ confidence
+ score is greater than the value specified. If not set (default), or if
+ explicitly set to null, all entities will be included.
+ :vartype minimum_precision: float
+ :ivar model_version: The version of the model to use when calling the Text Analytics API. It
+ will
+ default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary.
+ :vartype model_version: str
+ :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is
+ "#Microsoft.Skills.Text.V3.EntityRecognitionSkill".
+ :vartype odata_type: str
+ """
+
+ categories: Optional[List[str]] = rest_field()
+ """A list of entity categories that should be extracted."""
+ default_language_code: Optional[str] = rest_field(name="defaultLanguageCode")
+ """A value indicating which language code to use. Default is ``en``."""
+ minimum_precision: Optional[float] = rest_field(name="minimumPrecision")
+ """A value between 0 and 1 that can be used to only include entities whose confidence
+ score is greater than the value specified. If not set (default), or if
+ explicitly set to null, all entities will be included."""
+ model_version: Optional[str] = rest_field(name="modelVersion")
+ """The version of the model to use when calling the Text Analytics API. It will
+ default to the latest available when not specified. We recommend you do not
+ specify this value unless absolutely necessary."""
+ odata_type: Literal["#Microsoft.Skills.Text.V3.EntityRecognitionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long
+ """A URI fragment specifying the type of skill. Required. Default value is
+ \"#Microsoft.Skills.Text.V3.EntityRecognitionSkill\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ inputs: List["_models.InputFieldMappingEntry"],
+ outputs: List["_models.OutputFieldMappingEntry"],
+ name: Optional[str] = None,
+ description: Optional[str] = None,
+ context: Optional[str] = None,
+ categories: Optional[List[str]] = None,
+ default_language_code: Optional[str] = None,
+ minimum_precision: Optional[float] = None,
+ model_version: Optional[str] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, odata_type="#Microsoft.Skills.Text.V3.EntityRecognitionSkill", **kwargs)
+
+
+class ErrorAdditionalInfo(_model_base.Model):
+ """The resource management error additional info.
+
+ Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar type: The additional info type.
+ :vartype type: str
+ :ivar info: The additional info.
+ :vartype info: dict[str, str] + """ + + type: Optional[str] = rest_field(visibility=["read"]) + """The additional info type.""" + info: Optional[Dict[str, str]] = rest_field(visibility=["read"]) + """The additional info.""" + + +class ErrorDetail(_model_base.Model): + """The error detail. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar code: The error code. + :vartype code: str + :ivar message: The error message. + :vartype message: str + :ivar target: The error target. + :vartype target: str + :ivar details: The error details. + :vartype details: list[~azure.search.documents.models.ErrorDetail] + :ivar additional_info: The error additional info. + :vartype additional_info: list[~azure.search.documents.models.ErrorAdditionalInfo] + """ + + code: Optional[str] = rest_field(visibility=["read"]) + """The error code.""" + message: Optional[str] = rest_field(visibility=["read"]) + """The error message.""" + target: Optional[str] = rest_field(visibility=["read"]) + """The error target.""" + details: Optional[List["_models.ErrorDetail"]] = rest_field(visibility=["read"]) + """The error details.""" + additional_info: Optional[List["_models.ErrorAdditionalInfo"]] = rest_field( + name="additionalInfo", visibility=["read"] + ) + """The error additional info.""" + + +class ErrorResponse(_model_base.Model): + """Common error response for all Azure Resource Manager APIs to return error + details for failed operations. (This also follows the OData error response + format.). + + :ivar error: The error object. + :vartype error: ~azure.search.documents.models.ErrorDetail + """ + + error: Optional["_models.ErrorDetail"] = rest_field() + """The error object.""" + + @overload + def __init__( + self, + *, + error: Optional["_models.ErrorDetail"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorSearchAlgorithmConfiguration(_model_base.Model): + """Contains configuration options specific to the algorithm used during indexing + or querying. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + ExhaustiveKnnAlgorithmConfiguration, HnswAlgorithmConfiguration + + + :ivar name: The name to associate with this particular configuration. Required. + :vartype name: str + :ivar kind: Type of VectorSearchAlgorithmConfiguration. Required. Known values are: "hnsw" and + "exhaustiveKnn". + :vartype kind: str or ~azure.search.documents.models.VectorSearchAlgorithmKind + """ + + __mapping__: Dict[str, _model_base.Model] = {} + name: str = rest_field() + """The name to associate with this particular configuration. Required.""" + kind: str = rest_discriminator(name="kind") + """Type of VectorSearchAlgorithmConfiguration. Required. Known values are: \"hnsw\" and + \"exhaustiveKnn\".""" + + @overload + def __init__( + self, + *, + name: str, + kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ExhaustiveKnnAlgorithmConfiguration(VectorSearchAlgorithmConfiguration, discriminator="exhaustiveKnn"): + """Contains configuration options specific to the exhaustive KNN algorithm used + during querying, which will perform brute-force search across the entire vector + index. + + + :ivar name: The name to associate with this particular configuration. Required. + :vartype name: str + :ivar parameters: Contains the parameters specific to exhaustive KNN algorithm. + :vartype parameters: ~azure.search.documents.models.ExhaustiveKnnParameters + :ivar kind: The name of the kind of algorithm being configured for use with vector search. + Required. Exhaustive KNN algorithm which will perform brute-force search. + :vartype kind: str or ~azure.search.documents.models.EXHAUSTIVE_KNN + """ + + parameters: Optional["_models.ExhaustiveKnnParameters"] = rest_field(name="exhaustiveKnnParameters") + """Contains the parameters specific to exhaustive KNN algorithm.""" + kind: Literal[VectorSearchAlgorithmKind.EXHAUSTIVE_KNN] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of algorithm being configured for use with vector search. Required. + Exhaustive KNN algorithm which will perform brute-force search.""" + + @overload + def __init__( + self, + *, + name: str, + parameters: Optional["_models.ExhaustiveKnnParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchAlgorithmKind.EXHAUSTIVE_KNN, **kwargs) + + +class ExhaustiveKnnParameters(_model_base.Model): + """Contains the parameters specific to exhaustive KNN algorithm. + + :ivar metric: The similarity metric to use for vector comparisons. Known values are: "cosine", + "euclidean", "dotProduct", and "hamming". + :vartype metric: str or ~azure.search.documents.models.VectorSearchAlgorithmMetric + """ + + metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = rest_field() + """The similarity metric to use for vector comparisons. Known values are: \"cosine\", + \"euclidean\", \"dotProduct\", and \"hamming\".""" + + @overload + def __init__( + self, + *, + metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FacetResult(_model_base.Model): + """A single bucket of a facet query result. Reports the number of documents with a + field value falling within a particular range or having a particular value or + interval. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar count: The approximate count of documents falling within the bucket described by this + facet. + :vartype count: int + :ivar facets: The nested facet query results for the search operation, organized as a + collection of buckets for each faceted field; null if the query did not contain + any nested facets. 
+ :vartype facets: dict[str, list[~azure.search.documents.models.FacetResult]] + """ + + count: Optional[int] = rest_field(visibility=["read"]) + """The approximate count of documents falling within the bucket described by this + facet.""" + facets: Optional[Dict[str, List["_models.FacetResult"]]] = rest_field(name="@search.facets", visibility=["read"]) + """The nested facet query results for the search operation, organized as a + collection of buckets for each faceted field; null if the query did not contain + any nested facets.""" + + +class FieldMapping(_model_base.Model): + """Defines a mapping between a field in a data source and a target field in an + index. + + + :ivar source_field_name: The name of the field in the data source. Required. + :vartype source_field_name: str + :ivar target_field_name: The name of the target field in the index. Same as the source field + name by + default. + :vartype target_field_name: str + :ivar mapping_function: A function to apply to each source field value before indexing. + :vartype mapping_function: ~azure.search.documents.models.FieldMappingFunction + """ + + source_field_name: str = rest_field(name="sourceFieldName") + """The name of the field in the data source. Required.""" + target_field_name: Optional[str] = rest_field(name="targetFieldName") + """The name of the target field in the index. Same as the source field name by + default.""" + mapping_function: Optional["_models.FieldMappingFunction"] = rest_field(name="mappingFunction") + """A function to apply to each source field value before indexing.""" + + @overload + def __init__( + self, + *, + source_field_name: str, + target_field_name: Optional[str] = None, + mapping_function: Optional["_models.FieldMappingFunction"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FieldMappingFunction(_model_base.Model): + """Represents a function that transforms a value from a data source before + indexing. + + + :ivar name: The name of the field mapping function. Required. + :vartype name: str + :ivar parameters: A dictionary of parameter name/value pairs to pass to the function. Each + value + must be of a primitive type. + :vartype parameters: dict[str, any] + """ + + name: str = rest_field() + """The name of the field mapping function. Required.""" + parameters: Optional[Dict[str, Any]] = rest_field() + """A dictionary of parameter name/value pairs to pass to the function. Each value + must be of a primitive type.""" + + @overload + def __init__( + self, + *, + name: str, + parameters: Optional[Dict[str, Any]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class FreshnessScoringFunction(ScoringFunction, discriminator="freshness"): + """Defines a function that boosts scores based on the value of a date-time field. + + + :ivar field_name: The name of the field used as input to the scoring function. Required. + :vartype field_name: str + :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. + Required. 
+ :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document + scores; + defaults to "Linear". Known values are: "linear", "constant", "quadratic", and "logarithmic". + :vartype interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation + :ivar parameters: Parameter values for the freshness scoring function. Required. + :vartype parameters: ~azure.search.documents.models.FreshnessScoringParameters + :ivar type: Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + "freshness". + :vartype type: str + """ + + parameters: "_models.FreshnessScoringParameters" = rest_field(name="freshness") + """Parameter values for the freshness scoring function. Required.""" + type: Literal["freshness"] = rest_discriminator(name="type") # type: ignore + """Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + \"freshness\".""" + + @overload + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "_models.FreshnessScoringParameters", + interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="freshness", **kwargs) + + +class FreshnessScoringParameters(_model_base.Model): + """Provides parameter values to a freshness scoring function. + + + :ivar boosting_duration: The expiration period after which boosting will stop for a particular + document. Required. + :vartype boosting_duration: ~datetime.timedelta + """ + + boosting_duration: datetime.timedelta = rest_field(name="boostingDuration") + """The expiration period after which boosting will stop for a particular document. Required.""" + + @overload + def __init__( + self, + *, + boosting_duration: datetime.timedelta, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class GetIndexStatisticsResult(_model_base.Model): + """Statistics for a given index. Statistics are collected periodically and are not + guaranteed to always be up-to-date. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar document_count: The number of documents in the index. Required. + :vartype document_count: int + :ivar storage_size: The amount of storage in bytes consumed by the index. Required. + :vartype storage_size: int + :ivar vector_index_size: The amount of memory in bytes consumed by vectors in the index. + Required. + :vartype vector_index_size: int + """ + + document_count: int = rest_field(name="documentCount", visibility=["read"]) + """The number of documents in the index. Required.""" + storage_size: int = rest_field(name="storageSize", visibility=["read"]) + """The amount of storage in bytes consumed by the index. 
Required.""" + vector_index_size: int = rest_field(name="vectorIndexSize", visibility=["read"]) + """The amount of memory in bytes consumed by vectors in the index. Required.""" + + +class HighWaterMarkChangeDetectionPolicy( + DataChangeDetectionPolicy, discriminator="#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" +): + """Defines a data change detection policy that captures changes based on the value + of a high water mark column. + + + :ivar high_water_mark_column_name: The name of the high water mark column. Required. + :vartype high_water_mark_column_name: str + :ivar odata_type: A URI fragment specifying the type of data change detection policy. Required. + Default value is "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy". + :vartype odata_type: str + """ + + high_water_mark_column_name: str = rest_field(name="highWaterMarkColumnName") + """The name of the high water mark column. Required.""" + odata_type: Literal["#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of data change detection policy. Required. Default value is + \"#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy\".""" + + @overload + def __init__( + self, + *, + high_water_mark_column_name: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy", **kwargs) + + +class HnswAlgorithmConfiguration(VectorSearchAlgorithmConfiguration, discriminator="hnsw"): + """Contains configuration options specific to the HNSW approximate nearest + neighbors algorithm used during indexing and querying. The HNSW algorithm + offers a tunable trade-off between search speed and accuracy. + + + :ivar name: The name to associate with this particular configuration. Required. + :vartype name: str + :ivar parameters: Contains the parameters specific to HNSW algorithm. + :vartype parameters: ~azure.search.documents.models.HnswParameters + :ivar kind: The name of the kind of algorithm being configured for use with vector search. + Required. HNSW (Hierarchical Navigable Small World), a type of approximate nearest + neighbors algorithm. + :vartype kind: str or ~azure.search.documents.models.HNSW + """ + + parameters: Optional["_models.HnswParameters"] = rest_field(name="hnswParameters") + """Contains the parameters specific to HNSW algorithm.""" + kind: Literal[VectorSearchAlgorithmKind.HNSW] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of algorithm being configured for use with vector search. Required. HNSW + (Hierarchical Navigable Small World), a type of approximate nearest + neighbors algorithm.""" + + @overload + def __init__( + self, + *, + name: str, + parameters: Optional["_models.HnswParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchAlgorithmKind.HNSW, **kwargs) + + +class HnswParameters(_model_base.Model): + """Contains the parameters specific to the HNSW algorithm. 
+
+ :ivar m: The number of bi-directional links created for every new element during
+ construction. Increasing this parameter value may improve recall and reduce
+ retrieval times for datasets with high intrinsic dimensionality at the expense
+ of increased memory consumption and longer indexing time.
+ :vartype m: int
+ :ivar ef_construction: The size of the dynamic list containing the nearest neighbors, which is
+ used
+ during index time. Increasing this parameter may improve index quality, at the
+ expense of increased indexing time. At a certain point, increasing this
+ parameter leads to diminishing returns.
+ :vartype ef_construction: int
+ :ivar ef_search: The size of the dynamic list containing the nearest neighbors, which is used
+ during search time. Increasing this parameter may improve search results, at
+ the expense of slower search. At a certain point, increasing this parameter
+ leads to diminishing returns.
+ :vartype ef_search: int
+ :ivar metric: The similarity metric to use for vector comparisons. Known values are: "cosine",
+ "euclidean", "dotProduct", and "hamming".
+ :vartype metric: str or ~azure.search.documents.models.VectorSearchAlgorithmMetric
+ """
+
+ m: Optional[int] = rest_field()
+ """The number of bi-directional links created for every new element during
+ construction. Increasing this parameter value may improve recall and reduce
+ retrieval times for datasets with high intrinsic dimensionality at the expense
+ of increased memory consumption and longer indexing time."""
+ ef_construction: Optional[int] = rest_field(name="efConstruction")
+ """The size of the dynamic list containing the nearest neighbors, which is used
+ during index time. Increasing this parameter may improve index quality, at the
+ expense of increased indexing time. At a certain point, increasing this
+ parameter leads to diminishing returns."""
+ ef_search: Optional[int] = rest_field(name="efSearch")
+ """The size of the dynamic list containing the nearest neighbors, which is used
+ during search time. Increasing this parameter may improve search results, at
+ the expense of slower search. At a certain point, increasing this parameter
+ leads to diminishing returns."""
+ metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = rest_field()
+ """The similarity metric to use for vector comparisons. Known values are: \"cosine\",
+ \"euclidean\", \"dotProduct\", and \"hamming\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ m: Optional[int] = None,
+ ef_construction: Optional[int] = None,
+ ef_search: Optional[int] = None,
+ metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class HybridSearch(_model_base.Model):
+ """The query parameters to configure hybrid search behaviors.
+
+ :ivar max_text_recall_size: Determines the maximum number of documents to be retrieved by the
+ text query
+ portion of a hybrid search request. Those documents will be combined with the
+ documents matching the vector queries to produce a single final list of
+ results. Choosing a larger maxTextRecallSize value will allow retrieving and
+ paging through more documents (using the top and skip parameters), at the cost
+ of higher resource utilization and higher latency. The value needs to be
+ between 1 and 10,000. Default is 1000.
+ :vartype max_text_recall_size: int
+ :ivar count_and_facet_mode: Determines whether the count and facets should include all
+ documents that
+ matched the search query, or only the documents that are retrieved within the
+ 'maxTextRecallSize' window. Known values are: "countRetrievableResults" and "countAllResults".
+ :vartype count_and_facet_mode: str or ~azure.search.documents.models.HybridCountAndFacetMode
+ """
+
+ max_text_recall_size: Optional[int] = rest_field(name="maxTextRecallSize")
+ """Determines the maximum number of documents to be retrieved by the text query
+ portion of a hybrid search request. Those documents will be combined with the
+ documents matching the vector queries to produce a single final list of
+ results. Choosing a larger maxTextRecallSize value will allow retrieving and
+ paging through more documents (using the top and skip parameters), at the cost
+ of higher resource utilization and higher latency. The value needs to be
+ between 1 and 10,000. Default is 1000."""
+ count_and_facet_mode: Optional[Union[str, "_models.HybridCountAndFacetMode"]] = rest_field(name="countAndFacetMode")
+ """Determines whether the count and facets should include all documents that
+ matched the search query, or only the documents that are retrieved within the
+ 'maxTextRecallSize' window. Known values are: \"countRetrievableResults\" and
+ \"countAllResults\"."""
+
+ @overload
+ def __init__(
+ self,
+ *,
+ max_text_recall_size: Optional[int] = None,
+ count_and_facet_mode: Optional[Union[str, "_models.HybridCountAndFacetMode"]] = None,
+ ) -> None: ...
+
+ @overload
+ def __init__(self, mapping: Mapping[str, Any]) -> None:
+ """
+ :param mapping: raw JSON to initialize the model.
+ :type mapping: Mapping[str, Any]
+ """
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+
+class ImageAnalysisSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vision.ImageAnalysisSkill"):
+ """A skill that analyzes image files. It extracts a rich set of visual features
+ based on the image content.
+
+
+ :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
+ with no name defined will be given a default name of its 1-based index in the
+ skills array, prefixed with the character '#'.
+ :vartype name: str
+ :ivar description: The description of the skill which describes the inputs, outputs, and usage
+ of
+ the skill.
+ :vartype description: str
+ :ivar context: Represents the level at which operations take place, such as the document root
+ or document content (for example, /document or /document/content). The default
+ is /document.
+ :vartype context: str
+ :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of
+ an upstream skill. Required.
+ :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry]
+ :ivar outputs: The output of a skill is either a field in a search index, or a value that can
+ be consumed as an input by another skill. Required.
+ :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry]
+ :ivar default_language_code: A value indicating which language code to use. Default is ``en``.
+ Known values are: "ar", "az", "bg", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "es", "et", + "eu", "fi", "fr", "ga", "gl", "he", "hi", "hr", "hu", "id", "it", "ja", "kk", "ko", "lt", "lv", + "mk", "ms", "nb", "nl", "pl", "prs", "pt-BR", "pt", "pt-PT", "ro", "ru", "sk", "sl", "sr-Cyrl", + "sr-Latn", "sv", "th", "tr", "uk", "vi", "zh", "zh-Hans", and "zh-Hant". + :vartype default_language_code: str or + ~azure.search.documents.models.ImageAnalysisSkillLanguage + :ivar visual_features: A list of visual features. + :vartype visual_features: list[str or ~azure.search.documents.models.VisualFeature] + :ivar details: A string indicating which domain-specific details to return. + :vartype details: list[str or ~azure.search.documents.models.ImageDetail] + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Vision.ImageAnalysisSkill". + :vartype odata_type: str + """ + + default_language_code: Optional[Union[str, "_models.ImageAnalysisSkillLanguage"]] = rest_field( + name="defaultLanguageCode" + ) + """A value indicating which language code to use. Default is ``en``. Known values are: \"ar\", + \"az\", \"bg\", \"bs\", \"ca\", \"cs\", \"cy\", \"da\", \"de\", \"el\", \"en\", \"es\", \"et\", + \"eu\", \"fi\", \"fr\", \"ga\", \"gl\", \"he\", \"hi\", \"hr\", \"hu\", \"id\", \"it\", \"ja\", + \"kk\", \"ko\", \"lt\", \"lv\", \"mk\", \"ms\", \"nb\", \"nl\", \"pl\", \"prs\", \"pt-BR\", + \"pt\", \"pt-PT\", \"ro\", \"ru\", \"sk\", \"sl\", \"sr-Cyrl\", \"sr-Latn\", \"sv\", \"th\", + \"tr\", \"uk\", \"vi\", \"zh\", \"zh-Hans\", and \"zh-Hant\".""" + visual_features: Optional[List[Union[str, "_models.VisualFeature"]]] = rest_field(name="visualFeatures") + """A list of visual features.""" + details: Optional[List[Union[str, "_models.ImageDetail"]]] = rest_field() + """A string indicating which domain-specific details to return.""" + odata_type: Literal["#Microsoft.Skills.Vision.ImageAnalysisSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Vision.ImageAnalysisSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "_models.ImageAnalysisSkillLanguage"]] = None, + visual_features: Optional[List[Union[str, "_models.VisualFeature"]]] = None, + details: Optional[List[Union[str, "_models.ImageDetail"]]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Vision.ImageAnalysisSkill", **kwargs) + + +class IndexAction(_model_base.Model): + """Represents an index action that operates on a document. + + :ivar action_type: The operation to perform on a document in an indexing batch. Known values + are: "upload", "merge", "mergeOrUpload", and "delete". + :vartype action_type: str or ~azure.search.documents.models.IndexActionType + """ + + action_type: Optional[Union[str, "_models.IndexActionType"]] = rest_field(name="@search.action") + """The operation to perform on a document in an indexing batch. 
Known values are: \"upload\", + \"merge\", \"mergeOrUpload\", and \"delete\".""" + + @overload + def __init__( + self, + *, + action_type: Optional[Union[str, "_models.IndexActionType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class IndexBatch(_model_base.Model): + """Contains a batch of document write actions to send to the index. + + All required parameters must be populated in order to send to server. + + :ivar actions: The actions in the batch. Required. + :vartype actions: list[~azure.search.documents.models.IndexAction] + """ + + actions: List["_models.IndexAction"] = rest_field(name="value") + """The actions in the batch. Required.""" + + @overload + def __init__( + self, + *, + actions: List["_models.IndexAction"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class IndexDocumentsResult(_model_base.Model): + """Response containing the status of operations for all documents in the indexing + request. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar results: The list of status information for each document in the indexing request. + Required. + :vartype results: list[~azure.search.documents.models.IndexingResult] + """ + + results: List["_models.IndexingResult"] = rest_field(name="value", visibility=["read"]) + """The list of status information for each document in the indexing request. Required.""" + + +class IndexerCurrentState(_model_base.Model): + """Represents all of the state that defines and dictates the indexer's current + execution. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar mode: The mode the indexer is running in. Known values are: "indexingAllDocs" and + "indexingResetDocs". + :vartype mode: str or ~azure.search.documents.models.IndexingMode + :ivar all_docs_initial_change_tracking_state: Change tracking state used when indexing starts + on all documents in the + datasource. + :vartype all_docs_initial_change_tracking_state: str + :ivar all_docs_final_change_tracking_state: Change tracking state value when indexing finishes + on all documents in the + datasource. + :vartype all_docs_final_change_tracking_state: str + :ivar reset_docs_initial_change_tracking_state: Change tracking state used when indexing starts + on select, reset documents in + the datasource. + :vartype reset_docs_initial_change_tracking_state: str + :ivar reset_docs_final_change_tracking_state: Change tracking state value when indexing + finishes on select, reset documents + in the datasource. + :vartype reset_docs_final_change_tracking_state: str + :ivar reset_document_keys: The list of document keys that have been reset. The document key is + the + document's unique identifier for the data in the search index. The indexer will + prioritize selectively re-ingesting these keys. + :vartype reset_document_keys: list[str] + :ivar reset_datasource_document_ids: The list of datasource document ids that have been reset. 
+ The datasource + document id is the unique identifier for the data in the datasource. The + indexer will prioritize selectively re-ingesting these ids. + :vartype reset_datasource_document_ids: list[str] + """ + + mode: Optional[Union[str, "_models.IndexingMode"]] = rest_field(visibility=["read"]) + """The mode the indexer is running in. Known values are: \"indexingAllDocs\" and + \"indexingResetDocs\".""" + all_docs_initial_change_tracking_state: Optional[str] = rest_field( + name="allDocsInitialChangeTrackingState", visibility=["read"] + ) + """Change tracking state used when indexing starts on all documents in the + datasource.""" + all_docs_final_change_tracking_state: Optional[str] = rest_field( + name="allDocsFinalChangeTrackingState", visibility=["read"] + ) + """Change tracking state value when indexing finishes on all documents in the + datasource.""" + reset_docs_initial_change_tracking_state: Optional[str] = rest_field( + name="resetDocsInitialChangeTrackingState", visibility=["read"] + ) + """Change tracking state used when indexing starts on select, reset documents in + the datasource.""" + reset_docs_final_change_tracking_state: Optional[str] = rest_field( + name="resetDocsFinalChangeTrackingState", visibility=["read"] + ) + """Change tracking state value when indexing finishes on select, reset documents + in the datasource.""" + reset_document_keys: Optional[List[str]] = rest_field(name="resetDocumentKeys", visibility=["read"]) + """The list of document keys that have been reset. The document key is the + document's unique identifier for the data in the search index. The indexer will + prioritize selectively re-ingesting these keys.""" + reset_datasource_document_ids: Optional[List[str]] = rest_field( + name="resetDatasourceDocumentIds", visibility=["read"] + ) + """The list of datasource document ids that have been reset. The datasource + document id is the unique identifier for the data in the datasource. The + indexer will prioritize selectively re-ingesting these ids.""" + + +class IndexerExecutionResult(_model_base.Model): + """Represents the result of an individual indexer execution. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar status: The outcome of this indexer execution. Required. Known values are: + "transientFailure", "success", "inProgress", and "reset". + :vartype status: str or ~azure.search.documents.models.IndexerExecutionStatus + :ivar status_detail: The outcome of this indexer execution. "resetDocs" + :vartype status_detail: str or ~azure.search.documents.models.IndexerExecutionStatusDetail + :ivar current_state: All of the state that defines and dictates the indexer's current + execution. + :vartype current_state: ~azure.search.documents.models.IndexerCurrentState + :ivar error_message: The error message indicating the top-level error, if any. + :vartype error_message: str + :ivar start_time: The start time of this indexer execution. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of this indexer execution, if the execution has already completed. + :vartype end_time: ~datetime.datetime + :ivar errors: The item-level indexing errors. Required. + :vartype errors: list[~azure.search.documents.models.SearchIndexerError] + :ivar warnings: The item-level indexing warnings. Required. + :vartype warnings: list[~azure.search.documents.models.SearchIndexerWarning] + :ivar item_count: The number of items that were processed during this indexer execution. 
This + includes both successfully processed items and items where indexing was + attempted but failed. Required. + :vartype item_count: int + :ivar failed_item_count: The number of items that failed to be indexed during this indexer + execution. Required. + :vartype failed_item_count: int + :ivar initial_tracking_state: Change tracking state with which an indexer execution started. + :vartype initial_tracking_state: str + :ivar final_tracking_state: Change tracking state with which an indexer execution finished. + :vartype final_tracking_state: str + """ + + status: Union[str, "_models.IndexerExecutionStatus"] = rest_field(visibility=["read"]) + """The outcome of this indexer execution. Required. Known values are: \"transientFailure\", + \"success\", \"inProgress\", and \"reset\".""" + status_detail: Optional[Union[str, "_models.IndexerExecutionStatusDetail"]] = rest_field( + name="statusDetail", visibility=["read"] + ) + """The outcome of this indexer execution. \"resetDocs\"""" + current_state: Optional["_models.IndexerCurrentState"] = rest_field(name="currentState", visibility=["read"]) + """All of the state that defines and dictates the indexer's current execution.""" + error_message: Optional[str] = rest_field(name="errorMessage", visibility=["read"]) + """The error message indicating the top-level error, if any.""" + start_time: Optional[datetime.datetime] = rest_field(name="startTime", visibility=["read"], format="rfc3339") + """The start time of this indexer execution.""" + end_time: Optional[datetime.datetime] = rest_field(name="endTime", visibility=["read"], format="rfc3339") + """The end time of this indexer execution, if the execution has already completed.""" + errors: List["_models.SearchIndexerError"] = rest_field(visibility=["read"]) + """The item-level indexing errors. Required.""" + warnings: List["_models.SearchIndexerWarning"] = rest_field(visibility=["read"]) + """The item-level indexing warnings. Required.""" + item_count: int = rest_field(name="itemsProcessed", visibility=["read"]) + """The number of items that were processed during this indexer execution. This + includes both successfully processed items and items where indexing was + attempted but failed. Required.""" + failed_item_count: int = rest_field(name="itemsFailed", visibility=["read"]) + """The number of items that failed to be indexed during this indexer execution. Required.""" + initial_tracking_state: Optional[str] = rest_field(name="initialTrackingState", visibility=["read"]) + """Change tracking state with which an indexer execution started.""" + final_tracking_state: Optional[str] = rest_field(name="finalTrackingState", visibility=["read"]) + """Change tracking state with which an indexer execution finished.""" + + +class IndexingParameters(_model_base.Model): + """Represents parameters for indexer execution. + + :ivar batch_size: The number of items that are read from the data source and indexed as a + single + batch in order to improve performance. The default depends on the data source + type. + :vartype batch_size: int + :ivar max_failed_items: The maximum number of items that can fail indexing for indexer + execution to + still be considered successful. -1 means no limit. Default is 0. + :vartype max_failed_items: int + :ivar max_failed_items_per_batch: The maximum number of items in a single batch that can fail + indexing for the + batch to still be considered successful. -1 means no limit. Default is 0. 
+ :vartype max_failed_items_per_batch: int + :ivar configuration: A dictionary of indexer-specific configuration properties. Each name is + the + name of a specific property. Each value must be of a primitive type. + :vartype configuration: ~azure.search.documents.models.IndexingParametersConfiguration + """ + + batch_size: Optional[int] = rest_field(name="batchSize") + """The number of items that are read from the data source and indexed as a single + batch in order to improve performance. The default depends on the data source + type.""" + max_failed_items: Optional[int] = rest_field(name="maxFailedItems") + """The maximum number of items that can fail indexing for indexer execution to + still be considered successful. -1 means no limit. Default is 0.""" + max_failed_items_per_batch: Optional[int] = rest_field(name="maxFailedItemsPerBatch") + """The maximum number of items in a single batch that can fail indexing for the + batch to still be considered successful. -1 means no limit. Default is 0.""" + configuration: Optional["_models.IndexingParametersConfiguration"] = rest_field() + """A dictionary of indexer-specific configuration properties. Each name is the + name of a specific property. Each value must be of a primitive type.""" + + @overload + def __init__( + self, + *, + batch_size: Optional[int] = None, + max_failed_items: Optional[int] = None, + max_failed_items_per_batch: Optional[int] = None, + configuration: Optional["_models.IndexingParametersConfiguration"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class IndexingParametersConfiguration(_model_base.Model): + """A dictionary of indexer-specific configuration properties. Each name is the + name of a specific property. Each value must be of a primitive type. + + :ivar parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. + Known values are: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines", and + "markdown". + :vartype parsing_mode: str or ~azure.search.documents.models.BlobIndexerParsingMode + :ivar excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore when + processing from + Azure blob storage. For example, you could exclude ".png, .mp4" to skip over + those files during indexing. + :vartype excluded_file_name_extensions: str + :ivar indexed_file_name_extensions: Comma-delimited list of filename extensions to select when + processing from + Azure blob storage. For example, you could focus indexing on specific + application files ".docx, .pptx, .msg" to specifically include those file + types. + :vartype indexed_file_name_extensions: str + :ivar fail_on_unsupported_content_type: For Azure blobs, set to false if you want to continue + indexing when an + unsupported content type is encountered, and you don't know all the content + types (file extensions) in advance. + :vartype fail_on_unsupported_content_type: bool + :ivar fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue + indexing if a document + fails indexing. + :vartype fail_on_unprocessable_document: bool + :ivar index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this property + to true to still index storage metadata for + blob content that is too large to process. 
Oversized blobs are treated as + errors by default. For limits on blob size, see + https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. + :vartype index_storage_metadata_only_for_oversized_documents: bool + :ivar delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column + headers, useful for + mapping source fields to destination fields in an index. + :vartype delimited_text_headers: str + :ivar delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character + delimiter for CSV + files where each line starts a new document (for example, "|"). + :vartype delimited_text_delimiter: str + :ivar first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line of + each blob contains + headers. + :vartype first_line_contains_headers: bool + :ivar markdown_parsing_submode: Specifies the submode that will determine whether a markdown + file will be + parsed into exactly one search document or multiple search documents. Default + is ``oneToMany``. Known values are: "oneToMany" and "oneToOne". + :vartype markdown_parsing_submode: str or ~azure.search.documents.models.MarkdownParsingSubmode + :ivar markdown_header_depth: Specifies the max header depth that will be considered while + grouping markdown + content. Default is ``h6``. Known values are: "h1", "h2", "h3", "h4", "h5", and "h6". + :vartype markdown_header_depth: str or ~azure.search.documents.models.MarkdownHeaderDepth + :ivar document_root: For JSON arrays, given a structured or semi-structured document, you can + specify a path to the array using this property. + :vartype document_root: str + :ivar data_to_extract: Specifies the data to extract from Azure blob storage and tells the + indexer + which data to extract from image content when "imageAction" is set to a value + other than "none". This applies to embedded image content in a .PDF or other + application, or image files such as .jpg and .png, in Azure blobs. Known values are: + "storageMetadata", "allMetadata", and "contentAndMetadata". + :vartype data_to_extract: str or ~azure.search.documents.models.BlobIndexerDataToExtract + :ivar image_action: Determines how to process embedded images and image files in Azure blob + storage. Setting the "imageAction" configuration to any value other than + "none" requires that a skillset also be attached to that indexer. Known values are: "none", + "generateNormalizedImages", and "generateNormalizedImagePerPage". + :vartype image_action: str or ~azure.search.documents.models.BlobIndexerImageAction + :ivar allow_skillset_to_read_file_data: If true, will create a path //document//file_data that + is an object + representing the original file data downloaded from your blob data source. + This allows you to pass the original file data to a custom skill for processing + within the enrichment pipeline, or to the Document Extraction skill. + :vartype allow_skillset_to_read_file_data: bool + :ivar pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files in + Azure blob storage. Known values are: "none" and "detectAngles". + :vartype pdf_text_rotation_algorithm: str or + ~azure.search.documents.models.BlobIndexerPDFTextRotationAlgorithm + :ivar execution_environment: Specifies the environment in which the indexer should execute. + Known values are: "standard" and "private". 
+ :vartype execution_environment: str or + ~azure.search.documents.models.IndexerExecutionEnvironment + :ivar query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL database + data + sources, specified in the format "hh:mm:ss". + :vartype query_timeout: str + """ + + parsing_mode: Optional[Union[str, "_models.BlobIndexerParsingMode"]] = rest_field(name="parsingMode") + """Represents the parsing mode for indexing from an Azure blob data source. Known values are: + \"default\", \"text\", \"delimitedText\", \"json\", \"jsonArray\", \"jsonLines\", and + \"markdown\".""" + excluded_file_name_extensions: Optional[str] = rest_field(name="excludedFileNameExtensions") + """Comma-delimited list of filename extensions to ignore when processing from + Azure blob storage. For example, you could exclude \".png, .mp4\" to skip over + those files during indexing.""" + indexed_file_name_extensions: Optional[str] = rest_field(name="indexedFileNameExtensions") + """Comma-delimited list of filename extensions to select when processing from + Azure blob storage. For example, you could focus indexing on specific + application files \".docx, .pptx, .msg\" to specifically include those file + types.""" + fail_on_unsupported_content_type: Optional[bool] = rest_field(name="failOnUnsupportedContentType") + """For Azure blobs, set to false if you want to continue indexing when an + unsupported content type is encountered, and you don't know all the content + types (file extensions) in advance.""" + fail_on_unprocessable_document: Optional[bool] = rest_field(name="failOnUnprocessableDocument") + """For Azure blobs, set to false if you want to continue indexing if a document + fails indexing.""" + index_storage_metadata_only_for_oversized_documents: Optional[bool] = rest_field( + name="indexStorageMetadataOnlyForOversizedDocuments" + ) + """For Azure blobs, set this property to true to still index storage metadata for + blob content that is too large to process. Oversized blobs are treated as + errors by default. For limits on blob size, see + https://learn.microsoft.com/azure/search/search-limits-quotas-capacity.""" + delimited_text_headers: Optional[str] = rest_field(name="delimitedTextHeaders") + """For CSV blobs, specifies a comma-delimited list of column headers, useful for + mapping source fields to destination fields in an index.""" + delimited_text_delimiter: Optional[str] = rest_field(name="delimitedTextDelimiter") + """For CSV blobs, specifies the end-of-line single-character delimiter for CSV + files where each line starts a new document (for example, \"|\").""" + first_line_contains_headers: Optional[bool] = rest_field(name="firstLineContainsHeaders") + """For CSV blobs, indicates that the first (non-blank) line of each blob contains + headers.""" + markdown_parsing_submode: Optional[Union[str, "_models.MarkdownParsingSubmode"]] = rest_field( + name="markdownParsingSubmode" + ) + """Specifies the submode that will determine whether a markdown file will be + parsed into exactly one search document or multiple search documents. Default + is ``oneToMany``. Known values are: \"oneToMany\" and \"oneToOne\".""" + markdown_header_depth: Optional[Union[str, "_models.MarkdownHeaderDepth"]] = rest_field(name="markdownHeaderDepth") + """Specifies the max header depth that will be considered while grouping markdown + content. Default is ``h6``. 
Known values are: \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", and + \"h6\".""" + document_root: Optional[str] = rest_field(name="documentRoot") + """For JSON arrays, given a structured or semi-structured document, you can + specify a path to the array using this property.""" + data_to_extract: Optional[Union[str, "_models.BlobIndexerDataToExtract"]] = rest_field(name="dataToExtract") + """Specifies the data to extract from Azure blob storage and tells the indexer + which data to extract from image content when \"imageAction\" is set to a value + other than \"none\". This applies to embedded image content in a .PDF or other + application, or image files such as .jpg and .png, in Azure blobs. Known values are: + \"storageMetadata\", \"allMetadata\", and \"contentAndMetadata\".""" + image_action: Optional[Union[str, "_models.BlobIndexerImageAction"]] = rest_field(name="imageAction") + """Determines how to process embedded images and image files in Azure blob + storage. Setting the \"imageAction\" configuration to any value other than + \"none\" requires that a skillset also be attached to that indexer. Known values are: \"none\", + \"generateNormalizedImages\", and \"generateNormalizedImagePerPage\".""" + allow_skillset_to_read_file_data: Optional[bool] = rest_field(name="allowSkillsetToReadFileData") + """If true, will create a path //document//file_data that is an object + representing the original file data downloaded from your blob data source. + This allows you to pass the original file data to a custom skill for processing + within the enrichment pipeline, or to the Document Extraction skill.""" + pdf_text_rotation_algorithm: Optional[Union[str, "_models.BlobIndexerPDFTextRotationAlgorithm"]] = rest_field( + name="pdfTextRotationAlgorithm" + ) + """Determines algorithm for text extraction from PDF files in Azure blob storage. Known values + are: \"none\" and \"detectAngles\".""" + execution_environment: Optional[Union[str, "_models.IndexerExecutionEnvironment"]] = rest_field( + name="executionEnvironment" + ) + """Specifies the environment in which the indexer should execute. 
Known values are: \"standard\" + and \"private\".""" + query_timeout: Optional[str] = rest_field(name="queryTimeout") + """Increases the timeout beyond the 5-minute default for Azure SQL database data + sources, specified in the format \"hh:mm:ss\".""" + + @overload + def __init__( + self, + *, + parsing_mode: Optional[Union[str, "_models.BlobIndexerParsingMode"]] = None, + excluded_file_name_extensions: Optional[str] = None, + indexed_file_name_extensions: Optional[str] = None, + fail_on_unsupported_content_type: Optional[bool] = None, + fail_on_unprocessable_document: Optional[bool] = None, + index_storage_metadata_only_for_oversized_documents: Optional[bool] = None, + delimited_text_headers: Optional[str] = None, + delimited_text_delimiter: Optional[str] = None, + first_line_contains_headers: Optional[bool] = None, + markdown_parsing_submode: Optional[Union[str, "_models.MarkdownParsingSubmode"]] = None, + markdown_header_depth: Optional[Union[str, "_models.MarkdownHeaderDepth"]] = None, + document_root: Optional[str] = None, + data_to_extract: Optional[Union[str, "_models.BlobIndexerDataToExtract"]] = None, + image_action: Optional[Union[str, "_models.BlobIndexerImageAction"]] = None, + allow_skillset_to_read_file_data: Optional[bool] = None, + pdf_text_rotation_algorithm: Optional[Union[str, "_models.BlobIndexerPDFTextRotationAlgorithm"]] = None, + execution_environment: Optional[Union[str, "_models.IndexerExecutionEnvironment"]] = None, + query_timeout: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class IndexingResult(_model_base.Model): + """Status of an indexing operation for a single document. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar key: The key of a document that was in the indexing request. Required. + :vartype key: str + :ivar error_message: The error message explaining why the indexing operation failed for the + document + identified by the key; null if indexing succeeded. + :vartype error_message: str + :ivar succeeded: A value indicating whether the indexing operation succeeded for the document + identified by the key. Required. + :vartype succeeded: bool + :ivar status_code: The status code of the indexing operation. Possible values include: 200 for + a + successful update or delete, 201 for successful document creation, 400 for a + malformed input document, 404 for document not found, 409 for a version + conflict, 422 when the index is temporarily unavailable, or 503 for when the + service is too busy. Required. + :vartype status_code: int + """ + + key: str = rest_field(visibility=["read"]) + """The key of a document that was in the indexing request. Required.""" + error_message: Optional[str] = rest_field(name="errorMessage", visibility=["read"]) + """The error message explaining why the indexing operation failed for the document + identified by the key; null if indexing succeeded.""" + succeeded: bool = rest_field(name="status", visibility=["read"]) + """A value indicating whether the indexing operation succeeded for the document + identified by the key. Required.""" + status_code: int = rest_field(name="statusCode", visibility=["read"]) + """The status code of the indexing operation. 
Possible values include: 200 for a + successful update or delete, 201 for successful document creation, 400 for a + malformed input document, 404 for document not found, 409 for a version + conflict, 422 when the index is temporarily unavailable, or 503 for when the + service is too busy. Required.""" + + +class IndexingSchedule(_model_base.Model): + """Represents a schedule for indexer execution. + + + :ivar interval: The interval of time between indexer executions. Required. + :vartype interval: ~datetime.timedelta + :ivar start_time: The time when an indexer should start running. + :vartype start_time: ~datetime.datetime + """ + + interval: datetime.timedelta = rest_field() + """The interval of time between indexer executions. Required.""" + start_time: Optional[datetime.datetime] = rest_field(name="startTime", format="rfc3339") + """The time when an indexer should start running.""" + + @overload + def __init__( + self, + *, + interval: datetime.timedelta, + start_time: Optional[datetime.datetime] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class InputFieldMappingEntry(_model_base.Model): + """Input field mapping for a skill. + + + :ivar name: The name of the input. Required. + :vartype name: str + :ivar source: The source of the input. + :vartype source: str + :ivar source_context: The source context used for selecting recursive inputs. + :vartype source_context: str + :ivar inputs: The recursive inputs used when creating a complex type. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + """ + + name: str = rest_field() + """The name of the input. Required.""" + source: Optional[str] = rest_field() + """The source of the input.""" + source_context: Optional[str] = rest_field(name="sourceContext") + """The source context used for selecting recursive inputs.""" + inputs: Optional[List["_models.InputFieldMappingEntry"]] = rest_field() + """The recursive inputs used when creating a complex type.""" + + @overload + def __init__( + self, + *, + name: str, + source: Optional[str] = None, + source_context: Optional[str] = None, + inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class KeepTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.KeepTokenFilter"): + """A token filter that only keeps tokens with text contained in a specified list + of words. This token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar keep_words: The list of words to keep. Required. + :vartype keep_words: list[str] + :ivar lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. + :vartype lower_case_keep_words: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. 
Default value + is "#Microsoft.Azure.Search.KeepTokenFilter". + :vartype odata_type: str + """ + + keep_words: List[str] = rest_field(name="keepWords") + """The list of words to keep. Required.""" + lower_case_keep_words: Optional[bool] = rest_field(name="keepWordsCase") + """A value indicating whether to lower case all words first. Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.KeepTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.KeepTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + keep_words: List[str], + lower_case_keep_words: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.KeepTokenFilter", **kwargs) + + +class KeyPhraseExtractionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.KeyPhraseExtractionSkill"): + """A skill that uses text analytics for key phrase extraction. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is ``en``. + Known values are: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt-PT", + "pt-BR", "ru", "es", and "sv". + :vartype default_language_code: str or + ~azure.search.documents.models.KeyPhraseExtractionSkillLanguage + :ivar max_key_phrase_count: A number indicating how many key phrases to return. If absent, all + identified + key phrases will be returned. + :vartype max_key_phrase_count: int + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It + will default to the latest available when not specified. We recommend you do + not specify this value unless absolutely necessary. + :vartype model_version: str + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.KeyPhraseExtractionSkill". + :vartype odata_type: str + """ + + default_language_code: Optional[Union[str, "_models.KeyPhraseExtractionSkillLanguage"]] = rest_field( + name="defaultLanguageCode" + ) + """A value indicating which language code to use. Default is ``en``. 
Known values are: \"da\", + \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"it\", \"ja\", \"ko\", \"no\", \"pl\", \"pt-PT\", + \"pt-BR\", \"ru\", \"es\", and \"sv\".""" + max_key_phrase_count: Optional[int] = rest_field(name="maxKeyPhraseCount") + """A number indicating how many key phrases to return. If absent, all identified + key phrases will be returned.""" + model_version: Optional[str] = rest_field(name="modelVersion") + """The version of the model to use when calling the Text Analytics service. It + will default to the latest available when not specified. We recommend you do + not specify this value unless absolutely necessary.""" + odata_type: Literal["#Microsoft.Skills.Text.KeyPhraseExtractionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "_models.KeyPhraseExtractionSkillLanguage"]] = None, + max_key_phrase_count: Optional[int] = None, + model_version: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.KeyPhraseExtractionSkill", **kwargs) + + +class KeywordMarkerTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.KeywordMarkerTokenFilter"): + """Marks terms as keywords. This token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar keywords: A list of words to mark as keywords. Required. + :vartype keywords: list[str] + :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted + to + lower case first. Default is false. + :vartype ignore_case: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.KeywordMarkerTokenFilter". + :vartype odata_type: str + """ + + keywords: List[str] = rest_field() + """A list of words to mark as keywords. Required.""" + ignore_case: Optional[bool] = rest_field(name="ignoreCase") + """A value indicating whether to ignore case. If true, all words are converted to + lower case first. Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.KeywordMarkerTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.KeywordMarkerTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + keywords: List[str], + ignore_case: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
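For the KeyPhraseExtractionSkill defined above, a hedged construction sketch follows. It wires the required inputs and outputs lists with InputFieldMappingEntry and OutputFieldMappingEntry (the latter is defined later in this file); the "/document/pages/*" context and the "text" input name are the conventional skillset wiring and are assumed here, as is the azure.search.documents.indexes.models import path.

# Import path assumed from the released public surface.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    KeyPhraseExtractionSkill,
    OutputFieldMappingEntry,
)

# Extract up to ten key phrases from each split page of English text.
key_phrase_skill = KeyPhraseExtractionSkill(
    name="keyphrase-skill",
    description="Key phrase extraction over split pages",
    context="/document/pages/*",
    default_language_code="en",
    max_key_phrase_count=10,
    inputs=[InputFieldMappingEntry(name="text", source="/document/pages/*")],
    outputs=[OutputFieldMappingEntry(name="keyPhrases", target_name="keyPhrases")],
)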
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.KeywordMarkerTokenFilter", **kwargs) + + +class KeywordTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.KeywordTokenizer"): + """Emits the entire input as a single token. This tokenizer is implemented using + Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar buffer_size: The read buffer size in bytes. Default is 256. + :vartype buffer_size: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.KeywordTokenizer". + :vartype odata_type: str + """ + + buffer_size: Optional[int] = rest_field(name="bufferSize") + """The read buffer size in bytes. Default is 256.""" + odata_type: Literal["#Microsoft.Azure.Search.KeywordTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.KeywordTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + buffer_size: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.KeywordTokenizer", **kwargs) + + +class KeywordTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.KeywordTokenizerV2"): + """Emits the entire input as a single token. This tokenizer is implemented using + Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 256. Tokens longer than the + maximum length + are split. The maximum token length that can be used is 300 characters. + :vartype max_token_length: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.KeywordTokenizerV2". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Default is 256. Tokens longer than the maximum length + are split. The maximum token length that can be used is 300 characters.""" + odata_type: Literal["#Microsoft.Azure.Search.KeywordTokenizerV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.KeywordTokenizerV2\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.KeywordTokenizerV2", **kwargs) + + +class LanguageDetectionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.LanguageDetectionSkill"): + """A skill that detects the language of input text and reports a single language + code for every document submitted on the request. The language code is paired + with a score indicating the confidence of the analysis. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar default_country_hint: A country code to use as a hint to the language detection model if + it cannot + disambiguate the language. + :vartype default_country_hint: str + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It + will default to the latest available when not specified. We recommend you do + not specify this value unless absolutely necessary. + :vartype model_version: str + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.LanguageDetectionSkill". + :vartype odata_type: str + """ + + default_country_hint: Optional[str] = rest_field(name="defaultCountryHint") + """A country code to use as a hint to the language detection model if it cannot + disambiguate the language.""" + model_version: Optional[str] = rest_field(name="modelVersion") + """The version of the model to use when calling the Text Analytics service. It + will default to the latest available when not specified. We recommend you do + not specify this value unless absolutely necessary.""" + odata_type: Literal["#Microsoft.Skills.Text.LanguageDetectionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.LanguageDetectionSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_country_hint: Optional[str] = None, + model_version: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.LanguageDetectionSkill", **kwargs) + + +class LengthTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.LengthTokenFilter"): + """Removes words that are too long or too short. This token filter is implemented + using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max. + :vartype min_length: int + :ivar max_length: The maximum length in characters. Default and maximum is 300. + :vartype max_length: int + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.LengthTokenFilter". + :vartype odata_type: str + """ + + min_length: Optional[int] = rest_field(name="min") + """The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max.""" + max_length: Optional[int] = rest_field(name="max") + """The maximum length in characters. Default and maximum is 300.""" + odata_type: Literal["#Microsoft.Azure.Search.LengthTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.LengthTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + min_length: Optional[int] = None, + max_length: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.LengthTokenFilter", **kwargs) + + +class LimitTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.LimitTokenFilter"): + """Limits the number of tokens while indexing. This token filter is implemented + using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar max_token_count: The maximum number of tokens to produce. Default is 1. + :vartype max_token_count: int + :ivar consume_all_tokens: A value indicating whether all tokens from the input must be consumed + even if + maxTokenCount is reached. Default is false. + :vartype consume_all_tokens: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.LimitTokenFilter". + :vartype odata_type: str + """ + + max_token_count: Optional[int] = rest_field(name="maxTokenCount") + """The maximum number of tokens to produce. Default is 1.""" + consume_all_tokens: Optional[bool] = rest_field(name="consumeAllTokens") + """A value indicating whether all tokens from the input must be consumed even if + maxTokenCount is reached. 
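Every model in this file repeats the same pair of __init__ overloads: keyword arguments using the Python attribute names, or a single positional mapping of raw JSON keyed by the wire names declared in each rest_field. A small sketch using the LengthTokenFilter defined above; the import path is assumed from current releases, and whether the public re-export keeps the mapping overload depends on how the hand-written layer wraps these generated models.

# Import path assumed; the mapping overload comes from the new _model_base.Model base class.
from azure.search.documents.indexes.models import LengthTokenFilter

# Keyword form: Python attribute names (min_length/max_length).
length_filter = LengthTokenFilter(name="len-2-50", min_length=2, max_length=50)

# Mapping form: raw JSON keyed by the wire names ("min"/"max", "@odata.type").
length_filter_from_json = LengthTokenFilter(
    {
        "@odata.type": "#Microsoft.Azure.Search.LengthTokenFilter",
        "name": "len-2-50",
        "min": 2,
        "max": 50,
    }
)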
Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.LimitTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.LimitTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_count: Optional[int] = None, + consume_all_tokens: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.LimitTokenFilter", **kwargs) + + +class ListDataSourcesResult(_model_base.Model): + """Response from a List Datasources request. If successful, it includes the full + definitions of all datasources. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar data_sources: The datasources in the Search service. Required. + :vartype data_sources: list[~azure.search.documents.models.SearchIndexerDataSource] + """ + + data_sources: List["_models.SearchIndexerDataSource"] = rest_field(name="value", visibility=["read"]) + """The datasources in the Search service. Required.""" + + +class ListIndexersResult(_model_base.Model): + """Response from a List Indexers request. If successful, it includes the full + definitions of all indexers. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar indexers: The indexers in the Search service. Required. + :vartype indexers: list[~azure.search.documents.models.SearchIndexer] + """ + + indexers: List["_models.SearchIndexer"] = rest_field(name="value", visibility=["read"]) + """The indexers in the Search service. Required.""" + + +class ListSkillsetsResult(_model_base.Model): + """Response from a list skillset request. If successful, it includes the full + definitions of all skillsets. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar skillsets: The skillsets defined in the Search service. Required. + :vartype skillsets: list[~azure.search.documents.models.SearchIndexerSkillset] + """ + + skillsets: List["_models.SearchIndexerSkillset"] = rest_field(name="value", visibility=["read"]) + """The skillsets defined in the Search service. Required.""" + + +class ListSynonymMapsResult(_model_base.Model): + """Response from a List SynonymMaps request. If successful, it includes the full + definitions of all synonym maps. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar synonym_maps: The synonym maps in the Search service. Required. + :vartype synonym_maps: list[~azure.search.documents.models.SynonymMap] + """ + + synonym_maps: List["_models.SynonymMap"] = rest_field(name="value", visibility=["read"]) + """The synonym maps in the Search service. Required.""" + + +class LuceneStandardAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.StandardAnalyzer"): + """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase + filter and stop filter. + + + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. 
+ :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length + are split. The maximum token length that can be used is 300 characters. + :vartype max_token_length: int + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] + :ivar odata_type: A URI fragment specifying the type of analyzer. Required. Default value is + "#Microsoft.Azure.Search.StandardAnalyzer". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Default is 255. Tokens longer than the maximum length + are split. The maximum token length that can be used is 300 characters.""" + stopwords: Optional[List[str]] = rest_field() + """A list of stopwords.""" + odata_type: Literal["#Microsoft.Azure.Search.StandardAnalyzer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of analyzer. Required. Default value is + \"#Microsoft.Azure.Search.StandardAnalyzer\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + stopwords: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StandardAnalyzer", **kwargs) + + +class LuceneStandardTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.StandardTokenizer"): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is + implemented using Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length + are split. + :vartype max_token_length: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.StandardTokenizer". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Default is 255. Tokens longer than the maximum length + are split.""" + odata_type: Literal["#Microsoft.Azure.Search.StandardTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.StandardTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StandardTokenizer", **kwargs) + + +class LuceneStandardTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.StandardTokenizerV2"): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is + implemented using Apache Lucene. + + + :ivar name: The name of the tokenizer. 
It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length + are split. The maximum token length that can be used is 300 characters. + :vartype max_token_length: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.StandardTokenizerV2". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Default is 255. Tokens longer than the maximum length + are split. The maximum token length that can be used is 300 characters.""" + odata_type: Literal["#Microsoft.Azure.Search.StandardTokenizerV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.StandardTokenizerV2\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StandardTokenizerV2", **kwargs) + + +class MagnitudeScoringFunction(ScoringFunction, discriminator="magnitude"): + """Defines a function that boosts scores based on the magnitude of a numeric field. + + + :ivar field_name: The name of the field used as input to the scoring function. Required. + :vartype field_name: str + :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. + Required. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document + scores; + defaults to "Linear". Known values are: "linear", "constant", "quadratic", and "logarithmic". + :vartype interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation + :ivar parameters: Parameter values for the magnitude scoring function. Required. + :vartype parameters: ~azure.search.documents.models.MagnitudeScoringParameters + :ivar type: Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + "magnitude". + :vartype type: str + """ + + parameters: "_models.MagnitudeScoringParameters" = rest_field(name="magnitude") + """Parameter values for the magnitude scoring function. Required.""" + type: Literal["magnitude"] = rest_discriminator(name="type") # type: ignore + """Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + \"magnitude\".""" + + @overload + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "_models.MagnitudeScoringParameters", + interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="magnitude", **kwargs) + + +class MagnitudeScoringParameters(_model_base.Model): + """Provides parameter values to a magnitude scoring function. + + + :ivar boosting_range_start: The field value at which boosting starts. Required. + :vartype boosting_range_start: float + :ivar boosting_range_end: The field value at which boosting ends. Required. + :vartype boosting_range_end: float + :ivar should_boost_beyond_range_by_constant: A value indicating whether to apply a constant + boost for field values beyond + the range end value; default is false. + :vartype should_boost_beyond_range_by_constant: bool + """ + + boosting_range_start: float = rest_field(name="boostingRangeStart") + """The field value at which boosting starts. Required.""" + boosting_range_end: float = rest_field(name="boostingRangeEnd") + """The field value at which boosting ends. Required.""" + should_boost_beyond_range_by_constant: Optional[bool] = rest_field(name="constantBoostBeyondRange") + """A value indicating whether to apply a constant boost for field values beyond + the range end value; default is false.""" + + @overload + def __init__( + self, + *, + boosting_range_start: float, + boosting_range_end: float, + should_boost_beyond_range_by_constant: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class MappingCharFilter(CharFilter, discriminator="#Microsoft.Azure.Search.MappingCharFilter"): + """A character filter that applies mappings defined with the mappings option. + Matching is greedy (longest pattern matching at a given point wins). + Replacement is allowed to be the empty string. This character filter is + implemented using Apache Lucene. + + + :ivar name: The name of the char filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar mappings: A list of mappings of the following format: "a=>b" (all occurrences of the + character "a" will be replaced with character "b"). Required. + :vartype mappings: list[str] + :ivar odata_type: A URI fragment specifying the type of char filter. Required. Default value is + "#Microsoft.Azure.Search.MappingCharFilter". + :vartype odata_type: str + """ + + mappings: List[str] = rest_field() + """A list of mappings of the following format: \"a=>b\" (all occurrences of the + character \"a\" will be replaced with character \"b\"). Required.""" + odata_type: Literal["#Microsoft.Azure.Search.MappingCharFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of char filter. Required. Default value is + \"#Microsoft.Azure.Search.MappingCharFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + mappings: List[str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
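A construction sketch for the magnitude scoring function and its nested parameters defined above, assuming both classes remain public exports of azure.search.documents.indexes.models as in current releases.

# Import path assumed from the released public surface.
from azure.search.documents.indexes.models import (
    MagnitudeScoringFunction,
    MagnitudeScoringParameters,
)

# Boost documents whose numeric "rating" field falls between 3.5 and 5.0, up to 2x,
# and keep applying the full boost to values beyond the end of the range.
rating_boost = MagnitudeScoringFunction(
    field_name="rating",
    boost=2.0,
    interpolation="linear",
    parameters=MagnitudeScoringParameters(
        boosting_range_start=3.5,
        boosting_range_end=5.0,
        should_boost_beyond_range_by_constant=True,
    ),
)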
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.MappingCharFilter", **kwargs) + + +class MergeSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.MergeSkill"): + """A skill for merging two or more strings into a single unified string, with an + optional user-defined delimiter separating each component part. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an + empty + space. + :vartype insert_pre_tag: str + :ivar insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an + empty + space. + :vartype insert_post_tag: str + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.MergeSkill". + :vartype odata_type: str + """ + + insert_pre_tag: Optional[str] = rest_field(name="insertPreTag") + """The tag indicates the start of the merged text. By default, the tag is an empty + space.""" + insert_post_tag: Optional[str] = rest_field(name="insertPostTag") + """The tag indicates the end of the merged text. By default, the tag is an empty + space.""" + odata_type: Literal["#Microsoft.Skills.Text.MergeSkill"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.MergeSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + insert_pre_tag: Optional[str] = None, + insert_post_tag: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.MergeSkill", **kwargs) + + +class MicrosoftLanguageStemmingTokenizer( + LexicalTokenizer, discriminator="#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer" +): + """Divides text using language-specific rules and reduces words to their base + forms. + + + :ivar name: The name of the tokenizer. 
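The MappingCharFilter defined above takes its mappings in the "a=>b" format described in its docstring, with the empty string allowed as a replacement. A minimal sketch, assuming the class is still importable from azure.search.documents.indexes.models as in current releases:

# Import path assumed from the released public surface.
from azure.search.documents.indexes.models import MappingCharFilter

# Strip dashes, parentheses and dots before tokenization (empty replacements are allowed).
phone_normalizer = MappingCharFilter(
    name="phone-normalizer",
    mappings=["-=>", "(=>", ")=>", ".=>"],
)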
It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are + split. + Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those + tokens is split based on the max token length set. Default is 255. + :vartype max_token_length: int + :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as + the search + tokenizer, set to false if used as the indexing tokenizer. Default is false. + :vartype is_search_tokenizer: bool + :ivar language: The language to use. The default is English. Known values are: "arabic", + "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", + "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", + "swedish", "tamil", "telugu", "turkish", "ukrainian", and "urdu". + :vartype language: str or ~azure.search.documents.models.MicrosoftStemmingTokenizerLanguage + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Tokens longer than the maximum length are split. + Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those + tokens is split based on the max token length set. Default is 255.""" + is_search_tokenizer: Optional[bool] = rest_field(name="isSearchTokenizer") + """A value indicating how the tokenizer is used. Set to true if used as the search + tokenizer, set to false if used as the indexing tokenizer. Default is false.""" + language: Optional[Union[str, "_models.MicrosoftStemmingTokenizerLanguage"]] = rest_field() + """The language to use. The default is English. Known values are: \"arabic\", \"bangla\", + \"bulgarian\", \"catalan\", \"croatian\", \"czech\", \"danish\", \"dutch\", \"english\", + \"estonian\", \"finnish\", \"french\", \"german\", \"greek\", \"gujarati\", \"hebrew\", + \"hindi\", \"hungarian\", \"icelandic\", \"indonesian\", \"italian\", \"kannada\", \"latvian\", + \"lithuanian\", \"malay\", \"malayalam\", \"marathi\", \"norwegianBokmaal\", \"polish\", + \"portuguese\", \"portugueseBrazilian\", \"punjabi\", \"romanian\", \"russian\", + \"serbianCyrillic\", \"serbianLatin\", \"slovak\", \"slovenian\", \"spanish\", \"swedish\", + \"tamil\", \"telugu\", \"turkish\", \"ukrainian\", and \"urdu\".""" + odata_type: Literal["#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of tokenizer. Required. 
Default value is + \"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + is_search_tokenizer: Optional[bool] = None, + language: Optional[Union[str, "_models.MicrosoftStemmingTokenizerLanguage"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer", **kwargs) + + +class MicrosoftLanguageTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"): + """Divides text using language-specific rules. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are + split. + Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those + tokens is split based on the max token length set. Default is 255. + :vartype max_token_length: int + :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as + the search + tokenizer, set to false if used as the indexing tokenizer. Default is false. + :vartype is_search_tokenizer: bool + :ivar language: The language to use. The default is English. Known values are: "bangla", + "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech", + "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic", + "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi", + "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian", + "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil", + "telugu", "thai", "ukrainian", "urdu", and "vietnamese". + :vartype language: str or ~azure.search.documents.models.MicrosoftTokenizerLanguage + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Tokens longer than the maximum length are split. + Maximum token length that can be used is 300 characters. Tokens longer than 300 + characters are first split into tokens of length 300 and then each of those + tokens is split based on the max token length set. Default is 255.""" + is_search_tokenizer: Optional[bool] = rest_field(name="isSearchTokenizer") + """A value indicating how the tokenizer is used. Set to true if used as the search + tokenizer, set to false if used as the indexing tokenizer. Default is false.""" + language: Optional[Union[str, "_models.MicrosoftTokenizerLanguage"]] = rest_field() + """The language to use. The default is English. 
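For the stemming tokenizer just defined, a short sketch showing the language, indexing-versus-search, and token-length knobs. The import path is assumed from current releases; "german" is one of the known language values listed above.

# Import path assumed from the released public surface.
from azure.search.documents.indexes.models import MicrosoftLanguageStemmingTokenizer

# German stemming tokenizer intended for indexing (not the search-time tokenizer).
de_stemming = MicrosoftLanguageStemmingTokenizer(
    name="de-stemming",
    language="german",
    is_search_tokenizer=False,
    max_token_length=255,
)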
Known values are: \"bangla\", \"bulgarian\", + \"catalan\", \"chineseSimplified\", \"chineseTraditional\", \"croatian\", \"czech\", + \"danish\", \"dutch\", \"english\", \"french\", \"german\", \"greek\", \"gujarati\", \"hindi\", + \"icelandic\", \"indonesian\", \"italian\", \"japanese\", \"kannada\", \"korean\", \"malay\", + \"malayalam\", \"marathi\", \"norwegianBokmaal\", \"polish\", \"portuguese\", + \"portugueseBrazilian\", \"punjabi\", \"romanian\", \"russian\", \"serbianCyrillic\", + \"serbianLatin\", \"slovenian\", \"spanish\", \"swedish\", \"tamil\", \"telugu\", \"thai\", + \"ukrainian\", \"urdu\", and \"vietnamese\".""" + odata_type: Literal["#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.MicrosoftLanguageTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + is_search_tokenizer: Optional[bool] = None, + language: Optional[Union[str, "_models.MicrosoftTokenizerLanguage"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.MicrosoftLanguageTokenizer", **kwargs) + + +class NativeBlobSoftDeleteDeletionDetectionPolicy( + DataDeletionDetectionPolicy, discriminator="#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy" +): # pylint: disable=name-too-long + """Defines a data deletion detection policy utilizing Azure Blob Storage's native + soft delete feature for deletion detection. + + + :ivar odata_type: A URI fragment specifying the type of data deletion detection policy. + Required. Default value is + "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy". + :vartype odata_type: str + """ + + odata_type: Literal["#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of data deletion detection policy. Required. Default value + is \"#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__( + *args, odata_type="#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy", **kwargs + ) + + +class NGramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.NGramTokenFilter"): + """Generates n-grams of the given size(s). This token filter is implemented using + Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of + maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. + :vartype max_gram: int + :ivar odata_type: A URI fragment specifying the type of token filter. Required. 
Default value + is "#Microsoft.Azure.Search.NGramTokenFilter". + :vartype odata_type: str + """ + + min_gram: Optional[int] = rest_field(name="minGram") + """The minimum n-gram length. Default is 1. Must be less than the value of maxGram.""" + max_gram: Optional[int] = rest_field(name="maxGram") + """The maximum n-gram length. Default is 2.""" + odata_type: Literal["#Microsoft.Azure.Search.NGramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.NGramTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = None, + max_gram: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.NGramTokenFilter", **kwargs) + + +class NGramTokenFilterV2(TokenFilter, discriminator="#Microsoft.Azure.Search.NGramTokenFilterV2"): + """Generates n-grams of the given size(s). This token filter is implemented using + Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.NGramTokenFilterV2". + :vartype odata_type: str + """ + + min_gram: Optional[int] = rest_field(name="minGram") + """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram.""" + max_gram: Optional[int] = rest_field(name="maxGram") + """The maximum n-gram length. Default is 2. Maximum is 300.""" + odata_type: Literal["#Microsoft.Azure.Search.NGramTokenFilterV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.NGramTokenFilterV2\".""" + + @overload + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = None, + max_gram: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.NGramTokenFilterV2", **kwargs) + + +class NGramTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.NGramTokenizer"): + """Tokenizes the input into n-grams of the given size(s). This tokenizer is + implemented using Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. 
Must be less than the + value of maxGram. + :vartype min_gram: int + :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. + :vartype max_gram: int + :ivar token_chars: Character classes to keep in the tokens. + :vartype token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.NGramTokenizer". + :vartype odata_type: str + """ + + min_gram: Optional[int] = rest_field(name="minGram") + """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the + value of maxGram.""" + max_gram: Optional[int] = rest_field(name="maxGram") + """The maximum n-gram length. Default is 2. Maximum is 300.""" + token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = rest_field(name="tokenChars") + """Character classes to keep in the tokens.""" + odata_type: Literal["#Microsoft.Azure.Search.NGramTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.NGramTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + min_gram: Optional[int] = None, + max_gram: Optional[int] = None, + token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.NGramTokenizer", **kwargs) + + +class OcrSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vision.OcrSkill"): + """A skill that extracts text from image files. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is ``en``. 
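For the NGramTokenizer defined above, a small sketch that restricts grams to letters and digits via token_chars. It assumes NGramTokenizer and TokenCharacterKind remain public exports of azure.search.documents.indexes.models as in current releases.

# Import path assumed from the released public surface.
from azure.search.documents.indexes.models import NGramTokenizer, TokenCharacterKind

# Emit 2- and 3-character grams built only from letters and digits.
partial_match_tokenizer = NGramTokenizer(
    name="ngram-2-3",
    min_gram=2,
    max_gram=3,
    token_chars=[TokenCharacterKind.LETTER, TokenCharacterKind.DIGIT],
)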
+ Known values are: "af", "sq", "anp", "ar", "ast", "awa", "az", "bfy", "eu", "be", "be-cyrl", + "be-latn", "bho", "bi", "brx", "bs", "bra", "br", "bg", "bns", "bua", "ca", "ceb", "rab", "ch", + "hne", "zh-Hans", "zh-Hant", "kw", "co", "crh", "hr", "cs", "da", "prs", "dhi", "doi", "nl", + "en", "myv", "et", "fo", "fj", "fil", "fi", "fr", "fur", "gag", "gl", "de", "gil", "gon", "el", + "kl", "gvr", "ht", "hlb", "hni", "bgc", "haw", "hi", "mww", "hoc", "hu", "is", "smn", "id", + "ia", "iu", "ga", "it", "ja", "Jns", "jv", "kea", "kac", "xnr", "krc", "kaa-cyrl", "kaa", + "csb", "kk-cyrl", "kk-latn", "klr", "kha", "quc", "ko", "kfq", "kpy", "kos", "kum", "ku-arab", + "ku-latn", "kru", "ky", "lkt", "la", "lt", "dsb", "smj", "lb", "bfz", "ms", "mt", "kmj", "gv", + "mi", "mr", "mn", "cnr-cyrl", "cnr-latn", "nap", "ne", "niu", "nog", "sme", "nb", "no", "oc", + "os", "ps", "fa", "pl", "pt", "pa", "ksh", "ro", "rm", "ru", "sck", "sm", "sa", "sat", "sco", + "gd", "sr", "sr-Cyrl", "sr-Latn", "xsr", "srx", "sms", "sk", "sl", "so", "sma", "es", "sw", + "sv", "tg", "tt", "tet", "thf", "to", "tr", "tk", "tyv", "hsb", "ur", "ug", "uz-arab", + "uz-cyrl", "uz", "vo", "wae", "cy", "fy", "yua", "za", "zu", and "unk". + :vartype default_language_code: str or ~azure.search.documents.models.OcrSkillLanguage + :ivar should_detect_orientation: A value indicating to turn orientation detection on or not. + Default is false. + :vartype should_detect_orientation: bool + :ivar line_ending: Defines the sequence of characters to use between the lines of text + recognized + by the OCR skill. The default value is "space". Known values are: "space", "carriageReturn", + "lineFeed", and "carriageReturnLineFeed". + :vartype line_ending: str or ~azure.search.documents.models.OcrLineEnding + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Vision.OcrSkill". + :vartype odata_type: str + """ + + default_language_code: Optional[Union[str, "_models.OcrSkillLanguage"]] = rest_field(name="defaultLanguageCode") + """A value indicating which language code to use. Default is ``en``. 
Known values are: \"af\", + \"sq\", \"anp\", \"ar\", \"ast\", \"awa\", \"az\", \"bfy\", \"eu\", \"be\", \"be-cyrl\", + \"be-latn\", \"bho\", \"bi\", \"brx\", \"bs\", \"bra\", \"br\", \"bg\", \"bns\", \"bua\", + \"ca\", \"ceb\", \"rab\", \"ch\", \"hne\", \"zh-Hans\", \"zh-Hant\", \"kw\", \"co\", \"crh\", + \"hr\", \"cs\", \"da\", \"prs\", \"dhi\", \"doi\", \"nl\", \"en\", \"myv\", \"et\", \"fo\", + \"fj\", \"fil\", \"fi\", \"fr\", \"fur\", \"gag\", \"gl\", \"de\", \"gil\", \"gon\", \"el\", + \"kl\", \"gvr\", \"ht\", \"hlb\", \"hni\", \"bgc\", \"haw\", \"hi\", \"mww\", \"hoc\", \"hu\", + \"is\", \"smn\", \"id\", \"ia\", \"iu\", \"ga\", \"it\", \"ja\", \"Jns\", \"jv\", \"kea\", + \"kac\", \"xnr\", \"krc\", \"kaa-cyrl\", \"kaa\", \"csb\", \"kk-cyrl\", \"kk-latn\", \"klr\", + \"kha\", \"quc\", \"ko\", \"kfq\", \"kpy\", \"kos\", \"kum\", \"ku-arab\", \"ku-latn\", + \"kru\", \"ky\", \"lkt\", \"la\", \"lt\", \"dsb\", \"smj\", \"lb\", \"bfz\", \"ms\", \"mt\", + \"kmj\", \"gv\", \"mi\", \"mr\", \"mn\", \"cnr-cyrl\", \"cnr-latn\", \"nap\", \"ne\", \"niu\", + \"nog\", \"sme\", \"nb\", \"no\", \"oc\", \"os\", \"ps\", \"fa\", \"pl\", \"pt\", \"pa\", + \"ksh\", \"ro\", \"rm\", \"ru\", \"sck\", \"sm\", \"sa\", \"sat\", \"sco\", \"gd\", \"sr\", + \"sr-Cyrl\", \"sr-Latn\", \"xsr\", \"srx\", \"sms\", \"sk\", \"sl\", \"so\", \"sma\", \"es\", + \"sw\", \"sv\", \"tg\", \"tt\", \"tet\", \"thf\", \"to\", \"tr\", \"tk\", \"tyv\", \"hsb\", + \"ur\", \"ug\", \"uz-arab\", \"uz-cyrl\", \"uz\", \"vo\", \"wae\", \"cy\", \"fy\", \"yua\", + \"za\", \"zu\", and \"unk\".""" + should_detect_orientation: Optional[bool] = rest_field(name="detectOrientation") + """A value indicating to turn orientation detection on or not. Default is false.""" + line_ending: Optional[Union[str, "_models.OcrLineEnding"]] = rest_field(name="lineEnding") + """Defines the sequence of characters to use between the lines of text recognized + by the OCR skill. The default value is \"space\". Known values are: \"space\", + \"carriageReturn\", \"lineFeed\", and \"carriageReturnLineFeed\".""" + odata_type: Literal["#Microsoft.Skills.Vision.OcrSkill"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Vision.OcrSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "_models.OcrSkillLanguage"]] = None, + should_detect_orientation: Optional[bool] = None, + line_ending: Optional[Union[str, "_models.OcrLineEnding"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Vision.OcrSkill", **kwargs) + + +class OutputFieldMappingEntry(_model_base.Model): + """Output field mapping for a skill. + + + :ivar name: The name of the output defined by the skill. Required. + :vartype name: str + :ivar target_name: The target name of the output. It is optional and default to name. + :vartype target_name: str + """ + + name: str = rest_field() + """The name of the output defined by the skill. 
Required.""" + target_name: Optional[str] = rest_field(name="targetName") + """The target name of the output. It is optional and default to name.""" + + @overload + def __init__( + self, + *, + name: str, + target_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class PathHierarchyTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.PathHierarchyTokenizerV2"): + """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache + Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar delimiter: The delimiter character to use. Default is "/". + :vartype delimiter: str + :ivar replacement: A value that, if set, replaces the delimiter character. Default is "/". + :vartype replacement: str + :ivar max_token_length: The maximum token length. Default and maximum is 300. + :vartype max_token_length: int + :ivar reverse_token_order: A value indicating whether to generate tokens in reverse order. + Default is + false. + :vartype reverse_token_order: bool + :ivar number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. + :vartype number_of_tokens_to_skip: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.PathHierarchyTokenizerV2". + :vartype odata_type: str + """ + + delimiter: Optional[str] = rest_field() + """The delimiter character to use. Default is \"/\".""" + replacement: Optional[str] = rest_field() + """A value that, if set, replaces the delimiter character. Default is \"/\".""" + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Default and maximum is 300.""" + reverse_token_order: Optional[bool] = rest_field(name="reverse") + """A value indicating whether to generate tokens in reverse order. Default is + false.""" + number_of_tokens_to_skip: Optional[int] = rest_field(name="skip") + """The number of initial tokens to skip. Default is 0.""" + odata_type: Literal["#Microsoft.Azure.Search.PathHierarchyTokenizerV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.PathHierarchyTokenizerV2\".""" + + @overload + def __init__( + self, + *, + name: str, + delimiter: Optional[str] = None, + replacement: Optional[str] = None, + max_token_length: Optional[int] = None, + reverse_token_order: Optional[bool] = None, + number_of_tokens_to_skip: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.PathHierarchyTokenizerV2", **kwargs) + + +class PatternAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.PatternAnalyzer"): + """Flexibly separates text into terms via a regular expression pattern. This + analyzer is implemented using Apache Lucene. 
+ + + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is + true. + :vartype lower_case_terms: bool + :ivar pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters. + :vartype pattern: str + :ivar flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". + :vartype flags: str or ~azure.search.documents.models.RegexFlags + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] + :ivar odata_type: A URI fragment specifying the type of analyzer. Required. Default value is + "#Microsoft.Azure.Search.PatternAnalyzer". + :vartype odata_type: str + """ + + lower_case_terms: Optional[bool] = rest_field(name="lowercase") + """A value indicating whether terms should be lower-cased. Default is true.""" + pattern: Optional[str] = rest_field() + """A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters.""" + flags: Optional[Union[str, "_models.RegexFlags"]] = rest_field() + """Regular expression flags. Known values are: \"CANON_EQ\", \"CASE_INSENSITIVE\", \"COMMENTS\", + \"DOTALL\", \"LITERAL\", \"MULTILINE\", \"UNICODE_CASE\", and \"UNIX_LINES\".""" + stopwords: Optional[List[str]] = rest_field() + """A list of stopwords.""" + odata_type: Literal["#Microsoft.Azure.Search.PatternAnalyzer"] = rest_discriminator(name="odataType") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of analyzer. Required. Default value is + \"#Microsoft.Azure.Search.PatternAnalyzer\".""" + + @overload + def __init__( + self, + *, + name: str, + lower_case_terms: Optional[bool] = None, + pattern: Optional[str] = None, + flags: Optional[Union[str, "_models.RegexFlags"]] = None, + stopwords: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.PatternAnalyzer", **kwargs) + + +class PatternCaptureTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.PatternCaptureTokenFilter"): + """Uses Java regexes to emit multiple tokens - one for each capture group in one + or more patterns. This token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar patterns: A list of patterns to match against each token. Required. + :vartype patterns: list[str] + :ivar preserve_original: A value indicating whether to return the original token even if one of + the + patterns matches. Default is true. + :vartype preserve_original: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.PatternCaptureTokenFilter". 
+ :vartype odata_type: str + """ + + patterns: List[str] = rest_field() + """A list of patterns to match against each token. Required.""" + preserve_original: Optional[bool] = rest_field(name="preserveOriginal") + """A value indicating whether to return the original token even if one of the + patterns matches. Default is true.""" + odata_type: Literal["#Microsoft.Azure.Search.PatternCaptureTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.PatternCaptureTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + patterns: List[str], + preserve_original: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.PatternCaptureTokenFilter", **kwargs) + + +class PatternReplaceCharFilter(CharFilter, discriminator="#Microsoft.Azure.Search.PatternReplaceCharFilter"): + """A character filter that replaces characters in the input string. It uses a + regular expression to identify character sequences to preserve and a + replacement pattern to identify characters to replace. For example, given the + input text "aa bb aa bb", pattern "(aa)\\s+(bb)", and replacement "$1#$2", the + result would be "aa#bb aa#bb". This character filter is implemented using + Apache Lucene. + + + :ivar name: The name of the char filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar pattern: A regular expression pattern. Required. + :vartype pattern: str + :ivar replacement: The replacement text. Required. + :vartype replacement: str + :ivar odata_type: A URI fragment specifying the type of char filter. Required. Default value is + "#Microsoft.Azure.Search.PatternReplaceCharFilter". + :vartype odata_type: str + """ + + pattern: str = rest_field() + """A regular expression pattern. Required.""" + replacement: str = rest_field() + """The replacement text. Required.""" + odata_type: Literal["#Microsoft.Azure.Search.PatternReplaceCharFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of char filter. Required. Default value is + \"#Microsoft.Azure.Search.PatternReplaceCharFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + pattern: str, + replacement: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.PatternReplaceCharFilter", **kwargs) + + +class PatternReplaceTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.PatternReplaceTokenFilter"): + """A character filter that replaces characters in the input string. It uses a + regular expression to identify character sequences to preserve and a + replacement pattern to identify characters to replace. 
For example, given the + input text "aa bb aa bb", pattern "(aa)\\s+(bb)", and replacement "$1#$2", the + result would be "aa#bb aa#bb". This token filter is implemented using Apache + Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar pattern: A regular expression pattern. Required. + :vartype pattern: str + :ivar replacement: The replacement text. Required. + :vartype replacement: str + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.PatternReplaceTokenFilter". + :vartype odata_type: str + """ + + pattern: str = rest_field() + """A regular expression pattern. Required.""" + replacement: str = rest_field() + """The replacement text. Required.""" + odata_type: Literal["#Microsoft.Azure.Search.PatternReplaceTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.PatternReplaceTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + pattern: str, + replacement: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.PatternReplaceTokenFilter", **kwargs) + + +class PatternTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.PatternTokenizer"): + """Tokenizer that uses regex pattern matching to construct distinct tokens. This + tokenizer is implemented using Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar pattern: A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters. + :vartype pattern: str + :ivar flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", + "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". + :vartype flags: str or ~azure.search.documents.models.RegexFlags + :ivar group: The zero-based ordinal of the matching group in the regular expression pattern + to extract into tokens. Use -1 if you want to use the entire pattern to split + the input into tokens, irrespective of matching groups. Default is -1. + :vartype group: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.PatternTokenizer". + :vartype odata_type: str + """ + + pattern: Optional[str] = rest_field() + """A regular expression pattern to match token separators. Default is an + expression that matches one or more non-word characters.""" + flags: Optional[Union[str, "_models.RegexFlags"]] = rest_field() + """Regular expression flags. 
Known values are: \"CANON_EQ\", \"CASE_INSENSITIVE\", \"COMMENTS\", + \"DOTALL\", \"LITERAL\", \"MULTILINE\", \"UNICODE_CASE\", and \"UNIX_LINES\".""" + group: Optional[int] = rest_field() + """The zero-based ordinal of the matching group in the regular expression pattern + to extract into tokens. Use -1 if you want to use the entire pattern to split + the input into tokens, irrespective of matching groups. Default is -1.""" + odata_type: Literal["#Microsoft.Azure.Search.PatternTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.PatternTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + pattern: Optional[str] = None, + flags: Optional[Union[str, "_models.RegexFlags"]] = None, + group: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.PatternTokenizer", **kwargs) + + +class PhoneticTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.PhoneticTokenFilter"): + """Create tokens for phonetic matches. This token filter is implemented using + Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar encoder: The phonetic encoder to use. Default is "metaphone". Known values are: + "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", + "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", and "beiderMorse". + :vartype encoder: str or ~azure.search.documents.models.PhoneticEncoder + :ivar replace_original_tokens: A value indicating whether encoded tokens should replace + original tokens. If + false, encoded tokens are added as synonyms. Default is true. + :vartype replace_original_tokens: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.PhoneticTokenFilter". + :vartype odata_type: str + """ + + encoder: Optional[Union[str, "_models.PhoneticEncoder"]] = rest_field() + """The phonetic encoder to use. Default is \"metaphone\". Known values are: \"metaphone\", + \"doubleMetaphone\", \"soundex\", \"refinedSoundex\", \"caverphone1\", \"caverphone2\", + \"cologne\", \"nysiis\", \"koelnerPhonetik\", \"haasePhonetik\", and \"beiderMorse\".""" + replace_original_tokens: Optional[bool] = rest_field(name="replace") + """A value indicating whether encoded tokens should replace original tokens. If + false, encoded tokens are added as synonyms. Default is true.""" + odata_type: Literal["#Microsoft.Azure.Search.PhoneticTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.PhoneticTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + encoder: Optional[Union[str, "_models.PhoneticEncoder"]] = None, + replace_original_tokens: Optional[bool] = None, + ) -> None: ... 
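For orientation, here is a brief usage sketch for the pattern- and phonetic-analysis models defined in this hunk. It is an illustration only, not part of the generated diff; it assumes these classes are re-exported through the package's public models namespace (azure.search.documents.indexes.models in the current SDK layout) and it uses only the keyword-only constructor overloads shown above.

    # Illustrative sketch; the import path below is an assumption, not part of this patch.
    from azure.search.documents.indexes.models import PatternTokenizer, PhoneticTokenFilter

    # Split on runs of non-word characters; group=-1 splits on the whole pattern
    # instead of extracting a capture group (the documented default).
    tokenizer = PatternTokenizer(name="my-pattern-tokenizer", pattern=r"\W+", group=-1)

    # Add doubleMetaphone encodings as synonyms rather than replacing the original
    # tokens, so exact-term matches keep working alongside phonetic matches.
    phonetic_filter = PhoneticTokenFilter(
        name="my-phonetic-filter",
        encoder="doubleMetaphone",
        replace_original_tokens=False,
    )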
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, odata_type="#Microsoft.Azure.Search.PhoneticTokenFilter", **kwargs)
+
+
+class PIIDetectionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.PIIDetectionSkill"):
+    """Using the Text Analytics API, extracts personal information from an input text
+    and gives you the option of masking it.
+
+
+    :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill
+     with no name defined will be given a default name of its 1-based index in the
+     skills array, prefixed with the character '#'.
+    :vartype name: str
+    :ivar description: The description of the skill which describes the inputs, outputs, and usage
+     of
+     the skill.
+    :vartype description: str
+    :ivar context: Represents the level at which operations take place, such as the document root
+     or document content (for example, /document or /document/content). The default
+     is /document.
+    :vartype context: str
+    :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of
+     an upstream skill. Required.
+    :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry]
+    :ivar outputs: The output of a skill is either a field in a search index, or a value that can
+     be consumed as an input by another skill. Required.
+    :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry]
+    :ivar default_language_code: A value indicating which language code to use. Default is ``en``.
+    :vartype default_language_code: str
+    :ivar minimum_precision: A value between 0 and 1 that can be used to only include entities whose
+     confidence
+     score is greater than the value specified. If not set (default), or if
+     explicitly set to null, all entities will be included.
+    :vartype minimum_precision: float
+    :ivar masking_mode: A parameter that provides various ways to mask the personal information
+     detected in the input text. Default is 'none'. Known values are: "none" and "replace".
+    :vartype masking_mode: str or ~azure.search.documents.models.PIIDetectionSkillMaskingMode
+    :ivar mask: The character used to mask the text if the maskingMode parameter is set to
+     replace. Default is '*'.
+    :vartype mask: str
+    :ivar model_version: The version of the model to use when calling the Text Analytics service.
+     It
+     will default to the latest available when not specified. We recommend you do
+     not specify this value unless absolutely necessary.
+    :vartype model_version: str
+    :ivar pii_categories: A list of PII entity categories that should be extracted and masked.
+    :vartype pii_categories: list[str]
+    :ivar domain: If specified, will set the PII domain to include only a subset of the entity
+     categories. Possible values include: 'phi', 'none'. Default is 'none'.
+    :vartype domain: str
+    :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is
+     "#Microsoft.Skills.Text.PIIDetectionSkill".
+    :vartype odata_type: str
+    """
+
+    default_language_code: Optional[str] = rest_field(name="defaultLanguageCode")
+    """A value indicating which language code to use. Default is ``en``."""
+    minimum_precision: Optional[float] = rest_field(name="minimumPrecision")
+    """A value between 0 and 1 that can be used to only include entities whose confidence
+    score is greater than the value specified.
If not set (default), or if + explicitly set to null, all entities will be included.""" + masking_mode: Optional[Union[str, "_models.PIIDetectionSkillMaskingMode"]] = rest_field(name="maskingMode") + """A parameter that provides various ways to mask the personal information + detected in the input text. Default is 'none'. Known values are: \"none\" and \"replace\".""" + mask: Optional[str] = rest_field(name="maskingCharacter") + """The character used to mask the text if the maskingMode parameter is set to + replace. Default is '*'.""" + model_version: Optional[str] = rest_field(name="modelVersion") + """The version of the model to use when calling the Text Analytics service. It + will default to the latest available when not specified. We recommend you do + not specify this value unless absolutely necessary.""" + pii_categories: Optional[List[str]] = rest_field(name="piiCategories") + """A list of PII entity categories that should be extracted and masked.""" + domain: Optional[str] = rest_field() + """If specified, will set the PII domain to include only a subset of the entity + categories. Possible values include: 'phi', 'none'. Default is 'none'.""" + odata_type: Literal["#Microsoft.Skills.Text.PIIDetectionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.PIIDetectionSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[str] = None, + minimum_precision: Optional[float] = None, + masking_mode: Optional[Union[str, "_models.PIIDetectionSkillMaskingMode"]] = None, + mask: Optional[str] = None, + model_version: Optional[str] = None, + pii_categories: Optional[List[str]] = None, + domain: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.PIIDetectionSkill", **kwargs) + + +class QueryAnswerResult(_model_base.Model): + """An answer is a text passage extracted from the contents of the most relevant + documents that matched the query. Answers are extracted from the top search + results. Answer candidates are scored and the top answers are selected. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar score: The score value represents how relevant the answer is to the query relative to + other answers returned for the query. + :vartype score: float + :ivar key: The key of the document the answer was extracted from. + :vartype key: str + :ivar text: The text passage extracted from the document contents as the answer. + :vartype text: str + :ivar highlights: Same text passage as in the Text property with highlighted text phrases most + relevant to the query. 
+    :vartype highlights: str
+    """
+
+    score: Optional[float] = rest_field(visibility=["read"])
+    """The score value represents how relevant the answer is to the query relative to
+    other answers returned for the query."""
+    key: Optional[str] = rest_field(visibility=["read"])
+    """The key of the document the answer was extracted from."""
+    text: Optional[str] = rest_field(visibility=["read"])
+    """The text passage extracted from the document contents as the answer."""
+    highlights: Optional[str] = rest_field(visibility=["read"])
+    """Same text passage as in the Text property with highlighted text phrases most
+    relevant to the query."""
+
+
+class QueryCaptionResult(_model_base.Model):
+    """Captions are the most representative passages from the document relative to
+    the search query. They are often used as a document summary. Captions are only
+    returned for queries of type ``semantic``.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar text: A representative text passage extracted from the document most relevant to the
+     search query.
+    :vartype text: str
+    :ivar highlights: Same text passage as in the Text property with highlighted phrases most
+     relevant to the query.
+    :vartype highlights: str
+    """
+
+    text: Optional[str] = rest_field(visibility=["read"])
+    """A representative text passage extracted from the document most relevant to the
+    search query."""
+    highlights: Optional[str] = rest_field(visibility=["read"])
+    """Same text passage as in the Text property with highlighted phrases most
+    relevant to the query."""
+
+
+class QueryResultDocumentRerankerInput(_model_base.Model):
+    """The raw concatenated strings that were sent to the semantic enrichment process.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar title: The raw string for the title field that was used for semantic enrichment.
+    :vartype title: str
+    :ivar content: The raw concatenated strings for the content fields that were used for semantic
+     enrichment.
+    :vartype content: str
+    :ivar keywords: The raw concatenated strings for the keyword fields that were used for semantic
+     enrichment.
+    :vartype keywords: str
+    """
+
+    title: Optional[str] = rest_field(visibility=["read"])
+    """The raw string for the title field that was used for semantic enrichment."""
+    content: Optional[str] = rest_field(visibility=["read"])
+    """The raw concatenated strings for the content fields that were used for semantic
+    enrichment."""
+    keywords: Optional[str] = rest_field(visibility=["read"])
+    """The raw concatenated strings for the keyword fields that were used for semantic
+    enrichment."""
+
+
+class QueryResultDocumentSemanticField(_model_base.Model):
+    """Description of fields that were sent to the semantic enrichment process, as
+    well as how they were used.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar name: The name of the field that was sent to the semantic enrichment process.
+    :vartype name: str
+    :ivar state: The way the field was used for the semantic enrichment process (fully used,
+     partially used, or unused). Known values are: "used", "unused", and "partial".
+ :vartype state: str or ~azure.search.documents.models.SemanticFieldState + """ + + name: Optional[str] = rest_field(visibility=["read"]) + """The name of the field that was sent to the semantic enrichment process.""" + state: Optional[Union[str, "_models.SemanticFieldState"]] = rest_field(visibility=["read"]) + """The way the field was used for the semantic enrichment process (fully used, + partially used, or unused). Known values are: \"used\", \"unused\", and \"partial\".""" + + +class QueryResultDocumentSubscores(_model_base.Model): + """The breakdown of subscores between the text and vector query components of the + search query for this document. Each vector query is shown as a separate object + in the same order they were received. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar text: The BM25 or Classic score for the text portion of the query. + :vartype text: ~azure.search.documents.models.TextResult + :ivar vectors: The vector similarity and @search.score values for each vector query. + :vartype vectors: list[dict[str, ~azure.search.documents.models.SingleVectorFieldResult]] + :ivar document_boost: The BM25 or Classic score for the text portion of the query. + :vartype document_boost: float + """ + + text: Optional["_models.TextResult"] = rest_field(visibility=["read"]) + """The BM25 or Classic score for the text portion of the query.""" + vectors: Optional[List[Dict[str, "_models.SingleVectorFieldResult"]]] = rest_field(visibility=["read"]) + """The vector similarity and @search.score values for each vector query.""" + document_boost: Optional[float] = rest_field(name="documentBoost", visibility=["read"]) + """The BM25 or Classic score for the text portion of the query.""" + + +class QueryRewritesDebugInfo(_model_base.Model): + """Contains debugging information specific to query rewrites. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar text: List of query rewrites generated for the text query. + :vartype text: ~azure.search.documents.models.QueryRewritesValuesDebugInfo + :ivar vectors: List of query rewrites generated for the vectorizable text queries. + :vartype vectors: list[~azure.search.documents.models.QueryRewritesValuesDebugInfo] + """ + + text: Optional["_models.QueryRewritesValuesDebugInfo"] = rest_field(visibility=["read"]) + """List of query rewrites generated for the text query.""" + vectors: Optional[List["_models.QueryRewritesValuesDebugInfo"]] = rest_field(visibility=["read"]) + """List of query rewrites generated for the vectorizable text queries.""" + + +class QueryRewritesValuesDebugInfo(_model_base.Model): + """Contains debugging information specific to query rewrites. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar input_query: The input text to the generative query rewriting model. There may be cases + where the user query and the input to the generative model are not identical. + :vartype input_query: str + :ivar rewrites: List of query rewrites. + :vartype rewrites: list[str] + """ + + input_query: Optional[str] = rest_field(name="inputQuery", visibility=["read"]) + """The input text to the generative query rewriting model. 
There may be cases + where the user query and the input to the generative model are not identical.""" + rewrites: Optional[List[str]] = rest_field(visibility=["read"]) + """List of query rewrites.""" + + +class RescoringOptions(_model_base.Model): + """Contains the options for rescoring. + + :ivar enable_rescoring: If set to true, after the initial search on the compressed vectors, the + similarity scores are recalculated using the full-precision vectors. This will + improve recall at the expense of latency. + :vartype enable_rescoring: bool + :ivar default_oversampling: Default oversampling factor. Oversampling retrieves a greater set + of potential + documents to offset the resolution loss due to quantization. This increases the + set of results that will be rescored on full-precision vectors. Minimum value + is 1, meaning no oversampling (1x). This parameter can only be set when + 'enableRescoring' is true. Higher values improve recall at the expense of + latency. + :vartype default_oversampling: float + :ivar rescore_storage_method: Controls the storage method for original vectors. This setting is + immutable. Known values are: "preserveOriginals" and "discardOriginals". + :vartype rescore_storage_method: str or + ~azure.search.documents.models.VectorSearchCompressionRescoreStorageMethod + """ + + enable_rescoring: Optional[bool] = rest_field(name="enableRescoring") + """If set to true, after the initial search on the compressed vectors, the + similarity scores are recalculated using the full-precision vectors. This will + improve recall at the expense of latency.""" + default_oversampling: Optional[float] = rest_field(name="defaultOversampling") + """Default oversampling factor. Oversampling retrieves a greater set of potential + documents to offset the resolution loss due to quantization. This increases the + set of results that will be rescored on full-precision vectors. Minimum value + is 1, meaning no oversampling (1x). This parameter can only be set when + 'enableRescoring' is true. Higher values improve recall at the expense of + latency.""" + rescore_storage_method: Optional[Union[str, "_models.VectorSearchCompressionRescoreStorageMethod"]] = rest_field( + name="rescoreStorageMethod" + ) + """Controls the storage method for original vectors. This setting is immutable. Known values are: + \"preserveOriginals\" and \"discardOriginals\".""" + + @overload + def __init__( + self, + *, + enable_rescoring: Optional[bool] = None, + default_oversampling: Optional[float] = None, + rescore_storage_method: Optional[Union[str, "_models.VectorSearchCompressionRescoreStorageMethod"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ResourceCounter(_model_base.Model): + """Represents a resource's usage and quota. + + + :ivar usage: The resource usage amount. Required. + :vartype usage: int + :ivar quota: The resource amount quota. + :vartype quota: int + """ + + usage: int = rest_field() + """The resource usage amount. Required.""" + quota: Optional[int] = rest_field() + """The resource amount quota.""" + + @overload + def __init__( + self, + *, + usage: int, + quota: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ScalarQuantizationCompression(VectorSearchCompression, discriminator="scalarQuantization"): + """Contains configuration options specific to the scalar quantization compression + method used during indexing and querying. + + + :ivar compression_name: The name to associate with this particular configuration. Required. + :vartype compression_name: str + :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated + using compressed + vectors are obtained, they will be reranked again by recalculating the + full-precision similarity scores. This will improve recall at the expense of + latency. + :vartype rerank_with_original_vectors: bool + :ivar default_oversampling: Default oversampling factor. Oversampling will internally request + more + documents (specified by this multiplier) in the initial search. This increases + the set of results that will be reranked using recomputed similarity scores + from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). + This parameter can only be set when rerankWithOriginalVectors is true. Higher + values improve recall at the expense of latency. + :vartype default_oversampling: float + :ivar rescoring_options: Contains the options for rescoring. + :vartype rescoring_options: ~azure.search.documents.models.RescoringOptions + :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the + vectors + reduces the size of the vectors and the amount of data that needs to be + transferred during search. This can save storage cost and improve search + performance at the expense of recall. It should be only used for embeddings + trained with Matryoshka Representation Learning (MRL) such as OpenAI + text-embedding-3-large (small). The default value is null, which means no + truncation. + :vartype truncation_dimension: int + :ivar parameters: Contains the parameters specific to Scalar Quantization. + :vartype parameters: ~azure.search.documents.models.ScalarQuantizationParameters + :ivar kind: The name of the kind of compression method being configured for use with vector + search. Required. Scalar Quantization, a type of compression method. In scalar quantization, + the + original vectors values are compressed to a narrower type by discretizing and + representing each component of a vector using a reduced set of quantized + values, thereby reducing the overall data size. + :vartype kind: str or ~azure.search.documents.models.SCALAR_QUANTIZATION + """ + + parameters: Optional["_models.ScalarQuantizationParameters"] = rest_field(name="scalarQuantizationParameters") + """Contains the parameters specific to Scalar Quantization.""" + kind: Literal[VectorSearchCompressionKind.SCALAR_QUANTIZATION] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of compression method being configured for use with vector + search. Required. Scalar Quantization, a type of compression method. 
In scalar quantization, + the + original vectors values are compressed to a narrower type by discretizing and + representing each component of a vector using a reduced set of quantized + values, thereby reducing the overall data size.""" + + @overload + def __init__( + self, + *, + compression_name: str, + rerank_with_original_vectors: Optional[bool] = None, + default_oversampling: Optional[float] = None, + rescoring_options: Optional["_models.RescoringOptions"] = None, + truncation_dimension: Optional[int] = None, + parameters: Optional["_models.ScalarQuantizationParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchCompressionKind.SCALAR_QUANTIZATION, **kwargs) + + +class ScalarQuantizationParameters(_model_base.Model): + """Contains the parameters specific to Scalar Quantization. + + :ivar quantized_data_type: The quantized data type of compressed vector values. "int8" + :vartype quantized_data_type: str or + ~azure.search.documents.models.VectorSearchCompressionTarget + """ + + quantized_data_type: Optional[Union[str, "_models.VectorSearchCompressionTarget"]] = rest_field( + name="quantizedDataType" + ) + """The quantized data type of compressed vector values. \"int8\"""" + + @overload + def __init__( + self, + *, + quantized_data_type: Optional[Union[str, "_models.VectorSearchCompressionTarget"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ScoringProfile(_model_base.Model): + """Defines parameters for a search index that influence scoring in search queries. + + + :ivar name: The name of the scoring profile. Required. + :vartype name: str + :ivar text_weights: Parameters that boost scoring based on text matches in certain index + fields. + :vartype text_weights: ~azure.search.documents.models.TextWeights + :ivar functions: The collection of functions that influence the scoring of documents. + :vartype functions: list[~azure.search.documents.models.ScoringFunction] + :ivar function_aggregation: A value indicating how the results of individual scoring functions + should be + combined. Defaults to "Sum". Ignored if there are no scoring functions. Known values are: + "sum", "average", "minimum", "maximum", and "firstMatching". + :vartype function_aggregation: str or ~azure.search.documents.models.ScoringFunctionAggregation + """ + + name: str = rest_field() + """The name of the scoring profile. Required.""" + text_weights: Optional["_models.TextWeights"] = rest_field(name="text") + """Parameters that boost scoring based on text matches in certain index fields.""" + functions: Optional[List["_models.ScoringFunction"]] = rest_field() + """The collection of functions that influence the scoring of documents.""" + function_aggregation: Optional[Union[str, "_models.ScoringFunctionAggregation"]] = rest_field( + name="functionAggregation" + ) + """A value indicating how the results of individual scoring functions should be + combined. Defaults to \"Sum\". Ignored if there are no scoring functions. 
Known values are: + \"sum\", \"average\", \"minimum\", \"maximum\", and \"firstMatching\".""" + + @overload + def __init__( + self, + *, + name: str, + text_weights: Optional["_models.TextWeights"] = None, + functions: Optional[List["_models.ScoringFunction"]] = None, + function_aggregation: Optional[Union[str, "_models.ScoringFunctionAggregation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchAlias(_model_base.Model): + """Represents an index alias, which describes a mapping from the alias name to an + index. The alias name can be used in place of the index name for supported + operations. + + + :ivar name: The name of the alias. Required. + :vartype name: str + :ivar indexes: The name of the index this alias maps to. Only one index name may be specified. + Required. + :vartype indexes: list[str] + :ivar e_tag: The ETag of the alias. + :vartype e_tag: str + """ + + name: str = rest_field() + """The name of the alias. Required.""" + indexes: List[str] = rest_field() + """The name of the index this alias maps to. Only one index name may be specified. Required.""" + e_tag: Optional[str] = rest_field(name="@odata.etag") + """The ETag of the alias.""" + + @overload + def __init__( + self, + *, + name: str, + indexes: List[str], + e_tag: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchDocumentsResult(_model_base.Model): + """Response containing search results from an index. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar count: The total count of results found by the search operation, or null if the count + was not requested. If present, the count may be greater than the number of + results in this response. This can happen if you use the $top or $skip + parameters, or if the query can't return all the requested documents in a + single response. + :vartype count: int + :ivar coverage: A value indicating the percentage of the index that was included in the query, + or null if minimumCoverage was not specified in the request. + :vartype coverage: float + :ivar facets: The facet query results for the search operation, organized as a collection of + buckets for each faceted field; null if the query did not include any facet + expressions. + :vartype facets: dict[str, list[~azure.search.documents.models.FacetResult]] + :ivar answers: The answers query results for the search operation; null if the answers query + parameter was not specified or set to 'none'. + :vartype answers: list[~azure.search.documents.models.QueryAnswerResult] + :ivar debug_info: Debug information that applies to the search results as a whole. + :vartype debug_info: ~azure.search.documents.models.DebugInfo + :ivar next_page_parameters: Continuation JSON payload returned when the query can't return all + the + requested results in a single response. You can use this JSON along with + @odata.nextLink to formulate another POST Search request to get the next part + of the search response. 
+ :vartype next_page_parameters: ~azure.search.documents.models.SearchRequest + :ivar results: The sequence of results returned by the query. Required. + :vartype results: list[~azure.search.documents.models.SearchResult] + :ivar next_link: Continuation URL returned when the query can't return all the requested + results + in a single response. You can use this URL to formulate another GET or POST + Search request to get the next part of the search response. Make sure to use + the same verb (GET or POST) as the request that produced this response. + :vartype next_link: str + :ivar semantic_partial_response_reason: Reason that a partial response was returned for a + semantic ranking request. Known values are: "maxWaitExceeded", "capacityOverloaded", and + "transient". + :vartype semantic_partial_response_reason: str or + ~azure.search.documents.models.SemanticErrorReason + :ivar semantic_partial_response_type: Type of partial response that was returned for a semantic + ranking request. Known values are: "baseResults" and "rerankedResults". + :vartype semantic_partial_response_type: str or + ~azure.search.documents.models.SemanticSearchResultsType + :ivar semantic_query_rewrites_result_type: Type of query rewrite that was used to retrieve + documents. "originalQueryOnly" + :vartype semantic_query_rewrites_result_type: str or + ~azure.search.documents.models.SemanticQueryRewritesResultType + """ + + count: Optional[int] = rest_field(name="@odata.count", visibility=["read"]) + """The total count of results found by the search operation, or null if the count + was not requested. If present, the count may be greater than the number of + results in this response. This can happen if you use the $top or $skip + parameters, or if the query can't return all the requested documents in a + single response.""" + coverage: Optional[float] = rest_field(name="@search.coverage", visibility=["read"]) + """A value indicating the percentage of the index that was included in the query, + or null if minimumCoverage was not specified in the request.""" + facets: Optional[Dict[str, List["_models.FacetResult"]]] = rest_field(name="@search.facets", visibility=["read"]) + """The facet query results for the search operation, organized as a collection of + buckets for each faceted field; null if the query did not include any facet + expressions.""" + answers: Optional[List["_models.QueryAnswerResult"]] = rest_field(name="@search.answers", visibility=["read"]) + """The answers query results for the search operation; null if the answers query + parameter was not specified or set to 'none'.""" + debug_info: Optional["_models.DebugInfo"] = rest_field(name="@search.debugInfo", visibility=["read"]) + """Debug information that applies to the search results as a whole.""" + next_page_parameters: Optional["_models.SearchRequest"] = rest_field( + name="@search.nextPageParameters", visibility=["read"] + ) + """Continuation JSON payload returned when the query can't return all the + requested results in a single response. You can use this JSON along with + @odata.nextLink to formulate another POST Search request to get the next part + of the search response.""" + results: List["_models.SearchResult"] = rest_field(name="value", visibility=["read"]) + """The sequence of results returned by the query. Required.""" + next_link: Optional[str] = rest_field(name="@odata.nextLink", visibility=["read"]) + """Continuation URL returned when the query can't return all the requested results + in a single response. 
You can use this URL to formulate another GET or POST + Search request to get the next part of the search response. Make sure to use + the same verb (GET or POST) as the request that produced this response.""" + semantic_partial_response_reason: Optional[Union[str, "_models.SemanticErrorReason"]] = rest_field( + name="@search.semanticPartialResponseReason", visibility=["read"] + ) + """Reason that a partial response was returned for a semantic ranking request. Known values are: + \"maxWaitExceeded\", \"capacityOverloaded\", and \"transient\".""" + semantic_partial_response_type: Optional[Union[str, "_models.SemanticSearchResultsType"]] = rest_field( + name="@search.semanticPartialResponseType", visibility=["read"] + ) + """Type of partial response that was returned for a semantic ranking request. Known values are: + \"baseResults\" and \"rerankedResults\".""" + semantic_query_rewrites_result_type: Optional[Union[str, "_models.SemanticQueryRewritesResultType"]] = rest_field( + name="@search.semanticQueryRewritesResultType", visibility=["read"] + ) + """Type of query rewrite that was used to retrieve documents. \"originalQueryOnly\"""" + + +class SearchField(_model_base.Model): + """Represents a field in an index definition, which describes the name, data type, + and search behavior of a field. + + + :ivar name: The name of the field, which must be unique within the fields collection of the + index or parent field. Required. + :vartype name: str + :ivar type: The data type of the field. Required. Known values are: "Edm.String", "Edm.Int32", + "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", "Edm.GeographyPoint", + "Edm.ComplexType", "Edm.Single", "Edm.Half", "Edm.Int16", "Edm.SByte", and "Edm.Byte". + :vartype type: str or ~azure.search.documents.models.SearchFieldDataType + :ivar key: A value indicating whether the field uniquely identifies documents in the + index. Exactly one top-level field in each index must be chosen as the key + field and it must be of type Edm.String. Key fields can be used to look up + documents directly and update or delete specific documents. Default is false + for simple fields and null for complex fields. + :vartype key: bool + :ivar retrievable: A value indicating whether the field can be returned in a search result. You + can disable this option if you want to use a field (for example, margin) as a + filter, sorting, or scoring mechanism but do not want the field to be visible + to the end user. This property must be true for key fields, and it must be null + for complex fields. This property can be changed on existing fields. Enabling + this property does not cause any increase in index storage requirements. + Default is true for simple fields, false for vector fields, and null for + complex fields. + :vartype retrievable: bool + :ivar stored: An immutable value indicating whether the field will be persisted separately on + disk to be returned in a search result. You can disable this option if you + don't plan to return the field contents in a search response to save on storage + overhead. This can only be set during index creation and only for vector + fields. This property cannot be changed for existing fields or set as false for + new fields. If this property is set as false, the property 'retrievable' must + also be set to false. This property must be true or unset for key fields, for + new fields, and for non-vector fields, and it must be null for complex fields. + Disabling this property will reduce index storage requirements. 
The default is + true for vector fields. + :vartype stored: bool + :ivar searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a + searchable field to a value like "sunny day", internally it will be split into + the individual tokens "sunny" and "day". This enables full-text searches for + these terms. Fields of type Edm.String or Collection(Edm.String) are searchable + by default. This property must be false for simple fields of other non-string + data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index to accommodate additional tokenized versions + of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to + false. + :vartype searchable: bool + :ivar filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields + of type Edm.String or Collection(Edm.String) that are filterable do not undergo + word-breaking, so comparisons are for exact matches only. For example, if you + set such a field f to "sunny day", $filter=f eq 'sunny' will find no matches, + but $filter=f eq 'sunny day' will. This property must be null for complex + fields. Default is true for simple fields and null for complex fields. + :vartype filterable: bool + :ivar sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default, the search engine sorts results by score, but in many + experiences users will want to sort by fields in the documents. A simple field + can be sortable only if it is single-valued (it has a single value in the scope + of the parent document). Simple collection fields cannot be sortable, since + they are multi-valued. Simple sub-fields of complex collections are also + multi-valued, and therefore cannot be sortable. This is true whether it's an + immediate parent field, or an ancestor field, that's the complex collection. + Complex fields cannot be sortable and the sortable property must be null for + such fields. The default for sortable is true for single-valued simple fields, + false for multi-valued simple fields, and null for complex fields. + :vartype sortable: bool + :ivar facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit + count by category (for example, search for digital cameras and see hits by + brand, by megapixels, by price, and so on). This property must be null for + complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all + other simple fields. + :vartype facetable: bool + :ivar analyzer: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer + or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the + field. Must be null for complex fields. 
Known values are: "ar.microsoft", "ar.lucene", + "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", + "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", + "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", + "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", + "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", + "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", + "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", + "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", + "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", + "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", + "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", + "ru.microsoft", "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", + "sl.microsoft", "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", + "te.microsoft", "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", + "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", + "pattern", "simple", "stop", and "whitespace". + :vartype analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :ivar search_analyzer: The name of the analyzer used at search time for the field. This option + can be + used only with searchable fields. It must be set together with indexAnalyzer + and it cannot be set together with the analyzer option. This property cannot be + set to the name of a language analyzer; use the analyzer property instead if + you need a language analyzer. This analyzer can be updated on an existing + field. Must be null for complex fields. Known values are: "ar.microsoft", "ar.lucene", + "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", + "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", + "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", + "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", + "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", + "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", + "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", + "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", + "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", + "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", + "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", + "ru.microsoft", "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", + "sl.microsoft", "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", + "te.microsoft", "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", + "ur.microsoft", "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", + "pattern", "simple", "stop", and "whitespace". 
+ :vartype search_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :ivar index_analyzer: The name of the analyzer used at indexing time for the field. This option + can + be used only with searchable fields. It must be set together with + searchAnalyzer and it cannot be set together with the analyzer option. This + property cannot be set to the name of a language analyzer; use the analyzer + property instead if you need a language analyzer. Once the analyzer is chosen, + it cannot be changed for the field. Must be null for complex fields. Known values are: + "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", + "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", + "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", + "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", + "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", + "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", + "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", + "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", + "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", + "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", + "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and + "whitespace". + :vartype index_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :ivar normalizer: The name of the normalizer to use for the field. This option can be used only + with fields with filterable, sortable, or facetable enabled. Once the + normalizer is chosen, it cannot be changed for the field. Must be null for + complex fields. Known values are: "asciifolding", "elision", "lowercase", "standard", and + "uppercase". + :vartype normalizer: str or ~azure.search.documents.models.LexicalNormalizerName + :ivar vector_search_dimensions: The dimensionality of the vector field. + :vartype vector_search_dimensions: int + :ivar vector_search_profile_name: The name of the vector search profile that specifies the + algorithm and + vectorizer to use when searching the vector field. + :vartype vector_search_profile_name: str + :ivar vector_encoding_format: The encoding format to interpret the field contents. "packedBit" + :vartype vector_encoding_format: str or ~azure.search.documents.models.VectorEncodingFormat + :ivar synonym_maps: A list of the names of synonym maps to associate with this field. This + option + can be used only with searchable fields. Currently only one synonym map per + field is supported. Assigning a synonym map to a field ensures that query terms + targeting that field are expanded at query-time using the rules in the synonym + map. This attribute can be changed on existing fields. Must be null or an empty + collection for complex fields. 
+ :vartype synonym_maps: list[str] + :ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. + :vartype fields: list[~azure.search.documents.models.SearchField] + """ + + name: str = rest_field() + """The name of the field, which must be unique within the fields collection of the + index or parent field. Required.""" + type: Union[str, "_models.SearchFieldDataType"] = rest_field() + """The data type of the field. Required. Known values are: \"Edm.String\", \"Edm.Int32\", + \"Edm.Int64\", \"Edm.Double\", \"Edm.Boolean\", \"Edm.DateTimeOffset\", \"Edm.GeographyPoint\", + \"Edm.ComplexType\", \"Edm.Single\", \"Edm.Half\", \"Edm.Int16\", \"Edm.SByte\", and + \"Edm.Byte\".""" + key: Optional[bool] = rest_field() + """A value indicating whether the field uniquely identifies documents in the + index. Exactly one top-level field in each index must be chosen as the key + field and it must be of type Edm.String. Key fields can be used to look up + documents directly and update or delete specific documents. Default is false + for simple fields and null for complex fields.""" + retrievable: Optional[bool] = rest_field() + """A value indicating whether the field can be returned in a search result. You + can disable this option if you want to use a field (for example, margin) as a + filter, sorting, or scoring mechanism but do not want the field to be visible + to the end user. This property must be true for key fields, and it must be null + for complex fields. This property can be changed on existing fields. Enabling + this property does not cause any increase in index storage requirements. + Default is true for simple fields, false for vector fields, and null for + complex fields.""" + stored: Optional[bool] = rest_field() + """An immutable value indicating whether the field will be persisted separately on + disk to be returned in a search result. You can disable this option if you + don't plan to return the field contents in a search response to save on storage + overhead. This can only be set during index creation and only for vector + fields. This property cannot be changed for existing fields or set as false for + new fields. If this property is set as false, the property 'retrievable' must + also be set to false. This property must be true or unset for key fields, for + new fields, and for non-vector fields, and it must be null for complex fields. + Disabling this property will reduce index storage requirements. The default is + true for vector fields.""" + searchable: Optional[bool] = rest_field() + """A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a + searchable field to a value like \"sunny day\", internally it will be split into + the individual tokens \"sunny\" and \"day\". This enables full-text searches for + these terms. Fields of type Edm.String or Collection(Edm.String) are searchable + by default. This property must be false for simple fields of other non-string + data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index to accommodate additional tokenized versions + of the field value for full-text searches. 
If you want to save space in your + index and you don't need a field to be included in searches, set searchable to + false.""" + filterable: Optional[bool] = rest_field() + """A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields + of type Edm.String or Collection(Edm.String) that are filterable do not undergo + word-breaking, so comparisons are for exact matches only. For example, if you + set such a field f to \"sunny day\", $filter=f eq 'sunny' will find no matches, + but $filter=f eq 'sunny day' will. This property must be null for complex + fields. Default is true for simple fields and null for complex fields.""" + sortable: Optional[bool] = rest_field() + """A value indicating whether to enable the field to be referenced in $orderby + expressions. By default, the search engine sorts results by score, but in many + experiences users will want to sort by fields in the documents. A simple field + can be sortable only if it is single-valued (it has a single value in the scope + of the parent document). Simple collection fields cannot be sortable, since + they are multi-valued. Simple sub-fields of complex collections are also + multi-valued, and therefore cannot be sortable. This is true whether it's an + immediate parent field, or an ancestor field, that's the complex collection. + Complex fields cannot be sortable and the sortable property must be null for + such fields. The default for sortable is true for single-valued simple fields, + false for multi-valued simple fields, and null for complex fields.""" + facetable: Optional[bool] = rest_field() + """A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit + count by category (for example, search for digital cameras and see hits by + brand, by megapixels, by price, and so on). This property must be null for + complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all + other simple fields.""" + analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field() + """The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer + or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the + field. Must be null for complex fields. 
Known values are: \"ar.microsoft\", \"ar.lucene\", + \"hy.lucene\", \"bn.microsoft\", \"eu.lucene\", \"bg.microsoft\", \"bg.lucene\", + \"ca.microsoft\", \"ca.lucene\", \"zh-Hans.microsoft\", \"zh-Hans.lucene\", + \"zh-Hant.microsoft\", \"zh-Hant.lucene\", \"hr.microsoft\", \"cs.microsoft\", \"cs.lucene\", + \"da.microsoft\", \"da.lucene\", \"nl.microsoft\", \"nl.lucene\", \"en.microsoft\", + \"en.lucene\", \"et.microsoft\", \"fi.microsoft\", \"fi.lucene\", \"fr.microsoft\", + \"fr.lucene\", \"gl.lucene\", \"de.microsoft\", \"de.lucene\", \"el.microsoft\", \"el.lucene\", + \"gu.microsoft\", \"he.microsoft\", \"hi.microsoft\", \"hi.lucene\", \"hu.microsoft\", + \"hu.lucene\", \"is.microsoft\", \"id.microsoft\", \"id.lucene\", \"ga.lucene\", + \"it.microsoft\", \"it.lucene\", \"ja.microsoft\", \"ja.lucene\", \"kn.microsoft\", + \"ko.microsoft\", \"ko.lucene\", \"lv.microsoft\", \"lv.lucene\", \"lt.microsoft\", + \"ml.microsoft\", \"ms.microsoft\", \"mr.microsoft\", \"nb.microsoft\", \"no.lucene\", + \"fa.lucene\", \"pl.microsoft\", \"pl.lucene\", \"pt-BR.microsoft\", \"pt-BR.lucene\", + \"pt-PT.microsoft\", \"pt-PT.lucene\", \"pa.microsoft\", \"ro.microsoft\", \"ro.lucene\", + \"ru.microsoft\", \"ru.lucene\", \"sr-cyrillic.microsoft\", \"sr-latin.microsoft\", + \"sk.microsoft\", \"sl.microsoft\", \"es.microsoft\", \"es.lucene\", \"sv.microsoft\", + \"sv.lucene\", \"ta.microsoft\", \"te.microsoft\", \"th.microsoft\", \"th.lucene\", + \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", \"vi.microsoft\", + \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", \"pattern\", \"simple\", + \"stop\", and \"whitespace\".""" + search_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field(name="searchAnalyzer") + """The name of the analyzer used at search time for the field. This option can be + used only with searchable fields. It must be set together with indexAnalyzer + and it cannot be set together with the analyzer option. This property cannot be + set to the name of a language analyzer; use the analyzer property instead if + you need a language analyzer. This analyzer can be updated on an existing + field. Must be null for complex fields. 
Known values are: \"ar.microsoft\", \"ar.lucene\", + \"hy.lucene\", \"bn.microsoft\", \"eu.lucene\", \"bg.microsoft\", \"bg.lucene\", + \"ca.microsoft\", \"ca.lucene\", \"zh-Hans.microsoft\", \"zh-Hans.lucene\", + \"zh-Hant.microsoft\", \"zh-Hant.lucene\", \"hr.microsoft\", \"cs.microsoft\", \"cs.lucene\", + \"da.microsoft\", \"da.lucene\", \"nl.microsoft\", \"nl.lucene\", \"en.microsoft\", + \"en.lucene\", \"et.microsoft\", \"fi.microsoft\", \"fi.lucene\", \"fr.microsoft\", + \"fr.lucene\", \"gl.lucene\", \"de.microsoft\", \"de.lucene\", \"el.microsoft\", \"el.lucene\", + \"gu.microsoft\", \"he.microsoft\", \"hi.microsoft\", \"hi.lucene\", \"hu.microsoft\", + \"hu.lucene\", \"is.microsoft\", \"id.microsoft\", \"id.lucene\", \"ga.lucene\", + \"it.microsoft\", \"it.lucene\", \"ja.microsoft\", \"ja.lucene\", \"kn.microsoft\", + \"ko.microsoft\", \"ko.lucene\", \"lv.microsoft\", \"lv.lucene\", \"lt.microsoft\", + \"ml.microsoft\", \"ms.microsoft\", \"mr.microsoft\", \"nb.microsoft\", \"no.lucene\", + \"fa.lucene\", \"pl.microsoft\", \"pl.lucene\", \"pt-BR.microsoft\", \"pt-BR.lucene\", + \"pt-PT.microsoft\", \"pt-PT.lucene\", \"pa.microsoft\", \"ro.microsoft\", \"ro.lucene\", + \"ru.microsoft\", \"ru.lucene\", \"sr-cyrillic.microsoft\", \"sr-latin.microsoft\", + \"sk.microsoft\", \"sl.microsoft\", \"es.microsoft\", \"es.lucene\", \"sv.microsoft\", + \"sv.lucene\", \"ta.microsoft\", \"te.microsoft\", \"th.microsoft\", \"th.lucene\", + \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", \"vi.microsoft\", + \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", \"pattern\", \"simple\", + \"stop\", and \"whitespace\".""" + index_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field(name="indexAnalyzer") + """The name of the analyzer used at indexing time for the field. This option can + be used only with searchable fields. It must be set together with + searchAnalyzer and it cannot be set together with the analyzer option. This + property cannot be set to the name of a language analyzer; use the analyzer + property instead if you need a language analyzer. Once the analyzer is chosen, + it cannot be changed for the field. Must be null for complex fields. 
Known values are: + \"ar.microsoft\", \"ar.lucene\", \"hy.lucene\", \"bn.microsoft\", \"eu.lucene\", + \"bg.microsoft\", \"bg.lucene\", \"ca.microsoft\", \"ca.lucene\", \"zh-Hans.microsoft\", + \"zh-Hans.lucene\", \"zh-Hant.microsoft\", \"zh-Hant.lucene\", \"hr.microsoft\", + \"cs.microsoft\", \"cs.lucene\", \"da.microsoft\", \"da.lucene\", \"nl.microsoft\", + \"nl.lucene\", \"en.microsoft\", \"en.lucene\", \"et.microsoft\", \"fi.microsoft\", + \"fi.lucene\", \"fr.microsoft\", \"fr.lucene\", \"gl.lucene\", \"de.microsoft\", \"de.lucene\", + \"el.microsoft\", \"el.lucene\", \"gu.microsoft\", \"he.microsoft\", \"hi.microsoft\", + \"hi.lucene\", \"hu.microsoft\", \"hu.lucene\", \"is.microsoft\", \"id.microsoft\", + \"id.lucene\", \"ga.lucene\", \"it.microsoft\", \"it.lucene\", \"ja.microsoft\", \"ja.lucene\", + \"kn.microsoft\", \"ko.microsoft\", \"ko.lucene\", \"lv.microsoft\", \"lv.lucene\", + \"lt.microsoft\", \"ml.microsoft\", \"ms.microsoft\", \"mr.microsoft\", \"nb.microsoft\", + \"no.lucene\", \"fa.lucene\", \"pl.microsoft\", \"pl.lucene\", \"pt-BR.microsoft\", + \"pt-BR.lucene\", \"pt-PT.microsoft\", \"pt-PT.lucene\", \"pa.microsoft\", \"ro.microsoft\", + \"ro.lucene\", \"ru.microsoft\", \"ru.lucene\", \"sr-cyrillic.microsoft\", + \"sr-latin.microsoft\", \"sk.microsoft\", \"sl.microsoft\", \"es.microsoft\", \"es.lucene\", + \"sv.microsoft\", \"sv.lucene\", \"ta.microsoft\", \"te.microsoft\", \"th.microsoft\", + \"th.lucene\", \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", + \"vi.microsoft\", \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", + \"pattern\", \"simple\", \"stop\", and \"whitespace\".""" + normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = rest_field() + """The name of the normalizer to use for the field. This option can be used only + with fields with filterable, sortable, or facetable enabled. Once the + normalizer is chosen, it cannot be changed for the field. Must be null for + complex fields. Known values are: \"asciifolding\", \"elision\", \"lowercase\", \"standard\", + and \"uppercase\".""" + vector_search_dimensions: Optional[int] = rest_field(name="dimensions") + """The dimensionality of the vector field.""" + vector_search_profile_name: Optional[str] = rest_field(name="vectorSearchProfile") + """The name of the vector search profile that specifies the algorithm and + vectorizer to use when searching the vector field.""" + vector_encoding_format: Optional[Union[str, "_models.VectorEncodingFormat"]] = rest_field(name="vectorEncoding") + """The encoding format to interpret the field contents. \"packedBit\"""" + synonym_maps: Optional[List[str]] = rest_field(name="synonymMaps") + """A list of the names of synonym maps to associate with this field. This option + can be used only with searchable fields. Currently only one synonym map per + field is supported. Assigning a synonym map to a field ensures that query terms + targeting that field are expanded at query-time using the rules in the synonym + map. This attribute can be changed on existing fields. Must be null or an empty + collection for complex fields.""" + fields: Optional[List["_models.SearchField"]] = rest_field() + """A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). 
Must be null or empty for simple fields.""" + + @overload + def __init__( + self, + *, + name: str, + type: Union[str, "_models.SearchFieldDataType"], + key: Optional[bool] = None, + retrievable: Optional[bool] = None, + stored: Optional[bool] = None, + searchable: Optional[bool] = None, + filterable: Optional[bool] = None, + sortable: Optional[bool] = None, + facetable: Optional[bool] = None, + analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, + search_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, + index_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, + normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = None, + vector_search_dimensions: Optional[int] = None, + vector_search_profile_name: Optional[str] = None, + vector_encoding_format: Optional[Union[str, "_models.VectorEncodingFormat"]] = None, + synonym_maps: Optional[List[str]] = None, + fields: Optional[List["_models.SearchField"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndex(_model_base.Model): + """Represents a search index definition, which describes the fields and search + behavior of an index. + + + :ivar name: The name of the index. Required. + :vartype name: str + :ivar fields: The fields of the index. Required. + :vartype fields: list[~azure.search.documents.models.SearchField] + :ivar scoring_profiles: The scoring profiles for the index. + :vartype scoring_profiles: list[~azure.search.documents.models.ScoringProfile] + :ivar default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If + this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :vartype default_scoring_profile: str + :ivar cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :vartype cors_options: ~azure.search.documents.models.CorsOptions + :ivar suggesters: The suggesters for the index. + :vartype suggesters: list[~azure.search.documents.models.SearchSuggester] + :ivar analyzers: The analyzers for the index. + :vartype analyzers: list[~azure.search.documents.models.LexicalAnalyzer] + :ivar tokenizers: The tokenizers for the index. + :vartype tokenizers: list[~azure.search.documents.models.LexicalTokenizer] + :ivar token_filters: The token filters for the index. + :vartype token_filters: list[~azure.search.documents.models.TokenFilter] + :ivar char_filters: The character filters for the index. + :vartype char_filters: list[~azure.search.documents.models.CharFilter] + :ivar normalizers: The normalizers for the index. + :vartype normalizers: list[~azure.search.documents.models.LexicalNormalizer] + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key + is used to provide an additional level of encryption-at-rest for your data when + you want full assurance that no one, not even Microsoft, can decrypt your data. + Once you have encrypted your data, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this + property as needed if you want to rotate your encryption key; Your data will be + unaffected. 
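As a quick illustration of the keyword-only overload above, the snippet below sketches a key field, an analyzed string field, and a vector field. The import path is an assumption; the generated models are expected to surface through the package's public models namespace, so adjust the import to wherever this patch re-exports them.

from azure.search.documents.indexes.models import SearchField  # assumed re-export path

hotel_id = SearchField(name="hotelId", type="Edm.String", key=True, filterable=True)

description = SearchField(
    name="description",
    type="Edm.String",
    searchable=True,
    analyzer="en.lucene",              # one of the LexicalAnalyzerName known values
    synonym_maps=["hotel-synonyms"],   # only one synonym map per field is currently supported
)

description_vector = SearchField(
    name="descriptionVector",
    type="Collection(Edm.Single)",     # collection types are passed as strings
    searchable=True,
    vector_search_dimensions=1536,
    vector_search_profile_name="my-vector-profile",
)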
Encryption with customer-managed keys is not available for free + search services, and is only available for paid services created on or after + January 1, 2019. + :vartype encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + :ivar similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined + at index creation time and cannot be modified on existing indexes. If null, the + ClassicSimilarity algorithm is used. + :vartype similarity: ~azure.search.documents.models.SimilarityAlgorithm + :ivar semantic_search: Defines parameters for a search index that influence semantic + capabilities. + :vartype semantic_search: ~azure.search.documents.models.SemanticSearch + :ivar vector_search: Contains configuration options related to vector search. + :vartype vector_search: ~azure.search.documents.models.VectorSearch + :ivar e_tag: The ETag of the index. + :vartype e_tag: str + """ + + name: str = rest_field() + """The name of the index. Required.""" + fields: List["_models.SearchField"] = rest_field() + """The fields of the index. Required.""" + scoring_profiles: Optional[List["_models.ScoringProfile"]] = rest_field(name="scoringProfiles") + """The scoring profiles for the index.""" + default_scoring_profile: Optional[str] = rest_field(name="defaultScoringProfile") + """The name of the scoring profile to use if none is specified in the query. If + this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used.""" + cors_options: Optional["_models.CorsOptions"] = rest_field(name="corsOptions") + """Options to control Cross-Origin Resource Sharing (CORS) for the index.""" + suggesters: Optional[List["_models.SearchSuggester"]] = rest_field() + """The suggesters for the index.""" + analyzers: Optional[List["_models.LexicalAnalyzer"]] = rest_field() + """The analyzers for the index.""" + tokenizers: Optional[List["_models.LexicalTokenizer"]] = rest_field() + """The tokenizers for the index.""" + token_filters: Optional[List["_models.TokenFilter"]] = rest_field(name="tokenFilters") + """The token filters for the index.""" + char_filters: Optional[List["_models.CharFilter"]] = rest_field(name="charFilters") + """The character filters for the index.""" + normalizers: Optional[List["_models.LexicalNormalizer"]] = rest_field() + """The normalizers for the index.""" + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + """A description of an encryption key that you create in Azure Key Vault. This key + is used to provide an additional level of encryption-at-rest for your data when + you want full assurance that no one, not even Microsoft, can decrypt your data. + Once you have encrypted your data, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this + property as needed if you want to rotate your encryption key; Your data will be + unaffected. Encryption with customer-managed keys is not available for free + search services, and is only available for paid services created on or after + January 1, 2019.""" + similarity: Optional["_models.SimilarityAlgorithm"] = rest_field() + """The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined + at index creation time and cannot be modified on existing indexes. 
If null, the + ClassicSimilarity algorithm is used.""" + semantic_search: Optional["_models.SemanticSearch"] = rest_field(name="semantic") + """Defines parameters for a search index that influence semantic capabilities.""" + vector_search: Optional["_models.VectorSearch"] = rest_field(name="vectorSearch") + """Contains configuration options related to vector search.""" + e_tag: Optional[str] = rest_field(name="@odata.etag") + """The ETag of the index.""" + + @overload + def __init__( + self, + *, + name: str, + fields: List["_models.SearchField"], + scoring_profiles: Optional[List["_models.ScoringProfile"]] = None, + default_scoring_profile: Optional[str] = None, + cors_options: Optional["_models.CorsOptions"] = None, + suggesters: Optional[List["_models.SearchSuggester"]] = None, + analyzers: Optional[List["_models.LexicalAnalyzer"]] = None, + tokenizers: Optional[List["_models.LexicalTokenizer"]] = None, + token_filters: Optional[List["_models.TokenFilter"]] = None, + char_filters: Optional[List["_models.CharFilter"]] = None, + normalizers: Optional[List["_models.LexicalNormalizer"]] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + similarity: Optional["_models.SimilarityAlgorithm"] = None, + semantic_search: Optional["_models.SemanticSearch"] = None, + vector_search: Optional["_models.VectorSearch"] = None, + e_tag: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexer(_model_base.Model): + """Represents an indexer. + + + :ivar name: The name of the indexer. Required. + :vartype name: str + :ivar description: The description of the indexer. + :vartype description: str + :ivar data_source_name: The name of the datasource from which this indexer reads data. + Required. + :vartype data_source_name: str + :ivar skillset_name: The name of the skillset executing with this indexer. + :vartype skillset_name: str + :ivar target_index_name: The name of the index to which this indexer writes data. Required. + :vartype target_index_name: str + :ivar schedule: The schedule for this indexer. + :vartype schedule: ~azure.search.documents.models.IndexingSchedule + :ivar parameters: Parameters for indexer execution. + :vartype parameters: ~azure.search.documents.models.IndexingParameters + :ivar field_mappings: Defines mappings between fields in the data source and corresponding + target + fields in the index. + :vartype field_mappings: list[~azure.search.documents.models.FieldMapping] + :ivar output_field_mappings: Output field mappings are applied after enrichment and immediately + before + indexing. + :vartype output_field_mappings: list[~azure.search.documents.models.FieldMapping] + :ivar is_disabled: A value indicating whether the indexer is disabled. Default is false. + :vartype is_disabled: bool + :ivar e_tag: The ETag of the indexer. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key + is used to provide an additional level of encryption-at-rest for your indexer + definition (as well as indexer execution status) when you want full assurance + that no one, not even Microsoft, can decrypt them. Once you have encrypted your + indexer definition, it will always remain encrypted. 
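A minimal sketch of building a SearchIndex from the fields declared in the previous sketch, under the same assumed import path. It also exercises the raw-JSON mapping overload documented on the model, which accepts the wire-format names directly.

from azure.search.documents.indexes.models import SearchIndex  # assumed re-export path

index = SearchIndex(
    name="hotels",
    fields=[hotel_id, description, description_vector],  # fields from the sketch above
)

# The mapping overload takes the REST payload shape as-is:
index_from_json = SearchIndex(
    {"name": "hotels", "fields": [{"name": "hotelId", "type": "Edm.String", "key": True}]}
)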
The search service will + ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your indexer definition (and + indexer execution status) will be unaffected. Encryption with customer-managed + keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :vartype encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + :ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification + steps without having to rebuild the index every time. + :vartype cache: ~azure.search.documents.models.SearchIndexerCache + """ + + name: str = rest_field() + """The name of the indexer. Required.""" + description: Optional[str] = rest_field() + """The description of the indexer.""" + data_source_name: str = rest_field(name="dataSourceName") + """The name of the datasource from which this indexer reads data. Required.""" + skillset_name: Optional[str] = rest_field(name="skillsetName") + """The name of the skillset executing with this indexer.""" + target_index_name: str = rest_field(name="targetIndexName") + """The name of the index to which this indexer writes data. Required.""" + schedule: Optional["_models.IndexingSchedule"] = rest_field() + """The schedule for this indexer.""" + parameters: Optional["_models.IndexingParameters"] = rest_field() + """Parameters for indexer execution.""" + field_mappings: Optional[List["_models.FieldMapping"]] = rest_field(name="fieldMappings") + """Defines mappings between fields in the data source and corresponding target + fields in the index.""" + output_field_mappings: Optional[List["_models.FieldMapping"]] = rest_field(name="outputFieldMappings") + """Output field mappings are applied after enrichment and immediately before + indexing.""" + is_disabled: Optional[bool] = rest_field(name="disabled") + """A value indicating whether the indexer is disabled. Default is false.""" + e_tag: Optional[str] = rest_field(name="@odata.etag") + """The ETag of the indexer.""" + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + """A description of an encryption key that you create in Azure Key Vault. This key + is used to provide an additional level of encryption-at-rest for your indexer + definition (as well as indexer execution status) when you want full assurance + that no one, not even Microsoft, can decrypt them. Once you have encrypted your + indexer definition, it will always remain encrypted. The search service will + ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your indexer definition (and + indexer execution status) will be unaffected. 
Encryption with customer-managed + keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019.""" + cache: Optional["_models.SearchIndexerCache"] = rest_field() + """Adds caching to an enrichment pipeline to allow for incremental modification + steps without having to rebuild the index every time.""" + + @overload + def __init__( + self, + *, + name: str, + data_source_name: str, + target_index_name: str, + description: Optional[str] = None, + skillset_name: Optional[str] = None, + schedule: Optional["_models.IndexingSchedule"] = None, + parameters: Optional["_models.IndexingParameters"] = None, + field_mappings: Optional[List["_models.FieldMapping"]] = None, + output_field_mappings: Optional[List["_models.FieldMapping"]] = None, + is_disabled: Optional[bool] = None, + e_tag: Optional[str] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + cache: Optional["_models.SearchIndexerCache"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerCache(_model_base.Model): + """The type of the cache. + + :ivar storage_connection_string: The connection string to the storage account where the cache + data will be + persisted. + :vartype storage_connection_string: str + :ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled. + :vartype enable_reprocessing: bool + :ivar identity: The user-assigned managed identity used for connections to the enrichment + cache. If the connection string indicates an identity (ResourceId) and it's + not specified, the system-assigned managed identity is used. On updates to the + indexer, if the identity is unspecified, the value remains unchanged. If set to + "none", the value of this property is cleared. + :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity + """ + + storage_connection_string: Optional[str] = rest_field(name="storageConnectionString") + """The connection string to the storage account where the cache data will be + persisted.""" + enable_reprocessing: Optional[bool] = rest_field(name="enableReprocessing") + """Specifies whether incremental reprocessing is enabled.""" + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + """The user-assigned managed identity used for connections to the enrichment + cache. If the connection string indicates an identity (ResourceId) and it's + not specified, the system-assigned managed identity is used. On updates to the + indexer, if the identity is unspecified, the value remains unchanged. If set to + \"none\", the value of this property is cleared.""" + + @overload + def __init__( + self, + *, + storage_connection_string: Optional[str] = None, + enable_reprocessing: Optional[bool] = None, + identity: Optional["_models.SearchIndexerDataIdentity"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerDataContainer(_model_base.Model): + """Represents information about the entity (such as Azure SQL table or CosmosDB + collection) that will be indexed. + + + :ivar name: The name of the table or view (for Azure SQL data source) or collection (for + CosmosDB data source) that will be indexed. Required. + :vartype name: str + :ivar query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. + :vartype query: str + """ + + name: str = rest_field() + """The name of the table or view (for Azure SQL data source) or collection (for + CosmosDB data source) that will be indexed. Required.""" + query: Optional[str] = rest_field() + """A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources.""" + + @overload + def __init__( + self, + *, + name: str, + query: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerDataIdentity(_model_base.Model): + """Abstract base type for data identities. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + SearchIndexerDataNoneIdentity, SearchIndexerDataUserAssignedIdentity + + + :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :vartype odata_type: str + """ + + __mapping__: Dict[str, _model_base.Model] = {} + odata_type: str = rest_discriminator(name="@odata.type") + """The discriminator for derived types. Required. Default value is None.""" + + @overload + def __init__( + self, + *, + odata_type: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerDataNoneIdentity( + SearchIndexerDataIdentity, discriminator="#Microsoft.Azure.Search.DataNoneIdentity" +): + """Clears the identity property of a datasource. + + + :ivar odata_type: A URI fragment specifying the type of identity. Required. Default value is + "#Microsoft.Azure.Search.DataNoneIdentity". + :vartype odata_type: str + """ + + odata_type: Literal["#Microsoft.Azure.Search.DataNoneIdentity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of identity. Required. Default value is + \"#Microsoft.Azure.Search.DataNoneIdentity\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.DataNoneIdentity", **kwargs) + + +class SearchIndexerDataSource(_model_base.Model): + """Represents a datasource definition, which can be used to configure an indexer. 
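The sketch below wires a SearchIndexer to a data source and target index and attaches a SearchIndexerCache, using only the keyword parameters shown above. The import path and the placeholder connection string are assumptions.

from azure.search.documents.indexes.models import (  # assumed re-export path
    SearchIndexer,
    SearchIndexerCache,
    SearchIndexerDataNoneIdentity,
)

indexer = SearchIndexer(
    name="hotels-indexer",
    data_source_name="hotels-datasource",
    target_index_name="hotels",
    is_disabled=False,  # serialized as "disabled"
    cache=SearchIndexerCache(
        storage_connection_string="<storage-connection-string>",
        enable_reprocessing=True,
        identity=SearchIndexerDataNoneIdentity(),  # clears any identity set on the cache
    ),
)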
+ + + :ivar name: The name of the datasource. Required. + :vartype name: str + :ivar description: The description of the datasource. + :vartype description: str + :ivar type: The type of the datasource. Required. Known values are: "azuresql", "cosmosdb", + "azureblob", "azuretable", "mysql", "adlsgen2", and "onelake". + :vartype type: str or ~azure.search.documents.models.SearchIndexerDataSourceType + :ivar credentials: Credentials for the datasource. Required. + :vartype credentials: ~azure.search.documents.models.DataSourceCredentials + :ivar container: The data container for the datasource. Required. + :vartype container: ~azure.search.documents.models.SearchIndexerDataContainer + :ivar identity: An explicit managed identity to use for this datasource. If not specified and + the connection string is a managed identity, the system-assigned managed + identity is used. If not specified, the value remains unchanged. If "none" is + specified, the value of this property is cleared. + :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity + :ivar data_change_detection_policy: The data change detection policy for the datasource. + :vartype data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy + :ivar data_deletion_detection_policy: The data deletion detection policy for the datasource. + :vartype data_deletion_detection_policy: + ~azure.search.documents.models.DataDeletionDetectionPolicy + :ivar e_tag: The ETag of the data source. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key + is used to provide an additional level of encryption-at-rest for your + datasource definition when you want full assurance that no one, not even + Microsoft, can decrypt your data source definition. Once you have encrypted + your data source definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this + property as needed if you want to rotate your encryption key; Your datasource + definition will be unaffected. Encryption with customer-managed keys is not + available for free search services, and is only available for paid services + created on or after January 1, 2019. + :vartype encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + """ + + name: str = rest_field() + """The name of the datasource. Required.""" + description: Optional[str] = rest_field() + """The description of the datasource.""" + type: Union[str, "_models.SearchIndexerDataSourceType"] = rest_field() + """The type of the datasource. Required. Known values are: \"azuresql\", \"cosmosdb\", + \"azureblob\", \"azuretable\", \"mysql\", \"adlsgen2\", and \"onelake\".""" + credentials: "_models.DataSourceCredentials" = rest_field() + """Credentials for the datasource. Required.""" + container: "_models.SearchIndexerDataContainer" = rest_field() + """The data container for the datasource. Required.""" + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + """An explicit managed identity to use for this datasource. If not specified and + the connection string is a managed identity, the system-assigned managed + identity is used. If not specified, the value remains unchanged. 
If \"none\" is + specified, the value of this property is cleared.""" + data_change_detection_policy: Optional["_models.DataChangeDetectionPolicy"] = rest_field( + name="dataChangeDetectionPolicy" + ) + """The data change detection policy for the datasource.""" + data_deletion_detection_policy: Optional["_models.DataDeletionDetectionPolicy"] = rest_field( + name="dataDeletionDetectionPolicy" + ) + """The data deletion detection policy for the datasource.""" + e_tag: Optional[str] = rest_field(name="@odata.etag") + """The ETag of the data source.""" + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + """A description of an encryption key that you create in Azure Key Vault. This key + is used to provide an additional level of encryption-at-rest for your + datasource definition when you want full assurance that no one, not even + Microsoft, can decrypt your data source definition. Once you have encrypted + your data source definition, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this + property as needed if you want to rotate your encryption key; Your datasource + definition will be unaffected. Encryption with customer-managed keys is not + available for free search services, and is only available for paid services + created on or after January 1, 2019.""" + + @overload + def __init__( + self, + *, + name: str, + type: Union[str, "_models.SearchIndexerDataSourceType"], + credentials: "_models.DataSourceCredentials", + container: "_models.SearchIndexerDataContainer", + description: Optional[str] = None, + identity: Optional["_models.SearchIndexerDataIdentity"] = None, + data_change_detection_policy: Optional["_models.DataChangeDetectionPolicy"] = None, + data_deletion_detection_policy: Optional["_models.DataDeletionDetectionPolicy"] = None, + e_tag: Optional[str] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerDataUserAssignedIdentity( + SearchIndexerDataIdentity, discriminator="#Microsoft.Azure.Search.DataUserAssignedIdentity" +): + """Specifies the identity for a datasource to use. + + + :ivar resource_id: The fully qualified Azure resource Id of a user assigned managed identity + typically in the form + "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" # pylint: disable=line-too-long + that should have been assigned to the search service. Required. + :vartype resource_id: str + :ivar odata_type: A URI fragment specifying the type of identity. Required. Default value is + "#Microsoft.Azure.Search.DataUserAssignedIdentity". + :vartype odata_type: str + """ + + resource_id: str = rest_field(name="userAssignedIdentity") + """The fully qualified Azure resource Id of a user assigned managed identity + typically in the form + \"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId\" # pylint: disable=line-too-long + that should have been assigned to the search service. 
Required.""" + odata_type: Literal["#Microsoft.Azure.Search.DataUserAssignedIdentity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of identity. Required. Default value is + \"#Microsoft.Azure.Search.DataUserAssignedIdentity\".""" + + @overload + def __init__( + self, + *, + resource_id: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.DataUserAssignedIdentity", **kwargs) + + +class SearchIndexerError(_model_base.Model): + """Represents an item- or document-level indexing error. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar key: The key of the item for which indexing failed. + :vartype key: str + :ivar error_message: The message describing the error that occurred while processing the item. + Required. + :vartype error_message: str + :ivar status_code: The status code indicating why the indexing operation failed. Possible + values + include: 400 for a malformed input document, 404 for document not found, 409 + for a version conflict, 422 when the index is temporarily unavailable, or 503 + for when the service is too busy. Required. + :vartype status_code: int + :ivar name: The name of the source at which the error originated. For example, this could + refer to a particular skill in the attached skillset. This may not be always + available. + :vartype name: str + :ivar details: Additional, verbose details about the error to assist in debugging the indexer. + This may not be always available. + :vartype details: str + :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This + may not be + always available. + :vartype documentation_link: str + """ + + key: Optional[str] = rest_field(visibility=["read"]) + """The key of the item for which indexing failed.""" + error_message: str = rest_field(name="errorMessage", visibility=["read"]) + """The message describing the error that occurred while processing the item. Required.""" + status_code: int = rest_field(name="statusCode", visibility=["read"]) + """The status code indicating why the indexing operation failed. Possible values + include: 400 for a malformed input document, 404 for document not found, 409 + for a version conflict, 422 when the index is temporarily unavailable, or 503 + for when the service is too busy. Required.""" + name: Optional[str] = rest_field(visibility=["read"]) + """The name of the source at which the error originated. For example, this could + refer to a particular skill in the attached skillset. This may not be always + available.""" + details: Optional[str] = rest_field(visibility=["read"]) + """Additional, verbose details about the error to assist in debugging the indexer. + This may not be always available.""" + documentation_link: Optional[str] = rest_field(name="documentationLink", visibility=["read"]) + """A link to a troubleshooting guide for these classes of errors. This may not be + always available.""" + + +class SearchIndexerIndexProjection(_model_base.Model): + """Definition of additional projections to secondary search indexes. + + + :ivar selectors: A list of projections to be performed to secondary search indexes. Required. 
+ :vartype selectors: list[~azure.search.documents.models.SearchIndexerIndexProjectionSelector] + :ivar parameters: A dictionary of index projection-specific configuration properties. Each name + is the name of a specific property. Each value must be of a primitive type. + :vartype parameters: ~azure.search.documents.models.SearchIndexerIndexProjectionsParameters + """ + + selectors: List["_models.SearchIndexerIndexProjectionSelector"] = rest_field() + """A list of projections to be performed to secondary search indexes. Required.""" + parameters: Optional["_models.SearchIndexerIndexProjectionsParameters"] = rest_field() + """A dictionary of index projection-specific configuration properties. Each name + is the name of a specific property. Each value must be of a primitive type.""" + + @overload + def __init__( + self, + *, + selectors: List["_models.SearchIndexerIndexProjectionSelector"], + parameters: Optional["_models.SearchIndexerIndexProjectionsParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerIndexProjectionSelector(_model_base.Model): + """Description for what data to store in the designated search index. + + + :ivar target_index_name: Name of the search index to project to. Must have a key field with the + 'keyword' analyzer set. Required. + :vartype target_index_name: str + :ivar parent_key_field_name: Name of the field in the search index to map the parent document's + key value + to. Must be a string field that is filterable and not the key field. Required. + :vartype parent_key_field_name: str + :ivar source_context: Source context for the projections. Represents the cardinality at which + the + document will be split into multiple sub documents. Required. + :vartype source_context: str + :ivar mappings: Mappings for the projection, or which source should be mapped to which field in + the target index. Required. + :vartype mappings: list[~azure.search.documents.models.InputFieldMappingEntry] + """ + + target_index_name: str = rest_field(name="targetIndexName") + """Name of the search index to project to. Must have a key field with the + 'keyword' analyzer set. Required.""" + parent_key_field_name: str = rest_field(name="parentKeyFieldName") + """Name of the field in the search index to map the parent document's key value + to. Must be a string field that is filterable and not the key field. Required.""" + source_context: str = rest_field(name="sourceContext") + """Source context for the projections. Represents the cardinality at which the + document will be split into multiple sub documents. Required.""" + mappings: List["_models.InputFieldMappingEntry"] = rest_field() + """Mappings for the projection, or which source should be mapped to which field in + the target index. Required.""" + + @overload + def __init__( + self, + *, + target_index_name: str, + parent_key_field_name: str, + source_context: str, + mappings: List["_models.InputFieldMappingEntry"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerIndexProjectionsParameters(_model_base.Model): + """A dictionary of index projection-specific configuration properties. Each name + is the name of a specific property. Each value must be of a primitive type. + + :ivar projection_mode: Defines behavior of the index projections in relation to the rest of the + indexer. Known values are: "skipIndexingParentDocuments" and "includeIndexingParentDocuments". + :vartype projection_mode: str or ~azure.search.documents.models.IndexProjectionMode + """ + + projection_mode: Optional[Union[str, "_models.IndexProjectionMode"]] = rest_field(name="projectionMode") + """Defines behavior of the index projections in relation to the rest of the + indexer. Known values are: \"skipIndexingParentDocuments\" and + \"includeIndexingParentDocuments\".""" + + @overload + def __init__( + self, + *, + projection_mode: Optional[Union[str, "_models.IndexProjectionMode"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStore(_model_base.Model): + """Definition of additional projections to azure blob, table, or files, of + enriched data. + + + :ivar storage_connection_string: The connection string to the storage account projections will + be stored in. Required. + :vartype storage_connection_string: str + :ivar projections: A list of additional projections to perform during indexing. Required. + :vartype projections: + list[~azure.search.documents.models.SearchIndexerKnowledgeStoreProjection] + :ivar identity: The user-assigned managed identity used for connections to Azure Storage when + writing knowledge store projections. If the connection string indicates an + identity (ResourceId) and it's not specified, the system-assigned managed + identity is used. On updates to the indexer, if the identity is unspecified, + the value remains unchanged. If set to "none", the value of this property is + cleared. + :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity + :ivar parameters: A dictionary of knowledge store-specific configuration properties. Each name + is + the name of a specific property. Each value must be of a primitive type. + :vartype parameters: ~azure.search.documents.models.SearchIndexerKnowledgeStoreParameters + """ + + storage_connection_string: str = rest_field(name="storageConnectionString") + """The connection string to the storage account projections will be stored in. Required.""" + projections: List["_models.SearchIndexerKnowledgeStoreProjection"] = rest_field() + """A list of additional projections to perform during indexing. Required.""" + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + """The user-assigned managed identity used for connections to Azure Storage when + writing knowledge store projections. If the connection string indicates an + identity (ResourceId) and it's not specified, the system-assigned managed + identity is used. On updates to the indexer, if the identity is unspecified, + the value remains unchanged. 
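A small sketch of the index-projection models defined above, assuming the same public import path and that InputFieldMappingEntry (defined elsewhere in this file) accepts name and source keywords.

from azure.search.documents.indexes.models import (  # assumed re-export path
    InputFieldMappingEntry,
    SearchIndexerIndexProjection,
    SearchIndexerIndexProjectionSelector,
    SearchIndexerIndexProjectionsParameters,
)

index_projection = SearchIndexerIndexProjection(
    selectors=[
        SearchIndexerIndexProjectionSelector(
            target_index_name="hotels-chunks",
            parent_key_field_name="parentId",
            source_context="/document/pages/*",
            mappings=[InputFieldMappingEntry(name="chunk", source="/document/pages/*")],
        )
    ],
    parameters=SearchIndexerIndexProjectionsParameters(
        projection_mode="skipIndexingParentDocuments"  # an IndexProjectionMode known value
    ),
)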
If set to \"none\", the value of this property is + cleared.""" + parameters: Optional["_models.SearchIndexerKnowledgeStoreParameters"] = rest_field() + """A dictionary of knowledge store-specific configuration properties. Each name is + the name of a specific property. Each value must be of a primitive type.""" + + @overload + def __init__( + self, + *, + storage_connection_string: str, + projections: List["_models.SearchIndexerKnowledgeStoreProjection"], + identity: Optional["_models.SearchIndexerDataIdentity"] = None, + parameters: Optional["_models.SearchIndexerKnowledgeStoreParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStoreProjectionSelector(_model_base.Model): # pylint: disable=name-too-long + """Abstract class to share properties between concrete selectors. + + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + """ + + reference_key_name: Optional[str] = rest_field(name="referenceKeyName") + """Name of reference key to different projection.""" + generated_key_name: Optional[str] = rest_field(name="generatedKeyName") + """Name of generated key to store projection under.""" + source: Optional[str] = rest_field() + """Source data to project.""" + source_context: Optional[str] = rest_field(name="sourceContext") + """Source context for complex projections.""" + inputs: Optional[List["_models.InputFieldMappingEntry"]] = rest_field() + """Nested inputs for complex projections.""" + + @overload + def __init__( + self, + *, + reference_key_name: Optional[str] = None, + generated_key_name: Optional[str] = None, + source: Optional[str] = None, + source_context: Optional[str] = None, + inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStoreBlobProjectionSelector( + SearchIndexerKnowledgeStoreProjectionSelector +): # pylint: disable=name-too-long + """Abstract class to share properties between concrete selectors. + + + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar storage_container: Blob container to store projections in. Required. 
+ :vartype storage_container: str + """ + + storage_container: str = rest_field(name="storageContainer") + """Blob container to store projections in. Required.""" + + @overload + def __init__( + self, + *, + storage_container: str, + reference_key_name: Optional[str] = None, + generated_key_name: Optional[str] = None, + source: Optional[str] = None, + source_context: Optional[str] = None, + inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStoreFileProjectionSelector( + SearchIndexerKnowledgeStoreBlobProjectionSelector +): # pylint: disable=name-too-long + """Projection definition for what data to store in Azure Files. + + + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar storage_container: Blob container to store projections in. Required. + :vartype storage_container: str + """ + + @overload + def __init__( + self, + *, + storage_container: str, + reference_key_name: Optional[str] = None, + generated_key_name: Optional[str] = None, + source: Optional[str] = None, + source_context: Optional[str] = None, + inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStoreObjectProjectionSelector( + SearchIndexerKnowledgeStoreBlobProjectionSelector +): # pylint: disable=name-too-long + """Projection definition for what data to store in Azure Blob. + + + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar storage_container: Blob container to store projections in. Required. + :vartype storage_container: str + """ + + @overload + def __init__( + self, + *, + storage_container: str, + reference_key_name: Optional[str] = None, + generated_key_name: Optional[str] = None, + source: Optional[str] = None, + source_context: Optional[str] = None, + inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStoreParameters(_model_base.Model): + """A dictionary of knowledge store-specific configuration properties. Each name is + the name of a specific property. Each value must be of a primitive type. + + :ivar synthesize_generated_key_name: Whether or not projections should synthesize a generated + key name if one isn't + already present. + :vartype synthesize_generated_key_name: bool + """ + + synthesize_generated_key_name: Optional[bool] = rest_field(name="synthesizeGeneratedKeyName") + """Whether or not projections should synthesize a generated key name if one isn't + already present.""" + + @overload + def __init__( + self, + *, + synthesize_generated_key_name: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStoreProjection(_model_base.Model): + """Container object for various projection selectors. + + :ivar tables: Projections to Azure Table storage. + :vartype tables: + list[~azure.search.documents.models.SearchIndexerKnowledgeStoreTableProjectionSelector] + :ivar objects: Projections to Azure Blob storage. + :vartype objects: + list[~azure.search.documents.models.SearchIndexerKnowledgeStoreObjectProjectionSelector] + :ivar files: Projections to Azure File storage. + :vartype files: + list[~azure.search.documents.models.SearchIndexerKnowledgeStoreFileProjectionSelector] + """ + + tables: Optional[List["_models.SearchIndexerKnowledgeStoreTableProjectionSelector"]] = rest_field() + """Projections to Azure Table storage.""" + objects: Optional[List["_models.SearchIndexerKnowledgeStoreObjectProjectionSelector"]] = rest_field() + """Projections to Azure Blob storage.""" + files: Optional[List["_models.SearchIndexerKnowledgeStoreFileProjectionSelector"]] = rest_field() + """Projections to Azure File storage.""" + + @overload + def __init__( + self, + *, + tables: Optional[List["_models.SearchIndexerKnowledgeStoreTableProjectionSelector"]] = None, + objects: Optional[List["_models.SearchIndexerKnowledgeStoreObjectProjectionSelector"]] = None, + files: Optional[List["_models.SearchIndexerKnowledgeStoreFileProjectionSelector"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerKnowledgeStoreTableProjectionSelector( + SearchIndexerKnowledgeStoreProjectionSelector +): # pylint: disable=name-too-long + """Description for what data to store in Azure Tables. + + + :ivar reference_key_name: Name of reference key to different projection. + :vartype reference_key_name: str + :ivar generated_key_name: Name of generated key to store projection under. + :vartype generated_key_name: str + :ivar source: Source data to project. + :vartype source: str + :ivar source_context: Source context for complex projections. + :vartype source_context: str + :ivar inputs: Nested inputs for complex projections. 
+ :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar table_name: Name of the Azure table to store projected data in. Required. + :vartype table_name: str + """ + + table_name: str = rest_field(name="tableName") + """Name of the Azure table to store projected data in. Required.""" + + @overload + def __init__( + self, + *, + table_name: str, + reference_key_name: Optional[str] = None, + generated_key_name: Optional[str] = None, + source: Optional[str] = None, + source_context: Optional[str] = None, + inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerLimits(_model_base.Model): + """Represents the limits that can be applied to an indexer. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar max_run_time: The maximum duration that the indexer is permitted to run for one + execution. + :vartype max_run_time: ~datetime.timedelta + :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be + considered valid for + indexing. + :vartype max_document_extraction_size: int + :ivar max_document_content_characters_to_extract: The maximum number of characters that will be + extracted from a document picked + up for indexing. + :vartype max_document_content_characters_to_extract: int + """ + + max_run_time: Optional[datetime.timedelta] = rest_field(name="maxRunTime", visibility=["read"]) + """The maximum duration that the indexer is permitted to run for one execution.""" + max_document_extraction_size: Optional[int] = rest_field(name="maxDocumentExtractionSize", visibility=["read"]) + """The maximum size of a document, in bytes, which will be considered valid for + indexing.""" + max_document_content_characters_to_extract: Optional[int] = rest_field( + name="maxDocumentContentCharactersToExtract", visibility=["read"] + ) + """The maximum number of characters that will be extracted from a document picked + up for indexing.""" + + +class SearchIndexerSkillset(_model_base.Model): + """A list of skills. + + + :ivar name: The name of the skillset. Required. + :vartype name: str + :ivar description: The description of the skillset. + :vartype description: str + :ivar skills: A list of skills in the skillset. Required. + :vartype skills: list[~azure.search.documents.models.SearchIndexerSkill] + :ivar cognitive_services_account: Details about the Azure AI service to be used when running + skills. + :vartype cognitive_services_account: ~azure.search.documents.models.CognitiveServicesAccount + :ivar knowledge_store: Definition of additional projections to Azure blob, table, or files, of + enriched data. + :vartype knowledge_store: ~azure.search.documents.models.SearchIndexerKnowledgeStore + :ivar index_projection: Definition of additional projections to secondary search index(es). + :vartype index_projection: ~azure.search.documents.models.SearchIndexerIndexProjection + :ivar e_tag: The ETag of the skillset. + :vartype e_tag: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. 
+ This key + is used to provide an additional level of encryption-at-rest for your skillset + definition when you want full assurance that no one, not even Microsoft, can + decrypt your skillset definition. Once you have encrypted your skillset + definition, it will always remain encrypted. The search service will ignore + attempts to set this property to null. You can change this property as needed + if you want to rotate your encryption key; Your skillset definition will be + unaffected. Encryption with customer-managed keys is not available for free + search services, and is only available for paid services created on or after + January 1, 2019. + :vartype encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + """ + + name: str = rest_field() + """The name of the skillset. Required.""" + description: Optional[str] = rest_field() + """The description of the skillset.""" + skills: List["_models.SearchIndexerSkill"] = rest_field() + """A list of skills in the skillset. Required.""" + cognitive_services_account: Optional["_models.CognitiveServicesAccount"] = rest_field(name="cognitiveServices") + """Details about the Azure AI service to be used when running skills.""" + knowledge_store: Optional["_models.SearchIndexerKnowledgeStore"] = rest_field(name="knowledgeStore") + """Definition of additional projections to Azure blob, table, or files, of + enriched data.""" + index_projection: Optional["_models.SearchIndexerIndexProjection"] = rest_field(name="indexProjections") + """Definition of additional projections to secondary search index(es).""" + e_tag: Optional[str] = rest_field(name="@odata.etag") + """The ETag of the skillset.""" + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + """A description of an encryption key that you create in Azure Key Vault. This key + is used to provide an additional level of encryption-at-rest for your skillset + definition when you want full assurance that no one, not even Microsoft, can + decrypt your skillset definition. Once you have encrypted your skillset + definition, it will always remain encrypted. The search service will ignore + attempts to set this property to null. You can change this property as needed + if you want to rotate your encryption key; Your skillset definition will be + unaffected. Encryption with customer-managed keys is not available for free + search services, and is only available for paid services created on or after + January 1, 2019.""" + + @overload + def __init__( + self, + *, + name: str, + skills: List["_models.SearchIndexerSkill"], + description: Optional[str] = None, + cognitive_services_account: Optional["_models.CognitiveServicesAccount"] = None, + knowledge_store: Optional["_models.SearchIndexerKnowledgeStore"] = None, + index_projection: Optional["_models.SearchIndexerIndexProjection"] = None, + e_tag: Optional[str] = None, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchIndexerStatus(_model_base.Model): + """Represents the current status and execution history of an indexer. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar status: Overall indexer status. 
Required. Known values are: "unknown", "error", and
+     "running".
+    :vartype status: str or ~azure.search.documents.models.IndexerStatus
+    :ivar last_result: The result of the most recent or an in-progress indexer execution.
+    :vartype last_result: ~azure.search.documents.models.IndexerExecutionResult
+    :ivar execution_history: History of the recent indexer executions, sorted in reverse
+     chronological order. Required.
+    :vartype execution_history: list[~azure.search.documents.models.IndexerExecutionResult]
+    :ivar limits: The execution limits for the indexer. Required.
+    :vartype limits: ~azure.search.documents.models.SearchIndexerLimits
+    """
+
+    status: Union[str, "_models.IndexerStatus"] = rest_field(visibility=["read"])
+    """Overall indexer status. Required. Known values are: \"unknown\", \"error\", and \"running\"."""
+    last_result: Optional["_models.IndexerExecutionResult"] = rest_field(name="lastResult", visibility=["read"])
+    """The result of the most recent or an in-progress indexer execution."""
+    execution_history: List["_models.IndexerExecutionResult"] = rest_field(name="executionHistory", visibility=["read"])
+    """History of the recent indexer executions, sorted in reverse chronological order. Required."""
+    limits: "_models.SearchIndexerLimits" = rest_field(visibility=["read"])
+    """The execution limits for the indexer. Required."""
+
+
+class SearchIndexerWarning(_model_base.Model):
+    """Represents an item-level warning.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+    :ivar key: The key of the item which generated a warning.
+    :vartype key: str
+    :ivar message: The message describing the warning that occurred while processing the item.
+     Required.
+    :vartype message: str
+    :ivar name: The name of the source at which the warning originated. For example, this could
+     refer to a particular skill in the attached skillset. This may not always be
+     available.
+    :vartype name: str
+    :ivar details: Additional, verbose details about the warning to assist in debugging the
+     indexer. This may not always be available.
+    :vartype details: str
+    :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This
+     may not always be available.
+    :vartype documentation_link: str
+    """
+
+    key: Optional[str] = rest_field(visibility=["read"])
+    """The key of the item which generated a warning."""
+    message: str = rest_field(visibility=["read"])
+    """The message describing the warning that occurred while processing the item. Required."""
+    name: Optional[str] = rest_field(visibility=["read"])
+    """The name of the source at which the warning originated. For example, this could
+     refer to a particular skill in the attached skillset. This may not always be
+     available."""
+    details: Optional[str] = rest_field(visibility=["read"])
+    """Additional, verbose details about the warning to assist in debugging the
+     indexer. This may not always be available."""
+    documentation_link: Optional[str] = rest_field(name="documentationLink", visibility=["read"])
+    """A link to a troubleshooting guide for these classes of warnings. This may not
+     always be available."""
+
+
+class SearchRequest(_model_base.Model):
+    """Parameters for filtering, sorting, faceting, paging, and other search query
+    behaviors.
+
+    :ivar include_total_result_count: A value that specifies whether to fetch the total count of
+     results. Default is
+     false. Setting this value to true may have a performance impact.
Note that the + count returned is an approximation. + :vartype include_total_result_count: bool + :ivar facets: The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list + of name:value pairs. + :vartype facets: list[str] + :ivar filter: The OData $filter expression to apply to the search query. + :vartype filter: str + :ivar highlight_fields: The comma-separated list of field names to use for hit highlights. Only + searchable fields can be used for hit highlighting. + :vartype highlight_fields: str + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>. + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>. + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be + covered by a search query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services + with only one replica. The default is 100. + :vartype minimum_coverage: float + :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the + geo.distance() or the search.score() functions. Each expression can be followed + by asc to indicate ascending, or desc to indicate descending. The default is + ascending order. Ties will be broken by the match scores of documents. If no + $orderby is specified, the default sort order is descending by document match + score. There can be at most 32 $orderby clauses. + :vartype order_by: str + :ivar query_type: A value that specifies the syntax of the search query. The default is + 'simple'. + Use 'full' if your query uses the Lucene query syntax. Known values are: "simple", "full", and + "semantic". + :vartype query_type: str or ~azure.search.documents.models.QueryType + :ivar scoring_statistics: A value that specifies whether we want to calculate scoring + statistics (such as + document frequency) globally for more consistent scoring, or locally, for lower + latency. The default is 'local'. Use 'global' to aggregate scoring statistics + globally before scoring. Using global scoring statistics can increase latency + of search queries. Known values are: "local" and "global". + :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :ivar session_id: A value to be used to create a sticky session, which can help getting more + consistent results. As long as the same sessionId is used, a best-effort + attempt will be made to target the same replica set. Be wary that reusing the + same sessionID values repeatedly can interfere with the load balancing of the + requests across replicas and adversely affect the performance of the search + service. The value used as sessionId cannot start with a '_' character. + :vartype session_id: str + :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for + example, + referencePointParameter) using the format name-values. For example, if the + scoring profile defines a function with a parameter called 'mylocation' the + parameter string would be "mylocation--122.2,44.8" (without the quotes). 
+ :vartype scoring_parameters: list[str] + :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching + documents + in order to sort the results. + :vartype scoring_profile: str + :ivar debug: Enables a debugging tool that can be used to further explore your reranked + results. Known values are: "disabled", "semantic", "vector", "queryRewrites", and "all". + :vartype debug: str or ~azure.search.documents.models.QueryDebugMode + :ivar search_text: A full-text search query expression; Use "*" or omit this parameter to match + all documents. + :vartype search_text: str + :ivar search_fields: The comma-separated list of field names to which to scope the full-text + search. + When using fielded search (fieldName:searchExpression) in a full Lucene query, + the field names of each fielded search expression take precedence over any + field names listed in this parameter. + :vartype search_fields: str + :ivar search_mode: A value that specifies whether any or all of the search terms must be + matched + in order to count the document as a match. Known values are: "any" and "all". + :vartype search_mode: str or ~azure.search.documents.models.SearchMode + :ivar query_language: A value that specifies the language of the search query. Known values + are: "none", "en-us", "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", + "es-mx", "zh-cn", "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", + "nl-be", "nl-nl", "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", + "ar-kw", "ar-jo", "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", + "ta-in", "vi-vn", "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", + "et-ee", "ca-es", "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", + "eu-es", "gl-es", "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", + "te-in", and "ur-pk". + :vartype query_language: str or ~azure.search.documents.models.QueryLanguage + :ivar speller: A value that specified the type of the speller to use to spell-correct + individual search query terms. Known values are: "none" and "lexicon". + :vartype speller: str or ~azure.search.documents.models.QuerySpellerType + :ivar select: The comma-separated list of fields to retrieve. If unspecified, all fields + marked as retrievable in the schema are included. + :vartype select: str + :ivar skip: The number of search results to skip. This value cannot be greater than + 100,000. If you need to scan documents in sequence, but cannot use skip due to + this limitation, consider using orderby on a totally-ordered key and filter + with a range query instead. + :vartype skip: int + :ivar top: The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are + truncated due to server-side paging, the response will include a continuation + token that can be used to issue another Search request for the next page of + results. + :vartype top: int + :ivar semantic_configuration: The name of a semantic configuration that will be used when + processing + documents for queries of type semantic. + :vartype semantic_configuration: str + :ivar semantic_error_handling: Allows the user to choose whether a semantic call should fail + completely + (default / current behavior), or to return partial results. Known values are: "partial" and + "fail". 
+ :vartype semantic_error_handling: str or ~azure.search.documents.models.SemanticErrorMode + :ivar semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount of + time it takes for + semantic enrichment to finish processing before the request fails. + :vartype semantic_max_wait_in_milliseconds: int + :ivar semantic_query: Allows setting a separate search query that will be solely used for + semantic + reranking, semantic captions and semantic answers. Is useful for scenarios + where there is a need to use different queries between the base retrieval and + ranking phase, and the L2 semantic phase. + :vartype semantic_query: str + :ivar answers: A value that specifies whether answers should be returned as part of the search + response. Known values are: "none" and "extractive". + :vartype answers: str or ~azure.search.documents.models.QueryAnswerType + :ivar captions: A value that specifies whether captions should be returned as part of the + search response. Known values are: "none" and "extractive". + :vartype captions: str or ~azure.search.documents.models.QueryCaptionType + :ivar query_rewrites: A value that specifies whether query rewrites should be generated to + augment + the search query. Known values are: "none" and "generative". + :vartype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType + :ivar semantic_fields: The comma-separated list of field names used for semantic ranking. + :vartype semantic_fields: str + :ivar vector_queries: The query parameters for vector and hybrid search queries. + :vartype vector_queries: list[~azure.search.documents.models.VectorQuery] + :ivar vector_filter_mode: Determines whether or not filters are applied before or after the + vector search + is performed. Default is 'preFilter' for new indexes. Known values are: "postFilter" and + "preFilter". + :vartype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode + :ivar hybrid_search: The query parameters to configure hybrid search behaviors. + :vartype hybrid_search: ~azure.search.documents.models.HybridSearch + """ + + include_total_result_count: Optional[bool] = rest_field(name="count") + """A value that specifies whether to fetch the total count of results. Default is + false. Setting this value to true may have a performance impact. Note that the + count returned is an approximation.""" + facets: Optional[List[str]] = rest_field() + """The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list + of name:value pairs.""" + filter: Optional[str] = rest_field() + """The OData $filter expression to apply to the search query.""" + highlight_fields: Optional[str] = rest_field(name="highlight") + """The comma-separated list of field names to use for hit highlights. Only + searchable fields can be used for hit highlighting.""" + highlight_post_tag: Optional[str] = rest_field(name="highlightPostTag") + """A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>.""" + highlight_pre_tag: Optional[str] = rest_field(name="highlightPreTag") + """A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>.""" + minimum_coverage: Optional[float] = rest_field(name="minimumCoverage") + """A number between 0 and 100 indicating the percentage of the index that must be + covered by a search query in order for the query to be reported as a success. 
+ This parameter can be useful for ensuring search availability even for services + with only one replica. The default is 100.""" + order_by: Optional[str] = rest_field(name="orderby") + """The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the + geo.distance() or the search.score() functions. Each expression can be followed + by asc to indicate ascending, or desc to indicate descending. The default is + ascending order. Ties will be broken by the match scores of documents. If no + $orderby is specified, the default sort order is descending by document match + score. There can be at most 32 $orderby clauses.""" + query_type: Optional[Union[str, "_models.QueryType"]] = rest_field(name="queryType") + """A value that specifies the syntax of the search query. The default is 'simple'. + Use 'full' if your query uses the Lucene query syntax. Known values are: \"simple\", \"full\", + and \"semantic\".""" + scoring_statistics: Optional[Union[str, "_models.ScoringStatistics"]] = rest_field(name="scoringStatistics") + """A value that specifies whether we want to calculate scoring statistics (such as + document frequency) globally for more consistent scoring, or locally, for lower + latency. The default is 'local'. Use 'global' to aggregate scoring statistics + globally before scoring. Using global scoring statistics can increase latency + of search queries. Known values are: \"local\" and \"global\".""" + session_id: Optional[str] = rest_field(name="sessionId") + """A value to be used to create a sticky session, which can help getting more + consistent results. As long as the same sessionId is used, a best-effort + attempt will be made to target the same replica set. Be wary that reusing the + same sessionID values repeatedly can interfere with the load balancing of the + requests across replicas and adversely affect the performance of the search + service. The value used as sessionId cannot start with a '_' character.""" + scoring_parameters: Optional[List[str]] = rest_field(name="scoringParameters") + """The list of parameter values to be used in scoring functions (for example, + referencePointParameter) using the format name-values. For example, if the + scoring profile defines a function with a parameter called 'mylocation' the + parameter string would be \"mylocation--122.2,44.8\" (without the quotes).""" + scoring_profile: Optional[str] = rest_field(name="scoringProfile") + """The name of a scoring profile to evaluate match scores for matching documents + in order to sort the results.""" + debug: Optional[Union[str, "_models.QueryDebugMode"]] = rest_field() + """Enables a debugging tool that can be used to further explore your reranked + results. Known values are: \"disabled\", \"semantic\", \"vector\", \"queryRewrites\", and + \"all\".""" + search_text: Optional[str] = rest_field(name="search") + """A full-text search query expression; Use \"*\" or omit this parameter to match + all documents.""" + search_fields: Optional[str] = rest_field(name="searchFields") + """The comma-separated list of field names to which to scope the full-text search. 
+ When using fielded search (fieldName:searchExpression) in a full Lucene query, + the field names of each fielded search expression take precedence over any + field names listed in this parameter.""" + search_mode: Optional[Union[str, "_models.SearchMode"]] = rest_field(name="searchMode") + """A value that specifies whether any or all of the search terms must be matched + in order to count the document as a match. Known values are: \"any\" and \"all\".""" + query_language: Optional[Union[str, "_models.QueryLanguage"]] = rest_field(name="queryLanguage") + """A value that specifies the language of the search query. Known values are: \"none\", \"en-us\", + \"en-gb\", \"en-in\", \"en-ca\", \"en-au\", \"fr-fr\", \"fr-ca\", \"de-de\", \"es-es\", + \"es-mx\", \"zh-cn\", \"zh-tw\", \"pt-br\", \"pt-pt\", \"it-it\", \"ja-jp\", \"ko-kr\", + \"ru-ru\", \"cs-cz\", \"nl-be\", \"nl-nl\", \"hu-hu\", \"pl-pl\", \"sv-se\", \"tr-tr\", + \"hi-in\", \"ar-sa\", \"ar-eg\", \"ar-ma\", \"ar-kw\", \"ar-jo\", \"da-dk\", \"no-no\", + \"bg-bg\", \"hr-hr\", \"hr-ba\", \"ms-my\", \"ms-bn\", \"sl-sl\", \"ta-in\", \"vi-vn\", + \"el-gr\", \"ro-ro\", \"is-is\", \"id-id\", \"th-th\", \"lt-lt\", \"uk-ua\", \"lv-lv\", + \"et-ee\", \"ca-es\", \"fi-fi\", \"sr-ba\", \"sr-me\", \"sr-rs\", \"sk-sk\", \"nb-no\", + \"hy-am\", \"bn-in\", \"eu-es\", \"gl-es\", \"gu-in\", \"he-il\", \"ga-ie\", \"kn-in\", + \"ml-in\", \"mr-in\", \"fa-ae\", \"pa-in\", \"te-in\", and \"ur-pk\".""" + speller: Optional[Union[str, "_models.QuerySpellerType"]] = rest_field() + """A value that specified the type of the speller to use to spell-correct + individual search query terms. Known values are: \"none\" and \"lexicon\".""" + select: Optional[str] = rest_field() + """The comma-separated list of fields to retrieve. If unspecified, all fields + marked as retrievable in the schema are included.""" + skip: Optional[int] = rest_field() + """The number of search results to skip. This value cannot be greater than + 100,000. If you need to scan documents in sequence, but cannot use skip due to + this limitation, consider using orderby on a totally-ordered key and filter + with a range query instead.""" + top: Optional[int] = rest_field() + """The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are + truncated due to server-side paging, the response will include a continuation + token that can be used to issue another Search request for the next page of + results.""" + semantic_configuration: Optional[str] = rest_field(name="semanticConfiguration") + """The name of a semantic configuration that will be used when processing + documents for queries of type semantic.""" + semantic_error_handling: Optional[Union[str, "_models.SemanticErrorMode"]] = rest_field( + name="semanticErrorHandling" + ) + """Allows the user to choose whether a semantic call should fail completely + (default / current behavior), or to return partial results. Known values are: \"partial\" and + \"fail\".""" + semantic_max_wait_in_milliseconds: Optional[int] = rest_field(name="semanticMaxWaitInMilliseconds") + """Allows the user to set an upper bound on the amount of time it takes for + semantic enrichment to finish processing before the request fails.""" + semantic_query: Optional[str] = rest_field(name="semanticQuery") + """Allows setting a separate search query that will be solely used for semantic + reranking, semantic captions and semantic answers. 
Is useful for scenarios + where there is a need to use different queries between the base retrieval and + ranking phase, and the L2 semantic phase.""" + answers: Optional[Union[str, "_models.QueryAnswerType"]] = rest_field() + """A value that specifies whether answers should be returned as part of the search + response. Known values are: \"none\" and \"extractive\".""" + captions: Optional[Union[str, "_models.QueryCaptionType"]] = rest_field() + """A value that specifies whether captions should be returned as part of the + search response. Known values are: \"none\" and \"extractive\".""" + query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field(name="queryRewrites") + """A value that specifies whether query rewrites should be generated to augment + the search query. Known values are: \"none\" and \"generative\".""" + semantic_fields: Optional[str] = rest_field(name="semanticFields") + """The comma-separated list of field names used for semantic ranking.""" + vector_queries: Optional[List["_models.VectorQuery"]] = rest_field(name="vectorQueries") + """The query parameters for vector and hybrid search queries.""" + vector_filter_mode: Optional[Union[str, "_models.VectorFilterMode"]] = rest_field(name="vectorFilterMode") + """Determines whether or not filters are applied before or after the vector search + is performed. Default is 'preFilter' for new indexes. Known values are: \"postFilter\" and + \"preFilter\".""" + hybrid_search: Optional["_models.HybridSearch"] = rest_field(name="hybridSearch") + """The query parameters to configure hybrid search behaviors.""" + + @overload + def __init__( # pylint: disable=too-many-locals + self, + *, + include_total_result_count: Optional[bool] = None, + facets: Optional[List[str]] = None, + filter: Optional[str] = None, # pylint: disable=redefined-builtin + highlight_fields: Optional[str] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[str] = None, + query_type: Optional[Union[str, "_models.QueryType"]] = None, + scoring_statistics: Optional[Union[str, "_models.ScoringStatistics"]] = None, + session_id: Optional[str] = None, + scoring_parameters: Optional[List[str]] = None, + scoring_profile: Optional[str] = None, + debug: Optional[Union[str, "_models.QueryDebugMode"]] = None, + search_text: Optional[str] = None, + search_fields: Optional[str] = None, + search_mode: Optional[Union[str, "_models.SearchMode"]] = None, + query_language: Optional[Union[str, "_models.QueryLanguage"]] = None, + speller: Optional[Union[str, "_models.QuerySpellerType"]] = None, + select: Optional[str] = None, + skip: Optional[int] = None, + top: Optional[int] = None, + semantic_configuration: Optional[str] = None, + semantic_error_handling: Optional[Union[str, "_models.SemanticErrorMode"]] = None, + semantic_max_wait_in_milliseconds: Optional[int] = None, + semantic_query: Optional[str] = None, + answers: Optional[Union[str, "_models.QueryAnswerType"]] = None, + captions: Optional[Union[str, "_models.QueryCaptionType"]] = None, + query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = None, + semantic_fields: Optional[str] = None, + vector_queries: Optional[List["_models.VectorQuery"]] = None, + vector_filter_mode: Optional[Union[str, "_models.VectorFilterMode"]] = None, + hybrid_search: Optional["_models.HybridSearch"] = None, + ) -> None: ... 
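+
+    # -- Editorial note: illustrative usage sketch only, not part of the generated model. --
+    # A SearchRequest is typically built with keyword arguments and then serialized
+    # by the client into the REST payload, for example (example values are invented):
+    #
+    #     request = SearchRequest(
+    #         search_text="luxury hotel",
+    #         query_type="semantic",
+    #         semantic_configuration="default",
+    #         top=10,
+    #     )
+    #
+    # Attribute names map to their REST wire names through ``rest_field`` (for example,
+    # ``search_text`` is sent as ``search`` and ``include_total_result_count`` as ``count``).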
+ + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchResourceEncryptionKey(_model_base.Model): + """A customer-managed encryption key in Azure Key Vault. Keys that you create and + manage can be used to encrypt or decrypt data-at-rest, such as indexes and + synonym maps. + + + :ivar key_name: The name of your Azure Key Vault key to be used to encrypt your data at rest. + Required. + :vartype key_name: str + :ivar key_version: The version of your Azure Key Vault key to be used to encrypt your data at + rest. Required. + :vartype key_version: str + :ivar vault_uri: The URI of your Azure Key Vault, also referred to as DNS name, that contains + the key to be used to encrypt your data at rest. An example URI might be + ``https://my-keyvault-name.vault.azure.net``. Required. + :vartype vault_uri: str + :ivar access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key + Vault. Not required if using managed identity instead. + :vartype access_credentials: + ~azure.search.documents.models.AzureActiveDirectoryApplicationCredentials + :ivar identity: An explicit managed identity to use for this encryption key. If not specified + and the access credentials property is null, the system-assigned managed + identity is used. On update to the resource, if the explicit identity is + unspecified, it remains unchanged. If "none" is specified, the value of this + property is cleared. + :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity + """ + + key_name: str = rest_field(name="keyVaultKeyName") + """The name of your Azure Key Vault key to be used to encrypt your data at rest. Required.""" + key_version: str = rest_field(name="keyVaultKeyVersion") + """The version of your Azure Key Vault key to be used to encrypt your data at rest. Required.""" + vault_uri: str = rest_field(name="keyVaultUri") + """The URI of your Azure Key Vault, also referred to as DNS name, that contains + the key to be used to encrypt your data at rest. An example URI might be + ``https://my-keyvault-name.vault.azure.net``. Required.""" + access_credentials: Optional["_models.AzureActiveDirectoryApplicationCredentials"] = rest_field( + name="accessCredentials" + ) + """Optional Azure Active Directory credentials used for accessing your Azure Key + Vault. Not required if using managed identity instead.""" + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + """An explicit managed identity to use for this encryption key. If not specified + and the access credentials property is null, the system-assigned managed + identity is used. On update to the resource, if the explicit identity is + unspecified, it remains unchanged. If \"none\" is specified, the value of this + property is cleared.""" + + @overload + def __init__( + self, + *, + key_name: str, + key_version: str, + vault_uri: str, + access_credentials: Optional["_models.AzureActiveDirectoryApplicationCredentials"] = None, + identity: Optional["_models.SearchIndexerDataIdentity"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class SearchResult(_model_base.Model):
+    """Contains a document found by a search query, plus associated metadata.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+
+    :ivar score: The relevance score of the document compared to other documents returned by the
+     query. Required.
+    :vartype score: float
+    :ivar reranker_score: The relevance score computed by the semantic ranker for the top search
+     results.
+     Search results are sorted by the RerankerScore first and then by the Score.
+     RerankerScore is only returned for queries of type 'semantic'.
+    :vartype reranker_score: float
+    :ivar highlights: Text fragments from the document that indicate the matching search terms,
+     organized by each applicable field; null if hit highlighting was not enabled
+     for the query.
+    :vartype highlights: dict[str, list[str]]
+    :ivar captions: Captions are the most representative passages from the document relative to
+     the search query. They are often used as a document summary. Captions are only
+     returned for queries of type 'semantic'.
+    :vartype captions: list[~azure.search.documents.models.QueryCaptionResult]
+    :ivar document_debug_info: Contains debugging information that can be used to further explore
+     your search
+     results.
+    :vartype document_debug_info: list[~azure.search.documents.models.DocumentDebugInfo]
+    """
+
+    score: float = rest_field(name="@search.score", visibility=["read"])
+    """The relevance score of the document compared to other documents returned by the
+     query. Required."""
+    reranker_score: Optional[float] = rest_field(name="@search.rerankerScore", visibility=["read"])
+    """The relevance score computed by the semantic ranker for the top search results.
+     Search results are sorted by the RerankerScore first and then by the Score.
+     RerankerScore is only returned for queries of type 'semantic'."""
+    highlights: Optional[Dict[str, List[str]]] = rest_field(name="@search.highlights", visibility=["read"])
+    """Text fragments from the document that indicate the matching search terms,
+     organized by each applicable field; null if hit highlighting was not enabled
+     for the query."""
+    captions: Optional[List["_models.QueryCaptionResult"]] = rest_field(name="@search.captions", visibility=["read"])
+    """Captions are the most representative passages from the document relative to
+     the search query. They are often used as a document summary. Captions are only
+     returned for queries of type 'semantic'."""
+    document_debug_info: Optional[List["_models.DocumentDebugInfo"]] = rest_field(
+        name="@search.documentDebugInfo", visibility=["read"]
+    )
+    """Contains debugging information that can be used to further explore your search
+     results."""
+
+
+class VectorThreshold(_model_base.Model):
+    """The threshold used for vector queries.
+
+    You probably want to use the sub-classes and not this class directly. Known sub-classes are:
+    SearchScoreThreshold, VectorSimilarityThreshold
+
+
+    :ivar kind: Type of threshold. Required. Known values are: "vectorSimilarity" and
+     "searchScore".
+    :vartype kind: str or ~azure.search.documents.models.VectorThresholdKind
+    """
+
+    __mapping__: Dict[str, _model_base.Model] = {}
+    kind: str = rest_discriminator(name="kind")
+    """Type of threshold. Required.
Known values are: \"vectorSimilarity\" and \"searchScore\".""" + + @overload + def __init__( + self, + *, + kind: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchScoreThreshold(VectorThreshold, discriminator="searchScore"): + """The results of the vector query will filter based on the '@search.score' value. + Note this is the @search.score returned as part of the search response. The + threshold direction will be chosen for higher @search.score. + + + :ivar value: The threshold will filter based on the '@search.score' value. Note this is the + @search.score returned as part of the search response. The threshold direction + will be chosen for higher @search.score. Required. + :vartype value: float + :ivar kind: The kind of threshold used to filter vector queries. Required. The results of the + vector query will filter based on the '@search.score' value. + Note this is the @search.score returned as part of the search response. The + threshold direction will be chosen for higher @search.score. + :vartype kind: str or ~azure.search.documents.models.SEARCH_SCORE + """ + + value: float = rest_field() + """The threshold will filter based on the '@search.score' value. Note this is the + @search.score returned as part of the search response. The threshold direction + will be chosen for higher @search.score. Required.""" + kind: Literal[VectorThresholdKind.SEARCH_SCORE] = rest_discriminator(name="kind") # type: ignore + """The kind of threshold used to filter vector queries. Required. The results of the vector query + will filter based on the '@search.score' value. + Note this is the @search.score returned as part of the search response. The + threshold direction will be chosen for higher @search.score.""" + + @overload + def __init__( + self, + *, + value: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorThresholdKind.SEARCH_SCORE, **kwargs) + + +class SearchServiceCounters(_model_base.Model): + """Represents service-level resource counters and quotas. + + + :ivar document_counter: Total number of documents across all indexes in the service. Required. + :vartype document_counter: ~azure.search.documents.models.ResourceCounter + :ivar index_counter: Total number of indexes. Required. + :vartype index_counter: ~azure.search.documents.models.ResourceCounter + :ivar indexer_counter: Total number of indexers. Required. + :vartype indexer_counter: ~azure.search.documents.models.ResourceCounter + :ivar data_source_counter: Total number of data sources. Required. + :vartype data_source_counter: ~azure.search.documents.models.ResourceCounter + :ivar storage_size_counter: Total size of used storage in bytes. Required. + :vartype storage_size_counter: ~azure.search.documents.models.ResourceCounter + :ivar synonym_map_counter: Total number of synonym maps. Required. + :vartype synonym_map_counter: ~azure.search.documents.models.ResourceCounter + :ivar skillset_counter: Total number of skillsets. Required. 
+ :vartype skillset_counter: ~azure.search.documents.models.ResourceCounter + :ivar vector_index_size_counter: Total memory consumption of all vector indexes within the + service, in bytes. Required. + :vartype vector_index_size_counter: ~azure.search.documents.models.ResourceCounter + """ + + document_counter: "_models.ResourceCounter" = rest_field(name="documentCount") + """Total number of documents across all indexes in the service. Required.""" + index_counter: "_models.ResourceCounter" = rest_field(name="indexesCount") + """Total number of indexes. Required.""" + indexer_counter: "_models.ResourceCounter" = rest_field(name="indexersCount") + """Total number of indexers. Required.""" + data_source_counter: "_models.ResourceCounter" = rest_field(name="dataSourcesCount") + """Total number of data sources. Required.""" + storage_size_counter: "_models.ResourceCounter" = rest_field(name="storageSize") + """Total size of used storage in bytes. Required.""" + synonym_map_counter: "_models.ResourceCounter" = rest_field(name="synonymMaps") + """Total number of synonym maps. Required.""" + skillset_counter: "_models.ResourceCounter" = rest_field(name="skillsetCount") + """Total number of skillsets. Required.""" + vector_index_size_counter: "_models.ResourceCounter" = rest_field(name="vectorIndexSize") + """Total memory consumption of all vector indexes within the service, in bytes. Required.""" + + @overload + def __init__( + self, + *, + document_counter: "_models.ResourceCounter", + index_counter: "_models.ResourceCounter", + indexer_counter: "_models.ResourceCounter", + data_source_counter: "_models.ResourceCounter", + storage_size_counter: "_models.ResourceCounter", + synonym_map_counter: "_models.ResourceCounter", + skillset_counter: "_models.ResourceCounter", + vector_index_size_counter: "_models.ResourceCounter", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchServiceLimits(_model_base.Model): + """Represents various service level limits. + + :ivar max_fields_per_index: The maximum allowed fields per index. + :vartype max_fields_per_index: int + :ivar max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an + index, including the + top-level complex field. For example, a/b/c has a nesting depth of 3. + :vartype max_field_nesting_depth_per_index: int + :ivar max_complex_collection_fields_per_index: The maximum number of fields of type + Collection(Edm.ComplexType) allowed in an + index. + :vartype max_complex_collection_fields_per_index: int + :ivar max_complex_objects_in_collections_per_document: The maximum number of objects in complex + collections allowed per document. + :vartype max_complex_objects_in_collections_per_document: int + :ivar max_storage_per_index_in_bytes: The maximum amount of storage in bytes allowed per index. + :vartype max_storage_per_index_in_bytes: int + """ + + max_fields_per_index: Optional[int] = rest_field(name="maxFieldsPerIndex") + """The maximum allowed fields per index.""" + max_field_nesting_depth_per_index: Optional[int] = rest_field(name="maxFieldNestingDepthPerIndex") + """The maximum depth which you can nest sub-fields in an index, including the + top-level complex field. 
For example, a/b/c has a nesting depth of 3.""" + max_complex_collection_fields_per_index: Optional[int] = rest_field(name="maxComplexCollectionFieldsPerIndex") + """The maximum number of fields of type Collection(Edm.ComplexType) allowed in an + index.""" + max_complex_objects_in_collections_per_document: Optional[int] = rest_field( + name="maxComplexObjectsInCollectionsPerDocument" + ) + """The maximum number of objects in complex collections allowed per document.""" + max_storage_per_index_in_bytes: Optional[int] = rest_field(name="maxStoragePerIndex") + """The maximum amount of storage in bytes allowed per index.""" + + @overload + def __init__( + self, + *, + max_fields_per_index: Optional[int] = None, + max_field_nesting_depth_per_index: Optional[int] = None, + max_complex_collection_fields_per_index: Optional[int] = None, + max_complex_objects_in_collections_per_document: Optional[int] = None, + max_storage_per_index_in_bytes: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchServiceStatistics(_model_base.Model): + """Response from a get service statistics request. If successful, it includes + service level counters and limits. + + + :ivar counters: Service level resource counters. Required. + :vartype counters: ~azure.search.documents.models.SearchServiceCounters + :ivar limits: Service level general limits. Required. + :vartype limits: ~azure.search.documents.models.SearchServiceLimits + """ + + counters: "_models.SearchServiceCounters" = rest_field() + """Service level resource counters. Required.""" + limits: "_models.SearchServiceLimits" = rest_field() + """Service level general limits. Required.""" + + @overload + def __init__( + self, + *, + counters: "_models.SearchServiceCounters", + limits: "_models.SearchServiceLimits", + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SearchSuggester(_model_base.Model): + """Defines how the Suggest API should apply to a group of fields in the index. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar name: The name of the suggester. Required. + :vartype name: str + :ivar search_mode: A value indicating the capabilities of the suggester. Required. Default + value is "analyzingInfixMatching". + :vartype search_mode: str + :ivar source_fields: The list of field names to which the suggester applies. Each field must be + searchable. Required. + :vartype source_fields: list[str] + """ + + name: str = rest_field() + """The name of the suggester. Required.""" + search_mode: Literal["analyzingInfixMatching"] = rest_field(name="searchMode") + """A value indicating the capabilities of the suggester. Required. Default value is + \"analyzingInfixMatching\".""" + source_fields: List[str] = rest_field(name="sourceFields") + """The list of field names to which the suggester applies. Each field must be + searchable. Required.""" + + @overload + def __init__( + self, + *, + name: str, + source_fields: List[str], + ) -> None: ... 
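+
+    # -- Editorial note: illustrative usage sketch only, not part of the generated model. --
+    # A suggester is declared with a name and the searchable fields it draws
+    # suggestions from, for example (field name is invented):
+    #
+    #     suggester = SearchSuggester(name="sg", source_fields=["hotelName"])
+    #
+    # ``search_mode`` is not passed by callers; the constructor below pins it to
+    # "analyzingInfixMatching", the only value this model accepts.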
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+        self.search_mode: Literal["analyzingInfixMatching"] = "analyzingInfixMatching"
+
+
+class SemanticConfiguration(_model_base.Model):
+    """Defines a specific configuration to be used in the context of semantic
+    capabilities.
+
+
+    :ivar name: The name of the semantic configuration. Required.
+    :vartype name: str
+    :ivar prioritized_fields: Describes the title, content, and keyword fields to be used for
+     semantic
+     ranking, captions, highlights, and answers. At least one of the three
+     sub-properties (titleField, prioritizedKeywordsFields and prioritizedContentFields)
+     needs to be set. Required.
+    :vartype prioritized_fields: ~azure.search.documents.models.SemanticPrioritizedFields
+    """
+
+    name: str = rest_field()
+    """The name of the semantic configuration. Required."""
+    prioritized_fields: "_models.SemanticPrioritizedFields" = rest_field(name="prioritizedFields")
+    """Describes the title, content, and keyword fields to be used for semantic
+     ranking, captions, highlights, and answers. At least one of the three
+     sub-properties (titleField, prioritizedKeywordsFields and prioritizedContentFields)
+     needs to be set. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        name: str,
+        prioritized_fields: "_models.SemanticPrioritizedFields",
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class SemanticDebugInfo(_model_base.Model):
+    """Contains debugging information specific to semantic ranking requests.
+
+    Readonly variables are only populated by the server, and will be ignored when sending a request.
+
+    :ivar title_field: The title field that was sent to the semantic enrichment process, as well as
+     how it was used.
+    :vartype title_field: ~azure.search.documents.models.QueryResultDocumentSemanticField
+    :ivar content_fields: The content fields that were sent to the semantic enrichment process, as
+     well
+     as how they were used.
+    :vartype content_fields: list[~azure.search.documents.models.QueryResultDocumentSemanticField]
+    :ivar keyword_fields: The keyword fields that were sent to the semantic enrichment process, as
+     well
+     as how they were used.
+    :vartype keyword_fields: list[~azure.search.documents.models.QueryResultDocumentSemanticField]
+    :ivar reranker_input: The raw concatenated strings that were sent to the semantic enrichment
+     process.
+    :vartype reranker_input: ~azure.search.documents.models.QueryResultDocumentRerankerInput
+    """
+
+    title_field: Optional["_models.QueryResultDocumentSemanticField"] = rest_field(
+        name="titleField", visibility=["read"]
+    )
+    """The title field that was sent to the semantic enrichment process, as well as
+     how it was used."""
+    content_fields: Optional[List["_models.QueryResultDocumentSemanticField"]] = rest_field(
+        name="contentFields", visibility=["read"]
+    )
+    """The content fields that were sent to the semantic enrichment process, as well
+     as how they were used."""
+    keyword_fields: Optional[List["_models.QueryResultDocumentSemanticField"]] = rest_field(
+        name="keywordFields", visibility=["read"]
+    )
+    """The keyword fields that were sent to the semantic enrichment process, as well
+     as how they were used."""
+    reranker_input: Optional["_models.QueryResultDocumentRerankerInput"] = rest_field(
+        name="rerankerInput", visibility=["read"]
+    )
+    """The raw concatenated strings that were sent to the semantic enrichment process."""
+
+
+class SemanticField(_model_base.Model):
+    """A field that is used as part of the semantic configuration.
+
+
+    :ivar field_name: Field name. Required.
+    :vartype field_name: str
+    """
+
+    field_name: str = rest_field(name="fieldName")
+    """Field name. Required."""
+
+    @overload
+    def __init__(
+        self,
+        *,
+        field_name: str,
+    ) -> None: ...
+
+    @overload
+    def __init__(self, mapping: Mapping[str, Any]) -> None:
+        """
+        :param mapping: raw JSON to initialize the model.
+        :type mapping: Mapping[str, Any]
+        """
+
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
+        super().__init__(*args, **kwargs)
+
+
+class SemanticPrioritizedFields(_model_base.Model):
+    """Describes the title, content, and keywords fields to be used for semantic
+    ranking, captions, highlights, and answers.
+
+    :ivar title_field: Defines the title field to be used for semantic ranking, captions,
+     highlights,
+     and answers. If you don't have a title field in your index, leave this blank.
+    :vartype title_field: ~azure.search.documents.models.SemanticField
+    :ivar content_fields: Defines the content fields to be used for semantic ranking, captions,
+     highlights, and answers. For the best result, the selected fields should
+     contain text in natural language form. The order of the fields in the array
+     represents their priority. Fields with lower priority may get truncated if the
+     content is long.
+    :vartype content_fields: list[~azure.search.documents.models.SemanticField]
+    :ivar keywords_fields: Defines the keyword fields to be used for semantic ranking, captions,
+     highlights, and answers. For the best result, the selected fields should
+     contain a list of keywords. The order of the fields in the array represents
+     their priority. Fields with lower priority may get truncated if the content is
+     long.
+    :vartype keywords_fields: list[~azure.search.documents.models.SemanticField]
+    """
+
+    title_field: Optional["_models.SemanticField"] = rest_field(name="titleField")
+    """Defines the title field to be used for semantic ranking, captions, highlights,
+     and answers. If you don't have a title field in your index, leave this blank."""
+    content_fields: Optional[List["_models.SemanticField"]] = rest_field(name="prioritizedContentFields")
+    """Defines the content fields to be used for semantic ranking, captions,
+     highlights, and answers. For the best result, the selected fields should
+     contain text in natural language form.
The order of the fields in the array + represents their priority. Fields with lower priority may get truncated if the + content is long.""" + keywords_fields: Optional[List["_models.SemanticField"]] = rest_field(name="prioritizedKeywordsFields") + """Defines the keyword fields to be used for semantic ranking, captions, + highlights, and answers. For the best result, the selected fields should + contain a list of keywords. The order of the fields in the array represents + their priority. Fields with lower priority may get truncated if the content is + long.""" + + @overload + def __init__( + self, + *, + title_field: Optional["_models.SemanticField"] = None, + content_fields: Optional[List["_models.SemanticField"]] = None, + keywords_fields: Optional[List["_models.SemanticField"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SemanticSearch(_model_base.Model): + """Defines parameters for a search index that influence semantic capabilities. + + :ivar default_configuration_name: Allows you to set the name of a default semantic + configuration in your index, + making it optional to pass it on as a query parameter every time. + :vartype default_configuration_name: str + :ivar configurations: The semantic configurations for the index. + :vartype configurations: list[~azure.search.documents.models.SemanticConfiguration] + """ + + default_configuration_name: Optional[str] = rest_field(name="defaultConfiguration") + """Allows you to set the name of a default semantic configuration in your index, + making it optional to pass it on as a query parameter every time.""" + configurations: Optional[List["_models.SemanticConfiguration"]] = rest_field() + """The semantic configurations for the index.""" + + @overload + def __init__( + self, + *, + default_configuration_name: Optional[str] = None, + configurations: Optional[List["_models.SemanticConfiguration"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SentimentSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.SentimentSkill"): + """This skill is deprecated. Use the V3.SentimentSkill instead. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. 
+ :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is ``en``. + Known values are: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", "ru", + "es", "sv", and "tr". + :vartype default_language_code: str or ~azure.search.documents.models.SentimentSkillLanguage + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.SentimentSkill". + :vartype odata_type: str + """ + + default_language_code: Optional[Union[str, "_models.SentimentSkillLanguage"]] = rest_field( + name="defaultLanguageCode" + ) + """A value indicating which language code to use. Default is ``en``. Known values are: \"da\", + \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"el\", \"it\", \"no\", \"pl\", \"pt-PT\", \"ru\", + \"es\", \"sv\", and \"tr\".""" + odata_type: Literal["#Microsoft.Skills.Text.SentimentSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.SentimentSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "_models.SentimentSkillLanguage"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.SentimentSkill", **kwargs) + + +class SentimentSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.V3.SentimentSkill"): + """Using the Text Analytics API, evaluates unstructured text and for each record, + provides sentiment labels (such as "negative", "neutral" and "positive") based + on the highest confidence score found by the service at a sentence and + document-level. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is ``en``. 
+ :vartype default_language_code: str + :ivar include_opinion_mining: If set to true, the skill output will include information from + Text Analytics + for opinion mining, namely targets (nouns or verbs) and their associated + assessment (adjective) in the text. Default is false. + :vartype include_opinion_mining: bool + :ivar model_version: The version of the model to use when calling the Text Analytics service. + It + will default to the latest available when not specified. We recommend you do + not specify this value unless absolutely necessary. + :vartype model_version: str + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.V3.SentimentSkill". + :vartype odata_type: str + """ + + default_language_code: Optional[str] = rest_field(name="defaultLanguageCode") + """A value indicating which language code to use. Default is ``en``.""" + include_opinion_mining: Optional[bool] = rest_field(name="includeOpinionMining") + """If set to true, the skill output will include information from Text Analytics + for opinion mining, namely targets (nouns or verbs) and their associated + assessment (adjective) in the text. Default is false.""" + model_version: Optional[str] = rest_field(name="modelVersion") + """The version of the model to use when calling the Text Analytics service. It + will default to the latest available when not specified. We recommend you do + not specify this value unless absolutely necessary.""" + odata_type: Literal["#Microsoft.Skills.Text.V3.SentimentSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.V3.SentimentSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[str] = None, + include_opinion_mining: Optional[bool] = None, + model_version: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.V3.SentimentSkill", **kwargs) + + +class ShaperSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Util.ShaperSkill"): + """A skill for reshaping the outputs. It creates a complex type to support + composite fields (also known as multipart fields). + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. 
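A hedged sketch of constructing the SentimentSkillV3 model defined above. The InputFieldMappingEntry and OutputFieldMappingEntry keyword names are assumed from the corresponding models earlier in this module; the skill name, context, and field paths are illustrative.

from azure.search.documents.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    SentimentSkillV3,
)

# Illustrative skillset entry: sentence-level sentiment plus opinion mining
# over /document/content.
sentiment_skill = SentimentSkillV3(
    name="#1",
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="sentiment", target_name="sentiment")],
    default_language_code="en",
    include_opinion_mining=True,
)
# The discriminator is filled in by __init__:
# sentiment_skill.odata_type == "#Microsoft.Skills.Text.V3.SentimentSkill"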
+ :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Util.ShaperSkill". + :vartype odata_type: str + """ + + odata_type: Literal["#Microsoft.Skills.Util.ShaperSkill"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Util.ShaperSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Util.ShaperSkill", **kwargs) + + +class ShingleTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.ShingleTokenFilter"): + """Creates combinations of tokens as a single token. This token filter is + implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar max_shingle_size: The maximum shingle size. Default and minimum value is 2. + :vartype max_shingle_size: int + :ivar min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less + than the + value of maxShingleSize. + :vartype min_shingle_size: int + :ivar output_unigrams: A value indicating whether the output stream will contain the input + tokens + (unigrams) as well as shingles. Default is true. + :vartype output_unigrams: bool + :ivar output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those + times when no shingles + are available. This property takes precedence when outputUnigrams is set to + false. Default is false. + :vartype output_unigrams_if_no_shingles: bool + :ivar token_separator: The string to use when joining adjacent tokens to form a shingle. + Default is a + single space (" "). + :vartype token_separator: str + :ivar filter_token: The string to insert for each position at which there is no token. Default + is + an underscore ("_"). + :vartype filter_token: str + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.ShingleTokenFilter". + :vartype odata_type: str + """ + + max_shingle_size: Optional[int] = rest_field(name="maxShingleSize") + """The maximum shingle size. Default and minimum value is 2.""" + min_shingle_size: Optional[int] = rest_field(name="minShingleSize") + """The minimum shingle size. Default and minimum value is 2. Must be less than the + value of maxShingleSize.""" + output_unigrams: Optional[bool] = rest_field(name="outputUnigrams") + """A value indicating whether the output stream will contain the input tokens + (unigrams) as well as shingles. 
Default is true.""" + output_unigrams_if_no_shingles: Optional[bool] = rest_field(name="outputUnigramsIfNoShingles") + """A value indicating whether to output unigrams for those times when no shingles + are available. This property takes precedence when outputUnigrams is set to + false. Default is false.""" + token_separator: Optional[str] = rest_field(name="tokenSeparator") + """The string to use when joining adjacent tokens to form a shingle. Default is a + single space (\" \").""" + filter_token: Optional[str] = rest_field(name="filterToken") + """The string to insert for each position at which there is no token. Default is + an underscore (\"_\").""" + odata_type: Literal["#Microsoft.Azure.Search.ShingleTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.ShingleTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + max_shingle_size: Optional[int] = None, + min_shingle_size: Optional[int] = None, + output_unigrams: Optional[bool] = None, + output_unigrams_if_no_shingles: Optional[bool] = None, + token_separator: Optional[str] = None, + filter_token: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.ShingleTokenFilter", **kwargs) + + +class SingleVectorFieldResult(_model_base.Model): + """A single vector field result. Both @search.score and vector similarity values + are returned. Vector similarity is related to @search.score by an equation. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar search_score: The @search.score value that is calculated from the vector similarity + score. + This is the score that's visible in a pure single-field single-vector query. + :vartype search_score: float + :ivar vector_similarity: The vector similarity score for this document. Note this is the + canonical + definition of similarity metric, not the 'distance' version. For example, + cosine similarity instead of cosine distance. + :vartype vector_similarity: float + """ + + search_score: Optional[float] = rest_field(name="searchScore", visibility=["read"]) + """The @search.score value that is calculated from the vector similarity score. + This is the score that's visible in a pure single-field single-vector query.""" + vector_similarity: Optional[float] = rest_field(name="vectorSimilarity", visibility=["read"]) + """The vector similarity score for this document. Note this is the canonical + definition of similarity metric, not the 'distance' version. For example, + cosine similarity instead of cosine distance.""" + + +class SkillNames(_model_base.Model): + """The type of the skill names. + + :ivar skill_names: the names of skills to be reset. + :vartype skill_names: list[str] + """ + + skill_names: Optional[List[str]] = rest_field(name="skillNames") + """the names of skills to be reset.""" + + @overload + def __init__( + self, + *, + skill_names: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
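Construction of the token filter models above is keyword-only; a minimal sketch for ShingleTokenFilter, with an illustrative filter name and sizes.

from azure.search.documents.models import ShingleTokenFilter

shingle_filter = ShingleTokenFilter(
    name="my_shingle_filter",
    min_shingle_size=2,
    max_shingle_size=3,
    output_unigrams=True,  # keep single tokens alongside the shingles
)
# The mapping overload accepts the raw REST shape instead, using the
# camelCase names shown in each rest_field (e.g. "minShingleSize").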
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SnowballTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.SnowballTokenFilter"): + """A filter that stems words using a Snowball-generated stemmer. This token filter + is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar language: The language to use. Required. Known values are: "armenian", "basque", + "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", + "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", + "spanish", "swedish", and "turkish". + :vartype language: str or ~azure.search.documents.models.SnowballTokenFilterLanguage + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.SnowballTokenFilter". + :vartype odata_type: str + """ + + language: Union[str, "_models.SnowballTokenFilterLanguage"] = rest_field() + """The language to use. Required. Known values are: \"armenian\", \"basque\", \"catalan\", + \"danish\", \"dutch\", \"english\", \"finnish\", \"french\", \"german\", \"german2\", + \"hungarian\", \"italian\", \"kp\", \"lovins\", \"norwegian\", \"porter\", \"portuguese\", + \"romanian\", \"russian\", \"spanish\", \"swedish\", and \"turkish\".""" + odata_type: Literal["#Microsoft.Azure.Search.SnowballTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.SnowballTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + language: Union[str, "_models.SnowballTokenFilterLanguage"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.SnowballTokenFilter", **kwargs) + + +class SoftDeleteColumnDeletionDetectionPolicy( + DataDeletionDetectionPolicy, discriminator="#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" +): + """Defines a data deletion detection policy that implements a soft-deletion + strategy. It determines whether an item should be deleted based on the value of + a designated 'soft delete' column. + + + :ivar soft_delete_column_name: The name of the column to use for soft-deletion detection. + :vartype soft_delete_column_name: str + :ivar soft_delete_marker_value: The marker value that identifies an item as deleted. + :vartype soft_delete_marker_value: str + :ivar odata_type: A URI fragment specifying the type of data deletion detection policy. + Required. Default value is "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy". 
+ :vartype odata_type: str + """ + + soft_delete_column_name: Optional[str] = rest_field(name="softDeleteColumnName") + """The name of the column to use for soft-deletion detection.""" + soft_delete_marker_value: Optional[str] = rest_field(name="softDeleteMarkerValue") + """The marker value that identifies an item as deleted.""" + odata_type: Literal["#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of data deletion detection policy. Required. Default value + is \"#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy\".""" + + @overload + def __init__( + self, + *, + soft_delete_column_name: Optional[str] = None, + soft_delete_marker_value: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy", **kwargs) + + +class SplitSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.SplitSkill"): + """A skill to split a string into chunks of text. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar default_language_code: A value indicating which language code to use. Default is ``en``. + Known values are: "am", "bs", "cs", "da", "de", "en", "es", "et", "fi", "fr", "he", "hi", "hr", + "hu", "id", "is", "it", "ja", "ko", "lv", "nb", "nl", "pl", "pt", "pt-br", "ru", "sk", "sl", + "sr", "sv", "tr", "ur", and "zh". + :vartype default_language_code: str or ~azure.search.documents.models.SplitSkillLanguage + :ivar text_split_mode: A value indicating which split mode to perform. Known values are: + "pages" and "sentences". + :vartype text_split_mode: str or ~azure.search.documents.models.TextSplitMode + :ivar maximum_page_length: The desired maximum page length. Default is 10000. + :vartype maximum_page_length: int + :ivar page_overlap_length: Only applicable when textSplitMode is set to 'pages'. If specified, + n+1th chunk + will start with this number of characters/tokens from the end of the nth chunk. + :vartype page_overlap_length: int + :ivar maximum_pages_to_take: Only applicable when textSplitMode is set to 'pages'. 
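A short sketch of the soft-delete policy above, which is typically attached to an indexer data source definition; the column name and marker value are illustrative.

from azure.search.documents.models import SoftDeleteColumnDeletionDetectionPolicy

deletion_policy = SoftDeleteColumnDeletionDetectionPolicy(
    soft_delete_column_name="IsDeleted",  # column exposed by the data source
    soft_delete_marker_value="true",      # value that marks a row as deleted
)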
If + specified, the + SplitSkill will discontinue splitting after processing the first + 'maximumPagesToTake' pages, in order to improve performance when only a few + initial pages are needed from each document. + :vartype maximum_pages_to_take: int + :ivar unit: Only applies if textSplitMode is set to pages. There are two possible values. + The choice of the values will decide the length (maximumPageLength and + pageOverlapLength) measurement. The default is 'characters', which means the + length will be measured by character. Known values are: "characters" and "azureOpenAITokens". + :vartype unit: str or ~azure.search.documents.models.SplitSkillUnit + :ivar azure_open_ai_tokenizer_parameters: Only applies if the unit is set to azureOpenAITokens. + If specified, the + splitSkill will use these parameters when performing the tokenization. The + parameters are a valid 'encoderModelName' and an optional + 'allowedSpecialTokens' property. + :vartype azure_open_ai_tokenizer_parameters: + ~azure.search.documents.models.AzureOpenAITokenizerParameters + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.SplitSkill". + :vartype odata_type: str + """ + + default_language_code: Optional[Union[str, "_models.SplitSkillLanguage"]] = rest_field(name="defaultLanguageCode") + """A value indicating which language code to use. Default is ``en``. Known values are: \"am\", + \"bs\", \"cs\", \"da\", \"de\", \"en\", \"es\", \"et\", \"fi\", \"fr\", \"he\", \"hi\", \"hr\", + \"hu\", \"id\", \"is\", \"it\", \"ja\", \"ko\", \"lv\", \"nb\", \"nl\", \"pl\", \"pt\", + \"pt-br\", \"ru\", \"sk\", \"sl\", \"sr\", \"sv\", \"tr\", \"ur\", and \"zh\".""" + text_split_mode: Optional[Union[str, "_models.TextSplitMode"]] = rest_field(name="textSplitMode") + """A value indicating which split mode to perform. Known values are: \"pages\" and \"sentences\".""" + maximum_page_length: Optional[int] = rest_field(name="maximumPageLength") + """The desired maximum page length. Default is 10000.""" + page_overlap_length: Optional[int] = rest_field(name="pageOverlapLength") + """Only applicable when textSplitMode is set to 'pages'. If specified, n+1th chunk + will start with this number of characters/tokens from the end of the nth chunk.""" + maximum_pages_to_take: Optional[int] = rest_field(name="maximumPagesToTake") + """Only applicable when textSplitMode is set to 'pages'. If specified, the + SplitSkill will discontinue splitting after processing the first + 'maximumPagesToTake' pages, in order to improve performance when only a few + initial pages are needed from each document.""" + unit: Optional[Union[str, "_models.SplitSkillUnit"]] = rest_field() + """Only applies if textSplitMode is set to pages. There are two possible values. + The choice of the values will decide the length (maximumPageLength and + pageOverlapLength) measurement. The default is 'characters', which means the + length will be measured by character. Known values are: \"characters\" and + \"azureOpenAITokens\".""" + azure_open_ai_tokenizer_parameters: Optional["_models.AzureOpenAITokenizerParameters"] = rest_field( + name="azureOpenAITokenizerParameters" + ) + """Only applies if the unit is set to azureOpenAITokens. If specified, the + splitSkill will use these parameters when performing the tokenization. 
The + parameters are a valid 'encoderModelName' and an optional + 'allowedSpecialTokens' property.""" + odata_type: Literal["#Microsoft.Skills.Text.SplitSkill"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.SplitSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_language_code: Optional[Union[str, "_models.SplitSkillLanguage"]] = None, + text_split_mode: Optional[Union[str, "_models.TextSplitMode"]] = None, + maximum_page_length: Optional[int] = None, + page_overlap_length: Optional[int] = None, + maximum_pages_to_take: Optional[int] = None, + unit: Optional[Union[str, "_models.SplitSkillUnit"]] = None, + azure_open_ai_tokenizer_parameters: Optional["_models.AzureOpenAITokenizerParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.SplitSkill", **kwargs) + + +class SqlIntegratedChangeTrackingPolicy( + DataChangeDetectionPolicy, discriminator="#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy" +): + """Defines a data change detection policy that captures changes using the + Integrated Change Tracking feature of Azure SQL Database. + + + :ivar odata_type: A URI fragment specifying the type of data change detection policy. Required. + Default value is "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy". + :vartype odata_type: str + """ + + odata_type: Literal["#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of data change detection policy. Required. Default value is + \"#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy\".""" + + @overload + def __init__( + self, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy", **kwargs) + + +class StemmerOverrideTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.StemmerOverrideTokenFilter"): + """Provides the ability to override other stemming filters with custom + dictionary-based stemming. Any dictionary-stemmed terms will be marked as + keywords so that they will not be stemmed with stemmers down the chain. Must be + placed before any stemming filters. This token filter is implemented using + Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar rules: A list of stemming rules in the following format: "word => stem", for example: + "ran => run". Required. + :vartype rules: list[str] + :ivar odata_type: A URI fragment specifying the type of token filter. Required. 
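A hedged sketch of the SplitSkill model above in page mode. The mapping-entry models are assumed from earlier in this module, and the entry names ("text", "textItems") and lengths are illustrative.

from azure.search.documents.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    SplitSkill,
)

split_skill = SplitSkill(
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="textItems", target_name="pages")],
    text_split_mode="pages",
    maximum_page_length=2000,
    page_overlap_length=200,  # each chunk starts with the last 200 characters of the previous one
)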
Default value + is "#Microsoft.Azure.Search.StemmerOverrideTokenFilter". + :vartype odata_type: str + """ + + rules: List[str] = rest_field() + """A list of stemming rules in the following format: \"word => stem\", for example: + \"ran => run\". Required.""" + odata_type: Literal["#Microsoft.Azure.Search.StemmerOverrideTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.StemmerOverrideTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + rules: List[str], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StemmerOverrideTokenFilter", **kwargs) + + +class StemmerTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.StemmerTokenFilter"): + """Language specific stemming filter. This token filter is implemented using + Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar language: The language to use. Required. Known values are: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", + "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", + "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", + "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", + "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", + "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", + "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", and "turkish". + :vartype language: str or ~azure.search.documents.models.StemmerTokenFilterLanguage + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.StemmerTokenFilter". + :vartype odata_type: str + """ + + language: Union[str, "_models.StemmerTokenFilterLanguage"] = rest_field() + """The language to use. Required. 
Known values are: \"arabic\", \"armenian\", \"basque\", + \"brazilian\", \"bulgarian\", \"catalan\", \"czech\", \"danish\", \"dutch\", \"dutchKp\", + \"english\", \"lightEnglish\", \"minimalEnglish\", \"possessiveEnglish\", \"porter2\", + \"lovins\", \"finnish\", \"lightFinnish\", \"french\", \"lightFrench\", \"minimalFrench\", + \"galician\", \"minimalGalician\", \"german\", \"german2\", \"lightGerman\", \"minimalGerman\", + \"greek\", \"hindi\", \"hungarian\", \"lightHungarian\", \"indonesian\", \"irish\", + \"italian\", \"lightItalian\", \"sorani\", \"latvian\", \"norwegian\", \"lightNorwegian\", + \"minimalNorwegian\", \"lightNynorsk\", \"minimalNynorsk\", \"portuguese\", + \"lightPortuguese\", \"minimalPortuguese\", \"portugueseRslp\", \"romanian\", \"russian\", + \"lightRussian\", \"spanish\", \"lightSpanish\", \"swedish\", \"lightSwedish\", and + \"turkish\".""" + odata_type: Literal["#Microsoft.Azure.Search.StemmerTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.StemmerTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + language: Union[str, "_models.StemmerTokenFilterLanguage"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StemmerTokenFilter", **kwargs) + + +class StopAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.StopAnalyzer"): + """Divides text at non-letters; Applies the lowercase and stopword token filters. + This analyzer is implemented using Apache Lucene. + + + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] + :ivar odata_type: A URI fragment specifying the type of analyzer. Required. Default value is + "#Microsoft.Azure.Search.StopAnalyzer". + :vartype odata_type: str + """ + + stopwords: Optional[List[str]] = rest_field() + """A list of stopwords.""" + odata_type: Literal["#Microsoft.Azure.Search.StopAnalyzer"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of analyzer. Required. Default value is + \"#Microsoft.Azure.Search.StopAnalyzer\".""" + + @overload + def __init__( + self, + *, + name: str, + stopwords: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StopAnalyzer", **kwargs) + + +class StopwordsTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.StopwordsTokenFilter"): + """Removes stop words from a token stream. This token filter is implemented using + Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. 
+ :vartype name: str + :ivar stopwords: The list of stopwords. This property and the stopwords list property cannot + both be set. + :vartype stopwords: list[str] + :ivar stopwords_list: A predefined list of stopwords to use. This property and the stopwords + property + cannot both be set. Default is English. Known values are: "arabic", "armenian", "basque", + "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", "finnish", + "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", "irish", + "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", "sorani", + "spanish", "swedish", "thai", and "turkish". + :vartype stopwords_list: str or ~azure.search.documents.models.StopwordsList + :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted + to + lower case first. Default is false. + :vartype ignore_case: bool + :ivar remove_trailing_stop_words: A value indicating whether to ignore the last search term if + it's a stop word. + Default is true. + :vartype remove_trailing_stop_words: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.StopwordsTokenFilter". + :vartype odata_type: str + """ + + stopwords: Optional[List[str]] = rest_field() + """The list of stopwords. This property and the stopwords list property cannot + both be set.""" + stopwords_list: Optional[Union[str, "_models.StopwordsList"]] = rest_field(name="stopwordsList") + """A predefined list of stopwords to use. This property and the stopwords property + cannot both be set. Default is English. Known values are: \"arabic\", \"armenian\", \"basque\", + \"brazilian\", \"bulgarian\", \"catalan\", \"czech\", \"danish\", \"dutch\", \"english\", + \"finnish\", \"french\", \"galician\", \"german\", \"greek\", \"hindi\", \"hungarian\", + \"indonesian\", \"irish\", \"italian\", \"latvian\", \"norwegian\", \"persian\", + \"portuguese\", \"romanian\", \"russian\", \"sorani\", \"spanish\", \"swedish\", \"thai\", and + \"turkish\".""" + ignore_case: Optional[bool] = rest_field(name="ignoreCase") + """A value indicating whether to ignore case. If true, all words are converted to + lower case first. Default is false.""" + remove_trailing_stop_words: Optional[bool] = rest_field(name="removeTrailing") + """A value indicating whether to ignore the last search term if it's a stop word. + Default is true.""" + odata_type: Literal["#Microsoft.Azure.Search.StopwordsTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.StopwordsTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + stopwords: Optional[List[str]] = None, + stopwords_list: Optional[Union[str, "_models.StopwordsList"]] = None, + ignore_case: Optional[bool] = None, + remove_trailing_stop_words: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StopwordsTokenFilter", **kwargs) + + +class SuggestDocumentsResult(_model_base.Model): + """Response containing suggestion query results from an index. 
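A sketch for the StopwordsTokenFilter above: either stopwords or stopwords_list may be set, not both. The filter name is illustrative.

from azure.search.documents.models import StopwordsTokenFilter

stopword_filter = StopwordsTokenFilter(
    name="my_stopwords",
    stopwords_list="english",  # predefined list; mutually exclusive with stopwords=[...]
    ignore_case=True,
)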
+ + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar results: The sequence of results returned by the query. Required. + :vartype results: list[~azure.search.documents.models.SuggestResult] + :ivar coverage: A value indicating the percentage of the index that was included in the query, + or null if minimumCoverage was not set in the request. + :vartype coverage: float + """ + + results: List["_models.SuggestResult"] = rest_field(name="value", visibility=["read"]) + """The sequence of results returned by the query. Required.""" + coverage: Optional[float] = rest_field(name="@search.coverage", visibility=["read"]) + """A value indicating the percentage of the index that was included in the query, + or null if minimumCoverage was not set in the request.""" + + +class SuggestRequest(_model_base.Model): + """Parameters for filtering, sorting, fuzzy matching, and other suggestions query + behaviors. + + All required parameters must be populated in order to send to server. + + :ivar filter: An OData expression that filters the documents considered for suggestions. + :vartype filter: str + :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestion + query. + Default is false. When set to true, the query will find suggestions even if + there's a substituted or missing character in the search text. While this + provides a better experience in some scenarios, it comes at a performance cost + as fuzzy suggestion searches are slower and consume more resources. + :vartype use_fuzzy_matching: bool + :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting of suggestions is disabled. + :vartype highlight_post_tag: str + :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting of suggestions is disabled. + :vartype highlight_pre_tag: str + :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that + must be + covered by a suggestion query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for + services with only one replica. The default is 80. + :vartype minimum_coverage: float + :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the + geo.distance() or the search.score() functions. Each expression can be followed + by asc to indicate ascending, or desc to indicate descending. The default is + ascending order. Ties will be broken by the match scores of documents. If no + $orderby is specified, the default sort order is descending by document match + score. There can be at most 32 $orderby clauses. + :vartype order_by: str + :ivar search_text: The search text to use to suggest documents. Must be at least 1 character, + and + no more than 100 characters. Required. + :vartype search_text: str + :ivar search_fields: The comma-separated list of field names to search for the specified search + text. Target fields must be included in the specified suggester. + :vartype search_fields: str + :ivar select: The comma-separated list of fields to retrieve. If unspecified, only the key + field will be included in the results. 
+ :vartype select: str + :ivar suggester_name: The name of the suggester as specified in the suggesters collection + that's part + of the index definition. Required. + :vartype suggester_name: str + :ivar top: The number of suggestions to retrieve. This must be a value between 1 and 100. + The default is 5. + :vartype top: int + """ + + filter: Optional[str] = rest_field() + """An OData expression that filters the documents considered for suggestions.""" + use_fuzzy_matching: Optional[bool] = rest_field(name="fuzzy") + """A value indicating whether to use fuzzy matching for the suggestion query. + Default is false. When set to true, the query will find suggestions even if + there's a substituted or missing character in the search text. While this + provides a better experience in some scenarios, it comes at a performance cost + as fuzzy suggestion searches are slower and consume more resources.""" + highlight_post_tag: Optional[str] = rest_field(name="highlightPostTag") + """A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting of suggestions is disabled.""" + highlight_pre_tag: Optional[str] = rest_field(name="highlightPreTag") + """A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting of suggestions is disabled.""" + minimum_coverage: Optional[float] = rest_field(name="minimumCoverage") + """A number between 0 and 100 indicating the percentage of the index that must be + covered by a suggestion query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for + services with only one replica. The default is 80.""" + order_by: Optional[str] = rest_field(name="orderby") + """The comma-separated list of OData $orderby expressions by which to sort the + results. Each expression can be either a field name or a call to either the + geo.distance() or the search.score() functions. Each expression can be followed + by asc to indicate ascending, or desc to indicate descending. The default is + ascending order. Ties will be broken by the match scores of documents. If no + $orderby is specified, the default sort order is descending by document match + score. There can be at most 32 $orderby clauses.""" + search_text: str = rest_field(name="search") + """The search text to use to suggest documents. Must be at least 1 character, and + no more than 100 characters. Required.""" + search_fields: Optional[str] = rest_field(name="searchFields") + """The comma-separated list of field names to search for the specified search + text. Target fields must be included in the specified suggester.""" + select: Optional[str] = rest_field() + """The comma-separated list of fields to retrieve. If unspecified, only the key + field will be included in the results.""" + suggester_name: str = rest_field(name="suggesterName") + """The name of the suggester as specified in the suggesters collection that's part + of the index definition. Required.""" + top: Optional[int] = rest_field() + """The number of suggestions to retrieve. This must be a value between 1 and 100. 
+ The default is 5.""" + + @overload + def __init__( + self, + *, + search_text: str, + suggester_name: str, + filter: Optional[str] = None, # pylint: disable=redefined-builtin + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[str] = None, + search_fields: Optional[str] = None, + select: Optional[str] = None, + top: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class SuggestResult(_model_base.Model): + """A result containing a document found by a suggestion query, plus associated + metadata. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar text: The text of the suggestion result. Required. + :vartype text: str + """ + + text: str = rest_field(name="@search.text", visibility=["read"]) + """The text of the suggestion result. Required.""" + + +class SynonymMap(_model_base.Model): + """Represents a synonym map definition. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar name: The name of the synonym map. Required. + :vartype name: str + :ivar format: The format of the synonym map. Only the 'solr' format is currently supported. + Required. Default value is "solr". + :vartype format: str + :ivar synonyms: A series of synonym rules in the specified synonym map format. The rules must + be separated by newlines. Required. + :vartype synonyms: str + :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key + is used to provide an additional level of encryption-at-rest for your data when + you want full assurance that no one, not even Microsoft, can decrypt your data. + Once you have encrypted your data, it will always remain encrypted. The search + service will ignore attempts to set this property to null. You can change this + property as needed if you want to rotate your encryption key; Your data will be + unaffected. Encryption with customer-managed keys is not available for free + search services, and is only available for paid services created on or after + January 1, 2019. + :vartype encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + :ivar e_tag: The ETag of the synonym map. + :vartype e_tag: str + """ + + name: str = rest_field() + """The name of the synonym map. Required.""" + format: Literal["solr"] = rest_field() + """The format of the synonym map. Only the 'solr' format is currently supported. Required. Default + value is \"solr\".""" + synonyms: str = rest_field() + """A series of synonym rules in the specified synonym map format. The rules must + be separated by newlines. Required.""" + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + """A description of an encryption key that you create in Azure Key Vault. This key + is used to provide an additional level of encryption-at-rest for your data when + you want full assurance that no one, not even Microsoft, can decrypt your data. + Once you have encrypted your data, it will always remain encrypted. The search + service will ignore attempts to set this property to null. 
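The SuggestRequest model above maps its snake_case keywords onto the REST names shown in each rest_field ("search", "suggesterName", "fuzzy", and so on). A minimal sketch; the suggester and field names are illustrative.

from azure.search.documents.models import SuggestRequest

request = SuggestRequest(
    search_text="sea",
    suggester_name="sg",
    use_fuzzy_matching=True,
    select="hotelId,hotelName",
    top=5,
)
# Equivalent raw REST shape via the mapping overload:
# SuggestRequest({"search": "sea", "suggesterName": "sg", "fuzzy": True, "top": 5})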
You can change this + property as needed if you want to rotate your encryption key; Your data will be + unaffected. Encryption with customer-managed keys is not available for free + search services, and is only available for paid services created on or after + January 1, 2019.""" + e_tag: Optional[str] = rest_field(name="@odata.etag") + """The ETag of the synonym map.""" + + @overload + def __init__( + self, + *, + name: str, + synonyms: str, + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, + e_tag: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.format: Literal["solr"] = "solr" + + +class SynonymTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.SynonymTokenFilter"): + """Matches single or multi-word synonyms in a token stream. This token filter is + implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar synonyms: A list of synonyms in following one of two formats: 1. incredible, + unbelievable, fabulous => amazing - all terms on the left side of => symbol + will be replaced with all terms on its right side; 2. incredible, unbelievable, + fabulous, amazing - comma separated list of equivalent words. Set the expand + option to change how this list is interpreted. Required. + :vartype synonyms: list[str] + :ivar ignore_case: A value indicating whether to case-fold input for matching. Default is + false. + :vartype ignore_case: bool + :ivar expand: A value indicating whether all words in the list of synonyms (if => notation is + not used) will map to one another. If true, all words in the list of synonyms + (if => notation is not used) will map to one another. The following list: + incredible, unbelievable, fabulous, amazing is equivalent to: incredible, + unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. + If false, the following list: incredible, unbelievable, fabulous, amazing will + be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. + Default is true. + :vartype expand: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.SynonymTokenFilter". + :vartype odata_type: str + """ + + synonyms: List[str] = rest_field() + """A list of synonyms in following one of two formats: 1. incredible, + unbelievable, fabulous => amazing - all terms on the left side of => symbol + will be replaced with all terms on its right side; 2. incredible, unbelievable, + fabulous, amazing - comma separated list of equivalent words. Set the expand + option to change how this list is interpreted. Required.""" + ignore_case: Optional[bool] = rest_field(name="ignoreCase") + """A value indicating whether to case-fold input for matching. Default is false.""" + expand: Optional[bool] = rest_field() + """A value indicating whether all words in the list of synonyms (if => notation is + not used) will map to one another. If true, all words in the list of synonyms + (if => notation is not used) will map to one another. 
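A minimal sketch of the SynonymMap model above: only name and synonyms are required, and format is pinned to "solr" by __init__. The map name and rules are illustrative.

from azure.search.documents.models import SynonymMap

synonym_map = SynonymMap(
    name="my-synonym-map",
    synonyms="USA, United States, United States of America\nWashington, Wash. => WA",
)
assert synonym_map.format == "solr"  # set automatically in __init__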
The following list: + incredible, unbelievable, fabulous, amazing is equivalent to: incredible, + unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. + If false, the following list: incredible, unbelievable, fabulous, amazing will + be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. + Default is true.""" + odata_type: Literal["#Microsoft.Azure.Search.SynonymTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.SynonymTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + synonyms: List[str], + ignore_case: Optional[bool] = None, + expand: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.SynonymTokenFilter", **kwargs) + + +class TagScoringFunction(ScoringFunction, discriminator="tag"): + """Defines a function that boosts scores of documents with string values matching + a given list of tags. + + + :ivar field_name: The name of the field used as input to the scoring function. Required. + :vartype field_name: str + :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. + Required. + :vartype boost: float + :ivar interpolation: A value indicating how boosting will be interpolated across document + scores; + defaults to "Linear". Known values are: "linear", "constant", "quadratic", and "logarithmic". + :vartype interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation + :ivar parameters: Parameter values for the tag scoring function. Required. + :vartype parameters: ~azure.search.documents.models.TagScoringParameters + :ivar type: Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + "tag". + :vartype type: str + """ + + parameters: "_models.TagScoringParameters" = rest_field(name="tag") + """Parameter values for the tag scoring function. Required.""" + type: Literal["tag"] = rest_discriminator(name="type") # type: ignore + """Indicates the type of function to use. Valid values include magnitude, + freshness, distance, and tag. The function type must be lower case. Required. Default value is + \"tag\".""" + + @overload + def __init__( + self, + *, + field_name: str, + boost: float, + parameters: "_models.TagScoringParameters", + interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, type="tag", **kwargs) + + +class TagScoringParameters(_model_base.Model): + """Provides parameter values to a tag scoring function. + + + :ivar tags_parameter: The name of the parameter passed in search queries to specify the list of + tags + to compare against the target field. Required. 
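A sketch for the SynonymTokenFilter above, reusing the rule format from its own docstring; the filter name is illustrative.

from azure.search.documents.models import SynonymTokenFilter

synonym_filter = SynonymTokenFilter(
    name="my_synonyms",
    synonyms=["incredible, unbelievable, fabulous => amazing"],
    ignore_case=True,
)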
+ :vartype tags_parameter: str + """ + + tags_parameter: str = rest_field(name="tagsParameter") + """The name of the parameter passed in search queries to specify the list of tags + to compare against the target field. Required.""" + + @overload + def __init__( + self, + *, + tags_parameter: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TextResult(_model_base.Model): + """The BM25 or Classic score for the text portion of the query. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar search_score: The BM25 or Classic score for the text portion of the query. + :vartype search_score: float + """ + + search_score: Optional[float] = rest_field(name="searchScore", visibility=["read"]) + """The BM25 or Classic score for the text portion of the query.""" + + +class TextTranslationSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.TranslationSkill"): + """A skill to translate text from one language to another. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar default_to_language_code: The language code to translate documents into for documents + that don't specify + the to language explicitly. Required. Known values are: "af", "ar", "bn", "bs", "bg", "yue", + "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", "fi", "fr", "de", + "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", "tlh-Latn", + "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", "pt-PT", + "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", + "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", and "pa". + :vartype default_to_language_code: str or + ~azure.search.documents.models.TextTranslationSkillLanguage + :ivar default_from_language_code: The language code to translate documents from for documents + that don't specify + the from language explicitly. 
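A sketch of the tag scoring function above paired with TagScoringParameters; the field and parameter names are illustrative, and the "type" discriminator is set by __init__.

from azure.search.documents.models import TagScoringFunction, TagScoringParameters

tag_boost = TagScoringFunction(
    field_name="tags",
    boost=2.0,
    parameters=TagScoringParameters(tags_parameter="mytags"),
)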
Known values are: "af", "ar", "bn", "bs", "bg", "yue", "ca", + "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", "fi", "fr", "de", "el", + "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", "tlh-Latn", "tlh-Piqd", + "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", "pt-PT", "otq", "ro", + "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", + "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", and "pa". + :vartype default_from_language_code: str or + ~azure.search.documents.models.TextTranslationSkillLanguage + :ivar suggested_from: The language code to translate documents from when neither the + fromLanguageCode + input nor the defaultFromLanguageCode parameter are provided, and the automatic + language detection is unsuccessful. Default is ``en``. Known values are: "af", "ar", "bn", + "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", + "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", + "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", + "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", + "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", and "pa". + :vartype suggested_from: str or ~azure.search.documents.models.TextTranslationSkillLanguage + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Text.TranslationSkill". + :vartype odata_type: str + """ + + default_to_language_code: Union[str, "_models.TextTranslationSkillLanguage"] = rest_field( + name="defaultToLanguageCode" + ) + """The language code to translate documents into for documents that don't specify + the to language explicitly. Required. Known values are: \"af\", \"ar\", \"bn\", \"bs\", \"bg\", + \"yue\", \"ca\", \"zh-Hans\", \"zh-Hant\", \"hr\", \"cs\", \"da\", \"nl\", \"en\", \"et\", + \"fj\", \"fil\", \"fi\", \"fr\", \"de\", \"el\", \"ht\", \"he\", \"hi\", \"mww\", \"hu\", + \"is\", \"id\", \"it\", \"ja\", \"sw\", \"tlh\", \"tlh-Latn\", \"tlh-Piqd\", \"ko\", \"lv\", + \"lt\", \"mg\", \"ms\", \"mt\", \"nb\", \"fa\", \"pl\", \"pt\", \"pt-br\", \"pt-PT\", \"otq\", + \"ro\", \"ru\", \"sm\", \"sr-Cyrl\", \"sr-Latn\", \"sk\", \"sl\", \"es\", \"sv\", \"ty\", + \"ta\", \"te\", \"th\", \"to\", \"tr\", \"uk\", \"ur\", \"vi\", \"cy\", \"yua\", \"ga\", + \"kn\", \"mi\", \"ml\", and \"pa\".""" + default_from_language_code: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = rest_field( + name="defaultFromLanguageCode" + ) + """The language code to translate documents from for documents that don't specify + the from language explicitly. 
Known values are: \"af\", \"ar\", \"bn\", \"bs\", \"bg\", + \"yue\", \"ca\", \"zh-Hans\", \"zh-Hant\", \"hr\", \"cs\", \"da\", \"nl\", \"en\", \"et\", + \"fj\", \"fil\", \"fi\", \"fr\", \"de\", \"el\", \"ht\", \"he\", \"hi\", \"mww\", \"hu\", + \"is\", \"id\", \"it\", \"ja\", \"sw\", \"tlh\", \"tlh-Latn\", \"tlh-Piqd\", \"ko\", \"lv\", + \"lt\", \"mg\", \"ms\", \"mt\", \"nb\", \"fa\", \"pl\", \"pt\", \"pt-br\", \"pt-PT\", \"otq\", + \"ro\", \"ru\", \"sm\", \"sr-Cyrl\", \"sr-Latn\", \"sk\", \"sl\", \"es\", \"sv\", \"ty\", + \"ta\", \"te\", \"th\", \"to\", \"tr\", \"uk\", \"ur\", \"vi\", \"cy\", \"yua\", \"ga\", + \"kn\", \"mi\", \"ml\", and \"pa\".""" + suggested_from: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = rest_field(name="suggestedFrom") + """The language code to translate documents from when neither the fromLanguageCode + input nor the defaultFromLanguageCode parameter are provided, and the automatic + language detection is unsuccessful. Default is ``en``. Known values are: \"af\", \"ar\", + \"bn\", \"bs\", \"bg\", \"yue\", \"ca\", \"zh-Hans\", \"zh-Hant\", \"hr\", \"cs\", \"da\", + \"nl\", \"en\", \"et\", \"fj\", \"fil\", \"fi\", \"fr\", \"de\", \"el\", \"ht\", \"he\", + \"hi\", \"mww\", \"hu\", \"is\", \"id\", \"it\", \"ja\", \"sw\", \"tlh\", \"tlh-Latn\", + \"tlh-Piqd\", \"ko\", \"lv\", \"lt\", \"mg\", \"ms\", \"mt\", \"nb\", \"fa\", \"pl\", \"pt\", + \"pt-br\", \"pt-PT\", \"otq\", \"ro\", \"ru\", \"sm\", \"sr-Cyrl\", \"sr-Latn\", \"sk\", + \"sl\", \"es\", \"sv\", \"ty\", \"ta\", \"te\", \"th\", \"to\", \"tr\", \"uk\", \"ur\", \"vi\", + \"cy\", \"yua\", \"ga\", \"kn\", \"mi\", \"ml\", and \"pa\".""" + odata_type: Literal["#Microsoft.Skills.Text.TranslationSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Text.TranslationSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + default_to_language_code: Union[str, "_models.TextTranslationSkillLanguage"], + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + default_from_language_code: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = None, + suggested_from: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Text.TranslationSkill", **kwargs) + + +class TextWeights(_model_base.Model): + """Defines weights on index fields for which matches should boost scoring in + search queries. + + + :ivar weights: The dictionary of per-field weights to boost document scoring. The keys are + field names and the values are the weights for each field. Required. + :vartype weights: dict[str, float] + """ + + weights: Dict[str, float] = rest_field() + """The dictionary of per-field weights to boost document scoring. The keys are + field names and the values are the weights for each field. Required.""" + + @overload + def __init__( + self, + *, + weights: Dict[str, float], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
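A hedged construction sketch for the TextTranslationSkill above; the skill name and field mappings are hypothetical, and the InputFieldMappingEntry/OutputFieldMappingEntry keyword names are assumptions based on the rest of this generated surface.

    import azure.search.documents._generated.models as _models  # assumed import path

    # Translate /document/content into French, writing the result to 'translated_text'.
    translate_skill = _models.TextTranslationSkill(
        name="#translate-content",      # hypothetical skill name
        default_to_language_code="fr",  # one of the known values listed above
        inputs=[_models.InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[_models.OutputFieldMappingEntry(name="translatedText", target_name="translated_text")],
    )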
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class TruncateTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.TruncateTokenFilter"): + """Truncates the terms to a specific length. This token filter is implemented + using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar length: The length at which terms will be truncated. Default and maximum is 300. + :vartype length: int + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.TruncateTokenFilter". + :vartype odata_type: str + """ + + length: Optional[int] = rest_field() + """The length at which terms will be truncated. Default and maximum is 300.""" + odata_type: Literal["#Microsoft.Azure.Search.TruncateTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.TruncateTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + length: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.TruncateTokenFilter", **kwargs) + + +class UaxUrlEmailTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.UaxUrlEmailTokenizer"): + """Tokenizes urls and emails as one token. This tokenizer is implemented using + Apache Lucene. + + + :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes + or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. Required. + :vartype name: str + :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length + are split. The maximum token length that can be used is 300 characters. + :vartype max_token_length: int + :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is + "#Microsoft.Azure.Search.UaxUrlEmailTokenizer". + :vartype odata_type: str + """ + + max_token_length: Optional[int] = rest_field(name="maxTokenLength") + """The maximum token length. Default is 255. Tokens longer than the maximum length + are split. The maximum token length that can be used is 300 characters.""" + odata_type: Literal["#Microsoft.Azure.Search.UaxUrlEmailTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of tokenizer. Required. Default value is + \"#Microsoft.Azure.Search.UaxUrlEmailTokenizer\".""" + + @overload + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.UaxUrlEmailTokenizer", **kwargs) + + +class UniqueTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.UniqueTokenFilter"): + """Filters out tokens with same text as the previous token. This token filter is + implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar only_on_same_position: A value indicating whether to remove duplicates only at the same + position. + Default is false. + :vartype only_on_same_position: bool + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.UniqueTokenFilter". + :vartype odata_type: str + """ + + only_on_same_position: Optional[bool] = rest_field(name="onlyOnSamePosition") + """A value indicating whether to remove duplicates only at the same position. + Default is false.""" + odata_type: Literal["#Microsoft.Azure.Search.UniqueTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.UniqueTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + only_on_same_position: Optional[bool] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.UniqueTokenFilter", **kwargs) + + +class VectorQuery(_model_base.Model): + """The query parameters for vector and hybrid search queries. + + You probably want to use the sub-classes and not this class directly. Known sub-classes are: + VectorizableImageBinaryQuery, VectorizableImageUrlQuery, VectorizableTextQuery, VectorizedQuery + + + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int + :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector + searched. + :vartype fields: str + :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all + vectors + within the vector index. Useful for scenarios where exact matches are critical, + such as determining ground truth values. + :vartype exhaustive: bool + :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the + 'defaultOversampling' + parameter configured in the index definition. It can be set only when + 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + compression method is used on the underlying vector field. + :vartype oversampling: float + :ivar weight: Relative weight of the vector query when compared to other vector query and/or + the text query within the same search request. This value is used when + combining the results of multiple ranking lists produced by the different + vector queries and/or the results retrieved through the text query. The higher + the weight, the higher the documents that matched that query will be in the + final ranking. Default is 1.0 and the value needs to be a positive number + larger than zero. 
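A minimal sketch constructing the analysis components defined above (names are hypothetical); in practice they would be referenced from a custom analyzer declared elsewhere in the index definition.

    import azure.search.documents._generated.models as _models  # assumed import path

    url_tokenizer = _models.UaxUrlEmailTokenizer(name="my-url-tokenizer", max_token_length=255)
    truncate_filter = _models.TruncateTokenFilter(name="my-truncate-filter", length=120)
    dedupe_filter = _models.UniqueTokenFilter(name="my-dedupe-filter", only_on_same_position=False)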
+ :vartype weight: float + :ivar threshold: The threshold used for vector queries. Note this can only be set if all + 'fields' use the same similarity metric. + :vartype threshold: ~azure.search.documents.models.VectorThreshold + :ivar filter_override: The OData filter expression to apply to this specific vector query. If + no + filter expression is defined at the vector level, the expression defined in the + top level filter parameter is used instead. + :vartype filter_override: str + :ivar kind: Type of query. Required. Known values are: "vector", "text", "imageUrl", and + "imageBinary". + :vartype kind: str or ~azure.search.documents.models.VectorQueryKind + """ + + __mapping__: Dict[str, _model_base.Model] = {} + k: Optional[int] = rest_field() + """Number of nearest neighbors to return as top hits.""" + fields: Optional[str] = rest_field() + """Vector Fields of type Collection(Edm.Single) to be included in the vector + searched.""" + exhaustive: Optional[bool] = rest_field() + """When true, triggers an exhaustive k-nearest neighbor search across all vectors + within the vector index. Useful for scenarios where exact matches are critical, + such as determining ground truth values.""" + oversampling: Optional[float] = rest_field() + """Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' + parameter configured in the index definition. It can be set only when + 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + compression method is used on the underlying vector field.""" + weight: Optional[float] = rest_field() + """Relative weight of the vector query when compared to other vector query and/or + the text query within the same search request. This value is used when + combining the results of multiple ranking lists produced by the different + vector queries and/or the results retrieved through the text query. The higher + the weight, the higher the documents that matched that query will be in the + final ranking. Default is 1.0 and the value needs to be a positive number + larger than zero.""" + threshold: Optional["_models.VectorThreshold"] = rest_field() + """The threshold used for vector queries. Note this can only be set if all + 'fields' use the same similarity metric.""" + filter_override: Optional[str] = rest_field(name="filterOverride") + """The OData filter expression to apply to this specific vector query. If no + filter expression is defined at the vector level, the expression defined in the + top level filter parameter is used instead.""" + kind: str = rest_discriminator(name="kind") + """Type of query. Required. Known values are: \"vector\", \"text\", \"imageUrl\", and + \"imageBinary\".""" + + @overload + def __init__( + self, + *, + kind: str, + k: Optional[int] = None, + fields: Optional[str] = None, + exhaustive: Optional[bool] = None, + oversampling: Optional[float] = None, + weight: Optional[float] = None, + threshold: Optional["_models.VectorThreshold"] = None, + filter_override: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorizableImageBinaryQuery(VectorQuery, discriminator="imageBinary"): + """The query parameters to use for vector search when a base 64 encoded binary of + an image that needs to be vectorized is provided. 
+ + + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int + :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector + searched. + :vartype fields: str + :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all + vectors + within the vector index. Useful for scenarios where exact matches are critical, + such as determining ground truth values. + :vartype exhaustive: bool + :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the + 'defaultOversampling' + parameter configured in the index definition. It can be set only when + 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + compression method is used on the underlying vector field. + :vartype oversampling: float + :ivar weight: Relative weight of the vector query when compared to other vector query and/or + the text query within the same search request. This value is used when + combining the results of multiple ranking lists produced by the different + vector queries and/or the results retrieved through the text query. The higher + the weight, the higher the documents that matched that query will be in the + final ranking. Default is 1.0 and the value needs to be a positive number + larger than zero. + :vartype weight: float + :ivar threshold: The threshold used for vector queries. Note this can only be set if all + 'fields' use the same similarity metric. + :vartype threshold: ~azure.search.documents.models.VectorThreshold + :ivar filter_override: The OData filter expression to apply to this specific vector query. If + no + filter expression is defined at the vector level, the expression defined in the + top level filter parameter is used instead. + :vartype filter_override: str + :ivar base64_image: The base 64 encoded binary of an image to be vectorized to perform a vector + search query. + :vartype base64_image: str + :ivar kind: The kind of vector query being performed. Required. Vector query where a base 64 + encoded binary of an image that needs to be + vectorized is provided. + :vartype kind: str or ~azure.search.documents.models.IMAGE_BINARY + """ + + base64_image: Optional[str] = rest_field(name="base64Image") + """The base 64 encoded binary of an image to be vectorized to perform a vector + search query.""" + kind: Literal[VectorQueryKind.IMAGE_BINARY] = rest_discriminator(name="kind") # type: ignore + """The kind of vector query being performed. Required. Vector query where a base 64 encoded binary + of an image that needs to be + vectorized is provided.""" + + @overload + def __init__( + self, + *, + k: Optional[int] = None, + fields: Optional[str] = None, + exhaustive: Optional[bool] = None, + oversampling: Optional[float] = None, + weight: Optional[float] = None, + threshold: Optional["_models.VectorThreshold"] = None, + filter_override: Optional[str] = None, + base64_image: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorQueryKind.IMAGE_BINARY, **kwargs) + + +class VectorizableImageUrlQuery(VectorQuery, discriminator="imageUrl"): + """The query parameters to use for vector search when an url that represents an + image value that needs to be vectorized is provided. + + + :ivar k: Number of nearest neighbors to return as top hits. 
+ :vartype k: int + :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector + searched. + :vartype fields: str + :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all + vectors + within the vector index. Useful for scenarios where exact matches are critical, + such as determining ground truth values. + :vartype exhaustive: bool + :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the + 'defaultOversampling' + parameter configured in the index definition. It can be set only when + 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + compression method is used on the underlying vector field. + :vartype oversampling: float + :ivar weight: Relative weight of the vector query when compared to other vector query and/or + the text query within the same search request. This value is used when + combining the results of multiple ranking lists produced by the different + vector queries and/or the results retrieved through the text query. The higher + the weight, the higher the documents that matched that query will be in the + final ranking. Default is 1.0 and the value needs to be a positive number + larger than zero. + :vartype weight: float + :ivar threshold: The threshold used for vector queries. Note this can only be set if all + 'fields' use the same similarity metric. + :vartype threshold: ~azure.search.documents.models.VectorThreshold + :ivar filter_override: The OData filter expression to apply to this specific vector query. If + no + filter expression is defined at the vector level, the expression defined in the + top level filter parameter is used instead. + :vartype filter_override: str + :ivar url: The URL of an image to be vectorized to perform a vector search query. + :vartype url: str + :ivar kind: The kind of vector query being performed. Required. Vector query where an url that + represents an image value that needs to be + vectorized is provided. + :vartype kind: str or ~azure.search.documents.models.IMAGE_URL + """ + + url: Optional[str] = rest_field() + """The URL of an image to be vectorized to perform a vector search query.""" + kind: Literal[VectorQueryKind.IMAGE_URL] = rest_discriminator(name="kind") # type: ignore + """The kind of vector query being performed. Required. Vector query where an url that represents + an image value that needs to be + vectorized is provided.""" + + @overload + def __init__( + self, + *, + k: Optional[int] = None, + fields: Optional[str] = None, + exhaustive: Optional[bool] = None, + oversampling: Optional[float] = None, + weight: Optional[float] = None, + threshold: Optional["_models.VectorThreshold"] = None, + filter_override: Optional[str] = None, + url: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorQueryKind.IMAGE_URL, **kwargs) + + +class VectorizableTextQuery(VectorQuery, discriminator="text"): + """The query parameters to use for vector search when a text value that needs to + be vectorized is provided. + + + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int + :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector + searched. 
+ :vartype fields: str + :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all + vectors + within the vector index. Useful for scenarios where exact matches are critical, + such as determining ground truth values. + :vartype exhaustive: bool + :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the + 'defaultOversampling' + parameter configured in the index definition. It can be set only when + 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + compression method is used on the underlying vector field. + :vartype oversampling: float + :ivar weight: Relative weight of the vector query when compared to other vector query and/or + the text query within the same search request. This value is used when + combining the results of multiple ranking lists produced by the different + vector queries and/or the results retrieved through the text query. The higher + the weight, the higher the documents that matched that query will be in the + final ranking. Default is 1.0 and the value needs to be a positive number + larger than zero. + :vartype weight: float + :ivar threshold: The threshold used for vector queries. Note this can only be set if all + 'fields' use the same similarity metric. + :vartype threshold: ~azure.search.documents.models.VectorThreshold + :ivar filter_override: The OData filter expression to apply to this specific vector query. If + no + filter expression is defined at the vector level, the expression defined in the + top level filter parameter is used instead. + :vartype filter_override: str + :ivar text: The text to be vectorized to perform a vector search query. Required. + :vartype text: str + :ivar query_rewrites: Can be configured to let a generative model rewrite the query before + sending it + to be vectorized. Known values are: "none" and "generative". + :vartype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType + :ivar kind: The kind of vector query being performed. Required. Vector query where a text value + that needs to be vectorized is provided. + :vartype kind: str or ~azure.search.documents.models.TEXT + """ + + text: str = rest_field() + """The text to be vectorized to perform a vector search query. Required.""" + query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field(name="queryRewrites") + """Can be configured to let a generative model rewrite the query before sending it + to be vectorized. Known values are: \"none\" and \"generative\".""" + kind: Literal[VectorQueryKind.TEXT] = rest_discriminator(name="kind") # type: ignore + """The kind of vector query being performed. Required. Vector query where a text value that needs + to be vectorized is provided.""" + + @overload + def __init__( + self, + *, + text: str, + k: Optional[int] = None, + fields: Optional[str] = None, + exhaustive: Optional[bool] = None, + oversampling: Optional[float] = None, + weight: Optional[float] = None, + threshold: Optional["_models.VectorThreshold"] = None, + filter_override: Optional[str] = None, + query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorQueryKind.TEXT, **kwargs) + + +class VectorizedQuery(VectorQuery, discriminator="vector"): + """The query parameters to use for vector search when a raw vector value is + provided. + + + :ivar k: Number of nearest neighbors to return as top hits. + :vartype k: int + :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector + searched. + :vartype fields: str + :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all + vectors + within the vector index. Useful for scenarios where exact matches are critical, + such as determining ground truth values. + :vartype exhaustive: bool + :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the + 'defaultOversampling' + parameter configured in the index definition. It can be set only when + 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + compression method is used on the underlying vector field. + :vartype oversampling: float + :ivar weight: Relative weight of the vector query when compared to other vector query and/or + the text query within the same search request. This value is used when + combining the results of multiple ranking lists produced by the different + vector queries and/or the results retrieved through the text query. The higher + the weight, the higher the documents that matched that query will be in the + final ranking. Default is 1.0 and the value needs to be a positive number + larger than zero. + :vartype weight: float + :ivar threshold: The threshold used for vector queries. Note this can only be set if all + 'fields' use the same similarity metric. + :vartype threshold: ~azure.search.documents.models.VectorThreshold + :ivar filter_override: The OData filter expression to apply to this specific vector query. If + no + filter expression is defined at the vector level, the expression defined in the + top level filter parameter is used instead. + :vartype filter_override: str + :ivar vector: The vector representation of a search query. Required. + :vartype vector: list[float] + :ivar kind: The kind of vector query being performed. Required. Vector query where a raw vector + value is provided. + :vartype kind: str or ~azure.search.documents.models.VECTOR + """ + + vector: List[float] = rest_field() + """The vector representation of a search query. Required.""" + kind: Literal[VectorQueryKind.VECTOR] = rest_discriminator(name="kind") # type: ignore + """The kind of vector query being performed. Required. Vector query where a raw vector value is + provided.""" + + @overload + def __init__( + self, + *, + vector: List[float], + k: Optional[int] = None, + fields: Optional[str] = None, + exhaustive: Optional[bool] = None, + oversampling: Optional[float] = None, + weight: Optional[float] = None, + threshold: Optional["_models.VectorThreshold"] = None, + filter_override: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorQueryKind.VECTOR, **kwargs) + + +class VectorsDebugInfo(_model_base.Model): + """Contains debugging information specific to vector and hybrid search. 
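A usage sketch for the vector query variants defined above, assuming the generated models package re-exports them; the field name, embedding values, and query text are hypothetical, and either object would be supplied as one of the vector queries on a search request.

    import azure.search.documents._generated.models as _models  # assumed import path

    # Query with a pre-computed embedding (the kind="vector" discriminator is set by __init__).
    raw_vector_query = _models.VectorizedQuery(
        vector=[0.0121, -0.0443, 0.0871],  # hypothetical embedding values
        fields="contentVector",
        k=5,
    )

    # Let the service vectorize the text at query time (kind="text").
    text_vector_query = _models.VectorizableTextQuery(
        text="historic hotels near the waterfront",
        fields="contentVector",
        k=5,
        weight=0.7,  # relative weight against other queries in a hybrid request
    )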
+ + Readonly variables are only populated by the server, and will be ignored when sending a request. + + :ivar subscores: The breakdown of subscores of the document prior to the chosen result set + fusion/combination method such as RRF. + :vartype subscores: ~azure.search.documents.models.QueryResultDocumentSubscores + """ + + subscores: Optional["_models.QueryResultDocumentSubscores"] = rest_field(visibility=["read"]) + """The breakdown of subscores of the document prior to the chosen result set + fusion/combination method such as RRF.""" + + +class VectorSearch(_model_base.Model): + """Contains configuration options related to vector search. + + :ivar profiles: Defines combinations of configurations to use with vector search. + :vartype profiles: list[~azure.search.documents.models.VectorSearchProfile] + :ivar algorithms: Contains configuration options specific to the algorithm used during indexing + or querying. + :vartype algorithms: list[~azure.search.documents.models.VectorSearchAlgorithmConfiguration] + :ivar vectorizers: Contains configuration options on how to vectorize text vector queries. + :vartype vectorizers: list[~azure.search.documents.models.VectorSearchVectorizer] + :ivar compressions: Contains configuration options specific to the compression method used + during + indexing or querying. + :vartype compressions: list[~azure.search.documents.models.VectorSearchCompression] + """ + + profiles: Optional[List["_models.VectorSearchProfile"]] = rest_field() + """Defines combinations of configurations to use with vector search.""" + algorithms: Optional[List["_models.VectorSearchAlgorithmConfiguration"]] = rest_field() + """Contains configuration options specific to the algorithm used during indexing + or querying.""" + vectorizers: Optional[List["_models.VectorSearchVectorizer"]] = rest_field() + """Contains configuration options on how to vectorize text vector queries.""" + compressions: Optional[List["_models.VectorSearchCompression"]] = rest_field() + """Contains configuration options specific to the compression method used during + indexing or querying.""" + + @overload + def __init__( + self, + *, + profiles: Optional[List["_models.VectorSearchProfile"]] = None, + algorithms: Optional[List["_models.VectorSearchAlgorithmConfiguration"]] = None, + vectorizers: Optional[List["_models.VectorSearchVectorizer"]] = None, + compressions: Optional[List["_models.VectorSearchCompression"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorSearchProfile(_model_base.Model): + """Defines a combination of configurations to use with vector search. + + + :ivar name: The name to associate with this particular vector search profile. Required. + :vartype name: str + :ivar algorithm_configuration_name: The name of the vector search algorithm configuration that + specifies the + algorithm and optional parameters. Required. + :vartype algorithm_configuration_name: str + :ivar vectorizer_name: The name of the vectorization being configured for use with vector + search. + :vartype vectorizer_name: str + :ivar compression_name: The name of the compression method configuration that specifies the + compression + method and optional parameters. 
+ :vartype compression_name: str + """ + + name: str = rest_field() + """The name to associate with this particular vector search profile. Required.""" + algorithm_configuration_name: str = rest_field(name="algorithm") + """The name of the vector search algorithm configuration that specifies the + algorithm and optional parameters. Required.""" + vectorizer_name: Optional[str] = rest_field(name="vectorizer") + """The name of the vectorization being configured for use with vector search.""" + compression_name: Optional[str] = rest_field(name="compression") + """The name of the compression method configuration that specifies the compression + method and optional parameters.""" + + @overload + def __init__( + self, + *, + name: str, + algorithm_configuration_name: str, + vectorizer_name: Optional[str] = None, + compression_name: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class VectorSimilarityThreshold(VectorThreshold, discriminator="vectorSimilarity"): + """The results of the vector query will be filtered based on the vector similarity + metric. Note this is the canonical definition of similarity metric, not the + 'distance' version. The threshold direction (larger or smaller) will be chosen + automatically according to the metric used by the field. + + + :ivar value: The threshold will filter based on the similarity metric value. Note this is + the canonical definition of similarity metric, not the 'distance' version. The + threshold direction (larger or smaller) will be chosen automatically according + to the metric used by the field. Required. + :vartype value: float + :ivar kind: The kind of threshold used to filter vector queries. Required. The results of the + vector query will be filtered based on the vector similarity + metric. Note this is the canonical definition of similarity metric, not the + 'distance' version. The threshold direction (larger or smaller) will be chosen + automatically according to the metric used by the field. + :vartype kind: str or ~azure.search.documents.models.VECTOR_SIMILARITY + """ + + value: float = rest_field() + """The threshold will filter based on the similarity metric value. Note this is + the canonical definition of similarity metric, not the 'distance' version. The + threshold direction (larger or smaller) will be chosen automatically according + to the metric used by the field. Required.""" + kind: Literal[VectorThresholdKind.VECTOR_SIMILARITY] = rest_discriminator(name="kind") # type: ignore + """The kind of threshold used to filter vector queries. Required. The results of the vector query + will be filtered based on the vector similarity + metric. Note this is the canonical definition of similarity metric, not the + 'distance' version. The threshold direction (larger or smaller) will be chosen + automatically according to the metric used by the field.""" + + @overload + def __init__( + self, + *, + value: float, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
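A minimal sketch tying VectorSearchProfile into VectorSearch as defined above; the profile, algorithm, and vectorizer names are hypothetical and assume configurations declared elsewhere in the index.

    import azure.search.documents._generated.models as _models  # assumed import path

    vector_search = _models.VectorSearch(
        profiles=[
            _models.VectorSearchProfile(
                name="my-vector-profile",
                algorithm_configuration_name="my-hnsw",  # serialized as 'algorithm'
                vectorizer_name="my-vectorizer",         # serialized as 'vectorizer'
            )
        ],
    )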
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorThresholdKind.VECTOR_SIMILARITY, **kwargs) + + +class VisionVectorizeSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vision.VectorizeSkill"): + """Allows you to generate a vector embedding for a given image or text input using + the Azure AI Services Vision Vectorize API. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. + :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar model_version: The version of the model to use when calling the AI Services Vision + service. It + will default to the latest available when not specified. Required. + :vartype model_version: str + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Vision.VectorizeSkill". + :vartype odata_type: str + """ + + model_version: str = rest_field(name="modelVersion") + """The version of the model to use when calling the AI Services Vision service. It + will default to the latest available when not specified. Required.""" + odata_type: Literal["#Microsoft.Skills.Vision.VectorizeSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Vision.VectorizeSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + model_version: str, + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Vision.VectorizeSkill", **kwargs) + + +class WebApiSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Custom.WebApiSkill"): + """A skill that can call a Web API endpoint, allowing you to extend a skillset by + having it call your custom code. + + + :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill + with no name defined will be given a default name of its 1-based index in the + skills array, prefixed with the character '#'. + :vartype name: str + :ivar description: The description of the skill which describes the inputs, outputs, and usage + of + the skill. 
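A sketch showing the VectorSimilarityThreshold above attached to a vector query through its 'threshold' property; the cutoff value, embedding values, and field name are hypothetical.

    import azure.search.documents._generated.models as _models  # assumed import path

    # Drop matches whose similarity to the query vector falls below 0.8.
    filtered_query = _models.VectorizedQuery(
        vector=[0.0121, -0.0443, 0.0871],  # hypothetical embedding values
        fields="contentVector",
        k=10,
        threshold=_models.VectorSimilarityThreshold(value=0.8),
    )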
+ :vartype description: str + :ivar context: Represents the level at which operations take place, such as the document root + or document content (for example, /document or /document/content). The default + is /document. + :vartype context: str + :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of + an upstream skill. Required. + :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] + :ivar outputs: The output of a skill is either a field in a search index, or a value that can + be consumed as an input by another skill. Required. + :vartype outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] + :ivar uri: The url for the Web API. Required. + :vartype uri: str + :ivar http_headers: The headers required to make the http request. + :vartype http_headers: dict[str, str] + :ivar http_method: The method for the http request. + :vartype http_method: str + :ivar timeout: The desired timeout for the request. Default is 30 seconds. + :vartype timeout: ~datetime.timedelta + :ivar batch_size: The desired batch size which indicates number of documents. + :vartype batch_size: int + :ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web + API. + :vartype degree_of_parallelism: int + :ivar auth_resource_id: Applies to custom skills that connect to external code in an Azure + function or + some other application that provides the transformations. This value should be + the application ID created for the function or app when it was registered with + Azure Active Directory. When specified, the custom skill connects to the + function or app using a managed ID (either system or user-assigned) of the + search service and the access token of the function or app, using this value as + the resource id for creating the scope of the access token. + :vartype auth_resource_id: str + :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an + authResourceId is provided and it's not specified, the system-assigned managed + identity is used. On updates to the indexer, if the identity is unspecified, + the value remains unchanged. If set to "none", the value of this property is + cleared. + :vartype auth_identity: ~azure.search.documents.models.SearchIndexerDataIdentity + :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is + "#Microsoft.Skills.Custom.WebApiSkill". + :vartype odata_type: str + """ + + uri: str = rest_field() + """The url for the Web API. Required.""" + http_headers: Optional[Dict[str, str]] = rest_field(name="httpHeaders") + """The headers required to make the http request.""" + http_method: Optional[str] = rest_field(name="httpMethod") + """The method for the http request.""" + timeout: Optional[datetime.timedelta] = rest_field() + """The desired timeout for the request. Default is 30 seconds.""" + batch_size: Optional[int] = rest_field(name="batchSize") + """The desired batch size which indicates number of documents.""" + degree_of_parallelism: Optional[int] = rest_field(name="degreeOfParallelism") + """If set, the number of parallel calls that can be made to the Web API.""" + auth_resource_id: Optional[str] = rest_field(name="authResourceId") + """Applies to custom skills that connect to external code in an Azure function or + some other application that provides the transformations. This value should be + the application ID created for the function or app when it was registered with + Azure Active Directory. 
When specified, the custom skill connects to the + function or app using a managed ID (either system or user-assigned) of the + search service and the access token of the function or app, using this value as + the resource id for creating the scope of the access token.""" + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + """The user-assigned managed identity used for outbound connections. If an + authResourceId is provided and it's not specified, the system-assigned managed + identity is used. On updates to the indexer, if the identity is unspecified, + the value remains unchanged. If set to \"none\", the value of this property is + cleared.""" + odata_type: Literal["#Microsoft.Skills.Custom.WebApiSkill"] = rest_discriminator(name="@odata.type") # type: ignore + """A URI fragment specifying the type of skill. Required. Default value is + \"#Microsoft.Skills.Custom.WebApiSkill\".""" + + @overload + def __init__( + self, + *, + inputs: List["_models.InputFieldMappingEntry"], + outputs: List["_models.OutputFieldMappingEntry"], + uri: str, + name: Optional[str] = None, + description: Optional[str] = None, + context: Optional[str] = None, + http_headers: Optional[Dict[str, str]] = None, + http_method: Optional[str] = None, + timeout: Optional[datetime.timedelta] = None, + batch_size: Optional[int] = None, + degree_of_parallelism: Optional[int] = None, + auth_resource_id: Optional[str] = None, + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Skills.Custom.WebApiSkill", **kwargs) + + +class WebApiVectorizer(VectorSearchVectorizer, discriminator="customWebApi"): + """Specifies a user-defined vectorizer for generating the vector embedding of a + query string. Integration of an external vectorizer is achieved using the + custom Web API interface of a skillset. + + + :ivar vectorizer_name: The name to associate with this particular vectorization method. + Required. + :vartype vectorizer_name: str + :ivar web_api_parameters: Specifies the properties of the user-defined vectorizer. + :vartype web_api_parameters: ~azure.search.documents.models.WebApiVectorizerParameters + :ivar kind: The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings using a custom web endpoint at query time. + :vartype kind: str or ~azure.search.documents.models.CUSTOM_WEB_API + """ + + web_api_parameters: Optional["_models.WebApiVectorizerParameters"] = rest_field(name="customWebApiParameters") + """Specifies the properties of the user-defined vectorizer.""" + kind: Literal[VectorSearchVectorizerKind.CUSTOM_WEB_API] = rest_discriminator(name="kind") # type: ignore + """The name of the kind of vectorization method being configured for use with + vector search. Required. Generate embeddings using a custom web endpoint at query time.""" + + @overload + def __init__( + self, + *, + vectorizer_name: str, + web_api_parameters: Optional["_models.WebApiVectorizerParameters"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
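A hedged sketch for the WebApiSkill above; the endpoint, skill name, and field mappings are hypothetical, and the InputFieldMappingEntry/OutputFieldMappingEntry keyword names are assumed from the rest of this generated surface.

    import datetime
    import azure.search.documents._generated.models as _models  # assumed import path

    custom_skill = _models.WebApiSkill(
        name="#custom-enricher",                # hypothetical skill name
        uri="https://example.com/api/enrich",   # hypothetical endpoint
        http_method="POST",
        batch_size=4,
        timeout=datetime.timedelta(seconds=90),
        inputs=[_models.InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[_models.OutputFieldMappingEntry(name="result", target_name="enriched_text")],
    )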
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, kind=VectorSearchVectorizerKind.CUSTOM_WEB_API, **kwargs) + + +class WebApiVectorizerParameters(_model_base.Model): + """Specifies the properties for connecting to a user-defined vectorizer. + + :ivar url: The URI of the Web API providing the vectorizer. + :vartype url: str + :ivar http_headers: The headers required to make the HTTP request. + :vartype http_headers: dict[str, str] + :ivar http_method: The method for the HTTP request. + :vartype http_method: str + :ivar timeout: The desired timeout for the request. Default is 30 seconds. + :vartype timeout: ~datetime.timedelta + :ivar auth_resource_id: Applies to custom endpoints that connect to external code in an Azure + function + or some other application that provides the transformations. This value should + be the application ID created for the function or app when it was registered + with Azure Active Directory. When specified, the vectorization connects to the + function or app using a managed ID (either system or user-assigned) of the + search service and the access token of the function or app, using this value as + the resource id for creating the scope of the access token. + :vartype auth_resource_id: str + :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an + authResourceId is provided and it's not specified, the system-assigned managed + identity is used. On updates to the indexer, if the identity is unspecified, + the value remains unchanged. If set to "none", the value of this property is + cleared. + :vartype auth_identity: ~azure.search.documents.models.SearchIndexerDataIdentity + """ + + url: Optional[str] = rest_field(name="uri") + """The URI of the Web API providing the vectorizer.""" + http_headers: Optional[Dict[str, str]] = rest_field(name="httpHeaders") + """The headers required to make the HTTP request.""" + http_method: Optional[str] = rest_field(name="httpMethod") + """The method for the HTTP request.""" + timeout: Optional[datetime.timedelta] = rest_field() + """The desired timeout for the request. Default is 30 seconds.""" + auth_resource_id: Optional[str] = rest_field(name="authResourceId") + """Applies to custom endpoints that connect to external code in an Azure function + or some other application that provides the transformations. This value should + be the application ID created for the function or app when it was registered + with Azure Active Directory. When specified, the vectorization connects to the + function or app using a managed ID (either system or user-assigned) of the + search service and the access token of the function or app, using this value as + the resource id for creating the scope of the access token.""" + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + """The user-assigned managed identity used for outbound connections. If an + authResourceId is provided and it's not specified, the system-assigned managed + identity is used. On updates to the indexer, if the identity is unspecified, + the value remains unchanged. 
If set to \"none\", the value of this property is + cleared.""" + + @overload + def __init__( + self, + *, + url: Optional[str] = None, + http_headers: Optional[Dict[str, str]] = None, + http_method: Optional[str] = None, + timeout: Optional[datetime.timedelta] = None, + auth_resource_id: Optional[str] = None, + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class WordDelimiterTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.WordDelimiterTokenFilter"): + """Splits words into subwords and performs optional transformations on subword + groups. This token filter is implemented using Apache Lucene. + + + :ivar name: The name of the token filter. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and + is limited to 128 characters. Required. + :vartype name: str + :ivar generate_word_parts: A value indicating whether to generate part words. If set, causes + parts of + words to be generated; for example "AzureSearch" becomes "Azure" "Search". + Default is true. + :vartype generate_word_parts: bool + :ivar generate_number_parts: A value indicating whether to generate number subwords. Default is + true. + :vartype generate_number_parts: bool + :ivar catenate_words: A value indicating whether maximum runs of word parts will be catenated. + For + example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default + is false. + :vartype catenate_words: bool + :ivar catenate_numbers: A value indicating whether maximum runs of number parts will be + catenated. For + example, if this is set to true, "1-2" becomes "12". Default is false. + :vartype catenate_numbers: bool + :ivar catenate_all: A value indicating whether all subword parts will be catenated. For + example, if + this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. + :vartype catenate_all: bool + :ivar split_on_case_change: A value indicating whether to split words on caseChange. For + example, if this + is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. + :vartype split_on_case_change: bool + :ivar preserve_original: A value indicating whether original words will be preserved and added + to the + subword list. Default is false. + :vartype preserve_original: bool + :ivar split_on_numerics: A value indicating whether to split on numbers. For example, if this + is set to + true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. + :vartype split_on_numerics: bool + :ivar stem_english_possessive: A value indicating whether to remove trailing "'s" for each + subword. Default is + true. + :vartype stem_english_possessive: bool + :ivar protected_words: A list of tokens to protect from being delimited. + :vartype protected_words: list[str] + :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value + is "#Microsoft.Azure.Search.WordDelimiterTokenFilter". + :vartype odata_type: str + """ + + generate_word_parts: Optional[bool] = rest_field(name="generateWordParts") + """A value indicating whether to generate part words. If set, causes parts of + words to be generated; for example \"AzureSearch\" becomes \"Azure\" \"Search\". 
+ Default is true.""" + generate_number_parts: Optional[bool] = rest_field(name="generateNumberParts") + """A value indicating whether to generate number subwords. Default is true.""" + catenate_words: Optional[bool] = rest_field(name="catenateWords") + """A value indicating whether maximum runs of word parts will be catenated. For + example, if this is set to true, \"Azure-Search\" becomes \"AzureSearch\". Default + is false.""" + catenate_numbers: Optional[bool] = rest_field(name="catenateNumbers") + """A value indicating whether maximum runs of number parts will be catenated. For + example, if this is set to true, \"1-2\" becomes \"12\". Default is false.""" + catenate_all: Optional[bool] = rest_field(name="catenateAll") + """A value indicating whether all subword parts will be catenated. For example, if + this is set to true, \"Azure-Search-1\" becomes \"AzureSearch1\". Default is false.""" + split_on_case_change: Optional[bool] = rest_field(name="splitOnCaseChange") + """A value indicating whether to split words on caseChange. For example, if this + is set to true, \"AzureSearch\" becomes \"Azure\" \"Search\". Default is true.""" + preserve_original: Optional[bool] = rest_field(name="preserveOriginal") + """A value indicating whether original words will be preserved and added to the + subword list. Default is false.""" + split_on_numerics: Optional[bool] = rest_field(name="splitOnNumerics") + """A value indicating whether to split on numbers. For example, if this is set to + true, \"Azure1Search\" becomes \"Azure\" \"1\" \"Search\". Default is true.""" + stem_english_possessive: Optional[bool] = rest_field(name="stemEnglishPossessive") + """A value indicating whether to remove trailing \"'s\" for each subword. Default is + true.""" + protected_words: Optional[List[str]] = rest_field(name="protectedWords") + """A list of tokens to protect from being delimited.""" + odata_type: Literal["#Microsoft.Azure.Search.WordDelimiterTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + """A URI fragment specifying the type of token filter. Required. Default value is + \"#Microsoft.Azure.Search.WordDelimiterTokenFilter\".""" + + @overload + def __init__( + self, + *, + name: str, + generate_word_parts: Optional[bool] = None, + generate_number_parts: Optional[bool] = None, + catenate_words: Optional[bool] = None, + catenate_numbers: Optional[bool] = None, + catenate_all: Optional[bool] = None, + split_on_case_change: Optional[bool] = None, + preserve_original: Optional[bool] = None, + split_on_numerics: Optional[bool] = None, + stem_english_possessive: Optional[bool] = None, + protected_words: Optional[List[str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, odata_type="#Microsoft.Azure.Search.WordDelimiterTokenFilter", **kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py deleted file mode 100644 index dfc81820a0bb..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models_py3.py +++ /dev/null @@ -1,2978 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union - -from .. import _serialization - -if TYPE_CHECKING: - from .. import models as _models - - -class AutocompleteItem(_serialization.Model): - """The result of Autocomplete requests. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar text: The completed term. Required. - :vartype text: str - :ivar query_plus_text: The query along with the completed term. Required. - :vartype query_plus_text: str - """ - - _validation = { - "text": {"required": True, "readonly": True}, - "query_plus_text": {"required": True, "readonly": True}, - } - - _attribute_map = { - "text": {"key": "text", "type": "str"}, - "query_plus_text": {"key": "queryPlusText", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.text = None - self.query_plus_text = None - - -class AutocompleteOptions(_serialization.Model): - """Parameter group. - - :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use - 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing - auto-completed terms. Known values are: "oneTerm", "twoTerms", and "oneTermWithContext". - :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :ivar filter: An OData expression that filters the documents used to produce completed terms - for the Autocomplete result. - :vartype filter: str - :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete - query. Default is false. When set to true, the query will find terms even if there's a - substituted or missing character in the search text. While this provides a better experience in - some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and - consume more resources. - :vartype use_fuzzy_matching: bool - :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting is disabled. - :vartype highlight_post_tag: str - :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting is disabled. 
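# Illustrative sketch of configuring the WordDelimiterTokenFilter model defined above,
# via the public re-export in azure.search.documents.indexes.models; it assumes the
# public class keeps the same keyword arguments as the generated model in this patch.
from azure.search.documents.indexes.models import WordDelimiterTokenFilter

word_delimiter = WordDelimiterTokenFilter(
    name="my-word-delimiter",        # letters/digits/dashes/underscores only, max 128 chars
    generate_word_parts=True,        # "AzureSearch" -> "Azure", "Search"
    catenate_words=True,             # "Azure-Search" -> "AzureSearch"
    split_on_case_change=True,
    preserve_original=False,
    protected_words=["C#", ".NET"],  # tokens that must never be split
)
# On the wire this serializes with the camelCase names shown above (generateWordParts,
# catenateWords, ...) plus "@odata.type": "#Microsoft.Azure.Search.WordDelimiterTokenFilter".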
- :vartype highlight_pre_tag: str - :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that - must be covered by an autocomplete query in order for the query to be reported as a success. - This parameter can be useful for ensuring search availability even for services with only one - replica. The default is 80. - :vartype minimum_coverage: float - :ivar search_fields: The list of field names to consider when querying for auto-completed - terms. Target fields must be included in the specified suggester. - :vartype search_fields: list[str] - :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and - 100. The default is 5. - :vartype top: int - """ - - _attribute_map = { - "autocomplete_mode": {"key": "autocompleteMode", "type": "str"}, - "filter": {"key": "$filter", "type": "str"}, - "use_fuzzy_matching": {"key": "UseFuzzyMatching", "type": "bool"}, - "highlight_post_tag": {"key": "highlightPostTag", "type": "str"}, - "highlight_pre_tag": {"key": "highlightPreTag", "type": "str"}, - "minimum_coverage": {"key": "minimumCoverage", "type": "float"}, - "search_fields": {"key": "searchFields", "type": "[str]"}, - "top": {"key": "$top", "type": "int"}, - } - - def __init__( - self, - *, - autocomplete_mode: Optional[Union[str, "_models.AutocompleteMode"]] = None, - filter: Optional[str] = None, # pylint: disable=redefined-builtin - use_fuzzy_matching: Optional[bool] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - search_fields: Optional[List[str]] = None, - top: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use - 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing - auto-completed terms. Known values are: "oneTerm", "twoTerms", and "oneTermWithContext". - :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword filter: An OData expression that filters the documents used to produce completed terms - for the Autocomplete result. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - autocomplete query. Default is false. When set to true, the query will find terms even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by an autocomplete query in order for the query to be reported as a - success. This parameter can be useful for ensuring search availability even for services with - only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword search_fields: The list of field names to consider when querying for auto-completed - terms. 
Target fields must be included in the specified suggester. - :paramtype search_fields: list[str] - :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 - and 100. The default is 5. - :paramtype top: int - """ - super().__init__(**kwargs) - self.autocomplete_mode = autocomplete_mode - self.filter = filter - self.use_fuzzy_matching = use_fuzzy_matching - self.highlight_post_tag = highlight_post_tag - self.highlight_pre_tag = highlight_pre_tag - self.minimum_coverage = minimum_coverage - self.search_fields = search_fields - self.top = top - - -class AutocompleteRequest(_serialization.Model): - """Parameters for fuzzy matching, and other autocomplete query behaviors. - - All required parameters must be populated in order to send to server. - - :ivar search_text: The search text on which to base autocomplete results. Required. - :vartype search_text: str - :ivar autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use - 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing - auto-completed terms. Known values are: "oneTerm", "twoTerms", and "oneTermWithContext". - :vartype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :ivar filter: An OData expression that filters the documents used to produce completed terms - for the Autocomplete result. - :vartype filter: str - :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete - query. Default is false. When set to true, the query will autocomplete terms even if there's a - substituted or missing character in the search text. While this provides a better experience in - some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and - consume more resources. - :vartype use_fuzzy_matching: bool - :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting is disabled. - :vartype highlight_post_tag: str - :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting is disabled. - :vartype highlight_pre_tag: str - :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that - must be covered by an autocomplete query in order for the query to be reported as a success. - This parameter can be useful for ensuring search availability even for services with only one - replica. The default is 80. - :vartype minimum_coverage: float - :ivar search_fields: The comma-separated list of field names to consider when querying for - auto-completed terms. Target fields must be included in the specified suggester. - :vartype search_fields: str - :ivar suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :vartype suggester_name: str - :ivar top: The number of auto-completed terms to retrieve. This must be a value between 1 and - 100. The default is 5. 
- :vartype top: int - """ - - _validation = { - "search_text": {"required": True}, - "suggester_name": {"required": True}, - } - - _attribute_map = { - "search_text": {"key": "search", "type": "str"}, - "autocomplete_mode": {"key": "autocompleteMode", "type": "str"}, - "filter": {"key": "filter", "type": "str"}, - "use_fuzzy_matching": {"key": "fuzzy", "type": "bool"}, - "highlight_post_tag": {"key": "highlightPostTag", "type": "str"}, - "highlight_pre_tag": {"key": "highlightPreTag", "type": "str"}, - "minimum_coverage": {"key": "minimumCoverage", "type": "float"}, - "search_fields": {"key": "searchFields", "type": "str"}, - "suggester_name": {"key": "suggesterName", "type": "str"}, - "top": {"key": "top", "type": "int"}, - } - - def __init__( - self, - *, - search_text: str, - suggester_name: str, - autocomplete_mode: Optional[Union[str, "_models.AutocompleteMode"]] = None, - filter: Optional[str] = None, # pylint: disable=redefined-builtin - use_fuzzy_matching: Optional[bool] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - search_fields: Optional[str] = None, - top: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword search_text: The search text on which to base autocomplete results. Required. - :paramtype search_text: str - :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use - 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing - auto-completed terms. Known values are: "oneTerm", "twoTerms", and "oneTermWithContext". - :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword filter: An OData expression that filters the documents used to produce completed terms - for the Autocomplete result. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - autocomplete query. Default is false. When set to true, the query will autocomplete terms even - if there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by an autocomplete query in order for the query to be reported as a - success. This parameter can be useful for ensuring search availability even for services with - only one replica. The default is 80. - :paramtype minimum_coverage: float - :keyword search_fields: The comma-separated list of field names to consider when querying for - auto-completed terms. Target fields must be included in the specified suggester. - :paramtype search_fields: str - :keyword suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :paramtype suggester_name: str - :keyword top: The number of auto-completed terms to retrieve. 
This must be a value between 1 - and 100. The default is 5. - :paramtype top: int - """ - super().__init__(**kwargs) - self.search_text = search_text - self.autocomplete_mode = autocomplete_mode - self.filter = filter - self.use_fuzzy_matching = use_fuzzy_matching - self.highlight_post_tag = highlight_post_tag - self.highlight_pre_tag = highlight_pre_tag - self.minimum_coverage = minimum_coverage - self.search_fields = search_fields - self.suggester_name = suggester_name - self.top = top - - -class AutocompleteResult(_serialization.Model): - """The result of Autocomplete query. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar coverage: A value indicating the percentage of the index that was considered by the - autocomplete request, or null if minimumCoverage was not specified in the request. - :vartype coverage: float - :ivar results: The list of returned Autocompleted items. Required. - :vartype results: list[~azure.search.documents.models.AutocompleteItem] - """ - - _validation = { - "coverage": {"readonly": True}, - "results": {"required": True, "readonly": True}, - } - - _attribute_map = { - "coverage": {"key": "@search\\.coverage", "type": "float"}, - "results": {"key": "value", "type": "[AutocompleteItem]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.coverage = None - self.results = None - - -class DebugInfo(_serialization.Model): - """Contains debugging information that can be used to further explore your search results. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar query_rewrites: Contains debugging information specific to query rewrites. - :vartype query_rewrites: ~azure.search.documents.models.QueryRewritesDebugInfo - """ - - _validation = { - "query_rewrites": {"readonly": True}, - } - - _attribute_map = { - "query_rewrites": {"key": "queryRewrites", "type": "QueryRewritesDebugInfo"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.query_rewrites = None - - -class DocumentDebugInfo(_serialization.Model): - """Contains debugging information that can be used to further explore your search results. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar semantic: Contains debugging information specific to semantic ranking requests. - :vartype semantic: ~azure.search.documents.models.SemanticDebugInfo - :ivar vectors: Contains debugging information specific to vector and hybrid search. - :vartype vectors: ~azure.search.documents.models.VectorsDebugInfo - """ - - _validation = { - "semantic": {"readonly": True}, - "vectors": {"readonly": True}, - } - - _attribute_map = { - "semantic": {"key": "semantic", "type": "SemanticDebugInfo"}, - "vectors": {"key": "vectors", "type": "VectorsDebugInfo"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.semantic = None - self.vectors = None - - -class ErrorAdditionalInfo(_serialization.Model): - """The resource management error additional info. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar type: The additional info type. - :vartype type: str - :ivar info: The additional info. 
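# Illustrative sketch: the Autocomplete* models above back SearchClient.autocomplete.
# The endpoint, key, index name and suggester name below are placeholders, and the
# keyword names (mode, use_fuzzy_matching, top) are assumed from the public client.
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient(
    endpoint="https://<service>.search.windows.net",
    index_name="hotels-index",
    credential=AzureKeyCredential("<api-key>"),
)
suggestions = client.autocomplete(
    search_text="azu",
    suggester_name="sg",       # must exist in the index's suggesters collection
    mode="twoTerms",           # oneTerm | twoTerms | oneTermWithContext
    use_fuzzy_matching=True,   # tolerates a substituted/missing character, at a latency cost
    top=5,                     # between 1 and 100, default 5
)
for item in suggestions:
    print(item)                # each entry carries the completed term and queryPlusText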
- :vartype info: JSON - """ - - _validation = { - "type": {"readonly": True}, - "info": {"readonly": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "info": {"key": "info", "type": "object"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.type = None - self.info = None - - -class ErrorDetail(_serialization.Model): - """The error detail. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar code: The error code. - :vartype code: str - :ivar message: The error message. - :vartype message: str - :ivar target: The error target. - :vartype target: str - :ivar details: The error details. - :vartype details: list[~azure.search.documents.models.ErrorDetail] - :ivar additional_info: The error additional info. - :vartype additional_info: list[~azure.search.documents.models.ErrorAdditionalInfo] - """ - - _validation = { - "code": {"readonly": True}, - "message": {"readonly": True}, - "target": {"readonly": True}, - "details": {"readonly": True}, - "additional_info": {"readonly": True}, - } - - _attribute_map = { - "code": {"key": "code", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "target": {"key": "target", "type": "str"}, - "details": {"key": "details", "type": "[ErrorDetail]"}, - "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.code = None - self.message = None - self.target = None - self.details = None - self.additional_info = None - - -class ErrorResponse(_serialization.Model): - """Common error response for all Azure Resource Manager APIs to return error details for failed - operations. (This also follows the OData error response format.). - - :ivar error: The error object. - :vartype error: ~azure.search.documents.models.ErrorDetail - """ - - _attribute_map = { - "error": {"key": "error", "type": "ErrorDetail"}, - } - - def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None: - """ - :keyword error: The error object. - :paramtype error: ~azure.search.documents.models.ErrorDetail - """ - super().__init__(**kwargs) - self.error = error - - -class FacetResult(_serialization.Model): - """A single bucket of a facet query result. Reports the number of documents with a field value - falling within a particular range or having a particular value or interval. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar count: The approximate count of documents falling within the bucket described by this - facet. - :vartype count: int - :ivar facets: The nested facet query results for the search operation, organized as a - collection of buckets for each faceted field; null if the query did not contain any nested - facets. 
- :vartype facets: dict[str, list[~azure.search.documents.models.FacetResult]] - """ - - _validation = { - "count": {"readonly": True}, - "facets": {"readonly": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "count": {"key": "count", "type": "int"}, - "facets": {"key": "@search\\.facets", "type": "{[FacetResult]}"}, - } - - def __init__(self, *, additional_properties: Optional[Dict[str, Any]] = None, **kwargs: Any) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.count = None - self.facets = None - - -class HybridSearch(_serialization.Model): - """TThe query parameters to configure hybrid search behaviors. - - :ivar max_text_recall_size: Determines the maximum number of documents to be retrieved by the - text query portion of a hybrid search request. Those documents will be combined with the - documents matching the vector queries to produce a single final list of results. Choosing a - larger maxTextRecallSize value will allow retrieving and paging through more documents (using - the top and skip parameters), at the cost of higher resource utilization and higher latency. - The value needs to be between 1 and 10,000. Default is 1000. - :vartype max_text_recall_size: int - :ivar count_and_facet_mode: Determines whether the count and facets should includes all - documents that matched the search query, or only the documents that are retrieved within the - 'maxTextRecallSize' window. Known values are: "countRetrievableResults" and "countAllResults". - :vartype count_and_facet_mode: str or ~azure.search.documents.models.HybridCountAndFacetMode - """ - - _attribute_map = { - "max_text_recall_size": {"key": "maxTextRecallSize", "type": "int"}, - "count_and_facet_mode": {"key": "countAndFacetMode", "type": "str"}, - } - - def __init__( - self, - *, - max_text_recall_size: Optional[int] = None, - count_and_facet_mode: Optional[Union[str, "_models.HybridCountAndFacetMode"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword max_text_recall_size: Determines the maximum number of documents to be retrieved by - the text query portion of a hybrid search request. Those documents will be combined with the - documents matching the vector queries to produce a single final list of results. Choosing a - larger maxTextRecallSize value will allow retrieving and paging through more documents (using - the top and skip parameters), at the cost of higher resource utilization and higher latency. - The value needs to be between 1 and 10,000. Default is 1000. - :paramtype max_text_recall_size: int - :keyword count_and_facet_mode: Determines whether the count and facets should includes all - documents that matched the search query, or only the documents that are retrieved within the - 'maxTextRecallSize' window. Known values are: "countRetrievableResults" and "countAllResults". - :paramtype count_and_facet_mode: str or ~azure.search.documents.models.HybridCountAndFacetMode - """ - super().__init__(**kwargs) - self.max_text_recall_size = max_text_recall_size - self.count_and_facet_mode = count_and_facet_mode - - -class IndexAction(_serialization.Model): - """Represents an index action that operates on a document. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. 
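# Illustrative sketch of the hybridSearch knobs described above, written as the raw
# search request body they serialize to (camelCase keys from the model's attribute map).
# The vectorQueries shape and the exact SDK keyword that exposes HybridSearch vary by
# API/SDK version, so treat everything outside "hybridSearch" as an assumption.
search_request_body = {
    "search": "historic hotel near the beach",
    "vectorQueries": [
        {"kind": "text", "text": "historic hotel near the beach",
         "fields": "descriptionVector", "k": 50}
    ],
    "hybridSearch": {
        "maxTextRecallSize": 500,                # 1..10,000; default 1000
        "countAndFacetMode": "countAllResults",  # or "countRetrievableResults"
    },
}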
- :vartype additional_properties: dict[str, any] - :ivar action_type: The operation to perform on a document in an indexing batch. Known values - are: "upload", "merge", "mergeOrUpload", and "delete". - :vartype action_type: str or ~azure.search.documents.models.IndexActionType - """ - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "action_type": {"key": "@search\\.action", "type": "str"}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - action_type: Optional[Union[str, "_models.IndexActionType"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword action_type: The operation to perform on a document in an indexing batch. Known values - are: "upload", "merge", "mergeOrUpload", and "delete". - :paramtype action_type: str or ~azure.search.documents.models.IndexActionType - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.action_type = action_type - - -class IndexBatch(_serialization.Model): - """Contains a batch of document write actions to send to the index. - - All required parameters must be populated in order to send to server. - - :ivar actions: The actions in the batch. Required. - :vartype actions: list[~azure.search.documents.models.IndexAction] - """ - - _validation = { - "actions": {"required": True}, - } - - _attribute_map = { - "actions": {"key": "value", "type": "[IndexAction]"}, - } - - def __init__(self, *, actions: List["_models.IndexAction"], **kwargs: Any) -> None: - """ - :keyword actions: The actions in the batch. Required. - :paramtype actions: list[~azure.search.documents.models.IndexAction] - """ - super().__init__(**kwargs) - self.actions = actions - - -class IndexDocumentsResult(_serialization.Model): - """Response containing the status of operations for all documents in the indexing request. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar results: The list of status information for each document in the indexing request. - Required. - :vartype results: list[~azure.search.documents.models.IndexingResult] - """ - - _validation = { - "results": {"required": True, "readonly": True}, - } - - _attribute_map = { - "results": {"key": "value", "type": "[IndexingResult]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.results = None - - -class IndexingResult(_serialization.Model): - """Status of an indexing operation for a single document. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar key: The key of a document that was in the indexing request. Required. - :vartype key: str - :ivar error_message: The error message explaining why the indexing operation failed for the - document identified by the key; null if indexing succeeded. - :vartype error_message: str - :ivar succeeded: A value indicating whether the indexing operation succeeded for the document - identified by the key. Required. - :vartype succeeded: bool - :ivar status_code: The status code of the indexing operation. 
Possible values include: 200 for - a successful update or delete, 201 for successful document creation, 400 for a malformed input - document, 404 for document not found, 409 for a version conflict, 422 when the index is - temporarily unavailable, or 503 for when the service is too busy. Required. - :vartype status_code: int - """ - - _validation = { - "key": {"required": True, "readonly": True}, - "error_message": {"readonly": True}, - "succeeded": {"required": True, "readonly": True}, - "status_code": {"required": True, "readonly": True}, - } - - _attribute_map = { - "key": {"key": "key", "type": "str"}, - "error_message": {"key": "errorMessage", "type": "str"}, - "succeeded": {"key": "status", "type": "bool"}, - "status_code": {"key": "statusCode", "type": "int"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.key = None - self.error_message = None - self.succeeded = None - self.status_code = None - - -class QueryAnswerResult(_serialization.Model): - """An answer is a text passage extracted from the contents of the most relevant documents that - matched the query. Answers are extracted from the top search results. Answer candidates are - scored and the top answers are selected. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar score: The score value represents how relevant the answer is to the query relative to - other answers returned for the query. - :vartype score: float - :ivar key: The key of the document the answer was extracted from. - :vartype key: str - :ivar text: The text passage extracted from the document contents as the answer. - :vartype text: str - :ivar highlights: Same text passage as in the Text property with highlighted text phrases most - relevant to the query. - :vartype highlights: str - """ - - _validation = { - "score": {"readonly": True}, - "key": {"readonly": True}, - "text": {"readonly": True}, - "highlights": {"readonly": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "score": {"key": "score", "type": "float"}, - "key": {"key": "key", "type": "str"}, - "text": {"key": "text", "type": "str"}, - "highlights": {"key": "highlights", "type": "str"}, - } - - def __init__(self, *, additional_properties: Optional[Dict[str, Any]] = None, **kwargs: Any) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.score = None - self.key = None - self.text = None - self.highlights = None - - -class QueryCaptionResult(_serialization.Model): - """Captions are the most representative passages from the document relatively to the search query. - They are often used as document summary. Captions are only returned for queries of type - ``semantic``. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar text: A representative text passage extracted from the document most relevant to the - search query. 
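# Illustrative sketch: the IndexAction / IndexBatch / IndexingResult models above are the
# wire shapes behind SearchClient.index_documents; IndexDocumentsBatch is the public helper
# that builds the actions.  Endpoint, key, index name and the "hotelId" key are placeholders.
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import IndexDocumentsBatch, SearchClient

client = SearchClient("https://<service>.search.windows.net", "hotels-index",
                      AzureKeyCredential("<api-key>"))

batch = IndexDocumentsBatch()
batch.add_upload_actions([{"hotelId": "1", "name": "Fancy Stay"}])    # @search.action = upload
batch.add_merge_or_upload_actions([{"hotelId": "2", "rating": 4.5}])  # merge, or upload if missing
batch.add_delete_actions([{"hotelId": "3"}])                          # delete by key

for result in client.index_documents(batch):
    # Each IndexingResult carries the document key, a success flag and an HTTP-style
    # status code (200/201 on success; 400/404/409/422/503 on the failures listed above).
    print(result.key, result.succeeded, result.status_code, result.error_message)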
- :vartype text: str - :ivar highlights: Same text passage as in the Text property with highlighted phrases most - relevant to the query. - :vartype highlights: str - """ - - _validation = { - "text": {"readonly": True}, - "highlights": {"readonly": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "text": {"key": "text", "type": "str"}, - "highlights": {"key": "highlights", "type": "str"}, - } - - def __init__(self, *, additional_properties: Optional[Dict[str, Any]] = None, **kwargs: Any) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.text = None - self.highlights = None - - -class QueryResultDocumentRerankerInput(_serialization.Model): - """The raw concatenated strings that were sent to the semantic enrichment process. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar title: The raw string for the title field that was used for semantic enrichment. - :vartype title: str - :ivar content: The raw concatenated strings for the content fields that were used for semantic - enrichment. - :vartype content: str - :ivar keywords: The raw concatenated strings for the keyword fields that were used for semantic - enrichment. - :vartype keywords: str - """ - - _validation = { - "title": {"readonly": True}, - "content": {"readonly": True}, - "keywords": {"readonly": True}, - } - - _attribute_map = { - "title": {"key": "title", "type": "str"}, - "content": {"key": "content", "type": "str"}, - "keywords": {"key": "keywords", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.title = None - self.content = None - self.keywords = None - - -class QueryResultDocumentSemanticField(_serialization.Model): - """Description of fields that were sent to the semantic enrichment process, as well as how they - were used. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar name: The name of the field that was sent to the semantic enrichment process. - :vartype name: str - :ivar state: The way the field was used for the semantic enrichment process (fully used, - partially used, or unused). Known values are: "used", "unused", and "partial". - :vartype state: str or ~azure.search.documents.models.SemanticFieldState - """ - - _validation = { - "name": {"readonly": True}, - "state": {"readonly": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "state": {"key": "state", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.name = None - self.state = None - - -class QueryResultDocumentSubscores(_serialization.Model): - """The breakdown of subscores between the text and vector query components of the search query for - this document. Each vector query is shown as a separate object in the same order they were - received. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar text: The BM25 or Classic score for the text portion of the query. - :vartype text: ~azure.search.documents.models.TextResult - :ivar vectors: The vector similarity and @search.score values for each vector query. 
- :vartype vectors: list[dict[str, ~azure.search.documents.models.SingleVectorFieldResult]] - :ivar document_boost: The BM25 or Classic score for the text portion of the query. - :vartype document_boost: float - """ - - _validation = { - "text": {"readonly": True}, - "vectors": {"readonly": True}, - "document_boost": {"readonly": True}, - } - - _attribute_map = { - "text": {"key": "text", "type": "TextResult"}, - "vectors": {"key": "vectors", "type": "[{SingleVectorFieldResult}]"}, - "document_boost": {"key": "documentBoost", "type": "float"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.text = None - self.vectors = None - self.document_boost = None - - -class QueryRewritesDebugInfo(_serialization.Model): - """Contains debugging information specific to query rewrites. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar text: List of query rewrites generated for the text query. - :vartype text: ~azure.search.documents.models.QueryRewritesValuesDebugInfo - :ivar vectors: List of query rewrites generated for the vectorizable text queries. - :vartype vectors: list[~azure.search.documents.models.QueryRewritesValuesDebugInfo] - """ - - _validation = { - "text": {"readonly": True}, - "vectors": {"readonly": True}, - } - - _attribute_map = { - "text": {"key": "text", "type": "QueryRewritesValuesDebugInfo"}, - "vectors": {"key": "vectors", "type": "[QueryRewritesValuesDebugInfo]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.text = None - self.vectors = None - - -class QueryRewritesValuesDebugInfo(_serialization.Model): - """Contains debugging information specific to query rewrites. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar input_query: The input text to the generative query rewriting model. There may be cases - where the user query and the input to the generative model are not identical. - :vartype input_query: str - :ivar rewrites: List of query rewrites. - :vartype rewrites: list[str] - """ - - _validation = { - "input_query": {"readonly": True}, - "rewrites": {"readonly": True}, - } - - _attribute_map = { - "input_query": {"key": "inputQuery", "type": "str"}, - "rewrites": {"key": "rewrites", "type": "[str]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.input_query = None - self.rewrites = None - - -class RequestOptions(_serialization.Model): - """Parameter group. - - :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :vartype x_ms_client_request_id: str - """ - - _attribute_map = { - "x_ms_client_request_id": {"key": "x-ms-client-request-id", "type": "str"}, - } - - def __init__(self, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :paramtype x_ms_client_request_id: str - """ - super().__init__(**kwargs) - self.x_ms_client_request_id = x_ms_client_request_id - - -class SearchDocumentsResult(_serialization.Model): - """Response containing search results from an index. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar count: The total count of results found by the search operation, or null if the count was - not requested. 
If present, the count may be greater than the number of results in this - response. This can happen if you use the $top or $skip parameters, or if the query can't return - all the requested documents in a single response. - :vartype count: int - :ivar coverage: A value indicating the percentage of the index that was included in the query, - or null if minimumCoverage was not specified in the request. - :vartype coverage: float - :ivar facets: The facet query results for the search operation, organized as a collection of - buckets for each faceted field; null if the query did not include any facet expressions. - :vartype facets: dict[str, list[~azure.search.documents.models.FacetResult]] - :ivar answers: The answers query results for the search operation; null if the answers query - parameter was not specified or set to 'none'. - :vartype answers: list[~azure.search.documents.models.QueryAnswerResult] - :ivar debug_info: Debug information that applies to the search results as a whole. - :vartype debug_info: ~azure.search.documents.models.DebugInfo - :ivar next_page_parameters: Continuation JSON payload returned when the query can't return all - the requested results in a single response. You can use this JSON along with @odata.nextLink to - formulate another POST Search request to get the next part of the search response. - :vartype next_page_parameters: ~azure.search.documents.models.SearchRequest - :ivar results: The sequence of results returned by the query. Required. - :vartype results: list[~azure.search.documents.models.SearchResult] - :ivar next_link: Continuation URL returned when the query can't return all the requested - results in a single response. You can use this URL to formulate another GET or POST Search - request to get the next part of the search response. Make sure to use the same verb (GET or - POST) as the request that produced this response. - :vartype next_link: str - :ivar semantic_partial_response_reason: Reason that a partial response was returned for a - semantic ranking request. Known values are: "maxWaitExceeded", "capacityOverloaded", and - "transient". - :vartype semantic_partial_response_reason: str or - ~azure.search.documents.models.SemanticErrorReason - :ivar semantic_partial_response_type: Type of partial response that was returned for a semantic - ranking request. Known values are: "baseResults" and "rerankedResults". - :vartype semantic_partial_response_type: str or - ~azure.search.documents.models.SemanticSearchResultsType - :ivar semantic_query_rewrites_result_type: Type of query rewrite that was used to retrieve - documents. 
"originalQueryOnly" - :vartype semantic_query_rewrites_result_type: str or - ~azure.search.documents.models.SemanticQueryRewritesResultType - """ - - _validation = { - "count": {"readonly": True}, - "coverage": {"readonly": True}, - "facets": {"readonly": True}, - "answers": {"readonly": True}, - "debug_info": {"readonly": True}, - "next_page_parameters": {"readonly": True}, - "results": {"required": True, "readonly": True}, - "next_link": {"readonly": True}, - "semantic_partial_response_reason": {"readonly": True}, - "semantic_partial_response_type": {"readonly": True}, - "semantic_query_rewrites_result_type": {"readonly": True}, - } - - _attribute_map = { - "count": {"key": "@odata\\.count", "type": "int"}, - "coverage": {"key": "@search\\.coverage", "type": "float"}, - "facets": {"key": "@search\\.facets", "type": "{[FacetResult]}"}, - "answers": {"key": "@search\\.answers", "type": "[QueryAnswerResult]"}, - "debug_info": {"key": "@search\\.debugInfo", "type": "DebugInfo"}, - "next_page_parameters": {"key": "@search\\.nextPageParameters", "type": "SearchRequest"}, - "results": {"key": "value", "type": "[SearchResult]"}, - "next_link": {"key": "@odata\\.nextLink", "type": "str"}, - "semantic_partial_response_reason": {"key": "@search\\.semanticPartialResponseReason", "type": "str"}, - "semantic_partial_response_type": {"key": "@search\\.semanticPartialResponseType", "type": "str"}, - "semantic_query_rewrites_result_type": {"key": "@search\\.semanticQueryRewritesResultType", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.count = None - self.coverage = None - self.facets = None - self.answers = None - self.debug_info = None - self.next_page_parameters = None - self.results = None - self.next_link = None - self.semantic_partial_response_reason = None - self.semantic_partial_response_type = None - self.semantic_query_rewrites_result_type = None - - -class SearchOptions(_serialization.Model): - """Parameter group. - - :ivar include_total_result_count: A value that specifies whether to fetch the total count of - results. Default is false. Setting this value to true may have a performance impact. Note that - the count returned is an approximation. - :vartype include_total_result_count: bool - :ivar facets: The list of facet expressions to apply to the search query. Each facet expression - contains a field name, optionally followed by a comma-separated list of name:value pairs. - :vartype facets: list[str] - :ivar filter: The OData $filter expression to apply to the search query. - :vartype filter: str - :ivar highlight_fields: The list of field names to use for hit highlights. Only searchable - fields can be used for hit highlighting. - :vartype highlight_fields: list[str] - :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. Default is </em>. - :vartype highlight_post_tag: str - :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. Default is <em>. - :vartype highlight_pre_tag: str - :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that - must be covered by a search query in order for the query to be reported as a success. This - parameter can be useful for ensuring search availability even for services with only one - replica. The default is 100. - :vartype minimum_coverage: float - :ivar order_by: The list of OData $orderby expressions by which to sort the results. 
Each - expression can be either a field name or a call to either the geo.distance() or the - search.score() functions. Each expression can be followed by asc to indicate ascending, and - desc to indicate descending. The default is ascending order. Ties will be broken by the match - scores of documents. If no OrderBy is specified, the default sort order is descending by - document match score. There can be at most 32 $orderby clauses. - :vartype order_by: list[str] - :ivar query_type: A value that specifies the syntax of the search query. The default is - 'simple'. Use 'full' if your query uses the Lucene query syntax. Known values are: "simple", - "full", and "semantic". - :vartype query_type: str or ~azure.search.documents.models.QueryType - :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for - example, referencePointParameter) using the format name-values. For example, if the scoring - profile defines a function with a parameter called 'mylocation' the parameter string would be - "mylocation--122.2,44.8" (without the quotes). - :vartype scoring_parameters: list[str] - :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching - documents in order to sort the results. - :vartype scoring_profile: str - :ivar search_fields: The list of field names to which to scope the full-text search. When using - fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each - fielded search expression take precedence over any field names listed in this parameter. - :vartype search_fields: list[str] - :ivar search_mode: A value that specifies whether any or all of the search terms must be - matched in order to count the document as a match. Known values are: "any" and "all". - :vartype search_mode: str or ~azure.search.documents.models.SearchMode - :ivar scoring_statistics: A value that specifies whether we want to calculate scoring - statistics (such as document frequency) globally for more consistent scoring, or locally, for - lower latency. Known values are: "local" and "global". - :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :ivar session_id: A value to be used to create a sticky session, which can help to get more - consistent results. As long as the same sessionId is used, a best-effort attempt will be made - to target the same replica set. Be wary that reusing the same sessionID values repeatedly can - interfere with the load balancing of the requests across replicas and adversely affect the - performance of the search service. The value used as sessionId cannot start with a '_' - character. - :vartype session_id: str - :ivar select: The list of fields to retrieve. If unspecified, all fields marked as retrievable - in the schema are included. - :vartype select: list[str] - :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If - you need to scan documents in sequence, but cannot use $skip due to this limitation, consider - using $orderby on a totally-ordered key and $filter with a range query instead. - :vartype skip: int - :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip - to implement client-side paging of search results. If results are truncated due to server-side - paging, the response will include a continuation token that can be used to issue another Search - request for the next page of results. 
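# Illustrative sketch: the SearchDocumentsResult fields above surface through the paged
# object returned by SearchClient.search; iteration follows @odata.nextLink /
# @search.nextPageParameters automatically.  Client construction mirrors the earlier sketches.
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("https://<service>.search.windows.net", "hotels-index",
                      AzureKeyCredential("<api-key>"))

results = client.search(
    search_text="wifi",
    include_total_count=True,     # populates @odata.count (an approximation)
    facets=["category,count:5"],  # populates @search.facets
)
print(results.get_count())        # total hit count, or None if not requested
print(results.get_coverage())     # @search.coverage, or None if minimumCoverage was not set
print(results.get_facets())       # e.g. {"category": [{"value": "Budget", "count": 7}, ...]}
for doc in results:               # continuation across pages is handled for you
    print(doc["hotelId"], doc["@search.score"])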
- :vartype top: int - :ivar semantic_configuration: The name of the semantic configuration that lists which fields - should be used for semantic ranking, captions, highlights, and answers. - :vartype semantic_configuration: str - :ivar semantic_error_handling: Allows the user to choose whether a semantic call should fail - completely, or to return partial results (default). Known values are: "partial" and "fail". - :vartype semantic_error_handling: str or ~azure.search.documents.models.SemanticErrorMode - :ivar semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount of - time it takes for semantic enrichment to finish processing before the request fails. - :vartype semantic_max_wait_in_milliseconds: int - :ivar answers: This parameter is only valid if the query type is ``semantic``. If set, the - query returns answers extracted from key passages in the highest ranked documents. The number - of answers returned can be configured by appending the pipe character ``|`` followed by the - ``count-`` option after the answers parameter value, such as - ``extractive|count-3``. Default count is 1. The confidence threshold can be configured by - appending the pipe character ``|`` followed by the ``threshold-`` option - after the answers parameter value, such as ``extractive|threshold-0.9``. Default threshold is - 0.7. The maximum character length of answers can be configured by appending the pipe character - '|' followed by the 'count-:code:``', such as - 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". - :vartype answers: str or ~azure.search.documents.models.QueryAnswerType - :ivar captions: This parameter is only valid if the query type is ``semantic``. If set, the - query returns captions extracted from key passages in the highest ranked documents. When - Captions is set to ``extractive``\\ , highlighting is enabled by default, and can be configured - by appending the pipe character ``|`` followed by the ``highlight-`` option, such - as ``extractive|highlight-true``. Defaults to ``None``. The maximum character length of - captions can be configured by appending the pipe character '|' followed by the - 'count-:code:``', such as 'extractive|maxcharlength-600'. - Known values are: "none" and "extractive". - :vartype captions: str or ~azure.search.documents.models.QueryCaptionType - :ivar semantic_query: Allows setting a separate search query that will be solely used for - semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there - is a need to use different queries between the base retrieval and ranking phase, and the L2 - semantic phase. - :vartype semantic_query: str - :ivar query_rewrites: When QueryRewrites is set to ``generative``\\ , the query terms are sent - to a generate model which will produce 10 (default) rewrites to help increase the recall of the - request. The requested count can be configured by appending the pipe character ``|`` followed - by the ``count-`` option, such as ``generative|count-3``. Defaults to - ``None``. This parameter is only valid if the query type is ``semantic``. Known values are: - "none" and "generative". - :vartype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :ivar debug: Enables a debugging tool that can be used to further explore your search results. - Known values are: "disabled", "semantic", "vector", "queryRewrites", and "all". 
- :vartype debug: str or ~azure.search.documents.models.QueryDebugMode - :ivar query_language: The language of the query. Known values are: "none", "en-us", "en-gb", - "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", "zh-tw", - "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", "hu-hu", - "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", "da-dk", - "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", "el-gr", - "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", "fi-fi", - "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", "gu-in", - "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", and "ur-pk". - :vartype query_language: str or ~azure.search.documents.models.QueryLanguage - :ivar speller: Improve search recall by spell-correcting individual search query terms. Known - values are: "none" and "lexicon". - :vartype speller: str or ~azure.search.documents.models.QuerySpellerType - :ivar semantic_fields: The list of field names used for semantic ranking. - :vartype semantic_fields: list[str] - """ - - _validation = { - "semantic_max_wait_in_milliseconds": {"minimum": 700}, - } - - _attribute_map = { - "include_total_result_count": {"key": "IncludeTotalResultCount", "type": "bool"}, - "facets": {"key": "Facets", "type": "[str]"}, - "filter": {"key": "$filter", "type": "str"}, - "highlight_fields": {"key": "HighlightFields", "type": "[str]"}, - "highlight_post_tag": {"key": "highlightPostTag", "type": "str"}, - "highlight_pre_tag": {"key": "highlightPreTag", "type": "str"}, - "minimum_coverage": {"key": "minimumCoverage", "type": "float"}, - "order_by": {"key": "OrderBy", "type": "[str]"}, - "query_type": {"key": "queryType", "type": "str"}, - "scoring_parameters": {"key": "ScoringParameters", "type": "[str]"}, - "scoring_profile": {"key": "scoringProfile", "type": "str"}, - "search_fields": {"key": "searchFields", "type": "[str]"}, - "search_mode": {"key": "searchMode", "type": "str"}, - "scoring_statistics": {"key": "scoringStatistics", "type": "str"}, - "session_id": {"key": "sessionId", "type": "str"}, - "select": {"key": "$select", "type": "[str]"}, - "skip": {"key": "$skip", "type": "int"}, - "top": {"key": "$top", "type": "int"}, - "semantic_configuration": {"key": "semanticConfiguration", "type": "str"}, - "semantic_error_handling": {"key": "semanticErrorHandling", "type": "str"}, - "semantic_max_wait_in_milliseconds": {"key": "semanticMaxWaitInMilliseconds", "type": "int"}, - "answers": {"key": "answers", "type": "str"}, - "captions": {"key": "captions", "type": "str"}, - "semantic_query": {"key": "semanticQuery", "type": "str"}, - "query_rewrites": {"key": "queryRewrites", "type": "str"}, - "debug": {"key": "debug", "type": "str"}, - "query_language": {"key": "queryLanguage", "type": "str"}, - "speller": {"key": "speller", "type": "str"}, - "semantic_fields": {"key": "semanticFields", "type": "[str]"}, - } - - def __init__( # pylint: disable=too-many-locals - self, - *, - include_total_result_count: Optional[bool] = None, - facets: Optional[List[str]] = None, - filter: Optional[str] = None, # pylint: disable=redefined-builtin - highlight_fields: Optional[List[str]] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - order_by: Optional[List[str]] = None, - query_type: Optional[Union[str, 
"_models.QueryType"]] = None, - scoring_parameters: Optional[List[str]] = None, - scoring_profile: Optional[str] = None, - search_fields: Optional[List[str]] = None, - search_mode: Optional[Union[str, "_models.SearchMode"]] = None, - scoring_statistics: Optional[Union[str, "_models.ScoringStatistics"]] = None, - session_id: Optional[str] = None, - select: Optional[List[str]] = None, - skip: Optional[int] = None, - top: Optional[int] = None, - semantic_configuration: Optional[str] = None, - semantic_error_handling: Optional[Union[str, "_models.SemanticErrorMode"]] = None, - semantic_max_wait_in_milliseconds: Optional[int] = None, - answers: Optional[Union[str, "_models.QueryAnswerType"]] = None, - captions: Optional[Union[str, "_models.QueryCaptionType"]] = None, - semantic_query: Optional[str] = None, - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = None, - debug: Optional[Union[str, "_models.QueryDebugMode"]] = None, - query_language: Optional[Union[str, "_models.QueryLanguage"]] = None, - speller: Optional[Union[str, "_models.QuerySpellerType"]] = None, - semantic_fields: Optional[List[str]] = None, - **kwargs: Any - ) -> None: - """ - :keyword include_total_result_count: A value that specifies whether to fetch the total count of - results. Default is false. Setting this value to true may have a performance impact. Note that - the count returned is an approximation. - :paramtype include_total_result_count: bool - :keyword facets: The list of facet expressions to apply to the search query. Each facet - expression contains a field name, optionally followed by a comma-separated list of name:value - pairs. - :paramtype facets: list[str] - :keyword filter: The OData $filter expression to apply to the search query. - :paramtype filter: str - :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable - fields can be used for hit highlighting. - :paramtype highlight_fields: list[str] - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. Default is </em>. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. Default is <em>. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a search query in order for the query to be reported as a success. This - parameter can be useful for ensuring search availability even for services with only one - replica. The default is 100. - :paramtype minimum_coverage: float - :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each - expression can be either a field name or a call to either the geo.distance() or the - search.score() functions. Each expression can be followed by asc to indicate ascending, and - desc to indicate descending. The default is ascending order. Ties will be broken by the match - scores of documents. If no OrderBy is specified, the default sort order is descending by - document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: list[str] - :keyword query_type: A value that specifies the syntax of the search query. The default is - 'simple'. Use 'full' if your query uses the Lucene query syntax. Known values are: "simple", - "full", and "semantic". 
- :paramtype query_type: str or ~azure.search.documents.models.QueryType - :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for - example, referencePointParameter) using the format name-values. For example, if the scoring - profile defines a function with a parameter called 'mylocation' the parameter string would be - "mylocation--122.2,44.8" (without the quotes). - :paramtype scoring_parameters: list[str] - :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching - documents in order to sort the results. - :paramtype scoring_profile: str - :keyword search_fields: The list of field names to which to scope the full-text search. When - using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of - each fielded search expression take precedence over any field names listed in this parameter. - :paramtype search_fields: list[str] - :keyword search_mode: A value that specifies whether any or all of the search terms must be - matched in order to count the document as a match. Known values are: "any" and "all". - :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword scoring_statistics: A value that specifies whether we want to calculate scoring - statistics (such as document frequency) globally for more consistent scoring, or locally, for - lower latency. Known values are: "local" and "global". - :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :keyword session_id: A value to be used to create a sticky session, which can help to get more - consistent results. As long as the same sessionId is used, a best-effort attempt will be made - to target the same replica set. Be wary that reusing the same sessionID values repeatedly can - interfere with the load balancing of the requests across replicas and adversely affect the - performance of the search service. The value used as sessionId cannot start with a '_' - character. - :paramtype session_id: str - :keyword select: The list of fields to retrieve. If unspecified, all fields marked as - retrievable in the schema are included. - :paramtype select: list[str] - :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. - If you need to scan documents in sequence, but cannot use $skip due to this limitation, - consider using $orderby on a totally-ordered key and $filter with a range query instead. - :paramtype skip: int - :keyword top: The number of search results to retrieve. This can be used in conjunction with - $skip to implement client-side paging of search results. If results are truncated due to - server-side paging, the response will include a continuation token that can be used to issue - another Search request for the next page of results. - :paramtype top: int - :keyword semantic_configuration: The name of the semantic configuration that lists which fields - should be used for semantic ranking, captions, highlights, and answers. - :paramtype semantic_configuration: str - :keyword semantic_error_handling: Allows the user to choose whether a semantic call should fail - completely, or to return partial results (default). Known values are: "partial" and "fail". 
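# A rough sketch (hypothetical key field 'hotelId' and page size) of the $skip workaround
# described above: instead of skipping deep into the result set, page by filtering on a
# totally-ordered key and keep the page size small.
last_key_seen = "00123"
next_page_options = {
    "filter": f"hotelId gt '{last_key_seen}'",  # range query on the ordered key
    "order_by": ["hotelId asc"],                # a total order keeps pages disjoint
    "top": 50,                                  # page size stays well under the 100,000 skip limit
}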
- :paramtype semantic_error_handling: str or ~azure.search.documents.models.SemanticErrorMode - :keyword semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount - of time it takes for semantic enrichment to finish processing before the request fails. - :paramtype semantic_max_wait_in_milliseconds: int - :keyword answers: This parameter is only valid if the query type is ``semantic``. If set, the - query returns answers extracted from key passages in the highest ranked documents. The number - of answers returned can be configured by appending the pipe character ``|`` followed by the - ``count-`` option after the answers parameter value, such as - ``extractive|count-3``. Default count is 1. The confidence threshold can be configured by - appending the pipe character ``|`` followed by the ``threshold-`` option - after the answers parameter value, such as ``extractive|threshold-0.9``. Default threshold is - 0.7. The maximum character length of answers can be configured by appending the pipe character - '|' followed by the 'maxcharlength-' option, such as - 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". - :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType - :keyword captions: This parameter is only valid if the query type is ``semantic``. If set, the - query returns captions extracted from key passages in the highest ranked documents. When - Captions is set to ``extractive``\\ , highlighting is enabled by default, and can be configured - by appending the pipe character ``|`` followed by the ``highlight-`` option, such - as ``extractive|highlight-true``. Defaults to ``None``. The maximum character length of - captions can be configured by appending the pipe character '|' followed by the - 'maxcharlength-' option, such as 'extractive|maxcharlength-600'. - Known values are: "none" and "extractive". - :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType - :keyword semantic_query: Allows setting a separate search query that will be solely used for - semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there - is a need to use different queries between the base retrieval and ranking phase, and the L2 - semantic phase. - :paramtype semantic_query: str - :keyword query_rewrites: When QueryRewrites is set to ``generative``\\ , the query terms are - sent to a generative model which will produce 10 (default) rewrites to help increase the recall - of the request. The requested count can be configured by appending the pipe character ``|`` - followed by the ``count-`` option, such as ``generative|count-3``. Defaults - to ``None``. This parameter is only valid if the query type is ``semantic``. Known values are: - "none" and "generative". - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :keyword debug: Enables a debugging tool that can be used to further explore your search - results. Known values are: "disabled", "semantic", "vector", "queryRewrites", and "all". - :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode - :keyword query_language: The language of the query.
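# A rough sketch of the pipe-delimited option strings described above; the values are
# examples of the formats given in the docstring, not defaults.
answers = "extractive|count-3"                       # up to 3 extracted answers (default count is 1)
answers_with_threshold = "extractive|threshold-0.9"  # raise the confidence threshold from 0.7
captions = "extractive|highlight-true"               # extractive captions with hit highlighting enabled
query_rewrites = "generative|count-3"                # request 3 generative query rewrites (default is 10)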
Known values are: "none", "en-us", "en-gb", - "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", "zh-tw", - "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", "hu-hu", - "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", "da-dk", - "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", "el-gr", - "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", "fi-fi", - "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", "gu-in", - "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", and "ur-pk". - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: Improve search recall by spell-correcting individual search query terms. - Known values are: "none" and "lexicon". - :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType - :keyword semantic_fields: The list of field names used for semantic ranking. - :paramtype semantic_fields: list[str] - """ - super().__init__(**kwargs) - self.include_total_result_count = include_total_result_count - self.facets = facets - self.filter = filter - self.highlight_fields = highlight_fields - self.highlight_post_tag = highlight_post_tag - self.highlight_pre_tag = highlight_pre_tag - self.minimum_coverage = minimum_coverage - self.order_by = order_by - self.query_type = query_type - self.scoring_parameters = scoring_parameters - self.scoring_profile = scoring_profile - self.search_fields = search_fields - self.search_mode = search_mode - self.scoring_statistics = scoring_statistics - self.session_id = session_id - self.select = select - self.skip = skip - self.top = top - self.semantic_configuration = semantic_configuration - self.semantic_error_handling = semantic_error_handling - self.semantic_max_wait_in_milliseconds = semantic_max_wait_in_milliseconds - self.answers = answers - self.captions = captions - self.semantic_query = semantic_query - self.query_rewrites = query_rewrites - self.debug = debug - self.query_language = query_language - self.speller = speller - self.semantic_fields = semantic_fields - - -class SearchRequest(_serialization.Model): - """Parameters for filtering, sorting, faceting, paging, and other search query behaviors. - - :ivar include_total_result_count: A value that specifies whether to fetch the total count of - results. Default is false. Setting this value to true may have a performance impact. Note that - the count returned is an approximation. - :vartype include_total_result_count: bool - :ivar facets: The list of facet expressions to apply to the search query. Each facet expression - contains a field name, optionally followed by a comma-separated list of name:value pairs. - :vartype facets: list[str] - :ivar filter: The OData $filter expression to apply to the search query. - :vartype filter: str - :ivar highlight_fields: The comma-separated list of field names to use for hit highlights. Only - searchable fields can be used for hit highlighting. - :vartype highlight_fields: str - :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. Default is </em>. - :vartype highlight_post_tag: str - :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. Default is <em>. 
- :vartype highlight_pre_tag: str - :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that - must be covered by a search query in order for the query to be reported as a success. This - parameter can be useful for ensuring search availability even for services with only one - replica. The default is 100. - :vartype minimum_coverage: float - :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the - results. Each expression can be either a field name or a call to either the geo.distance() or - the search.score() functions. Each expression can be followed by asc to indicate ascending, or - desc to indicate descending. The default is ascending order. Ties will be broken by the match - scores of documents. If no $orderby is specified, the default sort order is descending by - document match score. There can be at most 32 $orderby clauses. - :vartype order_by: str - :ivar query_type: A value that specifies the syntax of the search query. The default is - 'simple'. Use 'full' if your query uses the Lucene query syntax. Known values are: "simple", - "full", and "semantic". - :vartype query_type: str or ~azure.search.documents.models.QueryType - :ivar scoring_statistics: A value that specifies whether we want to calculate scoring - statistics (such as document frequency) globally for more consistent scoring, or locally, for - lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally - before scoring. Using global scoring statistics can increase latency of search queries. Known - values are: "local" and "global". - :vartype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :ivar session_id: A value to be used to create a sticky session, which can help to get more - consistent results. As long as the same sessionId is used, a best-effort attempt will be made - to target the same replica set. Be wary that reusing the same sessionID values repeatedly can - interfere with the load balancing of the requests across replicas and adversely affect the - performance of the search service. The value used as sessionId cannot start with a '_' - character. - :vartype session_id: str - :ivar scoring_parameters: The list of parameter values to be used in scoring functions (for - example, referencePointParameter) using the format name-values. For example, if the scoring - profile defines a function with a parameter called 'mylocation' the parameter string would be - "mylocation--122.2,44.8" (without the quotes). - :vartype scoring_parameters: list[str] - :ivar scoring_profile: The name of a scoring profile to evaluate match scores for matching - documents in order to sort the results. - :vartype scoring_profile: str - :ivar debug: Enables a debugging tool that can be used to further explore your reranked - results. Known values are: "disabled", "semantic", "vector", "queryRewrites", and "all". - :vartype debug: str or ~azure.search.documents.models.QueryDebugMode - :ivar search_text: A full-text search query expression; use "*" or omit this parameter to match - all documents. - :vartype search_text: str - :ivar search_fields: The comma-separated list of field names to which to scope the full-text - search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the - field names of each fielded search expression take precedence over any field names listed in - this parameter.
- :vartype search_fields: str - :ivar search_mode: A value that specifies whether any or all of the search terms must be - matched in order to count the document as a match. Known values are: "any" and "all". - :vartype search_mode: str or ~azure.search.documents.models.SearchMode - :ivar query_language: A value that specifies the language of the search query. Known values - are: "none", "en-us", "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", - "es-mx", "zh-cn", "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", - "nl-be", "nl-nl", "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", - "ar-kw", "ar-jo", "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", - "ta-in", "vi-vn", "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", - "et-ee", "ca-es", "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", - "eu-es", "gl-es", "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", - "te-in", and "ur-pk". - :vartype query_language: str or ~azure.search.documents.models.QueryLanguage - :ivar speller: A value that specifies the type of the speller to use to spell-correct - individual search query terms. Known values are: "none" and "lexicon". - :vartype speller: str or ~azure.search.documents.models.QuerySpellerType - :ivar select: The comma-separated list of fields to retrieve. If unspecified, all fields marked - as retrievable in the schema are included. - :vartype select: str - :ivar skip: The number of search results to skip. This value cannot be greater than 100,000. If - you need to scan documents in sequence, but cannot use skip due to this limitation, consider - using orderby on a totally-ordered key and filter with a range query instead. - :vartype skip: int - :ivar top: The number of search results to retrieve. This can be used in conjunction with $skip - to implement client-side paging of search results. If results are truncated due to server-side - paging, the response will include a continuation token that can be used to issue another Search - request for the next page of results. - :vartype top: int - :ivar semantic_configuration: The name of a semantic configuration that will be used when - processing documents for queries of type semantic. - :vartype semantic_configuration: str - :ivar semantic_error_handling: Allows the user to choose whether a semantic call should fail - completely (default / current behavior), or to return partial results. Known values are: - "partial" and "fail". - :vartype semantic_error_handling: str or ~azure.search.documents.models.SemanticErrorMode - :ivar semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount of - time it takes for semantic enrichment to finish processing before the request fails. - :vartype semantic_max_wait_in_milliseconds: int - :ivar semantic_query: Allows setting a separate search query that will be solely used for - semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there - is a need to use different queries between the base retrieval and ranking phase, and the L2 - semantic phase. - :vartype semantic_query: str - :ivar answers: A value that specifies whether answers should be returned as part of the search - response. Known values are: "none" and "extractive".
- :vartype answers: str or ~azure.search.documents.models.QueryAnswerType - :ivar captions: A value that specifies whether captions should be returned as part of the - search response. Known values are: "none" and "extractive". - :vartype captions: str or ~azure.search.documents.models.QueryCaptionType - :ivar query_rewrites: A value that specifies whether query rewrites should be generated to - augment the search query. Known values are: "none" and "generative". - :vartype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :ivar semantic_fields: The comma-separated list of field names used for semantic ranking. - :vartype semantic_fields: str - :ivar vector_queries: The query parameters for vector and hybrid search queries. - :vartype vector_queries: list[~azure.search.documents.models.VectorQuery] - :ivar vector_filter_mode: Determines whether or not filters are applied before or after the - vector search is performed. Default is 'preFilter' for new indexes. Known values are: - "postFilter" and "preFilter". - :vartype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode - :ivar hybrid_search: The query parameters to configure hybrid search behaviors. - :vartype hybrid_search: ~azure.search.documents.models.HybridSearch - """ - - _validation = { - "semantic_max_wait_in_milliseconds": {"minimum": 700}, - } - - _attribute_map = { - "include_total_result_count": {"key": "count", "type": "bool"}, - "facets": {"key": "facets", "type": "[str]"}, - "filter": {"key": "filter", "type": "str"}, - "highlight_fields": {"key": "highlight", "type": "str"}, - "highlight_post_tag": {"key": "highlightPostTag", "type": "str"}, - "highlight_pre_tag": {"key": "highlightPreTag", "type": "str"}, - "minimum_coverage": {"key": "minimumCoverage", "type": "float"}, - "order_by": {"key": "orderby", "type": "str"}, - "query_type": {"key": "queryType", "type": "str"}, - "scoring_statistics": {"key": "scoringStatistics", "type": "str"}, - "session_id": {"key": "sessionId", "type": "str"}, - "scoring_parameters": {"key": "scoringParameters", "type": "[str]"}, - "scoring_profile": {"key": "scoringProfile", "type": "str"}, - "debug": {"key": "debug", "type": "str"}, - "search_text": {"key": "search", "type": "str"}, - "search_fields": {"key": "searchFields", "type": "str"}, - "search_mode": {"key": "searchMode", "type": "str"}, - "query_language": {"key": "queryLanguage", "type": "str"}, - "speller": {"key": "speller", "type": "str"}, - "select": {"key": "select", "type": "str"}, - "skip": {"key": "skip", "type": "int"}, - "top": {"key": "top", "type": "int"}, - "semantic_configuration": {"key": "semanticConfiguration", "type": "str"}, - "semantic_error_handling": {"key": "semanticErrorHandling", "type": "str"}, - "semantic_max_wait_in_milliseconds": {"key": "semanticMaxWaitInMilliseconds", "type": "int"}, - "semantic_query": {"key": "semanticQuery", "type": "str"}, - "answers": {"key": "answers", "type": "str"}, - "captions": {"key": "captions", "type": "str"}, - "query_rewrites": {"key": "queryRewrites", "type": "str"}, - "semantic_fields": {"key": "semanticFields", "type": "str"}, - "vector_queries": {"key": "vectorQueries", "type": "[VectorQuery]"}, - "vector_filter_mode": {"key": "vectorFilterMode", "type": "str"}, - "hybrid_search": {"key": "hybridSearch", "type": "HybridSearch"}, - } - - def __init__( # pylint: disable=too-many-locals - self, - *, - include_total_result_count: Optional[bool] = None, - facets: Optional[List[str]] = None, - filter: Optional[str] = None, # 
pylint: disable=redefined-builtin - highlight_fields: Optional[str] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - order_by: Optional[str] = None, - query_type: Optional[Union[str, "_models.QueryType"]] = None, - scoring_statistics: Optional[Union[str, "_models.ScoringStatistics"]] = None, - session_id: Optional[str] = None, - scoring_parameters: Optional[List[str]] = None, - scoring_profile: Optional[str] = None, - debug: Optional[Union[str, "_models.QueryDebugMode"]] = None, - search_text: Optional[str] = None, - search_fields: Optional[str] = None, - search_mode: Optional[Union[str, "_models.SearchMode"]] = None, - query_language: Optional[Union[str, "_models.QueryLanguage"]] = None, - speller: Optional[Union[str, "_models.QuerySpellerType"]] = None, - select: Optional[str] = None, - skip: Optional[int] = None, - top: Optional[int] = None, - semantic_configuration: Optional[str] = None, - semantic_error_handling: Optional[Union[str, "_models.SemanticErrorMode"]] = None, - semantic_max_wait_in_milliseconds: Optional[int] = None, - semantic_query: Optional[str] = None, - answers: Optional[Union[str, "_models.QueryAnswerType"]] = None, - captions: Optional[Union[str, "_models.QueryCaptionType"]] = None, - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = None, - semantic_fields: Optional[str] = None, - vector_queries: Optional[List["_models.VectorQuery"]] = None, - vector_filter_mode: Optional[Union[str, "_models.VectorFilterMode"]] = None, - hybrid_search: Optional["_models.HybridSearch"] = None, - **kwargs: Any - ) -> None: - """ - :keyword include_total_result_count: A value that specifies whether to fetch the total count of - results. Default is false. Setting this value to true may have a performance impact. Note that - the count returned is an approximation. - :paramtype include_total_result_count: bool - :keyword facets: The list of facet expressions to apply to the search query. Each facet - expression contains a field name, optionally followed by a comma-separated list of name:value - pairs. - :paramtype facets: list[str] - :keyword filter: The OData $filter expression to apply to the search query. - :paramtype filter: str - :keyword highlight_fields: The comma-separated list of field names to use for hit highlights. - Only searchable fields can be used for hit highlighting. - :paramtype highlight_fields: str - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. Default is </em>. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. Default is <em>. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a search query in order for the query to be reported as a success. This - parameter can be useful for ensuring search availability even for services with only one - replica. The default is 100. - :paramtype minimum_coverage: float - :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the - results. Each expression can be either a field name or a call to either the geo.distance() or - the search.score() functions. Each expression can be followed by asc to indicate ascending, or - desc to indicate descending. The default is ascending order. 
Ties will be broken by the match - scores of documents. If no $orderby is specified, the default sort order is descending by - document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: str - :keyword query_type: A value that specifies the syntax of the search query. The default is - 'simple'. Use 'full' if your query uses the Lucene query syntax. Known values are: "simple", - "full", and "semantic". - :paramtype query_type: str or ~azure.search.documents.models.QueryType - :keyword scoring_statistics: A value that specifies whether we want to calculate scoring - statistics (such as document frequency) globally for more consistent scoring, or locally, for - lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally - before scoring. Using global scoring statistics can increase latency of search queries. Known - values are: "local" and "global". - :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics - :keyword session_id: A value to be used to create a sticky session, which can help to get more - consistent results. As long as the same sessionId is used, a best-effort attempt will be made - to target the same replica set. Be wary that reusing the same sessionID values repeatedly can - interfere with the load balancing of the requests across replicas and adversely affect the - performance of the search service. The value used as sessionId cannot start with a '_' - character. - :paramtype session_id: str - :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for - example, referencePointParameter) using the format name-values. For example, if the scoring - profile defines a function with a parameter called 'mylocation' the parameter string would be - "mylocation--122.2,44.8" (without the quotes). - :paramtype scoring_parameters: list[str] - :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching - documents in order to sort the results. - :paramtype scoring_profile: str - :keyword debug: Enables a debugging tool that can be used to further explore your reranked - results. Known values are: "disabled", "semantic", "vector", "queryRewrites", and "all". - :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode - :keyword search_text: A full-text search query expression; use "*" or omit this parameter to - match all documents. - :paramtype search_text: str - :keyword search_fields: The comma-separated list of field names to which to scope the full-text - search. When using fielded search (fieldName:searchExpression) in a full Lucene query, the - field names of each fielded search expression take precedence over any field names listed in - this parameter. - :paramtype search_fields: str - :keyword search_mode: A value that specifies whether any or all of the search terms must be - matched in order to count the document as a match. Known values are: "any" and "all". - :paramtype search_mode: str or ~azure.search.documents.models.SearchMode - :keyword query_language: A value that specifies the language of the search query.
Known values - are: "none", "en-us", "en-gb", "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", - "es-mx", "zh-cn", "zh-tw", "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", - "nl-be", "nl-nl", "hu-hu", "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", - "ar-kw", "ar-jo", "da-dk", "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", - "ta-in", "vi-vn", "el-gr", "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", - "et-ee", "ca-es", "fi-fi", "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", - "eu-es", "gl-es", "gu-in", "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", - "te-in", and "ur-pk". - :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage - :keyword speller: A value that specifies the type of the speller to use to spell-correct - individual search query terms. Known values are: "none" and "lexicon". - :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType - :keyword select: The comma-separated list of fields to retrieve. If unspecified, all fields - marked as retrievable in the schema are included. - :paramtype select: str - :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. - If you need to scan documents in sequence, but cannot use skip due to this limitation, consider - using orderby on a totally-ordered key and filter with a range query instead. - :paramtype skip: int - :keyword top: The number of search results to retrieve. This can be used in conjunction with - $skip to implement client-side paging of search results. If results are truncated due to - server-side paging, the response will include a continuation token that can be used to issue - another Search request for the next page of results. - :paramtype top: int - :keyword semantic_configuration: The name of a semantic configuration that will be used when - processing documents for queries of type semantic. - :paramtype semantic_configuration: str - :keyword semantic_error_handling: Allows the user to choose whether a semantic call should fail - completely (default / current behavior), or to return partial results. Known values are: - "partial" and "fail". - :paramtype semantic_error_handling: str or ~azure.search.documents.models.SemanticErrorMode - :keyword semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount - of time it takes for semantic enrichment to finish processing before the request fails. - :paramtype semantic_max_wait_in_milliseconds: int - :keyword semantic_query: Allows setting a separate search query that will be solely used for - semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there - is a need to use different queries between the base retrieval and ranking phase, and the L2 - semantic phase. - :paramtype semantic_query: str - :keyword answers: A value that specifies whether answers should be returned as part of the - search response. Known values are: "none" and "extractive". - :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType - :keyword captions: A value that specifies whether captions should be returned as part of the - search response. Known values are: "none" and "extractive". - :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType - :keyword query_rewrites: A value that specifies whether query rewrites should be generated to - augment the search query. Known values are: "none" and "generative".
- :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - :keyword semantic_fields: The comma-separated list of field names used for semantic ranking. - :paramtype semantic_fields: str - :keyword vector_queries: The query parameters for vector and hybrid search queries. - :paramtype vector_queries: list[~azure.search.documents.models.VectorQuery] - :keyword vector_filter_mode: Determines whether or not filters are applied before or after the - vector search is performed. Default is 'preFilter' for new indexes. Known values are: - "postFilter" and "preFilter". - :paramtype vector_filter_mode: str or ~azure.search.documents.models.VectorFilterMode - :keyword hybrid_search: The query parameters to configure hybrid search behaviors. - :paramtype hybrid_search: ~azure.search.documents.models.HybridSearch - """ - super().__init__(**kwargs) - self.include_total_result_count = include_total_result_count - self.facets = facets - self.filter = filter - self.highlight_fields = highlight_fields - self.highlight_post_tag = highlight_post_tag - self.highlight_pre_tag = highlight_pre_tag - self.minimum_coverage = minimum_coverage - self.order_by = order_by - self.query_type = query_type - self.scoring_statistics = scoring_statistics - self.session_id = session_id - self.scoring_parameters = scoring_parameters - self.scoring_profile = scoring_profile - self.debug = debug - self.search_text = search_text - self.search_fields = search_fields - self.search_mode = search_mode - self.query_language = query_language - self.speller = speller - self.select = select - self.skip = skip - self.top = top - self.semantic_configuration = semantic_configuration - self.semantic_error_handling = semantic_error_handling - self.semantic_max_wait_in_milliseconds = semantic_max_wait_in_milliseconds - self.semantic_query = semantic_query - self.answers = answers - self.captions = captions - self.query_rewrites = query_rewrites - self.semantic_fields = semantic_fields - self.vector_queries = vector_queries - self.vector_filter_mode = vector_filter_mode - self.hybrid_search = hybrid_search - - -class SearchResult(_serialization.Model): - """Contains a document found by a search query, plus associated metadata. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar score: The relevance score of the document compared to other documents returned by the - query. Required. - :vartype score: float - :ivar reranker_score: The relevance score computed by the semantic ranker for the top search - results. Search results are sorted by the RerankerScore first and then by the Score. - RerankerScore is only returned for queries of type 'semantic'. - :vartype reranker_score: float - :ivar highlights: Text fragments from the document that indicate the matching search terms, - organized by each applicable field; null if hit highlighting was not enabled for the query. - :vartype highlights: dict[str, list[str]] - :ivar captions: Captions are the most representative passages from the document relatively to - the search query. They are often used as document summary. Captions are only returned for - queries of type 'semantic'. 
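# A rough sketch (hypothetical index fields and values) of the POST body the SearchRequest
# model above serialized to, using the wire names from its _attribute_map
# ("search", "filter", "orderby", "select", ...).
search_request_body = {
    "search": "historic hotel near the waterfront",
    "filter": "rating ge 4",
    "orderby": "rating desc",              # comma-separated expressions, at most 32 clauses
    "select": "hotelId,hotelName,rating",  # comma-separated projection
    "top": 10,
    "queryType": "semantic",
    "semanticConfiguration": "default",
    "answers": "extractive|count-3",
}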
- :vartype captions: list[~azure.search.documents.models.QueryCaptionResult] - :ivar document_debug_info: Contains debugging information that can be used to further explore - your search results. - :vartype document_debug_info: ~azure.search.documents.models.DocumentDebugInfo - """ - - _validation = { - "score": {"required": True, "readonly": True}, - "reranker_score": {"readonly": True}, - "highlights": {"readonly": True}, - "captions": {"readonly": True}, - "document_debug_info": {"readonly": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "score": {"key": "@search\\.score", "type": "float"}, - "reranker_score": {"key": "@search\\.rerankerScore", "type": "float"}, - "highlights": {"key": "@search\\.highlights", "type": "{[str]}"}, - "captions": {"key": "@search\\.captions", "type": "[QueryCaptionResult]"}, - "document_debug_info": {"key": "@search\\.documentDebugInfo", "type": "DocumentDebugInfo"}, - } - - def __init__(self, *, additional_properties: Optional[Dict[str, Any]] = None, **kwargs: Any) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.score = None - self.reranker_score = None - self.highlights = None - self.captions = None - self.document_debug_info = None - - -class VectorThreshold(_serialization.Model): - """The threshold used for vector queries. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - SearchScoreThreshold, VectorSimilarityThreshold - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of threshold used to filter vector queries. Required. Known values are: - "vectorSimilarity" and "searchScore". - :vartype kind: str or ~azure.search.documents.models.VectorThresholdKind - """ - - _validation = { - "kind": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - } - - _subtype_map = {"kind": {"searchScore": "SearchScoreThreshold", "vectorSimilarity": "VectorSimilarityThreshold"}} - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.kind: Optional[str] = None - - -class SearchScoreThreshold(VectorThreshold): - """The results of the vector query will filter based on the '@search.score' value. Note this is - the @search.score returned as part of the search response. The threshold direction will be - chosen for higher @search.score. - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of threshold used to filter vector queries. Required. Known values are: - "vectorSimilarity" and "searchScore". - :vartype kind: str or ~azure.search.documents.models.VectorThresholdKind - :ivar value: The threshold will filter based on the '@search.score' value. Note this is the - @search.score returned as part of the search response. The threshold direction will be chosen - for higher @search.score. Required. - :vartype value: float - """ - - _validation = { - "kind": {"required": True}, - "value": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - "value": {"key": "value", "type": "float"}, - } - - def __init__(self, *, value: float, **kwargs: Any) -> None: - """ - :keyword value: The threshold will filter based on the '@search.score' value. 
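# A rough sketch (hypothetical document fields) of a raw hit as the SearchResult model above
# deserialized it: the "@search.*" keys map to the readonly attributes, and any unmatched
# keys are collected into additional_properties.
raw_hit = {
    "@search.score": 2.39,
    "@search.rerankerScore": 1.87,  # only returned for queries of type 'semantic'
    "@search.highlights": {"description": ["a <em>historic</em> hotel"]},
    "hotelId": "41",                # unmatched -> additional_properties
    "hotelName": "Contoso Suites",  # unmatched -> additional_properties
}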
Note this is the - @search.score returned as part of the search response. The threshold direction will be chosen - for higher @search.score. Required. - :paramtype value: float - """ - super().__init__(**kwargs) - self.kind: str = "searchScore" - self.value = value - - -class SemanticDebugInfo(_serialization.Model): - """SemanticDebugInfo. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar title_field: The title field that was sent to the semantic enrichment process, as well as - how it was used. - :vartype title_field: ~azure.search.documents.models.QueryResultDocumentSemanticField - :ivar content_fields: The content fields that were sent to the semantic enrichment process, as - well as how they were used. - :vartype content_fields: list[~azure.search.documents.models.QueryResultDocumentSemanticField] - :ivar keyword_fields: The keyword fields that were sent to the semantic enrichment process, as - well as how they were used. - :vartype keyword_fields: list[~azure.search.documents.models.QueryResultDocumentSemanticField] - :ivar reranker_input: The raw concatenated strings that were sent to the semantic enrichment - process. - :vartype reranker_input: ~azure.search.documents.models.QueryResultDocumentRerankerInput - """ - - _validation = { - "title_field": {"readonly": True}, - "content_fields": {"readonly": True}, - "keyword_fields": {"readonly": True}, - "reranker_input": {"readonly": True}, - } - - _attribute_map = { - "title_field": {"key": "titleField", "type": "QueryResultDocumentSemanticField"}, - "content_fields": {"key": "contentFields", "type": "[QueryResultDocumentSemanticField]"}, - "keyword_fields": {"key": "keywordFields", "type": "[QueryResultDocumentSemanticField]"}, - "reranker_input": {"key": "rerankerInput", "type": "QueryResultDocumentRerankerInput"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.title_field = None - self.content_fields = None - self.keyword_fields = None - self.reranker_input = None - - -class SingleVectorFieldResult(_serialization.Model): - """A single vector field result. Both @search.score and vector similarity values are returned. - Vector similarity is related to @search.score by an equation. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar search_score: The @search.score value that is calculated from the vector similarity - score. This is the score that's visible in a pure single-field single-vector query. - :vartype search_score: float - :ivar vector_similarity: The vector similarity score for this document. Note this is the - canonical definition of similarity metric, not the 'distance' version. For example, cosine - similarity instead of cosine distance. - :vartype vector_similarity: float - """ - - _validation = { - "search_score": {"readonly": True}, - "vector_similarity": {"readonly": True}, - } - - _attribute_map = { - "search_score": {"key": "searchScore", "type": "float"}, - "vector_similarity": {"key": "vectorSimilarity", "type": "float"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.search_score = None - self.vector_similarity = None - - -class SuggestDocumentsResult(_serialization.Model): - """Response containing suggestion query results from an index. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. 
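# A rough sketch of the polymorphic threshold payload implied by VectorThreshold above: the
# "kind" discriminator in its _subtype_map selects the subclass during deserialization.
score_threshold = {"kind": "searchScore", "value": 1.5}  # dispatches to SearchScoreThreshold
# "kind": "vectorSimilarity" would dispatch to VectorSimilarityThreshold instead
# (that subclass is defined outside this hunk).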
- - :ivar results: The sequence of results returned by the query. Required. - :vartype results: list[~azure.search.documents.models.SuggestResult] - :ivar coverage: A value indicating the percentage of the index that was included in the query, - or null if minimumCoverage was not set in the request. - :vartype coverage: float - """ - - _validation = { - "results": {"required": True, "readonly": True}, - "coverage": {"readonly": True}, - } - - _attribute_map = { - "results": {"key": "value", "type": "[SuggestResult]"}, - "coverage": {"key": "@search\\.coverage", "type": "float"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.results = None - self.coverage = None - - -class SuggestOptions(_serialization.Model): - """Parameter group. - - :ivar filter: An OData expression that filters the documents considered for suggestions. - :vartype filter: str - :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions - query. Default is false. When set to true, the query will find terms even if there's a - substituted or missing character in the search text. While this provides a better experience in - some scenarios, it comes at a performance cost as fuzzy suggestions queries are slower and - consume more resources. - :vartype use_fuzzy_matching: bool - :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :vartype highlight_post_tag: str - :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting of suggestions is disabled. - :vartype highlight_pre_tag: str - :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that - must be covered by a suggestions query in order for the query to be reported as a success. This - parameter can be useful for ensuring search availability even for services with only one - replica. The default is 80. - :vartype minimum_coverage: float - :ivar order_by: The list of OData $orderby expressions by which to sort the results. Each - expression can be either a field name or a call to either the geo.distance() or the - search.score() functions. Each expression can be followed by asc to indicate ascending, or desc - to indicate descending. The default is ascending order. Ties will be broken by the match scores - of documents. If no $orderby is specified, the default sort order is descending by document - match score. There can be at most 32 $orderby clauses. - :vartype order_by: list[str] - :ivar search_fields: The list of field names to search for the specified search text. Target - fields must be included in the specified suggester. - :vartype search_fields: list[str] - :ivar select: The list of fields to retrieve. If unspecified, only the key field will be - included in the results. - :vartype select: list[str] - :ivar top: The number of suggestions to retrieve. The value must be a number between 1 and 100. - The default is 5. 
- :vartype top: int - """ - - _attribute_map = { - "filter": {"key": "$filter", "type": "str"}, - "use_fuzzy_matching": {"key": "UseFuzzyMatching", "type": "bool"}, - "highlight_post_tag": {"key": "highlightPostTag", "type": "str"}, - "highlight_pre_tag": {"key": "highlightPreTag", "type": "str"}, - "minimum_coverage": {"key": "minimumCoverage", "type": "float"}, - "order_by": {"key": "OrderBy", "type": "[str]"}, - "search_fields": {"key": "searchFields", "type": "[str]"}, - "select": {"key": "$select", "type": "[str]"}, - "top": {"key": "$top", "type": "int"}, - } - - def __init__( - self, - *, - filter: Optional[str] = None, # pylint: disable=redefined-builtin - use_fuzzy_matching: Optional[bool] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - order_by: Optional[List[str]] = None, - search_fields: Optional[List[str]] = None, - select: Optional[List[str]] = None, - top: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword filter: An OData expression that filters the documents considered for suggestions. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - suggestions query. Default is false. When set to true, the query will find terms even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy suggestions queries are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a suggestions query in order for the query to be reported as a success. - This parameter can be useful for ensuring search availability even for services with only one - replica. The default is 80. - :paramtype minimum_coverage: float - :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each - expression can be either a field name or a call to either the geo.distance() or the - search.score() functions. Each expression can be followed by asc to indicate ascending, or desc - to indicate descending. The default is ascending order. Ties will be broken by the match scores - of documents. If no $orderby is specified, the default sort order is descending by document - match score. There can be at most 32 $orderby clauses. - :paramtype order_by: list[str] - :keyword search_fields: The list of field names to search for the specified search text. Target - fields must be included in the specified suggester. - :paramtype search_fields: list[str] - :keyword select: The list of fields to retrieve. If unspecified, only the key field will be - included in the results. - :paramtype select: list[str] - :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and - 100. The default is 5. 
- :paramtype top: int - """ - super().__init__(**kwargs) - self.filter = filter - self.use_fuzzy_matching = use_fuzzy_matching - self.highlight_post_tag = highlight_post_tag - self.highlight_pre_tag = highlight_pre_tag - self.minimum_coverage = minimum_coverage - self.order_by = order_by - self.search_fields = search_fields - self.select = select - self.top = top - - -class SuggestRequest(_serialization.Model): - """Parameters for filtering, sorting, fuzzy matching, and other suggestions query behaviors. - - All required parameters must be populated in order to send to server. - - :ivar filter: An OData expression that filters the documents considered for suggestions. - :vartype filter: str - :ivar use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestion - query. Default is false. When set to true, the query will find suggestions even if there's a - substituted or missing character in the search text. While this provides a better experience in - some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and - consume more resources. - :vartype use_fuzzy_matching: bool - :ivar highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :vartype highlight_post_tag: str - :ivar highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting of suggestions is disabled. - :vartype highlight_pre_tag: str - :ivar minimum_coverage: A number between 0 and 100 indicating the percentage of the index that - must be covered by a suggestion query in order for the query to be reported as a success. This - parameter can be useful for ensuring search availability even for services with only one - replica. The default is 80. - :vartype minimum_coverage: float - :ivar order_by: The comma-separated list of OData $orderby expressions by which to sort the - results. Each expression can be either a field name or a call to either the geo.distance() or - the search.score() functions. Each expression can be followed by asc to indicate ascending, or - desc to indicate descending. The default is ascending order. Ties will be broken by the match - scores of documents. If no $orderby is specified, the default sort order is descending by - document match score. There can be at most 32 $orderby clauses. - :vartype order_by: str - :ivar search_text: The search text to use to suggest documents. Must be at least 1 character, - and no more than 100 characters. Required. - :vartype search_text: str - :ivar search_fields: The comma-separated list of field names to search for the specified search - text. Target fields must be included in the specified suggester. - :vartype search_fields: str - :ivar select: The comma-separated list of fields to retrieve. If unspecified, only the key - field will be included in the results. - :vartype select: str - :ivar suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :vartype suggester_name: str - :ivar top: The number of suggestions to retrieve. This must be a value between 1 and 100. The - default is 5. 
- :vartype top: int - """ - - _validation = { - "search_text": {"required": True}, - "suggester_name": {"required": True}, - } - - _attribute_map = { - "filter": {"key": "filter", "type": "str"}, - "use_fuzzy_matching": {"key": "fuzzy", "type": "bool"}, - "highlight_post_tag": {"key": "highlightPostTag", "type": "str"}, - "highlight_pre_tag": {"key": "highlightPreTag", "type": "str"}, - "minimum_coverage": {"key": "minimumCoverage", "type": "float"}, - "order_by": {"key": "orderby", "type": "str"}, - "search_text": {"key": "search", "type": "str"}, - "search_fields": {"key": "searchFields", "type": "str"}, - "select": {"key": "select", "type": "str"}, - "suggester_name": {"key": "suggesterName", "type": "str"}, - "top": {"key": "top", "type": "int"}, - } - - def __init__( - self, - *, - search_text: str, - suggester_name: str, - filter: Optional[str] = None, # pylint: disable=redefined-builtin - use_fuzzy_matching: Optional[bool] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - order_by: Optional[str] = None, - search_fields: Optional[str] = None, - select: Optional[str] = None, - top: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword filter: An OData expression that filters the documents considered for suggestions. - :paramtype filter: str - :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the - suggestion query. Default is false. When set to true, the query will find suggestions even if - there's a substituted or missing character in the search text. While this provides a better - experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are - slower and consume more resources. - :paramtype use_fuzzy_matching: bool - :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with - highlightPreTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_post_tag: str - :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with - highlightPostTag. If omitted, hit highlighting of suggestions is disabled. - :paramtype highlight_pre_tag: str - :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index - that must be covered by a suggestion query in order for the query to be reported as a success. - This parameter can be useful for ensuring search availability even for services with only one - replica. The default is 80. - :paramtype minimum_coverage: float - :keyword order_by: The comma-separated list of OData $orderby expressions by which to sort the - results. Each expression can be either a field name or a call to either the geo.distance() or - the search.score() functions. Each expression can be followed by asc to indicate ascending, or - desc to indicate descending. The default is ascending order. Ties will be broken by the match - scores of documents. If no $orderby is specified, the default sort order is descending by - document match score. There can be at most 32 $orderby clauses. - :paramtype order_by: str - :keyword search_text: The search text to use to suggest documents. Must be at least 1 - character, and no more than 100 characters. Required. - :paramtype search_text: str - :keyword search_fields: The comma-separated list of field names to search for the specified - search text. Target fields must be included in the specified suggester. 
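# A rough sketch (hypothetical suggester and field names) of the POST body the SuggestRequest
# model above serialized to, using the wire names from its _attribute_map; only "search" and
# "suggesterName" are required.
suggest_request_body = {
    "search": "lux",               # 1-100 characters of partial input
    "suggesterName": "sg",         # must exist in the index's suggesters collection
    "fuzzy": True,                 # tolerate a substituted or missing character (slower)
    "select": "hotelId,hotelName",
    "top": 5,
}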
- :paramtype search_fields: str - :keyword select: The comma-separated list of fields to retrieve. If unspecified, only the key - field will be included in the results. - :paramtype select: str - :keyword suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :paramtype suggester_name: str - :keyword top: The number of suggestions to retrieve. This must be a value between 1 and 100. - The default is 5. - :paramtype top: int - """ - super().__init__(**kwargs) - self.filter = filter - self.use_fuzzy_matching = use_fuzzy_matching - self.highlight_post_tag = highlight_post_tag - self.highlight_pre_tag = highlight_pre_tag - self.minimum_coverage = minimum_coverage - self.order_by = order_by - self.search_text = search_text - self.search_fields = search_fields - self.select = select - self.suggester_name = suggester_name - self.top = top - - -class SuggestResult(_serialization.Model): - """A result containing a document found by a suggestion query, plus associated metadata. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar text: The text of the suggestion result. Required. - :vartype text: str - """ - - _validation = { - "text": {"required": True, "readonly": True}, - } - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "text": {"key": "@search\\.text", "type": "str"}, - } - - def __init__(self, *, additional_properties: Optional[Dict[str, Any]] = None, **kwargs: Any) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.text = None - - -class TextResult(_serialization.Model): - """The BM25 or Classic score for the text portion of the query. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar search_score: The BM25 or Classic score for the text portion of the query. - :vartype search_score: float - """ - - _validation = { - "search_score": {"readonly": True}, - } - - _attribute_map = { - "search_score": {"key": "searchScore", "type": "float"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.search_score = None - - -class VectorQuery(_serialization.Model): - """The query parameters for vector and hybrid search queries. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - VectorizableImageBinaryQuery, VectorizableImageUrlQuery, VectorizableTextQuery, VectorizedQuery - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", - "text", "imageUrl", and "imageBinary". - :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int - :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. 
- :vartype fields: str - :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :vartype exhaustive: bool - :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :vartype oversampling: float - :ivar weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - """ - - _validation = { - "kind": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, - "fields": {"key": "fields", "type": "str"}, - "exhaustive": {"key": "exhaustive", "type": "bool"}, - "oversampling": {"key": "oversampling", "type": "float"}, - "weight": {"key": "weight", "type": "float"}, - "threshold": {"key": "threshold", "type": "VectorThreshold"}, - "filter_override": {"key": "filterOverride", "type": "str"}, - } - - _subtype_map = { - "kind": { - "imageBinary": "VectorizableImageBinaryQuery", - "imageUrl": "VectorizableImageUrlQuery", - "text": "VectorizableTextQuery", - "vector": "VectorizedQuery", - } - } - - def __init__( - self, - *, - k_nearest_neighbors: Optional[int] = None, - fields: Optional[str] = None, - exhaustive: Optional[bool] = None, - oversampling: Optional[float] = None, - weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int - :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :paramtype fields: str - :keyword exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :paramtype exhaustive: bool - :keyword oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. 
- :paramtype oversampling: float - :keyword weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :paramtype weight: float - :keyword threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :paramtype threshold: ~azure.search.documents.models.VectorThreshold - :keyword filter_override: The OData filter expression to apply to this specific vector query. - If no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :paramtype filter_override: str - """ - super().__init__(**kwargs) - self.kind: Optional[str] = None - self.k_nearest_neighbors = k_nearest_neighbors - self.fields = fields - self.exhaustive = exhaustive - self.oversampling = oversampling - self.weight = weight - self.threshold = threshold - self.filter_override = filter_override - - -class VectorizableImageBinaryQuery(VectorQuery): - """The query parameters to use for vector search when a base 64 encoded binary of an image that - needs to be vectorized is provided. - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", - "text", "imageUrl", and "imageBinary". - :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int - :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :vartype fields: str - :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :vartype exhaustive: bool - :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :vartype oversampling: float - :ivar weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. 
If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar base64_image: The base 64 encoded binary of an image to be vectorized to perform a vector - search query. - :vartype base64_image: str - """ - - _validation = { - "kind": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, - "fields": {"key": "fields", "type": "str"}, - "exhaustive": {"key": "exhaustive", "type": "bool"}, - "oversampling": {"key": "oversampling", "type": "float"}, - "weight": {"key": "weight", "type": "float"}, - "threshold": {"key": "threshold", "type": "VectorThreshold"}, - "filter_override": {"key": "filterOverride", "type": "str"}, - "base64_image": {"key": "base64Image", "type": "str"}, - } - - def __init__( - self, - *, - k_nearest_neighbors: Optional[int] = None, - fields: Optional[str] = None, - exhaustive: Optional[bool] = None, - oversampling: Optional[float] = None, - weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - base64_image: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int - :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :paramtype fields: str - :keyword exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :paramtype exhaustive: bool - :keyword oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :paramtype oversampling: float - :keyword weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :paramtype weight: float - :keyword threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :paramtype threshold: ~azure.search.documents.models.VectorThreshold - :keyword filter_override: The OData filter expression to apply to this specific vector query. - If no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :paramtype filter_override: str - :keyword base64_image: The base 64 encoded binary of an image to be vectorized to perform a - vector search query. 
- :paramtype base64_image: str - """ - super().__init__( - k_nearest_neighbors=k_nearest_neighbors, - fields=fields, - exhaustive=exhaustive, - oversampling=oversampling, - weight=weight, - threshold=threshold, - filter_override=filter_override, - **kwargs - ) - self.kind: str = "imageBinary" - self.base64_image = base64_image - - -class VectorizableImageUrlQuery(VectorQuery): - """The query parameters to use for vector search when an url that represents an image value that - needs to be vectorized is provided. - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", - "text", "imageUrl", and "imageBinary". - :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int - :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :vartype fields: str - :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :vartype exhaustive: bool - :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :vartype oversampling: float - :ivar weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar url: The URL of an image to be vectorized to perform a vector search query. 
- :vartype url: str - """ - - _validation = { - "kind": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, - "fields": {"key": "fields", "type": "str"}, - "exhaustive": {"key": "exhaustive", "type": "bool"}, - "oversampling": {"key": "oversampling", "type": "float"}, - "weight": {"key": "weight", "type": "float"}, - "threshold": {"key": "threshold", "type": "VectorThreshold"}, - "filter_override": {"key": "filterOverride", "type": "str"}, - "url": {"key": "url", "type": "str"}, - } - - def __init__( - self, - *, - k_nearest_neighbors: Optional[int] = None, - fields: Optional[str] = None, - exhaustive: Optional[bool] = None, - oversampling: Optional[float] = None, - weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - url: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int - :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :paramtype fields: str - :keyword exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :paramtype exhaustive: bool - :keyword oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :paramtype oversampling: float - :keyword weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :paramtype weight: float - :keyword threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :paramtype threshold: ~azure.search.documents.models.VectorThreshold - :keyword filter_override: The OData filter expression to apply to this specific vector query. - If no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :paramtype filter_override: str - :keyword url: The URL of an image to be vectorized to perform a vector search query. - :paramtype url: str - """ - super().__init__( - k_nearest_neighbors=k_nearest_neighbors, - fields=fields, - exhaustive=exhaustive, - oversampling=oversampling, - weight=weight, - threshold=threshold, - filter_override=filter_override, - **kwargs - ) - self.kind: str = "imageUrl" - self.url = url - - -class VectorizableTextQuery(VectorQuery): - """The query parameters to use for vector search when a text value that needs to be vectorized is - provided. - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of vector query being performed. Required. 
Known values are: "vector", - "text", "imageUrl", and "imageBinary". - :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int - :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :vartype fields: str - :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :vartype exhaustive: bool - :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :vartype oversampling: float - :ivar weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar text: The text to be vectorized to perform a vector search query. Required. - :vartype text: str - :ivar query_rewrites: Can be configured to let a generative model rewrite the query before - sending it to be vectorized. Known values are: "none" and "generative". - :vartype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - """ - - _validation = { - "kind": {"required": True}, - "text": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, - "fields": {"key": "fields", "type": "str"}, - "exhaustive": {"key": "exhaustive", "type": "bool"}, - "oversampling": {"key": "oversampling", "type": "float"}, - "weight": {"key": "weight", "type": "float"}, - "threshold": {"key": "threshold", "type": "VectorThreshold"}, - "filter_override": {"key": "filterOverride", "type": "str"}, - "text": {"key": "text", "type": "str"}, - "query_rewrites": {"key": "queryRewrites", "type": "str"}, - } - - def __init__( - self, - *, - text: str, - k_nearest_neighbors: Optional[int] = None, - fields: Optional[str] = None, - exhaustive: Optional[bool] = None, - oversampling: Optional[float] = None, - weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. 
- :paramtype k_nearest_neighbors: int - :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :paramtype fields: str - :keyword exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :paramtype exhaustive: bool - :keyword oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :paramtype oversampling: float - :keyword weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :paramtype weight: float - :keyword threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :paramtype threshold: ~azure.search.documents.models.VectorThreshold - :keyword filter_override: The OData filter expression to apply to this specific vector query. - If no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :paramtype filter_override: str - :keyword text: The text to be vectorized to perform a vector search query. Required. - :paramtype text: str - :keyword query_rewrites: Can be configured to let a generative model rewrite the query before - sending it to be vectorized. Known values are: "none" and "generative". - :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType - """ - super().__init__( - k_nearest_neighbors=k_nearest_neighbors, - fields=fields, - exhaustive=exhaustive, - oversampling=oversampling, - weight=weight, - threshold=threshold, - filter_override=filter_override, - **kwargs - ) - self.kind: str = "text" - self.text = text - self.query_rewrites = query_rewrites - - -class VectorizedQuery(VectorQuery): - """The query parameters to use for vector search when a raw vector value is provided. - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of vector query being performed. Required. Known values are: "vector", - "text", "imageUrl", and "imageBinary". - :vartype kind: str or ~azure.search.documents.models.VectorQueryKind - :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :vartype k_nearest_neighbors: int - :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :vartype fields: str - :ivar exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :vartype exhaustive: bool - :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. 
It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :vartype oversampling: float - :ivar weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger - than zero. - :vartype weight: float - :ivar threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :vartype threshold: ~azure.search.documents.models.VectorThreshold - :ivar filter_override: The OData filter expression to apply to this specific vector query. If - no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :vartype filter_override: str - :ivar vector: The vector representation of a search query. Required. - :vartype vector: list[float] - """ - - _validation = { - "kind": {"required": True}, - "vector": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - "k_nearest_neighbors": {"key": "k", "type": "int"}, - "fields": {"key": "fields", "type": "str"}, - "exhaustive": {"key": "exhaustive", "type": "bool"}, - "oversampling": {"key": "oversampling", "type": "float"}, - "weight": {"key": "weight", "type": "float"}, - "threshold": {"key": "threshold", "type": "VectorThreshold"}, - "filter_override": {"key": "filterOverride", "type": "str"}, - "vector": {"key": "vector", "type": "[float]"}, - } - - def __init__( - self, - *, - vector: List[float], - k_nearest_neighbors: Optional[int] = None, - fields: Optional[str] = None, - exhaustive: Optional[bool] = None, - oversampling: Optional[float] = None, - weight: Optional[float] = None, - threshold: Optional["_models.VectorThreshold"] = None, - filter_override: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword k_nearest_neighbors: Number of nearest neighbors to return as top hits. - :paramtype k_nearest_neighbors: int - :keyword fields: Vector Fields of type Collection(Edm.Single) to be included in the vector - searched. - :paramtype fields: str - :keyword exhaustive: When true, triggers an exhaustive k-nearest neighbor search across all - vectors within the vector index. Useful for scenarios where exact matches are critical, such as - determining ground truth values. - :paramtype exhaustive: bool - :keyword oversampling: Oversampling factor. Minimum value is 1. It overrides the - 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a compression method - is used on the underlying vector field. - :paramtype oversampling: float - :keyword weight: Relative weight of the vector query when compared to other vector query and/or - the text query within the same search request. This value is used when combining the results of - multiple ranking lists produced by the different vector queries and/or the results retrieved - through the text query. The higher the weight, the higher the documents that matched that query - will be in the final ranking. 
Default is 1.0 and the value needs to be a positive number larger - than zero. - :paramtype weight: float - :keyword threshold: The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric. - :paramtype threshold: ~azure.search.documents.models.VectorThreshold - :keyword filter_override: The OData filter expression to apply to this specific vector query. - If no filter expression is defined at the vector level, the expression defined in the top level - filter parameter is used instead. - :paramtype filter_override: str - :keyword vector: The vector representation of a search query. Required. - :paramtype vector: list[float] - """ - super().__init__( - k_nearest_neighbors=k_nearest_neighbors, - fields=fields, - exhaustive=exhaustive, - oversampling=oversampling, - weight=weight, - threshold=threshold, - filter_override=filter_override, - **kwargs - ) - self.kind: str = "vector" - self.vector = vector - - -class VectorsDebugInfo(_serialization.Model): - """VectorsDebugInfo. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar subscores: The breakdown of subscores of the document prior to the chosen result set - fusion/combination method such as RRF. - :vartype subscores: ~azure.search.documents.models.QueryResultDocumentSubscores - """ - - _validation = { - "subscores": {"readonly": True}, - } - - _attribute_map = { - "subscores": {"key": "subscores", "type": "QueryResultDocumentSubscores"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.subscores = None - - -class VectorSimilarityThreshold(VectorThreshold): - """The results of the vector query will be filtered based on the vector similarity metric. Note - this is the canonical definition of similarity metric, not the 'distance' version. The - threshold direction (larger or smaller) will be chosen automatically according to the metric - used by the field. - - All required parameters must be populated in order to send to server. - - :ivar kind: The kind of threshold used to filter vector queries. Required. Known values are: - "vectorSimilarity" and "searchScore". - :vartype kind: str or ~azure.search.documents.models.VectorThresholdKind - :ivar value: The threshold will filter based on the similarity metric value. Note this is the - canonical definition of similarity metric, not the 'distance' version. The threshold direction - (larger or smaller) will be chosen automatically according to the metric used by the field. - Required. - :vartype value: float - """ - - _validation = { - "kind": {"required": True}, - "value": {"required": True}, - } - - _attribute_map = { - "kind": {"key": "kind", "type": "str"}, - "value": {"key": "value", "type": "float"}, - } - - def __init__(self, *, value: float, **kwargs: Any) -> None: - """ - :keyword value: The threshold will filter based on the similarity metric value. Note this is - the canonical definition of similarity metric, not the 'distance' version. The threshold - direction (larger or smaller) will be chosen automatically according to the metric used by the - field. Required. 
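The vector query models being deleted here are mirrored by public classes of the same names that these docstrings reference (the azure.search.documents.models namespace). A rough sketch of a hybrid query combining a caller-supplied embedding with service-side text vectorization; the endpoint, key, field names and embedding values are placeholders, and the vector_queries keyword on search() is assumed:

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents import SearchClient
    from azure.search.documents.models import (
        VectorizableTextQuery,
        VectorizedQuery,
        VectorSimilarityThreshold,
    )

    client = SearchClient("https://<service>.search.windows.net", "<index>", AzureKeyCredential("<key>"))

    # Raw vector: the caller supplies the embedding; 'fields' names Collection(Edm.Single) fields.
    raw_query = VectorizedQuery(
        vector=[0.013, -0.094, 0.257],                   # placeholder embedding
        k_nearest_neighbors=5,
        fields="contentVector",
        threshold=VectorSimilarityThreshold(value=0.8),  # drop weak matches by similarity
    )

    # Text that the service vectorizes before running the nearest-neighbor search.
    text_query = VectorizableTextQuery(text="walkable hotels near downtown", fields="contentVector")

    results = client.search(search_text="downtown", vector_queries=[raw_query, text_query])
    for doc in results:
        print(doc["@search.score"])

The weight, oversampling and filter_override keywords are accepted by both classes, matching the shared VectorQuery base shown above.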
- :paramtype value: float - """ - super().__init__(**kwargs) - self.kind: str = "vectorSimilarity" - self.value = value diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py deleted file mode 100644 index 758a5f9c35d3..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_search_index_client_enums.py +++ /dev/null @@ -1,424 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from enum import Enum -from azure.core import CaseInsensitiveEnumMeta - - -class AutocompleteMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles - and 'oneTermWithContext' to use the current context in producing autocomplete terms. - """ - - ONE_TERM = "oneTerm" - """Only one term is suggested. If the query has two terms, only the last term is completed. For - example, if the input is 'washington medic', the suggested terms could include 'medicaid', - 'medicare', and 'medicine'.""" - TWO_TERMS = "twoTerms" - """Matching two-term phrases in the index will be suggested. For example, if the input is 'medic', - the suggested terms could include 'medicare coverage' and 'medical assistant'.""" - ONE_TERM_WITH_CONTEXT = "oneTermWithContext" - """Completes the last term in a query with two or more terms, where the last two terms are a - phrase that exists in the index. For example, if the input is 'washington medic', the suggested - terms could include 'washington medicaid' and 'washington medical'.""" - - -class HybridCountAndFacetMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Determines whether the count and facets should includes all documents that matched the search - query, or only the documents that are retrieved within the 'maxTextRecallSize' window. The - default value is 'countAllResults'. - """ - - COUNT_RETRIEVABLE_RESULTS = "countRetrievableResults" - """Only include documents that were matched within the 'maxTextRecallSize' retrieval window when - computing 'count' and 'facets'.""" - COUNT_ALL_RESULTS = "countAllResults" - """Include all documents that were matched by the search query when computing 'count' and - 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' - retrieval window.""" - - -class IndexActionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The operation to perform on a document in an indexing batch.""" - - UPLOAD = "upload" - """Inserts the document into the index if it is new and updates it if it exists. All fields are - replaced in the update case.""" - MERGE = "merge" - """Merges the specified field values with an existing document. If the document does not exist, - the merge will fail. Any field you specify in a merge will replace the existing field in the - document. This also applies to collections of primitive and complex types.""" - MERGE_OR_UPLOAD = "mergeOrUpload" - """Behaves like merge if a document with the given key already exists in the index. 
If the - document does not exist, it behaves like upload with a new document.""" - DELETE = "delete" - """Removes the specified document from the index. Any field you specify in a delete operation - other than the key field will be ignored. If you want to remove an individual field from a - document, use merge instead and set the field explicitly to null.""" - - -class QueryAnswerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """This parameter is only valid if the query type is ``semantic``. If set, the query returns - answers extracted from key passages in the highest ranked documents. The number of answers - returned can be configured by appending the pipe character ``|`` followed by the - ``count-`` option after the answers parameter value, such as - ``extractive|count-3``. Default count is 1. The confidence threshold can be configured by - appending the pipe character ``|`` followed by the ``threshold-`` option - after the answers parameter value, such as ``extractive|threshold-0.9``. Default threshold is - 0.7. The maximum character length of answers can be configured by appending the pipe character - '|' followed by the 'count-:code:``', such as - 'extractive|maxcharlength-600'. - """ - - NONE = "none" - """Do not return answers for the query.""" - EXTRACTIVE = "extractive" - """Extracts answer candidates from the contents of the documents returned in response to a query - expressed as a question in natural language.""" - - -class QueryCaptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """This parameter is only valid if the query type is ``semantic``. If set, the query returns - captions extracted from key passages in the highest ranked documents. When Captions is set to - ``extractive``\\ , highlighting is enabled by default, and can be configured by appending the - pipe character ``|`` followed by the ``highlight-`` option, such as - ``extractive|highlight-true``. Defaults to ``None``. The maximum character length of captions - can be configured by appending the pipe character '|' followed by the 'count-:code:``', such as 'extractive|maxcharlength-600'. - """ - - NONE = "none" - """Do not return captions for the query.""" - EXTRACTIVE = "extractive" - """Extracts captions from the matching documents that contain passages relevant to the search - query.""" - - -class QueryDebugMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Enables a debugging tool that can be used to further explore your search results. You can - enable multiple debug modes simultaneously by separating them with a | character, for example: - semantic|queryRewrites. 
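The four IndexActionType values above map onto the batching helpers in this package's _index_documents_batch.py. A short sketch, assuming the IndexDocumentsBatch helper method names stay as they are today and using placeholder documents keyed on hotelId:

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents import IndexDocumentsBatch, SearchClient

    batch = IndexDocumentsBatch()
    batch.add_upload_actions([{"hotelId": "1", "hotelName": "Fancy Stay"}])  # upload: insert or fully replace
    batch.add_merge_or_upload_actions([{"hotelId": "2", "rating": 4.5}])     # merge, falling back to upload
    batch.add_delete_actions([{"hotelId": "3"}])                             # only the key field is honored

    client = SearchClient("https://<service>.search.windows.net", "<index>", AzureKeyCredential("<key>"))
    results = client.index_documents(batch)
    print([(r.key, r.succeeded) for r in results])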
- """ - - DISABLED = "disabled" - """No query debugging information will be returned.""" - SEMANTIC = "semantic" - """Allows the user to further explore their reranked results.""" - VECTOR = "vector" - """Allows the user to further explore their hybrid and vector query results.""" - QUERY_REWRITES = "queryRewrites" - """Allows the user to explore the list of query rewrites generated for their search request.""" - ALL = "all" - """Turn on all debug options.""" - - -class QueryLanguage(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The language of the query.""" - - NONE = "none" - """Query language not specified.""" - EN_US = "en-us" - """Query language value for English (United States).""" - EN_GB = "en-gb" - """Query language value for English (Great Britain).""" - EN_IN = "en-in" - """Query language value for English (India).""" - EN_CA = "en-ca" - """Query language value for English (Canada).""" - EN_AU = "en-au" - """Query language value for English (Australia).""" - FR_FR = "fr-fr" - """Query language value for French (France).""" - FR_CA = "fr-ca" - """Query language value for French (Canada).""" - DE_DE = "de-de" - """Query language value for German (Germany).""" - ES_ES = "es-es" - """Query language value for Spanish (Spain).""" - ES_MX = "es-mx" - """Query language value for Spanish (Mexico).""" - ZH_CN = "zh-cn" - """Query language value for Chinese (China).""" - ZH_TW = "zh-tw" - """Query language value for Chinese (Taiwan).""" - PT_BR = "pt-br" - """Query language value for Portuguese (Brazil).""" - PT_PT = "pt-pt" - """Query language value for Portuguese (Portugal).""" - IT_IT = "it-it" - """Query language value for Italian (Italy).""" - JA_JP = "ja-jp" - """Query language value for Japanese (Japan).""" - KO_KR = "ko-kr" - """Query language value for Korean (Korea).""" - RU_RU = "ru-ru" - """Query language value for Russian (Russia).""" - CS_CZ = "cs-cz" - """Query language value for Czech (Czech Republic).""" - NL_BE = "nl-be" - """Query language value for Dutch (Belgium).""" - NL_NL = "nl-nl" - """Query language value for Dutch (Netherlands).""" - HU_HU = "hu-hu" - """Query language value for Hungarian (Hungary).""" - PL_PL = "pl-pl" - """Query language value for Polish (Poland).""" - SV_SE = "sv-se" - """Query language value for Swedish (Sweden).""" - TR_TR = "tr-tr" - """Query language value for Turkish (Turkey).""" - HI_IN = "hi-in" - """Query language value for Hindi (India).""" - AR_SA = "ar-sa" - """Query language value for Arabic (Saudi Arabia).""" - AR_EG = "ar-eg" - """Query language value for Arabic (Egypt).""" - AR_MA = "ar-ma" - """Query language value for Arabic (Morocco).""" - AR_KW = "ar-kw" - """Query language value for Arabic (Kuwait).""" - AR_JO = "ar-jo" - """Query language value for Arabic (Jordan).""" - DA_DK = "da-dk" - """Query language value for Danish (Denmark).""" - NO_NO = "no-no" - """Query language value for Norwegian (Norway).""" - BG_BG = "bg-bg" - """Query language value for Bulgarian (Bulgaria).""" - HR_HR = "hr-hr" - """Query language value for Croatian (Croatia).""" - HR_BA = "hr-ba" - """Query language value for Croatian (Bosnia and Herzegovina).""" - MS_MY = "ms-my" - """Query language value for Malay (Malaysia).""" - MS_BN = "ms-bn" - """Query language value for Malay (Brunei Darussalam).""" - SL_SL = "sl-sl" - """Query language value for Slovenian (Slovenia).""" - TA_IN = "ta-in" - """Query language value for Tamil (India).""" - VI_VN = "vi-vn" - """Query language value for Vietnamese (Viet Nam).""" - EL_GR = "el-gr" - """Query 
language value for Greek (Greece).""" - RO_RO = "ro-ro" - """Query language value for Romanian (Romania).""" - IS_IS = "is-is" - """Query language value for Icelandic (Iceland).""" - ID_ID = "id-id" - """Query language value for Indonesian (Indonesia).""" - TH_TH = "th-th" - """Query language value for Thai (Thailand).""" - LT_LT = "lt-lt" - """Query language value for Lithuanian (Lithuania).""" - UK_UA = "uk-ua" - """Query language value for Ukrainian (Ukraine).""" - LV_LV = "lv-lv" - """Query language value for Latvian (Latvia).""" - ET_EE = "et-ee" - """Query language value for Estonian (Estonia).""" - CA_ES = "ca-es" - """Query language value for Catalan.""" - FI_FI = "fi-fi" - """Query language value for Finnish (Finland).""" - SR_BA = "sr-ba" - """Query language value for Serbian (Bosnia and Herzegovina).""" - SR_ME = "sr-me" - """Query language value for Serbian (Montenegro).""" - SR_RS = "sr-rs" - """Query language value for Serbian (Serbia).""" - SK_SK = "sk-sk" - """Query language value for Slovak (Slovakia).""" - NB_NO = "nb-no" - """Query language value for Norwegian (Norway).""" - HY_AM = "hy-am" - """Query language value for Armenian (Armenia).""" - BN_IN = "bn-in" - """Query language value for Bengali (India).""" - EU_ES = "eu-es" - """Query language value for Basque.""" - GL_ES = "gl-es" - """Query language value for Galician.""" - GU_IN = "gu-in" - """Query language value for Gujarati (India).""" - HE_IL = "he-il" - """Query language value for Hebrew (Israel).""" - GA_IE = "ga-ie" - """Query language value for Irish (Ireland).""" - KN_IN = "kn-in" - """Query language value for Kannada (India).""" - ML_IN = "ml-in" - """Query language value for Malayalam (India).""" - MR_IN = "mr-in" - """Query language value for Marathi (India).""" - FA_AE = "fa-ae" - """Query language value for Persian (U.A.E.).""" - PA_IN = "pa-in" - """Query language value for Punjabi (India).""" - TE_IN = "te-in" - """Query language value for Telugu (India).""" - UR_PK = "ur-pk" - """Query language value for Urdu (Pakistan).""" - - -class QueryRewritesType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """This parameter is only valid if the query type is ``semantic``. When QueryRewrites is set to - ``generative``\\ , the query terms are sent to a generate model which will produce 10 (default) - rewrites to help increase the recall of the request. The requested count can be configured by - appending the pipe character ``|`` followed by the ``count-`` option, such - as ``generative|count-3``. Defaults to ``None``. - """ - - NONE = "none" - """Do not generate additional query rewrites for this query.""" - GENERATIVE = "generative" - """Generate alternative query terms to increase the recall of a search request.""" - - -class QuerySpellerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Improve search recall by spell-correcting individual search query terms.""" - - NONE = "none" - """Speller not enabled.""" - LEXICON = "lexicon" - """Speller corrects individual query terms using a static lexicon for the language specified by - the queryLanguage parameter.""" - - -class QueryType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query - uses the Lucene query syntax and 'semantic' if query syntax is not needed. - """ - - SIMPLE = "simple" - """Uses the simple query syntax for searches. Search text is interpreted using a simple query - language that allows for symbols such as +, * and "". 
Queries are evaluated across all - searchable fields by default, unless the searchFields parameter is specified.""" - FULL = "full" - """Uses the full Lucene query syntax for searches. Search text is interpreted using the Lucene - query language which allows field-specific and weighted searches, as well as other advanced - features.""" - SEMANTIC = "semantic" - """Best suited for queries expressed in natural language as opposed to keywords. Improves - precision of search results by re-ranking the top search results using a ranking model trained - on the Web corpus.""" - - -class ScoringStatistics(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """A value that specifies whether we want to calculate scoring statistics (such as document - frequency) globally for more consistent scoring, or locally, for lower latency. The default is - 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global - scoring statistics can increase latency of search queries. - """ - - LOCAL = "local" - """The scoring statistics will be calculated locally for lower latency.""" - GLOBAL = "global" - """The scoring statistics will be calculated globally for more consistent scoring.""" - - -class SearchMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Specifies whether any or all of the search terms must be matched in order to count the document - as a match. - """ - - ANY = "any" - """Any of the search terms must be matched in order to count the document as a match.""" - ALL = "all" - """All of the search terms must be matched in order to count the document as a match.""" - - -class SemanticErrorMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Allows the user to choose whether a semantic call should fail completely, or to return partial - results. - """ - - PARTIAL = "partial" - """If the semantic processing fails, partial results still return. The definition of partial - results depends on what semantic step failed and what was the reason for failure.""" - FAIL = "fail" - """If there is an exception during the semantic processing step, the query will fail and return - the appropriate HTTP code depending on the error.""" - - -class SemanticErrorReason(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Reason that a partial response was returned for a semantic ranking request.""" - - MAX_WAIT_EXCEEDED = "maxWaitExceeded" - """If ``semanticMaxWaitInMilliseconds`` was set and the semantic processing duration exceeded that - value. Only the base results were returned.""" - CAPACITY_OVERLOADED = "capacityOverloaded" - """The request was throttled. Only the base results were returned.""" - TRANSIENT = "transient" - """At least one step of the semantic process failed.""" - - -class SemanticFieldState(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The way the field was used for the semantic enrichment process.""" - - USED = "used" - """The field was fully used for semantic enrichment.""" - UNUSED = "unused" - """The field was not used for semantic enrichment.""" - PARTIAL = "partial" - """The field was partially used for semantic enrichment.""" - - -class SemanticQueryRewritesResultType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of query rewrite that was used for this request.""" - - ORIGINAL_QUERY_ONLY = "originalQueryOnly" - """Query rewrites were not successfully generated for this request. 
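The QueryType, QueryAnswerType and QueryCaptionType values documented above come together in a semantic query. A hedged sketch, assuming the public search() keywords query_type, semantic_configuration_name, query_answer and query_caption; the endpoint, key and semantic configuration name are placeholders:

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents import SearchClient

    client = SearchClient("https://<service>.search.windows.net", "<index>", AzureKeyCredential("<key>"))

    # 'semantic' reranks the top results; the pipe-delimited modifiers (count, threshold,
    # highlight) follow the option syntax described in the enum docstrings above.
    results = client.search(
        search_text="walkable hotels near downtown",
        query_type="semantic",
        semantic_configuration_name="default",      # placeholder configuration name
        query_answer="extractive|count-3",
        query_caption="extractive|highlight-true",
    )
    for doc in results:
        print(doc)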
Only the original query was - used to retrieve the results.""" - - -class SemanticSearchResultsType(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Type of partial response that was returned for a semantic ranking request.""" - - BASE_RESULTS = "baseResults" - """Results without any semantic enrichment or reranking.""" - RERANKED_RESULTS = "rerankedResults" - """Results have been reranked with the reranker model and will include semantic captions. They - will not include any answers, answers highlights or caption highlights.""" - - -class VectorFilterMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Determines whether or not filters are applied before or after the vector search is performed.""" - - POST_FILTER = "postFilter" - """The filter will be applied after the candidate set of vector results is returned. Depending on - the filter selectivity, this can result in fewer results than requested by the parameter 'k'.""" - PRE_FILTER = "preFilter" - """The filter will be applied before the search query.""" - - -class VectorQueryKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The kind of vector query being performed.""" - - VECTOR = "vector" - """Vector query where a raw vector value is provided.""" - TEXT = "text" - """Vector query where a text value that needs to be vectorized is provided.""" - IMAGE_URL = "imageUrl" - """Vector query where an url that represents an image value that needs to be vectorized is - provided.""" - IMAGE_BINARY = "imageBinary" - """Vector query where a base 64 encoded binary of an image that needs to be vectorized is - provided.""" - - -class VectorThresholdKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The kind of vector query being performed.""" - - VECTOR_SIMILARITY = "vectorSimilarity" - """The results of the vector query will be filtered based on the vector similarity metric. Note - this is the canonical definition of similarity metric, not the 'distance' version. The - threshold direction (larger or smaller) will be chosen automatically according to the metric - used by the field.""" - SEARCH_SCORE = "searchScore" - """The results of the vector query will filter based on the '@search.score' value. Note this is - the @search.score returned as part of the search response. The threshold direction will be - chosen for higher @search.score.""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py index c7d64959c608..cad45d7952dd 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- # pylint: disable=wrong-import-position @@ -10,14 +12,28 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._documents_operations import DocumentsOperations # type: ignore +from ._operations import DataSourcesOperationsOperations # type: ignore +from ._operations import IndexersOperationsOperations # type: ignore +from ._operations import SkillsetsOperationsOperations # type: ignore +from ._operations import SynonymMapsOperationsOperations # type: ignore +from ._operations import IndexesOperationsOperations # type: ignore +from ._operations import AliasesOperationsOperations # type: ignore +from ._operations import DocumentsOperationsOperations # type: ignore +from ._operations import SearchClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all from ._patch import * from ._patch import patch_sdk as _patch_sdk __all__ = [ - "DocumentsOperations", + "DataSourcesOperationsOperations", + "IndexersOperationsOperations", + "SkillsetsOperationsOperations", + "SynonymMapsOperationsOperations", + "IndexesOperationsOperations", + "AliasesOperationsOperations", + "DocumentsOperationsOperations", + "SearchClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore _patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py deleted file mode 100644 index 7111524789c0..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_documents_operations.py +++ /dev/null @@ -1,1475 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, List, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. 
import models as _models -from .._serialization import Serializer - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_count_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs/$count") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_search_get_request( - *, - search_text: Optional[str] = None, - include_total_result_count: Optional[bool] = None, - facets: Optional[List[str]] = None, - filter: Optional[str] = None, - highlight_fields: Optional[List[str]] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - order_by: Optional[List[str]] = None, - query_type: Optional[Union[str, _models.QueryType]] = None, - scoring_parameters: Optional[List[str]] = None, - scoring_profile: Optional[str] = None, - search_fields: Optional[List[str]] = None, - search_mode: Optional[Union[str, _models.SearchMode]] = None, - scoring_statistics: Optional[Union[str, _models.ScoringStatistics]] = None, - session_id: Optional[str] = None, - select: Optional[List[str]] = None, - skip: Optional[int] = None, - top: Optional[int] = None, - x_ms_client_request_id: Optional[str] = None, - semantic_configuration: Optional[str] = None, - semantic_error_handling: Optional[Union[str, _models.SemanticErrorMode]] = None, - semantic_max_wait_in_milliseconds: Optional[int] = None, - answers: Optional[Union[str, _models.QueryAnswerType]] = None, - captions: Optional[Union[str, _models.QueryCaptionType]] = None, - semantic_query: Optional[str] = None, - query_rewrites: Optional[Union[str, _models.QueryRewritesType]] = None, - debug: Optional[Union[str, _models.QueryDebugMode]] = None, - query_language: Optional[Union[str, _models.QueryLanguage]] = None, - speller: Optional[Union[str, _models.QuerySpellerType]] = None, - semantic_fields: Optional[List[str]] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs") - - # Construct parameters - if search_text is not None: - _params["search"] = _SERIALIZER.query("search_text", search_text, "str") - if include_total_result_count is not None: - _params["$count"] = 
_SERIALIZER.query("include_total_result_count", include_total_result_count, "bool") - if facets is not None: - _params["facet"] = [_SERIALIZER.query("facets", q, "str") if q is not None else "" for q in facets] - if filter is not None: - _params["$filter"] = _SERIALIZER.query("filter", filter, "str") - if highlight_fields is not None: - _params["highlight"] = _SERIALIZER.query("highlight_fields", highlight_fields, "[str]", div=",") - if highlight_post_tag is not None: - _params["highlightPostTag"] = _SERIALIZER.query("highlight_post_tag", highlight_post_tag, "str") - if highlight_pre_tag is not None: - _params["highlightPreTag"] = _SERIALIZER.query("highlight_pre_tag", highlight_pre_tag, "str") - if minimum_coverage is not None: - _params["minimumCoverage"] = _SERIALIZER.query("minimum_coverage", minimum_coverage, "float") - if order_by is not None: - _params["$orderby"] = _SERIALIZER.query("order_by", order_by, "[str]", div=",") - if query_type is not None: - _params["queryType"] = _SERIALIZER.query("query_type", query_type, "str") - if scoring_parameters is not None: - _params["scoringParameter"] = [ - _SERIALIZER.query("scoring_parameters", q, "str") if q is not None else "" for q in scoring_parameters - ] - if scoring_profile is not None: - _params["scoringProfile"] = _SERIALIZER.query("scoring_profile", scoring_profile, "str") - if search_fields is not None: - _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") - if search_mode is not None: - _params["searchMode"] = _SERIALIZER.query("search_mode", search_mode, "str") - if scoring_statistics is not None: - _params["scoringStatistics"] = _SERIALIZER.query("scoring_statistics", scoring_statistics, "str") - if session_id is not None: - _params["sessionId"] = _SERIALIZER.query("session_id", session_id, "str") - if select is not None: - _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") - if skip is not None: - _params["$skip"] = _SERIALIZER.query("skip", skip, "int") - if top is not None: - _params["$top"] = _SERIALIZER.query("top", top, "int") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if semantic_configuration is not None: - _params["semanticConfiguration"] = _SERIALIZER.query("semantic_configuration", semantic_configuration, "str") - if semantic_error_handling is not None: - _params["semanticErrorHandling"] = _SERIALIZER.query("semantic_error_handling", semantic_error_handling, "str") - if semantic_max_wait_in_milliseconds is not None: - _params["semanticMaxWaitInMilliseconds"] = _SERIALIZER.query( - "semantic_max_wait_in_milliseconds", semantic_max_wait_in_milliseconds, "int", minimum=700 - ) - if answers is not None: - _params["answers"] = _SERIALIZER.query("answers", answers, "str") - if captions is not None: - _params["captions"] = _SERIALIZER.query("captions", captions, "str") - if semantic_query is not None: - _params["semanticQuery"] = _SERIALIZER.query("semantic_query", semantic_query, "str") - if query_rewrites is not None: - _params["queryRewrites"] = _SERIALIZER.query("query_rewrites", query_rewrites, "str") - if debug is not None: - _params["debug"] = _SERIALIZER.query("debug", debug, "str") - if query_language is not None: - _params["queryLanguage"] = _SERIALIZER.query("query_language", query_language, "str") - if speller is not None: - _params["speller"] = _SERIALIZER.query("speller", speller, "str") - if semantic_fields is not None: - _params["semanticFields"] = _SERIALIZER.query("semantic_fields", semantic_fields, 
"[str]", div=",") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_search_post_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs/search.post.search") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_request( - key: str, - *, - selected_fields: Optional[List[str]] = None, - x_ms_client_request_id: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs('{key}')") - path_format_arguments = { - "key": _SERIALIZER.url("key", key, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - if selected_fields is not None: - _params["$select"] = _SERIALIZER.query("selected_fields", selected_fields, "[str]", div=",") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_suggest_get_request( - *, - search_text: str, - suggester_name: str, - filter: Optional[str] = None, - use_fuzzy_matching: Optional[bool] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - order_by: Optional[List[str]] = None, - search_fields: Optional[List[str]] = None, - select: Optional[List[str]] = None, - top: Optional[int] = None, - x_ms_client_request_id: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", 
"application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs/search.suggest") - - # Construct parameters - _params["search"] = _SERIALIZER.query("search_text", search_text, "str") - _params["suggesterName"] = _SERIALIZER.query("suggester_name", suggester_name, "str") - if filter is not None: - _params["$filter"] = _SERIALIZER.query("filter", filter, "str") - if use_fuzzy_matching is not None: - _params["fuzzy"] = _SERIALIZER.query("use_fuzzy_matching", use_fuzzy_matching, "bool") - if highlight_post_tag is not None: - _params["highlightPostTag"] = _SERIALIZER.query("highlight_post_tag", highlight_post_tag, "str") - if highlight_pre_tag is not None: - _params["highlightPreTag"] = _SERIALIZER.query("highlight_pre_tag", highlight_pre_tag, "str") - if minimum_coverage is not None: - _params["minimumCoverage"] = _SERIALIZER.query("minimum_coverage", minimum_coverage, "float") - if order_by is not None: - _params["$orderby"] = _SERIALIZER.query("order_by", order_by, "[str]", div=",") - if search_fields is not None: - _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") - if select is not None: - _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") - if top is not None: - _params["$top"] = _SERIALIZER.query("top", top, "int") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_suggest_post_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs/search.post.suggest") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_index_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs/search.index") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", 
api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_autocomplete_get_request( - *, - search_text: str, - suggester_name: str, - x_ms_client_request_id: Optional[str] = None, - autocomplete_mode: Optional[Union[str, _models.AutocompleteMode]] = None, - filter: Optional[str] = None, - use_fuzzy_matching: Optional[bool] = None, - highlight_post_tag: Optional[str] = None, - highlight_pre_tag: Optional[str] = None, - minimum_coverage: Optional[float] = None, - search_fields: Optional[List[str]] = None, - top: Optional[int] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs/search.autocomplete") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - _params["search"] = _SERIALIZER.query("search_text", search_text, "str") - _params["suggesterName"] = _SERIALIZER.query("suggester_name", suggester_name, "str") - if autocomplete_mode is not None: - _params["autocompleteMode"] = _SERIALIZER.query("autocomplete_mode", autocomplete_mode, "str") - if filter is not None: - _params["$filter"] = _SERIALIZER.query("filter", filter, "str") - if use_fuzzy_matching is not None: - _params["fuzzy"] = _SERIALIZER.query("use_fuzzy_matching", use_fuzzy_matching, "bool") - if highlight_post_tag is not None: - _params["highlightPostTag"] = _SERIALIZER.query("highlight_post_tag", highlight_post_tag, "str") - if highlight_pre_tag is not None: - _params["highlightPreTag"] = _SERIALIZER.query("highlight_pre_tag", highlight_pre_tag, "str") - if minimum_coverage is not None: - _params["minimumCoverage"] = _SERIALIZER.query("minimum_coverage", minimum_coverage, "float") - if search_fields is not None: - _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") - if top is not None: - _params["$top"] = _SERIALIZER.query("top", top, "int") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_autocomplete_post_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/docs/search.post.autocomplete") - - # Construct 
parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -class DocumentsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.SearchIndexClient`'s - :attr:`documents` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def count(self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any) -> int: - """Queries the number of documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Count-Documents - - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: int or the result of cls(response) - :rtype: int - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[int] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_count_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("int", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def search_get( - self, - search_text: 
Optional[str] = None, - search_options: Optional[_models.SearchOptions] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_text: A full-text search query expression; Use "*" or omit this parameter to - match all documents. Default value is None. - :type search_text: str - :param search_options: Parameter group. Default value is None. - :type search_options: ~azure.search.documents.models.SearchOptions - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) - - _include_total_result_count = None - _facets = None - _filter = None - _highlight_fields = None - _highlight_post_tag = None - _highlight_pre_tag = None - _minimum_coverage = None - _order_by = None - _query_type = None - _scoring_parameters = None - _scoring_profile = None - _search_fields = None - _search_mode = None - _scoring_statistics = None - _session_id = None - _select = None - _skip = None - _top = None - _x_ms_client_request_id = None - _semantic_configuration = None - _semantic_error_handling = None - _semantic_max_wait_in_milliseconds = None - _answers = None - _captions = None - _semantic_query = None - _query_rewrites = None - _debug = None - _query_language = None - _speller = None - _semantic_fields = None - if search_options is not None: - _answers = search_options.answers - _captions = search_options.captions - _debug = search_options.debug - _facets = search_options.facets - _filter = search_options.filter - _highlight_fields = search_options.highlight_fields - _highlight_post_tag = search_options.highlight_post_tag - _highlight_pre_tag = search_options.highlight_pre_tag - _include_total_result_count = search_options.include_total_result_count - _minimum_coverage = search_options.minimum_coverage - _order_by = search_options.order_by - _query_language = search_options.query_language - _query_rewrites = search_options.query_rewrites - _query_type = search_options.query_type - _scoring_parameters = search_options.scoring_parameters - _scoring_profile = search_options.scoring_profile - _scoring_statistics = search_options.scoring_statistics - _search_fields = search_options.search_fields - _search_mode = search_options.search_mode - _select = search_options.select - _semantic_configuration = search_options.semantic_configuration - _semantic_error_handling = search_options.semantic_error_handling - _semantic_fields = search_options.semantic_fields - _semantic_max_wait_in_milliseconds = search_options.semantic_max_wait_in_milliseconds - _semantic_query = search_options.semantic_query - _session_id = search_options.session_id - _skip = 
search_options.skip - _speller = search_options.speller - _top = search_options.top - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_search_get_request( - search_text=search_text, - include_total_result_count=_include_total_result_count, - facets=_facets, - filter=_filter, - highlight_fields=_highlight_fields, - highlight_post_tag=_highlight_post_tag, - highlight_pre_tag=_highlight_pre_tag, - minimum_coverage=_minimum_coverage, - order_by=_order_by, - query_type=_query_type, - scoring_parameters=_scoring_parameters, - scoring_profile=_scoring_profile, - search_fields=_search_fields, - search_mode=_search_mode, - scoring_statistics=_scoring_statistics, - session_id=_session_id, - select=_select, - skip=_skip, - top=_top, - x_ms_client_request_id=_x_ms_client_request_id, - semantic_configuration=_semantic_configuration, - semantic_error_handling=_semantic_error_handling, - semantic_max_wait_in_milliseconds=_semantic_max_wait_in_milliseconds, - answers=_answers, - captions=_captions, - semantic_query=_semantic_query, - query_rewrites=_query_rewrites, - debug=_debug, - query_language=_query_language, - speller=_speller, - semantic_fields=_semantic_fields, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def search_post( - self, - search_request: _models.SearchRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_request: The definition of the Search request. Required. - :type search_request: ~azure.search.documents.models.SearchRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
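# The query parameters mapped above ($filter, $orderby, $select, $top, $count) surface
# through the public SearchClient.search helper. A minimal usage sketch, assuming the
# usual azure-search-documents public surface; the endpoint, key, "hotels" index and
# field names are placeholders:
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("https://<service>.search.windows.net", "hotels", AzureKeyCredential("<api-key>"))
results = client.search(
    search_text="luxury",
    filter="rating ge 4",             # $filter
    order_by=["rating desc"],         # $orderby
    select=["hotelId", "hotelName"],  # $select
    top=10,                           # $top
    include_total_count=True,         # $count
)
for doc in results:
    print(doc["hotelName"])
print("total matches:", results.get_count())
print("documents in index:", client.get_document_count())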
- :paramtype content_type: str - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def search_post( - self, - search_request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_request: The definition of the Search request. Required. - :type search_request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def search_post( - self, - search_request: Union[_models.SearchRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchDocumentsResult: - """Searches for documents in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Search-Documents - - :param search_request: The definition of the Search request. Is either a SearchRequest type or - a IO[bytes] type. Required. - :type search_request: ~azure.search.documents.models.SearchRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: SearchDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SearchDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(search_request, (IOBase, bytes)): - _content = search_request - else: - _json = self._serialize.body(search_request, "SearchRequest") - - _request = build_search_post_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get( - self, - key: str, - selected_fields: Optional[List[str]] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> Dict[str, Any]: - """Retrieves a document from the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/lookup-document - - :param key: The key of the document to retrieve. Required. - :type key: str - :param selected_fields: List of field names to retrieve for the document; Any field not - retrieved will be missing from the returned document. Default value is None. - :type selected_fields: list[str] - :param request_options: Parameter group. Default value is None. 
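# A usage sketch for the document lookup operation described above, via the public
# SearchClient; the key and field names are placeholders, and selected_fields maps to
# the $select query parameter built by build_get_request:
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("https://<service>.search.windows.net", "hotels", AzureKeyCredential("<api-key>"))
doc = client.get_document(key="1", selected_fields=["hotelId", "hotelName"])
print(doc)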
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: dict mapping str to any or the result of cls(response) - :rtype: dict[str, any] - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[Dict[str, Any]] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - key=key, - selected_fields=selected_fields, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("{object}", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def suggest_get( - self, - search_text: str, - suggester_name: str, - suggest_options: Optional[_models.SuggestOptions] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param search_text: The search text to use to suggest documents. Must be at least 1 character, - and no more than 100 characters. Required. - :type search_text: str - :param suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :type suggester_name: str - :param suggest_options: Parameter group. Default value is None. - :type suggest_options: ~azure.search.documents.models.SuggestOptions - :param request_options: Parameter group. Default value is None. 
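# A usage sketch for the suggestions operation described above, assuming the index
# defines a suggester named "sg" (a placeholder); use_fuzzy_matching corresponds to the
# fuzzy query parameter assembled by build_suggest_get_request:
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("https://<service>.search.windows.net", "hotels", AzureKeyCredential("<api-key>"))
suggestions = client.suggest(search_text="lux", suggester_name="sg", use_fuzzy_matching=True, top=5)
for suggestion in suggestions:
    print(suggestion)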
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) - - _filter = None - _use_fuzzy_matching = None - _highlight_post_tag = None - _highlight_pre_tag = None - _minimum_coverage = None - _order_by = None - _search_fields = None - _select = None - _top = None - _x_ms_client_request_id = None - if suggest_options is not None: - _filter = suggest_options.filter - _highlight_post_tag = suggest_options.highlight_post_tag - _highlight_pre_tag = suggest_options.highlight_pre_tag - _minimum_coverage = suggest_options.minimum_coverage - _order_by = suggest_options.order_by - _search_fields = suggest_options.search_fields - _select = suggest_options.select - _top = suggest_options.top - _use_fuzzy_matching = suggest_options.use_fuzzy_matching - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_suggest_get_request( - search_text=search_text, - suggester_name=suggester_name, - filter=_filter, - use_fuzzy_matching=_use_fuzzy_matching, - highlight_post_tag=_highlight_post_tag, - highlight_pre_tag=_highlight_pre_tag, - minimum_coverage=_minimum_coverage, - order_by=_order_by, - search_fields=_search_fields, - select=_select, - top=_top, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def suggest_post( - self, - suggest_request: _models.SuggestRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param suggest_request: The Suggest request. Required. 
- :type suggest_request: ~azure.search.documents.models.SuggestRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def suggest_post( - self, - suggest_request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param suggest_request: The Suggest request. Required. - :type suggest_request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def suggest_post( - self, - suggest_request: Union[_models.SuggestRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SuggestDocumentsResult: - """Suggests documents in the index that match the given partial query text. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/suggestions - - :param suggest_request: The Suggest request. Is either a SuggestRequest type or a IO[bytes] - type. Required. - :type suggest_request: ~azure.search.documents.models.SuggestRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.models.RequestOptions - :return: SuggestDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.SuggestDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(suggest_request, (IOBase, bytes)): - _content = suggest_request - else: - _json = self._serialize.body(suggest_request, "SuggestRequest") - - _request = build_suggest_post_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SuggestDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def index( - self, - batch: _models.IndexBatch, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.IndexDocumentsResult: - """Sends a batch of document write actions to the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/addupdate-or-delete-documents - - :param batch: The batch of index actions. Required. - :type batch: ~azure.search.documents.models.IndexBatch - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
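# A usage sketch for the indexing operation described above. The public client exposes
# convenience helpers as well as an explicit IndexDocumentsBatch; the document shape and
# the "hotelId" key field are placeholders:
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import IndexDocumentsBatch, SearchClient

client = SearchClient("https://<service>.search.windows.net", "hotels", AzureKeyCredential("<api-key>"))
client.upload_documents(documents=[{"hotelId": "1", "hotelName": "Sample Inn"}])

batch = IndexDocumentsBatch()
batch.add_merge_actions([{"hotelId": "1", "rating": 4}])
batch.add_delete_actions([{"hotelId": "2"}])
for result in client.index_documents(batch):
    print(result.key, result.succeeded, result.status_code)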
- :paramtype content_type: str - :return: IndexDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.IndexDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def index( - self, - batch: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.IndexDocumentsResult: - """Sends a batch of document write actions to the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/addupdate-or-delete-documents - - :param batch: The batch of index actions. Required. - :type batch: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: IndexDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.IndexDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def index( - self, - batch: Union[_models.IndexBatch, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.IndexDocumentsResult: - """Sends a batch of document write actions to the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/addupdate-or-delete-documents - - :param batch: The batch of index actions. Is either a IndexBatch type or a IO[bytes] type. - Required. - :type batch: ~azure.search.documents.models.IndexBatch or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: IndexDocumentsResult or the result of cls(response) - :rtype: ~azure.search.documents.models.IndexDocumentsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.IndexDocumentsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(batch, (IOBase, bytes)): - _content = batch - else: - _json = self._serialize.body(batch, "IndexBatch") - - _request = build_index_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: 
PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 207]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("IndexDocumentsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def autocomplete_get( - self, - search_text: str, - suggester_name: str, - request_options: Optional[_models.RequestOptions] = None, - autocomplete_options: Optional[_models.AutocompleteOptions] = None, - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param search_text: The incomplete term which should be auto-completed. Required. - :type search_text: str - :param suggester_name: The name of the suggester as specified in the suggesters collection - that's part of the index definition. Required. - :type suggester_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :param autocomplete_options: Parameter group. Default value is None. - :type autocomplete_options: ~azure.search.documents.models.AutocompleteOptions - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - _autocomplete_mode = None - _filter = None - _use_fuzzy_matching = None - _highlight_post_tag = None - _highlight_pre_tag = None - _minimum_coverage = None - _search_fields = None - _top = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - if autocomplete_options is not None: - _autocomplete_mode = autocomplete_options.autocomplete_mode - _filter = autocomplete_options.filter - _highlight_post_tag = autocomplete_options.highlight_post_tag - _highlight_pre_tag = autocomplete_options.highlight_pre_tag - _minimum_coverage = autocomplete_options.minimum_coverage - _search_fields = autocomplete_options.search_fields - _top = autocomplete_options.top - _use_fuzzy_matching = autocomplete_options.use_fuzzy_matching - - _request = build_autocomplete_get_request( - search_text=search_text, - suggester_name=suggester_name, - x_ms_client_request_id=_x_ms_client_request_id, - autocomplete_mode=_autocomplete_mode, - filter=_filter, - use_fuzzy_matching=_use_fuzzy_matching, - highlight_post_tag=_highlight_post_tag, - highlight_pre_tag=_highlight_pre_tag, - 
minimum_coverage=_minimum_coverage, - search_fields=_search_fields, - top=_top, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def autocomplete_post( - self, - autocomplete_request: _models.AutocompleteRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param autocomplete_request: The definition of the Autocomplete request. Required. - :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def autocomplete_post( - self, - autocomplete_request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param autocomplete_request: The definition of the Autocomplete request. Required. - :type autocomplete_request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
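# A usage sketch for the autocomplete operation described above, again assuming a
# suggester named "sg"; only the required parameters are shown:
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("https://<service>.search.windows.net", "hotels", AzureKeyCredential("<api-key>"))
for completion in client.autocomplete(search_text="lux", suggester_name="sg"):
    print(completion)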
- :paramtype content_type: str - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def autocomplete_post( - self, - autocomplete_request: Union[_models.AutocompleteRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.AutocompleteResult: - """Autocompletes incomplete query terms based on input text and matching terms in the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/autocomplete - - :param autocomplete_request: The definition of the Autocomplete request. Is either a - AutocompleteRequest type or a IO[bytes] type. Required. - :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.models.RequestOptions - :return: AutocompleteResult or the result of cls(response) - :rtype: ~azure.search.documents.models.AutocompleteResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(autocomplete_request, (IOBase, bytes)): - _content = autocomplete_request - else: - _json = self._serialize.body(autocomplete_request, "AutocompleteRequest") - - _request = build_autocomplete_post_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - "indexName": self._serialize.url("self._config.index_name", self._config.index_name, "str"), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("AutocompleteResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py 
b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py new file mode 100644 index 000000000000..e382ca31e02d --- /dev/null +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py @@ -0,0 +1,6861 @@ +# pylint: disable=too-many-lines +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) Python Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is regenerated. +# -------------------------------------------------------------------------- +from io import IOBase +import json +import sys +from typing import Any, Callable, Dict, IO, Iterable, List, Literal, Optional, TypeVar, Union, overload +import urllib.parse + +from azure.core import MatchConditions +from azure.core.exceptions import ( + ClientAuthenticationError, + HttpResponseError, + ResourceExistsError, + ResourceModifiedError, + ResourceNotFoundError, + ResourceNotModifiedError, + StreamClosedError, + StreamConsumedError, + map_error, +) +from azure.core.paging import ItemPaged +from azure.core.pipeline import PipelineResponse +from azure.core.rest import HttpRequest, HttpResponse +from azure.core.tracing.decorator import distributed_trace +from azure.core.utils import case_insensitive_dict + +from .. import models as _models +from .._model_base import SdkJSONEncoder, _deserialize +from .._serialization import Serializer +from .._validation import api_version_validation +from .._vendor import SearchClientMixinABC, prep_if_match, prep_if_none_match + +if sys.version_info >= (3, 9): + from collections.abc import MutableMapping +else: + from typing import MutableMapping # type: ignore +JSON = MutableMapping[str, Any] # pylint: disable=unsubscriptable-object +T = TypeVar("T") +ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] + +_SERIALIZER = Serializer() +_SERIALIZER.client_side_validation = False + + +def build_data_sources_operations_create_or_update_request( # pylint: disable=name-too-long + data_source_name: str, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/datasources('{dataSourceName}')" + path_format_arguments = { + "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if skip_indexer_reset_requirement_for_cache is not None: + _params["ignoreResetRequirements"] = _SERIALIZER.query( + "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" + ) + + # Construct headers + _headers["Prefer"] = 
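# The new create_or_update builder above wires etag/match_condition into If-Match and
# If-None-Match headers via prep_if_match/prep_if_none_match. A usage sketch through the
# public SearchIndexerClient; the service, key, connection string and resource names
# below are placeholders:
from azure.core import MatchConditions
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes import SearchIndexerClient
from azure.search.documents.indexes.models import SearchIndexerDataContainer, SearchIndexerDataSourceConnection

indexer_client = SearchIndexerClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))
data_source = SearchIndexerDataSourceConnection(
    name="blob-datasource",
    type="azureblob",
    connection_string="<storage-connection-string>",
    container=SearchIndexerDataContainer(name="docs"),
)
created = indexer_client.create_data_source_connection(data_source)
# Update only if the resource is unchanged since it was created (sends If-Match: <etag>).
indexer_client.create_or_update_data_source_connection(created, match_condition=MatchConditions.IfNotModified)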
_SERIALIZER.header("prefer", prefer, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_data_sources_operations_delete_request( # pylint: disable=name-too-long + data_source_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/datasources('{dataSourceName}')" + path_format_arguments = { + "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_data_sources_operations_get_request( # pylint: disable=name-too-long + data_source_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/datasources('{dataSourceName}')" + path_format_arguments = { + "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_data_sources_operations_list_request( # pylint: disable=name-too-long + *, _select: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/datasources" + + # Construct parameters + _params["api-version"] = 
_SERIALIZER.query("api_version", api_version, "str") + if _select is not None: + _params["$select"] = _SERIALIZER.query("select", _select, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_data_sources_operations_create_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/datasources" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_reset_request(indexer_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers('{indexerName}')/search.reset" + path_format_arguments = { + "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_reset_docs_request( # pylint: disable=name-too-long + indexer_name: str, *, overwrite: Optional[bool] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers('{indexerName}')/search.resetdocs" + path_format_arguments = { + "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if overwrite is not None: + _params["overwrite"] = _SERIALIZER.query("overwrite", overwrite, "bool") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def 
build_indexers_operations_run_request(indexer_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers('{indexerName}')/search.run" + path_format_arguments = { + "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_create_or_update_request( # pylint: disable=name-too-long + indexer_name: str, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers('{indexerName}')" + path_format_arguments = { + "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if skip_indexer_reset_requirement_for_cache is not None: + _params["ignoreResetRequirements"] = _SERIALIZER.query( + "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" + ) + if disable_cache_reprocessing_change_detection is not None: + _params["disableCacheReprocessingChangeDetection"] = _SERIALIZER.query( + "disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, "bool" + ) + + # Construct headers + _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_delete_request( + indexer_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: 
str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers('{indexerName}')" + path_format_arguments = { + "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_get_request(indexer_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers('{indexerName}')" + path_format_arguments = { + "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_list_request(*, _select: Optional[str] = None, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if _select is not None: + _params["$select"] = _SERIALIZER.query("select", _select, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_create_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return 
HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexers_operations_get_status_request( # pylint: disable=name-too-long + indexer_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexers('{indexerName}')/search.status" + path_format_arguments = { + "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_skillsets_operations_create_or_update_request( # pylint: disable=name-too-long + skillset_name: str, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/skillsets('{skillsetName}')" + path_format_arguments = { + "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if skip_indexer_reset_requirement_for_cache is not None: + _params["ignoreResetRequirements"] = _SERIALIZER.query( + "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" + ) + if disable_cache_reprocessing_change_detection is not None: + _params["disableCacheReprocessingChangeDetection"] = _SERIALIZER.query( + "disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, "bool" + ) + + # Construct headers + _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_skillsets_operations_delete_request( # pylint: disable=name-too-long + skillset_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, 
**kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/skillsets('{skillsetName}')" + path_format_arguments = { + "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_skillsets_operations_get_request(skillset_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/skillsets('{skillsetName}')" + path_format_arguments = { + "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_skillsets_operations_list_request(*, _select: Optional[str] = None, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/skillsets" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if _select is not None: + _params["$select"] = _SERIALIZER.query("select", _select, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_skillsets_operations_create_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/skillsets" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") 
+ + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_skillsets_operations_reset_skills_request( # pylint: disable=name-too-long + skillset_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/skillsets('{skillsetName}')/search.resetskills" + path_format_arguments = { + "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_synonym_maps_operations_create_or_update_request( # pylint: disable=name-too-long + synonym_map_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/synonymmaps('{synonymMapName}')" + path_format_arguments = { + "synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_synonym_maps_operations_delete_request( # pylint: disable=name-too-long + synonym_map_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/synonymmaps('{synonymMapName}')" + path_format_arguments = { + "synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_synonym_maps_operations_get_request( # pylint: disable=name-too-long + synonym_map_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/synonymmaps('{synonymMapName}')" + path_format_arguments = { + "synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_synonym_maps_operations_list_request( # pylint: disable=name-too-long + *, _select: Optional[str] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/synonymmaps" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if _select is not None: + _params["$select"] = _SERIALIZER.query("select", _select, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_synonym_maps_operations_create_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/synonymmaps" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, 
"str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexes_operations_create_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexes_operations_list_request(*, _select: Optional[str] = None, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if _select is not None: + _params["$select"] = _SERIALIZER.query("select", _select, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexes_operations_create_or_update_request( # pylint: disable=name-too-long + index_name: str, + *, + allow_index_downtime: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if allow_index_downtime is not None: + _params["allowIndexDowntime"] = _SERIALIZER.query("allow_index_downtime", allow_index_downtime, "bool") + + # Construct headers + _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, 
match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexes_operations_delete_request( + index_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexes_operations_get_request(index_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_indexes_operations_get_statistics_request( # pylint: disable=name-too-long + index_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/search.stats" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def 
build_indexes_operations_analyze_request(index_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/search.analyze" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_aliases_operations_create_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/aliases" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_aliases_operations_list_request(**kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/aliases" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_aliases_operations_create_or_update_request( # pylint: disable=name-too-long + alias_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/aliases('{aliasName}')" + path_format_arguments = { + "aliasName": _SERIALIZER.url("alias_name", alias_name, "str"), + } + + _url: str 
= _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_aliases_operations_delete_request( + alias_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/aliases('{aliasName}')" + path_format_arguments = { + "aliasName": _SERIALIZER.url("alias_name", alias_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + if_match = prep_if_match(etag, match_condition) + if if_match is not None: + _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") + if_none_match = prep_if_none_match(etag, match_condition) + if if_none_match is not None: + _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") + + return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_aliases_operations_get_request(alias_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/aliases('{aliasName}')" + path_format_arguments = { + "aliasName": _SERIALIZER.url("alias_name", alias_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_count_request(index_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs/$count" + path_format_arguments = { + "indexName": 
_SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_search_get_request( # pylint: disable=name-too-long + index_name: str, + *, + search_text: Optional[str] = None, + include_total_result_count: Optional[bool] = None, + facets: Optional[List[str]] = None, + _filter: Optional[str] = None, + highlight_fields: Optional[List[str]] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[List[str]] = None, + query_type: Optional[Union[str, _models.QueryType]] = None, + scoring_parameters: Optional[List[str]] = None, + scoring_profile: Optional[str] = None, + search_fields: Optional[List[str]] = None, + search_mode: Optional[Union[str, _models.SearchMode]] = None, + scoring_statistics: Optional[Union[str, _models.ScoringStatistics]] = None, + session_id: Optional[str] = None, + _select: Optional[List[str]] = None, + _skip: Optional[int] = None, + _top: Optional[int] = None, + semantic_configuration: Optional[str] = None, + semantic_error_handling: Optional[Union[str, _models.SemanticErrorMode]] = None, + semantic_max_wait_in_milliseconds: Optional[int] = None, + answers: Optional[Union[str, _models.QueryAnswerType]] = None, + captions: Optional[Union[str, _models.QueryCaptionType]] = None, + semantic_query: Optional[str] = None, + query_rewrites: Optional[Union[str, _models.QueryRewritesType]] = None, + debug: Optional[Union[str, _models.QueryDebugMode]] = None, + query_language: Optional[Union[str, _models.QueryLanguage]] = None, + speller: Optional[Union[str, _models.QuerySpellerType]] = None, + semantic_fields: Optional[List[str]] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if search_text is not None: + _params["search"] = _SERIALIZER.query("search_text", search_text, "str") + if include_total_result_count is not None: + _params["$count"] = _SERIALIZER.query("include_total_result_count", include_total_result_count, "bool") + if facets is not None: + _params["facet"] = [_SERIALIZER.query("facets", q, "str") if q is not None else "" for q in facets] + if _filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", _filter, "str") + if highlight_fields is not None: + _params["highlight"] = _SERIALIZER.query("highlight_fields", highlight_fields, "[str]", div=",") + if highlight_post_tag is not None: + _params["highlightPostTag"] = _SERIALIZER.query("highlight_post_tag", highlight_post_tag, "str") + if highlight_pre_tag is not None: + _params["highlightPreTag"] = 
_SERIALIZER.query("highlight_pre_tag", highlight_pre_tag, "str") + if minimum_coverage is not None: + _params["minimumCoverage"] = _SERIALIZER.query("minimum_coverage", minimum_coverage, "float") + if order_by is not None: + _params["$orderby"] = _SERIALIZER.query("order_by", order_by, "[str]", div=",") + if query_type is not None: + _params["queryType"] = _SERIALIZER.query("query_type", query_type, "str") + if scoring_parameters is not None: + _params["scoringParameter"] = [ + _SERIALIZER.query("scoring_parameters", q, "str") if q is not None else "" for q in scoring_parameters + ] + if scoring_profile is not None: + _params["scoringProfile"] = _SERIALIZER.query("scoring_profile", scoring_profile, "str") + if search_fields is not None: + _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") + if search_mode is not None: + _params["searchMode"] = _SERIALIZER.query("search_mode", search_mode, "str") + if scoring_statistics is not None: + _params["scoringStatistics"] = _SERIALIZER.query("scoring_statistics", scoring_statistics, "str") + if session_id is not None: + _params["sessionId"] = _SERIALIZER.query("session_id", session_id, "str") + if _select is not None: + _params["$select"] = _SERIALIZER.query("select", _select, "[str]", div=",") + if _skip is not None: + _params["$skip"] = _SERIALIZER.query("skip", _skip, "int") + if _top is not None: + _params["$top"] = _SERIALIZER.query("top", _top, "int") + if semantic_configuration is not None: + _params["semanticConfiguration"] = _SERIALIZER.query("semantic_configuration", semantic_configuration, "str") + if semantic_error_handling is not None: + _params["semanticErrorHandling"] = _SERIALIZER.query("semantic_error_handling", semantic_error_handling, "str") + if semantic_max_wait_in_milliseconds is not None: + _params["semanticMaxWaitInMilliseconds"] = _SERIALIZER.query( + "semantic_max_wait_in_milliseconds", semantic_max_wait_in_milliseconds, "int" + ) + if answers is not None: + _params["answers"] = _SERIALIZER.query("answers", answers, "str") + if captions is not None: + _params["captions"] = _SERIALIZER.query("captions", captions, "str") + if semantic_query is not None: + _params["semanticQuery"] = _SERIALIZER.query("semantic_query", semantic_query, "str") + if query_rewrites is not None: + _params["queryRewrites"] = _SERIALIZER.query("query_rewrites", query_rewrites, "str") + if debug is not None: + _params["debug"] = _SERIALIZER.query("debug", debug, "str") + if query_language is not None: + _params["queryLanguage"] = _SERIALIZER.query("query_language", query_language, "str") + if speller is not None: + _params["speller"] = _SERIALIZER.query("speller", speller, "str") + if semantic_fields is not None: + _params["semanticFields"] = _SERIALIZER.query("semantic_fields", semantic_fields, "[str]", div=",") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_search_post_request( # pylint: disable=name-too-long + index_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # 
Construct URL + _url = "/indexes('{indexName}')/docs/search.post.search" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_get_request( + key: str, index_name: str, *, selected_fields: Optional[List[str]] = None, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs('{key}')" + path_format_arguments = { + "key": _SERIALIZER.url("key", key, "str"), + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + if selected_fields is not None: + _params["$select"] = _SERIALIZER.query("selected_fields", selected_fields, "[str]", div=",") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_suggest_get_request( # pylint: disable=name-too-long + index_name: str, + *, + search_text: str, + suggester_name: str, + _filter: Optional[str] = None, + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[List[str]] = None, + search_fields: Optional[List[str]] = None, + _select: Optional[List[str]] = None, + _top: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs/search.suggest" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + _params["search"] = _SERIALIZER.query("search_text", search_text, "str") + _params["suggesterName"] = _SERIALIZER.query("suggester_name", suggester_name, "str") + if _filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", _filter, "str") + if use_fuzzy_matching is not None: + _params["fuzzy"] = _SERIALIZER.query("use_fuzzy_matching", use_fuzzy_matching, "bool") + if highlight_post_tag is not None: + _params["highlightPostTag"] = _SERIALIZER.query("highlight_post_tag", highlight_post_tag, "str") + if highlight_pre_tag is not None: + 
_params["highlightPreTag"] = _SERIALIZER.query("highlight_pre_tag", highlight_pre_tag, "str") + if minimum_coverage is not None: + _params["minimumCoverage"] = _SERIALIZER.query("minimum_coverage", minimum_coverage, "float") + if order_by is not None: + _params["$orderby"] = _SERIALIZER.query("order_by", order_by, "[str]", div=",") + if search_fields is not None: + _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") + if _select is not None: + _params["$select"] = _SERIALIZER.query("select", _select, "[str]", div=",") + if _top is not None: + _params["$top"] = _SERIALIZER.query("top", _top, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_suggest_post_request( # pylint: disable=name-too-long + index_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs/search.post.suggest" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_index_request(index_name: str, **kwargs: Any) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs/search.index" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_autocomplete_get_request( # pylint: disable=name-too-long + index_name: str, + *, + search_text: str, + suggester_name: str, + autocomplete_mode: Optional[Union[str, _models.AutocompleteMode]] = None, + _filter: Optional[str] = None, + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: 
Optional[float] = None, + search_fields: Optional[List[str]] = None, + _top: Optional[int] = None, + **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs/search.autocomplete" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + _params["search"] = _SERIALIZER.query("search_text", search_text, "str") + _params["suggesterName"] = _SERIALIZER.query("suggester_name", suggester_name, "str") + if autocomplete_mode is not None: + _params["autocompleteMode"] = _SERIALIZER.query("autocomplete_mode", autocomplete_mode, "str") + if _filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", _filter, "str") + if use_fuzzy_matching is not None: + _params["fuzzy"] = _SERIALIZER.query("use_fuzzy_matching", use_fuzzy_matching, "bool") + if highlight_post_tag is not None: + _params["highlightPostTag"] = _SERIALIZER.query("highlight_post_tag", highlight_post_tag, "str") + if highlight_pre_tag is not None: + _params["highlightPreTag"] = _SERIALIZER.query("highlight_pre_tag", highlight_pre_tag, "str") + if minimum_coverage is not None: + _params["minimumCoverage"] = _SERIALIZER.query("minimum_coverage", minimum_coverage, "float") + if search_fields is not None: + _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") + if _top is not None: + _params["$top"] = _SERIALIZER.query("top", _top, "int") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_documents_operations_autocomplete_post_request( # pylint: disable=name-too-long + index_name: str, **kwargs: Any +) -> HttpRequest: + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexes('{indexName}')/docs/search.post.autocomplete" + path_format_arguments = { + "indexName": _SERIALIZER.url("index_name", index_name, "str"), + } + + _url: str = _url.format(**path_format_arguments) # type: ignore + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + if content_type is not None: + _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) + + +def build_search_get_service_statistics_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/servicestats" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class DataSourcesOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.SearchClient`'s + :attr:`data_sources_operations` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create_or_update( + self, + data_source_name: str, + data_source: _models.SearchIndexerDataSource, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + data_source_name: str, + data_source: JSON, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Required. + :type data_source: JSON + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + data_source_name: str, + data_source: IO[bytes], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Required. + :type data_source: IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + params_added_on={"2024-11-01-preview": ["skip_indexer_reset_requirement_for_cache"]}, + ) + def create_or_update( + self, + data_source_name: str, + data_source: Union[_models.SearchIndexerDataSource, JSON, IO[bytes]], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource or updates a datasource if it already exists. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :param data_source: The definition of the datasource to create or update. Is one of the + following types: SearchIndexerDataSource, JSON, IO[bytes] Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource or JSON or IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerDataSource. 
The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(data_source, (IOBase, bytes)): + _content = data_source + else: + _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_data_sources_operations_create_or_update_request( + data_source_name=data_source_name, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerDataSource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + data_source_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a datasource. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
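Reviewer note, illustrative only: a minimal usage sketch of the data-source create_or_update implemented above. The import path, client constructor shape, and the REST field names in the body dict are assumptions not confirmed by this patch; the call itself follows the overloads shown, and `prefer` is passed explicitly because the implementation pops it without a default.

    # Illustrative sketch only; import path and constructor are assumptions.
    from azure.core import MatchConditions
    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents._generated import SearchClient  # assumed import path

    client = SearchClient(
        endpoint="https://<service>.search.windows.net",  # assumed constructor shape
        credential=AzureKeyCredential("<admin-key>"),
    )

    # JSON (dict) bodies are accepted alongside SearchIndexerDataSource models.
    data_source = {
        "name": "hotels-ds",                    # assumed REST field names
        "type": "azureblob",
        "credentials": {"connectionString": "<connection-string>"},
        "container": {"name": "hotels"},
    }

    updated = client.data_sources_operations.create_or_update(
        data_source_name="hotels-ds",
        data_source=data_source,
        etag='"0x8DC123456789AB"',                      # only update if unchanged...
        match_condition=MatchConditions.IfNotModified,  # ...since this etag was read
        prefer="return=representation",                 # popped without a default above
    )
    print(updated["name"])  # the returned model is MutableMapping-compatible

If the precondition fails with HTTP 412, the error map shown above surfaces it as ResourceModifiedError for IfNotModified (and the analogous types for IfPresent/IfMissing).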
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_data_sources_operations_delete_request( + data_source_name=data_source_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndexerDataSource: + """Retrieves a datasource definition. + + :param data_source_name: The name of the datasource. Required. + :type data_source_name: str + :return: SearchIndexerDataSource. 
The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) + + _request = build_data_sources_operations_get_request( + data_source_name=data_source_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerDataSource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListDataSourcesResult: + """Lists all datasources available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListDataSourcesResult. 
The ListDataSourcesResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListDataSourcesResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListDataSourcesResult] = kwargs.pop("cls", None) + + _request = build_data_sources_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListDataSourcesResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create( + self, data_source: _models.SearchIndexerDataSource, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, data_source: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Required. + :type data_source: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, data_source: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Required. 
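Another illustrative sketch, covering the `list` and `get` operations implemented above (not part of the generated code). Client construction is the same assumption as in the earlier sketch, and `"value"` is the assumed wire name of the collection on the MutableMapping-compatible result.

    # Illustrative sketch only.
    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents._generated import SearchClient  # assumed import path

    client = SearchClient(
        endpoint="https://<service>.search.windows.net",
        credential=AzureKeyCredential("<admin-key>"),
    )

    # Only fetch the names; the keyword is `_select` on this generated surface.
    listed = client.data_sources_operations.list(_select="name")
    for ds in listed.get("value", []):  # "value" is an assumed wire name
        print(ds["name"])

    # Retrieve a single definition by name.
    hotels_ds = client.data_sources_operations.get("hotels-ds")
    print(hotels_ds["type"])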
+ :type data_source: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create( + self, data_source: Union[_models.SearchIndexerDataSource, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SearchIndexerDataSource: + """Creates a new datasource. + + :param data_source: The definition of the datasource to create. Is one of the following types: + SearchIndexerDataSource, JSON, IO[bytes] Required. + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource or JSON or IO[bytes] + :return: SearchIndexerDataSource. The SearchIndexerDataSource is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerDataSource + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(data_source, (IOBase, bytes)): + _content = data_source + else: + _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_data_sources_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerDataSource, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class IndexersOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.SearchClient`'s + :attr:`indexers_operations` attribute. 
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def reset(self, indexer_name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Resets the change tracking state associated with an indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexers_operations_reset_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + def reset_docs( + self, + indexer_name: str, + keys_or_ids: Optional[_models.DocumentKeysOrIds] = None, + *, + overwrite: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Default value is None. + :type keys_or_ids: ~azure.search.documents.models.DocumentKeysOrIds + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. + :paramtype overwrite: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def reset_docs( + self, + indexer_name: str, + keys_or_ids: Optional[JSON] = None, + *, + overwrite: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. 
+ + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Default value is None. + :type keys_or_ids: JSON + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. + :paramtype overwrite: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def reset_docs( + self, + indexer_name: str, + keys_or_ids: Optional[IO[bytes]] = None, + *, + overwrite: Optional[bool] = None, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Default value is None. + :type keys_or_ids: IO[bytes] + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. + :paramtype overwrite: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": [ + "api_version", + "overwrite", + "client_request_id", + "indexer_name", + "content_type", + "accept", + ] + }, + ) + def reset_docs( # pylint: disable=inconsistent-return-statements + self, + indexer_name: str, + keys_or_ids: Optional[Union[_models.DocumentKeysOrIds, JSON, IO[bytes]]] = None, + *, + overwrite: Optional[bool] = None, + **kwargs: Any + ) -> None: + """Resets specific documents in the datasource to be selectively re-ingested by + the indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param keys_or_ids: The keys or ids of the documents to be re-ingested. If keys are provided, + the + document key field must be specified in the indexer configuration. If ids are + provided, the document key field is ignored. Is one of the following types: DocumentKeysOrIds, + JSON, IO[bytes] Default value is None. + :type keys_or_ids: ~azure.search.documents.models.DocumentKeysOrIds or JSON or IO[bytes] + :keyword overwrite: If false, keys or ids will be appended to existing ones. If true, only the + keys + or ids in this payload will be queued to be re-ingested. Default value is None. 
+ :paramtype overwrite: bool + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(keys_or_ids, (IOBase, bytes)): + _content = keys_or_ids + else: + if keys_or_ids is not None: + _content = json.dumps(keys_or_ids, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + else: + _content = None + + _request = build_indexers_operations_reset_docs_request( + indexer_name=indexer_name, + overwrite=overwrite, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def run(self, indexer_name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements + """Runs an indexer on-demand. + + :param indexer_name: The name of the indexer. Required. 
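An illustrative sketch of the `reset_docs` operation implemented above, followed by an on-demand `run` (not part of the generated code). The dict keys in the payload are assumed REST wire names; a `DocumentKeysOrIds` model could be passed instead per the overloads above.

    # Illustrative sketch only; payload field names are assumptions.
    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents._generated import SearchClient  # assumed import path

    client = SearchClient(
        endpoint="https://<service>.search.windows.net",
        credential=AzureKeyCredential("<admin-key>"),
    )

    # Queue two documents for selective re-ingestion, replacing any previously queued keys.
    client.indexers_operations.reset_docs(
        "hotels-indexer",
        {"documentKeys": ["hotel-1", "hotel-42"]},  # assumed wire name for the key list
        overwrite=True,
    )

    # Kick off the indexer on demand; both calls return None on HTTP 204.
    client.indexers_operations.run("hotels-indexer")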
+ :type indexer_name: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexers_operations_run_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @overload + def create_or_update( + self, + indexer_name: str, + indexer: _models.SearchIndexer, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Required. + :type indexer: ~azure.search.documents.models.SearchIndexer + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + indexer_name: str, + indexer: JSON, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. 
+ + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Required. + :type indexer: JSON + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + indexer_name: str, + indexer: IO[bytes], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Required. + :type indexer: IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + params_added_on={ + "2024-11-01-preview": [ + "skip_indexer_reset_requirement_for_cache", + "disable_cache_reprocessing_change_detection", + ] + }, + ) + def create_or_update( + self, + indexer_name: str, + indexer: Union[_models.SearchIndexer, JSON, IO[bytes]], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer or updates an indexer if it already exists. + + :param indexer_name: The name of the indexer. 
Required. + :type indexer_name: str + :param indexer: The definition of the indexer to create or update. Is one of the following + types: SearchIndexer, JSON, IO[bytes] Required. + :type indexer: ~azure.search.documents.models.SearchIndexer or JSON or IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(indexer, (IOBase, bytes)): + _content = indexer + else: + _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexers_operations_create_or_update_request( + indexer_name=indexer_name, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, + disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + 
deserialized = _deserialize(_models.SearchIndexer, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + indexer_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes an indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexers_operations_delete_request( + indexer_name=indexer_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: + """Retrieves an indexer definition. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :return: SearchIndexer. 
The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) + + _request = build_indexers_operations_get_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexer, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListIndexersResult: + """Lists all indexers available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListIndexersResult. 
The ListIndexersResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListIndexersResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListIndexersResult] = kwargs.pop("cls", None) + + _request = build_indexers_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListIndexersResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create( + self, indexer: _models.SearchIndexer, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Required. + :type indexer: ~azure.search.documents.models.SearchIndexer + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, indexer: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Required. + :type indexer: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, indexer: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Required. + :type indexer: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexer. 
The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create(self, indexer: Union[_models.SearchIndexer, JSON, IO[bytes]], **kwargs: Any) -> _models.SearchIndexer: + """Creates a new indexer. + + :param indexer: The definition of the indexer to create. Is one of the following types: + SearchIndexer, JSON, IO[bytes] Required. + :type indexer: ~azure.search.documents.models.SearchIndexer or JSON or IO[bytes] + :return: SearchIndexer. The SearchIndexer is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexer + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(indexer, (IOBase, bytes)): + _content = indexer + else: + _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexers_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexer, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexerStatus: + """Returns the current status and execution history of an indexer. + + :param indexer_name: The name of the indexer. Required. + :type indexer_name: str + :return: SearchIndexerStatus. 
The SearchIndexerStatus is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerStatus + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexerStatus] = kwargs.pop("cls", None) + + _request = build_indexers_operations_get_status_request( + indexer_name=indexer_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerStatus, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class SkillsetsOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.SearchClient`'s + :attr:`skillsets_operations` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create_or_update( + self, + skillset_name: str, + skillset: _models.SearchIndexerSkillset, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. 
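An illustrative sketch of the `get_status` operation implemented above (not part of the generated code). The `"status"`, `"lastResult"`, and `"errorMessage"` keys are assumed REST wire names on the MutableMapping-compatible result.

    # Illustrative sketch only; result field names are assumptions.
    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents._generated import SearchClient  # assumed import path

    client = SearchClient(
        endpoint="https://<service>.search.windows.net",
        credential=AzureKeyCredential("<admin-key>"),
    )

    status = client.indexers_operations.get_status("hotels-indexer")
    print(status["status"])             # overall indexer status (assumed wire name)
    last = status.get("lastResult")     # most recent execution, if any
    if last:
        print(last["status"], last.get("errorMessage"))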
+ :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + skillset_name: str, + skillset: JSON, + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Required. + :type skillset: JSON + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + skillset_name: str, + skillset: IO[bytes], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Required. + :type skillset: IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. 
+ :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + params_added_on={ + "2024-11-01-preview": [ + "skip_indexer_reset_requirement_for_cache", + "disable_cache_reprocessing_change_detection", + ] + }, + ) + def create_or_update( + self, + skillset_name: str, + skillset: Union[_models.SearchIndexerSkillset, JSON, IO[bytes]], + *, + skip_indexer_reset_requirement_for_cache: Optional[bool] = None, + disable_cache_reprocessing_change_detection: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service or updates the skillset if it + already exists. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skillset: The skillset containing one or more skills to create or update in a search + service. Is one of the following types: SearchIndexerSkillset, JSON, IO[bytes] Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset or JSON or IO[bytes] + :keyword skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default + value is None. + :paramtype skip_indexer_reset_requirement_for_cache: bool + :keyword disable_cache_reprocessing_change_detection: Disables cache reprocessing change + detection. Default value is None. + :paramtype disable_cache_reprocessing_change_detection: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndexerSkillset. 
The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(skillset, (IOBase, bytes)): + _content = skillset + else: + _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_skillsets_operations_create_or_update_request( + skillset_name=skillset_name, + skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, + disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerSkillset, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + skillset_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_skillsets_operations_delete_request( + skillset_name=skillset_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerSkillset: + """Retrieves a skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :return: SearchIndexerSkillset. 
The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) + + _request = build_skillsets_operations_get_request( + skillset_name=skillset_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerSkillset, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSkillsetsResult: + """List all skillsets in a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListSkillsetsResult. 
The ListSkillsetsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListSkillsetsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListSkillsetsResult] = kwargs.pop("cls", None) + + _request = build_skillsets_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListSkillsetsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create( + self, skillset: _models.SearchIndexerSkillset, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. + + :param skillset: The skillset containing one or more skills to create in a search service. + Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, skillset: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. + + :param skillset: The skillset containing one or more skills to create in a search service. + Required. + :type skillset: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, skillset: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. 
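+
+        A minimal usage sketch for this overload (illustrative only; the
+        already-constructed generated client ``client`` and the attribute name
+        ``skillsets_operations`` are assumptions, not part of the generated code):
+
+        .. code-block:: python
+
+            # Hypothetical: create a skillset from a raw JSON definition on disk.
+            with open("skillset.json", "rb") as payload:
+                created = client.skillsets_operations.create(payload)
+            print(created["name"])  # returned model is MutableMapping-compatible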
+ + :param skillset: The skillset containing one or more skills to create in a search service. + Required. + :type skillset: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create( + self, skillset: Union[_models.SearchIndexerSkillset, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SearchIndexerSkillset: + """Creates a new skillset in a search service. + + :param skillset: The skillset containing one or more skills to create in a search service. Is + one of the following types: SearchIndexerSkillset, JSON, IO[bytes] Required. + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset or JSON or IO[bytes] + :return: SearchIndexerSkillset. The SearchIndexerSkillset is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndexerSkillset + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(skillset, (IOBase, bytes)): + _content = skillset + else: + _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_skillsets_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndexerSkillset, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def reset_skills( + self, + skillset_name: str, + skill_names: _models.SkillNames, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. Required. 
+ :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Required. + :type skill_names: ~azure.search.documents.models.SkillNames + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def reset_skills( + self, skillset_name: str, skill_names: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Required. + :type skill_names: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def reset_skills( + self, skillset_name: str, skill_names: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Required. + :type skill_names: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": ["api_version", "client_request_id", "skillset_name", "content_type", "accept"] + }, + ) + def reset_skills( # pylint: disable=inconsistent-return-statements + self, skillset_name: str, skill_names: Union[_models.SkillNames, JSON, IO[bytes]], **kwargs: Any + ) -> None: + """Reset an existing skillset in a search service. + + :param skillset_name: The name of the skillset. Required. + :type skillset_name: str + :param skill_names: The names of the skills to reset. If not specified, all skills in the + skillset + will be reset. Is one of the following types: SkillNames, JSON, IO[bytes] Required. 
+ :type skill_names: ~azure.search.documents.models.SkillNames or JSON or IO[bytes] + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[None] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(skill_names, (IOBase, bytes)): + _content = skill_names + else: + _content = json.dumps(skill_names, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_skillsets_operations_reset_skills_request( + skillset_name=skillset_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + +class SynonymMapsOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.SearchClient`'s + :attr:`synonym_maps_operations` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create_or_update( + self, + synonym_map_name: str, + synonym_map: _models.SynonymMap, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + synonym_map_name: str, + synonym_map: JSON, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Required. + :type synonym_map: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + synonym_map_name: str, + synonym_map: IO[bytes], + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Required. + :type synonym_map: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update( + self, + synonym_map_name: str, + synonym_map: Union[_models.SynonymMap, JSON, IO[bytes]], + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map or updates a synonym map if it already exists. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :param synonym_map: The definition of the synonym map to create or update. Is one of the + following types: SynonymMap, JSON, IO[bytes] Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap or JSON or IO[bytes] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(synonym_map, (IOBase, bytes)): + _content = synonym_map + else: + _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_synonym_maps_operations_create_or_update_request( + synonym_map_name=synonym_map_name, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SynonymMap, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + synonym_map_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a synonym map. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_synonym_maps_operations_delete_request( + synonym_map_name=synonym_map_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: + """Retrieves a synonym map definition. + + :param synonym_map_name: The name of the synonym map. Required. + :type synonym_map_name: str + :return: SynonymMap. 
The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) + + _request = build_synonym_maps_operations_get_request( + synonym_map_name=synonym_map_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SynonymMap, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSynonymMapsResult: + """Lists all synonym maps available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. + :paramtype _select: str + :return: ListSynonymMapsResult. 
The ListSynonymMapsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListSynonymMapsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListSynonymMapsResult] = kwargs.pop("cls", None) + + _request = build_synonym_maps_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListSynonymMapsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def create( + self, synonym_map: _models.SynonymMap, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, synonym_map: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Required. + :type synonym_map: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create( + self, synonym_map: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Required. + :type synonym_map: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". 
+ :paramtype content_type: str + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create(self, synonym_map: Union[_models.SynonymMap, JSON, IO[bytes]], **kwargs: Any) -> _models.SynonymMap: + """Creates a new synonym map. + + :param synonym_map: The definition of the synonym map to create. Is one of the following types: + SynonymMap, JSON, IO[bytes] Required. + :type synonym_map: ~azure.search.documents.models.SynonymMap or JSON or IO[bytes] + :return: SynonymMap. The SynonymMap is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SynonymMap + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(synonym_map, (IOBase, bytes)): + _content = synonym_map + else: + _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_synonym_maps_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SynonymMap, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class IndexesOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.SearchClient`'s + :attr:`indexes_operations` attribute. 
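+
+    A minimal usage sketch (illustrative only; it assumes an already-constructed
+    generated client named ``client`` that exposes the
+    :attr:`indexes_operations` attribute described above):
+
+    .. code-block:: python
+
+        # Hypothetical: fetch an index definition and its statistics, then list all indexes.
+        index = client.indexes_operations.get("my-index")
+        stats = client.indexes_operations.get_statistics("my-index")
+        for idx in client.indexes_operations.list():
+            print(idx["name"])  # models behave like MutableMapping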
+ """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create( + self, index: _models.SearchIndex, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Required. + :type index: ~azure.search.documents.models.SearchIndex + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, index: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Required. + :type index: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, index: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Required. + :type index: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwargs: Any) -> _models.SearchIndex: + """Creates a new search index. + + :param index: The definition of the index to create. Is one of the following types: + SearchIndex, JSON, IO[bytes] Required. + :type index: ~azure.search.documents.models.SearchIndex or JSON or IO[bytes] + :return: SearchIndex. 
The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(index, (IOBase, bytes)): + _content = index + else: + _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexes_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndex, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> Iterable["_models.SearchIndex"]: + """Lists all indexes available for a search service. + + :keyword _select: Selects which top-level properties to retrieve. + Specified as a comma-separated list of JSON property names, + or '*' for all properties. The default is all properties. Default value is None. 
+ :paramtype _select: str + :return: An iterator like instance of SearchIndex + :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.models.SearchIndex] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.SearchIndex]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_indexes_operations_list_request( + _select=_select, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.SearchIndex], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @overload + def create_or_update( + self, + index_name: str, + index: _models.SearchIndex, + *, + allow_index_downtime: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Required. + :type index: ~azure.search.documents.models.SearchIndex + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. 
Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + index_name: str, + index: JSON, + *, + allow_index_downtime: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Required. + :type index: JSON + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + index_name: str, + index: IO[bytes], + *, + allow_index_downtime: Optional[bool] = None, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Required. + :type index: IO[bytes] + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def create_or_update( + self, + index_name: str, + index: Union[_models.SearchIndex, JSON, IO[bytes]], + *, + allow_index_downtime: Optional[bool] = None, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchIndex: + """Creates a new search index or updates an index if it already exists. + + :param index_name: The name of the index. Required. + :type index_name: str + :param index: The definition of the index to create or update. Is one of the following types: + SearchIndex, JSON, IO[bytes] Required. + :type index: ~azure.search.documents.models.SearchIndex or JSON or IO[bytes] + :keyword allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters + to be added to + an index by taking the index offline for at least a few seconds. This + temporarily causes indexing and query requests to fail. Performance and write + availability of the index can be impaired for several minutes after the index + is updated, or longer for very large indexes. Default value is None. + :paramtype allow_index_downtime: bool + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchIndex. 
The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(index, (IOBase, bytes)): + _content = index + else: + _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexes_operations_create_or_update_request( + index_name=index_name, + allow_index_downtime=allow_index_downtime, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndex, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def delete( # pylint: disable=inconsistent-return-statements + self, + index_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a search index and all the documents it contains. This operation is + permanent, with no recovery option. Make sure you have a master copy of your + index definition, data ingestion code, and a backup of the primary data source + in case you need to re-build the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_indexes_operations_delete_request( + index_name=index_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + def get(self, index_name: str, **kwargs: Any) -> _models.SearchIndex: + """Retrieves an index definition. + + :param index_name: The name of the index. Required. + :type index_name: str + :return: SearchIndex. 
The SearchIndex is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchIndex + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) + + _request = build_indexes_operations_get_request( + index_name=index_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchIndex, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get_statistics(self, index_name: str, **kwargs: Any) -> _models.GetIndexStatisticsResult: + """Returns statistics for the given index, including a document count and storage + usage. + + :param index_name: The name of the index. Required. + :type index_name: str + :return: GetIndexStatisticsResult. 
The GetIndexStatisticsResult is compatible with + MutableMapping + :rtype: ~azure.search.documents.models.GetIndexStatisticsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.GetIndexStatisticsResult] = kwargs.pop("cls", None) + + _request = build_indexes_operations_get_statistics_request( + index_name=index_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.GetIndexStatisticsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def analyze( + self, index_name: str, request: _models.AnalyzeRequest, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Required. + :type request: ~azure.search.documents.models.AnalyzeRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeResult. The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def analyze( + self, index_name: str, request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Required. + :type request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeResult. 
The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def analyze( + self, index_name: str, request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Required. + :type request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AnalyzeResult. The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def analyze( + self, index_name: str, request: Union[_models.AnalyzeRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.AnalyzeResult: + """Shows how an analyzer breaks text into tokens. + + :param index_name: The name of the index. Required. + :type index_name: str + :param request: The text and analyzer or analysis components to test. Is one of the following + types: AnalyzeRequest, JSON, IO[bytes] Required. + :type request: ~azure.search.documents.models.AnalyzeRequest or JSON or IO[bytes] + :return: AnalyzeResult. The AnalyzeResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AnalyzeResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(request, (IOBase, bytes)): + _content = request + else: + _content = json.dumps(request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_indexes_operations_analyze_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AnalyzeResult, response.json()) 
+ + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class AliasesOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.SearchClient`'s + :attr:`aliases_operations` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @overload + def create( + self, alias: _models.SearchAlias, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Required. + :type alias: ~azure.search.documents.models.SearchAlias + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, alias: JSON, *, content_type: str = "application/json", **kwargs: Any) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Required. + :type alias: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create(self, alias: IO[bytes], *, content_type: str = "application/json", **kwargs: Any) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Required. + :type alias: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "content_type", "accept"]}, + ) + def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwargs: Any) -> _models.SearchAlias: + """Creates a new search alias. + + :param alias: The definition of the alias to create. Is one of the following types: + SearchAlias, JSON, IO[bytes] Required. + :type alias: ~azure.search.documents.models.SearchAlias or JSON or IO[bytes] + :return: SearchAlias. 
The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(alias, (IOBase, bytes)): + _content = alias + else: + _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_aliases_operations_create_request( + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchAlias, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "accept"]}, + ) + def list(self, **kwargs: Any) -> Iterable["_models.SearchAlias"]: + """Lists all aliases available for a search service. 
+ + :return: An iterator like instance of SearchAlias + :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.models.SearchAlias] + :raises ~azure.core.exceptions.HttpResponseError: + """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.SearchAlias]] = kwargs.pop("cls", None) + + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + def prepare_request(next_link=None): + if not next_link: + + _request = build_aliases_operations_list_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + return _request + + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.SearchAlias], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, iter(list_of_elem) + + def get_next(next_link=None): + _request = prepare_request(next_link) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + return pipeline_response + + return ItemPaged(get_next, extract_data) + + @overload + def create_or_update( + self, + alias_name: str, + alias: _models.SearchAlias, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Required. + :type alias: ~azure.search.documents.models.SearchAlias + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. 
+ :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + alias_name: str, + alias: JSON, + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Required. + :type alias: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def create_or_update( + self, + alias_name: str, + alias: IO[bytes], + *, + content_type: str = "application/json", + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Required. + :type alias: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": [ + "api_version", + "prefer", + "client_request_id", + "alias_name", + "content_type", + "accept", + "etag", + "match_condition", + ] + }, + ) + def create_or_update( + self, + alias_name: str, + alias: Union[_models.SearchAlias, JSON, IO[bytes]], + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> _models.SearchAlias: + """Creates a new search alias or updates an alias if it already exists. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :param alias: The definition of the alias to create or update. Is one of the following types: + SearchAlias, JSON, IO[bytes] Required. + :type alias: ~azure.search.documents.models.SearchAlias or JSON or IO[bytes] + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. 
+ :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: SearchAlias. The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + prefer: Literal["return=representation"] = kwargs.pop("prefer") + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(alias, (IOBase, bytes)): + _content = alias + else: + _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_aliases_operations_create_or_update_request( + alias_name=alias_name, + etag=etag, + match_condition=match_condition, + prefer=prefer, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchAlias, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={ + "2024-11-01-preview": [ + "api_version", + "client_request_id", + "alias_name", + "accept", + "etag", + "match_condition", + ] + }, + ) + def delete( # pylint: disable=inconsistent-return-statements + self, + alias_name: str, + *, + etag: Optional[str] = None, + match_condition: Optional[MatchConditions] = None, + **kwargs: Any + ) -> None: + """Deletes a search alias and its associated mapping to an index. This operation + is permanent, with no recovery option. The mapped index is untouched by this + operation. + + :param alias_name: The name of the alias. Required. 
+ :type alias_name: str + :keyword etag: check if resource is changed. Set None to skip checking etag. Default value is + None. + :paramtype etag: str + :keyword match_condition: The match condition to use upon the etag. Default value is None. + :paramtype match_condition: ~azure.core.MatchConditions + :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + if match_condition == MatchConditions.IfNotModified: + error_map[412] = ResourceModifiedError + elif match_condition == MatchConditions.IfPresent: + error_map[412] = ResourceNotFoundError + elif match_condition == MatchConditions.IfMissing: + error_map[412] = ResourceExistsError + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[None] = kwargs.pop("cls", None) + + _request = build_aliases_operations_delete_request( + alias_name=alias_name, + etag=etag, + match_condition=match_condition, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if cls: + return cls(pipeline_response, None, {}) # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2024-11-01-preview", + params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "alias_name", "accept"]}, + ) + def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: + """Retrieves an alias definition. + + :param alias_name: The name of the alias. Required. + :type alias_name: str + :return: SearchAlias. 
The SearchAlias is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchAlias + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) + + _request = build_aliases_operations_get_request( + alias_name=alias_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 201]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchAlias, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class DocumentsOperationsOperations: + """ + .. warning:: + **DO NOT** instantiate this class directly. + + Instead, you should access the following operations through + :class:`~azure.search.documents.SearchClient`'s + :attr:`documents_operations` attribute. + """ + + def __init__(self, *args, **kwargs): + input_args = list(args) + self._client = input_args.pop(0) if input_args else kwargs.pop("client") + self._config = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + + @distributed_trace + def count(self, index_name: str, **kwargs: Any) -> int: + """Queries the number of documents in the index. + + :param index_name: The name of the index. Required. 
+ :type index_name: str + :return: int + :rtype: int + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[int] = kwargs.pop("cls", None) + + _request = build_documents_operations_count_request( + index_name=index_name, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(int, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + params_added_on={ + "2024-11-01-preview": ["query_rewrites", "debug", "query_language", "speller", "semantic_fields"] + }, + ) + def search_get( + self, + index_name: str, + *, + search_text: Optional[str] = None, + include_total_result_count: Optional[bool] = None, + facets: Optional[List[str]] = None, + _filter: Optional[str] = None, + highlight_fields: Optional[List[str]] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[List[str]] = None, + query_type: Optional[Union[str, _models.QueryType]] = None, + scoring_parameters: Optional[List[str]] = None, + scoring_profile: Optional[str] = None, + search_fields: Optional[List[str]] = None, + search_mode: Optional[Union[str, _models.SearchMode]] = None, + scoring_statistics: Optional[Union[str, _models.ScoringStatistics]] = None, + session_id: Optional[str] = None, + _select: Optional[List[str]] = None, + _skip: Optional[int] = None, + _top: Optional[int] = None, + semantic_configuration: Optional[str] = None, + semantic_error_handling: Optional[Union[str, _models.SemanticErrorMode]] = None, + semantic_max_wait_in_milliseconds: Optional[int] = None, + answers: Optional[Union[str, _models.QueryAnswerType]] = None, + captions: Optional[Union[str, _models.QueryCaptionType]] = None, + semantic_query: Optional[str] = None, + query_rewrites: Optional[Union[str, _models.QueryRewritesType]] = None, + debug: Optional[Union[str, _models.QueryDebugMode]] = None, + query_language: Optional[Union[str, _models.QueryLanguage]] = None, + speller: Optional[Union[str, _models.QuerySpellerType]] = None, + semantic_fields: Optional[List[str]] = None, + **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in 
the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword search_text: A full-text search query expression; Use "*" or omit this parameter to + match + all documents. Default value is None. + :paramtype search_text: str + :keyword include_total_result_count: A value that specifies whether to fetch the total count of + results. Default is + false. Setting this value to true may have a performance impact. Note that the + count returned is an approximation. Default value is None. + :paramtype include_total_result_count: bool + :keyword facets: The list of facet expressions to apply to the search query. Each facet + expression contains a field name, optionally followed by a comma-separated list + of name:value pairs. Default value is None. + :paramtype facets: list[str] + :keyword _filter: The OData $filter expression to apply to the search query. Default value is + None. + :paramtype _filter: str + :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable + fields can + be used for hit highlighting. Default value is None. + :paramtype highlight_fields: list[str] + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. Default is </em>. Default value is None. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. Default is <em>. Default value is None. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be + covered by a search query in order for the query to be reported as a success. + This parameter can be useful for ensuring search availability even for services + with only one replica. The default is 100. Default value is None. + :paramtype minimum_coverage: float + :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each + expression can be either a field name or a call to either the geo.distance() or + the search.score() functions. Each expression can be followed by asc to + indicate ascending, and desc to indicate descending. The default is ascending + order. Ties will be broken by the match scores of documents. If no OrderBy is + specified, the default sort order is descending by document match score. There + can be at most 32 $orderby clauses. Default value is None. + :paramtype order_by: list[str] + :keyword query_type: A value that specifies the syntax of the search query. The default is + 'simple'. + Use 'full' if your query uses the Lucene query syntax. Known values are: "simple", "full", and + "semantic". Default value is None. + :paramtype query_type: str or ~azure.search.documents.models.QueryType + :keyword scoring_parameters: The list of parameter values to be used in scoring functions (for + example, + referencePointParameter) using the format name-values. For example, if the + scoring profile defines a function with a parameter called 'mylocation' the + parameter string would be "mylocation--122.2,44.8" (without the quotes). Default value is + None. + :paramtype scoring_parameters: list[str] + :keyword scoring_profile: The name of a scoring profile to evaluate match scores for matching + documents + in order to sort the results. Default value is None. + :paramtype scoring_profile: str + :keyword search_fields: The list of field names to which to scope the full-text search. 
When + using + fielded search (fieldName:searchExpression) in a full Lucene query, the field + names of each fielded search expression take precedence over any field names + listed in this parameter. Default value is None. + :paramtype search_fields: list[str] + :keyword search_mode: A value that specifies whether any or all of the search terms must be + matched + in order to count the document as a match. Known values are: "any" and "all". Default value is + None. + :paramtype search_mode: str or ~azure.search.documents.models.SearchMode + :keyword scoring_statistics: A value that specifies whether we want to calculate scoring + statistics (such as + document frequency) globally for more consistent scoring, or locally, for lower + latency. Known values are: "local" and "global". Default value is None. + :paramtype scoring_statistics: str or ~azure.search.documents.models.ScoringStatistics + :keyword session_id: A value to be used to create a sticky session, which can help to get more + consistent results. As long as the same sessionId is used, a best-effort + attempt will be made to target the same replica set. Be wary that reusing the + same sessionID values repeatedly can interfere with the load balancing of the + requests across replicas and adversely affect the performance of the search + service. The value used as sessionId cannot start with a '_' character. Default value is None. + :paramtype session_id: str + :keyword _select: The list of fields to retrieve. If unspecified, all fields marked as + retrievable in the schema are included. Default value is None. + :paramtype _select: list[str] + :keyword _skip: The number of search results to skip. This value cannot be greater than + 100,000. If you need to scan documents in sequence, but cannot use $skip due to + this limitation, consider using $orderby on a totally-ordered key and $filter + with a range query instead. Default value is None. + :paramtype _skip: int + :keyword _top: The number of search results to retrieve. This can be used in conjunction with + $skip to implement client-side paging of search results. If results are + truncated due to server-side paging, the response will include a continuation + token that can be used to issue another Search request for the next page of + results. Default value is None. + :paramtype _top: int + :keyword semantic_configuration: The name of the semantic configuration that lists which fields + should be used + for semantic ranking, captions, highlights, and answers. Default value is None. + :paramtype semantic_configuration: str + :keyword semantic_error_handling: Allows the user to choose whether a semantic call should fail + completely, or to + return partial results (default). Known values are: "partial" and "fail". Default value is + None. + :paramtype semantic_error_handling: str or ~azure.search.documents.models.SemanticErrorMode + :keyword semantic_max_wait_in_milliseconds: Allows the user to set an upper bound on the amount + of time it takes for + semantic enrichment to finish processing before the request fails. Default value is None. + :paramtype semantic_max_wait_in_milliseconds: int + :keyword answers: This parameter is only valid if the query type is ``semantic``. If set, the + query + returns answers extracted from key passages in the highest ranked documents. + The number of answers returned can be configured by appending the pipe + character ``|`` followed by the ``count-`` option after the + answers parameter value, such as ``extractive|count-3``. Default count is 1. 
The
+ confidence threshold can be configured by appending the pipe character ``|``
+ followed by the ``threshold-`` option after the answers
+ parameter value, such as ``extractive|threshold-0.9``. Default threshold is 0.7.
+ The maximum character length of answers can be configured by appending the pipe
+ character '|' followed by the 'maxcharlength-' option,
+ such as 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". Default
+ value is None.
+ :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType
+ :keyword captions: This parameter is only valid if the query type is ``semantic``. If set, the
+ query
+ returns captions extracted from key passages in the highest ranked documents.
+ When Captions is set to ``extractive``\\ , highlighting is enabled by default, and
+ can be configured by appending the pipe character ``|`` followed by the
+ ``highlight-`` option, such as ``extractive|highlight-true``. Defaults
+ to ``None``. The maximum character length of captions can be configured by
+ appending the pipe character '|' followed by the 'maxcharlength-' option, such as
+ 'extractive|maxcharlength-600'. Known values are: "none" and
+ "extractive". Default value is None.
+ :paramtype captions: str or ~azure.search.documents.models.QueryCaptionType
+ :keyword semantic_query: Allows setting a separate search query that will be solely used for
+ semantic
+ reranking, semantic captions and semantic answers. Is useful for scenarios
+ where there is a need to use different queries between the base retrieval and
+ ranking phase, and the L2 semantic phase. Default value is None.
+ :paramtype semantic_query: str
+ :keyword query_rewrites: When QueryRewrites is set to ``generative``\\ , the query terms are
+ sent to a
+ generative model which will produce 10 (default) rewrites to help increase the
+ recall of the request. The requested count can be configured by appending the
+ pipe character ``|`` followed by the ``count-`` option, such as
+ ``generative|count-3``. Defaults to ``None``. This parameter is only valid if the
+ query type is ``semantic``. Known values are: "none" and "generative". Default value is None.
+ :paramtype query_rewrites: str or ~azure.search.documents.models.QueryRewritesType
+ :keyword debug: Enables a debugging tool that can be used to further explore your search
+ results. Known values are: "disabled", "semantic", "vector", "queryRewrites", and "all".
+ Default value is None.
+ :paramtype debug: str or ~azure.search.documents.models.QueryDebugMode
+ :keyword query_language: The language of the query. Known values are: "none", "en-us", "en-gb",
+ "en-in", "en-ca", "en-au", "fr-fr", "fr-ca", "de-de", "es-es", "es-mx", "zh-cn", "zh-tw",
+ "pt-br", "pt-pt", "it-it", "ja-jp", "ko-kr", "ru-ru", "cs-cz", "nl-be", "nl-nl", "hu-hu",
+ "pl-pl", "sv-se", "tr-tr", "hi-in", "ar-sa", "ar-eg", "ar-ma", "ar-kw", "ar-jo", "da-dk",
+ "no-no", "bg-bg", "hr-hr", "hr-ba", "ms-my", "ms-bn", "sl-sl", "ta-in", "vi-vn", "el-gr",
+ "ro-ro", "is-is", "id-id", "th-th", "lt-lt", "uk-ua", "lv-lv", "et-ee", "ca-es", "fi-fi",
+ "sr-ba", "sr-me", "sr-rs", "sk-sk", "nb-no", "hy-am", "bn-in", "eu-es", "gl-es", "gu-in",
+ "he-il", "ga-ie", "kn-in", "ml-in", "mr-in", "fa-ae", "pa-in", "te-in", and "ur-pk". Default
+ value is None.
+ :paramtype query_language: str or ~azure.search.documents.models.QueryLanguage
+ :keyword speller: Improve search recall by spell-correcting individual search query terms.
+ Known values are: "none" and "lexicon". Default value is None.
+ :paramtype speller: str or ~azure.search.documents.models.QuerySpellerType + :keyword semantic_fields: The list of field names used for semantic ranking. Default value is + None. + :paramtype semantic_fields: list[str] + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) + + _request = build_documents_operations_search_get_request( + index_name=index_name, + search_text=search_text, + include_total_result_count=include_total_result_count, + facets=facets, + _filter=_filter, + highlight_fields=highlight_fields, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + order_by=order_by, + query_type=query_type, + scoring_parameters=scoring_parameters, + scoring_profile=scoring_profile, + search_fields=search_fields, + search_mode=search_mode, + scoring_statistics=scoring_statistics, + session_id=session_id, + _select=_select, + _skip=_skip, + _top=_top, + semantic_configuration=semantic_configuration, + semantic_error_handling=semantic_error_handling, + semantic_max_wait_in_milliseconds=semantic_max_wait_in_milliseconds, + answers=answers, + captions=captions, + semantic_query=semantic_query, + query_rewrites=query_rewrites, + debug=debug, + query_language=query_language, + speller=speller, + semantic_fields=semantic_fields, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def search_post( + self, + index_name: str, + search_request: _models.SearchRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Required. + :type search_request: ~azure.search.documents.models.SearchRequest + :keyword content_type: Body Parameter content-type. 
Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def search_post( + self, index_name: str, search_request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Required. + :type search_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def search_post( + self, index_name: str, search_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Required. + :type search_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SearchDocumentsResult. The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def search_post( + self, index_name: str, search_request: Union[_models.SearchRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SearchDocumentsResult: + """Searches for documents in the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param search_request: The definition of the Search request. Is one of the following types: + SearchRequest, JSON, IO[bytes] Required. + :type search_request: ~azure.search.documents.models.SearchRequest or JSON or IO[bytes] + :return: SearchDocumentsResult. 
The SearchDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(search_request, (IOBase, bytes)): + _content = search_request + else: + _content = json.dumps(search_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_search_post_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def get( + self, key: str, index_name: str, *, selected_fields: Optional[List[str]] = None, **kwargs: Any + ) -> Dict[str, Any]: + """Retrieves a document from the index. + + :param key: The key of the document to retrieve. Required. + :type key: str + :param index_name: The name of the index. Required. + :type index_name: str + :keyword selected_fields: List of field names to retrieve for the document; Any field not + retrieved will + be missing from the returned document. Default value is None. 
+ :paramtype selected_fields: list[str] + :return: dict mapping str to any + :rtype: dict[str, any] + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[Dict[str, Any]] = kwargs.pop("cls", None) + + _request = build_documents_operations_get_request( + key=key, + index_name=index_name, + selected_fields=selected_fields, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(Dict[str, Any], response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def suggest_get( + self, + index_name: str, + *, + search_text: str, + suggester_name: str, + _filter: Optional[str] = None, + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + order_by: Optional[List[str]] = None, + search_fields: Optional[List[str]] = None, + _select: Optional[List[str]] = None, + _top: Optional[int] = None, + **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword search_text: The search text to use to suggest documents. Must be at least 1 + character, and + no more than 100 characters. Required. + :paramtype search_text: str + :keyword suggester_name: The name of the suggester as specified in the suggesters collection + that's part + of the index definition. Required. + :paramtype suggester_name: str + :keyword _filter: An OData expression that filters the documents considered for suggestions. + Default value is None. + :paramtype _filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + suggestions query. + Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a + better experience in some scenarios, it comes at a performance cost as fuzzy + suggestions queries are slower and consume more resources. Default value is None. 
+ :paramtype use_fuzzy_matching: bool
+ :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with
+ highlightPreTag. If omitted, hit highlighting of suggestions is disabled. Default value is
+ None.
+ :paramtype highlight_post_tag: str
+ :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with
+ highlightPostTag. If omitted, hit highlighting of suggestions is disabled. Default value is
+ None.
+ :paramtype highlight_pre_tag: str
+ :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index
+ that must be
+ covered by a suggestions query in order for the query to be reported as a
+ success. This parameter can be useful for ensuring search availability even for
+ services with only one replica. The default is 80. Default value is None.
+ :paramtype minimum_coverage: float
+ :keyword order_by: The list of OData $orderby expressions by which to sort the results. Each
+ expression can be either a field name or a call to either the geo.distance() or
+ the search.score() functions. Each expression can be followed by asc to
+ indicate ascending, or desc to indicate descending. The default is ascending
+ order. Ties will be broken by the match scores of documents. If no $orderby is
+ specified, the default sort order is descending by document match score. There
+ can be at most 32 $orderby clauses. Default value is None.
+ :paramtype order_by: list[str]
+ :keyword search_fields: The list of field names to search for the specified search text. Target
+ fields
+ must be included in the specified suggester. Default value is None.
+ :paramtype search_fields: list[str]
+ :keyword _select: The list of fields to retrieve. If unspecified, only the key field will be
+ included in the results. Default value is None.
+ :paramtype _select: list[str]
+ :keyword _top: The number of suggestions to retrieve. The value must be a number between 1
+ and 100. The default is 5. Default value is None.
+ :paramtype _top: int
+ :return: SuggestDocumentsResult.
The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) + + _request = build_documents_operations_suggest_get_request( + index_name=index_name, + search_text=search_text, + suggester_name=suggester_name, + _filter=_filter, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + order_by=order_by, + search_fields=search_fields, + _select=_select, + _top=_top, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SuggestDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def suggest_post( + self, + index_name: str, + suggest_request: _models.SuggestRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Required. + :type suggest_request: ~azure.search.documents.models.SuggestRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def suggest_post( + self, index_name: str, suggest_request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Required. + :type suggest_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. 
+ Default value is "application/json". + :paramtype content_type: str + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def suggest_post( + self, index_name: str, suggest_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Required. + :type suggest_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def suggest_post( + self, index_name: str, suggest_request: Union[_models.SuggestRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.SuggestDocumentsResult: + """Suggests documents in the index that match the given partial query text. + + :param index_name: The name of the index. Required. + :type index_name: str + :param suggest_request: The Suggest request. Is one of the following types: SuggestRequest, + JSON, IO[bytes] Required. + :type suggest_request: ~azure.search.documents.models.SuggestRequest or JSON or IO[bytes] + :return: SuggestDocumentsResult. The SuggestDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SuggestDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(suggest_request, (IOBase, bytes)): + _content = suggest_request + else: + _content = json.dumps(suggest_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_suggest_post_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, 
response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SuggestDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def index( + self, index_name: str, batch: _models.IndexBatch, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Required. + :type batch: ~azure.search.documents.models.IndexBatch + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: IndexDocumentsResult. The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def index( + self, index_name: str, batch: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Required. + :type batch: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: IndexDocumentsResult. The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def index( + self, index_name: str, batch: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Required. + :type batch: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: IndexDocumentsResult. The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def index( + self, index_name: str, batch: Union[_models.IndexBatch, JSON, IO[bytes]], **kwargs: Any + ) -> _models.IndexDocumentsResult: + """Sends a batch of document write actions to the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param batch: The batch of index actions. Is one of the following types: IndexBatch, JSON, + IO[bytes] Required. + :type batch: ~azure.search.documents.models.IndexBatch or JSON or IO[bytes] + :return: IndexDocumentsResult. 
The IndexDocumentsResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.IndexDocumentsResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.IndexDocumentsResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(batch, (IOBase, bytes)): + _content = batch + else: + _content = json.dumps(batch, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_index_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200, 207]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.IndexDocumentsResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @distributed_trace + def autocomplete_get( + self, + index_name: str, + *, + search_text: str, + suggester_name: str, + autocomplete_mode: Optional[Union[str, _models.AutocompleteMode]] = None, + _filter: Optional[str] = None, + use_fuzzy_matching: Optional[bool] = None, + highlight_post_tag: Optional[str] = None, + highlight_pre_tag: Optional[str] = None, + minimum_coverage: Optional[float] = None, + search_fields: Optional[List[str]] = None, + _top: Optional[int] = None, + **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :keyword search_text: The incomplete term which should be auto-completed. Required. + :paramtype search_text: str + :keyword suggester_name: The name of the suggester as specified in the suggesters collection + that's part + of the index definition. Required. + :paramtype suggester_name: str + :keyword autocomplete_mode: Specifies the mode for Autocomplete. The default is 'oneTerm'. Use + 'twoTerms' + to get shingles and 'oneTermWithContext' to use the current context while + producing auto-completed terms. Known values are: "oneTerm", "twoTerms", and + "oneTermWithContext". Default value is None. 
+ :paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode + :keyword _filter: An OData expression that filters the documents used to produce completed + terms + for the Autocomplete result. Default value is None. + :paramtype _filter: str + :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the + autocomplete query. + Default is false. When set to true, the query will find terms even if there's a + substituted or missing character in the search text. While this provides a + better experience in some scenarios, it comes at a performance cost as fuzzy + autocomplete queries are slower and consume more resources. Default value is None. + :paramtype use_fuzzy_matching: bool + :keyword highlight_post_tag: A string tag that is appended to hit highlights. Must be set with + highlightPreTag. If omitted, hit highlighting is disabled. Default value is None. + :paramtype highlight_post_tag: str + :keyword highlight_pre_tag: A string tag that is prepended to hit highlights. Must be set with + highlightPostTag. If omitted, hit highlighting is disabled. Default value is None. + :paramtype highlight_pre_tag: str + :keyword minimum_coverage: A number between 0 and 100 indicating the percentage of the index + that must be + covered by an autocomplete query in order for the query to be reported as a + success. This parameter can be useful for ensuring search availability even for + services with only one replica. The default is 80. Default value is None. + :paramtype minimum_coverage: float + :keyword search_fields: The list of field names to consider when querying for auto-completed + terms. + Target fields must be included in the specified suggester. Default value is None. + :paramtype search_fields: list[str] + :keyword _top: The number of auto-completed terms to retrieve. This must be a value between 1 + and 100. The default is 5. Default value is None. + :paramtype _top: int + :return: AutocompleteResult. 
The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) + + _request = build_documents_operations_autocomplete_get_request( + index_name=index_name, + search_text=search_text, + suggester_name=suggester_name, + autocomplete_mode=autocomplete_mode, + _filter=_filter, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + search_fields=search_fields, + _top=_top, + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AutocompleteResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + @overload + def autocomplete_post( + self, + index_name: str, + autocomplete_request: _models.AutocompleteRequest, + *, + content_type: str = "application/json", + **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Required. + :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def autocomplete_post( + self, index_name: str, autocomplete_request: JSON, *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Required. 
+ :type autocomplete_request: JSON + :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. + Default value is "application/json". + :paramtype content_type: str + :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @overload + def autocomplete_post( + self, index_name: str, autocomplete_request: IO[bytes], *, content_type: str = "application/json", **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Required. + :type autocomplete_request: IO[bytes] + :keyword content_type: Body Parameter content-type. Content type parameter for binary body. + Default value is "application/json". + :paramtype content_type: str + :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + + @distributed_trace + def autocomplete_post( + self, index_name: str, autocomplete_request: Union[_models.AutocompleteRequest, JSON, IO[bytes]], **kwargs: Any + ) -> _models.AutocompleteResult: + """Autocompletes incomplete query terms based on input text and matching terms in + the index. + + :param index_name: The name of the index. Required. + :type index_name: str + :param autocomplete_request: The definition of the Autocomplete request. Is one of the + following types: AutocompleteRequest, JSON, IO[bytes] Required. + :type autocomplete_request: ~azure.search.documents.models.AutocompleteRequest or JSON or + IO[bytes] + :return: AutocompleteResult. 
The AutocompleteResult is compatible with MutableMapping + :rtype: ~azure.search.documents.models.AutocompleteResult + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = kwargs.pop("params", {}) or {} + + content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) + cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) + + content_type = content_type or "application/json" + _content = None + if isinstance(autocomplete_request, (IOBase, bytes)): + _content = autocomplete_request + else: + _content = json.dumps(autocomplete_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore + + _request = build_documents_operations_autocomplete_post_request( + index_name=index_name, + content_type=content_type, + api_version=self._config.api_version, + content=_content, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.AutocompleteResult, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore + + +class SearchClientOperationsMixin(SearchClientMixinABC): + + @distributed_trace + def get_service_statistics(self, **kwargs: Any) -> _models.SearchServiceStatistics: + """Gets service level statistics for a search service. + + :return: SearchServiceStatistics. 
The SearchServiceStatistics is compatible with MutableMapping + :rtype: ~azure.search.documents.models.SearchServiceStatistics + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.SearchServiceStatistics] = kwargs.pop("cls", None) + + _request = build_search_get_service_statistics_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.SearchServiceStatistics, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index_documents_batch.py b/sdk/search/azure-search-documents/azure/search/documents/_index_documents_batch.py index db107813fd63..8e8b1b236177 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index_documents_batch.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index_documents_batch.py @@ -139,7 +139,15 @@ def enqueue_actions(self, new_actions: Union[IndexAction, List[IndexAction]], ** self._actions.extend(new_actions) def _extend_batch(self, documents: List[Dict], action_type: str) -> List[IndexAction]: - new_actions = [IndexAction(additional_properties=document, action_type=action_type) for document in documents] + new_actions: List[IndexAction] = [] + for document in documents: + index_action = IndexAction(action_type=action_type) + if isinstance(document, dict): + for key, value in document.items(): + index_action[key] = value + else: + index_action[""] = document + new_actions.append(index_action) with self._lock: self._actions.extend(new_actions) return new_actions diff --git a/sdk/search/azure-search-documents/azure/search/documents/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/_paging.py index 6fb63ef07f5d..4748451613bc 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_paging.py @@ -15,7 +15,7 @@ def convert_search_result(result): - ret = result.additional_properties + ret = result ret["@search.score"] = result.score ret["@search.reranker_score"] = result.reranker_score ret["@search.highlights"] = result.highlights @@ -38,7 +38,8 @@ def unpack_continuation_token(token): 
unpacked_token = json.loads(base64.b64decode(token)) next_link = unpacked_token["nextLink"] next_page_parameters = unpacked_token["nextPageParameters"] - next_page_request = SearchRequest.deserialize(next_page_parameters) + search_request_token = json.loads(next_page_parameters) + next_page_request = SearchRequest(search_request_token) return next_link, next_page_request @@ -108,13 +109,14 @@ def wrapper(self, *args, **kw): class SearchPageIterator(PageIterator): - def __init__(self, client, initial_query, kwargs, continuation_token=None) -> None: + def __init__(self, client, index_name, initial_query, kwargs, continuation_token=None) -> None: super(SearchPageIterator, self).__init__( get_next=self._get_next_cb, extract_data=self._extract_data_cb, continuation_token=continuation_token, ) self._client = client + self._index_name = index_name self._initial_query = initial_query self._kwargs = kwargs self._facets = None @@ -122,11 +124,15 @@ def __init__(self, client, initial_query, kwargs, continuation_token=None) -> No def _get_next_cb(self, continuation_token): if continuation_token is None: - return self._client.documents.search_post(search_request=self._initial_query.request, **self._kwargs) + return self._client.documents_operations.search_post( + index_name=self._index_name, search_request=self._initial_query.request, **self._kwargs + ) _next_link, next_page_request = unpack_continuation_token(continuation_token) - return self._client.documents.search_post(search_request=next_page_request, **self._kwargs) + return self._client.documents_operations.search_post( + index_name=self._index_name, search_request=next_page_request, **self._kwargs + ) def _extract_data_cb(self, response): continuation_token = pack_continuation_token(response, api_version=self._api_version) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py index c99fd4e463f4..62e87ebb49ad 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py @@ -9,8 +9,9 @@ from azure.core.credentials import AzureKeyCredential, TokenCredential from azure.core.tracing.decorator import distributed_trace from ._api_versions import DEFAULT_VERSION -from ._generated import SearchIndexClient +from ._generated import SearchClient as SearchIndexClient from ._generated.models import ( + AutocompleteItem, AutocompleteMode, AutocompleteRequest, IndexAction, @@ -29,6 +30,7 @@ QueryDebugMode, QueryRewritesType, SuggestRequest, + SuggestResult, HybridSearch, ) from ._search_documents_error import RequestEntityTooLargeError @@ -36,7 +38,7 @@ from ._paging import SearchItemPaged, SearchPageIterator from ._queries import AutocompleteQuery, SearchQuery, SuggestQuery from ._headers_mixin import HeadersMixin -from ._utils import get_authentication_policy, get_answer_query, get_rewrites_query +from ._utils import DEFAULT_AUDIENCE, get_answer_query, get_rewrites_query from ._version import SDK_MONIKER @@ -75,26 +77,20 @@ def __init__( self._index_name = index_name self._credential = credential audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) - else: - self._aad = True - authentication_policy = 
get_authentication_policy(credential, audience=audience) - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not audience: + audience = DEFAULT_AUDIENCE + scope = audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = SearchIndexClient( + endpoint=endpoint, + credential=credential, + index_name=index_name, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + credential_scopes=credential_scopes, + **kwargs + ) def __repr__(self) -> str: return "".format(repr(self._endpoint), repr(self._index_name))[:1024] @@ -114,7 +110,7 @@ def get_document_count(self, **kwargs: Any) -> int: :rtype: int """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return int(self._client.documents.count(**kwargs)) + return int(self._client.documents_operations.count(index_name=self._index_name, **kwargs)) @distributed_trace def get_document(self, key: str, selected_fields: Optional[List[str]] = None, **kwargs: Any) -> Dict: @@ -137,7 +133,9 @@ def get_document(self, key: str, selected_fields: Optional[List[str]] = None, ** :caption: Get a specific document from the search index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.documents.get(key=key, selected_fields=selected_fields, **kwargs) + result = self._client.documents_operations.get( + index_name=self._index_name, key=key, selected_fields=selected_fields, **kwargs + ) return cast(dict, result) @distributed_trace @@ -397,7 +395,7 @@ def search( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) kwargs["api_version"] = self._api_version - return SearchItemPaged(self._client, query, kwargs, page_iterator_class=SearchPageIterator) + return SearchItemPaged(self._client, self._index_name, query, kwargs, page_iterator_class=SearchPageIterator) @distributed_trace def suggest( @@ -415,7 +413,7 @@ def suggest( select: Optional[List[str]] = None, top: Optional[int] = None, **kwargs - ) -> List[Dict]: + ) -> List[SuggestResult]: """Get search suggestion results from the Azure search index. :param str search_text: Required. The search text to use to suggest documents. Must be at least 1 @@ -450,7 +448,7 @@ def suggest( 100. The default is 5. :return: List of suggestion results. - :rtype: list[dict] + :rtype: list[~azure.search.documents.models.SuggestResult] .. admonition:: Example: @@ -482,9 +480,11 @@ def suggest( query.order_by(order_by) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(SuggestRequest, query.request) - response = self._client.documents.suggest_post(suggest_request=request, **kwargs) + response = self._client.documents_operations.suggest_post( + index_name=self._index_name, suggest_request=request, **kwargs + ) assert response.results is not None # Hint for mypy - results = [r.as_dict() for r in response.results] + results = response.results return results @distributed_trace @@ -502,7 +502,7 @@ def autocomplete( search_fields: Optional[List[str]] = None, top: Optional[int] = None, **kwargs - ) -> List[Dict]: + ) -> List[AutocompleteItem]: """Get search auto-completion results from the Azure search index. :param str search_text: The search text on which to base autocomplete results. 
@@ -532,7 +532,7 @@ def autocomplete( :keyword int top: The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. :return: List of auto-completion results. - :rtype: list[dict] + :rtype: list[~azure.search.documents.models.AutocompleteItem] .. admonition:: Example: @@ -561,9 +561,11 @@ def autocomplete( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(AutocompleteRequest, query.request) - response = self._client.documents.autocomplete_post(autocomplete_request=request, **kwargs) + response = self._client.documents_operations.autocomplete_post( + index_name=self._index_name, autocomplete_request=request, **kwargs + ) assert response.results is not None # Hint for mypy - results = [r.as_dict() for r in response.results] + results = response.results return results # pylint:disable=client-method-missing-tracing-decorator @@ -699,7 +701,9 @@ def _index_documents_actions(self, actions: List[IndexAction], **kwargs: Any) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = self._client.documents.index(batch=batch, error_map=error_map, **kwargs) + batch_response = self._client.documents_operations.index( + index_name=self._index_name, batch=batch, error_map=error_map, **kwargs + ) return cast(List[IndexingResult], batch_response.results) except RequestEntityTooLargeError: if len(actions) == 1: @@ -740,4 +744,4 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs) :rtype: ~azure.core.rest.HttpResponse """ request.headers = self._merge_client_headers(request.headers) - return self._client._send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access + return self._client.send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py b/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py index 5648bd49fa2c..9e8791054ea9 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py @@ -10,10 +10,10 @@ from azure.core.credentials import AzureKeyCredential, TokenCredential from azure.core.tracing.decorator import distributed_trace from azure.core.exceptions import ServiceResponseTimeoutError -from ._utils import is_retryable_status_code, get_authentication_policy +from ._utils import is_retryable_status_code, DEFAULT_AUDIENCE from .indexes import SearchIndexClient as SearchServiceClient from ._search_indexing_buffered_sender_base import SearchIndexingBufferedSenderBase -from ._generated import SearchIndexClient +from ._generated import SearchClient as SearchIndexClient from ._generated.models import IndexingResult, IndexBatch, IndexAction from ._search_documents_error import RequestEntityTooLargeError from ._index_documents_batch import IndexDocumentsBatch @@ -62,26 +62,20 @@ def __init__( ) self._index_documents_batch = IndexDocumentsBatch() audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) - else: - self._aad = True - authentication_policy = get_authentication_policy(credential, 
audience=audience) - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not audience: + audience = DEFAULT_AUDIENCE + scope = audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = SearchIndexClient( + endpoint=endpoint, + credential=credential, + index_name=index_name, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + credential_scopes=credential_scopes, + **kwargs + ) self._reset_timer() def _cleanup(self, flush: bool = True) -> None: @@ -166,11 +160,7 @@ def _process(self, timeout: int = 86400, **kwargs) -> bool: for result in results: try: assert self._index_key is not None # Hint for mypy - action = next( - x - for x in actions - if x.additional_properties and x.additional_properties.get(self._index_key) == result.key - ) + action = next(x for x in actions if x and str(x.get(self._index_key)) == result.key) if result.succeeded: self._callback_succeed(action) elif is_retryable_status_code(result.status_code): @@ -282,7 +272,9 @@ def _index_documents_actions(self, actions: List[IndexAction], **kwargs) -> List kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = self._client.documents.index(batch=batch, error_map=error_map, **kwargs) + batch_response = self._client.documents_operations.index( + index_name=self._index_name, batch=batch, error_map=error_map, **kwargs + ) return cast(List[IndexingResult], batch_response.results) except RequestEntityTooLargeError as ex: if len(actions) == 1: @@ -327,7 +319,7 @@ def _retry_action(self, action: IndexAction) -> None: if not self._index_key: self._callback_fail(action) return - key = cast(str, action.additional_properties.get(self._index_key) if action.additional_properties else "") + key = cast(str, action.get(self._index_key) if action else "") counter = self._retry_counter.get(key) if not counter: # first time that fails diff --git a/sdk/search/azure-search-documents/azure/search/documents/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/_utils.py index 5b912120c1ed..ffefe0a505ec 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_utils.py @@ -4,10 +4,6 @@ # license information. # -------------------------------------------------------------------------- from typing import Any, Optional, Union -from azure.core.pipeline.policies import ( - BearerTokenCredentialPolicy, - AsyncBearerTokenCredentialPolicy, -) from ._generated.models import QueryAnswerType, QueryRewritesType DEFAULT_AUDIENCE = "https://search.azure.com" @@ -44,16 +40,6 @@ def is_retryable_status_code(status_code: Optional[int]) -> bool: return status_code in [422, 409, 503] -def get_authentication_policy(credential, *, is_async: bool = False, **kwargs): - audience = kwargs.get("audience", None) - if not audience: - audience = DEFAULT_AUDIENCE - scope = audience.rstrip("/") + "/.default" - _policy = BearerTokenCredentialPolicy if not is_async else AsyncBearerTokenCredentialPolicy - authentication_policy = _policy(credential, scope) - return authentication_policy - - def odata(statement: str, **kwargs: Any) -> str: """Escape an OData query string. 
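The constructors in the hunks above all follow the same pattern: default the audience, derive the AAD token scope from it, and pass the credential together with credential_scopes straight to the generated SearchClient, which removes the need for the deleted get_authentication_policy helper. Below is a minimal standalone sketch of that derivation, assuming the DEFAULT_AUDIENCE value shown in _utils.py; the build_auth_kwargs helper name is hypothetical and used only for illustration, not part of the patch.

from azure.core.credentials import AzureKeyCredential

DEFAULT_AUDIENCE = "https://search.azure.com"  # same constant as in _utils.py above


def build_auth_kwargs(credential, audience=None):
    # Mirror the inline pattern used by SearchClient, SearchIndexingBufferedSender
    # and their aio counterparts: default the audience, derive the token scope, and
    # let the generated client select the matching authentication policy.
    audience = audience or DEFAULT_AUDIENCE
    scope = audience.rstrip("/") + "/.default"
    return {
        "credential": credential,
        "credential_scopes": [scope],
        # Informational flag kept by the wrappers: AzureKeyCredential requests are
        # signed with the api-key header rather than a bearer token.
        "aad": not isinstance(credential, AzureKeyCredential),
    }


# Example: a sovereign-cloud audience yields a matching scope.
kwargs = build_auth_kwargs(AzureKeyCredential("<api-key>"), audience="https://search.azure.us")
assert kwargs["credential_scopes"] == ["https://search.azure.us/.default"]

The same derived keyword arguments are used by both the sync and aio constructors, which is why the separate AzureKeyCredential / TokenCredential branches in the old code collapse into a single client instantiation.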
diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_index_documents_batch_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_index_documents_batch_async.py index 2e86750ab9c1..5c6cdcc3f352 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_index_documents_batch_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_index_documents_batch_async.py @@ -133,7 +133,15 @@ async def enqueue_actions(self, new_actions: Union[IndexAction, List[IndexAction self._actions.extend(new_actions) async def _extend_batch(self, documents: List[Dict], action_type: str) -> List[IndexAction]: - new_actions = [IndexAction(additional_properties=document, action_type=action_type) for document in documents] + new_actions: List[IndexAction] = [] + for document in documents: + index_action = IndexAction(action_type=action_type) + if isinstance(document, dict): + for key, value in document.items(): + index_action[key] = value + else: + index_action[""] = document + new_actions.append(index_action) async with self._lock: self._actions.extend(new_actions) return new_actions diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py index 45e1c233737e..c1a731d30419 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py @@ -92,13 +92,14 @@ async def wrapper(self, *args, **kw): class AsyncSearchPageIterator(AsyncPageIterator[ReturnType]): - def __init__(self, client, initial_query, kwargs, continuation_token=None) -> None: + def __init__(self, client, index_name, initial_query, kwargs, continuation_token=None) -> None: super(AsyncSearchPageIterator, self).__init__( get_next=self._get_next_cb, extract_data=self._extract_data_cb, continuation_token=continuation_token, ) self._client = client + self._index_name = index_name self._initial_query = initial_query self._kwargs = kwargs self._facets = None @@ -106,11 +107,15 @@ def __init__(self, client, initial_query, kwargs, continuation_token=None) -> No async def _get_next_cb(self, continuation_token): if continuation_token is None: - return await self._client.documents.search_post(search_request=self._initial_query.request, **self._kwargs) + return await self._client.documents_operations.search_post( + index_name=self._index_name, search_request=self._initial_query.request, **self._kwargs + ) _next_link, next_page_request = unpack_continuation_token(continuation_token) - return await self._client.documents.search_post(search_request=next_page_request, **self._kwargs) + return await self._client.documents_operations.search_post( + index_name=self._index_name, search_request=next_page_request, **self._kwargs + ) async def _extract_data_cb(self, response): continuation_token = pack_continuation_token(response, api_version=self._api_version) diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py index 974976c10808..7712075a979e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py @@ -10,9 +10,10 @@ from azure.core.credentials_async import AsyncTokenCredential from azure.core.tracing.decorator_async import 
distributed_trace_async from ._paging import AsyncSearchItemPaged, AsyncSearchPageIterator -from .._utils import get_authentication_policy, get_answer_query, get_rewrites_query -from .._generated.aio import SearchIndexClient +from .._utils import DEFAULT_AUDIENCE, get_answer_query, get_rewrites_query +from .._generated.aio import SearchClient as SearchIndexClient from .._generated.models import ( + AutocompleteItem, AutocompleteMode, AutocompleteRequest, IndexAction, @@ -31,6 +32,7 @@ QueryRewritesType, QueryDebugMode, SuggestRequest, + SuggestResult, HybridSearch, ) from .._search_documents_error import RequestEntityTooLargeError @@ -77,26 +79,20 @@ def __init__( self._index_name: str = index_name self._credential = credential audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) - else: - self._aad = True - authentication_policy = get_authentication_policy(credential, audience=audience, is_async=True) - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not audience: + audience = DEFAULT_AUDIENCE + scope = audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = SearchIndexClient( + endpoint=endpoint, + credential=credential, + index_name=index_name, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + credential_scopes=credential_scopes, + **kwargs + ) def __repr__(self) -> str: return "".format(repr(self._endpoint), repr(self._index_name))[:1024] @@ -116,7 +112,7 @@ async def get_document_count(self, **kwargs: Any) -> int: :rtype: int """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return int(await self._client.documents.count(**kwargs)) + return int(await self._client.documents_operations.count(index_name=self._index_name, **kwargs)) @distributed_trace_async async def get_document(self, key: str, selected_fields: Optional[List[str]] = None, **kwargs: Any) -> Dict: @@ -139,7 +135,9 @@ async def get_document(self, key: str, selected_fields: Optional[List[str]] = No :caption: Get a specific document from the search index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.documents.get(key=key, selected_fields=selected_fields, **kwargs) + result = await self._client.documents_operations.get( + index_name=self._index_name, key=key, selected_fields=selected_fields, **kwargs + ) return cast(dict, result) @distributed_trace_async @@ -395,7 +393,9 @@ async def search( query.order_by(order_by) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) kwargs["api_version"] = self._api_version - return AsyncSearchItemPaged(self._client, query, kwargs, page_iterator_class=AsyncSearchPageIterator) + return AsyncSearchItemPaged( + self._client, self._index_name, query, kwargs, page_iterator_class=AsyncSearchPageIterator + ) @distributed_trace_async async def suggest( @@ -413,7 +413,7 @@ async def suggest( select: Optional[List[str]] = None, top: Optional[int] = None, **kwargs - ) -> List[Dict]: + ) -> List[SuggestResult]: """Get search suggestion results from the Azure search index. :param str search_text: Required. 
The search text to use to suggest documents. Must be at least 1 @@ -447,7 +447,7 @@ async def suggest( :keyword int top: The number of suggestions to retrieve. The value must be a number between 1 and 100. The default is 5. :return: List of suggestion results. - :rtype: list[dict] + :rtype: list[~azure.search.documents.models.SuggestResult] .. admonition:: Example: @@ -479,9 +479,11 @@ async def suggest( query.order_by(order_by) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(SuggestRequest, query.request) - response = await self._client.documents.suggest_post(suggest_request=request, **kwargs) + response = await self._client.documents_operations.suggest_post( + index_name=self._index_name, suggest_request=request, **kwargs + ) assert response.results is not None # Hint for mypy - results = [r.as_dict() for r in response.results] + results = response.results return results @distributed_trace_async @@ -499,7 +501,7 @@ async def autocomplete( search_fields: Optional[List[str]] = None, top: Optional[int] = None, **kwargs - ) -> List[Dict]: + ) -> List[AutocompleteItem]: """Get search auto-completion results from the Azure search index. :param str search_text: The search text on which to base autocomplete results. @@ -529,7 +531,7 @@ async def autocomplete( :keyword int top: The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. :return: List of auto-completion results. - :rtype: list[Dict] + :rtype: list[~azure.search.documents.models.AutocompleteItem] .. admonition:: Example: @@ -558,9 +560,11 @@ async def autocomplete( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(AutocompleteRequest, query.request) - response = await self._client.documents.autocomplete_post(autocomplete_request=request, **kwargs) + response = await self._client.documents_operations.autocomplete_post( + index_name=self._index_name, autocomplete_request=request, **kwargs + ) assert response.results is not None # Hint for mypy - results = [r.as_dict() for r in response.results] + results = response.results return results # pylint:disable=client-method-missing-tracing-decorator-async @@ -696,7 +700,9 @@ async def _index_documents_actions(self, actions: List[IndexAction], **kwargs: A kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = await self._client.documents.index(batch=batch, error_map=error_map, **kwargs) + batch_response = await self._client.documents_operations.index( + index_name=self._index_name, batch=batch, error_map=error_map, **kwargs + ) return cast(List[IndexingResult], batch_response.results) except RequestEntityTooLargeError: if len(actions) == 1: @@ -737,4 +743,4 @@ async def send_request(self, request: HttpRequest, *, stream: bool = False, **kw :rtype: ~azure.core.rest.AsyncHttpResponse """ request.headers = self._merge_client_headers(request.headers) - return await self._client._send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access + return await self._client.send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py index ca53c7d6910e..0a45752eece6 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py @@ -11,9 +11,9 @@ from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.exceptions import ServiceResponseTimeoutError from ._timer import Timer -from .._utils import is_retryable_status_code, get_authentication_policy +from .._utils import is_retryable_status_code, DEFAULT_AUDIENCE from .._search_indexing_buffered_sender_base import SearchIndexingBufferedSenderBase -from .._generated.aio import SearchIndexClient +from .._generated.aio import SearchClient as SearchIndexClient from .._generated.models import IndexingResult, IndexBatch, IndexAction from .._search_documents_error import RequestEntityTooLargeError from ._index_documents_batch_async import IndexDocumentsBatch @@ -61,26 +61,20 @@ def __init__( ) self._index_documents_batch = IndexDocumentsBatch() audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) - else: - self._aad = True - authentication_policy = get_authentication_policy(credential, audience=audience, is_async=True) - self._client = SearchIndexClient( - endpoint=endpoint, - index_name=index_name, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not audience: + audience = DEFAULT_AUDIENCE + scope = audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = SearchIndexClient( + endpoint=endpoint, + credential=credential, + index_name=index_name, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + credential_scopes=credential_scopes, + **kwargs + ) self._reset_timer() async def _cleanup(self, flush: bool = True) -> None: @@ -166,11 +160,7 @@ async def _process(self, timeout: int = 86400, **kwargs) -> bool: for result in results: try: assert self._index_key is not None # Hint for mypy - action = next( - x - for x in actions - if x.additional_properties and x.additional_properties.get(self._index_key) == result.key - ) + action = next(x for x in actions if x and str(x.get(self._index_key)) == result.key) if result.succeeded: await self._callback_succeed(action) elif is_retryable_status_code(result.status_code): @@ -279,7 +269,9 @@ async def _index_documents_actions(self, actions: List[IndexAction], **kwargs: A kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = await self._client.documents.index(batch=batch, error_map=error_map, **kwargs) + batch_response = await self._client.documents_operations.index( + index_name=self._index_name, batch=batch, error_map=error_map, **kwargs + ) return cast(List[IndexingResult], batch_response.results) except RequestEntityTooLargeError as ex: if len(actions) == 1: @@ -324,7 +316,7 @@ async def _retry_action(self, action: IndexAction) -> None: if not self._index_key: await self._callback_fail(action) return - key = cast(str, action.additional_properties.get(self._index_key) if action.additional_properties else "") + key = cast(str, action.get(self._index_key) if action else "") counter = self._retry_counter.get(key) if not counter: # first time that 
fails diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py deleted file mode 100644 index e67c2149e4d1..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - -from ._search_service_client import SearchServiceClient # type: ignore - -try: - from ._patch import __all__ as _patch_all - from ._patch import * -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "SearchServiceClient", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore - -_patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py deleted file mode 100644 index 44488764e054..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_configuration.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.pipeline import policies - -VERSION = "unknown" - - -class SearchServiceClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for SearchServiceClient. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param endpoint: The endpoint URL of the search service. Required. - :type endpoint: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. 
- :paramtype api_version: str - """ - - def __init__(self, endpoint: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - - self.endpoint = endpoint - self.api_version = api_version - kwargs.setdefault("sdk_moniker", "searchserviceclient/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.RedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.RetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_patch.py deleted file mode 100644 index f7dd32510333..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_patch.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List - -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py deleted file mode 100644 index 285b981b5275..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_search_service_client.py +++ /dev/null @@ -1,121 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any -from typing_extensions import Self - -from azure.core import PipelineClient -from azure.core.pipeline import policies -from azure.core.rest import HttpRequest, HttpResponse - -from . 
import models as _models -from ._configuration import SearchServiceClientConfiguration -from ._serialization import Deserializer, Serializer -from .operations import ( - AliasesOperations, - DataSourcesOperations, - IndexersOperations, - IndexesOperations, - SearchServiceClientOperationsMixin, - SkillsetsOperations, - SynonymMapsOperations, -) - - -class SearchServiceClient(SearchServiceClientOperationsMixin): # pylint: disable=too-many-instance-attributes - """Client that can be used to manage and query indexes and documents, as well as manage other - resources, on a search service. - - :ivar data_sources: DataSourcesOperations operations - :vartype data_sources: azure.search.documents.indexes.operations.DataSourcesOperations - :ivar indexers: IndexersOperations operations - :vartype indexers: azure.search.documents.indexes.operations.IndexersOperations - :ivar skillsets: SkillsetsOperations operations - :vartype skillsets: azure.search.documents.indexes.operations.SkillsetsOperations - :ivar synonym_maps: SynonymMapsOperations operations - :vartype synonym_maps: azure.search.documents.indexes.operations.SynonymMapsOperations - :ivar indexes: IndexesOperations operations - :vartype indexes: azure.search.documents.indexes.operations.IndexesOperations - :ivar aliases: AliasesOperations operations - :vartype aliases: azure.search.documents.indexes.operations.AliasesOperations - :param endpoint: The endpoint URL of the search service. Required. - :type endpoint: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, endpoint: str, **kwargs: Any - ) -> None: - _endpoint = "{endpoint}" - self._config = SearchServiceClientConfiguration(endpoint=endpoint, **kwargs) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: PipelineClient = PipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.data_sources = DataSourcesOperations(self._client, self._config, self._serialize, self._deserialize) - self.indexers = IndexersOperations(self._client, self._config, self._serialize, self._deserialize) - self.skillsets = SkillsetsOperations(self._client, self._config, self._serialize, self._deserialize) - self.synonym_maps = SynonymMapsOperations(self._client, self._config, self._serialize, self._deserialize) - self.indexes = IndexesOperations(self._client, self._config, self._serialize, self._deserialize) - self.aliases = AliasesOperations(self._client, self._config, self._serialize, self._deserialize) - - def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: 
Any) -> HttpResponse: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. - :rtype: ~azure.core.rest.HttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - def close(self) -> None: - self._client.close() - - def __enter__(self) -> Self: - self._client.__enter__() - return self - - def __exit__(self, *exc_details: Any) -> None: - self._client.__exit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py deleted file mode 100644 index ce17d1798ce7..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_serialization.py +++ /dev/null @@ -1,2114 +0,0 @@ -# pylint: disable=too-many-lines -# -------------------------------------------------------------------------- -# -# Copyright (c) Microsoft Corporation. All rights reserved. -# -# The MIT License (MIT) -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the ""Software""), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -# IN THE SOFTWARE. 
-# -# -------------------------------------------------------------------------- - -# pyright: reportUnnecessaryTypeIgnoreComment=false - -from base64 import b64decode, b64encode -import calendar -import datetime -import decimal -import email -from enum import Enum -import json -import logging -import re -import sys -import codecs -from typing import ( - Dict, - Any, - cast, - Optional, - Union, - AnyStr, - IO, - Mapping, - Callable, - TypeVar, - MutableMapping, - Type, - List, -) - -try: - from urllib import quote # type: ignore -except ImportError: - from urllib.parse import quote -import xml.etree.ElementTree as ET - -import isodate # type: ignore - -from azure.core.exceptions import DeserializationError, SerializationError -from azure.core.serialization import NULL as CoreNull - -_BOM = codecs.BOM_UTF8.decode(encoding="utf-8") - -ModelType = TypeVar("ModelType", bound="Model") -JSON = MutableMapping[str, Any] - - -class RawDeserializer: - - # Accept "text" because we're open minded people... - JSON_REGEXP = re.compile(r"^(application|text)/([a-z+.]+\+)?json$") - - # Name used in context - CONTEXT_NAME = "deserialized_data" - - @classmethod - def deserialize_from_text(cls, data: Optional[Union[AnyStr, IO]], content_type: Optional[str] = None) -> Any: - """Decode data according to content-type. - - Accept a stream of data as well, but will be load at once in memory for now. - - If no content-type, will return the string version (not bytes, not stream) - - :param data: Input, could be bytes or stream (will be decoded with UTF8) or text - :type data: str or bytes or IO - :param str content_type: The content type. - :return: The deserialized data. - :rtype: object - """ - if hasattr(data, "read"): - # Assume a stream - data = cast(IO, data).read() - - if isinstance(data, bytes): - data_as_str = data.decode(encoding="utf-8-sig") - else: - # Explain to mypy the correct type. - data_as_str = cast(str, data) - - # Remove Byte Order Mark if present in string - data_as_str = data_as_str.lstrip(_BOM) - - if content_type is None: - return data - - if cls.JSON_REGEXP.match(content_type): - try: - return json.loads(data_as_str) - except ValueError as err: - raise DeserializationError("JSON is invalid: {}".format(err), err) from err - elif "xml" in (content_type or []): - try: - - try: - if isinstance(data, unicode): # type: ignore - # If I'm Python 2.7 and unicode XML will scream if I try a "fromstring" on unicode string - data_as_str = data_as_str.encode(encoding="utf-8") # type: ignore - except NameError: - pass - - return ET.fromstring(data_as_str) # nosec - except ET.ParseError as err: - # It might be because the server has an issue, and returned JSON with - # content-type XML.... - # So let's try a JSON load, and if it's still broken - # let's flow the initial exception - def _json_attemp(data): - try: - return True, json.loads(data) - except ValueError: - return False, None # Don't care about this one - - success, json_result = _json_attemp(data) - if success: - return json_result - # If i'm here, it's not JSON, it's not XML, let's scream - # and raise the last context in this block (the XML exception) - # The function hack is because Py2.7 messes up with exception - # context otherwise. 
- _LOGGER.critical("Wasn't XML not JSON, failing") - raise DeserializationError("XML is invalid") from err - elif content_type.startswith("text/"): - return data_as_str - raise DeserializationError("Cannot deserialize content-type: {}".format(content_type)) - - @classmethod - def deserialize_from_http_generics(cls, body_bytes: Optional[Union[AnyStr, IO]], headers: Mapping) -> Any: - """Deserialize from HTTP response. - - Use bytes and headers to NOT use any requests/aiohttp or whatever - specific implementation. - Headers will tested for "content-type" - - :param bytes body_bytes: The body of the response. - :param dict headers: The headers of the response. - :returns: The deserialized data. - :rtype: object - """ - # Try to use content-type from headers if available - content_type = None - if "content-type" in headers: - content_type = headers["content-type"].split(";")[0].strip().lower() - # Ouch, this server did not declare what it sent... - # Let's guess it's JSON... - # Also, since Autorest was considering that an empty body was a valid JSON, - # need that test as well.... - else: - content_type = "application/json" - - if body_bytes: - return cls.deserialize_from_text(body_bytes, content_type) - return None - - -_LOGGER = logging.getLogger(__name__) - -try: - _long_type = long # type: ignore -except NameError: - _long_type = int - - -class UTC(datetime.tzinfo): - """Time Zone info for handling UTC""" - - def utcoffset(self, dt): - """UTF offset for UTC is 0. - - :param datetime.datetime dt: The datetime - :returns: The offset - :rtype: datetime.timedelta - """ - return datetime.timedelta(0) - - def tzname(self, dt): - """Timestamp representation. - - :param datetime.datetime dt: The datetime - :returns: The timestamp representation - :rtype: str - """ - return "Z" - - def dst(self, dt): - """No daylight saving for UTC. - - :param datetime.datetime dt: The datetime - :returns: The daylight saving time - :rtype: datetime.timedelta - """ - return datetime.timedelta(hours=1) - - -try: - from datetime import timezone as _FixedOffset # type: ignore -except ImportError: # Python 2.7 - - class _FixedOffset(datetime.tzinfo): # type: ignore - """Fixed offset in minutes east from UTC. - Copy/pasted from Python doc - :param datetime.timedelta offset: offset in timedelta format - """ - - def __init__(self, offset) -> None: - self.__offset = offset - - def utcoffset(self, dt): - return self.__offset - - def tzname(self, dt): - return str(self.__offset.total_seconds() / 3600) - - def __repr__(self): - return "".format(self.tzname(None)) - - def dst(self, dt): - return datetime.timedelta(0) - - def __getinitargs__(self): - return (self.__offset,) - - -try: - from datetime import timezone - - TZ_UTC = timezone.utc -except ImportError: - TZ_UTC = UTC() # type: ignore - -_FLATTEN = re.compile(r"(? None: - self.additional_properties: Optional[Dict[str, Any]] = {} - for k in kwargs: # pylint: disable=consider-using-dict-items - if k not in self._attribute_map: - _LOGGER.warning("%s is not a known attribute of class %s and will be ignored", k, self.__class__) - elif k in self._validation and self._validation[k].get("readonly", False): - _LOGGER.warning("Readonly attribute %s will be ignored in class %s", k, self.__class__) - else: - setattr(self, k, kwargs[k]) - - def __eq__(self, other: Any) -> bool: - """Compare objects by comparing all attributes. 
- - :param object other: The object to compare - :returns: True if objects are equal - :rtype: bool - """ - if isinstance(other, self.__class__): - return self.__dict__ == other.__dict__ - return False - - def __ne__(self, other: Any) -> bool: - """Compare objects by comparing all attributes. - - :param object other: The object to compare - :returns: True if objects are not equal - :rtype: bool - """ - return not self.__eq__(other) - - def __str__(self) -> str: - return str(self.__dict__) - - @classmethod - def enable_additional_properties_sending(cls) -> None: - cls._attribute_map["additional_properties"] = {"key": "", "type": "{object}"} - - @classmethod - def is_xml_model(cls) -> bool: - try: - cls._xml_map # type: ignore - except AttributeError: - return False - return True - - @classmethod - def _create_xml_node(cls): - """Create XML node. - - :returns: The XML node - :rtype: xml.etree.ElementTree.Element - """ - try: - xml_map = cls._xml_map # type: ignore - except AttributeError: - xml_map = {} - - return _create_xml_node(xml_map.get("name", cls.__name__), xml_map.get("prefix", None), xml_map.get("ns", None)) - - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> JSON: - """Return the JSON that would be sent to server from this model. - - This is an alias to `as_dict(full_restapi_key_transformer, keep_readonly=False)`. - - If you want XML serialization, you can pass the kwargs is_xml=True. - - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict - """ - serializer = Serializer(self._infer_class_models()) - return serializer._serialize( # type: ignore # pylint: disable=protected-access - self, keep_readonly=keep_readonly, **kwargs - ) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = attribute_transformer, - **kwargs: Any - ) -> JSON: - """Return a dict that can be serialized using json.dump. - - Advanced usage might optionally use a callback as parameter: - - .. code::python - - def my_key_transformer(key, attr_desc, value): - return key - - Key is the attribute name used in Python. Attr_desc - is a dict of metadata. Currently contains 'type' with the - msrest type and 'key' with the RestAPI encoded key. - Value is the current value in this object. - - The string returned will be used to serialize the key. - If the return type is a list, this is considered hierarchical - result dict. - - See the three examples in this file: - - - attribute_transformer - - full_restapi_key_transformer - - last_restapi_key_transformer - - If you want XML serialization, you can pass the kwargs is_xml=True. - - :param bool keep_readonly: If you want to serialize the readonly attributes - :param function key_transformer: A key transformer function. - :returns: A dict JSON compatible object - :rtype: dict - """ - serializer = Serializer(self._infer_class_models()) - return serializer._serialize( # type: ignore # pylint: disable=protected-access - self, key_transformer=key_transformer, keep_readonly=keep_readonly, **kwargs - ) - - @classmethod - def _infer_class_models(cls): - try: - str_models = cls.__module__.rsplit(".", 1)[0] - models = sys.modules[str_models] - client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - if cls.__name__ not in client_models: - raise ValueError("Not Autorest generated code") - except Exception: # pylint: disable=broad-exception-caught - # Assume it's not Autorest generated (tests?). 
Add ourselves as dependencies. - client_models = {cls.__name__: cls} - return client_models - - @classmethod - def deserialize(cls: Type[ModelType], data: Any, content_type: Optional[str] = None) -> ModelType: - """Parse a str using the RestAPI syntax and return a model. - - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. - :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType - """ - deserializer = Deserializer(cls._infer_class_models()) - return deserializer(cls.__name__, data, content_type=content_type) # type: ignore - - @classmethod - def from_dict( - cls: Type[ModelType], - data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, - ) -> ModelType: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) - - :param dict data: A dict using RestAPI structure - :param function key_extractors: A key extractor function. - :param str content_type: JSON by default, set application/xml if XML. - :returns: An instance of this model - :raises: DeserializationError if something went wrong - :rtype: ModelType - """ - deserializer = Deserializer(cls._infer_class_models()) - deserializer.key_extractors = ( # type: ignore - [ # type: ignore - attribute_key_case_insensitive_extractor, - rest_key_case_insensitive_extractor, - last_rest_key_case_insensitive_extractor, - ] - if key_extractors is None - else key_extractors - ) - return deserializer(cls.__name__, data, content_type=content_type) # type: ignore - - @classmethod - def _flatten_subtype(cls, key, objects): - if "_subtype_map" not in cls.__dict__: - return {} - result = dict(cls._subtype_map[key]) - for valuetype in cls._subtype_map[key].values(): - result.update(objects[valuetype]._flatten_subtype(key, objects)) # pylint: disable=protected-access - return result - - @classmethod - def _classify(cls, response, objects): - """Check the class _subtype_map for any child classes. - We want to ignore any inherited _subtype_maps. - - :param dict response: The initial data - :param dict objects: The class objects - :returns: The class to be used - :rtype: class - """ - for subtype_key in cls.__dict__.get("_subtype_map", {}).keys(): - subtype_value = None - - if not isinstance(response, ET.Element): - rest_api_response_key = cls._get_rest_key_parts(subtype_key)[-1] - subtype_value = response.get(rest_api_response_key, None) or response.get(subtype_key, None) - else: - subtype_value = xml_key_extractor(subtype_key, cls._attribute_map[subtype_key], response) - if subtype_value: - # Try to match base class. 
Can be class name only - # (bug to fix in Autorest to support x-ms-discriminator-name) - if cls.__name__ == subtype_value: - return cls - flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) - try: - return objects[flatten_mapping_type[subtype_value]] # type: ignore - except KeyError: - _LOGGER.warning( - "Subtype value %s has no mapping, use base class %s.", - subtype_value, - cls.__name__, - ) - break - else: - _LOGGER.warning("Discriminator %s is absent or null, use base class %s.", subtype_key, cls.__name__) - break - return cls - - @classmethod - def _get_rest_key_parts(cls, attr_key): - """Get the RestAPI key of this attr, split it and decode part - :param str attr_key: Attribute key must be in attribute_map. - :returns: A list of RestAPI part - :rtype: list - """ - rest_split_key = _FLATTEN.split(cls._attribute_map[attr_key]["key"]) - return [_decode_attribute_map_key(key_part) for key_part in rest_split_key] - - -def _decode_attribute_map_key(key): - """This decode a key in an _attribute_map to the actual key we want to look at - inside the received data. - - :param str key: A key string from the generated code - :returns: The decoded key - :rtype: str - """ - return key.replace("\\.", ".") - - -class Serializer(object): # pylint: disable=too-many-public-methods - """Request object model serializer.""" - - basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - - _xml_basic_types_serializers = {"bool": lambda x: str(x).lower()} - days = {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"} - months = { - 1: "Jan", - 2: "Feb", - 3: "Mar", - 4: "Apr", - 5: "May", - 6: "Jun", - 7: "Jul", - 8: "Aug", - 9: "Sep", - 10: "Oct", - 11: "Nov", - 12: "Dec", - } - validation = { - "min_length": lambda x, y: len(x) < y, - "max_length": lambda x, y: len(x) > y, - "minimum": lambda x, y: x < y, - "maximum": lambda x, y: x > y, - "minimum_ex": lambda x, y: x <= y, - "maximum_ex": lambda x, y: x >= y, - "min_items": lambda x, y: len(x) < y, - "max_items": lambda x, y: len(x) > y, - "pattern": lambda x, y: not re.match(y, x, re.UNICODE), - "unique": lambda x, y: len(x) != len(set(x)), - "multiple": lambda x, y: x % y != 0, - } - - def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: - self.serialize_type = { - "iso-8601": Serializer.serialize_iso, - "rfc-1123": Serializer.serialize_rfc, - "unix-time": Serializer.serialize_unix, - "duration": Serializer.serialize_duration, - "date": Serializer.serialize_date, - "time": Serializer.serialize_time, - "decimal": Serializer.serialize_decimal, - "long": Serializer.serialize_long, - "bytearray": Serializer.serialize_bytearray, - "base64": Serializer.serialize_base64, - "object": self.serialize_object, - "[]": self.serialize_iter, - "{}": self.serialize_dict, - } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} - self.key_transformer = full_restapi_key_transformer - self.client_side_validation = True - - def _serialize( # pylint: disable=too-many-nested-blocks, too-many-branches, too-many-statements, too-many-locals - self, target_obj, data_type=None, **kwargs - ): - """Serialize data into a string according to type. - - :param object target_obj: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str, dict - :raises: SerializationError if serialization fails. - :returns: The serialized data. 
- """ - key_transformer = kwargs.get("key_transformer", self.key_transformer) - keep_readonly = kwargs.get("keep_readonly", False) - if target_obj is None: - return None - - attr_name = None - class_name = target_obj.__class__.__name__ - - if data_type: - return self.serialize_data(target_obj, data_type, **kwargs) - - if not hasattr(target_obj, "_attribute_map"): - data_type = type(target_obj).__name__ - if data_type in self.basic_types.values(): - return self.serialize_data(target_obj, data_type, **kwargs) - - # Force "is_xml" kwargs if we detect a XML model - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - is_xml_model_serialization = kwargs.setdefault("is_xml", target_obj.is_xml_model()) - - serialized = {} - if is_xml_model_serialization: - serialized = target_obj._create_xml_node() # pylint: disable=protected-access - try: - attributes = target_obj._attribute_map # pylint: disable=protected-access - for attr, attr_desc in attributes.items(): - attr_name = attr - if not keep_readonly and target_obj._validation.get( # pylint: disable=protected-access - attr_name, {} - ).get("readonly", False): - continue - - if attr_name == "additional_properties" and attr_desc["key"] == "": - if target_obj.additional_properties is not None: - serialized.update(target_obj.additional_properties) - continue - try: - - orig_attr = getattr(target_obj, attr) - if is_xml_model_serialization: - pass # Don't provide "transformer" for XML for now. Keep "orig_attr" - else: # JSON - keys, orig_attr = key_transformer(attr, attr_desc.copy(), orig_attr) - keys = keys if isinstance(keys, list) else [keys] - - kwargs["serialization_ctxt"] = attr_desc - new_attr = self.serialize_data(orig_attr, attr_desc["type"], **kwargs) - - if is_xml_model_serialization: - xml_desc = attr_desc.get("xml", {}) - xml_name = xml_desc.get("name", attr_desc["key"]) - xml_prefix = xml_desc.get("prefix", None) - xml_ns = xml_desc.get("ns", None) - if xml_desc.get("attr", False): - if xml_ns: - ET.register_namespace(xml_prefix, xml_ns) - xml_name = "{{{}}}{}".format(xml_ns, xml_name) - serialized.set(xml_name, new_attr) # type: ignore - continue - if xml_desc.get("text", False): - serialized.text = new_attr # type: ignore - continue - if isinstance(new_attr, list): - serialized.extend(new_attr) # type: ignore - elif isinstance(new_attr, ET.Element): - # If the down XML has no XML/Name, - # we MUST replace the tag with the local tag. But keeping the namespaces. 
- if "name" not in getattr(orig_attr, "_xml_map", {}): - splitted_tag = new_attr.tag.split("}") - if len(splitted_tag) == 2: # Namespace - new_attr.tag = "}".join([splitted_tag[0], xml_name]) - else: - new_attr.tag = xml_name - serialized.append(new_attr) # type: ignore - else: # That's a basic type - # Integrate namespace if necessary - local_node = _create_xml_node(xml_name, xml_prefix, xml_ns) - local_node.text = str(new_attr) - serialized.append(local_node) # type: ignore - else: # JSON - for k in reversed(keys): # type: ignore - new_attr = {k: new_attr} - - _new_attr = new_attr - _serialized = serialized - for k in keys: # type: ignore - if k not in _serialized: - _serialized.update(_new_attr) # type: ignore - _new_attr = _new_attr[k] # type: ignore - _serialized = _serialized[k] - except ValueError as err: - if isinstance(err, SerializationError): - raise - - except (AttributeError, KeyError, TypeError) as err: - msg = "Attribute {} in object {} cannot be serialized.\n{}".format(attr_name, class_name, str(target_obj)) - raise SerializationError(msg) from err - return serialized - - def body(self, data, data_type, **kwargs): - """Serialize data intended for a request body. - - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: dict - :raises: SerializationError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized request body - """ - - # Just in case this is a dict - internal_data_type_str = data_type.strip("[]{}") - internal_data_type = self.dependencies.get(internal_data_type_str, None) - try: - is_xml_model_serialization = kwargs["is_xml"] - except KeyError: - if internal_data_type and issubclass(internal_data_type, Model): - is_xml_model_serialization = kwargs.setdefault("is_xml", internal_data_type.is_xml_model()) - else: - is_xml_model_serialization = False - if internal_data_type and not isinstance(internal_data_type, Enum): - try: - deserializer = Deserializer(self.dependencies) - # Since it's on serialization, it's almost sure that format is not JSON REST - # We're not able to deal with additional properties for now. - deserializer.additional_properties_detection = False - if is_xml_model_serialization: - deserializer.key_extractors = [ # type: ignore - attribute_key_case_insensitive_extractor, - ] - else: - deserializer.key_extractors = [ - rest_key_case_insensitive_extractor, - attribute_key_case_insensitive_extractor, - last_rest_key_case_insensitive_extractor, - ] - data = deserializer._deserialize(data_type, data) # pylint: disable=protected-access - except DeserializationError as err: - raise SerializationError("Unable to build a model: " + str(err)) from err - - return self._serialize(data, data_type, **kwargs) - - def url(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL path. - - :param str name: The name of the URL path parameter. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :returns: The serialized URL path - :raises: TypeError if serialization fails. 
- :raises: ValueError if data is None - """ - try: - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - - if kwargs.get("skip_quote") is True: - output = str(output) - output = output.replace("{", quote("{")).replace("}", quote("}")) - else: - output = quote(str(output), safe="") - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return output - - def query(self, name, data, data_type, **kwargs): - """Serialize data intended for a URL query. - - :param str name: The name of the query parameter. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str, list - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized query parameter - """ - try: - # Treat the list aside, since we don't want to encode the div separator - if data_type.startswith("["): - internal_data_type = data_type[1:-1] - do_quote = not kwargs.get("skip_quote", False) - return self.serialize_iter(data, internal_data_type, do_quote=do_quote, **kwargs) - - # Not a list, regular serialization - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - if kwargs.get("skip_quote") is True: - output = str(output) - else: - output = quote(str(output), safe="") - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return str(output) - - def header(self, name, data, data_type, **kwargs): - """Serialize data intended for a request header. - - :param str name: The name of the header. - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :rtype: str - :raises: TypeError if serialization fails. - :raises: ValueError if data is None - :returns: The serialized header - """ - try: - if data_type in ["[str]"]: - data = ["" if d is None else d for d in data] - - output = self.serialize_data(data, data_type, **kwargs) - if data_type == "bool": - output = json.dumps(output) - except SerializationError as exc: - raise TypeError("{} must be type {}.".format(name, data_type)) from exc - return str(output) - - def serialize_data(self, data, data_type, **kwargs): - """Serialize generic data according to supplied data type. - - :param object data: The data to be serialized. - :param str data_type: The type to be serialized from. - :raises: AttributeError if required data is None. - :raises: ValueError if data is None - :raises: SerializationError if serialization fails. - :returns: The serialized data. 
- :rtype: str, int, float, bool, dict, list - """ - if data is None: - raise ValueError("No value for given attribute") - - try: - if data is CoreNull: - return None - if data_type in self.basic_types.values(): - return self.serialize_basic(data, data_type, **kwargs) - - if data_type in self.serialize_type: - return self.serialize_type[data_type](data, **kwargs) - - # If dependencies is empty, try with current data class - # It has to be a subclass of Enum anyway - enum_type = self.dependencies.get(data_type, data.__class__) - if issubclass(enum_type, Enum): - return Serializer.serialize_enum(data, enum_obj=enum_type) - - iter_type = data_type[0] + data_type[-1] - if iter_type in self.serialize_type: - return self.serialize_type[iter_type](data, data_type[1:-1], **kwargs) - - except (ValueError, TypeError) as err: - msg = "Unable to serialize value: {!r} as type: {!r}." - raise SerializationError(msg.format(data, data_type)) from err - return self._serialize(data, **kwargs) - - @classmethod - def _get_custom_serializers(cls, data_type, **kwargs): # pylint: disable=inconsistent-return-statements - custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) - if custom_serializer: - return custom_serializer - if kwargs.get("is_xml", False): - return cls._xml_basic_types_serializers.get(data_type) - - @classmethod - def serialize_basic(cls, data, data_type, **kwargs): - """Serialize basic builting data type. - Serializes objects to str, int, float or bool. - - Possible kwargs: - - basic_types_serializers dict[str, callable] : If set, use the callable as serializer - - is_xml bool : If set, use xml_basic_types_serializers - - :param obj data: Object to be serialized. - :param str data_type: Type of object in the iterable. - :rtype: str, int, float, bool - :return: serialized object - """ - custom_serializer = cls._get_custom_serializers(data_type, **kwargs) - if custom_serializer: - return custom_serializer(data) - if data_type == "str": - return cls.serialize_unicode(data) - return eval(data_type)(data) # nosec # pylint: disable=eval-used - - @classmethod - def serialize_unicode(cls, data): - """Special handling for serializing unicode strings in Py2. - Encode to UTF-8 if unicode, otherwise handle as a str. - - :param str data: Object to be serialized. - :rtype: str - :return: serialized object - """ - try: # If I received an enum, return its value - return data.value - except AttributeError: - pass - - try: - if isinstance(data, unicode): # type: ignore - # Don't change it, JSON and XML ElementTree are totally able - # to serialize correctly u'' strings - return data - except NameError: - return str(data) - return str(data) - - def serialize_iter(self, data, iter_type, div=None, **kwargs): - """Serialize iterable. - - Supported kwargs: - - serialization_ctxt dict : The current entry of _attribute_map, or same format. - serialization_ctxt['type'] should be same as data_type. - - is_xml bool : If set, serialize as XML - - :param list data: Object to be serialized. - :param str iter_type: Type of object in the iterable. - :param str div: If set, this str will be used to combine the elements - in the iterable into a combined string. Default is 'None'. - Defaults to False. 
- :rtype: list, str - :return: serialized iterable - """ - if isinstance(data, str): - raise SerializationError("Refuse str type as a valid iter type.") - - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - is_xml = kwargs.get("is_xml", False) - - serialized = [] - for d in data: - try: - serialized.append(self.serialize_data(d, iter_type, **kwargs)) - except ValueError as err: - if isinstance(err, SerializationError): - raise - serialized.append(None) - - if kwargs.get("do_quote", False): - serialized = ["" if s is None else quote(str(s), safe="") for s in serialized] - - if div: - serialized = ["" if s is None else str(s) for s in serialized] - serialized = div.join(serialized) - - if "xml" in serialization_ctxt or is_xml: - # XML serialization is more complicated - xml_desc = serialization_ctxt.get("xml", {}) - xml_name = xml_desc.get("name") - if not xml_name: - xml_name = serialization_ctxt["key"] - - # Create a wrap node if necessary (use the fact that Element and list have "append") - is_wrapped = xml_desc.get("wrapped", False) - node_name = xml_desc.get("itemsName", xml_name) - if is_wrapped: - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - else: - final_result = [] - # All list elements to "local_node" - for el in serialized: - if isinstance(el, ET.Element): - el_node = el - else: - el_node = _create_xml_node(node_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - if el is not None: # Otherwise it writes "None" :-p - el_node.text = str(el) - final_result.append(el_node) - return final_result - return serialized - - def serialize_dict(self, attr, dict_type, **kwargs): - """Serialize a dictionary of objects. - - :param dict attr: Object to be serialized. - :param str dict_type: Type of object in the dictionary. - :rtype: dict - :return: serialized dictionary - """ - serialization_ctxt = kwargs.get("serialization_ctxt", {}) - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_data(value, dict_type, **kwargs) - except ValueError as err: - if isinstance(err, SerializationError): - raise - serialized[self.serialize_unicode(key)] = None - - if "xml" in serialization_ctxt: - # XML serialization is more complicated - xml_desc = serialization_ctxt["xml"] - xml_name = xml_desc["name"] - - final_result = _create_xml_node(xml_name, xml_desc.get("prefix", None), xml_desc.get("ns", None)) - for key, value in serialized.items(): - ET.SubElement(final_result, key).text = value - return final_result - - return serialized - - def serialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements - """Serialize a generic object. - This will be handled as a dictionary. If object passed in is not - a basic type (str, int, float, dict, list) it will simply be - cast to str. - - :param dict attr: Object to be serialized. 
- :rtype: dict or str - :return: serialized object - """ - if attr is None: - return None - if isinstance(attr, ET.Element): - return attr - obj_type = type(attr) - if obj_type in self.basic_types: - return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) - if obj_type is _long_type: - return self.serialize_long(attr) - if obj_type is str: - return self.serialize_unicode(attr) - if obj_type is datetime.datetime: - return self.serialize_iso(attr) - if obj_type is datetime.date: - return self.serialize_date(attr) - if obj_type is datetime.time: - return self.serialize_time(attr) - if obj_type is datetime.timedelta: - return self.serialize_duration(attr) - if obj_type is decimal.Decimal: - return self.serialize_decimal(attr) - - # If it's a model or I know this dependency, serialize as a Model - if obj_type in self.dependencies.values() or isinstance(attr, Model): - return self._serialize(attr) - - if obj_type == dict: - serialized = {} - for key, value in attr.items(): - try: - serialized[self.serialize_unicode(key)] = self.serialize_object(value, **kwargs) - except ValueError: - serialized[self.serialize_unicode(key)] = None - return serialized - - if obj_type == list: - serialized = [] - for obj in attr: - try: - serialized.append(self.serialize_object(obj, **kwargs)) - except ValueError: - pass - return serialized - return str(attr) - - @staticmethod - def serialize_enum(attr, enum_obj=None): - try: - result = attr.value - except AttributeError: - result = attr - try: - enum_obj(result) # type: ignore - return result - except ValueError as exc: - for enum_value in enum_obj: # type: ignore - if enum_value.value.lower() == str(attr).lower(): - return enum_value.value - error = "{!r} is not valid value for enum {!r}" - raise SerializationError(error.format(attr, enum_obj)) from exc - - @staticmethod - def serialize_bytearray(attr, **kwargs): # pylint: disable=unused-argument - """Serialize bytearray into base-64 string. - - :param str attr: Object to be serialized. - :rtype: str - :return: serialized base64 - """ - return b64encode(attr).decode() - - @staticmethod - def serialize_base64(attr, **kwargs): # pylint: disable=unused-argument - """Serialize str into base-64 string. - - :param str attr: Object to be serialized. - :rtype: str - :return: serialized base64 - """ - encoded = b64encode(attr).decode("ascii") - return encoded.strip("=").replace("+", "-").replace("/", "_") - - @staticmethod - def serialize_decimal(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Decimal object to float. - - :param decimal attr: Object to be serialized. - :rtype: float - :return: serialized decimal - """ - return float(attr) - - @staticmethod - def serialize_long(attr, **kwargs): # pylint: disable=unused-argument - """Serialize long (Py2) or int (Py3). - - :param int attr: Object to be serialized. - :rtype: int/long - :return: serialized long - """ - return _long_type(attr) - - @staticmethod - def serialize_date(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Date object into ISO-8601 formatted string. - - :param Date attr: Object to be serialized. - :rtype: str - :return: serialized date - """ - if isinstance(attr, str): - attr = isodate.parse_date(attr) - t = "{:04}-{:02}-{:02}".format(attr.year, attr.month, attr.day) - return t - - @staticmethod - def serialize_time(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Time object into ISO-8601 formatted string. - - :param datetime.time attr: Object to be serialized. 
- :rtype: str - :return: serialized time - """ - if isinstance(attr, str): - attr = isodate.parse_time(attr) - t = "{:02}:{:02}:{:02}".format(attr.hour, attr.minute, attr.second) - if attr.microsecond: - t += ".{:02}".format(attr.microsecond) - return t - - @staticmethod - def serialize_duration(attr, **kwargs): # pylint: disable=unused-argument - """Serialize TimeDelta object into ISO-8601 formatted string. - - :param TimeDelta attr: Object to be serialized. - :rtype: str - :return: serialized duration - """ - if isinstance(attr, str): - attr = isodate.parse_duration(attr) - return isodate.duration_isoformat(attr) - - @staticmethod - def serialize_rfc(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Datetime object into RFC-1123 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: TypeError if format invalid. - :return: serialized rfc - """ - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - utc = attr.utctimetuple() - except AttributeError as exc: - raise TypeError("RFC1123 object must be valid Datetime object.") from exc - - return "{}, {:02} {} {:04} {:02}:{:02}:{:02} GMT".format( - Serializer.days[utc.tm_wday], - utc.tm_mday, - Serializer.months[utc.tm_mon], - utc.tm_year, - utc.tm_hour, - utc.tm_min, - utc.tm_sec, - ) - - @staticmethod - def serialize_iso(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Datetime object into ISO-8601 formatted string. - - :param Datetime attr: Object to be serialized. - :rtype: str - :raises: SerializationError if format invalid. - :return: serialized iso - """ - if isinstance(attr, str): - attr = isodate.parse_datetime(attr) - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - utc = attr.utctimetuple() - if utc.tm_year > 9999 or utc.tm_year < 1: - raise OverflowError("Hit max or min date") - - microseconds = str(attr.microsecond).rjust(6, "0").rstrip("0").ljust(3, "0") - if microseconds: - microseconds = "." + microseconds - date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format( - utc.tm_year, utc.tm_mon, utc.tm_mday, utc.tm_hour, utc.tm_min, utc.tm_sec - ) - return date + microseconds + "Z" - except (ValueError, OverflowError) as err: - msg = "Unable to serialize datetime object." - raise SerializationError(msg) from err - except AttributeError as err: - msg = "ISO-8601 object must be valid Datetime object." - raise TypeError(msg) from err - - @staticmethod - def serialize_unix(attr, **kwargs): # pylint: disable=unused-argument - """Serialize Datetime object into IntTime format. - This is represented as seconds. - - :param Datetime attr: Object to be serialized. - :rtype: int - :raises: SerializationError if format invalid - :return: serialied unix - """ - if isinstance(attr, int): - return attr - try: - if not attr.tzinfo: - _LOGGER.warning("Datetime with no tzinfo will be considered UTC.") - return int(calendar.timegm(attr.utctimetuple())) - except AttributeError as exc: - raise TypeError("Unix time object must be valid Datetime object.") from exc - - -def rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - key = attr_desc["key"] - working_data = data - - while "." 
in key: - # Need the cast, as for some reasons "split" is typed as list[str | Any] - dict_keys = cast(List[str], _FLATTEN.split(key)) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = working_data.get(working_key, data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - return None - key = ".".join(dict_keys[1:]) - - return working_data.get(key) - - -def rest_key_case_insensitive_extractor( # pylint: disable=unused-argument, inconsistent-return-statements - attr, attr_desc, data -): - key = attr_desc["key"] - working_data = data - - while "." in key: - dict_keys = _FLATTEN.split(key) - if len(dict_keys) == 1: - key = _decode_attribute_map_key(dict_keys[0]) - break - working_key = _decode_attribute_map_key(dict_keys[0]) - working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) - if working_data is None: - # If at any point while following flatten JSON path see None, it means - # that all properties under are None as well - return None - key = ".".join(dict_keys[1:]) - - if working_data: - return attribute_key_case_insensitive_extractor(key, None, working_data) - - -def last_rest_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - """Extract the attribute in "data" based on the last part of the JSON path key. - - :param str attr: The attribute to extract - :param dict attr_desc: The attribute description - :param dict data: The data to extract from - :rtype: object - :returns: The extracted attribute - """ - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_extractor(dict_keys[-1], None, data) - - -def last_rest_key_case_insensitive_extractor(attr, attr_desc, data): # pylint: disable=unused-argument - """Extract the attribute in "data" based on the last part of the JSON path key. - - This is the case insensitive version of "last_rest_key_extractor" - :param str attr: The attribute to extract - :param dict attr_desc: The attribute description - :param dict data: The data to extract from - :rtype: object - :returns: The extracted attribute - """ - key = attr_desc["key"] - dict_keys = _FLATTEN.split(key) - return attribute_key_case_insensitive_extractor(dict_keys[-1], None, data) - - -def attribute_key_extractor(attr, _, data): - return data.get(attr) - - -def attribute_key_case_insensitive_extractor(attr, _, data): - found_key = None - lower_attr = attr.lower() - for key in data: - if lower_attr == key.lower(): - found_key = key - break - - return data.get(found_key) - - -def _extract_name_from_internal_type(internal_type): - """Given an internal type XML description, extract correct XML name with namespace. 
- - :param dict internal_type: An model type - :rtype: tuple - :returns: A tuple XML name + namespace dict - """ - internal_type_xml_map = getattr(internal_type, "_xml_map", {}) - xml_name = internal_type_xml_map.get("name", internal_type.__name__) - xml_ns = internal_type_xml_map.get("ns", None) - if xml_ns: - xml_name = "{{{}}}{}".format(xml_ns, xml_name) - return xml_name - - -def xml_key_extractor(attr, attr_desc, data): # pylint: disable=unused-argument,too-many-return-statements - if isinstance(data, dict): - return None - - # Test if this model is XML ready first - if not isinstance(data, ET.Element): - return None - - xml_desc = attr_desc.get("xml", {}) - xml_name = xml_desc.get("name", attr_desc["key"]) - - # Look for a children - is_iter_type = attr_desc["type"].startswith("[") - is_wrapped = xml_desc.get("wrapped", False) - internal_type = attr_desc.get("internalType", None) - internal_type_xml_map = getattr(internal_type, "_xml_map", {}) - - # Integrate namespace if necessary - xml_ns = xml_desc.get("ns", internal_type_xml_map.get("ns", None)) - if xml_ns: - xml_name = "{{{}}}{}".format(xml_ns, xml_name) - - # If it's an attribute, that's simple - if xml_desc.get("attr", False): - return data.get(xml_name) - - # If it's x-ms-text, that's simple too - if xml_desc.get("text", False): - return data.text - - # Scenario where I take the local name: - # - Wrapped node - # - Internal type is an enum (considered basic types) - # - Internal type has no XML/Name node - if is_wrapped or (internal_type and (issubclass(internal_type, Enum) or "name" not in internal_type_xml_map)): - children = data.findall(xml_name) - # If internal type has a local name and it's not a list, I use that name - elif not is_iter_type and internal_type and "name" in internal_type_xml_map: - xml_name = _extract_name_from_internal_type(internal_type) - children = data.findall(xml_name) - # That's an array - else: - if internal_type: # Complex type, ignore itemsName and use the complex type name - items_name = _extract_name_from_internal_type(internal_type) - else: - items_name = xml_desc.get("itemsName", xml_name) - children = data.findall(items_name) - - if len(children) == 0: - if is_iter_type: - if is_wrapped: - return None # is_wrapped no node, we want None - return [] # not wrapped, assume empty list - return None # Assume it's not there, maybe an optional node. - - # If is_iter_type and not wrapped, return all found children - if is_iter_type: - if not is_wrapped: - return children - # Iter and wrapped, should have found one node only (the wrap one) - if len(children) != 1: - raise DeserializationError( - "Tried to deserialize an array not wrapped, and found several nodes '{}'. Maybe you should declare this array as wrapped?".format( # pylint: disable=line-too-long - xml_name - ) - ) - return list(children[0]) # Might be empty list and that's ok. - - # Here it's not a itertype, we should have found one element only or empty - if len(children) > 1: - raise DeserializationError("Find several XML '{}' where it was not expected".format(xml_name)) - return children[0] - - -class Deserializer(object): - """Response object model deserializer. - - :param dict classes: Class type dictionary for deserializing complex types. - :ivar list key_extractors: Ordered list of extractors to be used by this deserializer. 
- """ - - basic_types = {str: "str", int: "int", bool: "bool", float: "float"} - - valid_date = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?") - - def __init__(self, classes: Optional[Mapping[str, type]] = None) -> None: - self.deserialize_type = { - "iso-8601": Deserializer.deserialize_iso, - "rfc-1123": Deserializer.deserialize_rfc, - "unix-time": Deserializer.deserialize_unix, - "duration": Deserializer.deserialize_duration, - "date": Deserializer.deserialize_date, - "time": Deserializer.deserialize_time, - "decimal": Deserializer.deserialize_decimal, - "long": Deserializer.deserialize_long, - "bytearray": Deserializer.deserialize_bytearray, - "base64": Deserializer.deserialize_base64, - "object": self.deserialize_object, - "[]": self.deserialize_iter, - "{}": self.deserialize_dict, - } - self.deserialize_expected_types = { - "duration": (isodate.Duration, datetime.timedelta), - "iso-8601": (datetime.datetime), - } - self.dependencies: Dict[str, type] = dict(classes) if classes else {} - self.key_extractors = [rest_key_extractor, xml_key_extractor] - # Additional properties only works if the "rest_key_extractor" is used to - # extract the keys. Making it to work whatever the key extractor is too much - # complicated, with no real scenario for now. - # So adding a flag to disable additional properties detection. This flag should be - # used if your expect the deserialization to NOT come from a JSON REST syntax. - # Otherwise, result are unexpected - self.additional_properties_detection = True - - def __call__(self, target_obj, response_data, content_type=None): - """Call the deserializer to process a REST response. - - :param str target_obj: Target data type to deserialize to. - :param requests.Response response_data: REST response object. - :param str content_type: Swagger "produces" if available. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - :rtype: object - """ - data = self._unpack_content(response_data, content_type) - return self._deserialize(target_obj, data) - - def _deserialize(self, target_obj, data): # pylint: disable=inconsistent-return-statements - """Call the deserializer on a model. - - Data needs to be already deserialized as JSON or XML ElementTree - - :param str target_obj: Target data type to deserialize to. - :param object data: Object to deserialize. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. 
- :rtype: object - """ - # This is already a model, go recursive just in case - if hasattr(data, "_attribute_map"): - constants = [name for name, config in getattr(data, "_validation", {}).items() if config.get("constant")] - try: - for attr, mapconfig in data._attribute_map.items(): # pylint: disable=protected-access - if attr in constants: - continue - value = getattr(data, attr) - if value is None: - continue - local_type = mapconfig["type"] - internal_data_type = local_type.strip("[]{}") - if internal_data_type not in self.dependencies or isinstance(internal_data_type, Enum): - continue - setattr(data, attr, self._deserialize(local_type, value)) - return data - except AttributeError: - return - - response, class_name = self._classify_target(target_obj, data) - - if isinstance(response, str): - return self.deserialize_data(data, response) - if isinstance(response, type) and issubclass(response, Enum): - return self.deserialize_enum(data, response) - - if data is None or data is CoreNull: - return data - try: - attributes = response._attribute_map # type: ignore # pylint: disable=protected-access - d_attrs = {} - for attr, attr_desc in attributes.items(): - # Check empty string. If it's not empty, someone has a real "additionalProperties"... - if attr == "additional_properties" and attr_desc["key"] == "": - continue - raw_value = None - # Enhance attr_desc with some dynamic data - attr_desc = attr_desc.copy() # Do a copy, do not change the real one - internal_data_type = attr_desc["type"].strip("[]{}") - if internal_data_type in self.dependencies: - attr_desc["internalType"] = self.dependencies[internal_data_type] - - for key_extractor in self.key_extractors: - found_value = key_extractor(attr, attr_desc, data) - if found_value is not None: - if raw_value is not None and raw_value != found_value: - msg = ( - "Ignoring extracted value '%s' from %s for key '%s'" - " (duplicate extraction, follow extractors order)" - ) - _LOGGER.warning(msg, found_value, key_extractor, attr) - continue - raw_value = found_value - - value = self.deserialize_data(raw_value, attr_desc["type"]) - d_attrs[attr] = value - except (AttributeError, TypeError, KeyError) as err: - msg = "Unable to deserialize to object: " + class_name # type: ignore - raise DeserializationError(msg) from err - additional_properties = self._build_additional_properties(attributes, data) - return self._instantiate_model(response, d_attrs, additional_properties) - - def _build_additional_properties(self, attribute_map, data): - if not self.additional_properties_detection: - return None - if "additional_properties" in attribute_map and attribute_map.get("additional_properties", {}).get("key") != "": - # Check empty string. If it's not empty, someone has a real "additionalProperties" - return None - if isinstance(data, ET.Element): - data = {el.tag: el.text for el in data} - - known_keys = { - _decode_attribute_map_key(_FLATTEN.split(desc["key"])[0]) - for desc in attribute_map.values() - if desc["key"] != "" - } - present_keys = set(data.keys()) - missing_keys = present_keys - known_keys - return {key: data[key] for key in missing_keys} - - def _classify_target(self, target, data): - """Check to see whether the deserialization target object can - be classified into a subclass. - Once classification has been determined, initialize object. - - :param str target: The target object type to deserialize to. - :param str/dict data: The response data to deserialize. - :return: The classified target object and its class name. 
- :rtype: tuple - """ - if target is None: - return None, None - - if isinstance(target, str): - try: - target = self.dependencies[target] - except KeyError: - return target, target - - try: - target = target._classify(data, self.dependencies) # type: ignore # pylint: disable=protected-access - except AttributeError: - pass # Target is not a Model, no classify - return target, target.__class__.__name__ # type: ignore - - def failsafe_deserialize(self, target_obj, data, content_type=None): - """Ignores any errors encountered in deserialization, - and falls back to not deserializing the object. Recommended - for use in error deserialization, as we want to return the - HttpResponseError to users, and not have them deal with - a deserialization error. - - :param str target_obj: The target object type to deserialize to. - :param str/dict data: The response data to deserialize. - :param str content_type: Swagger "produces" if available. - :return: Deserialized object. - :rtype: object - """ - try: - return self(target_obj, data, content_type=content_type) - except: # pylint: disable=bare-except - _LOGGER.debug( - "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True - ) - return None - - @staticmethod - def _unpack_content(raw_data, content_type=None): - """Extract the correct structure for deserialization. - - If raw_data is a PipelineResponse, try to extract the result of RawDeserializer. - if we can't, raise. Your Pipeline should have a RawDeserializer. - - If not a pipeline response and raw_data is bytes or string, use content-type - to decode it. If no content-type, try JSON. - - If raw_data is something else, bypass all logic and return it directly. - - :param obj raw_data: Data to be processed. - :param str content_type: How to parse if raw_data is a string/bytes. - :raises JSONDecodeError: If JSON is requested and parsing is impossible. - :raises UnicodeDecodeError: If bytes is not UTF8 - :rtype: object - :return: Unpacked content. - """ - # Assume this is enough to detect a Pipeline Response without importing it - context = getattr(raw_data, "context", {}) - if context: - if RawDeserializer.CONTEXT_NAME in context: - return context[RawDeserializer.CONTEXT_NAME] - raise ValueError("This pipeline didn't have the RawDeserializer policy; can't deserialize") - - # Assume this is enough to recognize universal_http.ClientResponse without importing it - if hasattr(raw_data, "body"): - return RawDeserializer.deserialize_from_http_generics(raw_data.text(), raw_data.headers) - - # Assume this enough to recognize requests.Response without importing it. - if hasattr(raw_data, "_content_consumed"): - return RawDeserializer.deserialize_from_http_generics(raw_data.text, raw_data.headers) - - if isinstance(raw_data, (str, bytes)) or hasattr(raw_data, "read"): - return RawDeserializer.deserialize_from_text(raw_data, content_type) # type: ignore - return raw_data - - def _instantiate_model(self, response, attrs, additional_properties=None): - """Instantiate a response model passing in deserialized args. - - :param Response response: The response model class. - :param dict attrs: The deserialized response attributes. - :param dict additional_properties: Additional properties to be set. - :rtype: Response - :return: The instantiated response model. 
- """ - if callable(response): - subtype = getattr(response, "_subtype_map", {}) - try: - readonly = [ - k for k, v in response._validation.items() if v.get("readonly") # pylint: disable=protected-access - ] - const = [ - k for k, v in response._validation.items() if v.get("constant") # pylint: disable=protected-access - ] - kwargs = {k: v for k, v in attrs.items() if k not in subtype and k not in readonly + const} - response_obj = response(**kwargs) - for attr in readonly: - setattr(response_obj, attr, attrs.get(attr)) - if additional_properties: - response_obj.additional_properties = additional_properties - return response_obj - except TypeError as err: - msg = "Unable to deserialize {} into model {}. ".format(kwargs, response) # type: ignore - raise DeserializationError(msg + str(err)) from err - else: - try: - for attr, value in attrs.items(): - setattr(response, attr, value) - return response - except Exception as exp: - msg = "Unable to populate response model. " - msg += "Type: {}, Error: {}".format(type(response), exp) - raise DeserializationError(msg) from exp - - def deserialize_data(self, data, data_type): # pylint: disable=too-many-return-statements - """Process data for deserialization according to data type. - - :param str data: The response string to be deserialized. - :param str data_type: The type to deserialize to. - :raises: DeserializationError if deserialization fails. - :return: Deserialized object. - :rtype: object - """ - if data is None: - return data - - try: - if not data_type: - return data - if data_type in self.basic_types.values(): - return self.deserialize_basic(data, data_type) - if data_type in self.deserialize_type: - if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): - return data - - is_a_text_parsing_type = lambda x: x not in [ # pylint: disable=unnecessary-lambda-assignment - "object", - "[]", - r"{}", - ] - if isinstance(data, ET.Element) and is_a_text_parsing_type(data_type) and not data.text: - return None - data_val = self.deserialize_type[data_type](data) - return data_val - - iter_type = data_type[0] + data_type[-1] - if iter_type in self.deserialize_type: - return self.deserialize_type[iter_type](data, data_type[1:-1]) - - obj_type = self.dependencies[data_type] - if issubclass(obj_type, Enum): - if isinstance(data, ET.Element): - data = data.text - return self.deserialize_enum(data, obj_type) - - except (ValueError, TypeError, AttributeError) as err: - msg = "Unable to deserialize response data." - msg += " Data: {}, {}".format(data, data_type) - raise DeserializationError(msg) from err - return self._deserialize(obj_type, data) - - def deserialize_iter(self, attr, iter_type): - """Deserialize an iterable. - - :param list attr: Iterable to be deserialized. - :param str iter_type: The type of object in the iterable. - :return: Deserialized iterable. - :rtype: list - """ - if attr is None: - return None - if isinstance(attr, ET.Element): # If I receive an element here, get the children - attr = list(attr) - if not isinstance(attr, (list, set)): - raise DeserializationError("Cannot deserialize as [{}] an object of type {}".format(iter_type, type(attr))) - return [self.deserialize_data(a, iter_type) for a in attr] - - def deserialize_dict(self, attr, dict_type): - """Deserialize a dictionary. - - :param dict/list attr: Dictionary to be deserialized. Also accepts - a list of key, value pairs. - :param str dict_type: The object type of the items in the dictionary. - :return: Deserialized dictionary. 
- :rtype: dict - """ - if isinstance(attr, list): - return {x["key"]: self.deserialize_data(x["value"], dict_type) for x in attr} - - if isinstance(attr, ET.Element): - # Transform value into {"Key": "value"} - attr = {el.tag: el.text for el in attr} - return {k: self.deserialize_data(v, dict_type) for k, v in attr.items()} - - def deserialize_object(self, attr, **kwargs): # pylint: disable=too-many-return-statements - """Deserialize a generic object. - This will be handled as a dictionary. - - :param dict attr: Dictionary to be deserialized. - :return: Deserialized object. - :rtype: dict - :raises: TypeError if non-builtin datatype encountered. - """ - if attr is None: - return None - if isinstance(attr, ET.Element): - # Do no recurse on XML, just return the tree as-is - return attr - if isinstance(attr, str): - return self.deserialize_basic(attr, "str") - obj_type = type(attr) - if obj_type in self.basic_types: - return self.deserialize_basic(attr, self.basic_types[obj_type]) - if obj_type is _long_type: - return self.deserialize_long(attr) - - if obj_type == dict: - deserialized = {} - for key, value in attr.items(): - try: - deserialized[key] = self.deserialize_object(value, **kwargs) - except ValueError: - deserialized[key] = None - return deserialized - - if obj_type == list: - deserialized = [] - for obj in attr: - try: - deserialized.append(self.deserialize_object(obj, **kwargs)) - except ValueError: - pass - return deserialized - - error = "Cannot deserialize generic object with type: " - raise TypeError(error + str(obj_type)) - - def deserialize_basic(self, attr, data_type): # pylint: disable=too-many-return-statements - """Deserialize basic builtin data type from string. - Will attempt to convert to str, int, float and bool. - This function will also accept '1', '0', 'true' and 'false' as - valid bool values. - - :param str attr: response string to be deserialized. - :param str data_type: deserialization data type. - :return: Deserialized basic type. - :rtype: str, int, float or bool - :raises: TypeError if string format is not valid. - """ - # If we're here, data is supposed to be a basic type. - # If it's still an XML node, take the text - if isinstance(attr, ET.Element): - attr = attr.text - if not attr: - if data_type == "str": - # None or '', node is empty string. - return "" - # None or '', node with a strong type is None. - # Don't try to model "empty bool" or "empty int" - return None - - if data_type == "bool": - if attr in [True, False, 1, 0]: - return bool(attr) - if isinstance(attr, str): - if attr.lower() in ["true", "1"]: - return True - if attr.lower() in ["false", "0"]: - return False - raise TypeError("Invalid boolean value: {}".format(attr)) - - if data_type == "str": - return self.deserialize_unicode(attr) - return eval(data_type)(attr) # nosec # pylint: disable=eval-used - - @staticmethod - def deserialize_unicode(data): - """Preserve unicode objects in Python 2, otherwise return data - as a string. - - :param str data: response string to be deserialized. - :return: Deserialized string. - :rtype: str or unicode - """ - # We might be here because we have an enum modeled as string, - # and we try to deserialize a partial dict with enum inside - if isinstance(data, Enum): - return data - - # Consider this is real string - try: - if isinstance(data, unicode): # type: ignore - return data - except NameError: - return str(data) - return str(data) - - @staticmethod - def deserialize_enum(data, enum_obj): - """Deserialize string into enum object. 
- - If the string is not a valid enum value it will be returned as-is - and a warning will be logged. - - :param str data: Response string to be deserialized. If this value is - None or invalid it will be returned as-is. - :param Enum enum_obj: Enum object to deserialize to. - :return: Deserialized enum object. - :rtype: Enum - """ - if isinstance(data, enum_obj) or data is None: - return data - if isinstance(data, Enum): - data = data.value - if isinstance(data, int): - # Workaround. We might consider remove it in the future. - try: - return list(enum_obj.__members__.values())[data] - except IndexError as exc: - error = "{!r} is not a valid index for enum {!r}" - raise DeserializationError(error.format(data, enum_obj)) from exc - try: - return enum_obj(str(data)) - except ValueError: - for enum_value in enum_obj: - if enum_value.value.lower() == str(data).lower(): - return enum_value - # We don't fail anymore for unknown value, we deserialize as a string - _LOGGER.warning("Deserializer is not able to find %s as valid enum in %s", data, enum_obj) - return Deserializer.deserialize_unicode(data) - - @staticmethod - def deserialize_bytearray(attr): - """Deserialize string into bytearray. - - :param str attr: response string to be deserialized. - :return: Deserialized bytearray - :rtype: bytearray - :raises: TypeError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - return bytearray(b64decode(attr)) # type: ignore - - @staticmethod - def deserialize_base64(attr): - """Deserialize base64 encoded string into string. - - :param str attr: response string to be deserialized. - :return: Deserialized base64 string - :rtype: bytearray - :raises: TypeError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - padding = "=" * (3 - (len(attr) + 3) % 4) # type: ignore - attr = attr + padding # type: ignore - encoded = attr.replace("-", "+").replace("_", "/") - return b64decode(encoded) - - @staticmethod - def deserialize_decimal(attr): - """Deserialize string into Decimal object. - - :param str attr: response string to be deserialized. - :return: Deserialized decimal - :raises: DeserializationError if string format invalid. - :rtype: decimal - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - return decimal.Decimal(str(attr)) # type: ignore - except decimal.DecimalException as err: - msg = "Invalid decimal {}".format(attr) - raise DeserializationError(msg) from err - - @staticmethod - def deserialize_long(attr): - """Deserialize string into long (Py2) or int (Py3). - - :param str attr: response string to be deserialized. - :return: Deserialized int - :rtype: long or int - :raises: ValueError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - return _long_type(attr) # type: ignore - - @staticmethod - def deserialize_duration(attr): - """Deserialize ISO-8601 formatted string into TimeDelta object. - - :param str attr: response string to be deserialized. - :return: Deserialized duration - :rtype: TimeDelta - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - duration = isodate.parse_duration(attr) - except (ValueError, OverflowError, AttributeError) as err: - msg = "Cannot deserialize duration object." - raise DeserializationError(msg) from err - return duration - - @staticmethod - def deserialize_date(attr): - """Deserialize ISO-8601 formatted string into Date object. 
- - :param str attr: response string to be deserialized. - :return: Deserialized date - :rtype: Date - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore - raise DeserializationError("Date must have only digits and -. Received: %s" % attr) - # This must NOT use defaultmonth/defaultday. Using None ensure this raises an exception. - return isodate.parse_date(attr, defaultmonth=0, defaultday=0) - - @staticmethod - def deserialize_time(attr): - """Deserialize ISO-8601 formatted string into time object. - - :param str attr: response string to be deserialized. - :return: Deserialized time - :rtype: datetime.time - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - if re.search(r"[^\W\d_]", attr, re.I + re.U): # type: ignore - raise DeserializationError("Date must have only digits and -. Received: %s" % attr) - return isodate.parse_time(attr) - - @staticmethod - def deserialize_rfc(attr): - """Deserialize RFC-1123 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :return: Deserialized RFC datetime - :rtype: Datetime - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - parsed_date = email.utils.parsedate_tz(attr) # type: ignore - date_obj = datetime.datetime( - *parsed_date[:6], tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0) / 60)) - ) - if not date_obj.tzinfo: - date_obj = date_obj.astimezone(tz=TZ_UTC) - except ValueError as err: - msg = "Cannot deserialize to rfc datetime object." - raise DeserializationError(msg) from err - return date_obj - - @staticmethod - def deserialize_iso(attr): - """Deserialize ISO-8601 formatted string into Datetime object. - - :param str attr: response string to be deserialized. - :return: Deserialized ISO datetime - :rtype: Datetime - :raises: DeserializationError if string format invalid. - """ - if isinstance(attr, ET.Element): - attr = attr.text - try: - attr = attr.upper() # type: ignore - match = Deserializer.valid_date.match(attr) - if not match: - raise ValueError("Invalid datetime string: " + attr) - - check_decimal = attr.split(".") - if len(check_decimal) > 1: - decimal_str = "" - for digit in check_decimal[1]: - if digit.isdigit(): - decimal_str += digit - else: - break - if len(decimal_str) > 6: - attr = attr.replace(decimal_str, decimal_str[0:6]) - - date_obj = isodate.parse_datetime(attr) - test_utc = date_obj.utctimetuple() - if test_utc.tm_year > 9999 or test_utc.tm_year < 1: - raise OverflowError("Hit max or min date") - except (ValueError, OverflowError, AttributeError) as err: - msg = "Cannot deserialize datetime object." - raise DeserializationError(msg) from err - return date_obj - - @staticmethod - def deserialize_unix(attr): - """Serialize Datetime object into IntTime format. - This is represented as seconds. - - :param int attr: Object to be serialized. - :return: Deserialized datetime - :rtype: Datetime - :raises: DeserializationError if format invalid - """ - if isinstance(attr, ET.Element): - attr = int(attr.text) # type: ignore - try: - attr = int(attr) - date_obj = datetime.datetime.fromtimestamp(attr, TZ_UTC) - except ValueError as err: - msg = "Cannot deserialize to unix datetime object." 
- raise DeserializationError(msg) from err - return date_obj diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py deleted file mode 100644 index cc76775c8a75..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/_vendor.py +++ /dev/null @@ -1,23 +0,0 @@ -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from abc import ABC -from typing import TYPE_CHECKING - -from ._configuration import SearchServiceClientConfiguration - -if TYPE_CHECKING: - from azure.core import PipelineClient - - from ._serialization import Deserializer, Serializer - - -class SearchServiceClientMixinABC(ABC): - """DO NOT use this class. It is for internal typing use only.""" - - _client: "PipelineClient" - _config: SearchServiceClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py deleted file mode 100644 index e67c2149e4d1..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - -from ._search_service_client import SearchServiceClient # type: ignore - -try: - from ._patch import __all__ as _patch_all - from ._patch import * -except ImportError: - _patch_all = [] -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "SearchServiceClient", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore - -_patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py deleted file mode 100644 index 488ff5605713..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_configuration.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
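For reviewers tracing the behavior of the msrest-style Deserializer removed above: its deserialize_iso helper upper-cases the value, checks it against a permissive regex, and trims fractional seconds to at most six digits before handing the string to isodate. A minimal standalone sketch of that normalization follows; the function name parse_iso and the sample timestamp are illustrative assumptions, and it relies on the isodate package that the deleted module already imported.

    import re
    import isodate  # same parser the removed Deserializer relied on

    # Permissive ISO-8601 shape check, copied from the deleted Deserializer.valid_date
    VALID_DATE = re.compile(r"\d{4}[-]\d{2}[-]\d{2}T\d{2}:\d{2}:\d{2}\.?\d*Z?[-+]?[\d{2}]?:?[\d{2}]?")

    def parse_iso(value: str):
        """Parse an ISO-8601 timestamp the way the removed deserialize_iso did."""
        value = value.upper()
        if not VALID_DATE.match(value):
            raise ValueError("Invalid datetime string: " + value)
        _, sep, frac = value.partition(".")
        if sep:
            digits = ""
            for ch in frac:
                if ch.isdigit():
                    digits += ch
                else:
                    break
            if len(digits) > 6:
                # isodate only supports microsecond precision, so trim to six digits
                value = value.replace(digits, digits[:6])
        return isodate.parse_datetime(value)

    print(parse_iso("2024-11-08T14:07:36.1234567890Z"))  # 2024-11-08 14:07:36.123456+00:00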
-# -------------------------------------------------------------------------- - -from typing import Any - -from azure.core.pipeline import policies - -VERSION = "unknown" - - -class SearchServiceClientConfiguration: # pylint: disable=too-many-instance-attributes - """Configuration for SearchServiceClient. - - Note that all parameters used to create this instance are saved as instance - attributes. - - :param endpoint: The endpoint URL of the search service. Required. - :type endpoint: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. - :paramtype api_version: str - """ - - def __init__(self, endpoint: str, **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-01-preview") - - if endpoint is None: - raise ValueError("Parameter 'endpoint' must not be None.") - - self.endpoint = endpoint - self.api_version = api_version - kwargs.setdefault("sdk_moniker", "searchserviceclient/{}".format(VERSION)) - self.polling_interval = kwargs.get("polling_interval", 30) - self._configure(**kwargs) - - def _configure(self, **kwargs: Any) -> None: - self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs) - self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs) - self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs) - self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs) - self.http_logging_policy = kwargs.get("http_logging_policy") or policies.HttpLoggingPolicy(**kwargs) - self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs) - self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs) - self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs) - self.authentication_policy = kwargs.get("authentication_policy") diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_patch.py deleted file mode 100644 index f7dd32510333..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_patch.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List - -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. 
- - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py deleted file mode 100644 index a223ef378ee7..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_search_service_client.py +++ /dev/null @@ -1,123 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from copy import deepcopy -from typing import Any, Awaitable -from typing_extensions import Self - -from azure.core import AsyncPipelineClient -from azure.core.pipeline import policies -from azure.core.rest import AsyncHttpResponse, HttpRequest - -from .. import models as _models -from .._serialization import Deserializer, Serializer -from ._configuration import SearchServiceClientConfiguration -from .operations import ( - AliasesOperations, - DataSourcesOperations, - IndexersOperations, - IndexesOperations, - SearchServiceClientOperationsMixin, - SkillsetsOperations, - SynonymMapsOperations, -) - - -class SearchServiceClient(SearchServiceClientOperationsMixin): # pylint: disable=too-many-instance-attributes - """Client that can be used to manage and query indexes and documents, as well as manage other - resources, on a search service. - - :ivar data_sources: DataSourcesOperations operations - :vartype data_sources: azure.search.documents.indexes.aio.operations.DataSourcesOperations - :ivar indexers: IndexersOperations operations - :vartype indexers: azure.search.documents.indexes.aio.operations.IndexersOperations - :ivar skillsets: SkillsetsOperations operations - :vartype skillsets: azure.search.documents.indexes.aio.operations.SkillsetsOperations - :ivar synonym_maps: SynonymMapsOperations operations - :vartype synonym_maps: azure.search.documents.indexes.aio.operations.SynonymMapsOperations - :ivar indexes: IndexesOperations operations - :vartype indexes: azure.search.documents.indexes.aio.operations.IndexesOperations - :ivar aliases: AliasesOperations operations - :vartype aliases: azure.search.documents.indexes.aio.operations.AliasesOperations - :param endpoint: The endpoint URL of the search service. Required. - :type endpoint: str - :keyword api_version: Api Version. Default value is "2024-11-01-preview". Note that overriding - this default value may result in unsupported behavior. 
- :paramtype api_version: str - """ - - def __init__( # pylint: disable=missing-client-constructor-parameter-credential - self, endpoint: str, **kwargs: Any - ) -> None: - _endpoint = "{endpoint}" - self._config = SearchServiceClientConfiguration(endpoint=endpoint, **kwargs) - _policies = kwargs.pop("policies", None) - if _policies is None: - _policies = [ - policies.RequestIdPolicy(**kwargs), - self._config.headers_policy, - self._config.user_agent_policy, - self._config.proxy_policy, - policies.ContentDecodePolicy(**kwargs), - self._config.redirect_policy, - self._config.retry_policy, - self._config.authentication_policy, - self._config.custom_hook_policy, - self._config.logging_policy, - policies.DistributedTracingPolicy(**kwargs), - policies.SensitiveHeaderCleanupPolicy(**kwargs) if self._config.redirect_policy else None, - self._config.http_logging_policy, - ] - self._client: AsyncPipelineClient = AsyncPipelineClient(base_url=_endpoint, policies=_policies, **kwargs) - - client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)} - self._serialize = Serializer(client_models) - self._deserialize = Deserializer(client_models) - self._serialize.client_side_validation = False - self.data_sources = DataSourcesOperations(self._client, self._config, self._serialize, self._deserialize) - self.indexers = IndexersOperations(self._client, self._config, self._serialize, self._deserialize) - self.skillsets = SkillsetsOperations(self._client, self._config, self._serialize, self._deserialize) - self.synonym_maps = SynonymMapsOperations(self._client, self._config, self._serialize, self._deserialize) - self.indexes = IndexesOperations(self._client, self._config, self._serialize, self._deserialize) - self.aliases = AliasesOperations(self._client, self._config, self._serialize, self._deserialize) - - def _send_request( - self, request: HttpRequest, *, stream: bool = False, **kwargs: Any - ) -> Awaitable[AsyncHttpResponse]: - """Runs the network request through the client's chained policies. - - >>> from azure.core.rest import HttpRequest - >>> request = HttpRequest("GET", "https://www.example.org/") - - >>> response = await client._send_request(request) - - - For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request - - :param request: The network request you want to make. Required. - :type request: ~azure.core.rest.HttpRequest - :keyword bool stream: Whether the response payload will be streamed. Defaults to False. - :return: The response of your network call. Does not do error handling on your response. 
- :rtype: ~azure.core.rest.AsyncHttpResponse - """ - - request_copy = deepcopy(request) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - - request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments) - return self._client.send_request(request_copy, stream=stream, **kwargs) # type: ignore - - async def close(self) -> None: - await self._client.close() - - async def __aenter__(self) -> Self: - await self._client.__aenter__() - return self - - async def __aexit__(self, *exc_details: Any) -> None: - await self._client.__aexit__(*exc_details) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_vendor.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_vendor.py deleted file mode 100644 index a103363f9d59..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/_vendor.py +++ /dev/null @@ -1,23 +0,0 @@ -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -from abc import ABC -from typing import TYPE_CHECKING - -from ._configuration import SearchServiceClientConfiguration - -if TYPE_CHECKING: - from azure.core import AsyncPipelineClient - - from .._serialization import Deserializer, Serializer - - -class SearchServiceClientMixinABC(ABC): - """DO NOT use this class. It is for internal typing use only.""" - - _client: "AsyncPipelineClient" - _config: SearchServiceClientConfiguration - _serialize: "Serializer" - _deserialize: "Deserializer" diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py deleted file mode 100644 index 27f37f309348..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
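The deleted async SearchServiceClient above assembles an explicit azure-core policy chain and keeps its base URL as a "{endpoint}" template that format_url fills in per request. The sketch below shows that same pattern in a self-contained form using the synchronous PipelineClient; the endpoint value and the /servicestats path are placeholder assumptions, and the authentication policy is deliberately left out.

    from azure.core import PipelineClient
    from azure.core.pipeline import policies
    from azure.core.rest import HttpRequest

    endpoint = "https://<service-name>.search.windows.net"  # placeholder, not a real service

    # Mirrors the policy ordering used by the deleted generated client
    # (authentication and sensitive-header cleanup omitted for brevity).
    client_policies = [
        policies.RequestIdPolicy(),
        policies.HeadersPolicy(),
        policies.UserAgentPolicy(sdk_moniker="searchserviceclient/unknown"),
        policies.ProxyPolicy(),
        policies.ContentDecodePolicy(),
        policies.RedirectPolicy(),
        policies.RetryPolicy(),
        policies.CustomHookPolicy(),
        policies.NetworkTraceLoggingPolicy(),
        policies.DistributedTracingPolicy(),
        policies.HttpLoggingPolicy(),
    ]

    # base_url stays a template; format_url substitutes the real endpoint later.
    client = PipelineClient(base_url="{endpoint}", policies=client_policies)

    request = HttpRequest("GET", "/servicestats")
    request.url = client.format_url(request.url, endpoint=endpoint)
    print(request.url)  # https://<service-name>.search.windows.net/servicestats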
-# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - -from ._data_sources_operations import DataSourcesOperations # type: ignore -from ._indexers_operations import IndexersOperations # type: ignore -from ._skillsets_operations import SkillsetsOperations # type: ignore -from ._synonym_maps_operations import SynonymMapsOperations # type: ignore -from ._indexes_operations import IndexesOperations # type: ignore -from ._aliases_operations import AliasesOperations # type: ignore -from ._search_service_client_operations import SearchServiceClientOperationsMixin # type: ignore - -from ._patch import __all__ as _patch_all -from ._patch import * -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "DataSourcesOperations", - "IndexersOperations", - "SkillsetsOperations", - "SynonymMapsOperations", - "IndexesOperations", - "AliasesOperations", - "SearchServiceClientOperationsMixin", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py deleted file mode 100644 index 0fb34eaca5eb..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_aliases_operations.py +++ /dev/null @@ -1,611 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload -import urllib.parse - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ...operations._aliases_operations import ( - build_create_or_update_request, - build_create_request, - build_delete_request, - build_get_request, - build_list_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class AliasesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.aio.SearchServiceClient`'s - :attr:`aliases` attribute. 
- """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - async def create( - self, - alias: _models.SearchAlias, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Alias - - :param alias: The definition of the alias to create. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - alias: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Alias - - :param alias: The definition of the alias to create. Required. - :type alias: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create( - self, - alias: Union[_models.SearchAlias, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Alias - - :param alias: The definition of the alias to create. Is either a SearchAlias type or a - IO[bytes] type. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(alias, (IOBase, bytes)): - _content = alias - else: - _json = self._serialize.body(alias, "SearchAlias") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> AsyncIterable["_models.SearchAlias"]: - """Lists all aliases available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Aliases - - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: An iterator like instance of either SearchAlias or the result of cls(response) - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.SearchAlias] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListAliasesResult] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request.method = "GET" - return _request - - async def extract_data(pipeline_response): - deserialized = self._deserialize("ListAliasesResult", pipeline_response) - list_of_elem = deserialized.aliases - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @overload - async def create_or_update( - self, - alias_name: str, - prefer: Union[str, _models.Enum0], - alias: _models.SearchAlias, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias or updates an alias if it already exists. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Alias - - :param alias_name: The definition of the alias to create or update. Required. - :type alias_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param alias: The definition of the alias to create or update. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_update( - self, - alias_name: str, - prefer: Union[str, _models.Enum0], - alias: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias or updates an alias if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Alias - - :param alias_name: The definition of the alias to create or update. Required. - :type alias_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param alias: The definition of the alias to create or update. Required. - :type alias: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_update( - self, - alias_name: str, - prefer: Union[str, _models.Enum0], - alias: Union[_models.SearchAlias, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias or updates an alias if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Alias - - :param alias_name: The definition of the alias to create or update. Required. - :type alias_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param alias: The definition of the alias to create or update. Is either a SearchAlias type or - a IO[bytes] type. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(alias, (IOBase, bytes)): - _content = alias - else: - _json = self._serialize.body(alias, "SearchAlias") - - _request = build_create_or_update_request( - alias_name=alias_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: 
disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete( - self, - alias_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a search alias and its associated mapping to an index. This operation is permanent, - with no recovery option. The mapped index is untouched by this operation. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Alias - - :param alias_name: The name of the alias to delete. Required. - :type alias_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - alias_name=alias_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace_async - async def get( - self, alias_name: str, request_options: 
Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchAlias: - """Retrieves an alias definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Alias - - :param alias_name: The name of the alias to retrieve. Required. - :type alias_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - alias_name=alias_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py deleted file mode 100644 index 8b4a8f2ad5c1..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_data_sources_operations.py +++ /dev/null @@ -1,592 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
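The list() method in the deleted AliasesOperations drives azure.core's AsyncItemPaged with a get_next/extract_data pair. Below is a self-contained sketch of that paging contract with no service calls; the PAGES dictionary and its token values are made-up stand-ins for the ListAliasesResult responses the real operation deserialized.

    import asyncio
    from azure.core.async_paging import AsyncItemPaged, AsyncList

    # Fake "responses": continuation token -> (items, next token).
    PAGES = {None: (["alias-a", "alias-b"], "page-2"), "page-2": (["alias-c"], None)}

    async def get_next(continuation_token=None):
        # The real implementation built and sent an HTTP request here.
        return PAGES[continuation_token]

    async def extract_data(response):
        items, next_token = response
        # Returning a falsy token after the last page ends the iteration.
        return next_token, AsyncList(items)

    async def main():
        async for alias in AsyncItemPaged(get_next, extract_data):
            print(alias)

    asyncio.run(main())  # prints alias-a, alias-b, alias-c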
-# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ...operations._data_sources_operations import ( - build_create_or_update_request, - build_create_request, - build_delete_request, - build_get_request, - build_list_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class DataSourcesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.aio.SearchServiceClient`'s - :attr:`data_sources` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - async def create_or_update( - self, - data_source_name: str, - prefer: Union[str, _models.Enum0], - data_source: _models.SearchIndexerDataSource, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource or updates a datasource if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Data-Source - - :param data_source_name: The name of the datasource to create or update. Required. - :type data_source_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param data_source: The definition of the datasource to create or update. Required. - :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_update( - self, - data_source_name: str, - prefer: Union[str, _models.Enum0], - data_source: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource or updates a datasource if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Data-Source - - :param data_source_name: The name of the datasource to create or update. Required. - :type data_source_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param data_source: The definition of the datasource to create or update. Required. - :type data_source: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_update( - self, - data_source_name: str, - prefer: Union[str, _models.Enum0], - data_source: Union[_models.SearchIndexerDataSource, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource or updates a datasource if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Data-Source - - :param data_source_name: The name of the datasource to create or update. Required. - :type data_source_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. 
- :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param data_source: The definition of the datasource to create or update. Is either a - SearchIndexerDataSource type or a IO[bytes] type. Required. - :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(data_source, (IOBase, bytes)): - _content = data_source - else: - _json = self._serialize.body(data_source, "SearchIndexerDataSource") - - _request = build_create_or_update_request( - data_source_name=data_source_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - 
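# Illustrative usage sketch (not part of the generated patch): the class docstring above states
# that DataSourcesOperations must not be instantiated directly and is reached through the
# service client's data_sources attribute. In application code the same create-or-update call
# is usually made through the public async SearchIndexerClient wrapper; the endpoint, API key,
# container, and data source names below are placeholder assumptions, not values from this change.
import asyncio

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes.aio import SearchIndexerClient
from azure.search.documents.indexes.models import (
    SearchIndexerDataSourceConnection,
    SearchIndexerDataSourceContainer,
)


async def upsert_data_source() -> None:
    # Authenticate against a (hypothetical) search service with an admin API key.
    client = SearchIndexerClient(
        "https://<service-name>.search.windows.net", AzureKeyCredential("<admin-api-key>")
    )
    async with client:
        data_source = SearchIndexerDataSourceConnection(
            name="sample-blob-datasource",  # hypothetical data source name
            type="azureblob",
            connection_string="<storage-connection-string>",
            container=SearchIndexerDataSourceContainer(name="sample-container"),
        )
        # The public helper calls through to a generated data-sources create_or_update
        # operation like the one removed in this hunk, handling request serialization.
        result = await client.create_or_update_data_source_connection(data_source)
        print(result.name)


if __name__ == "__main__":
    asyncio.run(upsert_data_source())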
- @distributed_trace_async - async def delete( - self, - data_source_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Data-Source - - :param data_source_name: The name of the datasource to delete. Required. - :type data_source_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - data_source_name=data_source_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace_async - async def get( - self, data_source_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Retrieves a datasource definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Data-Source - - :param data_source_name: The name of the datasource to retrieve. Required. - :type data_source_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - data_source_name=data_source_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListDataSourcesResult: - """Lists all datasources available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Data-Sources - - :param select: Selects which top-level properties of the data sources to retrieve. Specified as - a comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListDataSourcesResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListDataSourcesResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListDataSourcesResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListDataSourcesResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create( - self, - data_source: _models.SearchIndexerDataSource, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Data-Source - - :param data_source: The definition of the datasource to create. Required. - :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - data_source: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Data-Source - - :param data_source: The definition of the datasource to create. Required. - :type data_source: IO[bytes] - :param request_options: Parameter group. 
Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create( - self, - data_source: Union[_models.SearchIndexerDataSource, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Data-Source - - :param data_source: The definition of the datasource to create. Is either a - SearchIndexerDataSource type or a IO[bytes] type. Required. - :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(data_source, (IOBase, bytes)): - _content = data_source - else: - _json = self._serialize.body(data_source, "SearchIndexerDataSource") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff 
--git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py deleted file mode 100644 index e4e5618c1c18..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexers_operations.py +++ /dev/null @@ -1,953 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ...operations._indexers_operations import ( - build_create_or_update_request, - build_create_request, - build_delete_request, - build_get_request, - build_get_status_request, - build_list_request, - build_reset_docs_request, - build_reset_request, - build_run_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class IndexersOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.aio.SearchServiceClient`'s - :attr:`indexers` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace_async - async def reset( - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> None: - """Resets the change tracking state associated with an indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Reset-Indexer - - :param indexer_name: The name of the indexer to reset. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_reset_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - async def reset_docs( - self, - indexer_name: str, - overwrite: bool = False, - request_options: Optional[_models.RequestOptions] = None, - keys_or_ids: Optional[_models.DocumentKeysOrIds] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - .. seealso:: - - https://aka.ms/reset-documents - - :param indexer_name: The name of the indexer to reset documents for. Required. - :type indexer_name: str - :param overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :type overwrite: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :param keys_or_ids: Default value is None. - :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def reset_docs( - self, - indexer_name: str, - overwrite: bool = False, - request_options: Optional[_models.RequestOptions] = None, - keys_or_ids: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - .. 
seealso:: - - https://aka.ms/reset-documents - - :param indexer_name: The name of the indexer to reset documents for. Required. - :type indexer_name: str - :param overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :type overwrite: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :param keys_or_ids: Default value is None. - :type keys_or_ids: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def reset_docs( - self, - indexer_name: str, - overwrite: bool = False, - request_options: Optional[_models.RequestOptions] = None, - keys_or_ids: Optional[Union[_models.DocumentKeysOrIds, IO[bytes]]] = None, - **kwargs: Any - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - .. seealso:: - - https://aka.ms/reset-documents - - :param indexer_name: The name of the indexer to reset documents for. Required. - :type indexer_name: str - :param overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :type overwrite: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :param keys_or_ids: Is either a DocumentKeysOrIds type or a IO[bytes] type. Default value is - None. 
- :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds or IO[bytes] - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(keys_or_ids, (IOBase, bytes)): - _content = keys_or_ids - else: - if keys_or_ids is not None: - _json = self._serialize.body(keys_or_ids, "DocumentKeysOrIds") - else: - _json = None - - _request = build_reset_docs_request( - indexer_name=indexer_name, - overwrite=overwrite, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace_async - async def run( - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> None: - """Runs an indexer on-demand. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Run-Indexer - - :param indexer_name: The name of the indexer to run. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_run_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - async def create_or_update( - self, - indexer_name: str, - prefer: Union[str, _models.Enum0], - indexer: _models.SearchIndexer, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer or updates an indexer if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer_name: The name of the indexer to create or update. Required. - :type indexer_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param indexer: The definition of the indexer to create or update. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. 
- :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_update( - self, - indexer_name: str, - prefer: Union[str, _models.Enum0], - indexer: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer or updates an indexer if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer_name: The name of the indexer to create or update. Required. - :type indexer_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param indexer: The definition of the indexer to create or update. Required. - :type indexer: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_update( - self, - indexer_name: str, - prefer: Union[str, _models.Enum0], - indexer: Union[_models.SearchIndexer, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer or updates an indexer if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer_name: The name of the indexer to create or update. Required. - :type indexer_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param indexer: The definition of the indexer to create or update. Is either a SearchIndexer - type or a IO[bytes] type. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(indexer, (IOBase, bytes)): - _content = indexer - else: - _json = self._serialize.body(indexer, "SearchIndexer") - - _request = build_create_or_update_request( - indexer_name=indexer_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete( - self, - indexer_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes an indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Indexer - - :param indexer_name: The name of the indexer to delete. Required. - :type indexer_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace_async - async def get( - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndexer: - """Retrieves an indexer definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Indexer - - :param indexer_name: The name of the indexer to retrieve. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListIndexersResult: - """Lists all indexers available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Indexers - - :param select: Selects which top-level properties of the indexers to retrieve. Specified as a - comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListIndexersResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListIndexersResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListIndexersResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListIndexersResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create( - self, - indexer: _models.SearchIndexer, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer: The definition of the indexer to create. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - indexer: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer: The definition of the indexer to create. Required. - :type indexer: IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create( - self, - indexer: Union[_models.SearchIndexer, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer: The definition of the indexer to create. Is either a SearchIndexer type or a - IO[bytes] type. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(indexer, (IOBase, bytes)): - _content = indexer - else: - _json = self._serialize.body(indexer, "SearchIndexer") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_status( - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> 
_models.SearchIndexerStatus: - """Returns the current status and execution history of an indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Indexer-Status - - :param indexer_name: The name of the indexer for which to retrieve status. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerStatus or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexerStatus] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_status_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerStatus", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py deleted file mode 100644 index 4e3be3bceeea..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_indexes_operations.py +++ /dev/null @@ -1,849 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload -import urllib.parse - -from azure.core.async_paging import AsyncItemPaged, AsyncList -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator import distributed_trace -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ...operations._indexes_operations import ( - build_analyze_request, - build_create_or_update_request, - build_create_request, - build_delete_request, - build_get_request, - build_get_statistics_request, - build_list_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class IndexesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.aio.SearchServiceClient`'s - :attr:`indexes` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - async def create( - self, - index: _models.SearchIndex, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Index - - :param index: The definition of the index to create. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - index: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Index - - :param index: The definition of the index to create. Required. - :type index: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. 
Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create( - self, - index: Union[_models.SearchIndex, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Index - - :param index: The definition of the index to create. Is either a SearchIndex type or a - IO[bytes] type. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(index, (IOBase, bytes)): - _content = index - else: - _json = self._serialize.body(index, "SearchIndex") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> AsyncIterable["_models.SearchIndex"]: - """Lists all indexes available for a search service. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Indexes - - :param select: Selects which top-level properties of the index definitions to retrieve. - Specified as a comma-separated list of JSON property names, or '*' for all properties. The - default is all properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: An iterator like instance of either SearchIndex or the result of cls(response) - :rtype: - ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.indexes.models.SearchIndex] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListIndexesResult] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request.method = "GET" - return _request - - async def extract_data(pipeline_response): - deserialized = self._deserialize("ListIndexesResult", pipeline_response) - list_of_elem = deserialized.indexes - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return None, AsyncList(list_of_elem) - - async def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return AsyncItemPaged(get_next, extract_data) - - @overload - async def create_or_update( - self, - 
index_name: str, - prefer: Union[str, _models.Enum0], - index: _models.SearchIndex, - allow_index_downtime: Optional[bool] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index or updates an index if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Index - - :param index_name: The definition of the index to create or update. Required. - :type index_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param index: The definition of the index to create or update. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex - :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters - to be added to an index by taking the index offline for at least a few seconds. This - temporarily causes indexing and query requests to fail. Performance and write availability of - the index can be impaired for several minutes after the index is updated, or longer for very - large indexes. Default value is None. - :type allow_index_downtime: bool - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_update( - self, - index_name: str, - prefer: Union[str, _models.Enum0], - index: IO[bytes], - allow_index_downtime: Optional[bool] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index or updates an index if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Index - - :param index_name: The definition of the index to create or update. Required. - :type index_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param index: The definition of the index to create or update. Required. - :type index: IO[bytes] - :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters - to be added to an index by taking the index offline for at least a few seconds. This - temporarily causes indexing and query requests to fail. 
Performance and write availability of - the index can be impaired for several minutes after the index is updated, or longer for very - large indexes. Default value is None. - :type allow_index_downtime: bool - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_update( - self, - index_name: str, - prefer: Union[str, _models.Enum0], - index: Union[_models.SearchIndex, IO[bytes]], - allow_index_downtime: Optional[bool] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index or updates an index if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Index - - :param index_name: The definition of the index to create or update. Required. - :type index_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param index: The definition of the index to create or update. Is either a SearchIndex type or - a IO[bytes] type. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex or IO[bytes] - :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters - to be added to an index by taking the index offline for at least a few seconds. This - temporarily causes indexing and query requests to fail. Performance and write availability of - the index can be impaired for several minutes after the index is updated, or longer for very - large indexes. Default value is None. - :type allow_index_downtime: bool - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(index, (IOBase, bytes)): - _content = index - else: - _json = self._serialize.body(index, "SearchIndex") - - _request = build_create_or_update_request( - index_name=index_name, - prefer=prefer, - allow_index_downtime=allow_index_downtime, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete( - self, - index_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a search index and all the documents it contains. This operation is permanent, with no - recovery option. Make sure you have a master copy of your index definition, data ingestion - code, and a backup of the primary data source in case you need to re-build the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Index - - :param index_name: The name of the index to delete. Required. - :type index_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. 
- :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace_async - async def get( - self, index_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndex: - """Retrieves an index definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Index - - :param index_name: The name of the index to retrieve. Required. - :type index_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def get_statistics( - self, index_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.GetIndexStatisticsResult: - """Returns statistics for the given index, including a document count and storage usage. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Index-Statistics - - :param index_name: The name of the index for which to retrieve statistics. Required. - :type index_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: GetIndexStatisticsResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.GetIndexStatisticsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.GetIndexStatisticsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_statistics_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("GetIndexStatisticsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def analyze( - self, - index_name: str, - request: _models.AnalyzeRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AnalyzeResult: - """Shows how an analyzer breaks text into tokens. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/test-analyzer - - :param index_name: The name of the index for which to test an analyzer. Required. - :type index_name: str - :param request: The text and analyzer or analysis components to test. Required. - :type request: ~azure.search.documents.indexes.models.AnalyzeRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AnalyzeResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.AnalyzeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def analyze( - self, - index_name: str, - request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AnalyzeResult: - """Shows how an analyzer breaks text into tokens. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/test-analyzer - - :param index_name: The name of the index for which to test an analyzer. Required. - :type index_name: str - :param request: The text and analyzer or analysis components to test. Required. - :type request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AnalyzeResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.AnalyzeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def analyze( - self, - index_name: str, - request: Union[_models.AnalyzeRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.AnalyzeResult: - """Shows how an analyzer breaks text into tokens. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/test-analyzer - - :param index_name: The name of the index for which to test an analyzer. Required. - :type index_name: str - :param request: The text and analyzer or analysis components to test. Is either a - AnalyzeRequest type or a IO[bytes] type. Required. - :type request: ~azure.search.documents.indexes.models.AnalyzeRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: AnalyzeResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.AnalyzeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(request, (IOBase, bytes)): - _content = request - else: - _json = self._serialize.body(request, "AnalyzeRequest") - - _request = build_analyze_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("AnalyzeResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_patch.py deleted file mode 100644 index f7dd32510333..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_patch.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List - -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py deleted file mode 100644 index 8c86bec93697..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_search_service_client_operations.py +++ /dev/null @@ -1,94 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import sys -from typing import Any, Callable, Dict, Optional, TypeVar - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... 
import models as _models -from ...operations._search_service_client_operations import build_get_service_statistics_request -from .._vendor import SearchServiceClientMixinABC - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class SearchServiceClientOperationsMixin(SearchServiceClientMixinABC): - - @distributed_trace_async - async def get_service_statistics( - self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchServiceStatistics: - """Gets service level statistics for a search service. - - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchServiceStatistics or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchServiceStatistics - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchServiceStatistics] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_service_statistics_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchServiceStatistics", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py deleted file mode 100644 index c31bb6630bf1..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_skillsets_operations.py +++ /dev/null @@ -1,748 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect 
behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ...operations._skillsets_operations import ( - build_create_or_update_request, - build_create_request, - build_delete_request, - build_get_request, - build_list_request, - build_reset_skills_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class SkillsetsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.aio.SearchServiceClient`'s - :attr:`skillsets` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - async def create_or_update( - self, - skillset_name: str, - prefer: Union[str, _models.Enum0], - skillset: _models.SearchIndexerSkillset, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service or updates the skillset if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/update-skillset - - :param skillset_name: The name of the skillset to create or update. Required. - :type skillset_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param skillset: The skillset containing one or more skills to create or update in a search - service. Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. 
Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_update( - self, - skillset_name: str, - prefer: Union[str, _models.Enum0], - skillset: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service or updates the skillset if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/update-skillset - - :param skillset_name: The name of the skillset to create or update. Required. - :type skillset_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param skillset: The skillset containing one or more skills to create or update in a search - service. Required. - :type skillset: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_update( - self, - skillset_name: str, - prefer: Union[str, _models.Enum0], - skillset: Union[_models.SearchIndexerSkillset, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service or updates the skillset if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/update-skillset - - :param skillset_name: The name of the skillset to create or update. Required. - :type skillset_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param skillset: The skillset containing one or more skills to create or update in a search - service. Is either a SearchIndexerSkillset type or a IO[bytes] type. Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(skillset, (IOBase, bytes)): - _content = skillset - else: - _json = self._serialize.body(skillset, "SearchIndexerSkillset") - - _request = build_create_or_update_request( - skillset_name=skillset_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete( - self, - skillset_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/delete-skillset - - :param skillset_name: The name of the skillset to delete. Required. - :type skillset_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. 
- :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - skillset_name=skillset_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace_async - async def get( - self, skillset_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Retrieves a skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/get-skillset - - :param skillset_name: The name of the skillset to retrieve. Required. - :type skillset_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - skillset_name=skillset_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListSkillsetsResult: - """List all skillsets in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/list-skillset - - :param select: Selects which top-level properties of the skillsets to retrieve. Specified as a - comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListSkillsetsResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListSkillsetsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListSkillsetsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListSkillsetsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create( - self, - skillset: _models.SearchIndexerSkillset, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/create-skillset - - :param skillset: The skillset containing one or more skills to create in a search service. - Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - skillset: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/create-skillset - - :param skillset: The skillset containing one or more skills to create in a search service. - Required. 
- :type skillset: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create( - self, - skillset: Union[_models.SearchIndexerSkillset, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/create-skillset - - :param skillset: The skillset containing one or more skills to create in a search service. Is - either a SearchIndexerSkillset type or a IO[bytes] type. Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(skillset, (IOBase, bytes)): - _content = skillset - else: - _json = self._serialize.body(skillset, "SearchIndexerSkillset") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, 
deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def reset_skills( - self, - skillset_name: str, - skill_names: _models.SkillNames, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - .. seealso:: - - https://aka.ms/reset-skills - - :param skillset_name: The name of the skillset to reset. Required. - :type skillset_name: str - :param skill_names: The names of skills to reset. Required. - :type skill_names: ~azure.search.documents.indexes.models.SkillNames - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def reset_skills( - self, - skillset_name: str, - skill_names: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - .. seealso:: - - https://aka.ms/reset-skills - - :param skillset_name: The name of the skillset to reset. Required. - :type skillset_name: str - :param skill_names: The names of skills to reset. Required. - :type skill_names: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def reset_skills( - self, - skillset_name: str, - skill_names: Union[_models.SkillNames, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - .. seealso:: - - https://aka.ms/reset-skills - - :param skillset_name: The name of the skillset to reset. Required. - :type skillset_name: str - :param skill_names: The names of skills to reset. Is either a SkillNames type or a IO[bytes] - type. Required. - :type skill_names: ~azure.search.documents.indexes.models.SkillNames or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(skill_names, (IOBase, bytes)): - _content = skill_names - else: - _json = self._serialize.body(skill_names, "SkillNames") - - _request = build_reset_skills_request( - skillset_name=skillset_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py deleted file mode 100644 index 699b20712a44..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/aio/operations/_synonym_maps_operations.py +++ /dev/null @@ -1,579 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
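# --------------------------------------------------------------------------
# Editor's usage sketch, not taken from this diff: the async skillset
# operations removed above (create / get / list / delete / reset_skills) are
# normally reached through the hand-written
# azure.search.documents.indexes.aio.SearchIndexerClient rather than the
# generated operations class. A minimal sketch, assuming the public
# create_skillset / get_skillset / delete_skillset methods and the
# SEARCH_ENDPOINT / SEARCH_API_KEY environment variables used here.
# --------------------------------------------------------------------------
import asyncio
import os

from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes.aio import SearchIndexerClient
from azure.search.documents.indexes.models import (
    EntityRecognitionSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    SearchIndexerSkillset,
)


async def manage_skillset() -> None:
    credential = AzureKeyCredential(os.environ["SEARCH_API_KEY"])
    async with SearchIndexerClient(os.environ["SEARCH_ENDPOINT"], credential) as client:
        # One skill that pulls organizations out of /document/content; the
        # client serializes this model as the Create-Skillset request body.
        skill = EntityRecognitionSkill(
            inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
            outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")],
        )
        skillset = SearchIndexerSkillset(
            name="hotel-skillset",
            description="Extract organizations from hotel descriptions",
            skills=[skill],
        )

        created = await client.create_skillset(skillset)   # POST /skillsets, expects 201
        fetched = await client.get_skillset(created.name)  # GET a single skillset, expects 200
        print(fetched.name, len(fetched.skills))
        await client.delete_skillset(fetched.name)         # DELETE, expects 204


if __name__ == "__main__":
    asyncio.run(manage_skillset())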
-# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import AsyncHttpResponse, HttpRequest -from azure.core.tracing.decorator_async import distributed_trace_async -from azure.core.utils import case_insensitive_dict - -from ... import models as _models -from ...operations._synonym_maps_operations import ( - build_create_or_update_request, - build_create_request, - build_delete_request, - build_get_request, - build_list_request, -) - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] - - -class SynonymMapsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.aio.SearchServiceClient`'s - :attr:`synonym_maps` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs) -> None: - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - async def create_or_update( - self, - synonym_map_name: str, - prefer: Union[str, _models.Enum0], - synonym_map: _models.SynonymMap, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map or updates a synonym map if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Synonym-Map - - :param synonym_map_name: The name of the synonym map to create or update. Required. - :type synonym_map_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param synonym_map: The definition of the synonym map to create or update. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create_or_update( - self, - synonym_map_name: str, - prefer: Union[str, _models.Enum0], - synonym_map: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map or updates a synonym map if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Synonym-Map - - :param synonym_map_name: The name of the synonym map to create or update. Required. - :type synonym_map_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param synonym_map: The definition of the synonym map to create or update. Required. - :type synonym_map: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create_or_update( - self, - synonym_map_name: str, - prefer: Union[str, _models.Enum0], - synonym_map: Union[_models.SynonymMap, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map or updates a synonym map if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Synonym-Map - - :param synonym_map_name: The name of the synonym map to create or update. Required. - :type synonym_map_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param synonym_map: The definition of the synonym map to create or update. Is either a - SynonymMap type or a IO[bytes] type. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. 
- :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(synonym_map, (IOBase, bytes)): - _content = synonym_map - else: - _json = self._serialize.body(synonym_map, "SynonymMap") - - _request = build_create_or_update_request( - synonym_map_name=synonym_map_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def delete( - self, - synonym_map_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Synonym-Map - - :param synonym_map_name: The name of the synonym map to delete. Required. - :type synonym_map_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - synonym_map_name=synonym_map_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace_async - async def get( - self, synonym_map_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SynonymMap: - """Retrieves a synonym map definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Synonym-Map - - :param synonym_map_name: The name of the synonym map to retrieve. Required. - :type synonym_map_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - synonym_map_name=synonym_map_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace_async - async def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListSynonymMapsResult: - """Lists all synonym maps available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Synonym-Maps - - :param select: Selects which top-level properties of the synonym maps to retrieve. Specified as - a comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListSynonymMapsResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListSynonymMapsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListSynonymMapsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListSynonymMapsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - async def create( - self, - synonym_map: _models.SynonymMap, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Synonym-Map - - :param synonym_map: The definition of the synonym map to create. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - async def create( - self, - synonym_map: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Synonym-Map - - :param synonym_map: The definition of the synonym map to create. Required. - :type synonym_map: IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace_async - async def create( - self, - synonym_map: Union[_models.SynonymMap, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Synonym-Map - - :param synonym_map: The definition of the synonym map to create. Is either a SynonymMap type or - a IO[bytes] type. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(synonym_map, (IOBase, bytes)): - _content = synonym_map - else: - _json = self._serialize.body(synonym_map, "SynonymMap") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py 
b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py deleted file mode 100644 index 5b2133d5647f..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/__init__.py +++ /dev/null @@ -1,516 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - - -from ._models_py3 import ( # type: ignore - AIServicesAccountIdentity, - AIServicesAccountKey, - AIServicesVisionParameters, - AIServicesVisionVectorizer, - AnalyzeRequest, - AnalyzeResult, - AnalyzedTokenInfo, - AsciiFoldingTokenFilter, - AzureActiveDirectoryApplicationCredentials, - AzureMachineLearningParameters, - AzureMachineLearningSkill, - AzureMachineLearningVectorizer, - AzureOpenAIEmbeddingSkill, - AzureOpenAITokenizerParameters, - AzureOpenAIVectorizer, - AzureOpenAIVectorizerParameters, - BM25SimilarityAlgorithm, - BinaryQuantizationCompression, - CharFilter, - CjkBigramTokenFilter, - ClassicSimilarityAlgorithm, - ClassicTokenizer, - CognitiveServicesAccount, - CognitiveServicesAccountKey, - CommonGramTokenFilter, - ConditionalSkill, - CorsOptions, - CustomAnalyzer, - CustomEntity, - CustomEntityAlias, - CustomEntityLookupSkill, - CustomNormalizer, - DataChangeDetectionPolicy, - DataDeletionDetectionPolicy, - DataSourceCredentials, - DefaultCognitiveServicesAccount, - DictionaryDecompounderTokenFilter, - DistanceScoringFunction, - DistanceScoringParameters, - DocumentExtractionSkill, - DocumentIntelligenceLayoutSkill, - DocumentKeysOrIds, - EdgeNGramTokenFilter, - EdgeNGramTokenFilterV2, - EdgeNGramTokenizer, - ElisionTokenFilter, - EntityLinkingSkill, - EntityRecognitionSkill, - EntityRecognitionSkillV3, - ErrorAdditionalInfo, - ErrorDetail, - ErrorResponse, - ExhaustiveKnnAlgorithmConfiguration, - ExhaustiveKnnParameters, - FieldMapping, - FieldMappingFunction, - FreshnessScoringFunction, - FreshnessScoringParameters, - GetIndexStatisticsResult, - HighWaterMarkChangeDetectionPolicy, - HnswAlgorithmConfiguration, - HnswParameters, - ImageAnalysisSkill, - IndexerCurrentState, - IndexerExecutionResult, - IndexingParameters, - IndexingParametersConfiguration, - IndexingSchedule, - InputFieldMappingEntry, - KeepTokenFilter, - KeyPhraseExtractionSkill, - KeywordMarkerTokenFilter, - KeywordTokenizer, - KeywordTokenizerV2, - LanguageDetectionSkill, - LengthTokenFilter, - LexicalAnalyzer, - LexicalNormalizer, - LexicalTokenizer, - LimitTokenFilter, - ListAliasesResult, - ListDataSourcesResult, - ListIndexersResult, - ListIndexesResult, - ListSkillsetsResult, - ListSynonymMapsResult, - LuceneStandardAnalyzer, - LuceneStandardTokenizer, - LuceneStandardTokenizerV2, - MagnitudeScoringFunction, - MagnitudeScoringParameters, - MappingCharFilter, - MergeSkill, - MicrosoftLanguageStemmingTokenizer, - MicrosoftLanguageTokenizer, - NGramTokenFilter, - NGramTokenFilterV2, - NGramTokenizer, - NativeBlobSoftDeleteDeletionDetectionPolicy, - OcrSkill, - OutputFieldMappingEntry, - PIIDetectionSkill, - PathHierarchyTokenizerV2, - PatternAnalyzer, - PatternCaptureTokenFilter, - 
PatternReplaceCharFilter, - PatternReplaceTokenFilter, - PatternTokenizer, - PhoneticTokenFilter, - RequestOptions, - RescoringOptions, - ResourceCounter, - ScalarQuantizationCompression, - ScalarQuantizationParameters, - ScoringFunction, - ScoringProfile, - SearchAlias, - SearchField, - SearchIndex, - SearchIndexer, - SearchIndexerCache, - SearchIndexerDataContainer, - SearchIndexerDataIdentity, - SearchIndexerDataNoneIdentity, - SearchIndexerDataSource, - SearchIndexerDataUserAssignedIdentity, - SearchIndexerError, - SearchIndexerIndexProjection, - SearchIndexerIndexProjectionSelector, - SearchIndexerIndexProjectionsParameters, - SearchIndexerKnowledgeStore, - SearchIndexerKnowledgeStoreBlobProjectionSelector, - SearchIndexerKnowledgeStoreFileProjectionSelector, - SearchIndexerKnowledgeStoreObjectProjectionSelector, - SearchIndexerKnowledgeStoreParameters, - SearchIndexerKnowledgeStoreProjection, - SearchIndexerKnowledgeStoreProjectionSelector, - SearchIndexerKnowledgeStoreTableProjectionSelector, - SearchIndexerLimits, - SearchIndexerSkill, - SearchIndexerSkillset, - SearchIndexerStatus, - SearchIndexerWarning, - SearchResourceEncryptionKey, - SearchServiceCounters, - SearchServiceLimits, - SearchServiceStatistics, - SearchSuggester, - SemanticConfiguration, - SemanticField, - SemanticPrioritizedFields, - SemanticSearch, - SentimentSkill, - SentimentSkillV3, - ShaperSkill, - ShingleTokenFilter, - SimilarityAlgorithm, - SkillNames, - SnowballTokenFilter, - SoftDeleteColumnDeletionDetectionPolicy, - SplitSkill, - SqlIntegratedChangeTrackingPolicy, - StemmerOverrideTokenFilter, - StemmerTokenFilter, - StopAnalyzer, - StopwordsTokenFilter, - SynonymMap, - SynonymTokenFilter, - TagScoringFunction, - TagScoringParameters, - TextTranslationSkill, - TextWeights, - TokenFilter, - TruncateTokenFilter, - UaxUrlEmailTokenizer, - UniqueTokenFilter, - VectorSearch, - VectorSearchAlgorithmConfiguration, - VectorSearchCompression, - VectorSearchProfile, - VectorSearchVectorizer, - VisionVectorizeSkill, - WebApiSkill, - WebApiVectorizer, - WebApiVectorizerParameters, - WordDelimiterTokenFilter, -) - -from ._search_service_client_enums import ( # type: ignore - AIStudioModelCatalogName, - AzureOpenAIModelName, - BlobIndexerDataToExtract, - BlobIndexerImageAction, - BlobIndexerPDFTextRotationAlgorithm, - BlobIndexerParsingMode, - CharFilterName, - CjkBigramTokenFilterScripts, - CustomEntityLookupSkillLanguage, - DocumentIntelligenceLayoutSkillMarkdownHeaderDepth, - DocumentIntelligenceLayoutSkillOutputMode, - EdgeNGramTokenFilterSide, - EntityCategory, - EntityRecognitionSkillLanguage, - Enum0, - ImageAnalysisSkillLanguage, - ImageDetail, - IndexProjectionMode, - IndexerExecutionEnvironment, - IndexerExecutionStatus, - IndexerExecutionStatusDetail, - IndexerStatus, - IndexingMode, - KeyPhraseExtractionSkillLanguage, - LexicalAnalyzerName, - LexicalNormalizerName, - LexicalTokenizerName, - MarkdownHeaderDepth, - MarkdownParsingSubmode, - MicrosoftStemmingTokenizerLanguage, - MicrosoftTokenizerLanguage, - OcrLineEnding, - OcrSkillLanguage, - PIIDetectionSkillMaskingMode, - PhoneticEncoder, - RegexFlags, - ScoringFunctionAggregation, - ScoringFunctionInterpolation, - SearchFieldDataType, - SearchIndexerDataSourceType, - SentimentSkillLanguage, - SnowballTokenFilterLanguage, - SplitSkillEncoderModelName, - SplitSkillLanguage, - SplitSkillUnit, - StemmerTokenFilterLanguage, - StopwordsList, - TextSplitMode, - TextTranslationSkillLanguage, - TokenCharacterKind, - TokenFilterName, - VectorEncodingFormat, - 
VectorSearchAlgorithmKind, - VectorSearchAlgorithmMetric, - VectorSearchCompressionKind, - VectorSearchCompressionRescoreStorageMethod, - VectorSearchCompressionTarget, - VectorSearchVectorizerKind, - VisualFeature, -) -from ._patch import __all__ as _patch_all -from ._patch import * -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "AIServicesAccountIdentity", - "AIServicesAccountKey", - "AIServicesVisionParameters", - "AIServicesVisionVectorizer", - "AnalyzeRequest", - "AnalyzeResult", - "AnalyzedTokenInfo", - "AsciiFoldingTokenFilter", - "AzureActiveDirectoryApplicationCredentials", - "AzureMachineLearningParameters", - "AzureMachineLearningSkill", - "AzureMachineLearningVectorizer", - "AzureOpenAIEmbeddingSkill", - "AzureOpenAITokenizerParameters", - "AzureOpenAIVectorizer", - "AzureOpenAIVectorizerParameters", - "BM25SimilarityAlgorithm", - "BinaryQuantizationCompression", - "CharFilter", - "CjkBigramTokenFilter", - "ClassicSimilarityAlgorithm", - "ClassicTokenizer", - "CognitiveServicesAccount", - "CognitiveServicesAccountKey", - "CommonGramTokenFilter", - "ConditionalSkill", - "CorsOptions", - "CustomAnalyzer", - "CustomEntity", - "CustomEntityAlias", - "CustomEntityLookupSkill", - "CustomNormalizer", - "DataChangeDetectionPolicy", - "DataDeletionDetectionPolicy", - "DataSourceCredentials", - "DefaultCognitiveServicesAccount", - "DictionaryDecompounderTokenFilter", - "DistanceScoringFunction", - "DistanceScoringParameters", - "DocumentExtractionSkill", - "DocumentIntelligenceLayoutSkill", - "DocumentKeysOrIds", - "EdgeNGramTokenFilter", - "EdgeNGramTokenFilterV2", - "EdgeNGramTokenizer", - "ElisionTokenFilter", - "EntityLinkingSkill", - "EntityRecognitionSkill", - "EntityRecognitionSkillV3", - "ErrorAdditionalInfo", - "ErrorDetail", - "ErrorResponse", - "ExhaustiveKnnAlgorithmConfiguration", - "ExhaustiveKnnParameters", - "FieldMapping", - "FieldMappingFunction", - "FreshnessScoringFunction", - "FreshnessScoringParameters", - "GetIndexStatisticsResult", - "HighWaterMarkChangeDetectionPolicy", - "HnswAlgorithmConfiguration", - "HnswParameters", - "ImageAnalysisSkill", - "IndexerCurrentState", - "IndexerExecutionResult", - "IndexingParameters", - "IndexingParametersConfiguration", - "IndexingSchedule", - "InputFieldMappingEntry", - "KeepTokenFilter", - "KeyPhraseExtractionSkill", - "KeywordMarkerTokenFilter", - "KeywordTokenizer", - "KeywordTokenizerV2", - "LanguageDetectionSkill", - "LengthTokenFilter", - "LexicalAnalyzer", - "LexicalNormalizer", - "LexicalTokenizer", - "LimitTokenFilter", - "ListAliasesResult", - "ListDataSourcesResult", - "ListIndexersResult", - "ListIndexesResult", - "ListSkillsetsResult", - "ListSynonymMapsResult", - "LuceneStandardAnalyzer", - "LuceneStandardTokenizer", - "LuceneStandardTokenizerV2", - "MagnitudeScoringFunction", - "MagnitudeScoringParameters", - "MappingCharFilter", - "MergeSkill", - "MicrosoftLanguageStemmingTokenizer", - "MicrosoftLanguageTokenizer", - "NGramTokenFilter", - "NGramTokenFilterV2", - "NGramTokenizer", - "NativeBlobSoftDeleteDeletionDetectionPolicy", - "OcrSkill", - "OutputFieldMappingEntry", - "PIIDetectionSkill", - "PathHierarchyTokenizerV2", - "PatternAnalyzer", - "PatternCaptureTokenFilter", - "PatternReplaceCharFilter", - "PatternReplaceTokenFilter", - "PatternTokenizer", - "PhoneticTokenFilter", - "RequestOptions", - "RescoringOptions", - "ResourceCounter", - "ScalarQuantizationCompression", - "ScalarQuantizationParameters", - "ScoringFunction", - "ScoringProfile", - "SearchAlias", - "SearchField", - 
"SearchIndex", - "SearchIndexer", - "SearchIndexerCache", - "SearchIndexerDataContainer", - "SearchIndexerDataIdentity", - "SearchIndexerDataNoneIdentity", - "SearchIndexerDataSource", - "SearchIndexerDataUserAssignedIdentity", - "SearchIndexerError", - "SearchIndexerIndexProjection", - "SearchIndexerIndexProjectionSelector", - "SearchIndexerIndexProjectionsParameters", - "SearchIndexerKnowledgeStore", - "SearchIndexerKnowledgeStoreBlobProjectionSelector", - "SearchIndexerKnowledgeStoreFileProjectionSelector", - "SearchIndexerKnowledgeStoreObjectProjectionSelector", - "SearchIndexerKnowledgeStoreParameters", - "SearchIndexerKnowledgeStoreProjection", - "SearchIndexerKnowledgeStoreProjectionSelector", - "SearchIndexerKnowledgeStoreTableProjectionSelector", - "SearchIndexerLimits", - "SearchIndexerSkill", - "SearchIndexerSkillset", - "SearchIndexerStatus", - "SearchIndexerWarning", - "SearchResourceEncryptionKey", - "SearchServiceCounters", - "SearchServiceLimits", - "SearchServiceStatistics", - "SearchSuggester", - "SemanticConfiguration", - "SemanticField", - "SemanticPrioritizedFields", - "SemanticSearch", - "SentimentSkill", - "SentimentSkillV3", - "ShaperSkill", - "ShingleTokenFilter", - "SimilarityAlgorithm", - "SkillNames", - "SnowballTokenFilter", - "SoftDeleteColumnDeletionDetectionPolicy", - "SplitSkill", - "SqlIntegratedChangeTrackingPolicy", - "StemmerOverrideTokenFilter", - "StemmerTokenFilter", - "StopAnalyzer", - "StopwordsTokenFilter", - "SynonymMap", - "SynonymTokenFilter", - "TagScoringFunction", - "TagScoringParameters", - "TextTranslationSkill", - "TextWeights", - "TokenFilter", - "TruncateTokenFilter", - "UaxUrlEmailTokenizer", - "UniqueTokenFilter", - "VectorSearch", - "VectorSearchAlgorithmConfiguration", - "VectorSearchCompression", - "VectorSearchProfile", - "VectorSearchVectorizer", - "VisionVectorizeSkill", - "WebApiSkill", - "WebApiVectorizer", - "WebApiVectorizerParameters", - "WordDelimiterTokenFilter", - "AIStudioModelCatalogName", - "AzureOpenAIModelName", - "BlobIndexerDataToExtract", - "BlobIndexerImageAction", - "BlobIndexerPDFTextRotationAlgorithm", - "BlobIndexerParsingMode", - "CharFilterName", - "CjkBigramTokenFilterScripts", - "CustomEntityLookupSkillLanguage", - "DocumentIntelligenceLayoutSkillMarkdownHeaderDepth", - "DocumentIntelligenceLayoutSkillOutputMode", - "EdgeNGramTokenFilterSide", - "EntityCategory", - "EntityRecognitionSkillLanguage", - "Enum0", - "ImageAnalysisSkillLanguage", - "ImageDetail", - "IndexProjectionMode", - "IndexerExecutionEnvironment", - "IndexerExecutionStatus", - "IndexerExecutionStatusDetail", - "IndexerStatus", - "IndexingMode", - "KeyPhraseExtractionSkillLanguage", - "LexicalAnalyzerName", - "LexicalNormalizerName", - "LexicalTokenizerName", - "MarkdownHeaderDepth", - "MarkdownParsingSubmode", - "MicrosoftStemmingTokenizerLanguage", - "MicrosoftTokenizerLanguage", - "OcrLineEnding", - "OcrSkillLanguage", - "PIIDetectionSkillMaskingMode", - "PhoneticEncoder", - "RegexFlags", - "ScoringFunctionAggregation", - "ScoringFunctionInterpolation", - "SearchFieldDataType", - "SearchIndexerDataSourceType", - "SentimentSkillLanguage", - "SnowballTokenFilterLanguage", - "SplitSkillEncoderModelName", - "SplitSkillLanguage", - "SplitSkillUnit", - "StemmerTokenFilterLanguage", - "StopwordsList", - "TextSplitMode", - "TextTranslationSkillLanguage", - "TokenCharacterKind", - "TokenFilterName", - "VectorEncodingFormat", - "VectorSearchAlgorithmKind", - "VectorSearchAlgorithmMetric", - "VectorSearchCompressionKind", - 
"VectorSearchCompressionRescoreStorageMethod", - "VectorSearchCompressionTarget", - "VectorSearchVectorizerKind", - "VisualFeature", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py deleted file mode 100644 index 9ef383c9aa0a..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_models_py3.py +++ /dev/null @@ -1,11729 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- - -import datetime -from typing import Any, Dict, List, Optional, TYPE_CHECKING, Union - -from .. import _serialization - -if TYPE_CHECKING: - from .. import models as _models - - -class CognitiveServicesAccount(_serialization.Model): - """Base type for describing any Azure AI service resource attached to a skillset. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AIServicesAccountIdentity, AIServicesAccountKey, CognitiveServicesAccountKey, - DefaultCognitiveServicesAccount - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a - skillset. Required. - :vartype odata_type: str - :ivar description: Description of the Azure AI service resource attached to a skillset. - :vartype description: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "description": {"key": "description", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.AIServicesByIdentity": "AIServicesAccountIdentity", - "#Microsoft.Azure.Search.AIServicesByKey": "AIServicesAccountKey", - "#Microsoft.Azure.Search.CognitiveServicesByKey": "CognitiveServicesAccountKey", - "#Microsoft.Azure.Search.DefaultCognitiveServices": "DefaultCognitiveServicesAccount", - } - } - - def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword description: Description of the Azure AI service resource attached to a skillset. - :paramtype description: str - """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - self.description = description - - -class AIServicesAccountIdentity(CognitiveServicesAccount): - """The multi-region account of an Azure AI service resource that's attached to a skillset. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a - skillset. Required. - :vartype odata_type: str - :ivar description: Description of the Azure AI service resource attached to a skillset. - :vartype description: str - :ivar identity: The user-assigned managed identity used for connections to AI Service. If not - specified, the system-assigned managed identity is used. On updates to the skillset, if the - identity is unspecified, the value remains unchanged. 
If set to "none", the value of this - property is cleared. Required. - :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar subdomain_url: The subdomain url for the corresponding AI Service. Required. - :vartype subdomain_url: str - """ - - _validation = { - "odata_type": {"required": True}, - "identity": {"required": True}, - "subdomain_url": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "identity": {"key": "identity", "type": "SearchIndexerDataIdentity"}, - "subdomain_url": {"key": "subdomainUrl", "type": "str"}, - } - - def __init__( - self, - *, - identity: "_models.SearchIndexerDataIdentity", - subdomain_url: str, - description: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword description: Description of the Azure AI service resource attached to a skillset. - :paramtype description: str - :keyword identity: The user-assigned managed identity used for connections to AI Service. If - not specified, the system-assigned managed identity is used. On updates to the skillset, if the - identity is unspecified, the value remains unchanged. If set to "none", the value of this - property is cleared. Required. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :keyword subdomain_url: The subdomain url for the corresponding AI Service. Required. - :paramtype subdomain_url: str - """ - super().__init__(description=description, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.AIServicesByIdentity" - self.identity = identity - self.subdomain_url = subdomain_url - - -class AIServicesAccountKey(CognitiveServicesAccount): - """The account key of an Azure AI service resource that's attached to a skillset, to be used with - the resource's subdomain. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a - skillset. Required. - :vartype odata_type: str - :ivar description: Description of the Azure AI service resource attached to a skillset. - :vartype description: str - :ivar key: The key used to provision the Azure AI service resource attached to a skillset. - Required. - :vartype key: str - :ivar subdomain_url: The subdomain url for the corresponding AI Service. Required. - :vartype subdomain_url: str - """ - - _validation = { - "odata_type": {"required": True}, - "key": {"required": True}, - "subdomain_url": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "key": {"key": "key", "type": "str"}, - "subdomain_url": {"key": "subdomainUrl", "type": "str"}, - } - - def __init__(self, *, key: str, subdomain_url: str, description: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword description: Description of the Azure AI service resource attached to a skillset. - :paramtype description: str - :keyword key: The key used to provision the Azure AI service resource attached to a skillset. - Required. - :paramtype key: str - :keyword subdomain_url: The subdomain url for the corresponding AI Service. Required. 
- :paramtype subdomain_url: str - """ - super().__init__(description=description, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.AIServicesByKey" - self.key = key - self.subdomain_url = subdomain_url - - -class AIServicesVisionParameters(_serialization.Model): - """Specifies the AI Services Vision parameters for vectorizing a query image or text. - - All required parameters must be populated in order to send to server. - - :ivar model_version: The version of the model to use when calling the AI Services Vision - service. It will default to the latest available when not specified. Required. - :vartype model_version: str - :ivar resource_uri: The resource URI of the AI Services resource. Required. - :vartype resource_uri: str - :ivar api_key: API key of the designated AI Services resource. - :vartype api_key: str - :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an - authResourceId is provided and it's not specified, the system-assigned managed identity is - used. On updates to the index, if the identity is unspecified, the value remains unchanged. If - set to "none", the value of this property is cleared. - :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - - _validation = { - "model_version": {"required": True}, - "resource_uri": {"required": True}, - } - - _attribute_map = { - "model_version": {"key": "modelVersion", "type": "str"}, - "resource_uri": {"key": "resourceUri", "type": "str"}, - "api_key": {"key": "apiKey", "type": "str"}, - "auth_identity": {"key": "authIdentity", "type": "SearchIndexerDataIdentity"}, - } - - def __init__( - self, - *, - model_version: str, - resource_uri: str, - api_key: Optional[str] = None, - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, - **kwargs: Any - ) -> None: - """ - :keyword model_version: The version of the model to use when calling the AI Services Vision - service. It will default to the latest available when not specified. Required. - :paramtype model_version: str - :keyword resource_uri: The resource URI of the AI Services resource. Required. - :paramtype resource_uri: str - :keyword api_key: API key of the designated AI Services resource. - :paramtype api_key: str - :keyword auth_identity: The user-assigned managed identity used for outbound connections. If an - authResourceId is provided and it's not specified, the system-assigned managed identity is - used. On updates to the index, if the identity is unspecified, the value remains unchanged. If - set to "none", the value of this property is cleared. - :paramtype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - super().__init__(**kwargs) - self.model_version = model_version - self.resource_uri = resource_uri - self.api_key = api_key - self.auth_identity = auth_identity - - -class VectorSearchVectorizer(_serialization.Model): - """Specifies the vectorization method to be used during query time. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AIServicesVisionVectorizer, AzureMachineLearningVectorizer, AzureOpenAIVectorizer, - WebApiVectorizer - - All required parameters must be populated in order to send to server. - - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar kind: The name of the kind of vectorization method being configured for use with vector - search. Required. 
Known values are: "azureOpenAI", "customWebApi", "aiServicesVision", and - "aml". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind - """ - - _validation = { - "vectorizer_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "vectorizer_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - } - - _subtype_map = { - "kind": { - "aiServicesVision": "AIServicesVisionVectorizer", - "aml": "AzureMachineLearningVectorizer", - "azureOpenAI": "AzureOpenAIVectorizer", - "customWebApi": "WebApiVectorizer", - } - } - - def __init__(self, *, vectorizer_name: str, **kwargs: Any) -> None: - """ - :keyword vectorizer_name: The name to associate with this particular vectorization method. - Required. - :paramtype vectorizer_name: str - """ - super().__init__(**kwargs) - self.vectorizer_name = vectorizer_name - self.kind: Optional[str] = None - - -class AIServicesVisionVectorizer(VectorSearchVectorizer): - """Specifies the AI Services Vision parameters for vectorizing a query image or text. - - All required parameters must be populated in order to send to server. - - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar kind: The name of the kind of vectorization method being configured for use with vector - search. Required. Known values are: "azureOpenAI", "customWebApi", "aiServicesVision", and - "aml". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind - :ivar ai_services_vision_parameters: Contains the parameters specific to AI Services Vision - embedding vectorization. - :vartype ai_services_vision_parameters: - ~azure.search.documents.indexes.models.AIServicesVisionParameters - """ - - _validation = { - "vectorizer_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "vectorizer_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "ai_services_vision_parameters": {"key": "aiServicesVisionParameters", "type": "AIServicesVisionParameters"}, - } - - def __init__( - self, - *, - vectorizer_name: str, - ai_services_vision_parameters: Optional["_models.AIServicesVisionParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword vectorizer_name: The name to associate with this particular vectorization method. - Required. - :paramtype vectorizer_name: str - :keyword ai_services_vision_parameters: Contains the parameters specific to AI Services Vision - embedding vectorization. - :paramtype ai_services_vision_parameters: - ~azure.search.documents.indexes.models.AIServicesVisionParameters - """ - super().__init__(vectorizer_name=vectorizer_name, **kwargs) - self.kind: str = "aiServicesVision" - self.ai_services_vision_parameters = ai_services_vision_parameters - - -class AnalyzedTokenInfo(_serialization.Model): - """Information about a token returned by an analyzer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar token: The token returned by the analyzer. Required. - :vartype token: str - :ivar start_offset: The index of the first character of the token in the input text. Required. - :vartype start_offset: int - :ivar end_offset: The index of the last character of the token in the input text. Required. 
- :vartype end_offset: int - :ivar position: The position of the token in the input text relative to other tokens. The first - token in the input text has position 0, the next has position 1, and so on. Depending on the - analyzer used, some tokens might have the same position, for example if they are synonyms of - each other. Required. - :vartype position: int - """ - - _validation = { - "token": {"required": True, "readonly": True}, - "start_offset": {"required": True, "readonly": True}, - "end_offset": {"required": True, "readonly": True}, - "position": {"required": True, "readonly": True}, - } - - _attribute_map = { - "token": {"key": "token", "type": "str"}, - "start_offset": {"key": "startOffset", "type": "int"}, - "end_offset": {"key": "endOffset", "type": "int"}, - "position": {"key": "position", "type": "int"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.token = None - self.start_offset = None - self.end_offset = None - self.position = None - - -class AnalyzeRequest(_serialization.Model): - """Specifies some text and analysis components used to break that text into tokens. - - All required parameters must be populated in order to send to server. - - :ivar text: The text to break into tokens. Required. - :vartype text: str - :ivar analyzer: The name of the analyzer to use to break the given text. If this parameter is - not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are - mutually exclusive. Known values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", - "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", - "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", - "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", - "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", - "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", - "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", - "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", - "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", - "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", - "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". - :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :ivar tokenizer: The name of the tokenizer to use to break the given text. If this parameter is - not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are - mutually exclusive. Known values are: "classic", "edgeNGram", "keyword_v2", "letter", - "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", - "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", and "whitespace". 
- :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :ivar normalizer: The name of the normalizer to use to normalize the given text. Known values - are: "asciifolding", "elision", "lowercase", "standard", and "uppercase". - :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :ivar token_filters: An optional list of token filters to use when breaking the given text. - This parameter can only be set when using the tokenizer parameter. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :ivar char_filters: An optional list of character filters to use when breaking the given text. - This parameter can only be set when using the tokenizer parameter. - :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] - """ - - _validation = { - "text": {"required": True}, - } - - _attribute_map = { - "text": {"key": "text", "type": "str"}, - "analyzer": {"key": "analyzer", "type": "str"}, - "tokenizer": {"key": "tokenizer", "type": "str"}, - "normalizer": {"key": "normalizer", "type": "str"}, - "token_filters": {"key": "tokenFilters", "type": "[str]"}, - "char_filters": {"key": "charFilters", "type": "[str]"}, - } - - def __init__( - self, - *, - text: str, - analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, - tokenizer: Optional[Union[str, "_models.LexicalTokenizerName"]] = None, - normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = None, - token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = None, - char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = None, - **kwargs: Any - ) -> None: - """ - :keyword text: The text to break into tokens. Required. - :paramtype text: str - :keyword analyzer: The name of the analyzer to use to break the given text. If this parameter - is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters - are mutually exclusive. Known values are: "ar.microsoft", "ar.lucene", "hy.lucene", - "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", - "zh-Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", - "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", - "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", - "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", - "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", - "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", - "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", - "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", - "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", - "pt-PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", - "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", - "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", - "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", - "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", - "simple", "stop", and "whitespace". 
- :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword tokenizer: The name of the tokenizer to use to break the given text. If this parameter - is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters - are mutually exclusive. Known values are: "classic", "edgeNGram", "keyword_v2", "letter", - "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", "nGram", - "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", and "whitespace". - :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :keyword normalizer: The name of the normalizer to use to normalize the given text. Known - values are: "asciifolding", "elision", "lowercase", "standard", and "uppercase". - :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :keyword token_filters: An optional list of token filters to use when breaking the given text. - This parameter can only be set when using the tokenizer parameter. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: An optional list of character filters to use when breaking the given - text. This parameter can only be set when using the tokenizer parameter. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] - """ - super().__init__(**kwargs) - self.text = text - self.analyzer = analyzer - self.tokenizer = tokenizer - self.normalizer = normalizer - self.token_filters = token_filters - self.char_filters = char_filters - - -class AnalyzeResult(_serialization.Model): - """The result of testing an analyzer on text. - - All required parameters must be populated in order to send to server. - - :ivar tokens: The list of tokens returned by the analyzer specified in the request. Required. - :vartype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] - """ - - _validation = { - "tokens": {"required": True}, - } - - _attribute_map = { - "tokens": {"key": "tokens", "type": "[AnalyzedTokenInfo]"}, - } - - def __init__(self, *, tokens: List["_models.AnalyzedTokenInfo"], **kwargs: Any) -> None: - """ - :keyword tokens: The list of tokens returned by the analyzer specified in the request. - Required. - :paramtype tokens: list[~azure.search.documents.indexes.models.AnalyzedTokenInfo] - """ - super().__init__(**kwargs) - self.tokens = tokens - - -class TokenFilter(_serialization.Model): - """Base type for token filters. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - AsciiFoldingTokenFilter, CjkBigramTokenFilter, CommonGramTokenFilter, - DictionaryDecompounderTokenFilter, EdgeNGramTokenFilter, EdgeNGramTokenFilterV2, - ElisionTokenFilter, KeepTokenFilter, KeywordMarkerTokenFilter, LengthTokenFilter, - LimitTokenFilter, NGramTokenFilter, NGramTokenFilterV2, PatternCaptureTokenFilter, - PatternReplaceTokenFilter, PhoneticTokenFilter, ShingleTokenFilter, SnowballTokenFilter, - StemmerOverrideTokenFilter, StemmerTokenFilter, StopwordsTokenFilter, SynonymTokenFilter, - TruncateTokenFilter, UniqueTokenFilter, WordDelimiterTokenFilter - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. 
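A sketch of building the analyze-text request described above, using the generated AnalyzeRequest model directly; whether this class is exposed under the public models namespace (rather than only via a hand-written wrapper) is an assumption.

# Sketch: analyzer and tokenizer are mutually exclusive, and token/char
# filters may only be combined with a tokenizer.
from azure.search.documents.indexes.models import AnalyzeRequest

analyze_with_analyzer = AnalyzeRequest(
    text="The quick brown fox",
    analyzer="en.lucene",
)

analyze_with_tokenizer = AnalyzeRequest(
    text="The quick brown fox",
    tokenizer="standard_v2",
    token_filters=["lowercase"],
)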
It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.AsciiFoldingTokenFilter": "AsciiFoldingTokenFilter", - "#Microsoft.Azure.Search.CjkBigramTokenFilter": "CjkBigramTokenFilter", - "#Microsoft.Azure.Search.CommonGramTokenFilter": "CommonGramTokenFilter", - "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter": "DictionaryDecompounderTokenFilter", - "#Microsoft.Azure.Search.EdgeNGramTokenFilter": "EdgeNGramTokenFilter", - "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2": "EdgeNGramTokenFilterV2", - "#Microsoft.Azure.Search.ElisionTokenFilter": "ElisionTokenFilter", - "#Microsoft.Azure.Search.KeepTokenFilter": "KeepTokenFilter", - "#Microsoft.Azure.Search.KeywordMarkerTokenFilter": "KeywordMarkerTokenFilter", - "#Microsoft.Azure.Search.LengthTokenFilter": "LengthTokenFilter", - "#Microsoft.Azure.Search.LimitTokenFilter": "LimitTokenFilter", - "#Microsoft.Azure.Search.NGramTokenFilter": "NGramTokenFilter", - "#Microsoft.Azure.Search.NGramTokenFilterV2": "NGramTokenFilterV2", - "#Microsoft.Azure.Search.PatternCaptureTokenFilter": "PatternCaptureTokenFilter", - "#Microsoft.Azure.Search.PatternReplaceTokenFilter": "PatternReplaceTokenFilter", - "#Microsoft.Azure.Search.PhoneticTokenFilter": "PhoneticTokenFilter", - "#Microsoft.Azure.Search.ShingleTokenFilter": "ShingleTokenFilter", - "#Microsoft.Azure.Search.SnowballTokenFilter": "SnowballTokenFilter", - "#Microsoft.Azure.Search.StemmerOverrideTokenFilter": "StemmerOverrideTokenFilter", - "#Microsoft.Azure.Search.StemmerTokenFilter": "StemmerTokenFilter", - "#Microsoft.Azure.Search.StopwordsTokenFilter": "StopwordsTokenFilter", - "#Microsoft.Azure.Search.SynonymTokenFilter": "SynonymTokenFilter", - "#Microsoft.Azure.Search.TruncateTokenFilter": "TruncateTokenFilter", - "#Microsoft.Azure.Search.UniqueTokenFilter": "UniqueTokenFilter", - "#Microsoft.Azure.Search.WordDelimiterTokenFilter": "WordDelimiterTokenFilter", - } - } - - def __init__(self, *, name: str, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - self.name = name - - -class AsciiFoldingTokenFilter(TokenFilter): - """Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 - ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such - equivalents exist. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar preserve_original: A value indicating whether the original token will be kept. 
Default is - false. - :vartype preserve_original: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "preserve_original": {"key": "preserveOriginal", "type": "bool"}, - } - - def __init__(self, *, name: str, preserve_original: bool = False, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword preserve_original: A value indicating whether the original token will be kept. Default - is false. - :paramtype preserve_original: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.AsciiFoldingTokenFilter" - self.preserve_original = preserve_original - - -class AzureActiveDirectoryApplicationCredentials(_serialization.Model): # pylint: disable=name-too-long - """Credentials of a registered application created for your search service, used for authenticated - access to the encryption keys stored in Azure Key Vault. - - All required parameters must be populated in order to send to server. - - :ivar application_id: An AAD Application ID that was granted the required access permissions to - the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID - should not be confused with the Object ID for your AAD Application. Required. - :vartype application_id: str - :ivar application_secret: The authentication key of the specified AAD application. - :vartype application_secret: str - """ - - _validation = { - "application_id": {"required": True}, - } - - _attribute_map = { - "application_id": {"key": "applicationId", "type": "str"}, - "application_secret": {"key": "applicationSecret", "type": "str"}, - } - - def __init__(self, *, application_id: str, application_secret: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword application_id: An AAD Application ID that was granted the required access permissions - to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID - should not be confused with the Object ID for your AAD Application. Required. - :paramtype application_id: str - :keyword application_secret: The authentication key of the specified AAD application. - :paramtype application_secret: str - """ - super().__init__(**kwargs) - self.application_id = application_id - self.application_secret = application_secret - - -class AzureMachineLearningParameters(_serialization.Model): - """Specifies the properties for connecting to an AML vectorizer. - - All required parameters must be populated in order to send to server. - - :ivar scoring_uri: (Required for no authentication or key authentication) The scoring URI of - the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. - Required. - :vartype scoring_uri: str - :ivar authentication_key: (Required for key authentication) The key for the AML service. - :vartype authentication_key: str - :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID - of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. 
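A short sketch of the AsciiFoldingTokenFilter above, using only the keyword arguments documented in its constructor; the filter name is a placeholder and the import path assumes the public models namespace.

# Sketch: an ASCII-folding token filter that also keeps the original token.
from azure.search.documents.indexes.models import AsciiFoldingTokenFilter

ascii_folding = AsciiFoldingTokenFilter(
    name="my_ascii_folding",   # letters, digits, spaces, dashes or underscores, max 128 chars
    preserve_original=True,    # default is False
)
# Serializes with "@odata.type": "#Microsoft.Azure.Search.AsciiFoldingTokenFilter";
# reference it by name from a custom analyzer's token filter list.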
# pylint: disable=line-too-long - :vartype resource_id: str - :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the - API call. - :vartype timeout: ~datetime.timedelta - :ivar region: (Optional for token authentication). The region the AML service is deployed in. - :vartype region: str - :ivar model_name: The name of the embedding model from the Azure AI Studio Catalog that is - deployed at the provided endpoint. Known values are: - "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", - "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", - "Facebook-DinoV2-Image-Embeddings-ViT-Base", "Facebook-DinoV2-Image-Embeddings-ViT-Giant", - "Cohere-embed-v3-english", and "Cohere-embed-v3-multilingual". - :vartype model_name: str or ~azure.search.documents.indexes.models.AIStudioModelCatalogName - """ - - _validation = { - "scoring_uri": {"required": True}, - } - - _attribute_map = { - "scoring_uri": {"key": "uri", "type": "str"}, - "authentication_key": {"key": "key", "type": "str"}, - "resource_id": {"key": "resourceId", "type": "str"}, - "timeout": {"key": "timeout", "type": "duration"}, - "region": {"key": "region", "type": "str"}, - "model_name": {"key": "modelName", "type": "str"}, - } - - def __init__( - self, - *, - scoring_uri: str, - authentication_key: Optional[str] = None, - resource_id: Optional[str] = None, - timeout: Optional[datetime.timedelta] = None, - region: Optional[str] = None, - model_name: Optional[Union[str, "_models.AIStudioModelCatalogName"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword scoring_uri: (Required for no authentication or key authentication) The scoring URI of - the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. - Required. - :paramtype scoring_uri: str - :keyword authentication_key: (Required for key authentication) The key for the AML service. - :paramtype authentication_key: str - :keyword resource_id: (Required for token authentication). The Azure Resource Manager resource - ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long - :paramtype resource_id: str - :keyword timeout: (Optional) When specified, indicates the timeout for the http client making - the API call. - :paramtype timeout: ~datetime.timedelta - :keyword region: (Optional for token authentication). The region the AML service is deployed - in. - :paramtype region: str - :keyword model_name: The name of the embedding model from the Azure AI Studio Catalog that is - deployed at the provided endpoint. Known values are: - "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", - "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", - "Facebook-DinoV2-Image-Embeddings-ViT-Base", "Facebook-DinoV2-Image-Embeddings-ViT-Giant", - "Cohere-embed-v3-english", and "Cohere-embed-v3-multilingual". - :paramtype model_name: str or ~azure.search.documents.indexes.models.AIStudioModelCatalogName - """ - super().__init__(**kwargs) - self.scoring_uri = scoring_uri - self.authentication_key = authentication_key - self.resource_id = resource_id - self.timeout = timeout - self.region = region - self.model_name = model_name - - -class SearchIndexerSkill(_serialization.Model): - """Base type for skills. - - You probably want to use the sub-classes and not this class directly. 
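A sketch of AzureMachineLearningParameters configured for token (managed identity) authentication, per the parameter descriptions above; the scoring URI, resource ID, region, and catalog model name are placeholders and the import path is an assumption.

# Sketch: AML connection settings for a catalog-deployed embedding model.
from azure.search.documents.indexes.models import AzureMachineLearningParameters

aml_parameters = AzureMachineLearningParameters(
    scoring_uri="https://my-aml-endpoint.inference.ml.azure.com/score",  # https scheme only
    resource_id=(
        "subscriptions/<guid>/resourceGroups/<resource-group>/"
        "Microsoft.MachineLearningServices/workspaces/<workspace>/services/<service>"
    ),
    region="<region>",                       # optional for token authentication
    model_name="Cohere-embed-v3-english",    # one of the AI Studio catalog names listed above
)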
Known sub-classes are: - AzureMachineLearningSkill, WebApiSkill, AzureOpenAIEmbeddingSkill, CustomEntityLookupSkill, - EntityRecognitionSkill, KeyPhraseExtractionSkill, LanguageDetectionSkill, MergeSkill, - PIIDetectionSkill, SentimentSkill, SplitSkill, TextTranslationSkill, EntityLinkingSkill, - EntityRecognitionSkillV3, SentimentSkillV3, ConditionalSkill, DocumentExtractionSkill, - DocumentIntelligenceLayoutSkill, ShaperSkill, ImageAnalysisSkill, OcrSkill, - VisionVectorizeSkill - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Skills.Custom.AmlSkill": "AzureMachineLearningSkill", - "#Microsoft.Skills.Custom.WebApiSkill": "WebApiSkill", - "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill": "AzureOpenAIEmbeddingSkill", - "#Microsoft.Skills.Text.CustomEntityLookupSkill": "CustomEntityLookupSkill", - "#Microsoft.Skills.Text.EntityRecognitionSkill": "EntityRecognitionSkill", - "#Microsoft.Skills.Text.KeyPhraseExtractionSkill": "KeyPhraseExtractionSkill", - "#Microsoft.Skills.Text.LanguageDetectionSkill": "LanguageDetectionSkill", - "#Microsoft.Skills.Text.MergeSkill": "MergeSkill", - "#Microsoft.Skills.Text.PIIDetectionSkill": "PIIDetectionSkill", - "#Microsoft.Skills.Text.SentimentSkill": "SentimentSkill", - "#Microsoft.Skills.Text.SplitSkill": "SplitSkill", - "#Microsoft.Skills.Text.TranslationSkill": "TextTranslationSkill", - "#Microsoft.Skills.Text.V3.EntityLinkingSkill": "EntityLinkingSkill", - "#Microsoft.Skills.Text.V3.EntityRecognitionSkill": "EntityRecognitionSkillV3", - "#Microsoft.Skills.Text.V3.SentimentSkill": "SentimentSkillV3", - "#Microsoft.Skills.Util.ConditionalSkill": "ConditionalSkill", - "#Microsoft.Skills.Util.DocumentExtractionSkill": "DocumentExtractionSkill", - "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill": "DocumentIntelligenceLayoutSkill", - 
"#Microsoft.Skills.Util.ShaperSkill": "ShaperSkill", - "#Microsoft.Skills.Vision.ImageAnalysisSkill": "ImageAnalysisSkill", - "#Microsoft.Skills.Vision.OcrSkill": "OcrSkill", - "#Microsoft.Skills.Vision.VectorizeSkill": "VisionVectorizeSkill", - } - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - self.name = name - self.description = description - self.context = context - self.inputs = inputs - self.outputs = outputs - - -class AzureMachineLearningSkill(SearchIndexerSkill): - """The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) - model. Once an AML model is trained and deployed, an AML skill integrates it into AI - enrichment. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar scoring_uri: (Required for no authentication or key authentication) The scoring URI of - the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. 
- :vartype scoring_uri: str - :ivar authentication_key: (Required for key authentication) The key for the AML service. - :vartype authentication_key: str - :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID - of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long - :vartype resource_id: str - :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the - API call. - :vartype timeout: ~datetime.timedelta - :ivar region: (Optional for token authentication). The region the AML service is deployed in. - :vartype region: str - :ivar degree_of_parallelism: (Optional) When specified, indicates the number of calls the - indexer will make in parallel to the endpoint you have provided. You can decrease this value if - your endpoint is failing under too high of a request load, or raise it if your endpoint is able - to accept more requests and you would like an increase in the performance of the indexer. If - not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 - and a minimum of 1. - :vartype degree_of_parallelism: int - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "scoring_uri": {"key": "uri", "type": "str"}, - "authentication_key": {"key": "key", "type": "str"}, - "resource_id": {"key": "resourceId", "type": "str"}, - "timeout": {"key": "timeout", "type": "duration"}, - "region": {"key": "region", "type": "str"}, - "degree_of_parallelism": {"key": "degreeOfParallelism", "type": "int"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - scoring_uri: Optional[str] = None, - authentication_key: Optional[str] = None, - resource_id: Optional[str] = None, - timeout: Optional[datetime.timedelta] = None, - region: Optional[str] = None, - degree_of_parallelism: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. 
- :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword scoring_uri: (Required for no authentication or key authentication) The scoring URI of - the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. - :paramtype scoring_uri: str - :keyword authentication_key: (Required for key authentication) The key for the AML service. - :paramtype authentication_key: str - :keyword resource_id: (Required for token authentication). The Azure Resource Manager resource - ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long - :paramtype resource_id: str - :keyword timeout: (Optional) When specified, indicates the timeout for the http client making - the API call. - :paramtype timeout: ~datetime.timedelta - :keyword region: (Optional for token authentication). The region the AML service is deployed - in. - :paramtype region: str - :keyword degree_of_parallelism: (Optional) When specified, indicates the number of calls the - indexer will make in parallel to the endpoint you have provided. You can decrease this value if - your endpoint is failing under too high of a request load, or raise it if your endpoint is able - to accept more requests and you would like an increase in the performance of the indexer. If - not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 - and a minimum of 1. - :paramtype degree_of_parallelism: int - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Custom.AmlSkill" - self.scoring_uri = scoring_uri - self.authentication_key = authentication_key - self.resource_id = resource_id - self.timeout = timeout - self.region = region - self.degree_of_parallelism = degree_of_parallelism - - -class AzureMachineLearningVectorizer(VectorSearchVectorizer): - """Specifies an Azure Machine Learning endpoint deployed via the Azure AI Studio Model Catalog for - generating the vector embedding of a query string. - - All required parameters must be populated in order to send to server. - - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar kind: The name of the kind of vectorization method being configured for use with vector - search. Required. Known values are: "azureOpenAI", "customWebApi", "aiServicesVision", and - "aml". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind - :ivar aml_parameters: Specifies the properties of the AML vectorizer. 
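A sketch of the AzureMachineLearningSkill above wired into a skillset, using key authentication; it assumes InputFieldMappingEntry and OutputFieldMappingEntry take the usual name/source and name/target_name keywords, and all endpoint values are placeholders.

# Sketch: call a custom AML endpoint from an enrichment pipeline.
from azure.search.documents.indexes.models import (
    AzureMachineLearningSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

aml_skill = AzureMachineLearningSkill(
    name="aml-embedding-skill",
    description="Calls a custom AML model during enrichment",
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="embedding", target_name="content_vector")],
    scoring_uri="https://my-aml-endpoint.inference.ml.azure.com/score",
    authentication_key="<aml-key>",
    degree_of_parallelism=5,   # default is 5; valid range is 1 to 10
)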
- :vartype aml_parameters: ~azure.search.documents.indexes.models.AzureMachineLearningParameters - """ - - _validation = { - "vectorizer_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "vectorizer_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "aml_parameters": {"key": "amlParameters", "type": "AzureMachineLearningParameters"}, - } - - def __init__( - self, - *, - vectorizer_name: str, - aml_parameters: Optional["_models.AzureMachineLearningParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword vectorizer_name: The name to associate with this particular vectorization method. - Required. - :paramtype vectorizer_name: str - :keyword aml_parameters: Specifies the properties of the AML vectorizer. - :paramtype aml_parameters: - ~azure.search.documents.indexes.models.AzureMachineLearningParameters - """ - super().__init__(vectorizer_name=vectorizer_name, **kwargs) - self.kind: str = "aml" - self.aml_parameters = aml_parameters - - -class AzureOpenAIVectorizerParameters(_serialization.Model): - """Specifies the parameters for connecting to the Azure OpenAI resource. - - :ivar resource_url: The resource URI of the Azure OpenAI resource. - :vartype resource_url: str - :ivar deployment_name: ID of the Azure OpenAI model deployment on the designated resource. - :vartype deployment_name: str - :ivar api_key: API key of the designated Azure OpenAI resource. - :vartype api_key: str - :ivar auth_identity: The user-assigned managed identity used for outbound connections. - :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId - path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", and - "text-embedding-3-small". - :vartype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName - """ - - _attribute_map = { - "resource_url": {"key": "resourceUri", "type": "str"}, - "deployment_name": {"key": "deploymentId", "type": "str"}, - "api_key": {"key": "apiKey", "type": "str"}, - "auth_identity": {"key": "authIdentity", "type": "SearchIndexerDataIdentity"}, - "model_name": {"key": "modelName", "type": "str"}, - } - - def __init__( - self, - *, - resource_url: Optional[str] = None, - deployment_name: Optional[str] = None, - api_key: Optional[str] = None, - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, - model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword resource_url: The resource URI of the Azure OpenAI resource. - :paramtype resource_url: str - :keyword deployment_name: ID of the Azure OpenAI model deployment on the designated resource. - :paramtype deployment_name: str - :keyword api_key: API key of the designated Azure OpenAI resource. - :paramtype api_key: str - :keyword auth_identity: The user-assigned managed identity used for outbound connections. - :paramtype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :keyword model_name: The name of the embedding model that is deployed at the provided - deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", and - "text-embedding-3-small". 
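The matching query-time piece: a sketch of AzureMachineLearningVectorizer wrapping its connection parameters, here with key authentication; import path, endpoint, and key are assumptions or placeholders.

# Sketch: query-time vectorization through an AML / AI Studio catalog endpoint.
from azure.search.documents.indexes.models import (
    AzureMachineLearningParameters,
    AzureMachineLearningVectorizer,
)

aml_vectorizer = AzureMachineLearningVectorizer(
    vectorizer_name="my-aml-vectorizer",
    aml_parameters=AzureMachineLearningParameters(
        scoring_uri="https://my-aml-endpoint.inference.ml.azure.com/score",
        authentication_key="<aml-key>",
        model_name="Cohere-embed-v3-multilingual",
    ),
)
# kind is fixed to "aml" by the subclass constructor.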
- :paramtype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName - """ - super().__init__(**kwargs) - self.resource_url = resource_url - self.deployment_name = deployment_name - self.api_key = api_key - self.auth_identity = auth_identity - self.model_name = model_name - - -class AzureOpenAIEmbeddingSkill(SearchIndexerSkill, AzureOpenAIVectorizerParameters): - """Allows you to generate a vector embedding for a given text input using the Azure OpenAI - resource. - - All required parameters must be populated in order to send to server. - - :ivar resource_url: The resource URI of the Azure OpenAI resource. - :vartype resource_url: str - :ivar deployment_name: ID of the Azure OpenAI model deployment on the designated resource. - :vartype deployment_name: str - :ivar api_key: API key of the designated Azure OpenAI resource. - :vartype api_key: str - :ivar auth_identity: The user-assigned managed identity used for outbound connections. - :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar model_name: The name of the embedding model that is deployed at the provided deploymentId - path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", and - "text-embedding-3-small". - :vartype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar dimensions: The number of dimensions the resulting output embeddings should have. Only - supported in text-embedding-3 and later models. 
- :vartype dimensions: int - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "resource_url": {"key": "resourceUri", "type": "str"}, - "deployment_name": {"key": "deploymentId", "type": "str"}, - "api_key": {"key": "apiKey", "type": "str"}, - "auth_identity": {"key": "authIdentity", "type": "SearchIndexerDataIdentity"}, - "model_name": {"key": "modelName", "type": "str"}, - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "dimensions": {"key": "dimensions", "type": "int"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - resource_url: Optional[str] = None, - deployment_name: Optional[str] = None, - api_key: Optional[str] = None, - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, - model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = None, - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - dimensions: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword resource_url: The resource URI of the Azure OpenAI resource. - :paramtype resource_url: str - :keyword deployment_name: ID of the Azure OpenAI model deployment on the designated resource. - :paramtype deployment_name: str - :keyword api_key: API key of the designated Azure OpenAI resource. - :paramtype api_key: str - :keyword auth_identity: The user-assigned managed identity used for outbound connections. - :paramtype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :keyword model_name: The name of the embedding model that is deployed at the provided - deploymentId path. Known values are: "text-embedding-ada-002", "text-embedding-3-large", and - "text-embedding-3-small". - :paramtype model_name: str or ~azure.search.documents.indexes.models.AzureOpenAIModelName - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword dimensions: The number of dimensions the resulting output embeddings should have. Only - supported in text-embedding-3 and later models. 
- :paramtype dimensions: int - """ - super().__init__( - name=name, - description=description, - context=context, - inputs=inputs, - outputs=outputs, - resource_url=resource_url, - deployment_name=deployment_name, - api_key=api_key, - auth_identity=auth_identity, - model_name=model_name, - **kwargs - ) - self.resource_url = resource_url - self.deployment_name = deployment_name - self.api_key = api_key - self.auth_identity = auth_identity - self.model_name = model_name - self.odata_type: str = "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill" - self.dimensions = dimensions - self.name = name - self.description = description - self.context = context - self.inputs = inputs - self.outputs = outputs - - -class AzureOpenAITokenizerParameters(_serialization.Model): - """AzureOpenAITokenizerParameters. - - :ivar encoder_model_name: Only applies if the unit is set to azureOpenAITokens. Options include - 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. - Known values are: "r50k_base", "p50k_base", "p50k_edit", and "cl100k_base". - :vartype encoder_model_name: str or - ~azure.search.documents.indexes.models.SplitSkillEncoderModelName - :ivar allowed_special_tokens: (Optional) Only applies if the unit is set to azureOpenAITokens. - This parameter defines a collection of special tokens that are permitted within the - tokenization process. - :vartype allowed_special_tokens: list[str] - """ - - _attribute_map = { - "encoder_model_name": {"key": "encoderModelName", "type": "str"}, - "allowed_special_tokens": {"key": "allowedSpecialTokens", "type": "[str]"}, - } - - def __init__( - self, - *, - encoder_model_name: Optional[Union[str, "_models.SplitSkillEncoderModelName"]] = None, - allowed_special_tokens: Optional[List[str]] = None, - **kwargs: Any - ) -> None: - """ - :keyword encoder_model_name: Only applies if the unit is set to azureOpenAITokens. Options - include 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is - 'CL100k_base'. Known values are: "r50k_base", "p50k_base", "p50k_edit", and "cl100k_base". - :paramtype encoder_model_name: str or - ~azure.search.documents.indexes.models.SplitSkillEncoderModelName - :keyword allowed_special_tokens: (Optional) Only applies if the unit is set to - azureOpenAITokens. This parameter defines a collection of special tokens that are permitted - within the tokenization process. - :paramtype allowed_special_tokens: list[str] - """ - super().__init__(**kwargs) - self.encoder_model_name = encoder_model_name - self.allowed_special_tokens = allowed_special_tokens - - -class AzureOpenAIVectorizer(VectorSearchVectorizer): - """Specifies the Azure OpenAI resource used to vectorize a query string. - - All required parameters must be populated in order to send to server. - - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar kind: The name of the kind of vectorization method being configured for use with vector - search. Required. Known values are: "azureOpenAI", "customWebApi", "aiServicesVision", and - "aml". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind - :ivar parameters: Contains the parameters specific to Azure OpenAI embedding vectorization. 
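A sketch of the AzureOpenAIEmbeddingSkill above, vectorizing /document/content into a vector output; it reuses the InputFieldMappingEntry/OutputFieldMappingEntry assumption from the earlier AML sketch, and the resource URL and deployment name are placeholders.

# Sketch: generate embeddings with Azure OpenAI during indexing.
from azure.search.documents.indexes.models import (
    AzureOpenAIEmbeddingSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

embedding_skill = AzureOpenAIEmbeddingSkill(
    name="azure-openai-embedding-skill",
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="embedding", target_name="content_vector")],
    resource_url="https://my-openai.openai.azure.com",
    deployment_name="<embedding-deployment>",
    model_name="text-embedding-3-large",
    dimensions=1024,   # only supported on text-embedding-3 and later models
    # supply api_key or auth_identity for authentication
)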
- :vartype parameters: ~azure.search.documents.indexes.models.AzureOpenAIVectorizerParameters - """ - - _validation = { - "vectorizer_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "vectorizer_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "parameters": {"key": "azureOpenAIParameters", "type": "AzureOpenAIVectorizerParameters"}, - } - - def __init__( - self, - *, - vectorizer_name: str, - parameters: Optional["_models.AzureOpenAIVectorizerParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword vectorizer_name: The name to associate with this particular vectorization method. - Required. - :paramtype vectorizer_name: str - :keyword parameters: Contains the parameters specific to Azure OpenAI embedding vectorization. - :paramtype parameters: ~azure.search.documents.indexes.models.AzureOpenAIVectorizerParameters - """ - super().__init__(vectorizer_name=vectorizer_name, **kwargs) - self.kind: str = "azureOpenAI" - self.parameters = parameters - - -class VectorSearchCompression(_serialization.Model): - """Contains configuration options specific to the compression method used during indexing or - querying. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - BinaryQuantizationCompression, ScalarQuantizationCompression - - All required parameters must be populated in order to send to server. - - :ivar compression_name: The name to associate with this particular configuration. Required. - :vartype compression_name: str - :ivar kind: The name of the kind of compression method being configured for use with vector - search. Required. Known values are: "scalarQuantization" and "binaryQuantization". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchCompressionKind - :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated - using compressed vectors are obtained, they will be reranked again by recalculating the - full-precision similarity scores. This will improve recall at the expense of latency. - :vartype rerank_with_original_vectors: bool - :ivar default_oversampling: Default oversampling factor. Oversampling will internally request - more documents (specified by this multiplier) in the initial search. This increases the set of - results that will be reranked using recomputed similarity scores from full-precision vectors. - Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :vartype default_oversampling: float - :ivar rescoring_options: Contains the options for rescoring. - :vartype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions - :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the - vectors reduces the size of the vectors and the amount of data that needs to be transferred - during search. This can save storage cost and improve search performance at the expense of - recall. It should be only used for embeddings trained with Matryoshka Representation Learning - (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no - truncation. 
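And the corresponding query-time configuration: a sketch of AzureOpenAIVectorizer with AzureOpenAIVectorizerParameters, using the same placeholder resource and deployment as above.

# Sketch: query-time vectorization against the same Azure OpenAI deployment
# used at indexing time.
from azure.search.documents.indexes.models import (
    AzureOpenAIVectorizer,
    AzureOpenAIVectorizerParameters,
)

aoai_vectorizer = AzureOpenAIVectorizer(
    vectorizer_name="my-openai-vectorizer",
    parameters=AzureOpenAIVectorizerParameters(
        resource_url="https://my-openai.openai.azure.com",
        deployment_name="<embedding-deployment>",
        model_name="text-embedding-3-large",
        api_key="<azure-openai-key>",   # or auth_identity for managed identity
    ),
)
# kind is fixed to "azureOpenAI"; the parameters serialize under "azureOpenAIParameters".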
- :vartype truncation_dimension: int - """ - - _validation = { - "compression_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "compression_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "rerank_with_original_vectors": {"key": "rerankWithOriginalVectors", "type": "bool"}, - "default_oversampling": {"key": "defaultOversampling", "type": "float"}, - "rescoring_options": {"key": "rescoringOptions", "type": "RescoringOptions"}, - "truncation_dimension": {"key": "truncationDimension", "type": "int"}, - } - - _subtype_map = { - "kind": { - "binaryQuantization": "BinaryQuantizationCompression", - "scalarQuantization": "ScalarQuantizationCompression", - } - } - - def __init__( - self, - *, - compression_name: str, - rerank_with_original_vectors: bool = True, - default_oversampling: Optional[float] = None, - rescoring_options: Optional["_models.RescoringOptions"] = None, - truncation_dimension: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword compression_name: The name to associate with this particular configuration. Required. - :paramtype compression_name: str - :keyword rerank_with_original_vectors: If set to true, once the ordered set of results - calculated using compressed vectors are obtained, they will be reranked again by recalculating - the full-precision similarity scores. This will improve recall at the expense of latency. - :paramtype rerank_with_original_vectors: bool - :keyword default_oversampling: Default oversampling factor. Oversampling will internally - request more documents (specified by this multiplier) in the initial search. This increases the - set of results that will be reranked using recomputed similarity scores from full-precision - vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :paramtype default_oversampling: float - :keyword rescoring_options: Contains the options for rescoring. - :paramtype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions - :keyword truncation_dimension: The number of dimensions to truncate the vectors to. Truncating - the vectors reduces the size of the vectors and the amount of data that needs to be transferred - during search. This can save storage cost and improve search performance at the expense of - recall. It should be only used for embeddings trained with Matryoshka Representation Learning - (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no - truncation. - :paramtype truncation_dimension: int - """ - super().__init__(**kwargs) - self.compression_name = compression_name - self.kind: Optional[str] = None - self.rerank_with_original_vectors = rerank_with_original_vectors - self.default_oversampling = default_oversampling - self.rescoring_options = rescoring_options - self.truncation_dimension = truncation_dimension - - -class BinaryQuantizationCompression(VectorSearchCompression): - """Contains configuration options specific to the binary quantization compression method used - during indexing and querying. - - All required parameters must be populated in order to send to server. - - :ivar compression_name: The name to associate with this particular configuration. Required. - :vartype compression_name: str - :ivar kind: The name of the kind of compression method being configured for use with vector - search. Required. 
Known values are: "scalarQuantization" and "binaryQuantization". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchCompressionKind - :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated - using compressed vectors are obtained, they will be reranked again by recalculating the - full-precision similarity scores. This will improve recall at the expense of latency. - :vartype rerank_with_original_vectors: bool - :ivar default_oversampling: Default oversampling factor. Oversampling will internally request - more documents (specified by this multiplier) in the initial search. This increases the set of - results that will be reranked using recomputed similarity scores from full-precision vectors. - Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :vartype default_oversampling: float - :ivar rescoring_options: Contains the options for rescoring. - :vartype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions - :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the - vectors reduces the size of the vectors and the amount of data that needs to be transferred - during search. This can save storage cost and improve search performance at the expense of - recall. It should be only used for embeddings trained with Matryoshka Representation Learning - (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no - truncation. - :vartype truncation_dimension: int - """ - - _validation = { - "compression_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "compression_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "rerank_with_original_vectors": {"key": "rerankWithOriginalVectors", "type": "bool"}, - "default_oversampling": {"key": "defaultOversampling", "type": "float"}, - "rescoring_options": {"key": "rescoringOptions", "type": "RescoringOptions"}, - "truncation_dimension": {"key": "truncationDimension", "type": "int"}, - } - - def __init__( - self, - *, - compression_name: str, - rerank_with_original_vectors: bool = True, - default_oversampling: Optional[float] = None, - rescoring_options: Optional["_models.RescoringOptions"] = None, - truncation_dimension: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword compression_name: The name to associate with this particular configuration. Required. - :paramtype compression_name: str - :keyword rerank_with_original_vectors: If set to true, once the ordered set of results - calculated using compressed vectors are obtained, they will be reranked again by recalculating - the full-precision similarity scores. This will improve recall at the expense of latency. - :paramtype rerank_with_original_vectors: bool - :keyword default_oversampling: Default oversampling factor. Oversampling will internally - request more documents (specified by this multiplier) in the initial search. This increases the - set of results that will be reranked using recomputed similarity scores from full-precision - vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :paramtype default_oversampling: float - :keyword rescoring_options: Contains the options for rescoring. 
- :paramtype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions - :keyword truncation_dimension: The number of dimensions to truncate the vectors to. Truncating - the vectors reduces the size of the vectors and the amount of data that needs to be transferred - during search. This can save storage cost and improve search performance at the expense of - recall. It should be only used for embeddings trained with Matryoshka Representation Learning - (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no - truncation. - :paramtype truncation_dimension: int - """ - super().__init__( - compression_name=compression_name, - rerank_with_original_vectors=rerank_with_original_vectors, - default_oversampling=default_oversampling, - rescoring_options=rescoring_options, - truncation_dimension=truncation_dimension, - **kwargs - ) - self.kind: str = "binaryQuantization" - - -class SimilarityAlgorithm(_serialization.Model): - """Base type for similarity algorithms. Similarity algorithms are used to calculate scores that - tie queries to documents. The higher the score, the more relevant the document is to that - specific query. Those scores are used to rank the search results. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - BM25SimilarityAlgorithm, ClassicSimilarityAlgorithm - - All required parameters must be populated in order to send to server. - - :ivar odata_type: Required. - :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.BM25Similarity": "BM25SimilarityAlgorithm", - "#Microsoft.Azure.Search.ClassicSimilarity": "ClassicSimilarityAlgorithm", - } - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - - -class BM25SimilarityAlgorithm(SimilarityAlgorithm): - """Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm - that includes length normalization (controlled by the 'b' parameter) as well as term frequency - saturation (controlled by the 'k1' parameter). - - All required parameters must be populated in order to send to server. - - :ivar odata_type: Required. - :vartype odata_type: str - :ivar k1: This property controls the scaling function between the term frequency of each - matching terms and the final relevance score of a document-query pair. By default, a value of - 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. - :vartype k1: float - :ivar b: This property controls how the length of a document affects the relevance score. By - default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, - while a value of 1.0 means the score is fully normalized by the length of the document. - :vartype b: float - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "k1": {"key": "k1", "type": "float"}, - "b": {"key": "b", "type": "float"}, - } - - def __init__(self, *, k1: Optional[float] = None, b: Optional[float] = None, **kwargs: Any) -> None: - """ - :keyword k1: This property controls the scaling function between the term frequency of each - matching terms and the final relevance score of a document-query pair. 
By default, a value of - 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. - :paramtype k1: float - :keyword b: This property controls how the length of a document affects the relevance score. By - default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, - while a value of 1.0 means the score is fully normalized by the length of the document. - :paramtype b: float - """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.BM25Similarity" - self.k1 = k1 - self.b = b - - -class CharFilter(_serialization.Model): - """Base type for character filters. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - MappingCharFilter, PatternReplaceCharFilter - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of char filter. Required. - :vartype odata_type: str - :ivar name: The name of the char filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.MappingCharFilter": "MappingCharFilter", - "#Microsoft.Azure.Search.PatternReplaceCharFilter": "PatternReplaceCharFilter", - } - } - - def __init__(self, *, name: str, **kwargs: Any) -> None: - """ - :keyword name: The name of the char filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - self.name = name - - -class CjkBigramTokenFilter(TokenFilter): - """Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is - implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar ignore_scripts: The scripts to ignore. - :vartype ignore_scripts: list[str or - ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] - :ivar output_unigrams: A value indicating whether to output both unigrams and bigrams (if - true), or just bigrams (if false). Default is false. 
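For orientation, a minimal construction sketch for the BinaryQuantizationCompression model whose definition appears above. It assumes the class stays importable from azure.search.documents.indexes.models, as the docstring cross-references suggest; the configuration name and numeric values are illustrative and not taken from this patch.

    # Illustrative sketch: binary quantization with full-precision rescoring.
    from azure.search.documents.indexes.models import BinaryQuantizationCompression

    compression = BinaryQuantizationCompression(
        compression_name="my-binary-compression",  # illustrative name
        rerank_with_original_vectors=True,         # rescore compressed candidates with full-precision vectors
        default_oversampling=4.0,                  # fetch 4x candidates before rescoring; requires reranking
        truncation_dimension=1024,                 # only for MRL-trained embeddings
    )
    assert compression.kind == "binaryQuantization"  # discriminator set by the constructor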
- :vartype output_unigrams: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "ignore_scripts": {"key": "ignoreScripts", "type": "[str]"}, - "output_unigrams": {"key": "outputUnigrams", "type": "bool"}, - } - - def __init__( - self, - *, - name: str, - ignore_scripts: Optional[List[Union[str, "_models.CjkBigramTokenFilterScripts"]]] = None, - output_unigrams: bool = False, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword ignore_scripts: The scripts to ignore. - :paramtype ignore_scripts: list[str or - ~azure.search.documents.indexes.models.CjkBigramTokenFilterScripts] - :keyword output_unigrams: A value indicating whether to output both unigrams and bigrams (if - true), or just bigrams (if false). Default is false. - :paramtype output_unigrams: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.CjkBigramTokenFilter" - self.ignore_scripts = ignore_scripts - self.output_unigrams = output_unigrams - - -class ClassicSimilarityAlgorithm(SimilarityAlgorithm): - """Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. - This variation of TF-IDF introduces static document length normalization as well as - coordinating factors that penalize documents that only partially match the searched queries. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: Required. - :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.ClassicSimilarity" - - -class LexicalTokenizer(_serialization.Model): - """Base type for tokenizers. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, - MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, - PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, - UaxUrlEmailTokenizer - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. 
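The BM25SimilarityAlgorithm model removed above exposes the two standard Okapi BM25 knobs. A minimal sketch, assuming the class is importable from azure.search.documents.indexes.models; the values shown are illustrative.

    # Illustrative sketch: tune term-frequency saturation (k1) and length normalization (b).
    from azure.search.documents.indexes.models import BM25SimilarityAlgorithm

    similarity = BM25SimilarityAlgorithm(
        k1=1.2,  # service default per the docstring above
        b=0.5,   # weaker length normalization than the 0.75 default
    )
    assert similarity.odata_type == "#Microsoft.Azure.Search.BM25Similarity"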
- :vartype name: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.ClassicTokenizer": "ClassicTokenizer", - "#Microsoft.Azure.Search.EdgeNGramTokenizer": "EdgeNGramTokenizer", - "#Microsoft.Azure.Search.KeywordTokenizer": "KeywordTokenizer", - "#Microsoft.Azure.Search.KeywordTokenizerV2": "KeywordTokenizerV2", - "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer": "MicrosoftLanguageStemmingTokenizer", - "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer": "MicrosoftLanguageTokenizer", - "#Microsoft.Azure.Search.NGramTokenizer": "NGramTokenizer", - "#Microsoft.Azure.Search.PathHierarchyTokenizerV2": "PathHierarchyTokenizerV2", - "#Microsoft.Azure.Search.PatternTokenizer": "PatternTokenizer", - "#Microsoft.Azure.Search.StandardTokenizer": "LuceneStandardTokenizer", - "#Microsoft.Azure.Search.StandardTokenizerV2": "LuceneStandardTokenizerV2", - "#Microsoft.Azure.Search.UaxUrlEmailTokenizer": "UaxUrlEmailTokenizer", - } - } - - def __init__(self, *, name: str, **kwargs: Any) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - self.name = name - - -class ClassicTokenizer(LexicalTokenizer): - """Grammar-based tokenizer that is suitable for processing most European-language documents. This - tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :vartype max_token_length: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - } - - def __init__(self, *, name: str, max_token_length: int = 255, **kwargs: Any) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. 
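A minimal sketch of the CjkBigramTokenFilter constructor documented above, assuming the class is importable from azure.search.documents.indexes.models. The "hiragana" script value is an assumption about CjkBigramTokenFilterScripts, not something this hunk defines.

    # Illustrative sketch: emit both unigrams and bigrams for CJK text.
    from azure.search.documents.indexes.models import CjkBigramTokenFilter

    cjk_filter = CjkBigramTokenFilter(
        name="my_cjk_bigram",
        ignore_scripts=["hiragana"],  # assumed script value
        output_unigrams=True,         # default is False (bigrams only)
    )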
- :paramtype max_token_length: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.ClassicTokenizer" - self.max_token_length = max_token_length - - -class CognitiveServicesAccountKey(CognitiveServicesAccount): - """The multi-region account key of an Azure AI service resource that's attached to a skillset. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a - skillset. Required. - :vartype odata_type: str - :ivar description: Description of the Azure AI service resource attached to a skillset. - :vartype description: str - :ivar key: The key used to provision the Azure AI service resource attached to a skillset. - Required. - :vartype key: str - """ - - _validation = { - "odata_type": {"required": True}, - "key": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "key": {"key": "key", "type": "str"}, - } - - def __init__(self, *, key: str, description: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword description: Description of the Azure AI service resource attached to a skillset. - :paramtype description: str - :keyword key: The key used to provision the Azure AI service resource attached to a skillset. - Required. - :paramtype key: str - """ - super().__init__(description=description, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.CognitiveServicesByKey" - self.key = key - - -class CommonGramTokenFilter(TokenFilter): - """Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed - too, with bigrams overlaid. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar common_words: The set of common words. Required. - :vartype common_words: list[str] - :ivar ignore_case: A value indicating whether common words matching will be case insensitive. - Default is false. - :vartype ignore_case: bool - :ivar use_query_mode: A value that indicates whether the token filter is in query mode. When in - query mode, the token filter generates bigrams and then removes common words and single terms - followed by a common word. Default is false. - :vartype use_query_mode: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "common_words": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "common_words": {"key": "commonWords", "type": "[str]"}, - "ignore_case": {"key": "ignoreCase", "type": "bool"}, - "use_query_mode": {"key": "queryMode", "type": "bool"}, - } - - def __init__( - self, - *, - name: str, - common_words: List[str], - ignore_case: bool = False, - use_query_mode: bool = False, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. 
It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword common_words: The set of common words. Required. - :paramtype common_words: list[str] - :keyword ignore_case: A value indicating whether common words matching will be case - insensitive. Default is false. - :paramtype ignore_case: bool - :keyword use_query_mode: A value that indicates whether the token filter is in query mode. When - in query mode, the token filter generates bigrams and then removes common words and single - terms followed by a common word. Default is false. - :paramtype use_query_mode: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.CommonGramTokenFilter" - self.common_words = common_words - self.ignore_case = ignore_case - self.use_query_mode = use_query_mode - - -class ConditionalSkill(SearchIndexerSkill): - """A skill that enables scenarios that require a Boolean operation to determine the data to assign - to an output. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. 
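A minimal sketch for the CommonGramTokenFilter defined above, assuming the usual import path from azure.search.documents.indexes.models; the word list is illustrative.

    # Illustrative sketch: overlay bigrams for frequent terms while still indexing single terms.
    from azure.search.documents.indexes.models import CommonGramTokenFilter

    common_grams = CommonGramTokenFilter(
        name="my_common_grams",
        common_words=["the", "a", "an", "of"],  # required
        ignore_case=True,                       # match common words case-insensitively
        use_query_mode=False,                   # set True to strip common words at query time
    )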
- :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Util.ConditionalSkill" - - -class CorsOptions(_serialization.Model): - """Defines options to control Cross-Origin Resource Sharing (CORS) for an index. - - All required parameters must be populated in order to send to server. - - :ivar allowed_origins: The list of origins from which JavaScript code will be granted access to - your index. Can contain a list of hosts of the form - {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not - recommended). Required. - :vartype allowed_origins: list[str] - :ivar max_age_in_seconds: The duration for which browsers should cache CORS preflight - responses. Defaults to 5 minutes. - :vartype max_age_in_seconds: int - """ - - _validation = { - "allowed_origins": {"required": True}, - } - - _attribute_map = { - "allowed_origins": {"key": "allowedOrigins", "type": "[str]"}, - "max_age_in_seconds": {"key": "maxAgeInSeconds", "type": "int"}, - } - - def __init__(self, *, allowed_origins: List[str], max_age_in_seconds: Optional[int] = None, **kwargs: Any) -> None: - """ - :keyword allowed_origins: The list of origins from which JavaScript code will be granted access - to your index. Can contain a list of hosts of the form - {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not - recommended). Required. - :paramtype allowed_origins: list[str] - :keyword max_age_in_seconds: The duration for which browsers should cache CORS preflight - responses. Defaults to 5 minutes. - :paramtype max_age_in_seconds: int - """ - super().__init__(**kwargs) - self.allowed_origins = allowed_origins - self.max_age_in_seconds = max_age_in_seconds - - -class LexicalAnalyzer(_serialization.Model): - """Base type for analyzers. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - CustomAnalyzer, PatternAnalyzer, LuceneStandardAnalyzer, StopAnalyzer - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of analyzer. Required. - :vartype odata_type: str - :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. 
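A minimal sketch for the CorsOptions model defined above, assuming it is importable from azure.search.documents.indexes.models; the origin is a placeholder.

    # Illustrative sketch: allow one origin and cache CORS preflight responses for 5 minutes.
    from azure.search.documents.indexes.models import CorsOptions

    cors = CorsOptions(
        allowed_origins=["https://www.example.com"],  # a single "*" would allow all origins (not recommended)
        max_age_in_seconds=300,
    )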
- :vartype name: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.CustomAnalyzer": "CustomAnalyzer", - "#Microsoft.Azure.Search.PatternAnalyzer": "PatternAnalyzer", - "#Microsoft.Azure.Search.StandardAnalyzer": "LuceneStandardAnalyzer", - "#Microsoft.Azure.Search.StopAnalyzer": "StopAnalyzer", - } - } - - def __init__(self, *, name: str, **kwargs: Any) -> None: - """ - :keyword name: The name of the analyzer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - self.name = name - - -class CustomAnalyzer(LexicalAnalyzer): - """Allows you to take control over the process of converting text into indexable/searchable - tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one - or more filters. The tokenizer is responsible for breaking text into tokens, and the filters - for modifying tokens emitted by the tokenizer. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of analyzer. Required. - :vartype odata_type: str - :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar tokenizer: The name of the tokenizer to use to divide continuous text into a sequence of - tokens, such as breaking a sentence into words. Required. Known values are: "classic", - "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", - "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", - "standard_v2", "uax_url_email", and "whitespace". - :vartype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :ivar token_filters: A list of token filters used to filter out or modify the tokens generated - by a tokenizer. For example, you can specify a lowercase filter that converts all characters to - lowercase. The filters are run in the order in which they are listed. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :ivar char_filters: A list of character filters used to prepare input text before it is - processed by the tokenizer. For instance, they can replace certain characters or symbols. The - filters are run in the order in which they are listed. 
- :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "tokenizer": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "tokenizer": {"key": "tokenizer", "type": "str"}, - "token_filters": {"key": "tokenFilters", "type": "[str]"}, - "char_filters": {"key": "charFilters", "type": "[str]"}, - } - - def __init__( - self, - *, - name: str, - tokenizer: Union[str, "_models.LexicalTokenizerName"], - token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = None, - char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the analyzer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword tokenizer: The name of the tokenizer to use to divide continuous text into a sequence - of tokens, such as breaking a sentence into words. Required. Known values are: "classic", - "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", - "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", - "standard_v2", "uax_url_email", and "whitespace". - :paramtype tokenizer: str or ~azure.search.documents.indexes.models.LexicalTokenizerName - :keyword token_filters: A list of token filters used to filter out or modify the tokens - generated by a tokenizer. For example, you can specify a lowercase filter that converts all - characters to lowercase. The filters are run in the order in which they are listed. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: A list of character filters used to prepare input text before it is - processed by the tokenizer. For instance, they can replace certain characters or symbols. The - filters are run in the order in which they are listed. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.CustomAnalyzer" - self.tokenizer = tokenizer - self.token_filters = token_filters - self.char_filters = char_filters - - -class CustomEntity(_serialization.Model): - """An object that contains information about the matches that were found, and related metadata. - - All required parameters must be populated in order to send to server. - - :ivar name: The top-level entity descriptor. Matches in the skill output will be grouped by - this name, and it should represent the "normalized" form of the text being found. Required. - :vartype name: str - :ivar description: This field can be used as a passthrough for custom metadata about the - matched text(s). The value of this field will appear with every match of its entity in the - skill output. - :vartype description: str - :ivar type: This field can be used as a passthrough for custom metadata about the matched - text(s). The value of this field will appear with every match of its entity in the skill - output. - :vartype type: str - :ivar subtype: This field can be used as a passthrough for custom metadata about the matched - text(s). The value of this field will appear with every match of its entity in the skill - output. 
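A minimal sketch for the CustomAnalyzer defined above, assuming the import path from azure.search.documents.indexes.models. The tokenizer name comes from the known values listed in the docstring; the token and char filter names ("lowercase", "asciifolding", "html_strip") are assumptions about the service-defined filter names, not part of this hunk.

    # Illustrative sketch: a custom analyzer is one predefined tokenizer plus ordered filters.
    from azure.search.documents.indexes.models import CustomAnalyzer

    analyzer = CustomAnalyzer(
        name="my_custom_analyzer",
        tokenizer="standard_v2",                      # one of the known LexicalTokenizerName values
        token_filters=["lowercase", "asciifolding"],  # applied in the order listed
        char_filters=["html_strip"],                  # runs before tokenization
    )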
- :vartype subtype: str - :ivar id: This field can be used as a passthrough for custom metadata about the matched - text(s). The value of this field will appear with every match of its entity in the skill - output. - :vartype id: str - :ivar case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the - entity name should be sensitive to character casing. Sample case insensitive matches of - "Microsoft" could be: microsoft, microSoft, MICROSOFT. - :vartype case_sensitive: bool - :ivar accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with the - entity name should be sensitive to accent. - :vartype accent_sensitive: bool - :ivar fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number of - divergent characters that would still constitute a match with the entity name. The smallest - possible fuzziness for any given match is returned. For instance, if the edit distance is set - to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case - sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but - otherwise do. - :vartype fuzzy_edit_distance: int - :ivar default_case_sensitive: Changes the default case sensitivity value for this entity. It can be - used to change the default value of all aliases caseSensitive values. - :vartype default_case_sensitive: bool - :ivar default_accent_sensitive: Changes the default accent sensitivity value for this entity. - It can be used to change the default value of all aliases accentSensitive values. - :vartype default_accent_sensitive: bool - :ivar default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this - entity. It can be used to change the default value of all aliases fuzzyEditDistance values. - :vartype default_fuzzy_edit_distance: int - :ivar aliases: An array of complex objects that can be used to specify alternative spellings or - synonyms to the root entity name. - :vartype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] - """ - - _validation = { - "name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "type": {"key": "type", "type": "str"}, - "subtype": {"key": "subtype", "type": "str"}, - "id": {"key": "id", "type": "str"}, - "case_sensitive": {"key": "caseSensitive", "type": "bool"}, - "accent_sensitive": {"key": "accentSensitive", "type": "bool"}, - "fuzzy_edit_distance": {"key": "fuzzyEditDistance", "type": "int"}, - "default_case_sensitive": {"key": "defaultCaseSensitive", "type": "bool"}, - "default_accent_sensitive": {"key": "defaultAccentSensitive", "type": "bool"}, - "default_fuzzy_edit_distance": {"key": "defaultFuzzyEditDistance", "type": "int"}, - "aliases": {"key": "aliases", "type": "[CustomEntityAlias]"}, - } - - def __init__( - self, - *, - name: str, - description: Optional[str] = None, - type: Optional[str] = None, - subtype: Optional[str] = None, - id: Optional[str] = None, # pylint: disable=redefined-builtin - case_sensitive: Optional[bool] = None, - accent_sensitive: Optional[bool] = None, - fuzzy_edit_distance: Optional[int] = None, - default_case_sensitive: Optional[bool] = None, - default_accent_sensitive: Optional[bool] = None, - default_fuzzy_edit_distance: Optional[int] = None, - aliases: Optional[List["_models.CustomEntityAlias"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The top-level entity descriptor.
Matches in the skill output will be grouped by - this name, and it should represent the "normalized" form of the text being found. Required. - :paramtype name: str - :keyword description: This field can be used as a passthrough for custom metadata about the - matched text(s). The value of this field will appear with every match of its entity in the - skill output. - :paramtype description: str - :keyword type: This field can be used as a passthrough for custom metadata about the matched - text(s). The value of this field will appear with every match of its entity in the skill - output. - :paramtype type: str - :keyword subtype: This field can be used as a passthrough for custom metadata about the matched - text(s). The value of this field will appear with every match of its entity in the skill - output. - :paramtype subtype: str - :keyword id: This field can be used as a passthrough for custom metadata about the matched - text(s). The value of this field will appear with every match of its entity in the skill - output. - :paramtype id: str - :keyword case_sensitive: Defaults to false. Boolean value denoting whether comparisons with the - entity name should be sensitive to character casing. Sample case insensitive matches of - "Microsoft" could be: microsoft, microSoft, MICROSOFT. - :paramtype case_sensitive: bool - :keyword accent_sensitive: Defaults to false. Boolean value denoting whether comparisons with - the entity name should be sensitive to accent. - :paramtype accent_sensitive: bool - :keyword fuzzy_edit_distance: Defaults to 0. Maximum value of 5. Denotes the acceptable number - of divergent characters that would still constitute a match with the entity name. The smallest - possible fuzziness for any given match is returned. For instance, if the edit distance is set - to 3, "Windows10" would still match "Windows", "Windows10" and "Windows 7". When case - sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but - otherwise do. - :paramtype fuzzy_edit_distance: int - :keyword default_case_sensitive: Changes the default case sensitivity value for this entity. It can - be used to change the default value of all aliases caseSensitive values. - :paramtype default_case_sensitive: bool - :keyword default_accent_sensitive: Changes the default accent sensitivity value for this - entity. It can be used to change the default value of all aliases accentSensitive values. - :paramtype default_accent_sensitive: bool - :keyword default_fuzzy_edit_distance: Changes the default fuzzy edit distance value for this - entity. It can be used to change the default value of all aliases fuzzyEditDistance values. - :paramtype default_fuzzy_edit_distance: int - :keyword aliases: An array of complex objects that can be used to specify alternative spellings - or synonyms to the root entity name.
- :paramtype aliases: list[~azure.search.documents.indexes.models.CustomEntityAlias] - """ - super().__init__(**kwargs) - self.name = name - self.description = description - self.type = type - self.subtype = subtype - self.id = id - self.case_sensitive = case_sensitive - self.accent_sensitive = accent_sensitive - self.fuzzy_edit_distance = fuzzy_edit_distance - self.default_case_sensitive = default_case_sensitive - self.default_accent_sensitive = default_accent_sensitive - self.default_fuzzy_edit_distance = default_fuzzy_edit_distance - self.aliases = aliases - - -class CustomEntityAlias(_serialization.Model): - """A complex object that can be used to specify alternative spellings or synonyms to the root - entity name. - - All required parameters must be populated in order to send to server. - - :ivar text: The text of the alias. Required. - :vartype text: str - :ivar case_sensitive: Determine if the alias is case sensitive. - :vartype case_sensitive: bool - :ivar accent_sensitive: Determine if the alias is accent sensitive. - :vartype accent_sensitive: bool - :ivar fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. - :vartype fuzzy_edit_distance: int - """ - - _validation = { - "text": {"required": True}, - } - - _attribute_map = { - "text": {"key": "text", "type": "str"}, - "case_sensitive": {"key": "caseSensitive", "type": "bool"}, - "accent_sensitive": {"key": "accentSensitive", "type": "bool"}, - "fuzzy_edit_distance": {"key": "fuzzyEditDistance", "type": "int"}, - } - - def __init__( - self, - *, - text: str, - case_sensitive: Optional[bool] = None, - accent_sensitive: Optional[bool] = None, - fuzzy_edit_distance: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword text: The text of the alias. Required. - :paramtype text: str - :keyword case_sensitive: Determine if the alias is case sensitive. - :paramtype case_sensitive: bool - :keyword accent_sensitive: Determine if the alias is accent sensitive. - :paramtype accent_sensitive: bool - :keyword fuzzy_edit_distance: Determine the fuzzy edit distance of the alias. - :paramtype fuzzy_edit_distance: int - """ - super().__init__(**kwargs) - self.text = text - self.case_sensitive = case_sensitive - self.accent_sensitive = accent_sensitive - self.fuzzy_edit_distance = fuzzy_edit_distance - - -class CustomEntityLookupSkill(SearchIndexerSkill): - """A skill looks for text from a custom, user-defined list of words and phrases. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. 
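A minimal sketch for CustomEntity and CustomEntityAlias as defined above, assuming both are importable from azure.search.documents.indexes.models; the entity and alias values are illustrative.

    # Illustrative sketch: one entity with an alias and a small fuzziness budget.
    from azure.search.documents.indexes.models import CustomEntity, CustomEntityAlias

    entity = CustomEntity(
        name="Microsoft",            # the "normalized" form that matches are grouped under
        description="Company name",  # passthrough metadata echoed with every match
        fuzzy_edit_distance=1,       # allow one divergent character
        default_case_sensitive=False,
        aliases=[CustomEntityAlias(text="MSFT", case_sensitive=True)],
    )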
- :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - Known values are: "da", "de", "en", "es", "fi", "fr", "it", "ko", and "pt". - :vartype default_language_code: str or - ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage - :ivar entities_definition_uri: Path to a JSON or CSV file containing all the target text to - match against. This entity definition is read at the beginning of an indexer run. Any updates - to this file during an indexer run will not take effect until subsequent runs. This config must - be accessible over HTTPS. - :vartype entities_definition_uri: str - :ivar inline_entities_definition: The inline CustomEntity definition. - :vartype inline_entities_definition: list[~azure.search.documents.indexes.models.CustomEntity] - :ivar global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is not - set in CustomEntity, this value will be the default value. - :vartype global_default_case_sensitive: bool - :ivar global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive is - not set in CustomEntity, this value will be the default value. - :vartype global_default_accent_sensitive: bool - :ivar global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If - FuzzyEditDistance is not set in CustomEntity, this value will be the default value. - :vartype global_default_fuzzy_edit_distance: int - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "entities_definition_uri": {"key": "entitiesDefinitionUri", "type": "str"}, - "inline_entities_definition": {"key": "inlineEntitiesDefinition", "type": "[CustomEntity]"}, - "global_default_case_sensitive": {"key": "globalDefaultCaseSensitive", "type": "bool"}, - "global_default_accent_sensitive": {"key": "globalDefaultAccentSensitive", "type": "bool"}, - "global_default_fuzzy_edit_distance": {"key": "globalDefaultFuzzyEditDistance", "type": "int"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[Union[str, "_models.CustomEntityLookupSkillLanguage"]] = None, - entities_definition_uri: Optional[str] = None, - inline_entities_definition: Optional[List["_models.CustomEntity"]] = None, - global_default_case_sensitive: Optional[bool] = None, - global_default_accent_sensitive: Optional[bool] = None, - global_default_fuzzy_edit_distance: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the 
skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. Known values are: "da", "de", "en", "es", "fi", "fr", "it", "ko", and "pt". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.CustomEntityLookupSkillLanguage - :keyword entities_definition_uri: Path to a JSON or CSV file containing all the target text to - match against. This entity definition is read at the beginning of an indexer run. Any updates - to this file during an indexer run will not take effect until subsequent runs. This config must - be accessible over HTTPS. - :paramtype entities_definition_uri: str - :keyword inline_entities_definition: The inline CustomEntity definition. - :paramtype inline_entities_definition: - list[~azure.search.documents.indexes.models.CustomEntity] - :keyword global_default_case_sensitive: A global flag for CaseSensitive. If CaseSensitive is - not set in CustomEntity, this value will be the default value. - :paramtype global_default_case_sensitive: bool - :keyword global_default_accent_sensitive: A global flag for AccentSensitive. If AccentSensitive - is not set in CustomEntity, this value will be the default value. - :paramtype global_default_accent_sensitive: bool - :keyword global_default_fuzzy_edit_distance: A global flag for FuzzyEditDistance. If - FuzzyEditDistance is not set in CustomEntity, this value will be the default value. - :paramtype global_default_fuzzy_edit_distance: int - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.CustomEntityLookupSkill" - self.default_language_code = default_language_code - self.entities_definition_uri = entities_definition_uri - self.inline_entities_definition = inline_entities_definition - self.global_default_case_sensitive = global_default_case_sensitive - self.global_default_accent_sensitive = global_default_accent_sensitive - self.global_default_fuzzy_edit_distance = global_default_fuzzy_edit_distance - - -class LexicalNormalizer(_serialization.Model): - """Base type for normalizers. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - CustomNormalizer - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of normalizer. Required. - :vartype odata_type: str - :ivar name: The name of the normalizer. 
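A minimal sketch wiring the CustomEntityLookupSkill defined above to an input and an output mapping. The InputFieldMappingEntry and OutputFieldMappingEntry constructors and the "text"/"entities" names are assumptions about the public models rather than anything this hunk shows; the entities file URL is a placeholder.

    # Illustrative sketch: match documents against an external entity definition file.
    from azure.search.documents.indexes.models import (
        CustomEntityLookupSkill,
        InputFieldMappingEntry,
        OutputFieldMappingEntry,
    )

    skill = CustomEntityLookupSkill(
        name="myCustomEntityLookup",
        context="/document",
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],          # assumed input name
        outputs=[OutputFieldMappingEntry(name="entities", target_name="customEntities")],  # assumed output name
        default_language_code="en",
        entities_definition_uri="https://example.com/entities.json",  # must be reachable over HTTPS
        global_default_case_sensitive=False,
    )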
It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', - 'standard', 'lowercase', 'uppercase', or 'elision'. Required. - :vartype name: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - } - - _subtype_map = {"odata_type": {"#Microsoft.Azure.Search.CustomNormalizer": "CustomNormalizer"}} - - def __init__(self, *, name: str, **kwargs: Any) -> None: - """ - :keyword name: The name of the normalizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', - 'standard', 'lowercase', 'uppercase', or 'elision'. Required. - :paramtype name: str - """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - self.name = name - - -class CustomNormalizer(LexicalNormalizer): - """Allows you to configure normalization for filterable, sortable, and facetable fields, which by - default operate with strict matching. This is a user-defined configuration consisting of at - least one or more filters, which modify the token that is stored. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of normalizer. Required. - :vartype odata_type: str - :ivar name: The name of the normalizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', - 'standard', 'lowercase', 'uppercase', or 'elision'. Required. - :vartype name: str - :ivar token_filters: A list of token filters used to filter out or modify the input token. For - example, you can specify a lowercase filter that converts all characters to lowercase. The - filters are run in the order in which they are listed. - :vartype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :ivar char_filters: A list of character filters used to prepare input text before it is - processed. For instance, they can replace certain characters or symbols. The filters are run in - the order in which they are listed. - :vartype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "token_filters": {"key": "tokenFilters", "type": "[str]"}, - "char_filters": {"key": "charFilters", "type": "[str]"}, - } - - def __init__( - self, - *, - name: str, - token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = None, - char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the normalizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. 
It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', - 'standard', 'lowercase', 'uppercase', or 'elision'. Required. - :paramtype name: str - :keyword token_filters: A list of token filters used to filter out or modify the input token. - For example, you can specify a lowercase filter that converts all characters to lowercase. The - filters are run in the order in which they are listed. - :paramtype token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] - :keyword char_filters: A list of character filters used to prepare input text before it is - processed. For instance, they can replace certain characters or symbols. The filters are run in - the order in which they are listed. - :paramtype char_filters: list[str or ~azure.search.documents.indexes.models.CharFilterName] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.CustomNormalizer" - self.token_filters = token_filters - self.char_filters = char_filters - - -class DataChangeDetectionPolicy(_serialization.Model): - """Base type for data change detection policies. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - HighWaterMarkChangeDetectionPolicy, SqlIntegratedChangeTrackingPolicy - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of data change detection policy. Required. - :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy": "HighWaterMarkChangeDetectionPolicy", - "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy": "SqlIntegratedChangeTrackingPolicy", - } - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - - -class DataDeletionDetectionPolicy(_serialization.Model): - """Base type for data deletion detection policies. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - NativeBlobSoftDeleteDeletionDetectionPolicy, SoftDeleteColumnDeletionDetectionPolicy - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of data deletion detection policy. - Required. - :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy": "NativeBlobSoftDeleteDeletionDetectionPolicy", - "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy": "SoftDeleteColumnDeletionDetectionPolicy", - } - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - - -class DataSourceCredentials(_serialization.Model): - """Represents credentials that can be used to connect to a datasource. - - :ivar connection_string: The connection string for the datasource. Set to ```` (with - brackets) if you don't want the connection string updated. Set to ```` if you want to - remove the connection string value from the datasource. 
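A minimal sketch for the CustomNormalizer defined above, assuming the import path from azure.search.documents.indexes.models; the token filter names are assumptions about the service-defined TokenFilterName values.

    # Illustrative sketch: normalize filterable/sortable fields by lowercasing and ASCII folding.
    from azure.search.documents.indexes.models import CustomNormalizer

    normalizer = CustomNormalizer(
        name="my_normalizer",                         # must avoid reserved names such as 'lowercase' or 'standard'
        token_filters=["lowercase", "asciifolding"],  # applied in the order listed
    )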
- :vartype connection_string: str - """ - - _attribute_map = { - "connection_string": {"key": "connectionString", "type": "str"}, - } - - def __init__(self, *, connection_string: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword connection_string: The connection string for the datasource. Set to ```` - (with brackets) if you don't want the connection string updated. Set to ```` if you - want to remove the connection string value from the datasource. - :paramtype connection_string: str - """ - super().__init__(**kwargs) - self.connection_string = connection_string - - -class DefaultCognitiveServicesAccount(CognitiveServicesAccount): - """An empty object that represents the default Azure AI service resource for a skillset. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of Azure AI service resource attached to a - skillset. Required. - :vartype odata_type: str - :ivar description: Description of the Azure AI service resource attached to a skillset. - :vartype description: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "description": {"key": "description", "type": "str"}, - } - - def __init__(self, *, description: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword description: Description of the Azure AI service resource attached to a skillset. - :paramtype description: str - """ - super().__init__(description=description, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.DefaultCognitiveServices" - - -class DictionaryDecompounderTokenFilter(TokenFilter): - """Decomposes compound words found in many Germanic languages. This token filter is implemented - using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar word_list: The list of words to match against. Required. - :vartype word_list: list[str] - :ivar min_word_size: The minimum word size. Only words longer than this get processed. Default - is 5. Maximum is 300. - :vartype min_word_size: int - :ivar min_subword_size: The minimum subword size. Only subwords longer than this are outputted. - Default is 2. Maximum is 300. - :vartype min_subword_size: int - :ivar max_subword_size: The maximum subword size. Only subwords shorter than this are - outputted. Default is 15. Maximum is 300. - :vartype max_subword_size: int - :ivar only_longest_match: A value indicating whether to add only the longest matching subword - to the output. Default is false. 
- :vartype only_longest_match: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "word_list": {"required": True}, - "min_word_size": {"maximum": 300}, - "min_subword_size": {"maximum": 300}, - "max_subword_size": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "word_list": {"key": "wordList", "type": "[str]"}, - "min_word_size": {"key": "minWordSize", "type": "int"}, - "min_subword_size": {"key": "minSubwordSize", "type": "int"}, - "max_subword_size": {"key": "maxSubwordSize", "type": "int"}, - "only_longest_match": {"key": "onlyLongestMatch", "type": "bool"}, - } - - def __init__( - self, - *, - name: str, - word_list: List[str], - min_word_size: int = 5, - min_subword_size: int = 2, - max_subword_size: int = 15, - only_longest_match: bool = False, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword word_list: The list of words to match against. Required. - :paramtype word_list: list[str] - :keyword min_word_size: The minimum word size. Only words longer than this get processed. - Default is 5. Maximum is 300. - :paramtype min_word_size: int - :keyword min_subword_size: The minimum subword size. Only subwords longer than this are - outputted. Default is 2. Maximum is 300. - :paramtype min_subword_size: int - :keyword max_subword_size: The maximum subword size. Only subwords shorter than this are - outputted. Default is 15. Maximum is 300. - :paramtype max_subword_size: int - :keyword only_longest_match: A value indicating whether to add only the longest matching - subword to the output. Default is false. - :paramtype only_longest_match: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter" - self.word_list = word_list - self.min_word_size = min_word_size - self.min_subword_size = min_subword_size - self.max_subword_size = max_subword_size - self.only_longest_match = only_longest_match - - -class ScoringFunction(_serialization.Model): - """Base type for functions that can modify document scores during ranking. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - DistanceScoringFunction, FreshnessScoringFunction, MagnitudeScoringFunction, TagScoringFunction - - All required parameters must be populated in order to send to server. - - :ivar type: Indicates the type of function to use. Valid values include magnitude, freshness, - distance, and tag. The function type must be lower case. Required. - :vartype type: str - :ivar field_name: The name of the field used as input to the scoring function. Required. - :vartype field_name: str - :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :vartype boost: float - :ivar interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". 
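# Editorial sketch (not part of the patch): building the decompounder token filter defined
# above. Keyword arguments mirror the docstring; word_list is required and the size limits
# are capped at 300 by the validation table.
from azure.search.documents.indexes.models import DictionaryDecompounderTokenFilter

decompounder = DictionaryDecompounderTokenFilter(
    name="german_decompounder",
    word_list=["Donau", "Dampf", "Schiff", "Fahrt"],
    min_word_size=5,
    only_longest_match=True,
)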
- :vartype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - """ - - _validation = { - "type": {"required": True}, - "field_name": {"required": True}, - "boost": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "field_name": {"key": "fieldName", "type": "str"}, - "boost": {"key": "boost", "type": "float"}, - "interpolation": {"key": "interpolation", "type": "str"}, - } - - _subtype_map = { - "type": { - "distance": "DistanceScoringFunction", - "freshness": "FreshnessScoringFunction", - "magnitude": "MagnitudeScoringFunction", - "tag": "TagScoringFunction", - } - } - - def __init__( - self, - *, - field_name: str, - boost: float, - interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword field_name: The name of the field used as input to the scoring function. Required. - :paramtype field_name: str - :keyword boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :paramtype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - """ - super().__init__(**kwargs) - self.type: Optional[str] = None - self.field_name = field_name - self.boost = boost - self.interpolation = interpolation - - -class DistanceScoringFunction(ScoringFunction): - """Defines a function that boosts scores based on distance from a geographic location. - - All required parameters must be populated in order to send to server. - - :ivar type: Indicates the type of function to use. Valid values include magnitude, freshness, - distance, and tag. The function type must be lower case. Required. - :vartype type: str - :ivar field_name: The name of the field used as input to the scoring function. Required. - :vartype field_name: str - :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :vartype boost: float - :ivar interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :vartype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :ivar parameters: Parameter values for the distance scoring function. Required. - :vartype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters - """ - - _validation = { - "type": {"required": True}, - "field_name": {"required": True}, - "boost": {"required": True}, - "parameters": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "field_name": {"key": "fieldName", "type": "str"}, - "boost": {"key": "boost", "type": "float"}, - "interpolation": {"key": "interpolation", "type": "str"}, - "parameters": {"key": "distance", "type": "DistanceScoringParameters"}, - } - - def __init__( - self, - *, - field_name: str, - boost: float, - parameters: "_models.DistanceScoringParameters", - interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword field_name: The name of the field used as input to the scoring function. Required. 
- :paramtype field_name: str - :keyword boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :paramtype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Parameter values for the distance scoring function. Required. - :paramtype parameters: ~azure.search.documents.indexes.models.DistanceScoringParameters - """ - super().__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type: str = "distance" - self.parameters = parameters - - -class DistanceScoringParameters(_serialization.Model): - """Provides parameter values to a distance scoring function. - - All required parameters must be populated in order to send to server. - - :ivar reference_point_parameter: The name of the parameter passed in search queries to specify - the reference location. Required. - :vartype reference_point_parameter: str - :ivar boosting_distance: The distance in kilometers from the reference location where the - boosting range ends. Required. - :vartype boosting_distance: float - """ - - _validation = { - "reference_point_parameter": {"required": True}, - "boosting_distance": {"required": True}, - } - - _attribute_map = { - "reference_point_parameter": {"key": "referencePointParameter", "type": "str"}, - "boosting_distance": {"key": "boostingDistance", "type": "float"}, - } - - def __init__(self, *, reference_point_parameter: str, boosting_distance: float, **kwargs: Any) -> None: - """ - :keyword reference_point_parameter: The name of the parameter passed in search queries to - specify the reference location. Required. - :paramtype reference_point_parameter: str - :keyword boosting_distance: The distance in kilometers from the reference location where the - boosting range ends. Required. - :paramtype boosting_distance: float - """ - super().__init__(**kwargs) - self.reference_point_parameter = reference_point_parameter - self.boosting_distance = boosting_distance - - -class DocumentExtractionSkill(SearchIndexerSkill): - """A skill that extracts content from a file within the enrichment pipeline. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. 
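# Editorial sketch (not part of the patch): a distance scoring function as defined above,
# boosting documents near a reference point that callers pass at query time through the
# named scoring parameter ("currentLocation" here is illustrative).
from azure.search.documents.indexes.models import (
    DistanceScoringFunction,
    DistanceScoringParameters,
)

distance_boost = DistanceScoringFunction(
    field_name="location",
    boost=2.0,
    parameters=DistanceScoringParameters(
        reference_point_parameter="currentLocation",
        boosting_distance=10,  # kilometers from the reference point
    ),
    interpolation="linear",
)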
- :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. - :vartype parsing_mode: str - :ivar data_to_extract: The type of data to be extracted for the skill. Will be set to - 'contentAndMetadata' if not defined. - :vartype data_to_extract: str - :ivar configuration: A dictionary of configurations for the skill. - :vartype configuration: dict[str, any] - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "parsing_mode": {"key": "parsingMode", "type": "str"}, - "data_to_extract": {"key": "dataToExtract", "type": "str"}, - "configuration": {"key": "configuration", "type": "{object}"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - parsing_mode: Optional[str] = None, - data_to_extract: Optional[str] = None, - configuration: Optional[Dict[str, Any]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword parsing_mode: The parsingMode for the skill. Will be set to 'default' if not defined. - :paramtype parsing_mode: str - :keyword data_to_extract: The type of data to be extracted for the skill. Will be set to - 'contentAndMetadata' if not defined. - :paramtype data_to_extract: str - :keyword configuration: A dictionary of configurations for the skill. 
- :paramtype configuration: dict[str, any] - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Util.DocumentExtractionSkill" - self.parsing_mode = parsing_mode - self.data_to_extract = data_to_extract - self.configuration = configuration - - -class DocumentIntelligenceLayoutSkill(SearchIndexerSkill): - """A skill that extracts content and layout information (as markdown), via Azure AI Services, from - files within the enrichment pipeline. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar output_mode: Controls the cardinality of the output produced by the skill. Default is - 'oneToMany'. "oneToMany" - :vartype output_mode: str or - ~azure.search.documents.indexes.models.DocumentIntelligenceLayoutSkillOutputMode - :ivar markdown_header_depth: The depth of headers in the markdown output. Default is h6. Known - values are: "h1", "h2", "h3", "h4", "h5", and "h6". - :vartype markdown_header_depth: str or - ~azure.search.documents.indexes.models.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "output_mode": {"key": "outputMode", "type": "str"}, - "markdown_header_depth": {"key": "markdownHeaderDepth", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - output_mode: Union[str, "_models.DocumentIntelligenceLayoutSkillOutputMode"] = "oneToMany", - markdown_header_depth: Union[str, "_models.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth"] = "h6", - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. 
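# Editorial sketch (not part of the patch): wiring the document extraction skill defined
# above. InputFieldMappingEntry / OutputFieldMappingEntry come from the same models module;
# the "/document/file_data" source path and output name are illustrative.
from azure.search.documents.indexes.models import (
    DocumentExtractionSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

extract_skill = DocumentExtractionSkill(
    name="extract-content",
    context="/document",
    inputs=[InputFieldMappingEntry(name="file_data", source="/document/file_data")],
    outputs=[OutputFieldMappingEntry(name="content", target_name="extracted_content")],
    parsing_mode="default",
    data_to_extract="contentAndMetadata",
)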
A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword output_mode: Controls the cardinality of the output produced by the skill. Default is - 'oneToMany'. "oneToMany" - :paramtype output_mode: str or - ~azure.search.documents.indexes.models.DocumentIntelligenceLayoutSkillOutputMode - :keyword markdown_header_depth: The depth of headers in the markdown output. Default is h6. - Known values are: "h1", "h2", "h3", "h4", "h5", and "h6". - :paramtype markdown_header_depth: str or - ~azure.search.documents.indexes.models.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill" - self.output_mode = output_mode - self.markdown_header_depth = markdown_header_depth - - -class DocumentKeysOrIds(_serialization.Model): - """DocumentKeysOrIds. - - :ivar document_keys: document keys to be reset. - :vartype document_keys: list[str] - :ivar datasource_document_ids: datasource document identifiers to be reset. - :vartype datasource_document_ids: list[str] - """ - - _attribute_map = { - "document_keys": {"key": "documentKeys", "type": "[str]"}, - "datasource_document_ids": {"key": "datasourceDocumentIds", "type": "[str]"}, - } - - def __init__( - self, - *, - document_keys: Optional[List[str]] = None, - datasource_document_ids: Optional[List[str]] = None, - **kwargs: Any - ) -> None: - """ - :keyword document_keys: document keys to be reset. - :paramtype document_keys: list[str] - :keyword datasource_document_ids: datasource document identifiers to be reset. - :paramtype datasource_document_ids: list[str] - """ - super().__init__(**kwargs) - self.document_keys = document_keys - self.datasource_document_ids = datasource_document_ids - - -class EdgeNGramTokenFilter(TokenFilter): - """Generates n-grams of the given size(s) starting from the front or the back of an input token. - This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of - maxGram. 
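# Editorial sketch (not part of the patch): the Document Intelligence layout skill defined
# above. This is a preview model, so the public re-export is an assumption and may only be
# available in preview versions of the package; the output name is illustrative.
from azure.search.documents.indexes.models import (
    DocumentIntelligenceLayoutSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

layout_skill = DocumentIntelligenceLayoutSkill(
    inputs=[InputFieldMappingEntry(name="file_data", source="/document/file_data")],
    outputs=[OutputFieldMappingEntry(name="markdown_document", target_name="markdownDocument")],
    output_mode="oneToMany",
    markdown_header_depth="h3",
)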
- :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. - :vartype max_gram: int - :ivar side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Known values are: "front" and "back". - :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "min_gram": {"key": "minGram", "type": "int"}, - "max_gram": {"key": "maxGram", "type": "int"}, - "side": {"key": "side", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - min_gram: int = 1, - max_gram: int = 2, - side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of - maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. - :paramtype max_gram: int - :keyword side: Specifies which side of the input the n-gram should be generated from. Default - is "front". Known values are: "front" and "back". - :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.EdgeNGramTokenFilter" - self.min_gram = min_gram - self.max_gram = max_gram - self.side = side - - -class EdgeNGramTokenFilterV2(TokenFilter): - """Generates n-grams of the given size(s) starting from the front or the back of an input token. - This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the - value of maxGram. - :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :vartype max_gram: int - :ivar side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Known values are: "front" and "back". - :vartype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "min_gram": {"maximum": 300}, - "max_gram": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "min_gram": {"key": "minGram", "type": "int"}, - "max_gram": {"key": "maxGram", "type": "int"}, - "side": {"key": "side", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - min_gram: int = 1, - max_gram: int = 2, - side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. 
It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword side: Specifies which side of the input the n-gram should be generated from. Default - is "front". Known values are: "front" and "back". - :paramtype side: str or ~azure.search.documents.indexes.models.EdgeNGramTokenFilterSide - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2" - self.min_gram = min_gram - self.max_gram = max_gram - self.side = side - - -class EdgeNGramTokenizer(LexicalTokenizer): - """Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is - implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the - value of maxGram. - :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :vartype max_gram: int - :ivar token_chars: Character classes to keep in the tokens. - :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "min_gram": {"maximum": 300}, - "max_gram": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "min_gram": {"key": "minGram", "type": "int"}, - "max_gram": {"key": "maxGram", "type": "int"}, - "token_chars": {"key": "tokenChars", "type": "[str]"}, - } - - def __init__( - self, - *, - name: str, - min_gram: int = 1, - max_gram: int = 2, - token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword token_chars: Character classes to keep in the tokens. - :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.EdgeNGramTokenizer" - self.min_gram = min_gram - self.max_gram = max_gram - self.token_chars = token_chars - - -class ElisionTokenFilter(TokenFilter): - """Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). 
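# Editorial sketch (not part of the patch): an edge n-gram tokenizer per the definition above,
# a common building block for autocomplete. token_chars accepts TokenCharacterKind values or
# their string forms; min_gram must stay below max_gram.
from azure.search.documents.indexes.models import EdgeNGramTokenizer

autocomplete_tokenizer = EdgeNGramTokenizer(
    name="autocomplete_edge_ngram",
    min_gram=2,
    max_gram=20,
    token_chars=["letter", "digit"],
)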
This - token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar articles: The set of articles to remove. - :vartype articles: list[str] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "articles": {"key": "articles", "type": "[str]"}, - } - - def __init__(self, *, name: str, articles: Optional[List[str]] = None, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword articles: The set of articles to remove. - :paramtype articles: list[str] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.ElisionTokenFilter" - self.articles = articles - - -class EntityLinkingSkill(SearchIndexerSkill): - """Using the Text Analytics API, extracts linked entities from text. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - :vartype default_language_code: str - :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. If not set (default), or if explicitly - set to null, all entities will be included. - :vartype minimum_precision: float - :ivar model_version: The version of the model to use when calling the Text Analytics service. - It will default to the latest available when not specified. We recommend you do not specify - this value unless absolutely necessary. 
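# Editorial sketch (not part of the patch): a French elision filter per the definition above;
# the article list is illustrative.
from azure.search.documents.indexes.models import ElisionTokenFilter

french_elision = ElisionTokenFilter(
    name="french_elision",
    articles=["l", "m", "t", "qu", "n", "s", "j"],
)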
- :vartype model_version: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - "minimum_precision": {"maximum": 1, "minimum": 0}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "minimum_precision": {"key": "minimumPrecision", "type": "float"}, - "model_version": {"key": "modelVersion", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[str] = None, - minimum_precision: Optional[float] = None, - model_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. If not set (default), or if explicitly - set to null, all entities will be included. - :paramtype minimum_precision: float - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.V3.EntityLinkingSkill" - self.default_language_code = default_language_code - self.minimum_precision = minimum_precision - self.model_version = model_version - - -class EntityRecognitionSkill(SearchIndexerSkill): - """This skill is deprecated. Use the V3.EntityRecognitionSkill instead. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. 
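# Editorial sketch (not part of the patch): the entity linking skill defined above, keeping
# only entities whose confidence is at least 0.75 and leaving model_version to the service
# default; input/output paths are illustrative.
from azure.search.documents.indexes.models import (
    EntityLinkingSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

linking_skill = EntityLinkingSkill(
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="entities", target_name="linked_entities")],
    default_language_code="en",
    minimum_precision=0.75,
)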
- :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar categories: A list of entity categories that should be extracted. - :vartype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - Known values are: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", "el", - "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", and "tr". - :vartype default_language_code: str or - ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage - :ivar include_typeless_entities: Determines whether or not to include entities which are well - known but don't conform to a pre-defined type. If this configuration is not set (default), set - to null or set to false, entities which don't conform to one of the pre-defined types will not - be surfaced. - :vartype include_typeless_entities: bool - :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. If not set (default), or if explicitly - set to null, all entities will be included. 
- :vartype minimum_precision: float - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "categories": {"key": "categories", "type": "[str]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "include_typeless_entities": {"key": "includeTypelessEntities", "type": "bool"}, - "minimum_precision": {"key": "minimumPrecision", "type": "float"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - categories: Optional[List[Union[str, "_models.EntityCategory"]]] = None, - default_language_code: Optional[Union[str, "_models.EntityRecognitionSkillLanguage"]] = None, - include_typeless_entities: Optional[bool] = None, - minimum_precision: Optional[float] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword categories: A list of entity categories that should be extracted. - :paramtype categories: list[str or ~azure.search.documents.indexes.models.EntityCategory] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. Known values are: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", - "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", and "tr". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.EntityRecognitionSkillLanguage - :keyword include_typeless_entities: Determines whether or not to include entities which are - well known but don't conform to a pre-defined type. If this configuration is not set (default), - set to null or set to false, entities which don't conform to one of the pre-defined types will - not be surfaced. - :paramtype include_typeless_entities: bool - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. 
If not set (default), or if explicitly - set to null, all entities will be included. - :paramtype minimum_precision: float - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.EntityRecognitionSkill" - self.categories = categories - self.default_language_code = default_language_code - self.include_typeless_entities = include_typeless_entities - self.minimum_precision = minimum_precision - - -class EntityRecognitionSkillV3(SearchIndexerSkill): - """Using the Text Analytics API, extracts entities of different types from text. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar categories: A list of entity categories that should be extracted. - :vartype categories: list[str] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - :vartype default_language_code: str - :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. If not set (default), or if explicitly - set to null, all entities will be included. - :vartype minimum_precision: float - :ivar model_version: The version of the model to use when calling the Text Analytics API. It - will default to the latest available when not specified. We recommend you do not specify this - value unless absolutely necessary. 
- :vartype model_version: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - "minimum_precision": {"maximum": 1, "minimum": 0}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "categories": {"key": "categories", "type": "[str]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "minimum_precision": {"key": "minimumPrecision", "type": "float"}, - "model_version": {"key": "modelVersion", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - categories: Optional[List[str]] = None, - default_language_code: Optional[str] = None, - minimum_precision: Optional[float] = None, - model_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword categories: A list of entity categories that should be extracted. - :paramtype categories: list[str] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. If not set (default), or if explicitly - set to null, all entities will be included. - :paramtype minimum_precision: float - :keyword model_version: The version of the model to use when calling the Text Analytics API. It - will default to the latest available when not specified. We recommend you do not specify this - value unless absolutely necessary. 
- :paramtype model_version: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.V3.EntityRecognitionSkill" - self.categories = categories - self.default_language_code = default_language_code - self.minimum_precision = minimum_precision - self.model_version = model_version - - -class ErrorAdditionalInfo(_serialization.Model): - """The resource management error additional info. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar type: The additional info type. - :vartype type: str - :ivar info: The additional info. - :vartype info: JSON - """ - - _validation = { - "type": {"readonly": True}, - "info": {"readonly": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "info": {"key": "info", "type": "object"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.type = None - self.info = None - - -class ErrorDetail(_serialization.Model): - """The error detail. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar code: The error code. - :vartype code: str - :ivar message: The error message. - :vartype message: str - :ivar target: The error target. - :vartype target: str - :ivar details: The error details. - :vartype details: list[~azure.search.documents.indexes.models.ErrorDetail] - :ivar additional_info: The error additional info. - :vartype additional_info: list[~azure.search.documents.indexes.models.ErrorAdditionalInfo] - """ - - _validation = { - "code": {"readonly": True}, - "message": {"readonly": True}, - "target": {"readonly": True}, - "details": {"readonly": True}, - "additional_info": {"readonly": True}, - } - - _attribute_map = { - "code": {"key": "code", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "target": {"key": "target", "type": "str"}, - "details": {"key": "details", "type": "[ErrorDetail]"}, - "additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.code = None - self.message = None - self.target = None - self.details = None - self.additional_info = None - - -class ErrorResponse(_serialization.Model): - """Common error response for all Azure Resource Manager APIs to return error details for failed - operations. (This also follows the OData error response format.). - - :ivar error: The error object. - :vartype error: ~azure.search.documents.indexes.models.ErrorDetail - """ - - _attribute_map = { - "error": {"key": "error", "type": "ErrorDetail"}, - } - - def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None: - """ - :keyword error: The error object. - :paramtype error: ~azure.search.documents.indexes.models.ErrorDetail - """ - super().__init__(**kwargs) - self.error = error - - -class VectorSearchAlgorithmConfiguration(_serialization.Model): - """Contains configuration options specific to the algorithm used during indexing or querying. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ExhaustiveKnnAlgorithmConfiguration, HnswAlgorithmConfiguration - - All required parameters must be populated in order to send to server. - - :ivar name: The name to associate with this particular configuration. Required. 
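# Editorial sketch (not part of the patch): constructing the V3 entity recognition skill
# defined above. The class name follows this generated module; the public package may expose
# it differently (for example through a versioned EntityRecognitionSkill wrapper), so treat
# the import as an assumption. Category and output names are illustrative.
from azure.search.documents.indexes.models import (
    EntityRecognitionSkillV3,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

ner_skill = EntityRecognitionSkillV3(
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="persons", target_name="people")],
    categories=["Person", "Organization"],
    minimum_precision=0.5,
)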
- :vartype name: str - :ivar kind: The name of the kind of algorithm being configured for use with vector search. - Required. Known values are: "hnsw" and "exhaustiveKnn". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchAlgorithmKind - """ - - _validation = { - "name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - } - - _subtype_map = { - "kind": {"exhaustiveKnn": "ExhaustiveKnnAlgorithmConfiguration", "hnsw": "HnswAlgorithmConfiguration"} - } - - def __init__(self, *, name: str, **kwargs: Any) -> None: - """ - :keyword name: The name to associate with this particular configuration. Required. - :paramtype name: str - """ - super().__init__(**kwargs) - self.name = name - self.kind: Optional[str] = None - - -class ExhaustiveKnnAlgorithmConfiguration(VectorSearchAlgorithmConfiguration): - """Contains configuration options specific to the exhaustive KNN algorithm used during querying, - which will perform brute-force search across the entire vector index. - - All required parameters must be populated in order to send to server. - - :ivar name: The name to associate with this particular configuration. Required. - :vartype name: str - :ivar kind: The name of the kind of algorithm being configured for use with vector search. - Required. Known values are: "hnsw" and "exhaustiveKnn". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchAlgorithmKind - :ivar parameters: Contains the parameters specific to exhaustive KNN algorithm. - :vartype parameters: ~azure.search.documents.indexes.models.ExhaustiveKnnParameters - """ - - _validation = { - "name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "parameters": {"key": "exhaustiveKnnParameters", "type": "ExhaustiveKnnParameters"}, - } - - def __init__( - self, *, name: str, parameters: Optional["_models.ExhaustiveKnnParameters"] = None, **kwargs: Any - ) -> None: - """ - :keyword name: The name to associate with this particular configuration. Required. - :paramtype name: str - :keyword parameters: Contains the parameters specific to exhaustive KNN algorithm. - :paramtype parameters: ~azure.search.documents.indexes.models.ExhaustiveKnnParameters - """ - super().__init__(name=name, **kwargs) - self.kind: str = "exhaustiveKnn" - self.parameters = parameters - - -class ExhaustiveKnnParameters(_serialization.Model): - """Contains the parameters specific to exhaustive KNN algorithm. - - :ivar metric: The similarity metric to use for vector comparisons. Known values are: "cosine", - "euclidean", "dotProduct", and "hamming". - :vartype metric: str or ~azure.search.documents.indexes.models.VectorSearchAlgorithmMetric - """ - - _attribute_map = { - "metric": {"key": "metric", "type": "str"}, - } - - def __init__( - self, *, metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = None, **kwargs: Any - ) -> None: - """ - :keyword metric: The similarity metric to use for vector comparisons. Known values are: - "cosine", "euclidean", "dotProduct", and "hamming". - :paramtype metric: str or ~azure.search.documents.indexes.models.VectorSearchAlgorithmMetric - """ - super().__init__(**kwargs) - self.metric = metric - - -class FieldMapping(_serialization.Model): - """Defines a mapping between a field in a data source and a target field in an index. 
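# Editorial sketch (not part of the patch): an exhaustive (brute-force) KNN configuration per
# the definitions above; the "kind" discriminator is fixed to "exhaustiveKnn" by the subclass.
from azure.search.documents.indexes.models import (
    ExhaustiveKnnAlgorithmConfiguration,
    ExhaustiveKnnParameters,
)

eknn_config = ExhaustiveKnnAlgorithmConfiguration(
    name="eknn-cosine",
    parameters=ExhaustiveKnnParameters(metric="cosine"),
)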
- - All required parameters must be populated in order to send to server. - - :ivar source_field_name: The name of the field in the data source. Required. - :vartype source_field_name: str - :ivar target_field_name: The name of the target field in the index. Same as the source field - name by default. - :vartype target_field_name: str - :ivar mapping_function: A function to apply to each source field value before indexing. - :vartype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction - """ - - _validation = { - "source_field_name": {"required": True}, - } - - _attribute_map = { - "source_field_name": {"key": "sourceFieldName", "type": "str"}, - "target_field_name": {"key": "targetFieldName", "type": "str"}, - "mapping_function": {"key": "mappingFunction", "type": "FieldMappingFunction"}, - } - - def __init__( - self, - *, - source_field_name: str, - target_field_name: Optional[str] = None, - mapping_function: Optional["_models.FieldMappingFunction"] = None, - **kwargs: Any - ) -> None: - """ - :keyword source_field_name: The name of the field in the data source. Required. - :paramtype source_field_name: str - :keyword target_field_name: The name of the target field in the index. Same as the source field - name by default. - :paramtype target_field_name: str - :keyword mapping_function: A function to apply to each source field value before indexing. - :paramtype mapping_function: ~azure.search.documents.indexes.models.FieldMappingFunction - """ - super().__init__(**kwargs) - self.source_field_name = source_field_name - self.target_field_name = target_field_name - self.mapping_function = mapping_function - - -class FieldMappingFunction(_serialization.Model): - """Represents a function that transforms a value from a data source before indexing. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the field mapping function. Required. - :vartype name: str - :ivar parameters: A dictionary of parameter name/value pairs to pass to the function. Each - value must be of a primitive type. - :vartype parameters: dict[str, any] - """ - - _validation = { - "name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "parameters": {"key": "parameters", "type": "{object}"}, - } - - def __init__(self, *, name: str, parameters: Optional[Dict[str, Any]] = None, **kwargs: Any) -> None: - """ - :keyword name: The name of the field mapping function. Required. - :paramtype name: str - :keyword parameters: A dictionary of parameter name/value pairs to pass to the function. Each - value must be of a primitive type. - :paramtype parameters: dict[str, any] - """ - super().__init__(**kwargs) - self.name = name - self.parameters = parameters - - -class FreshnessScoringFunction(ScoringFunction): - """Defines a function that boosts scores based on the value of a date-time field. - - All required parameters must be populated in order to send to server. - - :ivar type: Indicates the type of function to use. Valid values include magnitude, freshness, - distance, and tag. The function type must be lower case. Required. - :vartype type: str - :ivar field_name: The name of the field used as input to the scoring function. Required. - :vartype field_name: str - :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :vartype boost: float - :ivar interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". 
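# Editorial sketch (not part of the patch): mapping a source column onto an index field and
# applying a mapping function; "base64Encode" is one of the service's built-in functions and
# is commonly used to turn a storage path into a valid document key.
from azure.search.documents.indexes.models import FieldMapping, FieldMappingFunction

key_mapping = FieldMapping(
    source_field_name="metadata_storage_path",
    target_field_name="id",
    mapping_function=FieldMappingFunction(name="base64Encode"),
)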
Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :vartype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :ivar parameters: Parameter values for the freshness scoring function. Required. - :vartype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters - """ - - _validation = { - "type": {"required": True}, - "field_name": {"required": True}, - "boost": {"required": True}, - "parameters": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "field_name": {"key": "fieldName", "type": "str"}, - "boost": {"key": "boost", "type": "float"}, - "interpolation": {"key": "interpolation", "type": "str"}, - "parameters": {"key": "freshness", "type": "FreshnessScoringParameters"}, - } - - def __init__( - self, - *, - field_name: str, - boost: float, - parameters: "_models.FreshnessScoringParameters", - interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword field_name: The name of the field used as input to the scoring function. Required. - :paramtype field_name: str - :keyword boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :paramtype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Parameter values for the freshness scoring function. Required. - :paramtype parameters: ~azure.search.documents.indexes.models.FreshnessScoringParameters - """ - super().__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type: str = "freshness" - self.parameters = parameters - - -class FreshnessScoringParameters(_serialization.Model): - """Provides parameter values to a freshness scoring function. - - All required parameters must be populated in order to send to server. - - :ivar boosting_duration: The expiration period after which boosting will stop for a particular - document. Required. - :vartype boosting_duration: ~datetime.timedelta - """ - - _validation = { - "boosting_duration": {"required": True}, - } - - _attribute_map = { - "boosting_duration": {"key": "boostingDuration", "type": "duration"}, - } - - def __init__(self, *, boosting_duration: datetime.timedelta, **kwargs: Any) -> None: - """ - :keyword boosting_duration: The expiration period after which boosting will stop for a - particular document. Required. - :paramtype boosting_duration: ~datetime.timedelta - """ - super().__init__(**kwargs) - self.boosting_duration = boosting_duration - - -class GetIndexStatisticsResult(_serialization.Model): - """Statistics for a given index. Statistics are collected periodically and are not guaranteed to - always be up-to-date. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar document_count: The number of documents in the index. Required. - :vartype document_count: int - :ivar storage_size: The amount of storage in bytes consumed by the index. Required. - :vartype storage_size: int - :ivar vector_index_size: The amount of memory in bytes consumed by vectors in the index. - Required. 
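# Editorial sketch (not part of the patch): boosting recently updated documents for 30 days,
# per the freshness scoring definitions above; boosting_duration is a timedelta, serialized
# as an ISO 8601 duration. Field name and boost value are illustrative.
import datetime

from azure.search.documents.indexes.models import (
    FreshnessScoringFunction,
    FreshnessScoringParameters,
)

freshness_boost = FreshnessScoringFunction(
    field_name="lastUpdated",
    boost=1.5,
    parameters=FreshnessScoringParameters(boosting_duration=datetime.timedelta(days=30)),
    interpolation="quadratic",
)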
- :vartype vector_index_size: int - """ - - _validation = { - "document_count": {"required": True, "readonly": True}, - "storage_size": {"required": True, "readonly": True}, - "vector_index_size": {"required": True, "readonly": True}, - } - - _attribute_map = { - "document_count": {"key": "documentCount", "type": "int"}, - "storage_size": {"key": "storageSize", "type": "int"}, - "vector_index_size": {"key": "vectorIndexSize", "type": "int"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.document_count = None - self.storage_size = None - self.vector_index_size = None - - -class HighWaterMarkChangeDetectionPolicy(DataChangeDetectionPolicy): - """Defines a data change detection policy that captures changes based on the value of a high water - mark column. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of data change detection policy. Required. - :vartype odata_type: str - :ivar high_water_mark_column_name: The name of the high water mark column. Required. - :vartype high_water_mark_column_name: str - """ - - _validation = { - "odata_type": {"required": True}, - "high_water_mark_column_name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "high_water_mark_column_name": {"key": "highWaterMarkColumnName", "type": "str"}, - } - - def __init__(self, *, high_water_mark_column_name: str, **kwargs: Any) -> None: - """ - :keyword high_water_mark_column_name: The name of the high water mark column. Required. - :paramtype high_water_mark_column_name: str - """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" - self.high_water_mark_column_name = high_water_mark_column_name - - -class HnswAlgorithmConfiguration(VectorSearchAlgorithmConfiguration): - """Contains configuration options specific to the HNSW approximate nearest neighbors algorithm - used during indexing and querying. The HNSW algorithm offers a tunable trade-off between search - speed and accuracy. - - All required parameters must be populated in order to send to server. - - :ivar name: The name to associate with this particular configuration. Required. - :vartype name: str - :ivar kind: The name of the kind of algorithm being configured for use with vector search. - Required. Known values are: "hnsw" and "exhaustiveKnn". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchAlgorithmKind - :ivar parameters: Contains the parameters specific to HNSW algorithm. - :vartype parameters: ~azure.search.documents.indexes.models.HnswParameters - """ - - _validation = { - "name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "parameters": {"key": "hnswParameters", "type": "HnswParameters"}, - } - - def __init__(self, *, name: str, parameters: Optional["_models.HnswParameters"] = None, **kwargs: Any) -> None: - """ - :keyword name: The name to associate with this particular configuration. Required. - :paramtype name: str - :keyword parameters: Contains the parameters specific to HNSW algorithm. 
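For the HighWaterMarkChangeDetectionPolicy above, a minimal sketch; the column name is an assumption chosen for illustration (for example, Cosmos DB's "_ts" timestamp):

    from azure.search.documents.indexes.models import HighWaterMarkChangeDetectionPolicy

    # Track incremental changes through a monotonically increasing column.
    change_detection = HighWaterMarkChangeDetectionPolicy(high_water_mark_column_name="_ts")

In the current public SDK this policy is typically assigned to a data source connection's data_change_detection_policy before the connection is created or updated.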
- :paramtype parameters: ~azure.search.documents.indexes.models.HnswParameters - """ - super().__init__(name=name, **kwargs) - self.kind: str = "hnsw" - self.parameters = parameters - - -class HnswParameters(_serialization.Model): - """Contains the parameters specific to the HNSW algorithm. - - :ivar m: The number of bi-directional links created for every new element during construction. - Increasing this parameter value may improve recall and reduce retrieval times for datasets with - high intrinsic dimensionality at the expense of increased memory consumption and longer - indexing time. - :vartype m: int - :ivar ef_construction: The size of the dynamic list containing the nearest neighbors, which is - used during index time. Increasing this parameter may improve index quality, at the expense of - increased indexing time. At a certain point, increasing this parameter leads to diminishing - returns. - :vartype ef_construction: int - :ivar ef_search: The size of the dynamic list containing the nearest neighbors, which is used - during search time. Increasing this parameter may improve search results, at the expense of - slower search. At a certain point, increasing this parameter leads to diminishing returns. - :vartype ef_search: int - :ivar metric: The similarity metric to use for vector comparisons. Known values are: "cosine", - "euclidean", "dotProduct", and "hamming". - :vartype metric: str or ~azure.search.documents.indexes.models.VectorSearchAlgorithmMetric - """ - - _validation = { - "m": {"maximum": 10, "minimum": 4}, - "ef_construction": {"maximum": 1000, "minimum": 100}, - "ef_search": {"maximum": 1000, "minimum": 100}, - } - - _attribute_map = { - "m": {"key": "m", "type": "int"}, - "ef_construction": {"key": "efConstruction", "type": "int"}, - "ef_search": {"key": "efSearch", "type": "int"}, - "metric": {"key": "metric", "type": "str"}, - } - - def __init__( - self, - *, - m: int = 4, - ef_construction: int = 400, - ef_search: int = 500, - metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword m: The number of bi-directional links created for every new element during - construction. Increasing this parameter value may improve recall and reduce retrieval times for - datasets with high intrinsic dimensionality at the expense of increased memory consumption and - longer indexing time. - :paramtype m: int - :keyword ef_construction: The size of the dynamic list containing the nearest neighbors, which - is used during index time. Increasing this parameter may improve index quality, at the expense - of increased indexing time. At a certain point, increasing this parameter leads to diminishing - returns. - :paramtype ef_construction: int - :keyword ef_search: The size of the dynamic list containing the nearest neighbors, which is - used during search time. Increasing this parameter may improve search results, at the expense - of slower search. At a certain point, increasing this parameter leads to diminishing returns. - :paramtype ef_search: int - :keyword metric: The similarity metric to use for vector comparisons. Known values are: - "cosine", "euclidean", "dotProduct", and "hamming". - :paramtype metric: str or ~azure.search.documents.indexes.models.VectorSearchAlgorithmMetric - """ - super().__init__(**kwargs) - self.m = m - self.ef_construction = ef_construction - self.ef_search = ef_search - self.metric = metric - - -class ImageAnalysisSkill(SearchIndexerSkill): - """A skill that analyzes image files. 
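A minimal sketch of an HNSW vector search configuration using the two models above; the configuration name and parameter values are illustrative but stay within the ranges documented in the class:

    from azure.search.documents.indexes.models import HnswAlgorithmConfiguration, HnswParameters

    # Trade a little indexing time for better recall by raising efConstruction,
    # while keeping the graph degree (m) and query-time efSearch at their defaults.
    hnsw = HnswAlgorithmConfiguration(
        name="my-hnsw-config",
        parameters=HnswParameters(m=4, ef_construction=600, ef_search=500, metric="cosine"),
    )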
It extracts a rich set of visual features based on the image - content. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - Known values are: "ar", "az", "bg", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "es", "et", - "eu", "fi", "fr", "ga", "gl", "he", "hi", "hr", "hu", "id", "it", "ja", "kk", "ko", "lt", "lv", - "mk", "ms", "nb", "nl", "pl", "prs", "pt-BR", "pt", "pt-PT", "ro", "ru", "sk", "sl", "sr-Cyrl", - "sr-Latn", "sv", "th", "tr", "uk", "vi", "zh", "zh-Hans", and "zh-Hant". - :vartype default_language_code: str or - ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage - :ivar visual_features: A list of visual features. - :vartype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] - :ivar details: A string indicating which domain-specific details to return. - :vartype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "visual_features": {"key": "visualFeatures", "type": "[str]"}, - "details": {"key": "details", "type": "[str]"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[Union[str, "_models.ImageAnalysisSkillLanguage"]] = None, - visual_features: Optional[List[Union[str, "_models.VisualFeature"]]] = None, - details: Optional[List[Union[str, "_models.ImageDetail"]]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. 
A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. Known values are: "ar", "az", "bg", "bs", "ca", "cs", "cy", "da", "de", "el", "en", - "es", "et", "eu", "fi", "fr", "ga", "gl", "he", "hi", "hr", "hu", "id", "it", "ja", "kk", "ko", - "lt", "lv", "mk", "ms", "nb", "nl", "pl", "prs", "pt-BR", "pt", "pt-PT", "ro", "ru", "sk", - "sl", "sr-Cyrl", "sr-Latn", "sv", "th", "tr", "uk", "vi", "zh", "zh-Hans", and "zh-Hant". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.ImageAnalysisSkillLanguage - :keyword visual_features: A list of visual features. - :paramtype visual_features: list[str or ~azure.search.documents.indexes.models.VisualFeature] - :keyword details: A string indicating which domain-specific details to return. - :paramtype details: list[str or ~azure.search.documents.indexes.models.ImageDetail] - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Vision.ImageAnalysisSkill" - self.default_language_code = default_language_code - self.visual_features = visual_features - self.details = details - - -class IndexerCurrentState(_serialization.Model): - """Represents all of the state that defines and dictates the indexer's current execution. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar mode: The mode the indexer is running in. Known values are: "indexingAllDocs" and - "indexingResetDocs". - :vartype mode: str or ~azure.search.documents.indexes.models.IndexingMode - :ivar all_docs_initial_change_tracking_state: Change tracking state used when indexing starts - on all documents in the datasource. - :vartype all_docs_initial_change_tracking_state: str - :ivar all_docs_final_change_tracking_state: Change tracking state value when indexing finishes - on all documents in the datasource. - :vartype all_docs_final_change_tracking_state: str - :ivar reset_docs_initial_change_tracking_state: Change tracking state used when indexing starts - on select, reset documents in the datasource. - :vartype reset_docs_initial_change_tracking_state: str - :ivar reset_docs_final_change_tracking_state: Change tracking state value when indexing - finishes on select, reset documents in the datasource. - :vartype reset_docs_final_change_tracking_state: str - :ivar reset_document_keys: The list of document keys that have been reset. 
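A sketch of wiring the ImageAnalysisSkill above into a skillset; the normalized_images source path, the selected visual features, and the OutputFieldMappingEntry signature (name/target_name) are assumptions based on the current public SDK:

    from azure.search.documents.indexes.models import (
        ImageAnalysisSkill,
        InputFieldMappingEntry,
        OutputFieldMappingEntry,
    )

    # Analyze the images the indexer extracts from each document and emit tags.
    image_skill = ImageAnalysisSkill(
        name="image-analysis",
        context="/document/normalized_images/*",
        inputs=[InputFieldMappingEntry(name="image", source="/document/normalized_images/*")],
        outputs=[OutputFieldMappingEntry(name="tags", target_name="imageTags")],
        default_language_code="en",
        visual_features=["tags", "description"],
    )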
The document key is - the document's unique identifier for the data in the search index. The indexer will prioritize - selectively re-ingesting these keys. - :vartype reset_document_keys: list[str] - :ivar reset_datasource_document_ids: The list of datasource document ids that have been reset. - The datasource document id is the unique identifier for the data in the datasource. The indexer - will prioritize selectively re-ingesting these ids. - :vartype reset_datasource_document_ids: list[str] - """ - - _validation = { - "mode": {"readonly": True}, - "all_docs_initial_change_tracking_state": {"readonly": True}, - "all_docs_final_change_tracking_state": {"readonly": True}, - "reset_docs_initial_change_tracking_state": {"readonly": True}, - "reset_docs_final_change_tracking_state": {"readonly": True}, - "reset_document_keys": {"readonly": True}, - "reset_datasource_document_ids": {"readonly": True}, - } - - _attribute_map = { - "mode": {"key": "mode", "type": "str"}, - "all_docs_initial_change_tracking_state": {"key": "allDocsInitialChangeTrackingState", "type": "str"}, - "all_docs_final_change_tracking_state": {"key": "allDocsFinalChangeTrackingState", "type": "str"}, - "reset_docs_initial_change_tracking_state": {"key": "resetDocsInitialChangeTrackingState", "type": "str"}, - "reset_docs_final_change_tracking_state": {"key": "resetDocsFinalChangeTrackingState", "type": "str"}, - "reset_document_keys": {"key": "resetDocumentKeys", "type": "[str]"}, - "reset_datasource_document_ids": {"key": "resetDatasourceDocumentIds", "type": "[str]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.mode = None - self.all_docs_initial_change_tracking_state = None - self.all_docs_final_change_tracking_state = None - self.reset_docs_initial_change_tracking_state = None - self.reset_docs_final_change_tracking_state = None - self.reset_document_keys = None - self.reset_datasource_document_ids = None - - -class IndexerExecutionResult(_serialization.Model): - """Represents the result of an individual indexer execution. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar status: The outcome of this indexer execution. Required. Known values are: - "transientFailure", "success", "inProgress", and "reset". - :vartype status: str or ~azure.search.documents.indexes.models.IndexerExecutionStatus - :ivar status_detail: The outcome of this indexer execution. "resetDocs" - :vartype status_detail: str or - ~azure.search.documents.indexes.models.IndexerExecutionStatusDetail - :ivar current_state: All of the state that defines and dictates the indexer's current - execution. - :vartype current_state: ~azure.search.documents.indexes.models.IndexerCurrentState - :ivar error_message: The error message indicating the top-level error, if any. - :vartype error_message: str - :ivar start_time: The start time of this indexer execution. - :vartype start_time: ~datetime.datetime - :ivar end_time: The end time of this indexer execution, if the execution has already completed. - :vartype end_time: ~datetime.datetime - :ivar errors: The item-level indexing errors. Required. - :vartype errors: list[~azure.search.documents.indexes.models.SearchIndexerError] - :ivar warnings: The item-level indexing warnings. Required. 
- :vartype warnings: list[~azure.search.documents.indexes.models.SearchIndexerWarning] - :ivar item_count: The number of items that were processed during this indexer execution. This - includes both successfully processed items and items where indexing was attempted but failed. - Required. - :vartype item_count: int - :ivar failed_item_count: The number of items that failed to be indexed during this indexer - execution. Required. - :vartype failed_item_count: int - :ivar initial_tracking_state: Change tracking state with which an indexer execution started. - :vartype initial_tracking_state: str - :ivar final_tracking_state: Change tracking state with which an indexer execution finished. - :vartype final_tracking_state: str - """ - - _validation = { - "status": {"required": True, "readonly": True}, - "status_detail": {"readonly": True}, - "current_state": {"readonly": True}, - "error_message": {"readonly": True}, - "start_time": {"readonly": True}, - "end_time": {"readonly": True}, - "errors": {"required": True, "readonly": True}, - "warnings": {"required": True, "readonly": True}, - "item_count": {"required": True, "readonly": True}, - "failed_item_count": {"required": True, "readonly": True}, - "initial_tracking_state": {"readonly": True}, - "final_tracking_state": {"readonly": True}, - } - - _attribute_map = { - "status": {"key": "status", "type": "str"}, - "status_detail": {"key": "statusDetail", "type": "str"}, - "current_state": {"key": "currentState", "type": "IndexerCurrentState"}, - "error_message": {"key": "errorMessage", "type": "str"}, - "start_time": {"key": "startTime", "type": "iso-8601"}, - "end_time": {"key": "endTime", "type": "iso-8601"}, - "errors": {"key": "errors", "type": "[SearchIndexerError]"}, - "warnings": {"key": "warnings", "type": "[SearchIndexerWarning]"}, - "item_count": {"key": "itemsProcessed", "type": "int"}, - "failed_item_count": {"key": "itemsFailed", "type": "int"}, - "initial_tracking_state": {"key": "initialTrackingState", "type": "str"}, - "final_tracking_state": {"key": "finalTrackingState", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.status = None - self.status_detail = None - self.current_state = None - self.error_message = None - self.start_time = None - self.end_time = None - self.errors = None - self.warnings = None - self.item_count = None - self.failed_item_count = None - self.initial_tracking_state = None - self.final_tracking_state = None - - -class IndexingParameters(_serialization.Model): - """Represents parameters for indexer execution. - - :ivar batch_size: The number of items that are read from the data source and indexed as a - single batch in order to improve performance. The default depends on the data source type. - :vartype batch_size: int - :ivar max_failed_items: The maximum number of items that can fail indexing for indexer - execution to still be considered successful. -1 means no limit. Default is 0. - :vartype max_failed_items: int - :ivar max_failed_items_per_batch: The maximum number of items in a single batch that can fail - indexing for the batch to still be considered successful. -1 means no limit. Default is 0. - :vartype max_failed_items_per_batch: int - :ivar configuration: A dictionary of indexer-specific configuration properties. Each name is - the name of a specific property. Each value must be of a primitive type. 
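Because IndexerCurrentState and IndexerExecutionResult above are populated only by the service, they are normally read back from an indexer status call rather than constructed; a sketch assuming the public SearchIndexerClient.get_indexer_status helper and an indexer named "my-indexer":

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents.indexes import SearchIndexerClient

    client = SearchIndexerClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))
    status = client.get_indexer_status("my-indexer")
    last = status.last_result  # an IndexerExecutionResult, or None if the indexer has not run yet
    if last is not None:
        print(last.status, last.item_count, last.failed_item_count)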
- :vartype configuration: ~azure.search.documents.indexes.models.IndexingParametersConfiguration - """ - - _attribute_map = { - "batch_size": {"key": "batchSize", "type": "int"}, - "max_failed_items": {"key": "maxFailedItems", "type": "int"}, - "max_failed_items_per_batch": {"key": "maxFailedItemsPerBatch", "type": "int"}, - "configuration": {"key": "configuration", "type": "IndexingParametersConfiguration"}, - } - - def __init__( - self, - *, - batch_size: Optional[int] = None, - max_failed_items: int = 0, - max_failed_items_per_batch: int = 0, - configuration: Optional["_models.IndexingParametersConfiguration"] = None, - **kwargs: Any - ) -> None: - """ - :keyword batch_size: The number of items that are read from the data source and indexed as a - single batch in order to improve performance. The default depends on the data source type. - :paramtype batch_size: int - :keyword max_failed_items: The maximum number of items that can fail indexing for indexer - execution to still be considered successful. -1 means no limit. Default is 0. - :paramtype max_failed_items: int - :keyword max_failed_items_per_batch: The maximum number of items in a single batch that can - fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. - :paramtype max_failed_items_per_batch: int - :keyword configuration: A dictionary of indexer-specific configuration properties. Each name is - the name of a specific property. Each value must be of a primitive type. - :paramtype configuration: - ~azure.search.documents.indexes.models.IndexingParametersConfiguration - """ - super().__init__(**kwargs) - self.batch_size = batch_size - self.max_failed_items = max_failed_items - self.max_failed_items_per_batch = max_failed_items_per_batch - self.configuration = configuration - - -class IndexingParametersConfiguration(_serialization.Model): - """A dictionary of indexer-specific configuration properties. Each name is the name of a specific - property. Each value must be of a primitive type. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. - Known values are: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines", and - "markdown". - :vartype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode - :ivar excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore when - processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip over - those files during indexing. - :vartype excluded_file_name_extensions: str - :ivar indexed_file_name_extensions: Comma-delimited list of filename extensions to select when - processing from Azure blob storage. For example, you could focus indexing on specific - application files ".docx, .pptx, .msg" to specifically include those file types. - :vartype indexed_file_name_extensions: str - :ivar fail_on_unsupported_content_type: For Azure blobs, set to false if you want to continue - indexing when an unsupported content type is encountered, and you don't know all the content - types (file extensions) in advance. - :vartype fail_on_unsupported_content_type: bool - :ivar fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue - indexing if a document fails indexing. 
- :vartype fail_on_unprocessable_document: bool - :ivar index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this property - to true to still index storage metadata for blob content that is too large to process. - Oversized blobs are treated as errors by default. For limits on blob size, see - https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. - :vartype index_storage_metadata_only_for_oversized_documents: bool - :ivar delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column - headers, useful for mapping source fields to destination fields in an index. - :vartype delimited_text_headers: str - :ivar delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character - delimiter for CSV files where each line starts a new document (for example, "|"). - :vartype delimited_text_delimiter: str - :ivar first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line of - each blob contains headers. - :vartype first_line_contains_headers: bool - :ivar markdown_parsing_submode: Specifies the submode that will determine whether a markdown - file will be parsed into exactly one search document or multiple search documents. Default is - ``oneToMany``. Known values are: "oneToMany" and "oneToOne". - :vartype markdown_parsing_submode: str or - ~azure.search.documents.indexes.models.MarkdownParsingSubmode - :ivar markdown_header_depth: Specifies the max header depth that will be considered while - grouping markdown content. Default is ``h6``. Known values are: "h1", "h2", "h3", "h4", "h5", - and "h6". - :vartype markdown_header_depth: str or - ~azure.search.documents.indexes.models.MarkdownHeaderDepth - :ivar document_root: For JSON arrays, given a structured or semi-structured document, you can - specify a path to the array using this property. - :vartype document_root: str - :ivar data_to_extract: Specifies the data to extract from Azure blob storage and tells the - indexer which data to extract from image content when "imageAction" is set to a value other - than "none". This applies to embedded image content in a .PDF or other application, or image - files such as .jpg and .png, in Azure blobs. Known values are: "storageMetadata", - "allMetadata", and "contentAndMetadata". - :vartype data_to_extract: str or - ~azure.search.documents.indexes.models.BlobIndexerDataToExtract - :ivar image_action: Determines how to process embedded images and image files in Azure blob - storage. Setting the "imageAction" configuration to any value other than "none" requires that - a skillset also be attached to that indexer. Known values are: "none", - "generateNormalizedImages", and "generateNormalizedImagePerPage". - :vartype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction - :ivar allow_skillset_to_read_file_data: If true, will create a path //document//file_data that - is an object representing the original file data downloaded from your blob data source. This - allows you to pass the original file data to a custom skill for processing within the - enrichment pipeline, or to the Document Extraction skill. - :vartype allow_skillset_to_read_file_data: bool - :ivar pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files in - Azure blob storage. Known values are: "none" and "detectAngles". 
- :vartype pdf_text_rotation_algorithm: str or - ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm - :ivar execution_environment: Specifies the environment in which the indexer should execute. - Known values are: "standard" and "private". - :vartype execution_environment: str or - ~azure.search.documents.indexes.models.IndexerExecutionEnvironment - :ivar query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL database - data sources, specified in the format "hh:mm:ss". - :vartype query_timeout: str - """ - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "parsing_mode": {"key": "parsingMode", "type": "str"}, - "excluded_file_name_extensions": {"key": "excludedFileNameExtensions", "type": "str"}, - "indexed_file_name_extensions": {"key": "indexedFileNameExtensions", "type": "str"}, - "fail_on_unsupported_content_type": {"key": "failOnUnsupportedContentType", "type": "bool"}, - "fail_on_unprocessable_document": {"key": "failOnUnprocessableDocument", "type": "bool"}, - "index_storage_metadata_only_for_oversized_documents": { - "key": "indexStorageMetadataOnlyForOversizedDocuments", - "type": "bool", - }, - "delimited_text_headers": {"key": "delimitedTextHeaders", "type": "str"}, - "delimited_text_delimiter": {"key": "delimitedTextDelimiter", "type": "str"}, - "first_line_contains_headers": {"key": "firstLineContainsHeaders", "type": "bool"}, - "markdown_parsing_submode": {"key": "markdownParsingSubmode", "type": "str"}, - "markdown_header_depth": {"key": "markdownHeaderDepth", "type": "str"}, - "document_root": {"key": "documentRoot", "type": "str"}, - "data_to_extract": {"key": "dataToExtract", "type": "str"}, - "image_action": {"key": "imageAction", "type": "str"}, - "allow_skillset_to_read_file_data": {"key": "allowSkillsetToReadFileData", "type": "bool"}, - "pdf_text_rotation_algorithm": {"key": "pdfTextRotationAlgorithm", "type": "str"}, - "execution_environment": {"key": "executionEnvironment", "type": "str"}, - "query_timeout": {"key": "queryTimeout", "type": "str"}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - parsing_mode: Union[str, "_models.BlobIndexerParsingMode"] = "default", - excluded_file_name_extensions: str = "", - indexed_file_name_extensions: str = "", - fail_on_unsupported_content_type: bool = False, - fail_on_unprocessable_document: bool = False, - index_storage_metadata_only_for_oversized_documents: bool = False, - delimited_text_headers: Optional[str] = None, - delimited_text_delimiter: Optional[str] = None, - first_line_contains_headers: bool = True, - markdown_parsing_submode: Union[str, "_models.MarkdownParsingSubmode"] = "oneToMany", - markdown_header_depth: Union[str, "_models.MarkdownHeaderDepth"] = "h6", - document_root: Optional[str] = None, - data_to_extract: Union[str, "_models.BlobIndexerDataToExtract"] = "contentAndMetadata", - image_action: Union[str, "_models.BlobIndexerImageAction"] = "none", - allow_skillset_to_read_file_data: bool = False, - pdf_text_rotation_algorithm: Union[str, "_models.BlobIndexerPDFTextRotationAlgorithm"] = "none", - execution_environment: Union[str, "_models.IndexerExecutionEnvironment"] = "standard", - query_timeout: str = "00:05:00", - **kwargs: Any - ) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. 
- :paramtype additional_properties: dict[str, any] - :keyword parsing_mode: Represents the parsing mode for indexing from an Azure blob data source. - Known values are: "default", "text", "delimitedText", "json", "jsonArray", "jsonLines", and - "markdown". - :paramtype parsing_mode: str or ~azure.search.documents.indexes.models.BlobIndexerParsingMode - :keyword excluded_file_name_extensions: Comma-delimited list of filename extensions to ignore - when processing from Azure blob storage. For example, you could exclude ".png, .mp4" to skip - over those files during indexing. - :paramtype excluded_file_name_extensions: str - :keyword indexed_file_name_extensions: Comma-delimited list of filename extensions to select - when processing from Azure blob storage. For example, you could focus indexing on specific - application files ".docx, .pptx, .msg" to specifically include those file types. - :paramtype indexed_file_name_extensions: str - :keyword fail_on_unsupported_content_type: For Azure blobs, set to false if you want to - continue indexing when an unsupported content type is encountered, and you don't know all the - content types (file extensions) in advance. - :paramtype fail_on_unsupported_content_type: bool - :keyword fail_on_unprocessable_document: For Azure blobs, set to false if you want to continue - indexing if a document fails indexing. - :paramtype fail_on_unprocessable_document: bool - :keyword index_storage_metadata_only_for_oversized_documents: For Azure blobs, set this - property to true to still index storage metadata for blob content that is too large to process. - Oversized blobs are treated as errors by default. For limits on blob size, see - https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. - :paramtype index_storage_metadata_only_for_oversized_documents: bool - :keyword delimited_text_headers: For CSV blobs, specifies a comma-delimited list of column - headers, useful for mapping source fields to destination fields in an index. - :paramtype delimited_text_headers: str - :keyword delimited_text_delimiter: For CSV blobs, specifies the end-of-line single-character - delimiter for CSV files where each line starts a new document (for example, "|"). - :paramtype delimited_text_delimiter: str - :keyword first_line_contains_headers: For CSV blobs, indicates that the first (non-blank) line - of each blob contains headers. - :paramtype first_line_contains_headers: bool - :keyword markdown_parsing_submode: Specifies the submode that will determine whether a markdown - file will be parsed into exactly one search document or multiple search documents. Default is - ``oneToMany``. Known values are: "oneToMany" and "oneToOne". - :paramtype markdown_parsing_submode: str or - ~azure.search.documents.indexes.models.MarkdownParsingSubmode - :keyword markdown_header_depth: Specifies the max header depth that will be considered while - grouping markdown content. Default is ``h6``. Known values are: "h1", "h2", "h3", "h4", "h5", - and "h6". - :paramtype markdown_header_depth: str or - ~azure.search.documents.indexes.models.MarkdownHeaderDepth - :keyword document_root: For JSON arrays, given a structured or semi-structured document, you - can specify a path to the array using this property. - :paramtype document_root: str - :keyword data_to_extract: Specifies the data to extract from Azure blob storage and tells the - indexer which data to extract from image content when "imageAction" is set to a value other - than "none". 
This applies to embedded image content in a .PDF or other application, or image - files such as .jpg and .png, in Azure blobs. Known values are: "storageMetadata", - "allMetadata", and "contentAndMetadata". - :paramtype data_to_extract: str or - ~azure.search.documents.indexes.models.BlobIndexerDataToExtract - :keyword image_action: Determines how to process embedded images and image files in Azure blob - storage. Setting the "imageAction" configuration to any value other than "none" requires that - a skillset also be attached to that indexer. Known values are: "none", - "generateNormalizedImages", and "generateNormalizedImagePerPage". - :paramtype image_action: str or ~azure.search.documents.indexes.models.BlobIndexerImageAction - :keyword allow_skillset_to_read_file_data: If true, will create a path //document//file_data - that is an object representing the original file data downloaded from your blob data source. - This allows you to pass the original file data to a custom skill for processing within the - enrichment pipeline, or to the Document Extraction skill. - :paramtype allow_skillset_to_read_file_data: bool - :keyword pdf_text_rotation_algorithm: Determines algorithm for text extraction from PDF files - in Azure blob storage. Known values are: "none" and "detectAngles". - :paramtype pdf_text_rotation_algorithm: str or - ~azure.search.documents.indexes.models.BlobIndexerPDFTextRotationAlgorithm - :keyword execution_environment: Specifies the environment in which the indexer should execute. - Known values are: "standard" and "private". - :paramtype execution_environment: str or - ~azure.search.documents.indexes.models.IndexerExecutionEnvironment - :keyword query_timeout: Increases the timeout beyond the 5-minute default for Azure SQL - database data sources, specified in the format "hh:mm:ss". - :paramtype query_timeout: str - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.parsing_mode = parsing_mode - self.excluded_file_name_extensions = excluded_file_name_extensions - self.indexed_file_name_extensions = indexed_file_name_extensions - self.fail_on_unsupported_content_type = fail_on_unsupported_content_type - self.fail_on_unprocessable_document = fail_on_unprocessable_document - self.index_storage_metadata_only_for_oversized_documents = index_storage_metadata_only_for_oversized_documents - self.delimited_text_headers = delimited_text_headers - self.delimited_text_delimiter = delimited_text_delimiter - self.first_line_contains_headers = first_line_contains_headers - self.markdown_parsing_submode = markdown_parsing_submode - self.markdown_header_depth = markdown_header_depth - self.document_root = document_root - self.data_to_extract = data_to_extract - self.image_action = image_action - self.allow_skillset_to_read_file_data = allow_skillset_to_read_file_data - self.pdf_text_rotation_algorithm = pdf_text_rotation_algorithm - self.execution_environment = execution_environment - self.query_timeout = query_timeout - - -class IndexingSchedule(_serialization.Model): - """Represents a schedule for indexer execution. - - All required parameters must be populated in order to send to server. - - :ivar interval: The interval of time between indexer executions. Required. - :vartype interval: ~datetime.timedelta - :ivar start_time: The time when an indexer should start running. 
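A sketch combining the IndexingParameters and IndexingParametersConfiguration models above for a JSON-array blob source; the values are illustrative but drawn from the known values and defaults documented in the class:

    from azure.search.documents.indexes.models import (
        IndexingParameters,
        IndexingParametersConfiguration,
    )

    # Parse each blob as a JSON array rooted at /items and tolerate a few failed
    # documents per run without failing the whole indexer execution.
    parameters = IndexingParameters(
        batch_size=100,
        max_failed_items=10,
        configuration=IndexingParametersConfiguration(
            parsing_mode="jsonArray",
            document_root="/items",
            data_to_extract="contentAndMetadata",
        ),
    )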
- :vartype start_time: ~datetime.datetime - """ - - _validation = { - "interval": {"required": True}, - } - - _attribute_map = { - "interval": {"key": "interval", "type": "duration"}, - "start_time": {"key": "startTime", "type": "iso-8601"}, - } - - def __init__( - self, *, interval: datetime.timedelta, start_time: Optional[datetime.datetime] = None, **kwargs: Any - ) -> None: - """ - :keyword interval: The interval of time between indexer executions. Required. - :paramtype interval: ~datetime.timedelta - :keyword start_time: The time when an indexer should start running. - :paramtype start_time: ~datetime.datetime - """ - super().__init__(**kwargs) - self.interval = interval - self.start_time = start_time - - -class InputFieldMappingEntry(_serialization.Model): - """Input field mapping for a skill. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the input. Required. - :vartype name: str - :ivar source: The source of the input. - :vartype source: str - :ivar source_context: The source context used for selecting recursive inputs. - :vartype source_context: str - :ivar inputs: The recursive inputs used when creating a complex type. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - """ - - _validation = { - "name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "source": {"key": "source", "type": "str"}, - "source_context": {"key": "sourceContext", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - } - - def __init__( - self, - *, - name: str, - source: Optional[str] = None, - source_context: Optional[str] = None, - inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the input. Required. - :paramtype name: str - :keyword source: The source of the input. - :paramtype source: str - :keyword source_context: The source context used for selecting recursive inputs. - :paramtype source_context: str - :keyword inputs: The recursive inputs used when creating a complex type. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - """ - super().__init__(**kwargs) - self.name = name - self.source = source - self.source_context = source_context - self.inputs = inputs - - -class KeepTokenFilter(TokenFilter): - """A token filter that only keeps tokens with text contained in a specified list of words. This - token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar keep_words: The list of words to keep. Required. - :vartype keep_words: list[str] - :ivar lower_case_keep_words: A value indicating whether to lower case all words first. Default - is false. 
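A sketch of the IndexingSchedule model above; the interval and start time are illustrative, and in the public SDK the schedule is typically assigned to an indexer's schedule property:

    import datetime

    from azure.search.documents.indexes.models import IndexingSchedule

    # Run the indexer hourly, starting at midnight UTC on 1 Jan 2025 (illustrative).
    schedule = IndexingSchedule(
        interval=datetime.timedelta(hours=1),
        start_time=datetime.datetime(2025, 1, 1, tzinfo=datetime.timezone.utc),
    )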
- :vartype lower_case_keep_words: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "keep_words": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "keep_words": {"key": "keepWords", "type": "[str]"}, - "lower_case_keep_words": {"key": "keepWordsCase", "type": "bool"}, - } - - def __init__(self, *, name: str, keep_words: List[str], lower_case_keep_words: bool = False, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword keep_words: The list of words to keep. Required. - :paramtype keep_words: list[str] - :keyword lower_case_keep_words: A value indicating whether to lower case all words first. - Default is false. - :paramtype lower_case_keep_words: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.KeepTokenFilter" - self.keep_words = keep_words - self.lower_case_keep_words = lower_case_keep_words - - -class KeyPhraseExtractionSkill(SearchIndexerSkill): - """A skill that uses text analytics for key phrase extraction. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - Known values are: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt-PT", - "pt-BR", "ru", "es", and "sv". - :vartype default_language_code: str or - ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage - :ivar max_key_phrase_count: A number indicating how many key phrases to return. If absent, all - identified key phrases will be returned. - :vartype max_key_phrase_count: int - :ivar model_version: The version of the model to use when calling the Text Analytics service. - It will default to the latest available when not specified. We recommend you do not specify - this value unless absolutely necessary. 
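A sketch of the KeepTokenFilter above; the filter name and word list are illustrative:

    from azure.search.documents.indexes.models import KeepTokenFilter

    # Keep only an allow-list of product terms, lower-casing tokens before comparison.
    keep_filter = KeepTokenFilter(
        name="keep_product_terms",
        keep_words=["azure", "search", "cosmos"],
        lower_case_keep_words=True,
    )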
- :vartype model_version: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "max_key_phrase_count": {"key": "maxKeyPhraseCount", "type": "int"}, - "model_version": {"key": "modelVersion", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[Union[str, "_models.KeyPhraseExtractionSkillLanguage"]] = None, - max_key_phrase_count: Optional[int] = None, - model_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. Known values are: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", - "pt-PT", "pt-BR", "ru", "es", and "sv". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.KeyPhraseExtractionSkillLanguage - :keyword max_key_phrase_count: A number indicating how many key phrases to return. If absent, - all identified key phrases will be returned. - :paramtype max_key_phrase_count: int - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.KeyPhraseExtractionSkill" - self.default_language_code = default_language_code - self.max_key_phrase_count = max_key_phrase_count - self.model_version = model_version - - -class KeywordMarkerTokenFilter(TokenFilter): - """Marks terms as keywords. This token filter is implemented using Apache Lucene. 
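A sketch of the KeyPhraseExtractionSkill above; the input/output paths and the OutputFieldMappingEntry signature are assumptions based on the current public SDK:

    from azure.search.documents.indexes.models import (
        InputFieldMappingEntry,
        KeyPhraseExtractionSkill,
        OutputFieldMappingEntry,
    )

    # Pull up to ten key phrases out of each document's merged text content.
    key_phrase_skill = KeyPhraseExtractionSkill(
        name="key-phrases",
        context="/document",
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="keyPhrases", target_name="keyPhrases")],
        default_language_code="en",
        max_key_phrase_count=10,
    )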
- - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar keywords: A list of words to mark as keywords. Required. - :vartype keywords: list[str] - :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted - to lower case first. Default is false. - :vartype ignore_case: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "keywords": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "keywords": {"key": "keywords", "type": "[str]"}, - "ignore_case": {"key": "ignoreCase", "type": "bool"}, - } - - def __init__(self, *, name: str, keywords: List[str], ignore_case: bool = False, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword keywords: A list of words to mark as keywords. Required. - :paramtype keywords: list[str] - :keyword ignore_case: A value indicating whether to ignore case. If true, all words are - converted to lower case first. Default is false. - :paramtype ignore_case: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.KeywordMarkerTokenFilter" - self.keywords = keywords - self.ignore_case = ignore_case - - -class KeywordTokenizer(LexicalTokenizer): - """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar buffer_size: The read buffer size in bytes. Default is 256. - :vartype buffer_size: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "buffer_size": {"key": "bufferSize", "type": "int"}, - } - - def __init__(self, *, name: str, buffer_size: int = 256, **kwargs: Any) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword buffer_size: The read buffer size in bytes. Default is 256. - :paramtype buffer_size: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.KeywordTokenizer" - self.buffer_size = buffer_size - - -class KeywordTokenizerV2(LexicalTokenizer): - """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. 
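A sketch of the KeywordMarkerTokenFilter above, protecting selected terms from downstream stemming; the filter name and keyword list are illustrative:

    from azure.search.documents.indexes.models import KeywordMarkerTokenFilter

    # Mark brand names as keywords so a later stemming filter leaves them untouched.
    keyword_marker = KeywordMarkerTokenFilter(
        name="protect_brand_names",
        keywords=["Azure", "Fabrikam"],
        ignore_case=True,
    )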
- - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Default is 256. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :vartype max_token_length: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - } - - def __init__(self, *, name: str, max_token_length: int = 256, **kwargs: Any) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 256. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.KeywordTokenizerV2" - self.max_token_length = max_token_length - - -class LanguageDetectionSkill(SearchIndexerSkill): - """A skill that detects the language of input text and reports a single language code for every - document submitted on the request. The language code is paired with a score indicating the - confidence of the analysis. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_country_hint: A country code to use as a hint to the language detection model if - it cannot disambiguate the language. - :vartype default_country_hint: str - :ivar model_version: The version of the model to use when calling the Text Analytics service. - It will default to the latest available when not specified. 
We recommend you do not specify - this value unless absolutely necessary. - :vartype model_version: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_country_hint": {"key": "defaultCountryHint", "type": "str"}, - "model_version": {"key": "modelVersion", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_country_hint: Optional[str] = None, - model_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_country_hint: A country code to use as a hint to the language detection model - if it cannot disambiguate the language. - :paramtype default_country_hint: str - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.LanguageDetectionSkill" - self.default_country_hint = default_country_hint - self.model_version = model_version - - -class LengthTokenFilter(TokenFilter): - """Removes words that are too long or too short. This token filter is implemented using Apache - Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar min_length: The minimum length in characters. Default is 0. Maximum is 300. 
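A sketch of the LanguageDetectionSkill above; the source path and the OutputFieldMappingEntry signature are assumptions from the current public SDK:

    from azure.search.documents.indexes.models import (
        InputFieldMappingEntry,
        LanguageDetectionSkill,
        OutputFieldMappingEntry,
    )

    # Detect each document's language and surface the detected code as "language".
    language_skill = LanguageDetectionSkill(
        inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
        outputs=[OutputFieldMappingEntry(name="languageCode", target_name="language")],
    )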
Must be less - than the value of max. - :vartype min_length: int - :ivar max_length: The maximum length in characters. Default and maximum is 300. - :vartype max_length: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "min_length": {"maximum": 300}, - "max_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "min_length": {"key": "min", "type": "int"}, - "max_length": {"key": "max", "type": "int"}, - } - - def __init__(self, *, name: str, min_length: int = 0, max_length: int = 300, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be - less than the value of max. - :paramtype min_length: int - :keyword max_length: The maximum length in characters. Default and maximum is 300. - :paramtype max_length: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.LengthTokenFilter" - self.min_length = min_length - self.max_length = max_length - - -class LimitTokenFilter(TokenFilter): - """Limits the number of tokens while indexing. This token filter is implemented using Apache - Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_count: The maximum number of tokens to produce. Default is 1. - :vartype max_token_count: int - :ivar consume_all_tokens: A value indicating whether all tokens from the input must be consumed - even if maxTokenCount is reached. Default is false. - :vartype consume_all_tokens: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_count": {"key": "maxTokenCount", "type": "int"}, - "consume_all_tokens": {"key": "consumeAllTokens", "type": "bool"}, - } - - def __init__(self, *, name: str, max_token_count: int = 1, consume_all_tokens: bool = False, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword max_token_count: The maximum number of tokens to produce. Default is 1. - :paramtype max_token_count: int - :keyword consume_all_tokens: A value indicating whether all tokens from the input must be - consumed even if maxTokenCount is reached. Default is false. - :paramtype consume_all_tokens: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.LimitTokenFilter" - self.max_token_count = max_token_count - self.consume_all_tokens = consume_all_tokens - - -class ListAliasesResult(_serialization.Model): - """Response from a List Aliases request. 
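# A minimal usage sketch for the LengthTokenFilter and LimitTokenFilter models defined
# above, assuming both are re-exported from azure.search.documents.indexes.models as
# the docstring cross-references indicate. The filter names are illustrative.
from azure.search.documents.indexes.models import LengthTokenFilter, LimitTokenFilter

# Keep only tokens between 2 and 50 characters (min must be less than max; max <= 300).
length_filter = LengthTokenFilter(name="length-2-to-50", min_length=2, max_length=50)

# Emit at most 20 tokens per field; the rest of the input is dropped because
# consume_all_tokens is left at its default of False.
limit_filter = LimitTokenFilter(name="limit-20", max_token_count=20)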
If successful, it includes the associated index mappings - for all aliases. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar aliases: The aliases in the Search service. Required. - :vartype aliases: list[~azure.search.documents.indexes.models.SearchAlias] - """ - - _validation = { - "aliases": {"required": True, "readonly": True}, - } - - _attribute_map = { - "aliases": {"key": "value", "type": "[SearchAlias]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.aliases = None - - -class ListDataSourcesResult(_serialization.Model): - """Response from a List Datasources request. If successful, it includes the full definitions of - all datasources. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar data_sources: The datasources in the Search service. Required. - :vartype data_sources: list[~azure.search.documents.indexes.models.SearchIndexerDataSource] - """ - - _validation = { - "data_sources": {"required": True, "readonly": True}, - } - - _attribute_map = { - "data_sources": {"key": "value", "type": "[SearchIndexerDataSource]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.data_sources = None - - -class ListIndexersResult(_serialization.Model): - """Response from a List Indexers request. If successful, it includes the full definitions of all - indexers. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar indexers: The indexers in the Search service. Required. - :vartype indexers: list[~azure.search.documents.indexes.models.SearchIndexer] - """ - - _validation = { - "indexers": {"required": True, "readonly": True}, - } - - _attribute_map = { - "indexers": {"key": "value", "type": "[SearchIndexer]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.indexers = None - - -class ListIndexesResult(_serialization.Model): - """Response from a List Indexes request. If successful, it includes the full definitions of all - indexes. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar indexes: The indexes in the Search service. Required. - :vartype indexes: list[~azure.search.documents.indexes.models.SearchIndex] - """ - - _validation = { - "indexes": {"required": True, "readonly": True}, - } - - _attribute_map = { - "indexes": {"key": "value", "type": "[SearchIndex]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.indexes = None - - -class ListSkillsetsResult(_serialization.Model): - """Response from a list skillset request. If successful, it includes the full definitions of all - skillsets. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar skillsets: The skillsets defined in the Search service. Required. 
- :vartype skillsets: list[~azure.search.documents.indexes.models.SearchIndexerSkillset] - """ - - _validation = { - "skillsets": {"required": True, "readonly": True}, - } - - _attribute_map = { - "skillsets": {"key": "value", "type": "[SearchIndexerSkillset]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.skillsets = None - - -class ListSynonymMapsResult(_serialization.Model): - """Response from a List SynonymMaps request. If successful, it includes the full definitions of - all synonym maps. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar synonym_maps: The synonym maps in the Search service. Required. - :vartype synonym_maps: list[~azure.search.documents.indexes.models.SynonymMap] - """ - - _validation = { - "synonym_maps": {"required": True, "readonly": True}, - } - - _attribute_map = { - "synonym_maps": {"key": "value", "type": "[SynonymMap]"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.synonym_maps = None - - -class LuceneStandardAnalyzer(LexicalAnalyzer): - """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop - filter. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of analyzer. Required. - :vartype odata_type: str - :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :vartype max_token_length: int - :ivar stopwords: A list of stopwords. - :vartype stopwords: list[str] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - "stopwords": {"key": "stopwords", "type": "[str]"}, - } - - def __init__( - self, *, name: str, max_token_length: int = 255, stopwords: Optional[List[str]] = None, **kwargs: Any - ) -> None: - """ - :keyword name: The name of the analyzer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.StandardAnalyzer" - self.max_token_length = max_token_length - self.stopwords = stopwords - - -class LuceneStandardTokenizer(LexicalTokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using - Apache Lucene. - - All required parameters must be populated in order to send to server. 
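# A short configuration sketch for the LuceneStandardAnalyzer defined above (standard
# tokenizer + lowercase filter + stop filter), assuming the class is available from
# azure.search.documents.indexes.models. The analyzer name and stopword list are
# illustrative only.
from azure.search.documents.indexes.models import LuceneStandardAnalyzer

standard_analyzer = LuceneStandardAnalyzer(
    name="standard-with-stopwords",
    max_token_length=255,           # tokens longer than this are split (hard cap is 300)
    stopwords=["the", "and", "of"],
)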
- - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. - :vartype max_token_length: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - } - - def __init__(self, *, name: str, max_token_length: int = 255, **kwargs: Any) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. - :paramtype max_token_length: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.StandardTokenizer" - self.max_token_length = max_token_length - - -class LuceneStandardTokenizerV2(LexicalTokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using - Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :vartype max_token_length: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - } - - def __init__(self, *, name: str, max_token_length: int = 255, **kwargs: Any) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.StandardTokenizerV2" - self.max_token_length = max_token_length - - -class MagnitudeScoringFunction(ScoringFunction): - """Defines a function that boosts scores based on the magnitude of a numeric field. - - All required parameters must be populated in order to send to server. - - :ivar type: Indicates the type of function to use. 
Valid values include magnitude, freshness, - distance, and tag. The function type must be lower case. Required. - :vartype type: str - :ivar field_name: The name of the field used as input to the scoring function. Required. - :vartype field_name: str - :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :vartype boost: float - :ivar interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :vartype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :ivar parameters: Parameter values for the magnitude scoring function. Required. - :vartype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters - """ - - _validation = { - "type": {"required": True}, - "field_name": {"required": True}, - "boost": {"required": True}, - "parameters": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "field_name": {"key": "fieldName", "type": "str"}, - "boost": {"key": "boost", "type": "float"}, - "interpolation": {"key": "interpolation", "type": "str"}, - "parameters": {"key": "magnitude", "type": "MagnitudeScoringParameters"}, - } - - def __init__( - self, - *, - field_name: str, - boost: float, - parameters: "_models.MagnitudeScoringParameters", - interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword field_name: The name of the field used as input to the scoring function. Required. - :paramtype field_name: str - :keyword boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :paramtype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Parameter values for the magnitude scoring function. Required. - :paramtype parameters: ~azure.search.documents.indexes.models.MagnitudeScoringParameters - """ - super().__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type: str = "magnitude" - self.parameters = parameters - - -class MagnitudeScoringParameters(_serialization.Model): - """Provides parameter values to a magnitude scoring function. - - All required parameters must be populated in order to send to server. - - :ivar boosting_range_start: The field value at which boosting starts. Required. - :vartype boosting_range_start: float - :ivar boosting_range_end: The field value at which boosting ends. Required. - :vartype boosting_range_end: float - :ivar should_boost_beyond_range_by_constant: A value indicating whether to apply a constant - boost for field values beyond the range end value; default is false. 
- :vartype should_boost_beyond_range_by_constant: bool - """ - - _validation = { - "boosting_range_start": {"required": True}, - "boosting_range_end": {"required": True}, - } - - _attribute_map = { - "boosting_range_start": {"key": "boostingRangeStart", "type": "float"}, - "boosting_range_end": {"key": "boostingRangeEnd", "type": "float"}, - "should_boost_beyond_range_by_constant": {"key": "constantBoostBeyondRange", "type": "bool"}, - } - - def __init__( - self, - *, - boosting_range_start: float, - boosting_range_end: float, - should_boost_beyond_range_by_constant: Optional[bool] = None, - **kwargs: Any - ) -> None: - """ - :keyword boosting_range_start: The field value at which boosting starts. Required. - :paramtype boosting_range_start: float - :keyword boosting_range_end: The field value at which boosting ends. Required. - :paramtype boosting_range_end: float - :keyword should_boost_beyond_range_by_constant: A value indicating whether to apply a constant - boost for field values beyond the range end value; default is false. - :paramtype should_boost_beyond_range_by_constant: bool - """ - super().__init__(**kwargs) - self.boosting_range_start = boosting_range_start - self.boosting_range_end = boosting_range_end - self.should_boost_beyond_range_by_constant = should_boost_beyond_range_by_constant - - -class MappingCharFilter(CharFilter): - """A character filter that applies mappings defined with the mappings option. Matching is greedy - (longest pattern matching at a given point wins). Replacement is allowed to be the empty - string. This character filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of char filter. Required. - :vartype odata_type: str - :ivar name: The name of the char filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar mappings: A list of mappings of the following format: "a=>b" (all occurrences of the - character "a" will be replaced with character "b"). Required. - :vartype mappings: list[str] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "mappings": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "mappings": {"key": "mappings", "type": "[str]"}, - } - - def __init__(self, *, name: str, mappings: List[str], **kwargs: Any) -> None: - """ - :keyword name: The name of the char filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword mappings: A list of mappings of the following format: "a=>b" (all occurrences of the - character "a" will be replaced with character "b"). Required. - :paramtype mappings: list[str] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.MappingCharFilter" - self.mappings = mappings - - -class MergeSkill(SearchIndexerSkill): - """A skill for merging two or more strings into a single unified string, with an optional - user-defined delimiter separating each component part. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. 
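# A minimal sketch combining the MagnitudeScoringParameters and
# MagnitudeScoringFunction models defined above, assuming both are exported from
# azure.search.documents.indexes.models. The "rating" field name is a hypothetical
# numeric field, not something defined in this patch.
from azure.search.documents.indexes.models import (
    MagnitudeScoringFunction,
    MagnitudeScoringParameters,
)

rating_boost = MagnitudeScoringFunction(
    field_name="rating",
    boost=2.0,                  # must be a positive number other than 1.0
    interpolation="linear",     # one of the documented interpolation values
    parameters=MagnitudeScoringParameters(
        boosting_range_start=0.0,
        boosting_range_end=5.0,
        should_boost_beyond_range_by_constant=True,
    ),
)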
- :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an - empty space. - :vartype insert_pre_tag: str - :ivar insert_post_tag: The tag indicates the end of the merged text. By default, the tag is an - empty space. - :vartype insert_post_tag: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "insert_pre_tag": {"key": "insertPreTag", "type": "str"}, - "insert_post_tag": {"key": "insertPostTag", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - insert_pre_tag: str = " ", - insert_post_tag: str = " ", - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is - an empty space. 
- :paramtype insert_pre_tag: str - :keyword insert_post_tag: The tag indicates the end of the merged text. By default, the tag is - an empty space. - :paramtype insert_post_tag: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.MergeSkill" - self.insert_pre_tag = insert_pre_tag - self.insert_post_tag = insert_post_tag - - -class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): - """Divides text using language-specific rules and reduces words to their base forms. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are - split. Maximum token length that can be used is 300 characters. Tokens longer than 300 - characters are first split into tokens of length 300 and then each of those tokens is split - based on the max token length set. Default is 255. - :vartype max_token_length: int - :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as - the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :vartype is_search_tokenizer: bool - :ivar language: The language to use. The default is English. Known values are: "arabic", - "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", - "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", - "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", - "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", - "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", - "swedish", "tamil", "telugu", "turkish", "ukrainian", and "urdu". - :vartype language: str or - ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - "is_search_tokenizer": {"key": "isSearchTokenizer", "type": "bool"}, - "language": {"key": "language", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - max_token_length: int = 255, - is_search_tokenizer: bool = False, - language: Optional[Union[str, "_models.MicrosoftStemmingTokenizerLanguage"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are - split. Maximum token length that can be used is 300 characters. 
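# A minimal sketch of the MergeSkill defined above, assuming it is re-exported from
# azure.search.documents.indexes.models. The input names ("text", "itemsToInsert"),
# the output name "mergedText", and the source paths follow common OCR-merge
# examples and are illustrative here.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    MergeSkill,
    OutputFieldMappingEntry,
)

merge_skill = MergeSkill(
    context="/document",
    inputs=[
        InputFieldMappingEntry(name="text", source="/document/content"),
        InputFieldMappingEntry(name="itemsToInsert", source="/document/normalized_images/*/text"),
    ],
    outputs=[OutputFieldMappingEntry(name="mergedText", target_name="merged_content")],
    insert_pre_tag=" ",   # defaults shown explicitly: merged pieces are separated by spaces
    insert_post_tag=" ",
)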
Tokens longer than 300 - characters are first split into tokens of length 300 and then each of those tokens is split - based on the max token length set. Default is 255. - :paramtype max_token_length: int - :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used - as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :paramtype is_search_tokenizer: bool - :keyword language: The language to use. The default is English. Known values are: "arabic", - "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", - "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", - "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", - "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", - "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", - "swedish", "tamil", "telugu", "turkish", "ukrainian", and "urdu". - :paramtype language: str or - ~azure.search.documents.indexes.models.MicrosoftStemmingTokenizerLanguage - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer" - self.max_token_length = max_token_length - self.is_search_tokenizer = is_search_tokenizer - self.language = language - - -class MicrosoftLanguageTokenizer(LexicalTokenizer): - """Divides text using language-specific rules. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Tokens longer than the maximum length are - split. Maximum token length that can be used is 300 characters. Tokens longer than 300 - characters are first split into tokens of length 300 and then each of those tokens is split - based on the max token length set. Default is 255. - :vartype max_token_length: int - :ivar is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used as - the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :vartype is_search_tokenizer: bool - :ivar language: The language to use. The default is English. Known values are: "bangla", - "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech", - "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic", - "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi", - "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian", - "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil", - "telugu", "thai", "ukrainian", "urdu", and "vietnamese". 
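# A small configuration sketch for the MicrosoftLanguageStemmingTokenizer defined
# above, assuming it is exposed via azure.search.documents.indexes.models. "english"
# is one of the documented language values; the tokenizer name is illustrative.
from azure.search.documents.indexes.models import MicrosoftLanguageStemmingTokenizer

stemming_tokenizer = MicrosoftLanguageStemmingTokenizer(
    name="en-stemming-tokenizer",
    max_token_length=255,        # longer tokens are split; the hard cap is 300
    is_search_tokenizer=False,   # use indexing-time behavior
    language="english",
)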
- :vartype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - "is_search_tokenizer": {"key": "isSearchTokenizer", "type": "bool"}, - "language": {"key": "language", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - max_token_length: int = 255, - is_search_tokenizer: bool = False, - language: Optional[Union[str, "_models.MicrosoftTokenizerLanguage"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Tokens longer than the maximum length are - split. Maximum token length that can be used is 300 characters. Tokens longer than 300 - characters are first split into tokens of length 300 and then each of those tokens is split - based on the max token length set. Default is 255. - :paramtype max_token_length: int - :keyword is_search_tokenizer: A value indicating how the tokenizer is used. Set to true if used - as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. - :paramtype is_search_tokenizer: bool - :keyword language: The language to use. The default is English. Known values are: "bangla", - "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", "czech", - "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", "icelandic", - "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", "marathi", - "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", "romanian", - "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", "tamil", - "telugu", "thai", "ukrainian", "urdu", and "vietnamese". - :paramtype language: str or ~azure.search.documents.indexes.models.MicrosoftTokenizerLanguage - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer" - self.max_token_length = max_token_length - self.is_search_tokenizer = is_search_tokenizer - self.language = language - - -class NativeBlobSoftDeleteDeletionDetectionPolicy(DataDeletionDetectionPolicy): # pylint: disable=name-too-long - """Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete - feature for deletion detection. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of data deletion detection policy. - Required. - :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy" - - -class NGramTokenFilter(TokenFilter): - """Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. 
- - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of - maxGram. - :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. - :vartype max_gram: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "min_gram": {"key": "minGram", "type": "int"}, - "max_gram": {"key": "maxGram", "type": "int"}, - } - - def __init__(self, *, name: str, min_gram: int = 1, max_gram: int = 2, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Must be less than the value of - maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. - :paramtype max_gram: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.NGramTokenFilter" - self.min_gram = min_gram - self.max_gram = max_gram - - -class NGramTokenFilterV2(TokenFilter): - """Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the - value of maxGram. - :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :vartype max_gram: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "min_gram": {"maximum": 300}, - "max_gram": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "min_gram": {"key": "minGram", "type": "int"}, - "max_gram": {"key": "maxGram", "type": "int"}, - } - - def __init__(self, *, name: str, min_gram: int = 1, max_gram: int = 2, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. 
- :paramtype max_gram: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.NGramTokenFilterV2" - self.min_gram = min_gram - self.max_gram = max_gram - - -class NGramTokenizer(LexicalTokenizer): - """Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using - Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the - value of maxGram. - :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :vartype max_gram: int - :ivar token_chars: Character classes to keep in the tokens. - :vartype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "min_gram": {"maximum": 300}, - "max_gram": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "min_gram": {"key": "minGram", "type": "int"}, - "max_gram": {"key": "maxGram", "type": "int"}, - "token_chars": {"key": "tokenChars", "type": "[str]"}, - } - - def __init__( - self, - *, - name: str, - min_gram: int = 1, - max_gram: int = 2, - token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword min_gram: The minimum n-gram length. Default is 1. Maximum is 300. Must be less than - the value of maxGram. - :paramtype min_gram: int - :keyword max_gram: The maximum n-gram length. Default is 2. Maximum is 300. - :paramtype max_gram: int - :keyword token_chars: Character classes to keep in the tokens. - :paramtype token_chars: list[str or ~azure.search.documents.indexes.models.TokenCharacterKind] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.NGramTokenizer" - self.min_gram = min_gram - self.max_gram = max_gram - self.token_chars = token_chars - - -class OcrSkill(SearchIndexerSkill): - """A skill that extracts text from image files. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. 
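# A minimal sketch for the NGramTokenizer defined above, assuming it is exported from
# azure.search.documents.indexes.models. "letter" and "digit" are documented
# TokenCharacterKind values; the tokenizer name and gram sizes are illustrative.
from azure.search.documents.indexes.models import NGramTokenizer

ngram_tokenizer = NGramTokenizer(
    name="ngram-2-3",
    min_gram=2,                       # must be less than max_gram; both are capped at 300
    max_gram=3,
    token_chars=["letter", "digit"],  # keep letters and digits out of whitespace/punctuation
)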
- :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - Known values are: "af", "sq", "anp", "ar", "ast", "awa", "az", "bfy", "eu", "be", "be-cyrl", - "be-latn", "bho", "bi", "brx", "bs", "bra", "br", "bg", "bns", "bua", "ca", "ceb", "rab", "ch", - "hne", "zh-Hans", "zh-Hant", "kw", "co", "crh", "hr", "cs", "da", "prs", "dhi", "doi", "nl", - "en", "myv", "et", "fo", "fj", "fil", "fi", "fr", "fur", "gag", "gl", "de", "gil", "gon", "el", - "kl", "gvr", "ht", "hlb", "hni", "bgc", "haw", "hi", "mww", "hoc", "hu", "is", "smn", "id", - "ia", "iu", "ga", "it", "ja", "Jns", "jv", "kea", "kac", "xnr", "krc", "kaa-cyrl", "kaa", - "csb", "kk-cyrl", "kk-latn", "klr", "kha", "quc", "ko", "kfq", "kpy", "kos", "kum", "ku-arab", - "ku-latn", "kru", "ky", "lkt", "la", "lt", "dsb", "smj", "lb", "bfz", "ms", "mt", "kmj", "gv", - "mi", "mr", "mn", "cnr-cyrl", "cnr-latn", "nap", "ne", "niu", "nog", "sme", "nb", "no", "oc", - "os", "ps", "fa", "pl", "pt", "pa", "ksh", "ro", "rm", "ru", "sck", "sm", "sa", "sat", "sco", - "gd", "sr", "sr-Cyrl", "sr-Latn", "xsr", "srx", "sms", "sk", "sl", "so", "sma", "es", "sw", - "sv", "tg", "tt", "tet", "thf", "to", "tr", "tk", "tyv", "hsb", "ur", "ug", "uz-arab", - "uz-cyrl", "uz", "vo", "wae", "cy", "fy", "yua", "za", "zu", "unk", and "is". - :vartype default_language_code: str or ~azure.search.documents.indexes.models.OcrSkillLanguage - :ivar should_detect_orientation: A value indicating to turn orientation detection on or not. - Default is false. - :vartype should_detect_orientation: bool - :ivar line_ending: Defines the sequence of characters to use between the lines of text - recognized by the OCR skill. The default value is "space". Known values are: "space", - "carriageReturn", "lineFeed", and "carriageReturnLineFeed". 
- :vartype line_ending: str or ~azure.search.documents.indexes.models.OcrLineEnding - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "should_detect_orientation": {"key": "detectOrientation", "type": "bool"}, - "line_ending": {"key": "lineEnding", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[Union[str, "_models.OcrSkillLanguage"]] = None, - should_detect_orientation: bool = False, - line_ending: Optional[Union[str, "_models.OcrLineEnding"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. 
Known values are: "af", "sq", "anp", "ar", "ast", "awa", "az", "bfy", "eu", "be", - "be-cyrl", "be-latn", "bho", "bi", "brx", "bs", "bra", "br", "bg", "bns", "bua", "ca", "ceb", - "rab", "ch", "hne", "zh-Hans", "zh-Hant", "kw", "co", "crh", "hr", "cs", "da", "prs", "dhi", - "doi", "nl", "en", "myv", "et", "fo", "fj", "fil", "fi", "fr", "fur", "gag", "gl", "de", "gil", - "gon", "el", "kl", "gvr", "ht", "hlb", "hni", "bgc", "haw", "hi", "mww", "hoc", "hu", "is", - "smn", "id", "ia", "iu", "ga", "it", "ja", "Jns", "jv", "kea", "kac", "xnr", "krc", "kaa-cyrl", - "kaa", "csb", "kk-cyrl", "kk-latn", "klr", "kha", "quc", "ko", "kfq", "kpy", "kos", "kum", - "ku-arab", "ku-latn", "kru", "ky", "lkt", "la", "lt", "dsb", "smj", "lb", "bfz", "ms", "mt", - "kmj", "gv", "mi", "mr", "mn", "cnr-cyrl", "cnr-latn", "nap", "ne", "niu", "nog", "sme", "nb", - "no", "oc", "os", "ps", "fa", "pl", "pt", "pa", "ksh", "ro", "rm", "ru", "sck", "sm", "sa", - "sat", "sco", "gd", "sr", "sr-Cyrl", "sr-Latn", "xsr", "srx", "sms", "sk", "sl", "so", "sma", - "es", "sw", "sv", "tg", "tt", "tet", "thf", "to", "tr", "tk", "tyv", "hsb", "ur", "ug", - "uz-arab", "uz-cyrl", "uz", "vo", "wae", "cy", "fy", "yua", "za", "zu", "unk", and "is". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.OcrSkillLanguage - :keyword should_detect_orientation: A value indicating to turn orientation detection on or not. - Default is false. - :paramtype should_detect_orientation: bool - :keyword line_ending: Defines the sequence of characters to use between the lines of text - recognized by the OCR skill. The default value is "space". Known values are: "space", - "carriageReturn", "lineFeed", and "carriageReturnLineFeed". - :paramtype line_ending: str or ~azure.search.documents.indexes.models.OcrLineEnding - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Vision.OcrSkill" - self.default_language_code = default_language_code - self.should_detect_orientation = should_detect_orientation - self.line_ending = line_ending - - -class OutputFieldMappingEntry(_serialization.Model): - """Output field mapping for a skill. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the output defined by the skill. Required. - :vartype name: str - :ivar target_name: The target name of the output. It is optional and default to name. - :vartype target_name: str - """ - - _validation = { - "name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "target_name": {"key": "targetName", "type": "str"}, - } - - def __init__(self, *, name: str, target_name: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword name: The name of the output defined by the skill. Required. - :paramtype name: str - :keyword target_name: The target name of the output. It is optional and default to name. - :paramtype target_name: str - """ - super().__init__(**kwargs) - self.name = name - self.target_name = target_name - - -class PathHierarchyTokenizerV2(LexicalTokenizer): - """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. 
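# A minimal sketch of the OcrSkill defined above, assuming it is re-exported from
# azure.search.documents.indexes.models. The input name "image", the output name
# "text", and the normalized-images context path are illustrative values commonly
# used with OCR enrichment, not values taken from this patch.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    OcrSkill,
    OutputFieldMappingEntry,
)

ocr_skill = OcrSkill(
    context="/document/normalized_images/*",
    inputs=[InputFieldMappingEntry(name="image", source="/document/normalized_images/*")],
    outputs=[OutputFieldMappingEntry(name="text", target_name="extracted_text")],
    default_language_code="en",
    should_detect_orientation=True,
    line_ending="space",  # one of the documented OcrLineEnding values
)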
It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar delimiter: The delimiter character to use. Default is "/". - :vartype delimiter: str - :ivar replacement: A value that, if set, replaces the delimiter character. Default is "/". - :vartype replacement: str - :ivar max_token_length: The maximum token length. Default and maximum is 300. - :vartype max_token_length: int - :ivar reverse_token_order: A value indicating whether to generate tokens in reverse order. - Default is false. - :vartype reverse_token_order: bool - :ivar number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. - :vartype number_of_tokens_to_skip: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "delimiter": {"key": "delimiter", "type": "str"}, - "replacement": {"key": "replacement", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - "reverse_token_order": {"key": "reverse", "type": "bool"}, - "number_of_tokens_to_skip": {"key": "skip", "type": "int"}, - } - - def __init__( - self, - *, - name: str, - delimiter: str = "/", - replacement: str = "/", - max_token_length: int = 300, - reverse_token_order: bool = False, - number_of_tokens_to_skip: int = 0, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword delimiter: The delimiter character to use. Default is "/". - :paramtype delimiter: str - :keyword replacement: A value that, if set, replaces the delimiter character. Default is "/". - :paramtype replacement: str - :keyword max_token_length: The maximum token length. Default and maximum is 300. - :paramtype max_token_length: int - :keyword reverse_token_order: A value indicating whether to generate tokens in reverse order. - Default is false. - :paramtype reverse_token_order: bool - :keyword number_of_tokens_to_skip: The number of initial tokens to skip. Default is 0. - :paramtype number_of_tokens_to_skip: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.PathHierarchyTokenizerV2" - self.delimiter = delimiter - self.replacement = replacement - self.max_token_length = max_token_length - self.reverse_token_order = reverse_token_order - self.number_of_tokens_to_skip = number_of_tokens_to_skip - - -class PatternAnalyzer(LexicalAnalyzer): - """Flexibly separates text into terms via a regular expression pattern. This analyzer is - implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of analyzer. Required. - :vartype odata_type: str - :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar lower_case_terms: A value indicating whether terms should be lower-cased. Default is - true. 
- :vartype lower_case_terms: bool - :ivar pattern: A regular expression pattern to match token separators. Default is an expression - that matches one or more non-word characters. - :vartype pattern: str - :ivar flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", - "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". - :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :ivar stopwords: A list of stopwords. - :vartype stopwords: list[str] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "lower_case_terms": {"key": "lowercase", "type": "bool"}, - "pattern": {"key": "pattern", "type": "str"}, - "flags": {"key": "flags", "type": "str"}, - "stopwords": {"key": "stopwords", "type": "[str]"}, - } - - def __init__( - self, - *, - name: str, - lower_case_terms: bool = True, - pattern: str = "\W+", - flags: Optional[Union[str, "_models.RegexFlags"]] = None, - stopwords: Optional[List[str]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the analyzer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword lower_case_terms: A value indicating whether terms should be lower-cased. Default is - true. - :paramtype lower_case_terms: bool - :keyword pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more non-word characters. - :paramtype pattern: str - :keyword flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", - "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". - :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.PatternAnalyzer" - self.lower_case_terms = lower_case_terms - self.pattern = pattern - self.flags = flags - self.stopwords = stopwords - - -class PatternCaptureTokenFilter(TokenFilter): - """Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. - This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar patterns: A list of patterns to match against each token. Required. - :vartype patterns: list[str] - :ivar preserve_original: A value indicating whether to return the original token even if one of - the patterns matches. Default is true. 
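# A sketch of the PatternAnalyzer defined above. The public
# azure.search.documents.indexes.models namespace is assumed to expose a class of
# the same name (possibly a thin wrapper over this generated model); the analyzer
# name, separator pattern, and stopwords are illustrative.
from azure.search.documents.indexes.models import PatternAnalyzer

comma_analyzer = PatternAnalyzer(
    name="comma-separated-terms",
    lower_case_terms=True,
    pattern=r",+",            # split on runs of commas instead of the default \W+
    stopwords=["none"],
)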
- :vartype preserve_original: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "patterns": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "patterns": {"key": "patterns", "type": "[str]"}, - "preserve_original": {"key": "preserveOriginal", "type": "bool"}, - } - - def __init__(self, *, name: str, patterns: List[str], preserve_original: bool = True, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword patterns: A list of patterns to match against each token. Required. - :paramtype patterns: list[str] - :keyword preserve_original: A value indicating whether to return the original token even if one - of the patterns matches. Default is true. - :paramtype preserve_original: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.PatternCaptureTokenFilter" - self.patterns = patterns - self.preserve_original = preserve_original - - -class PatternReplaceCharFilter(CharFilter): - """A character filter that replaces characters in the input string. It uses a regular expression - to identify character sequences to preserve and a replacement pattern to identify characters to - replace. For example, given the input text "aa bb aa bb", pattern "(aa)\\s+(bb)", and - replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented - using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of char filter. Required. - :vartype odata_type: str - :ivar name: The name of the char filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar pattern: A regular expression pattern. Required. - :vartype pattern: str - :ivar replacement: The replacement text. Required. - :vartype replacement: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "pattern": {"required": True}, - "replacement": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "pattern": {"key": "pattern", "type": "str"}, - "replacement": {"key": "replacement", "type": "str"}, - } - - def __init__(self, *, name: str, pattern: str, replacement: str, **kwargs: Any) -> None: - """ - :keyword name: The name of the char filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword pattern: A regular expression pattern. Required. - :paramtype pattern: str - :keyword replacement: The replacement text. Required. - :paramtype replacement: str - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.PatternReplaceCharFilter" - self.pattern = pattern - self.replacement = replacement - - -class PatternReplaceTokenFilter(TokenFilter): - """A character filter that replaces characters in the input string. 
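# A minimal sketch for the PatternCaptureTokenFilter and PatternReplaceCharFilter
# models defined above, assuming both are exported from
# azure.search.documents.indexes.models. Names and regular expressions are
# illustrative only.
from azure.search.documents.indexes.models import (
    PatternCaptureTokenFilter,
    PatternReplaceCharFilter,
)

# Collapse runs of whitespace to a single space before tokenization.
whitespace_filter = PatternReplaceCharFilter(
    name="collapse-whitespace", pattern=r"\s+", replacement=" "
)

# Emit the domain part of an e-mail-like token as an extra token, keeping the original.
domain_filter = PatternCaptureTokenFilter(
    name="capture-domain", patterns=[r"@(.+)$"], preserve_original=True
)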
It uses a regular expression - to identify character sequences to preserve and a replacement pattern to identify characters to - replace. For example, given the input text "aa bb aa bb", pattern "(aa)\\s+(bb)", and - replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using - Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar pattern: A regular expression pattern. Required. - :vartype pattern: str - :ivar replacement: The replacement text. Required. - :vartype replacement: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "pattern": {"required": True}, - "replacement": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "pattern": {"key": "pattern", "type": "str"}, - "replacement": {"key": "replacement", "type": "str"}, - } - - def __init__(self, *, name: str, pattern: str, replacement: str, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword pattern: A regular expression pattern. Required. - :paramtype pattern: str - :keyword replacement: The replacement text. Required. - :paramtype replacement: str - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.PatternReplaceTokenFilter" - self.pattern = pattern - self.replacement = replacement - - -class PatternTokenizer(LexicalTokenizer): - """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is - implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar pattern: A regular expression pattern to match token separators. Default is an expression - that matches one or more non-word characters. - :vartype pattern: str - :ivar flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", - "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". - :vartype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :ivar group: The zero-based ordinal of the matching group in the regular expression pattern to - extract into tokens. Use -1 if you want to use the entire pattern to split the input into - tokens, irrespective of matching groups. Default is -1. 
- :vartype group: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "pattern": {"key": "pattern", "type": "str"}, - "flags": {"key": "flags", "type": "str"}, - "group": {"key": "group", "type": "int"}, - } - - def __init__( - self, - *, - name: str, - pattern: str = "\W+", - flags: Optional[Union[str, "_models.RegexFlags"]] = None, - group: int = -1, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more non-word characters. - :paramtype pattern: str - :keyword flags: Regular expression flags. Known values are: "CANON_EQ", "CASE_INSENSITIVE", - "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", and "UNIX_LINES". - :paramtype flags: str or ~azure.search.documents.indexes.models.RegexFlags - :keyword group: The zero-based ordinal of the matching group in the regular expression pattern - to extract into tokens. Use -1 if you want to use the entire pattern to split the input into - tokens, irrespective of matching groups. Default is -1. - :paramtype group: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.PatternTokenizer" - self.pattern = pattern - self.flags = flags - self.group = group - - -class PhoneticTokenFilter(TokenFilter): - """Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar encoder: The phonetic encoder to use. Default is "metaphone". Known values are: - "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", - "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", and "beiderMorse". - :vartype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder - :ivar replace_original_tokens: A value indicating whether encoded tokens should replace - original tokens. If false, encoded tokens are added as synonyms. Default is true. - :vartype replace_original_tokens: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "encoder": {"key": "encoder", "type": "str"}, - "replace_original_tokens": {"key": "replace", "type": "bool"}, - } - - def __init__( - self, - *, - name: str, - encoder: Optional[Union[str, "_models.PhoneticEncoder"]] = None, - replace_original_tokens: bool = True, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. 
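# A short sketch of the tokenizer and phonetic filter documented above, assuming both
# are exported from azure.search.documents.indexes.models with these keyword
# arguments; the names used here are hypothetical.
from azure.search.documents.indexes.models import PatternTokenizer, PhoneticTokenFilter

# Split paths on "/" and use the whole pattern as the separator (group=-1).
path_tokenizer = PatternTokenizer(name="path-tokenizer", pattern=r"/+", group=-1)

# Add doubleMetaphone encodings as synonyms rather than replacing the original tokens.
names_phonetic = PhoneticTokenFilter(
    name="names-phonetic", encoder="doubleMetaphone", replace_original_tokens=False
)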
- :paramtype name: str - :keyword encoder: The phonetic encoder to use. Default is "metaphone". Known values are: - "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", - "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", and "beiderMorse". - :paramtype encoder: str or ~azure.search.documents.indexes.models.PhoneticEncoder - :keyword replace_original_tokens: A value indicating whether encoded tokens should replace - original tokens. If false, encoded tokens are added as synonyms. Default is true. - :paramtype replace_original_tokens: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.PhoneticTokenFilter" - self.encoder = encoder - self.replace_original_tokens = replace_original_tokens - - -class PIIDetectionSkill(SearchIndexerSkill): - """Using the Text Analytics API, extracts personal information from an input text and gives you - the option of masking it. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - :vartype default_language_code: str - :ivar minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. If not set (default), or if explicitly - set to null, all entities will be included. - :vartype minimum_precision: float - :ivar masking_mode: A parameter that provides various ways to mask the personal information - detected in the input text. Default is 'none'. Known values are: "none" and "replace". - :vartype masking_mode: str or - ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode - :ivar mask: The character used to mask the text if the maskingMode parameter is set to replace. - Default is '*'. - :vartype mask: str - :ivar model_version: The version of the model to use when calling the Text Analytics service. - It will default to the latest available when not specified. We recommend you do not specify - this value unless absolutely necessary. - :vartype model_version: str - :ivar pii_categories: A list of PII entity categories that should be extracted and masked. - :vartype pii_categories: list[str] - :ivar domain: If specified, will set the PII domain to include only a subset of the entity - categories. 
Possible values include: 'phi', 'none'. Default is 'none'. - :vartype domain: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - "minimum_precision": {"maximum": 1, "minimum": 0}, - "mask": {"max_length": 1}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "minimum_precision": {"key": "minimumPrecision", "type": "float"}, - "masking_mode": {"key": "maskingMode", "type": "str"}, - "mask": {"key": "maskingCharacter", "type": "str"}, - "model_version": {"key": "modelVersion", "type": "str"}, - "pii_categories": {"key": "piiCategories", "type": "[str]"}, - "domain": {"key": "domain", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[str] = None, - minimum_precision: Optional[float] = None, - masking_mode: Optional[Union[str, "_models.PIIDetectionSkillMaskingMode"]] = None, - mask: Optional[str] = None, - model_version: Optional[str] = None, - pii_categories: Optional[List[str]] = None, - domain: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. - :paramtype default_language_code: str - :keyword minimum_precision: A value between 0 and 1 that be used to only include entities whose - confidence score is greater than the value specified. If not set (default), or if explicitly - set to null, all entities will be included. - :paramtype minimum_precision: float - :keyword masking_mode: A parameter that provides various ways to mask the personal information - detected in the input text. Default is 'none'. Known values are: "none" and "replace". 
- :paramtype masking_mode: str or - ~azure.search.documents.indexes.models.PIIDetectionSkillMaskingMode - :keyword mask: The character used to mask the text if the maskingMode parameter is set to - replace. Default is '*'. - :paramtype mask: str - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str - :keyword pii_categories: A list of PII entity categories that should be extracted and masked. - :paramtype pii_categories: list[str] - :keyword domain: If specified, will set the PII domain to include only a subset of the entity - categories. Possible values include: 'phi', 'none'. Default is 'none'. - :paramtype domain: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.PIIDetectionSkill" - self.default_language_code = default_language_code - self.minimum_precision = minimum_precision - self.masking_mode = masking_mode - self.mask = mask - self.model_version = model_version - self.pii_categories = pii_categories - self.domain = domain - - -class RequestOptions(_serialization.Model): - """Parameter group. - - :ivar x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :vartype x_ms_client_request_id: str - """ - - _attribute_map = { - "x_ms_client_request_id": {"key": "x-ms-client-request-id", "type": "str"}, - } - - def __init__(self, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :paramtype x_ms_client_request_id: str - """ - super().__init__(**kwargs) - self.x_ms_client_request_id = x_ms_client_request_id - - -class RescoringOptions(_serialization.Model): - """Contains the options for rescoring. - - :ivar enable_rescoring: If set to true, after the initial search on the compressed vectors, the - similarity scores are recalculated using the full-precision vectors. This will improve recall - at the expense of latency. - :vartype enable_rescoring: bool - :ivar default_oversampling: Default oversampling factor. Oversampling retrieves a greater set - of potential documents to offset the resolution loss due to quantization. This increases the - set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no - oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values - improve recall at the expense of latency. - :vartype default_oversampling: float - :ivar rescore_storage_method: Controls the storage method for original vectors. This setting is - immutable. Known values are: "preserveOriginals" and "discardOriginals". 
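# A minimal PIIDetectionSkill configuration using the keyword arguments documented
# above, assuming InputFieldMappingEntry and OutputFieldMappingEntry are available
# from azure.search.documents.indexes.models; the field paths are hypothetical.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    PIIDetectionSkill,
)

pii_skill = PIIDetectionSkill(
    name="#1",
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="maskedText", target_name="redactedContent")],
    default_language_code="en",
    minimum_precision=0.5,   # only keep entities with confidence above 0.5
    masking_mode="replace",  # mask the detected PII in the output text
    mask="*",                # single masking character (max_length is 1)
)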
- :vartype rescore_storage_method: str or - ~azure.search.documents.indexes.models.VectorSearchCompressionRescoreStorageMethod - """ - - _attribute_map = { - "enable_rescoring": {"key": "enableRescoring", "type": "bool"}, - "default_oversampling": {"key": "defaultOversampling", "type": "float"}, - "rescore_storage_method": {"key": "rescoreStorageMethod", "type": "str"}, - } - - def __init__( - self, - *, - enable_rescoring: bool = True, - default_oversampling: Optional[float] = None, - rescore_storage_method: Optional[Union[str, "_models.VectorSearchCompressionRescoreStorageMethod"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword enable_rescoring: If set to true, after the initial search on the compressed vectors, - the similarity scores are recalculated using the full-precision vectors. This will improve - recall at the expense of latency. - :paramtype enable_rescoring: bool - :keyword default_oversampling: Default oversampling factor. Oversampling retrieves a greater - set of potential documents to offset the resolution loss due to quantization. This increases - the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning - no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher - values improve recall at the expense of latency. - :paramtype default_oversampling: float - :keyword rescore_storage_method: Controls the storage method for original vectors. This setting - is immutable. Known values are: "preserveOriginals" and "discardOriginals". - :paramtype rescore_storage_method: str or - ~azure.search.documents.indexes.models.VectorSearchCompressionRescoreStorageMethod - """ - super().__init__(**kwargs) - self.enable_rescoring = enable_rescoring - self.default_oversampling = default_oversampling - self.rescore_storage_method = rescore_storage_method - - -class ResourceCounter(_serialization.Model): - """Represents a resource's usage and quota. - - All required parameters must be populated in order to send to server. - - :ivar usage: The resource usage amount. Required. - :vartype usage: int - :ivar quota: The resource amount quota. - :vartype quota: int - """ - - _validation = { - "usage": {"required": True}, - } - - _attribute_map = { - "usage": {"key": "usage", "type": "int"}, - "quota": {"key": "quota", "type": "int"}, - } - - def __init__(self, *, usage: int, quota: Optional[int] = None, **kwargs: Any) -> None: - """ - :keyword usage: The resource usage amount. Required. - :paramtype usage: int - :keyword quota: The resource amount quota. - :paramtype quota: int - """ - super().__init__(**kwargs) - self.usage = usage - self.quota = quota - - -class ScalarQuantizationCompression(VectorSearchCompression): - """Contains configuration options specific to the scalar quantization compression method used - during indexing and querying. - - All required parameters must be populated in order to send to server. - - :ivar compression_name: The name to associate with this particular configuration. Required. - :vartype compression_name: str - :ivar kind: The name of the kind of compression method being configured for use with vector - search. Required. Known values are: "scalarQuantization" and "binaryQuantization". 
- :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchCompressionKind - :ivar rerank_with_original_vectors: If set to true, once the ordered set of results calculated - using compressed vectors are obtained, they will be reranked again by recalculating the - full-precision similarity scores. This will improve recall at the expense of latency. - :vartype rerank_with_original_vectors: bool - :ivar default_oversampling: Default oversampling factor. Oversampling will internally request - more documents (specified by this multiplier) in the initial search. This increases the set of - results that will be reranked using recomputed similarity scores from full-precision vectors. - Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :vartype default_oversampling: float - :ivar rescoring_options: Contains the options for rescoring. - :vartype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions - :ivar truncation_dimension: The number of dimensions to truncate the vectors to. Truncating the - vectors reduces the size of the vectors and the amount of data that needs to be transferred - during search. This can save storage cost and improve search performance at the expense of - recall. It should be only used for embeddings trained with Matryoshka Representation Learning - (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no - truncation. - :vartype truncation_dimension: int - :ivar parameters: Contains the parameters specific to Scalar Quantization. - :vartype parameters: ~azure.search.documents.indexes.models.ScalarQuantizationParameters - """ - - _validation = { - "compression_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "compression_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "rerank_with_original_vectors": {"key": "rerankWithOriginalVectors", "type": "bool"}, - "default_oversampling": {"key": "defaultOversampling", "type": "float"}, - "rescoring_options": {"key": "rescoringOptions", "type": "RescoringOptions"}, - "truncation_dimension": {"key": "truncationDimension", "type": "int"}, - "parameters": {"key": "scalarQuantizationParameters", "type": "ScalarQuantizationParameters"}, - } - - def __init__( - self, - *, - compression_name: str, - rerank_with_original_vectors: bool = True, - default_oversampling: Optional[float] = None, - rescoring_options: Optional["_models.RescoringOptions"] = None, - truncation_dimension: Optional[int] = None, - parameters: Optional["_models.ScalarQuantizationParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword compression_name: The name to associate with this particular configuration. Required. - :paramtype compression_name: str - :keyword rerank_with_original_vectors: If set to true, once the ordered set of results - calculated using compressed vectors are obtained, they will be reranked again by recalculating - the full-precision similarity scores. This will improve recall at the expense of latency. - :paramtype rerank_with_original_vectors: bool - :keyword default_oversampling: Default oversampling factor. Oversampling will internally - request more documents (specified by this multiplier) in the initial search. This increases the - set of results that will be reranked using recomputed similarity scores from full-precision - vectors. 
Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when - rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. - :paramtype default_oversampling: float - :keyword rescoring_options: Contains the options for rescoring. - :paramtype rescoring_options: ~azure.search.documents.indexes.models.RescoringOptions - :keyword truncation_dimension: The number of dimensions to truncate the vectors to. Truncating - the vectors reduces the size of the vectors and the amount of data that needs to be transferred - during search. This can save storage cost and improve search performance at the expense of - recall. It should be only used for embeddings trained with Matryoshka Representation Learning - (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no - truncation. - :paramtype truncation_dimension: int - :keyword parameters: Contains the parameters specific to Scalar Quantization. - :paramtype parameters: ~azure.search.documents.indexes.models.ScalarQuantizationParameters - """ - super().__init__( - compression_name=compression_name, - rerank_with_original_vectors=rerank_with_original_vectors, - default_oversampling=default_oversampling, - rescoring_options=rescoring_options, - truncation_dimension=truncation_dimension, - **kwargs - ) - self.kind: str = "scalarQuantization" - self.parameters = parameters - - -class ScalarQuantizationParameters(_serialization.Model): - """Contains the parameters specific to Scalar Quantization. - - :ivar quantized_data_type: The quantized data type of compressed vector values. "int8" - :vartype quantized_data_type: str or - ~azure.search.documents.indexes.models.VectorSearchCompressionTarget - """ - - _attribute_map = { - "quantized_data_type": {"key": "quantizedDataType", "type": "str"}, - } - - def __init__( - self, - *, - quantized_data_type: Optional[Union[str, "_models.VectorSearchCompressionTarget"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword quantized_data_type: The quantized data type of compressed vector values. "int8" - :paramtype quantized_data_type: str or - ~azure.search.documents.indexes.models.VectorSearchCompressionTarget - """ - super().__init__(**kwargs) - self.quantized_data_type = quantized_data_type - - -class ScoringProfile(_serialization.Model): - """Defines parameters for a search index that influence scoring in search queries. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the scoring profile. Required. - :vartype name: str - :ivar text_weights: Parameters that boost scoring based on text matches in certain index - fields. - :vartype text_weights: ~azure.search.documents.indexes.models.TextWeights - :ivar functions: The collection of functions that influence the scoring of documents. - :vartype functions: list[~azure.search.documents.indexes.models.ScoringFunction] - :ivar function_aggregation: A value indicating how the results of individual scoring functions - should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Known values - are: "sum", "average", "minimum", "maximum", and "firstMatching". 
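# A sketch combining the rescoring and scalar-quantization options above, assuming
# these models are exported from azure.search.documents.indexes.models; the
# configuration name is hypothetical.
from azure.search.documents.indexes.models import (
    RescoringOptions,
    ScalarQuantizationCompression,
    ScalarQuantizationParameters,
)

compression = ScalarQuantizationCompression(
    compression_name="sq-with-rescoring",
    parameters=ScalarQuantizationParameters(quantized_data_type="int8"),
    rescoring_options=RescoringOptions(
        enable_rescoring=True,     # rescore candidates with full-precision vectors
        default_oversampling=4.0,  # retrieve 4x candidates before rescoring
        rescore_storage_method="preserveOriginals",
    ),
)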
- :vartype function_aggregation: str or - ~azure.search.documents.indexes.models.ScoringFunctionAggregation - """ - - _validation = { - "name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "text_weights": {"key": "text", "type": "TextWeights"}, - "functions": {"key": "functions", "type": "[ScoringFunction]"}, - "function_aggregation": {"key": "functionAggregation", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - text_weights: Optional["_models.TextWeights"] = None, - functions: Optional[List["_models.ScoringFunction"]] = None, - function_aggregation: Optional[Union[str, "_models.ScoringFunctionAggregation"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the scoring profile. Required. - :paramtype name: str - :keyword text_weights: Parameters that boost scoring based on text matches in certain index - fields. - :paramtype text_weights: ~azure.search.documents.indexes.models.TextWeights - :keyword functions: The collection of functions that influence the scoring of documents. - :paramtype functions: list[~azure.search.documents.indexes.models.ScoringFunction] - :keyword function_aggregation: A value indicating how the results of individual scoring - functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. - Known values are: "sum", "average", "minimum", "maximum", and "firstMatching". - :paramtype function_aggregation: str or - ~azure.search.documents.indexes.models.ScoringFunctionAggregation - """ - super().__init__(**kwargs) - self.name = name - self.text_weights = text_weights - self.functions = functions - self.function_aggregation = function_aggregation - - -class SearchAlias(_serialization.Model): - """Represents an index alias, which describes a mapping from the alias name to an index. The alias - name can be used in place of the index name for supported operations. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the alias. Required. - :vartype name: str - :ivar indexes: The name of the index this alias maps to. Only one index name may be specified. - Required. - :vartype indexes: list[str] - :ivar e_tag: The ETag of the alias. - :vartype e_tag: str - """ - - _validation = { - "name": {"required": True}, - "indexes": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "indexes": {"key": "indexes", "type": "[str]"}, - "e_tag": {"key": "@odata\\.etag", "type": "str"}, - } - - def __init__(self, *, name: str, indexes: List[str], e_tag: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword name: The name of the alias. Required. - :paramtype name: str - :keyword indexes: The name of the index this alias maps to. Only one index name may be - specified. Required. - :paramtype indexes: list[str] - :keyword e_tag: The ETag of the alias. - :paramtype e_tag: str - """ - super().__init__(**kwargs) - self.name = name - self.indexes = indexes - self.e_tag = e_tag - - -class SearchField(_serialization.Model): - """Represents a field in an index definition, which describes the name, data type, and search - behavior of a field. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the field, which must be unique within the fields collection of the - index or parent field. Required. - :vartype name: str - :ivar type: The data type of the field. Required. 
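# A brief sketch of a scoring profile and an index alias built from the models
# documented above, assuming ScoringProfile, TextWeights and SearchAlias are
# exported from azure.search.documents.indexes.models; field and index names are
# hypothetical.
from azure.search.documents.indexes.models import ScoringProfile, SearchAlias, TextWeights

# Boost matches in "title" more strongly than in "description".
title_boost = ScoringProfile(
    name="titleBoost",
    text_weights=TextWeights(weights={"title": 5.0, "description": 1.5}),
    function_aggregation="sum",
)

# An alias maps to exactly one index; re-pointing it lets queries switch indexes
# without client changes.
live_alias = SearchAlias(name="hotels-live", indexes=["hotels-v2"])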
Known values are: "Edm.String", "Edm.Int32", - "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", "Edm.GeographyPoint", - "Edm.ComplexType", "Edm.Single", "Edm.Half", "Edm.Int16", "Edm.SByte", and "Edm.Byte". - :vartype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType - :ivar key: A value indicating whether the field uniquely identifies documents in the index. - Exactly one top-level field in each index must be chosen as the key field and it must be of - type Edm.String. Key fields can be used to look up documents directly and update or delete - specific documents. Default is false for simple fields and null for complex fields. - :vartype key: bool - :ivar retrievable: A value indicating whether the field can be returned in a search result. You - can disable this option if you want to use a field (for example, margin) as a filter, sorting, - or scoring mechanism but do not want the field to be visible to the end user. This property - must be true for key fields, and it must be null for complex fields. This property can be - changed on existing fields. Enabling this property does not cause any increase in index storage - requirements. Default is true for simple fields, false for vector fields, and null for complex - fields. - :vartype retrievable: bool - :ivar stored: An immutable value indicating whether the field will be persisted separately on - disk to be returned in a search result. You can disable this option if you don't plan to return - the field contents in a search response to save on storage overhead. This can only be set - during index creation and only for vector fields. This property cannot be changed for existing - fields or set as false for new fields. If this property is set as false, the property - 'retrievable' must also be set to false. This property must be true or unset for key fields, - for new fields, and for non-vector fields, and it must be null for complex fields. Disabling - this property will reduce index storage requirements. The default is true for vector fields. - :vartype stored: bool - :ivar searchable: A value indicating whether the field is full-text searchable. This means it - will undergo analysis such as word-breaking during indexing. If you set a searchable field to a - value like "sunny day", internally it will be split into the individual tokens "sunny" and - "day". This enables full-text searches for these terms. Fields of type Edm.String or - Collection(Edm.String) are searchable by default. This property must be false for simple fields - of other non-string data types, and it must be null for complex fields. Note: searchable fields - consume extra space in your index to accommodate additional tokenized versions of the field - value for full-text searches. If you want to save space in your index and you don't need a - field to be included in searches, set searchable to false. - :vartype searchable: bool - :ivar filterable: A value indicating whether to enable the field to be referenced in $filter - queries. filterable differs from searchable in how strings are handled. Fields of type - Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so - comparisons are for exact matches only. For example, if you set such a field f to "sunny day", - $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property - must be null for complex fields. Default is true for simple fields and null for complex fields. 
- :vartype filterable: bool - :ivar sortable: A value indicating whether to enable the field to be referenced in $orderby - expressions. By default, the search engine sorts results by score, but in many experiences - users will want to sort by fields in the documents. A simple field can be sortable only if it - is single-valued (it has a single value in the scope of the parent document). Simple collection - fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex - collections are also multi-valued, and therefore cannot be sortable. This is true whether it's - an immediate parent field, or an ancestor field, that's the complex collection. Complex fields - cannot be sortable and the sortable property must be null for such fields. The default for - sortable is true for single-valued simple fields, false for multi-valued simple fields, and - null for complex fields. - :vartype sortable: bool - :ivar facetable: A value indicating whether to enable the field to be referenced in facet - queries. Typically used in a presentation of search results that includes hit count by category - (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so - on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or - Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - fields. - :vartype facetable: bool - :ivar analyzer: The name of the analyzer to use for the field. This option can be used only - with searchable fields and it can't be set together with either searchAnalyzer or - indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. Known values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", - "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", - "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", - "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", - "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", - "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", - "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", - "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", - "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", - "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", - "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". - :vartype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :ivar search_analyzer: The name of the analyzer used at search time for the field. This option - can be used only with searchable fields. It must be set together with indexAnalyzer and it - cannot be set together with the analyzer option. 
This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. This - analyzer can be updated on an existing field. Must be null for complex fields. Known values - are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", - "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", - "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", - "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", - "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", - "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", - "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", - "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", - "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", - "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", - "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". - :vartype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :ivar index_analyzer: The name of the analyzer used at indexing time for the field. This option - can be used only with searchable fields. It must be set together with searchAnalyzer and it - cannot be set together with the analyzer option. This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. Once the - analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. 
Known - values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", - "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", - "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", - "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", - "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", - "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", - "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", - "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", - "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", - "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", - "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". - :vartype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :ivar normalizer: The name of the normalizer to use for the field. This option can be used only - with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it - cannot be changed for the field. Must be null for complex fields. Known values are: - "asciifolding", "elision", "lowercase", "standard", and "uppercase". - :vartype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :ivar vector_search_dimensions: The dimensionality of the vector field. - :vartype vector_search_dimensions: int - :ivar vector_search_profile_name: The name of the vector search profile that specifies the - algorithm and vectorizer to use when searching the vector field. - :vartype vector_search_profile_name: str - :ivar vector_encoding_format: The encoding format to interpret the field contents. "packedBit" - :vartype vector_encoding_format: str or - ~azure.search.documents.indexes.models.VectorEncodingFormat - :ivar synonym_maps: A list of the names of synonym maps to associate with this field. This - option can be used only with searchable fields. Currently only one synonym map per field is - supported. Assigning a synonym map to a field ensures that query terms targeting that field are - expanded at query-time using the rules in the synonym map. This attribute can be changed on - existing fields. Must be null or an empty collection for complex fields. - :vartype synonym_maps: list[str] - :ivar fields: A list of sub-fields if this is a field of type Edm.ComplexType or - Collection(Edm.ComplexType). Must be null or empty for simple fields. 
- :vartype fields: list[~azure.search.documents.indexes.models.SearchField] - """ - - _validation = { - "name": {"required": True}, - "type": {"required": True}, - "vector_search_dimensions": {"maximum": 2048, "minimum": 2}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "type": {"key": "type", "type": "str"}, - "key": {"key": "key", "type": "bool"}, - "retrievable": {"key": "retrievable", "type": "bool"}, - "stored": {"key": "stored", "type": "bool"}, - "searchable": {"key": "searchable", "type": "bool"}, - "filterable": {"key": "filterable", "type": "bool"}, - "sortable": {"key": "sortable", "type": "bool"}, - "facetable": {"key": "facetable", "type": "bool"}, - "analyzer": {"key": "analyzer", "type": "str"}, - "search_analyzer": {"key": "searchAnalyzer", "type": "str"}, - "index_analyzer": {"key": "indexAnalyzer", "type": "str"}, - "normalizer": {"key": "normalizer", "type": "str"}, - "vector_search_dimensions": {"key": "dimensions", "type": "int"}, - "vector_search_profile_name": {"key": "vectorSearchProfile", "type": "str"}, - "vector_encoding_format": {"key": "vectorEncoding", "type": "str"}, - "synonym_maps": {"key": "synonymMaps", "type": "[str]"}, - "fields": {"key": "fields", "type": "[SearchField]"}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "_models.SearchFieldDataType"], - key: Optional[bool] = None, - retrievable: Optional[bool] = None, - stored: Optional[bool] = None, - searchable: Optional[bool] = None, - filterable: Optional[bool] = None, - sortable: Optional[bool] = None, - facetable: Optional[bool] = None, - analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, - search_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, - index_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = None, - normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = None, - vector_search_dimensions: Optional[int] = None, - vector_search_profile_name: Optional[str] = None, - vector_encoding_format: Optional[Union[str, "_models.VectorEncodingFormat"]] = None, - synonym_maps: Optional[List[str]] = None, - fields: Optional[List["_models.SearchField"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the field, which must be unique within the fields collection of the - index or parent field. Required. - :paramtype name: str - :keyword type: The data type of the field. Required. Known values are: "Edm.String", - "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", - "Edm.GeographyPoint", "Edm.ComplexType", "Edm.Single", "Edm.Half", "Edm.Int16", "Edm.SByte", - and "Edm.Byte". - :paramtype type: str or ~azure.search.documents.indexes.models.SearchFieldDataType - :keyword key: A value indicating whether the field uniquely identifies documents in the index. - Exactly one top-level field in each index must be chosen as the key field and it must be of - type Edm.String. Key fields can be used to look up documents directly and update or delete - specific documents. Default is false for simple fields and null for complex fields. - :paramtype key: bool - :keyword retrievable: A value indicating whether the field can be returned in a search result. - You can disable this option if you want to use a field (for example, margin) as a filter, - sorting, or scoring mechanism but do not want the field to be visible to the end user. This - property must be true for key fields, and it must be null for complex fields. This property can - be changed on existing fields. 
Enabling this property does not cause any increase in index - storage requirements. Default is true for simple fields, false for vector fields, and null for - complex fields. - :paramtype retrievable: bool - :keyword stored: An immutable value indicating whether the field will be persisted separately - on disk to be returned in a search result. You can disable this option if you don't plan to - return the field contents in a search response to save on storage overhead. This can only be - set during index creation and only for vector fields. This property cannot be changed for - existing fields or set as false for new fields. If this property is set as false, the property - 'retrievable' must also be set to false. This property must be true or unset for key fields, - for new fields, and for non-vector fields, and it must be null for complex fields. Disabling - this property will reduce index storage requirements. The default is true for vector fields. - :paramtype stored: bool - :keyword searchable: A value indicating whether the field is full-text searchable. This means - it will undergo analysis such as word-breaking during indexing. If you set a searchable field - to a value like "sunny day", internally it will be split into the individual tokens "sunny" and - "day". This enables full-text searches for these terms. Fields of type Edm.String or - Collection(Edm.String) are searchable by default. This property must be false for simple fields - of other non-string data types, and it must be null for complex fields. Note: searchable fields - consume extra space in your index to accommodate additional tokenized versions of the field - value for full-text searches. If you want to save space in your index and you don't need a - field to be included in searches, set searchable to false. - :paramtype searchable: bool - :keyword filterable: A value indicating whether to enable the field to be referenced in $filter - queries. filterable differs from searchable in how strings are handled. Fields of type - Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so - comparisons are for exact matches only. For example, if you set such a field f to "sunny day", - $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property - must be null for complex fields. Default is true for simple fields and null for complex fields. - :paramtype filterable: bool - :keyword sortable: A value indicating whether to enable the field to be referenced in $orderby - expressions. By default, the search engine sorts results by score, but in many experiences - users will want to sort by fields in the documents. A simple field can be sortable only if it - is single-valued (it has a single value in the scope of the parent document). Simple collection - fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex - collections are also multi-valued, and therefore cannot be sortable. This is true whether it's - an immediate parent field, or an ancestor field, that's the complex collection. Complex fields - cannot be sortable and the sortable property must be null for such fields. The default for - sortable is true for single-valued simple fields, false for multi-valued simple fields, and - null for complex fields. - :paramtype sortable: bool - :keyword facetable: A value indicating whether to enable the field to be referenced in facet - queries. 
Typically used in a presentation of search results that includes hit count by category - (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so - on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or - Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - fields. - :paramtype facetable: bool - :keyword analyzer: The name of the analyzer to use for the field. This option can be used only - with searchable fields and it can't be set together with either searchAnalyzer or - indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. Known values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", - "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", - "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", - "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", - "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", - "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", - "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", - "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", - "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", - "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", - "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". - :paramtype analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword search_analyzer: The name of the analyzer used at search time for the field. This - option can be used only with searchable fields. It must be set together with indexAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. This - analyzer can be updated on an existing field. Must be null for complex fields. 
Known values - are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", - "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", - "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", - "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", - "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", - "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", - "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", - "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", - "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", - "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", - "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". - :paramtype search_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword index_analyzer: The name of the analyzer used at indexing time for the field. This - option can be used only with searchable fields. It must be set together with searchAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. Once - the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. - Known values are: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", - "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", - "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", - "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", - "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", - "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", - "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", - "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", - "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", - "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", - "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", - "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", - "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", - "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", - "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", - "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", and - "whitespace". 
- :paramtype index_analyzer: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName - :keyword normalizer: The name of the normalizer to use for the field. This option can be used - only with fields with filterable, sortable, or facetable enabled. Once the normalizer is - chosen, it cannot be changed for the field. Must be null for complex fields. Known values are: - "asciifolding", "elision", "lowercase", "standard", and "uppercase". - :paramtype normalizer: str or ~azure.search.documents.indexes.models.LexicalNormalizerName - :keyword vector_search_dimensions: The dimensionality of the vector field. - :paramtype vector_search_dimensions: int - :keyword vector_search_profile_name: The name of the vector search profile that specifies the - algorithm and vectorizer to use when searching the vector field. - :paramtype vector_search_profile_name: str - :keyword vector_encoding_format: The encoding format to interpret the field contents. - "packedBit" - :paramtype vector_encoding_format: str or - ~azure.search.documents.indexes.models.VectorEncodingFormat - :keyword synonym_maps: A list of the names of synonym maps to associate with this field. This - option can be used only with searchable fields. Currently only one synonym map per field is - supported. Assigning a synonym map to a field ensures that query terms targeting that field are - expanded at query-time using the rules in the synonym map. This attribute can be changed on - existing fields. Must be null or an empty collection for complex fields. - :paramtype synonym_maps: list[str] - :keyword fields: A list of sub-fields if this is a field of type Edm.ComplexType or - Collection(Edm.ComplexType). Must be null or empty for simple fields. - :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] - """ - super().__init__(**kwargs) - self.name = name - self.type = type - self.key = key - self.retrievable = retrievable - self.stored = stored - self.searchable = searchable - self.filterable = filterable - self.sortable = sortable - self.facetable = facetable - self.analyzer = analyzer - self.search_analyzer = search_analyzer - self.index_analyzer = index_analyzer - self.normalizer = normalizer - self.vector_search_dimensions = vector_search_dimensions - self.vector_search_profile_name = vector_search_profile_name - self.vector_encoding_format = vector_encoding_format - self.synonym_maps = synonym_maps - self.fields = fields - - -class SearchIndex(_serialization.Model): - """Represents a search index definition, which describes the fields and search behavior of an - index. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the index. Required. - :vartype name: str - :ivar fields: The fields of the index. Required. - :vartype fields: list[~azure.search.documents.indexes.models.SearchField] - :ivar scoring_profiles: The scoring profiles for the index. - :vartype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] - :ivar default_scoring_profile: The name of the scoring profile to use if none is specified in - the query. If this property is not set and no scoring profile is specified in the query, then - default scoring (tf-idf) will be used. - :vartype default_scoring_profile: str - :ivar cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :vartype cors_options: ~azure.search.documents.indexes.models.CorsOptions - :ivar suggesters: The suggesters for the index. 
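# A sketch of field definitions using the generated SearchField keyword arguments
# documented above (key/searchable/analyzer); the public hand-written wrapper may
# expose different keyword names, and the field and profile names here are
# hypothetical.
from azure.search.documents.indexes.models import SearchField

fields = [
    SearchField(name="hotelId", type="Edm.String", key=True, filterable=True),
    SearchField(name="description", type="Edm.String", searchable=True, analyzer="en.lucene"),
    SearchField(
        name="descriptionVector",
        type="Collection(Edm.Single)",      # vector field
        searchable=True,
        vector_search_dimensions=1536,
        vector_search_profile_name="my-vector-profile",
    ),
]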
- :vartype suggesters: list[~azure.search.documents.indexes.models.SearchSuggester] - :ivar analyzers: The analyzers for the index. - :vartype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] - :ivar tokenizers: The tokenizers for the index. - :vartype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] - :ivar token_filters: The token filters for the index. - :vartype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] - :ivar char_filters: The character filters for the index. - :vartype char_filters: list[~azure.search.documents.indexes.models.CharFilter] - :ivar normalizers: The normalizers for the index. - :vartype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] - :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data. Once you have - encrypted your data, it will always remain encrypted. The search service will ignore attempts - to set this property to null. You can change this property as needed if you want to rotate your - encryption key; Your data will be unaffected. Encryption with customer-managed keys is not - available for free search services, and is only available for paid services created on or after - January 1, 2019. - :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :ivar similarity: The type of similarity algorithm to be used when scoring and ranking the - documents matching a search query. The similarity algorithm can only be defined at index - creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity - algorithm is used. - :vartype similarity: ~azure.search.documents.indexes.models.SimilarityAlgorithm - :ivar semantic_search: Defines parameters for a search index that influence semantic - capabilities. - :vartype semantic_search: ~azure.search.documents.indexes.models.SemanticSearch - :ivar vector_search: Contains configuration options related to vector search. - :vartype vector_search: ~azure.search.documents.indexes.models.VectorSearch - :ivar e_tag: The ETag of the index. 
- :vartype e_tag: str - """ - - _validation = { - "name": {"required": True}, - "fields": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "fields": {"key": "fields", "type": "[SearchField]"}, - "scoring_profiles": {"key": "scoringProfiles", "type": "[ScoringProfile]"}, - "default_scoring_profile": {"key": "defaultScoringProfile", "type": "str"}, - "cors_options": {"key": "corsOptions", "type": "CorsOptions"}, - "suggesters": {"key": "suggesters", "type": "[SearchSuggester]"}, - "analyzers": {"key": "analyzers", "type": "[LexicalAnalyzer]"}, - "tokenizers": {"key": "tokenizers", "type": "[LexicalTokenizer]"}, - "token_filters": {"key": "tokenFilters", "type": "[TokenFilter]"}, - "char_filters": {"key": "charFilters", "type": "[CharFilter]"}, - "normalizers": {"key": "normalizers", "type": "[LexicalNormalizer]"}, - "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, - "similarity": {"key": "similarity", "type": "SimilarityAlgorithm"}, - "semantic_search": {"key": "semantic", "type": "SemanticSearch"}, - "vector_search": {"key": "vectorSearch", "type": "VectorSearch"}, - "e_tag": {"key": "@odata\\.etag", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - fields: List["_models.SearchField"], - scoring_profiles: Optional[List["_models.ScoringProfile"]] = None, - default_scoring_profile: Optional[str] = None, - cors_options: Optional["_models.CorsOptions"] = None, - suggesters: Optional[List["_models.SearchSuggester"]] = None, - analyzers: Optional[List["_models.LexicalAnalyzer"]] = None, - tokenizers: Optional[List["_models.LexicalTokenizer"]] = None, - token_filters: Optional[List["_models.TokenFilter"]] = None, - char_filters: Optional[List["_models.CharFilter"]] = None, - normalizers: Optional[List["_models.LexicalNormalizer"]] = None, - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - similarity: Optional["_models.SimilarityAlgorithm"] = None, - semantic_search: Optional["_models.SemanticSearch"] = None, - vector_search: Optional["_models.VectorSearch"] = None, - e_tag: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the index. Required. - :paramtype name: str - :keyword fields: The fields of the index. Required. - :paramtype fields: list[~azure.search.documents.indexes.models.SearchField] - :keyword scoring_profiles: The scoring profiles for the index. - :paramtype scoring_profiles: list[~azure.search.documents.indexes.models.ScoringProfile] - :keyword default_scoring_profile: The name of the scoring profile to use if none is specified - in the query. If this property is not set and no scoring profile is specified in the query, - then default scoring (tf-idf) will be used. - :paramtype default_scoring_profile: str - :keyword cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :paramtype cors_options: ~azure.search.documents.indexes.models.CorsOptions - :keyword suggesters: The suggesters for the index. - :paramtype suggesters: list[~azure.search.documents.indexes.models.SearchSuggester] - :keyword analyzers: The analyzers for the index. - :paramtype analyzers: list[~azure.search.documents.indexes.models.LexicalAnalyzer] - :keyword tokenizers: The tokenizers for the index. - :paramtype tokenizers: list[~azure.search.documents.indexes.models.LexicalTokenizer] - :keyword token_filters: The token filters for the index. 
- :paramtype token_filters: list[~azure.search.documents.indexes.models.TokenFilter] - :keyword char_filters: The character filters for the index. - :paramtype char_filters: list[~azure.search.documents.indexes.models.CharFilter] - :keyword normalizers: The normalizers for the index. - :paramtype normalizers: list[~azure.search.documents.indexes.models.LexicalNormalizer] - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data. Once you have - encrypted your data, it will always remain encrypted. The search service will ignore attempts - to set this property to null. You can change this property as needed if you want to rotate your - encryption key; Your data will be unaffected. Encryption with customer-managed keys is not - available for free search services, and is only available for paid services created on or after - January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword similarity: The type of similarity algorithm to be used when scoring and ranking the - documents matching a search query. The similarity algorithm can only be defined at index - creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity - algorithm is used. - :paramtype similarity: ~azure.search.documents.indexes.models.SimilarityAlgorithm - :keyword semantic_search: Defines parameters for a search index that influence semantic - capabilities. - :paramtype semantic_search: ~azure.search.documents.indexes.models.SemanticSearch - :keyword vector_search: Contains configuration options related to vector search. - :paramtype vector_search: ~azure.search.documents.indexes.models.VectorSearch - :keyword e_tag: The ETag of the index. - :paramtype e_tag: str - """ - super().__init__(**kwargs) - self.name = name - self.fields = fields - self.scoring_profiles = scoring_profiles - self.default_scoring_profile = default_scoring_profile - self.cors_options = cors_options - self.suggesters = suggesters - self.analyzers = analyzers - self.tokenizers = tokenizers - self.token_filters = token_filters - self.char_filters = char_filters - self.normalizers = normalizers - self.encryption_key = encryption_key - self.similarity = similarity - self.semantic_search = semantic_search - self.vector_search = vector_search - self.e_tag = e_tag - - -class SearchIndexer(_serialization.Model): - """Represents an indexer. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the indexer. Required. - :vartype name: str - :ivar description: The description of the indexer. - :vartype description: str - :ivar data_source_name: The name of the datasource from which this indexer reads data. - Required. - :vartype data_source_name: str - :ivar skillset_name: The name of the skillset executing with this indexer. - :vartype skillset_name: str - :ivar target_index_name: The name of the index to which this indexer writes data. Required. - :vartype target_index_name: str - :ivar schedule: The schedule for this indexer. - :vartype schedule: ~azure.search.documents.indexes.models.IndexingSchedule - :ivar parameters: Parameters for indexer execution. 
- :vartype parameters: ~azure.search.documents.indexes.models.IndexingParameters - :ivar field_mappings: Defines mappings between fields in the data source and corresponding - target fields in the index. - :vartype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :ivar output_field_mappings: Output field mappings are applied after enrichment and immediately - before indexing. - :vartype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :ivar is_disabled: A value indicating whether the indexer is disabled. Default is false. - :vartype is_disabled: bool - :ivar e_tag: The ETag of the indexer. - :vartype e_tag: str - :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your indexer - definition (as well as indexer execution status) when you want full assurance that no one, not - even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will - always remain encrypted. The search service will ignore attempts to set this property to null. - You can change this property as needed if you want to rotate your encryption key; Your indexer - definition (and indexer execution status) will be unaffected. Encryption with customer-managed - keys is not available for free search services, and is only available for paid services created - on or after January 1, 2019. - :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :ivar cache: Adds caching to an enrichment pipeline to allow for incremental modification steps - without having to rebuild the index every time. - :vartype cache: ~azure.search.documents.indexes.models.SearchIndexerCache - """ - - _validation = { - "name": {"required": True}, - "data_source_name": {"required": True}, - "target_index_name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "data_source_name": {"key": "dataSourceName", "type": "str"}, - "skillset_name": {"key": "skillsetName", "type": "str"}, - "target_index_name": {"key": "targetIndexName", "type": "str"}, - "schedule": {"key": "schedule", "type": "IndexingSchedule"}, - "parameters": {"key": "parameters", "type": "IndexingParameters"}, - "field_mappings": {"key": "fieldMappings", "type": "[FieldMapping]"}, - "output_field_mappings": {"key": "outputFieldMappings", "type": "[FieldMapping]"}, - "is_disabled": {"key": "disabled", "type": "bool"}, - "e_tag": {"key": "@odata\\.etag", "type": "str"}, - "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, - "cache": {"key": "cache", "type": "SearchIndexerCache"}, - } - - def __init__( - self, - *, - name: str, - data_source_name: str, - target_index_name: str, - description: Optional[str] = None, - skillset_name: Optional[str] = None, - schedule: Optional["_models.IndexingSchedule"] = None, - parameters: Optional["_models.IndexingParameters"] = None, - field_mappings: Optional[List["_models.FieldMapping"]] = None, - output_field_mappings: Optional[List["_models.FieldMapping"]] = None, - is_disabled: bool = False, - e_tag: Optional[str] = None, - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - cache: Optional["_models.SearchIndexerCache"] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the indexer. Required. 
- :paramtype name: str - :keyword description: The description of the indexer. - :paramtype description: str - :keyword data_source_name: The name of the datasource from which this indexer reads data. - Required. - :paramtype data_source_name: str - :keyword skillset_name: The name of the skillset executing with this indexer. - :paramtype skillset_name: str - :keyword target_index_name: The name of the index to which this indexer writes data. Required. - :paramtype target_index_name: str - :keyword schedule: The schedule for this indexer. - :paramtype schedule: ~azure.search.documents.indexes.models.IndexingSchedule - :keyword parameters: Parameters for indexer execution. - :paramtype parameters: ~azure.search.documents.indexes.models.IndexingParameters - :keyword field_mappings: Defines mappings between fields in the data source and corresponding - target fields in the index. - :paramtype field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :keyword output_field_mappings: Output field mappings are applied after enrichment and - immediately before indexing. - :paramtype output_field_mappings: list[~azure.search.documents.indexes.models.FieldMapping] - :keyword is_disabled: A value indicating whether the indexer is disabled. Default is false. - :paramtype is_disabled: bool - :keyword e_tag: The ETag of the indexer. - :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your indexer - definition (as well as indexer execution status) when you want full assurance that no one, not - even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will - always remain encrypted. The search service will ignore attempts to set this property to null. - You can change this property as needed if you want to rotate your encryption key; Your indexer - definition (and indexer execution status) will be unaffected. Encryption with customer-managed - keys is not available for free search services, and is only available for paid services created - on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword cache: Adds caching to an enrichment pipeline to allow for incremental modification - steps without having to rebuild the index every time. - :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache - """ - super().__init__(**kwargs) - self.name = name - self.description = description - self.data_source_name = data_source_name - self.skillset_name = skillset_name - self.target_index_name = target_index_name - self.schedule = schedule - self.parameters = parameters - self.field_mappings = field_mappings - self.output_field_mappings = output_field_mappings - self.is_disabled = is_disabled - self.e_tag = e_tag - self.encryption_key = encryption_key - self.cache = cache - - -class SearchIndexerCache(_serialization.Model): - """SearchIndexerCache. - - :ivar storage_connection_string: The connection string to the storage account where the cache - data will be persisted. - :vartype storage_connection_string: str - :ivar enable_reprocessing: Specifies whether incremental reprocessing is enabled. - :vartype enable_reprocessing: bool - :ivar identity: The user-assigned managed identity used for connections to the enrichment - cache. 
If the connection string indicates an identity (ResourceId) and it's not specified, the - system-assigned managed identity is used. On updates to the indexer, if the identity is - unspecified, the value remains unchanged. If set to "none", the value of this property is - cleared. - :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - - _attribute_map = { - "storage_connection_string": {"key": "storageConnectionString", "type": "str"}, - "enable_reprocessing": {"key": "enableReprocessing", "type": "bool"}, - "identity": {"key": "identity", "type": "SearchIndexerDataIdentity"}, - } - - def __init__( - self, - *, - storage_connection_string: Optional[str] = None, - enable_reprocessing: Optional[bool] = None, - identity: Optional["_models.SearchIndexerDataIdentity"] = None, - **kwargs: Any - ) -> None: - """ - :keyword storage_connection_string: The connection string to the storage account where the - cache data will be persisted. - :paramtype storage_connection_string: str - :keyword enable_reprocessing: Specifies whether incremental reprocessing is enabled. - :paramtype enable_reprocessing: bool - :keyword identity: The user-assigned managed identity used for connections to the enrichment - cache. If the connection string indicates an identity (ResourceId) and it's not specified, the - system-assigned managed identity is used. On updates to the indexer, if the identity is - unspecified, the value remains unchanged. If set to "none", the value of this property is - cleared. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - super().__init__(**kwargs) - self.storage_connection_string = storage_connection_string - self.enable_reprocessing = enable_reprocessing - self.identity = identity - - -class SearchIndexerDataContainer(_serialization.Model): - """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that - will be indexed. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the table or view (for Azure SQL data source) or collection (for - CosmosDB data source) that will be indexed. Required. - :vartype name: str - :ivar query: A query that is applied to this data container. The syntax and meaning of this - parameter is datasource-specific. Not supported by Azure SQL datasources. - :vartype query: str - """ - - _validation = { - "name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "query": {"key": "query", "type": "str"}, - } - - def __init__(self, *, name: str, query: Optional[str] = None, **kwargs: Any) -> None: - """ - :keyword name: The name of the table or view (for Azure SQL data source) or collection (for - CosmosDB data source) that will be indexed. Required. - :paramtype name: str - :keyword query: A query that is applied to this data container. The syntax and meaning of this - parameter is datasource-specific. Not supported by Azure SQL datasources. - :paramtype query: str - """ - super().__init__(**kwargs) - self.name = name - self.query = query - - -class SearchIndexerDataIdentity(_serialization.Model): - """Abstract base type for data identities. - - You probably want to use the sub-classes and not this class directly. Known sub-classes are: - SearchIndexerDataNoneIdentity, SearchIndexerDataUserAssignedIdentity - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of identity. Required. 
- :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - _subtype_map = { - "odata_type": { - "#Microsoft.Azure.Search.DataNoneIdentity": "SearchIndexerDataNoneIdentity", - "#Microsoft.Azure.Search.DataUserAssignedIdentity": "SearchIndexerDataUserAssignedIdentity", - } - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: Optional[str] = None - - -class SearchIndexerDataNoneIdentity(SearchIndexerDataIdentity): - """Clears the identity property of a datasource. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of identity. Required. - :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.DataNoneIdentity" - - -class SearchIndexerDataSource(_serialization.Model): - """Represents a datasource definition, which can be used to configure an indexer. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the datasource. Required. - :vartype name: str - :ivar description: The description of the datasource. - :vartype description: str - :ivar type: The type of the datasource. Required. Known values are: "azuresql", "cosmosdb", - "azureblob", "azuretable", "mysql", "adlsgen2", and "onelake". - :vartype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType - :ivar credentials: Credentials for the datasource. Required. - :vartype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials - :ivar container: The data container for the datasource. Required. - :vartype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer - :ivar identity: An explicit managed identity to use for this datasource. If not specified and - the connection string is a managed identity, the system-assigned managed identity is used. If - not specified, the value remains unchanged. If "none" is specified, the value of this property - is cleared. - :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar data_change_detection_policy: The data change detection policy for the datasource. - :vartype data_change_detection_policy: - ~azure.search.documents.indexes.models.DataChangeDetectionPolicy - :ivar data_deletion_detection_policy: The data deletion detection policy for the datasource. - :vartype data_deletion_detection_policy: - ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy - :ivar e_tag: The ETag of the data source. - :vartype e_tag: str - :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your datasource - definition when you want full assurance that no one, not even Microsoft, can decrypt your data - source definition. Once you have encrypted your data source definition, it will always remain - encrypted. The search service will ignore attempts to set this property to null. You can change - this property as needed if you want to rotate your encryption key; Your datasource definition - will be unaffected. 
Encryption with customer-managed keys is not available for free search - services, and is only available for paid services created on or after January 1, 2019. - :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - """ - - _validation = { - "name": {"required": True}, - "type": {"required": True}, - "credentials": {"required": True}, - "container": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "type": {"key": "type", "type": "str"}, - "credentials": {"key": "credentials", "type": "DataSourceCredentials"}, - "container": {"key": "container", "type": "SearchIndexerDataContainer"}, - "identity": {"key": "identity", "type": "SearchIndexerDataIdentity"}, - "data_change_detection_policy": {"key": "dataChangeDetectionPolicy", "type": "DataChangeDetectionPolicy"}, - "data_deletion_detection_policy": {"key": "dataDeletionDetectionPolicy", "type": "DataDeletionDetectionPolicy"}, - "e_tag": {"key": "@odata\\.etag", "type": "str"}, - "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "_models.SearchIndexerDataSourceType"], - credentials: "_models.DataSourceCredentials", - container: "_models.SearchIndexerDataContainer", - description: Optional[str] = None, - identity: Optional["_models.SearchIndexerDataIdentity"] = None, - data_change_detection_policy: Optional["_models.DataChangeDetectionPolicy"] = None, - data_deletion_detection_policy: Optional["_models.DataDeletionDetectionPolicy"] = None, - e_tag: Optional[str] = None, - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the datasource. Required. - :paramtype name: str - :keyword description: The description of the datasource. - :paramtype description: str - :keyword type: The type of the datasource. Required. Known values are: "azuresql", "cosmosdb", - "azureblob", "azuretable", "mysql", "adlsgen2", and "onelake". - :paramtype type: str or ~azure.search.documents.indexes.models.SearchIndexerDataSourceType - :keyword credentials: Credentials for the datasource. Required. - :paramtype credentials: ~azure.search.documents.indexes.models.DataSourceCredentials - :keyword container: The data container for the datasource. Required. - :paramtype container: ~azure.search.documents.indexes.models.SearchIndexerDataContainer - :keyword identity: An explicit managed identity to use for this datasource. If not specified - and the connection string is a managed identity, the system-assigned managed identity is used. - If not specified, the value remains unchanged. If "none" is specified, the value of this - property is cleared. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :keyword data_change_detection_policy: The data change detection policy for the datasource. - :paramtype data_change_detection_policy: - ~azure.search.documents.indexes.models.DataChangeDetectionPolicy - :keyword data_deletion_detection_policy: The data deletion detection policy for the datasource. - :paramtype data_deletion_detection_policy: - ~azure.search.documents.indexes.models.DataDeletionDetectionPolicy - :keyword e_tag: The ETag of the data source. - :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. 
- This key is used to provide an additional level of encryption-at-rest for your datasource - definition when you want full assurance that no one, not even Microsoft, can decrypt your data - source definition. Once you have encrypted your data source definition, it will always remain - encrypted. The search service will ignore attempts to set this property to null. You can change - this property as needed if you want to rotate your encryption key; Your datasource definition - will be unaffected. Encryption with customer-managed keys is not available for free search - services, and is only available for paid services created on or after January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - """ - super().__init__(**kwargs) - self.name = name - self.description = description - self.type = type - self.credentials = credentials - self.container = container - self.identity = identity - self.data_change_detection_policy = data_change_detection_policy - self.data_deletion_detection_policy = data_deletion_detection_policy - self.e_tag = e_tag - self.encryption_key = encryption_key - - -class SearchIndexerDataUserAssignedIdentity(SearchIndexerDataIdentity): - """Specifies the identity for a datasource to use. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of identity. Required. - :vartype odata_type: str - :ivar resource_id: The fully qualified Azure resource Id of a user assigned managed identity - typically in the form - "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" # pylint: disable=line-too-long - that should have been assigned to the search service. Required. - :vartype resource_id: str - """ - - _validation = { - "odata_type": {"required": True}, - "resource_id": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "resource_id": {"key": "userAssignedIdentity", "type": "str"}, - } - - def __init__(self, *, resource_id: str, **kwargs: Any) -> None: - """ - :keyword resource_id: The fully qualified Azure resource Id of a user assigned managed identity - typically in the form - "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" # pylint: disable=line-too-long - that should have been assigned to the search service. Required. - :paramtype resource_id: str - """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.DataUserAssignedIdentity" - self.resource_id = resource_id - - -class SearchIndexerError(_serialization.Model): - """Represents an item- or document-level indexing error. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar key: The key of the item for which indexing failed. - :vartype key: str - :ivar error_message: The message describing the error that occurred while processing the item. - Required. - :vartype error_message: str - :ivar status_code: The status code indicating why the indexing operation failed. Possible - values include: 400 for a malformed input document, 404 for document not found, 409 for a - version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is - too busy. Required. 
- :vartype status_code: int - :ivar name: The name of the source at which the error originated. For example, this could refer - to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the error to assist in debugging the indexer. - This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This - may not be always available. - :vartype documentation_link: str - """ - - _validation = { - "key": {"readonly": True}, - "error_message": {"required": True, "readonly": True}, - "status_code": {"required": True, "readonly": True}, - "name": {"readonly": True}, - "details": {"readonly": True}, - "documentation_link": {"readonly": True}, - } - - _attribute_map = { - "key": {"key": "key", "type": "str"}, - "error_message": {"key": "errorMessage", "type": "str"}, - "status_code": {"key": "statusCode", "type": "int"}, - "name": {"key": "name", "type": "str"}, - "details": {"key": "details", "type": "str"}, - "documentation_link": {"key": "documentationLink", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.key = None - self.error_message = None - self.status_code = None - self.name = None - self.details = None - self.documentation_link = None - - -class SearchIndexerIndexProjection(_serialization.Model): - """Definition of additional projections to secondary search indexes. - - All required parameters must be populated in order to send to server. - - :ivar selectors: A list of projections to be performed to secondary search indexes. Required. - :vartype selectors: - list[~azure.search.documents.indexes.models.SearchIndexerIndexProjectionSelector] - :ivar parameters: A dictionary of index projection-specific configuration properties. Each name - is the name of a specific property. Each value must be of a primitive type. - :vartype parameters: - ~azure.search.documents.indexes.models.SearchIndexerIndexProjectionsParameters - """ - - _validation = { - "selectors": {"required": True}, - } - - _attribute_map = { - "selectors": {"key": "selectors", "type": "[SearchIndexerIndexProjectionSelector]"}, - "parameters": {"key": "parameters", "type": "SearchIndexerIndexProjectionsParameters"}, - } - - def __init__( - self, - *, - selectors: List["_models.SearchIndexerIndexProjectionSelector"], - parameters: Optional["_models.SearchIndexerIndexProjectionsParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword selectors: A list of projections to be performed to secondary search indexes. - Required. - :paramtype selectors: - list[~azure.search.documents.indexes.models.SearchIndexerIndexProjectionSelector] - :keyword parameters: A dictionary of index projection-specific configuration properties. Each - name is the name of a specific property. Each value must be of a primitive type. - :paramtype parameters: - ~azure.search.documents.indexes.models.SearchIndexerIndexProjectionsParameters - """ - super().__init__(**kwargs) - self.selectors = selectors - self.parameters = parameters - - -class SearchIndexerIndexProjectionSelector(_serialization.Model): - """Description for what data to store in the designated search index. - - All required parameters must be populated in order to send to server. - - :ivar target_index_name: Name of the search index to project to. Must have a key field with the - 'keyword' analyzer set. Required. 
- :vartype target_index_name: str - :ivar parent_key_field_name: Name of the field in the search index to map the parent document's - key value to. Must be a string field that is filterable and not the key field. Required. - :vartype parent_key_field_name: str - :ivar source_context: Source context for the projections. Represents the cardinality at which - the document will be split into multiple sub documents. Required. - :vartype source_context: str - :ivar mappings: Mappings for the projection, or which source should be mapped to which field in - the target index. Required. - :vartype mappings: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - """ - - _validation = { - "target_index_name": {"required": True}, - "parent_key_field_name": {"required": True}, - "source_context": {"required": True}, - "mappings": {"required": True}, - } - - _attribute_map = { - "target_index_name": {"key": "targetIndexName", "type": "str"}, - "parent_key_field_name": {"key": "parentKeyFieldName", "type": "str"}, - "source_context": {"key": "sourceContext", "type": "str"}, - "mappings": {"key": "mappings", "type": "[InputFieldMappingEntry]"}, - } - - def __init__( - self, - *, - target_index_name: str, - parent_key_field_name: str, - source_context: str, - mappings: List["_models.InputFieldMappingEntry"], - **kwargs: Any - ) -> None: - """ - :keyword target_index_name: Name of the search index to project to. Must have a key field with - the 'keyword' analyzer set. Required. - :paramtype target_index_name: str - :keyword parent_key_field_name: Name of the field in the search index to map the parent - document's key value to. Must be a string field that is filterable and not the key field. - Required. - :paramtype parent_key_field_name: str - :keyword source_context: Source context for the projections. Represents the cardinality at - which the document will be split into multiple sub documents. Required. - :paramtype source_context: str - :keyword mappings: Mappings for the projection, or which source should be mapped to which field - in the target index. Required. - :paramtype mappings: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - """ - super().__init__(**kwargs) - self.target_index_name = target_index_name - self.parent_key_field_name = parent_key_field_name - self.source_context = source_context - self.mappings = mappings - - -class SearchIndexerIndexProjectionsParameters(_serialization.Model): - """A dictionary of index projection-specific configuration properties. Each name is the name of a - specific property. Each value must be of a primitive type. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar projection_mode: Defines behavior of the index projections in relation to the rest of the - indexer. Known values are: "skipIndexingParentDocuments" and "includeIndexingParentDocuments". - :vartype projection_mode: str or ~azure.search.documents.indexes.models.IndexProjectionMode - """ - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "projection_mode": {"key": "projectionMode", "type": "str"}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - projection_mode: Optional[Union[str, "_models.IndexProjectionMode"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. 
- :paramtype additional_properties: dict[str, any] - :keyword projection_mode: Defines behavior of the index projections in relation to the rest of - the indexer. Known values are: "skipIndexingParentDocuments" and - "includeIndexingParentDocuments". - :paramtype projection_mode: str or ~azure.search.documents.indexes.models.IndexProjectionMode - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.projection_mode = projection_mode - - -class SearchIndexerKnowledgeStore(_serialization.Model): - """Definition of additional projections to azure blob, table, or files, of enriched data. - - All required parameters must be populated in order to send to server. - - :ivar storage_connection_string: The connection string to the storage account projections will - be stored in. Required. - :vartype storage_connection_string: str - :ivar projections: A list of additional projections to perform during indexing. Required. - :vartype projections: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection] - :ivar identity: The user-assigned managed identity used for connections to Azure Storage when - writing knowledge store projections. If the connection string indicates an identity - (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates - to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", - the value of this property is cleared. - :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :ivar parameters: A dictionary of knowledge store-specific configuration properties. Each name - is the name of a specific property. Each value must be of a primitive type. - :vartype parameters: - ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreParameters - """ - - _validation = { - "storage_connection_string": {"required": True}, - "projections": {"required": True}, - } - - _attribute_map = { - "storage_connection_string": {"key": "storageConnectionString", "type": "str"}, - "projections": {"key": "projections", "type": "[SearchIndexerKnowledgeStoreProjection]"}, - "identity": {"key": "identity", "type": "SearchIndexerDataIdentity"}, - "parameters": {"key": "parameters", "type": "SearchIndexerKnowledgeStoreParameters"}, - } - - def __init__( - self, - *, - storage_connection_string: str, - projections: List["_models.SearchIndexerKnowledgeStoreProjection"], - identity: Optional["_models.SearchIndexerDataIdentity"] = None, - parameters: Optional["_models.SearchIndexerKnowledgeStoreParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword storage_connection_string: The connection string to the storage account projections - will be stored in. Required. - :paramtype storage_connection_string: str - :keyword projections: A list of additional projections to perform during indexing. Required. - :paramtype projections: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreProjection] - :keyword identity: The user-assigned managed identity used for connections to Azure Storage - when writing knowledge store projections. If the connection string indicates an identity - (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates - to the indexer, if the identity is unspecified, the value remains unchanged. If set to "none", - the value of this property is cleared. 
- :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - :keyword parameters: A dictionary of knowledge store-specific configuration properties. Each - name is the name of a specific property. Each value must be of a primitive type. - :paramtype parameters: - ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreParameters - """ - super().__init__(**kwargs) - self.storage_connection_string = storage_connection_string - self.projections = projections - self.identity = identity - self.parameters = parameters - - -class SearchIndexerKnowledgeStoreProjectionSelector(_serialization.Model): # pylint: disable=name-too-long - """Abstract class to share properties between concrete selectors. - - :ivar reference_key_name: Name of reference key to different projection. - :vartype reference_key_name: str - :ivar generated_key_name: Name of generated key to store projection under. - :vartype generated_key_name: str - :ivar source: Source data to project. - :vartype source: str - :ivar source_context: Source context for complex projections. - :vartype source_context: str - :ivar inputs: Nested inputs for complex projections. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - """ - - _attribute_map = { - "reference_key_name": {"key": "referenceKeyName", "type": "str"}, - "generated_key_name": {"key": "generatedKeyName", "type": "str"}, - "source": {"key": "source", "type": "str"}, - "source_context": {"key": "sourceContext", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - } - - def __init__( - self, - *, - reference_key_name: Optional[str] = None, - generated_key_name: Optional[str] = None, - source: Optional[str] = None, - source_context: Optional[str] = None, - inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - """ - super().__init__(**kwargs) - self.reference_key_name = reference_key_name - self.generated_key_name = generated_key_name - self.source = source - self.source_context = source_context - self.inputs = inputs - - -class SearchIndexerKnowledgeStoreBlobProjectionSelector( - SearchIndexerKnowledgeStoreProjectionSelector -): # pylint: disable=name-too-long - """Abstract class to share properties between concrete selectors. - - All required parameters must be populated in order to send to server. - - :ivar reference_key_name: Name of reference key to different projection. - :vartype reference_key_name: str - :ivar generated_key_name: Name of generated key to store projection under. - :vartype generated_key_name: str - :ivar source: Source data to project. - :vartype source: str - :ivar source_context: Source context for complex projections. - :vartype source_context: str - :ivar inputs: Nested inputs for complex projections. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar storage_container: Blob container to store projections in. Required. 
- :vartype storage_container: str - """ - - _validation = { - "storage_container": {"required": True}, - } - - _attribute_map = { - "reference_key_name": {"key": "referenceKeyName", "type": "str"}, - "generated_key_name": {"key": "generatedKeyName", "type": "str"}, - "source": {"key": "source", "type": "str"}, - "source_context": {"key": "sourceContext", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "storage_container": {"key": "storageContainer", "type": "str"}, - } - - def __init__( - self, - *, - storage_container: str, - reference_key_name: Optional[str] = None, - generated_key_name: Optional[str] = None, - source: Optional[str] = None, - source_context: Optional[str] = None, - inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword storage_container: Blob container to store projections in. Required. - :paramtype storage_container: str - """ - super().__init__( - reference_key_name=reference_key_name, - generated_key_name=generated_key_name, - source=source, - source_context=source_context, - inputs=inputs, - **kwargs - ) - self.storage_container = storage_container - - -class SearchIndexerKnowledgeStoreFileProjectionSelector( - SearchIndexerKnowledgeStoreBlobProjectionSelector -): # pylint: disable=name-too-long - """Projection definition for what data to store in Azure Files. - - All required parameters must be populated in order to send to server. - - :ivar reference_key_name: Name of reference key to different projection. - :vartype reference_key_name: str - :ivar generated_key_name: Name of generated key to store projection under. - :vartype generated_key_name: str - :ivar source: Source data to project. - :vartype source: str - :ivar source_context: Source context for complex projections. - :vartype source_context: str - :ivar inputs: Nested inputs for complex projections. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar storage_container: Blob container to store projections in. Required. - :vartype storage_container: str - """ - - -class SearchIndexerKnowledgeStoreObjectProjectionSelector( - SearchIndexerKnowledgeStoreBlobProjectionSelector -): # pylint: disable=name-too-long - """Projection definition for what data to store in Azure Blob. - - All required parameters must be populated in order to send to server. - - :ivar reference_key_name: Name of reference key to different projection. - :vartype reference_key_name: str - :ivar generated_key_name: Name of generated key to store projection under. - :vartype generated_key_name: str - :ivar source: Source data to project. - :vartype source: str - :ivar source_context: Source context for complex projections. - :vartype source_context: str - :ivar inputs: Nested inputs for complex projections. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar storage_container: Blob container to store projections in. Required. 
- :vartype storage_container: str - """ - - -class SearchIndexerKnowledgeStoreParameters(_serialization.Model): - """A dictionary of knowledge store-specific configuration properties. Each name is the name of a - specific property. Each value must be of a primitive type. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar synthesize_generated_key_name: Whether or not projections should synthesize a generated - key name if one isn't already present. - :vartype synthesize_generated_key_name: bool - """ - - _attribute_map = { - "additional_properties": {"key": "", "type": "{object}"}, - "synthesize_generated_key_name": {"key": "synthesizeGeneratedKeyName", "type": "bool"}, - } - - def __init__( - self, - *, - additional_properties: Optional[Dict[str, Any]] = None, - synthesize_generated_key_name: bool = False, - **kwargs: Any - ) -> None: - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword synthesize_generated_key_name: Whether or not projections should synthesize a - generated key name if one isn't already present. - :paramtype synthesize_generated_key_name: bool - """ - super().__init__(**kwargs) - self.additional_properties = additional_properties - self.synthesize_generated_key_name = synthesize_generated_key_name - - -class SearchIndexerKnowledgeStoreProjection(_serialization.Model): - """Container object for various projection selectors. - - :ivar tables: Projections to Azure Table storage. - :vartype tables: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector] - :ivar objects: Projections to Azure Blob storage. - :vartype objects: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector] - :ivar files: Projections to Azure File storage. - :vartype files: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector] - """ - - _attribute_map = { - "tables": {"key": "tables", "type": "[SearchIndexerKnowledgeStoreTableProjectionSelector]"}, - "objects": {"key": "objects", "type": "[SearchIndexerKnowledgeStoreObjectProjectionSelector]"}, - "files": {"key": "files", "type": "[SearchIndexerKnowledgeStoreFileProjectionSelector]"}, - } - - def __init__( - self, - *, - tables: Optional[List["_models.SearchIndexerKnowledgeStoreTableProjectionSelector"]] = None, - objects: Optional[List["_models.SearchIndexerKnowledgeStoreObjectProjectionSelector"]] = None, - files: Optional[List["_models.SearchIndexerKnowledgeStoreFileProjectionSelector"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword tables: Projections to Azure Table storage. - :paramtype tables: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreTableProjectionSelector] - :keyword objects: Projections to Azure Blob storage. - :paramtype objects: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreObjectProjectionSelector] - :keyword files: Projections to Azure File storage. 
- :paramtype files: - list[~azure.search.documents.indexes.models.SearchIndexerKnowledgeStoreFileProjectionSelector] - """ - super().__init__(**kwargs) - self.tables = tables - self.objects = objects - self.files = files - - -class SearchIndexerKnowledgeStoreTableProjectionSelector( - SearchIndexerKnowledgeStoreProjectionSelector -): # pylint: disable=name-too-long - """Description for what data to store in Azure Tables. - - All required parameters must be populated in order to send to server. - - :ivar reference_key_name: Name of reference key to different projection. - :vartype reference_key_name: str - :ivar generated_key_name: Name of generated key to store projection under. - :vartype generated_key_name: str - :ivar source: Source data to project. - :vartype source: str - :ivar source_context: Source context for complex projections. - :vartype source_context: str - :ivar inputs: Nested inputs for complex projections. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar table_name: Name of the Azure table to store projected data in. Required. - :vartype table_name: str - """ - - _validation = { - "table_name": {"required": True}, - } - - _attribute_map = { - "reference_key_name": {"key": "referenceKeyName", "type": "str"}, - "generated_key_name": {"key": "generatedKeyName", "type": "str"}, - "source": {"key": "source", "type": "str"}, - "source_context": {"key": "sourceContext", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "table_name": {"key": "tableName", "type": "str"}, - } - - def __init__( - self, - *, - table_name: str, - reference_key_name: Optional[str] = None, - generated_key_name: Optional[str] = None, - source: Optional[str] = None, - source_context: Optional[str] = None, - inputs: Optional[List["_models.InputFieldMappingEntry"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword reference_key_name: Name of reference key to different projection. - :paramtype reference_key_name: str - :keyword generated_key_name: Name of generated key to store projection under. - :paramtype generated_key_name: str - :keyword source: Source data to project. - :paramtype source: str - :keyword source_context: Source context for complex projections. - :paramtype source_context: str - :keyword inputs: Nested inputs for complex projections. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword table_name: Name of the Azure table to store projected data in. Required. - :paramtype table_name: str - """ - super().__init__( - reference_key_name=reference_key_name, - generated_key_name=generated_key_name, - source=source, - source_context=source_context, - inputs=inputs, - **kwargs - ) - self.table_name = table_name - - -class SearchIndexerLimits(_serialization.Model): - """SearchIndexerLimits. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar max_run_time: The maximum duration that the indexer is permitted to run for one - execution. - :vartype max_run_time: ~datetime.timedelta - :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be - considered valid for indexing. - :vartype max_document_extraction_size: int - :ivar max_document_content_characters_to_extract: The maximum number of characters that will be - extracted from a document picked up for indexing. 
- :vartype max_document_content_characters_to_extract: int - """ - - _validation = { - "max_run_time": {"readonly": True}, - "max_document_extraction_size": {"readonly": True}, - "max_document_content_characters_to_extract": {"readonly": True}, - } - - _attribute_map = { - "max_run_time": {"key": "maxRunTime", "type": "duration"}, - "max_document_extraction_size": {"key": "maxDocumentExtractionSize", "type": "int"}, - "max_document_content_characters_to_extract": {"key": "maxDocumentContentCharactersToExtract", "type": "int"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.max_run_time = None - self.max_document_extraction_size = None - self.max_document_content_characters_to_extract = None - - -class SearchIndexerSkillset(_serialization.Model): - """A list of skills. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the skillset. Required. - :vartype name: str - :ivar description: The description of the skillset. - :vartype description: str - :ivar skills: A list of skills in the skillset. Required. - :vartype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] - :ivar cognitive_services_account: Details about the Azure AI service to be used when running - skills. - :vartype cognitive_services_account: - ~azure.search.documents.indexes.models.CognitiveServicesAccount - :ivar knowledge_store: Definition of additional projections to Azure blob, table, or files, of - enriched data. - :vartype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore - :ivar index_projection: Definition of additional projections to secondary search index(es). - :vartype index_projection: ~azure.search.documents.indexes.models.SearchIndexerIndexProjection - :ivar e_tag: The ETag of the skillset. - :vartype e_tag: str - :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your skillset - definition when you want full assurance that no one, not even Microsoft, can decrypt your - skillset definition. Once you have encrypted your skillset definition, it will always remain - encrypted. The search service will ignore attempts to set this property to null. You can change - this property as needed if you want to rotate your encryption key; Your skillset definition - will be unaffected. Encryption with customer-managed keys is not available for free search - services, and is only available for paid services created on or after January 1, 2019. 
- :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - """ - - _validation = { - "name": {"required": True}, - "skills": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "skills": {"key": "skills", "type": "[SearchIndexerSkill]"}, - "cognitive_services_account": {"key": "cognitiveServices", "type": "CognitiveServicesAccount"}, - "knowledge_store": {"key": "knowledgeStore", "type": "SearchIndexerKnowledgeStore"}, - "index_projection": {"key": "indexProjections", "type": "SearchIndexerIndexProjection"}, - "e_tag": {"key": "@odata\\.etag", "type": "str"}, - "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, - } - - def __init__( - self, - *, - name: str, - skills: List["_models.SearchIndexerSkill"], - description: Optional[str] = None, - cognitive_services_account: Optional["_models.CognitiveServicesAccount"] = None, - knowledge_store: Optional["_models.SearchIndexerKnowledgeStore"] = None, - index_projection: Optional["_models.SearchIndexerIndexProjection"] = None, - e_tag: Optional[str] = None, - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skillset. Required. - :paramtype name: str - :keyword description: The description of the skillset. - :paramtype description: str - :keyword skills: A list of skills in the skillset. Required. - :paramtype skills: list[~azure.search.documents.indexes.models.SearchIndexerSkill] - :keyword cognitive_services_account: Details about the Azure AI service to be used when running - skills. - :paramtype cognitive_services_account: - ~azure.search.documents.indexes.models.CognitiveServicesAccount - :keyword knowledge_store: Definition of additional projections to Azure blob, table, or files, - of enriched data. - :paramtype knowledge_store: ~azure.search.documents.indexes.models.SearchIndexerKnowledgeStore - :keyword index_projection: Definition of additional projections to secondary search index(es). - :paramtype index_projection: - ~azure.search.documents.indexes.models.SearchIndexerIndexProjection - :keyword e_tag: The ETag of the skillset. - :paramtype e_tag: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your skillset - definition when you want full assurance that no one, not even Microsoft, can decrypt your - skillset definition. Once you have encrypted your skillset definition, it will always remain - encrypted. The search service will ignore attempts to set this property to null. You can change - this property as needed if you want to rotate your encryption key; Your skillset definition - will be unaffected. Encryption with customer-managed keys is not available for free search - services, and is only available for paid services created on or after January 1, 2019. 
- :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - """ - super().__init__(**kwargs) - self.name = name - self.description = description - self.skills = skills - self.cognitive_services_account = cognitive_services_account - self.knowledge_store = knowledge_store - self.index_projection = index_projection - self.e_tag = e_tag - self.encryption_key = encryption_key - - -class SearchIndexerStatus(_serialization.Model): - """Represents the current status and execution history of an indexer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar status: Overall indexer status. Required. Known values are: "unknown", "error", and - "running". - :vartype status: str or ~azure.search.documents.indexes.models.IndexerStatus - :ivar last_result: The result of the most recent or an in-progress indexer execution. - :vartype last_result: ~azure.search.documents.indexes.models.IndexerExecutionResult - :ivar execution_history: History of the recent indexer executions, sorted in reverse - chronological order. Required. - :vartype execution_history: list[~azure.search.documents.indexes.models.IndexerExecutionResult] - :ivar limits: The execution limits for the indexer. Required. - :vartype limits: ~azure.search.documents.indexes.models.SearchIndexerLimits - """ - - _validation = { - "status": {"required": True, "readonly": True}, - "last_result": {"readonly": True}, - "execution_history": {"required": True, "readonly": True}, - "limits": {"required": True, "readonly": True}, - } - - _attribute_map = { - "status": {"key": "status", "type": "str"}, - "last_result": {"key": "lastResult", "type": "IndexerExecutionResult"}, - "execution_history": {"key": "executionHistory", "type": "[IndexerExecutionResult]"}, - "limits": {"key": "limits", "type": "SearchIndexerLimits"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.status = None - self.last_result = None - self.execution_history = None - self.limits = None - - -class SearchIndexerWarning(_serialization.Model): - """Represents an item-level warning. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar key: The key of the item which generated a warning. - :vartype key: str - :ivar message: The message describing the warning that occurred while processing the item. - Required. - :vartype message: str - :ivar name: The name of the source at which the warning originated. For example, this could - refer to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the warning to assist in debugging the - indexer. This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This - may not be always available. 
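# A minimal sketch of building the SearchIndexerSkillset model described above, assuming
# these classes are re-exported from azure.search.documents.indexes.models (the namespace
# the docstrings reference) and that InputFieldMappingEntry / OutputFieldMappingEntry take
# the usual name/source and name/target_name keyword arguments.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    SearchIndexerSkillset,
    ShaperSkill,
)

# Reshape /document/content into a composite field named "analyzedText".
shaper = ShaperSkill(
    name="#1-shaper",
    context="/document",
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="output", target_name="analyzedText")],
)

skillset = SearchIndexerSkillset(
    name="hotel-skillset",  # hypothetical skillset name
    description="Reshapes document content before indexing.",
    skills=[shaper],
)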
- :vartype documentation_link: str - """ - - _validation = { - "key": {"readonly": True}, - "message": {"required": True, "readonly": True}, - "name": {"readonly": True}, - "details": {"readonly": True}, - "documentation_link": {"readonly": True}, - } - - _attribute_map = { - "key": {"key": "key", "type": "str"}, - "message": {"key": "message", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "details": {"key": "details", "type": "str"}, - "documentation_link": {"key": "documentationLink", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.key = None - self.message = None - self.name = None - self.details = None - self.documentation_link = None - - -class SearchResourceEncryptionKey(_serialization.Model): - """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be - used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. - - All required parameters must be populated in order to send to server. - - :ivar key_name: The name of your Azure Key Vault key to be used to encrypt your data at rest. - Required. - :vartype key_name: str - :ivar key_version: The version of your Azure Key Vault key to be used to encrypt your data at - rest. Required. - :vartype key_version: str - :ivar vault_uri: The URI of your Azure Key Vault, also referred to as DNS name, that contains - the key to be used to encrypt your data at rest. An example URI might be - ``https://my-keyvault-name.vault.azure.net``. Required. - :vartype vault_uri: str - :ivar access_credentials: Optional Azure Active Directory credentials used for accessing your - Azure Key Vault. Not required if using managed identity instead. - :vartype access_credentials: - ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials - :ivar identity: An explicit managed identity to use for this encryption key. If not specified - and the access credentials property is null, the system-assigned managed identity is used. On - update to the resource, if the explicit identity is unspecified, it remains unchanged. If - "none" is specified, the value of this property is cleared. - :vartype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - - _validation = { - "key_name": {"required": True}, - "key_version": {"required": True}, - "vault_uri": {"required": True}, - } - - _attribute_map = { - "key_name": {"key": "keyVaultKeyName", "type": "str"}, - "key_version": {"key": "keyVaultKeyVersion", "type": "str"}, - "vault_uri": {"key": "keyVaultUri", "type": "str"}, - "access_credentials": {"key": "accessCredentials", "type": "AzureActiveDirectoryApplicationCredentials"}, - "identity": {"key": "identity", "type": "SearchIndexerDataIdentity"}, - } - - def __init__( - self, - *, - key_name: str, - key_version: str, - vault_uri: str, - access_credentials: Optional["_models.AzureActiveDirectoryApplicationCredentials"] = None, - identity: Optional["_models.SearchIndexerDataIdentity"] = None, - **kwargs: Any - ) -> None: - """ - :keyword key_name: The name of your Azure Key Vault key to be used to encrypt your data at - rest. Required. - :paramtype key_name: str - :keyword key_version: The version of your Azure Key Vault key to be used to encrypt your data - at rest. Required. - :paramtype key_version: str - :keyword vault_uri: The URI of your Azure Key Vault, also referred to as DNS name, that - contains the key to be used to encrypt your data at rest. 
An example URI might be - ``https://my-keyvault-name.vault.azure.net``. Required. - :paramtype vault_uri: str - :keyword access_credentials: Optional Azure Active Directory credentials used for accessing - your Azure Key Vault. Not required if using managed identity instead. - :paramtype access_credentials: - ~azure.search.documents.indexes.models.AzureActiveDirectoryApplicationCredentials - :keyword identity: An explicit managed identity to use for this encryption key. If not - specified and the access credentials property is null, the system-assigned managed identity is - used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. - If "none" is specified, the value of this property is cleared. - :paramtype identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - super().__init__(**kwargs) - self.key_name = key_name - self.key_version = key_version - self.vault_uri = vault_uri - self.access_credentials = access_credentials - self.identity = identity - - -class SearchServiceCounters(_serialization.Model): - """Represents service-level resource counters and quotas. - - All required parameters must be populated in order to send to server. - - :ivar alias_counter: Total number of aliases. Required. - :vartype alias_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar document_counter: Total number of documents across all indexes in the service. Required. - :vartype document_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar index_counter: Total number of indexes. Required. - :vartype index_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar indexer_counter: Total number of indexers. Required. - :vartype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar data_source_counter: Total number of data sources. Required. - :vartype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar storage_size_counter: Total size of used storage in bytes. Required. - :vartype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar synonym_map_counter: Total number of synonym maps. Required. - :vartype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar skillset_counter: Total number of skillsets. Required. - :vartype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter - :ivar vector_index_size_counter: Total memory consumption of all vector indexes within the - service, in bytes. Required. 
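# A short sketch of the customer-managed encryption key model above; the key and vault
# values are placeholders, and the import path follows the
# azure.search.documents.indexes.models namespace used throughout these docstrings.
from azure.search.documents.indexes.models import SearchResourceEncryptionKey

encryption_key = SearchResourceEncryptionKey(
    key_name="my-search-cmk",                              # hypothetical Key Vault key name
    key_version="0123456789abcdef0123456789abcdef",        # placeholder key version
    vault_uri="https://my-keyvault-name.vault.azure.net",  # example URI from the docstring
)
# access_credentials / identity are optional: omitting both falls back to the
# system-assigned managed identity, as described above.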
- :vartype vector_index_size_counter: ~azure.search.documents.indexes.models.ResourceCounter - """ - - _validation = { - "alias_counter": {"required": True}, - "document_counter": {"required": True}, - "index_counter": {"required": True}, - "indexer_counter": {"required": True}, - "data_source_counter": {"required": True}, - "storage_size_counter": {"required": True}, - "synonym_map_counter": {"required": True}, - "skillset_counter": {"required": True}, - "vector_index_size_counter": {"required": True}, - } - - _attribute_map = { - "alias_counter": {"key": "aliasesCount", "type": "ResourceCounter"}, - "document_counter": {"key": "documentCount", "type": "ResourceCounter"}, - "index_counter": {"key": "indexesCount", "type": "ResourceCounter"}, - "indexer_counter": {"key": "indexersCount", "type": "ResourceCounter"}, - "data_source_counter": {"key": "dataSourcesCount", "type": "ResourceCounter"}, - "storage_size_counter": {"key": "storageSize", "type": "ResourceCounter"}, - "synonym_map_counter": {"key": "synonymMaps", "type": "ResourceCounter"}, - "skillset_counter": {"key": "skillsetCount", "type": "ResourceCounter"}, - "vector_index_size_counter": {"key": "vectorIndexSize", "type": "ResourceCounter"}, - } - - def __init__( - self, - *, - alias_counter: "_models.ResourceCounter", - document_counter: "_models.ResourceCounter", - index_counter: "_models.ResourceCounter", - indexer_counter: "_models.ResourceCounter", - data_source_counter: "_models.ResourceCounter", - storage_size_counter: "_models.ResourceCounter", - synonym_map_counter: "_models.ResourceCounter", - skillset_counter: "_models.ResourceCounter", - vector_index_size_counter: "_models.ResourceCounter", - **kwargs: Any - ) -> None: - """ - :keyword alias_counter: Total number of aliases. Required. - :paramtype alias_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword document_counter: Total number of documents across all indexes in the service. - Required. - :paramtype document_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword index_counter: Total number of indexes. Required. - :paramtype index_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword indexer_counter: Total number of indexers. Required. - :paramtype indexer_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword data_source_counter: Total number of data sources. Required. - :paramtype data_source_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword storage_size_counter: Total size of used storage in bytes. Required. - :paramtype storage_size_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword synonym_map_counter: Total number of synonym maps. Required. - :paramtype synonym_map_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword skillset_counter: Total number of skillsets. Required. - :paramtype skillset_counter: ~azure.search.documents.indexes.models.ResourceCounter - :keyword vector_index_size_counter: Total memory consumption of all vector indexes within the - service, in bytes. Required. 
- :paramtype vector_index_size_counter: ~azure.search.documents.indexes.models.ResourceCounter - """ - super().__init__(**kwargs) - self.alias_counter = alias_counter - self.document_counter = document_counter - self.index_counter = index_counter - self.indexer_counter = indexer_counter - self.data_source_counter = data_source_counter - self.storage_size_counter = storage_size_counter - self.synonym_map_counter = synonym_map_counter - self.skillset_counter = skillset_counter - self.vector_index_size_counter = vector_index_size_counter - - -class SearchServiceLimits(_serialization.Model): - """Represents various service level limits. - - :ivar max_fields_per_index: The maximum allowed fields per index. - :vartype max_fields_per_index: int - :ivar max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in an - index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. - :vartype max_field_nesting_depth_per_index: int - :ivar max_complex_collection_fields_per_index: The maximum number of fields of type - Collection(Edm.ComplexType) allowed in an index. - :vartype max_complex_collection_fields_per_index: int - :ivar max_complex_objects_in_collections_per_document: The maximum number of objects in complex - collections allowed per document. - :vartype max_complex_objects_in_collections_per_document: int - :ivar max_storage_per_index_in_bytes: The maximum amount of storage in bytes allowed per index. - :vartype max_storage_per_index_in_bytes: int - """ - - _attribute_map = { - "max_fields_per_index": {"key": "maxFieldsPerIndex", "type": "int"}, - "max_field_nesting_depth_per_index": {"key": "maxFieldNestingDepthPerIndex", "type": "int"}, - "max_complex_collection_fields_per_index": {"key": "maxComplexCollectionFieldsPerIndex", "type": "int"}, - "max_complex_objects_in_collections_per_document": { - "key": "maxComplexObjectsInCollectionsPerDocument", - "type": "int", - }, - "max_storage_per_index_in_bytes": {"key": "maxStoragePerIndex", "type": "int"}, - } - - def __init__( - self, - *, - max_fields_per_index: Optional[int] = None, - max_field_nesting_depth_per_index: Optional[int] = None, - max_complex_collection_fields_per_index: Optional[int] = None, - max_complex_objects_in_collections_per_document: Optional[int] = None, - max_storage_per_index_in_bytes: Optional[int] = None, - **kwargs: Any - ) -> None: - """ - :keyword max_fields_per_index: The maximum allowed fields per index. - :paramtype max_fields_per_index: int - :keyword max_field_nesting_depth_per_index: The maximum depth which you can nest sub-fields in - an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. - :paramtype max_field_nesting_depth_per_index: int - :keyword max_complex_collection_fields_per_index: The maximum number of fields of type - Collection(Edm.ComplexType) allowed in an index. - :paramtype max_complex_collection_fields_per_index: int - :keyword max_complex_objects_in_collections_per_document: The maximum number of objects in - complex collections allowed per document. - :paramtype max_complex_objects_in_collections_per_document: int - :keyword max_storage_per_index_in_bytes: The maximum amount of storage in bytes allowed per - index. 
- :paramtype max_storage_per_index_in_bytes: int - """ - super().__init__(**kwargs) - self.max_fields_per_index = max_fields_per_index - self.max_field_nesting_depth_per_index = max_field_nesting_depth_per_index - self.max_complex_collection_fields_per_index = max_complex_collection_fields_per_index - self.max_complex_objects_in_collections_per_document = max_complex_objects_in_collections_per_document - self.max_storage_per_index_in_bytes = max_storage_per_index_in_bytes - - -class SearchServiceStatistics(_serialization.Model): - """Response from a get service statistics request. If successful, it includes service level - counters and limits. - - All required parameters must be populated in order to send to server. - - :ivar counters: Service level resource counters. Required. - :vartype counters: ~azure.search.documents.indexes.models.SearchServiceCounters - :ivar limits: Service level general limits. Required. - :vartype limits: ~azure.search.documents.indexes.models.SearchServiceLimits - """ - - _validation = { - "counters": {"required": True}, - "limits": {"required": True}, - } - - _attribute_map = { - "counters": {"key": "counters", "type": "SearchServiceCounters"}, - "limits": {"key": "limits", "type": "SearchServiceLimits"}, - } - - def __init__( - self, *, counters: "_models.SearchServiceCounters", limits: "_models.SearchServiceLimits", **kwargs: Any - ) -> None: - """ - :keyword counters: Service level resource counters. Required. - :paramtype counters: ~azure.search.documents.indexes.models.SearchServiceCounters - :keyword limits: Service level general limits. Required. - :paramtype limits: ~azure.search.documents.indexes.models.SearchServiceLimits - """ - super().__init__(**kwargs) - self.counters = counters - self.limits = limits - - -class SearchSuggester(_serialization.Model): - """Defines how the Suggest API should apply to a group of fields in the index. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the suggester. Required. - :vartype name: str - :ivar search_mode: A value indicating the capabilities of the suggester. Required. Default - value is "analyzingInfixMatching". - :vartype search_mode: str - :ivar source_fields: The list of field names to which the suggester applies. Each field must be - searchable. Required. - :vartype source_fields: list[str] - """ - - _validation = { - "name": {"required": True}, - "search_mode": {"required": True, "constant": True}, - "source_fields": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "search_mode": {"key": "searchMode", "type": "str"}, - "source_fields": {"key": "sourceFields", "type": "[str]"}, - } - - search_mode = "analyzingInfixMatching" - - def __init__(self, *, name: str, source_fields: List[str], **kwargs: Any) -> None: - """ - :keyword name: The name of the suggester. Required. - :paramtype name: str - :keyword source_fields: The list of field names to which the suggester applies. Each field must - be searchable. Required. - :paramtype source_fields: list[str] - """ - super().__init__(**kwargs) - self.name = name - self.source_fields = source_fields - - -class SemanticConfiguration(_serialization.Model): - """Defines a specific configuration to be used in the context of semantic capabilities. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the semantic configuration. Required. 
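# A minimal sketch of the SearchSuggester model above. searchMode is a constant
# ("analyzingInfixMatching"), so only name and source_fields are passed; the field names
# are hypothetical and must refer to searchable fields in the index.
from azure.search.documents.indexes.models import SearchSuggester

suggester = SearchSuggester(
    name="sg",
    source_fields=["hotelName", "category"],  # hypothetical searchable fields
)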
- :vartype name: str - :ivar prioritized_fields: Describes the title, content, and keyword fields to be used for - semantic ranking, captions, highlights, and answers. At least one of the three sub properties - (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. Required. - :vartype prioritized_fields: ~azure.search.documents.indexes.models.SemanticPrioritizedFields - """ - - _validation = { - "name": {"required": True}, - "prioritized_fields": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "prioritized_fields": {"key": "prioritizedFields", "type": "SemanticPrioritizedFields"}, - } - - def __init__(self, *, name: str, prioritized_fields: "_models.SemanticPrioritizedFields", **kwargs: Any) -> None: - """ - :keyword name: The name of the semantic configuration. Required. - :paramtype name: str - :keyword prioritized_fields: Describes the title, content, and keyword fields to be used for - semantic ranking, captions, highlights, and answers. At least one of the three sub properties - (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. Required. - :paramtype prioritized_fields: ~azure.search.documents.indexes.models.SemanticPrioritizedFields - """ - super().__init__(**kwargs) - self.name = name - self.prioritized_fields = prioritized_fields - - -class SemanticField(_serialization.Model): - """A field that is used as part of the semantic configuration. - - All required parameters must be populated in order to send to server. - - :ivar field_name: Required. - :vartype field_name: str - """ - - _validation = { - "field_name": {"required": True}, - } - - _attribute_map = { - "field_name": {"key": "fieldName", "type": "str"}, - } - - def __init__(self, *, field_name: str, **kwargs: Any) -> None: - """ - :keyword field_name: Required. - :paramtype field_name: str - """ - super().__init__(**kwargs) - self.field_name = field_name - - -class SemanticPrioritizedFields(_serialization.Model): - """Describes the title, content, and keywords fields to be used for semantic ranking, captions, - highlights, and answers. - - :ivar title_field: Defines the title field to be used for semantic ranking, captions, - highlights, and answers. If you don't have a title field in your index, leave this blank. - :vartype title_field: ~azure.search.documents.indexes.models.SemanticField - :ivar content_fields: Defines the content fields to be used for semantic ranking, captions, - highlights, and answers. For the best result, the selected fields should contain text in - natural language form. The order of the fields in the array represents their priority. Fields - with lower priority may get truncated if the content is long. - :vartype content_fields: list[~azure.search.documents.indexes.models.SemanticField] - :ivar keywords_fields: Defines the keyword fields to be used for semantic ranking, captions, - highlights, and answers. For the best result, the selected fields should contain a list of - keywords. The order of the fields in the array represents their priority. Fields with lower - priority may get truncated if the content is long. 
- :vartype keywords_fields: list[~azure.search.documents.indexes.models.SemanticField] - """ - - _attribute_map = { - "title_field": {"key": "titleField", "type": "SemanticField"}, - "content_fields": {"key": "prioritizedContentFields", "type": "[SemanticField]"}, - "keywords_fields": {"key": "prioritizedKeywordsFields", "type": "[SemanticField]"}, - } - - def __init__( - self, - *, - title_field: Optional["_models.SemanticField"] = None, - content_fields: Optional[List["_models.SemanticField"]] = None, - keywords_fields: Optional[List["_models.SemanticField"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword title_field: Defines the title field to be used for semantic ranking, captions, - highlights, and answers. If you don't have a title field in your index, leave this blank. - :paramtype title_field: ~azure.search.documents.indexes.models.SemanticField - :keyword content_fields: Defines the content fields to be used for semantic ranking, captions, - highlights, and answers. For the best result, the selected fields should contain text in - natural language form. The order of the fields in the array represents their priority. Fields - with lower priority may get truncated if the content is long. - :paramtype content_fields: list[~azure.search.documents.indexes.models.SemanticField] - :keyword keywords_fields: Defines the keyword fields to be used for semantic ranking, captions, - highlights, and answers. For the best result, the selected fields should contain a list of - keywords. The order of the fields in the array represents their priority. Fields with lower - priority may get truncated if the content is long. - :paramtype keywords_fields: list[~azure.search.documents.indexes.models.SemanticField] - """ - super().__init__(**kwargs) - self.title_field = title_field - self.content_fields = content_fields - self.keywords_fields = keywords_fields - - -class SemanticSearch(_serialization.Model): - """Defines parameters for a search index that influence semantic capabilities. - - :ivar default_configuration_name: Allows you to set the name of a default semantic - configuration in your index, making it optional to pass it on as a query parameter every time. - :vartype default_configuration_name: str - :ivar configurations: The semantic configurations for the index. - :vartype configurations: list[~azure.search.documents.indexes.models.SemanticConfiguration] - """ - - _attribute_map = { - "default_configuration_name": {"key": "defaultConfiguration", "type": "str"}, - "configurations": {"key": "configurations", "type": "[SemanticConfiguration]"}, - } - - def __init__( - self, - *, - default_configuration_name: Optional[str] = None, - configurations: Optional[List["_models.SemanticConfiguration"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword default_configuration_name: Allows you to set the name of a default semantic - configuration in your index, making it optional to pass it on as a query parameter every time. - :paramtype default_configuration_name: str - :keyword configurations: The semantic configurations for the index. - :paramtype configurations: list[~azure.search.documents.indexes.models.SemanticConfiguration] - """ - super().__init__(**kwargs) - self.default_configuration_name = default_configuration_name - self.configurations = configurations - - -class SentimentSkill(SearchIndexerSkill): - """This skill is deprecated. Use the V3.SentimentSkill instead. - - All required parameters must be populated in order to send to server. 
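# A sketch wiring together the semantic models above: a SemanticSearch section holding one
# SemanticConfiguration whose SemanticPrioritizedFields name hypothetical index fields.
from azure.search.documents.indexes.models import (
    SemanticConfiguration,
    SemanticField,
    SemanticPrioritizedFields,
    SemanticSearch,
)

semantic_search = SemanticSearch(
    default_configuration_name="default-semantic-config",
    configurations=[
        SemanticConfiguration(
            name="default-semantic-config",
            prioritized_fields=SemanticPrioritizedFields(
                title_field=SemanticField(field_name="hotelName"),
                content_fields=[SemanticField(field_name="description")],
                keywords_fields=[SemanticField(field_name="tags")],
            ),
        )
    ],
)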
- - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - Known values are: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", "ru", - "es", "sv", and "tr". - :vartype default_language_code: str or - ~azure.search.documents.indexes.models.SentimentSkillLanguage - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[Union[str, "_models.SentimentSkillLanguage"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. 
- :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. Known values are: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", - "ru", "es", "sv", and "tr". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.SentimentSkillLanguage - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.SentimentSkill" - self.default_language_code = default_language_code - - -class SentimentSkillV3(SearchIndexerSkill): - """Using the Text Analytics API, evaluates unstructured text and for each record, provides - sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence - score found by the service at a sentence and document-level. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - :vartype default_language_code: str - :ivar include_opinion_mining: If set to true, the skill output will include information from - Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated - assessment (adjective) in the text. Default is false. - :vartype include_opinion_mining: bool - :ivar model_version: The version of the model to use when calling the Text Analytics service. - It will default to the latest available when not specified. We recommend you do not specify - this value unless absolutely necessary. 
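# A sketch of the SentimentSkillV3 described above (the non-deprecated replacement for
# SentimentSkill), assuming the usual InputFieldMappingEntry / OutputFieldMappingEntry
# keywords; the source path and output name are illustrative.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    SentimentSkillV3,
)

sentiment = SentimentSkillV3(
    name="#2-sentiment",
    context="/document",
    default_language_code="en",
    include_opinion_mining=True,
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="sentiment", target_name="sentimentLabel")],
)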
- :vartype model_version: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "include_opinion_mining": {"key": "includeOpinionMining", "type": "bool"}, - "model_version": {"key": "modelVersion", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[str] = None, - include_opinion_mining: bool = False, - model_version: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. - :paramtype default_language_code: str - :keyword include_opinion_mining: If set to true, the skill output will include information from - Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated - assessment (adjective) in the text. Default is false. - :paramtype include_opinion_mining: bool - :keyword model_version: The version of the model to use when calling the Text Analytics - service. It will default to the latest available when not specified. We recommend you do not - specify this value unless absolutely necessary. - :paramtype model_version: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.V3.SentimentSkill" - self.default_language_code = default_language_code - self.include_opinion_mining = include_opinion_mining - self.model_version = model_version - - -class ShaperSkill(SearchIndexerSkill): - """A skill for reshaping the outputs. It creates a complex type to support composite fields (also - known as multipart fields). - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. 
Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Util.ShaperSkill" - - -class ShingleTokenFilter(TokenFilter): - """Creates combinations of tokens as a single token. This token filter is implemented using Apache - Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. 
It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_shingle_size: The maximum shingle size. Default and minimum value is 2. - :vartype max_shingle_size: int - :ivar min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be less - than the value of maxShingleSize. - :vartype min_shingle_size: int - :ivar output_unigrams: A value indicating whether the output stream will contain the input - tokens (unigrams) as well as shingles. Default is true. - :vartype output_unigrams: bool - :ivar output_unigrams_if_no_shingles: A value indicating whether to output unigrams for those - times when no shingles are available. This property takes precedence when outputUnigrams is set - to false. Default is false. - :vartype output_unigrams_if_no_shingles: bool - :ivar token_separator: The string to use when joining adjacent tokens to form a shingle. - Default is a single space (" "). - :vartype token_separator: str - :ivar filter_token: The string to insert for each position at which there is no token. Default - is an underscore ("_"). - :vartype filter_token: str - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_shingle_size": {"minimum": 2}, - "min_shingle_size": {"minimum": 2}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_shingle_size": {"key": "maxShingleSize", "type": "int"}, - "min_shingle_size": {"key": "minShingleSize", "type": "int"}, - "output_unigrams": {"key": "outputUnigrams", "type": "bool"}, - "output_unigrams_if_no_shingles": {"key": "outputUnigramsIfNoShingles", "type": "bool"}, - "token_separator": {"key": "tokenSeparator", "type": "str"}, - "filter_token": {"key": "filterToken", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - max_shingle_size: int = 2, - min_shingle_size: int = 2, - output_unigrams: bool = True, - output_unigrams_if_no_shingles: bool = False, - token_separator: str = " ", - filter_token: str = "_", - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword max_shingle_size: The maximum shingle size. Default and minimum value is 2. - :paramtype max_shingle_size: int - :keyword min_shingle_size: The minimum shingle size. Default and minimum value is 2. Must be - less than the value of maxShingleSize. - :paramtype min_shingle_size: int - :keyword output_unigrams: A value indicating whether the output stream will contain the input - tokens (unigrams) as well as shingles. Default is true. - :paramtype output_unigrams: bool - :keyword output_unigrams_if_no_shingles: A value indicating whether to output unigrams for - those times when no shingles are available. This property takes precedence when outputUnigrams - is set to false. Default is false. - :paramtype output_unigrams_if_no_shingles: bool - :keyword token_separator: The string to use when joining adjacent tokens to form a shingle. - Default is a single space (" "). - :paramtype token_separator: str - :keyword filter_token: The string to insert for each position at which there is no token. - Default is an underscore ("_"). 
- :paramtype filter_token: str - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.ShingleTokenFilter" - self.max_shingle_size = max_shingle_size - self.min_shingle_size = min_shingle_size - self.output_unigrams = output_unigrams - self.output_unigrams_if_no_shingles = output_unigrams_if_no_shingles - self.token_separator = token_separator - self.filter_token = filter_token - - -class SkillNames(_serialization.Model): - """SkillNames. - - :ivar skill_names: the names of skills to be reset. - :vartype skill_names: list[str] - """ - - _attribute_map = { - "skill_names": {"key": "skillNames", "type": "[str]"}, - } - - def __init__(self, *, skill_names: Optional[List[str]] = None, **kwargs: Any) -> None: - """ - :keyword skill_names: the names of skills to be reset. - :paramtype skill_names: list[str] - """ - super().__init__(**kwargs) - self.skill_names = skill_names - - -class SnowballTokenFilter(TokenFilter): - """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented - using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar language: The language to use. Required. Known values are: "armenian", "basque", - "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", - "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", - "spanish", "swedish", and "turkish". - :vartype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "language": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "language": {"key": "language", "type": "str"}, - } - - def __init__( - self, *, name: str, language: Union[str, "_models.SnowballTokenFilterLanguage"], **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword language: The language to use. Required. Known values are: "armenian", "basque", - "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", - "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", - "spanish", "swedish", and "turkish". - :paramtype language: str or ~azure.search.documents.indexes.models.SnowballTokenFilterLanguage - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.SnowballTokenFilter" - self.language = language - - -class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy): - """Defines a data deletion detection policy that implements a soft-deletion strategy. It - determines whether an item should be deleted based on the value of a designated 'soft delete' - column. - - All required parameters must be populated in order to send to server. 
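# A sketch of the token filters defined above: a ShingleTokenFilter with its documented
# defaults spelled out, and a SnowballTokenFilter using the "english" stemmer. These would
# typically be added to a SearchIndex's token_filters list; the names are arbitrary.
from azure.search.documents.indexes.models import ShingleTokenFilter, SnowballTokenFilter

shingles = ShingleTokenFilter(
    name="my_shingles",
    max_shingle_size=3,  # combine up to three adjacent tokens
    min_shingle_size=2,
    output_unigrams=True,
)
snowball = SnowballTokenFilter(name="my_snowball", language="english")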
- - :ivar odata_type: A URI fragment specifying the type of data deletion detection policy. - Required. - :vartype odata_type: str - :ivar soft_delete_column_name: The name of the column to use for soft-deletion detection. - :vartype soft_delete_column_name: str - :ivar soft_delete_marker_value: The marker value that identifies an item as deleted. - :vartype soft_delete_marker_value: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "soft_delete_column_name": {"key": "softDeleteColumnName", "type": "str"}, - "soft_delete_marker_value": {"key": "softDeleteMarkerValue", "type": "str"}, - } - - def __init__( - self, - *, - soft_delete_column_name: Optional[str] = None, - soft_delete_marker_value: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword soft_delete_column_name: The name of the column to use for soft-deletion detection. - :paramtype soft_delete_column_name: str - :keyword soft_delete_marker_value: The marker value that identifies an item as deleted. - :paramtype soft_delete_marker_value: str - """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy" - self.soft_delete_column_name = soft_delete_column_name - self.soft_delete_marker_value = soft_delete_marker_value - - -class SplitSkill(SearchIndexerSkill): - """A skill to split a string into chunks of text. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_language_code: A value indicating which language code to use. Default is ``en``. - Known values are: "am", "bs", "cs", "da", "de", "en", "es", "et", "fi", "fr", "he", "hi", "hr", - "hu", "id", "is", "it", "ja", "ko", "lv", "nb", "nl", "pl", "pt", "pt-br", "ru", "sk", "sl", - "sr", "sv", "tr", "ur", "zh", and "is". - :vartype default_language_code: str or - ~azure.search.documents.indexes.models.SplitSkillLanguage - :ivar text_split_mode: A value indicating which split mode to perform. Known values are: - "pages" and "sentences". - :vartype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode - :ivar maximum_page_length: The desired maximum page length. Default is 10000. - :vartype maximum_page_length: int - :ivar page_overlap_length: Only applicable when textSplitMode is set to 'pages'. 
If specified, - n+1th chunk will start with this number of characters/tokens from the end of the nth chunk. - :vartype page_overlap_length: int - :ivar maximum_pages_to_take: Only applicable when textSplitMode is set to 'pages'. If - specified, the SplitSkill will discontinue splitting after processing the first - 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are - needed from each document. - :vartype maximum_pages_to_take: int - :ivar unit: Only applies if textSplitMode is set to pages. There are two possible values. The - choice of the values will decide the length (maximumPageLength and pageOverlapLength) - measurement. The default is 'characters', which means the length will be measured by character. - Known values are: "characters" and "azureOpenAITokens". - :vartype unit: str or ~azure.search.documents.indexes.models.SplitSkillUnit - :ivar parameters: Only applies if the unit is set to azureOpenAITokens. If specified, the - splitSkill will use these parameters when performing the tokenization. The parameters are a - valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. - :vartype parameters: ~azure.search.documents.indexes.models.AzureOpenAITokenizerParameters - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "text_split_mode": {"key": "textSplitMode", "type": "str"}, - "maximum_page_length": {"key": "maximumPageLength", "type": "int"}, - "page_overlap_length": {"key": "pageOverlapLength", "type": "int"}, - "maximum_pages_to_take": {"key": "maximumPagesToTake", "type": "int"}, - "unit": {"key": "unit", "type": "str"}, - "parameters": {"key": "azureOpenAITokenizerParameters", "type": "AzureOpenAITokenizerParameters"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_language_code: Optional[Union[str, "_models.SplitSkillLanguage"]] = None, - text_split_mode: Optional[Union[str, "_models.TextSplitMode"]] = None, - maximum_page_length: Optional[int] = None, - page_overlap_length: Optional[int] = None, - maximum_pages_to_take: Optional[int] = None, - unit: Optional[Union[str, "_models.SplitSkillUnit"]] = None, - parameters: Optional["_models.AzureOpenAITokenizerParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. 
- :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_language_code: A value indicating which language code to use. Default is - ``en``. Known values are: "am", "bs", "cs", "da", "de", "en", "es", "et", "fi", "fr", "he", - "hi", "hr", "hu", "id", "is", "it", "ja", "ko", "lv", "nb", "nl", "pl", "pt", "pt-br", "ru", - "sk", "sl", "sr", "sv", "tr", "ur", "zh", and "is". - :paramtype default_language_code: str or - ~azure.search.documents.indexes.models.SplitSkillLanguage - :keyword text_split_mode: A value indicating which split mode to perform. Known values are: - "pages" and "sentences". - :paramtype text_split_mode: str or ~azure.search.documents.indexes.models.TextSplitMode - :keyword maximum_page_length: The desired maximum page length. Default is 10000. - :paramtype maximum_page_length: int - :keyword page_overlap_length: Only applicable when textSplitMode is set to 'pages'. If - specified, n+1th chunk will start with this number of characters/tokens from the end of the nth - chunk. - :paramtype page_overlap_length: int - :keyword maximum_pages_to_take: Only applicable when textSplitMode is set to 'pages'. If - specified, the SplitSkill will discontinue splitting after processing the first - 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are - needed from each document. - :paramtype maximum_pages_to_take: int - :keyword unit: Only applies if textSplitMode is set to pages. There are two possible values. - The choice of the values will decide the length (maximumPageLength and pageOverlapLength) - measurement. The default is 'characters', which means the length will be measured by character. - Known values are: "characters" and "azureOpenAITokens". - :paramtype unit: str or ~azure.search.documents.indexes.models.SplitSkillUnit - :keyword parameters: Only applies if the unit is set to azureOpenAITokens. If specified, the - splitSkill will use these parameters when performing the tokenization. The parameters are a - valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. - :paramtype parameters: ~azure.search.documents.indexes.models.AzureOpenAITokenizerParameters - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.SplitSkill" - self.default_language_code = default_language_code - self.text_split_mode = text_split_mode - self.maximum_page_length = maximum_page_length - self.page_overlap_length = page_overlap_length - self.maximum_pages_to_take = maximum_pages_to_take - self.unit = unit - self.parameters = parameters - - -class SqlIntegratedChangeTrackingPolicy(DataChangeDetectionPolicy): - """Defines a data change detection policy that captures changes using the Integrated Change - Tracking feature of Azure SQL Database. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of data change detection policy. Required. 
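# A sketch of the SplitSkill above, splitting document content into overlapping pages; the
# source path and page sizes are illustrative, and the import path follows the
# azure.search.documents.indexes.models namespace referenced in the docstrings.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    SplitSkill,
)

split = SplitSkill(
    name="#3-split",
    context="/document",
    text_split_mode="pages",
    maximum_page_length=2000,
    page_overlap_length=200,
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="textItems", target_name="pages")],
)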
- :vartype odata_type: str - """ - - _validation = { - "odata_type": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - } - - def __init__(self, **kwargs: Any) -> None: - """ """ - super().__init__(**kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy" - - -class StemmerOverrideTokenFilter(TokenFilter): - """Provides the ability to override other stemming filters with custom dictionary-based stemming. - Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with - stemmers down the chain. Must be placed before any stemming filters. This token filter is - implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar rules: A list of stemming rules in the following format: "word => stem", for example: - "ran => run". Required. - :vartype rules: list[str] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "rules": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "rules": {"key": "rules", "type": "[str]"}, - } - - def __init__(self, *, name: str, rules: List[str], **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword rules: A list of stemming rules in the following format: "word => stem", for example: - "ran => run". Required. - :paramtype rules: list[str] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.StemmerOverrideTokenFilter" - self.rules = rules - - -class StemmerTokenFilter(TokenFilter): - """Language specific stemming filter. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar language: The language to use. Required. 
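# --- Illustrative sketch, not part of the patch: the two models removed above. The change
# tracking policy takes no constructor arguments; the stemmer override filter takes its
# required name and "word => stem" rules. Names are placeholders.
from azure.search.documents.indexes.models import (
    SqlIntegratedChangeTrackingPolicy,
    StemmerOverrideTokenFilter,
)

change_policy = SqlIntegratedChangeTrackingPolicy()
stem_override = StemmerOverrideTokenFilter(
    name="my-stemmer-overrides",
    rules=["ran => run", "mice => mouse"],
)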
Known values are: "arabic", "armenian", - "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", - "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", - "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", - "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", - "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", - "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", - "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", - "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", and "turkish". - :vartype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "language": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "language": {"key": "language", "type": "str"}, - } - - def __init__(self, *, name: str, language: Union[str, "_models.StemmerTokenFilterLanguage"], **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword language: The language to use. Required. Known values are: "arabic", "armenian", - "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", - "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", - "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", - "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", - "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", - "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", - "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", - "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", and "turkish". - :paramtype language: str or ~azure.search.documents.indexes.models.StemmerTokenFilterLanguage - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.StemmerTokenFilter" - self.language = language - - -class StopAnalyzer(LexicalAnalyzer): - """Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is - implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of analyzer. Required. - :vartype odata_type: str - :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar stopwords: A list of stopwords. 
- :vartype stopwords: list[str] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "stopwords": {"key": "stopwords", "type": "[str]"}, - } - - def __init__(self, *, name: str, stopwords: Optional[List[str]] = None, **kwargs: Any) -> None: - """ - :keyword name: The name of the analyzer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword stopwords: A list of stopwords. - :paramtype stopwords: list[str] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.StopAnalyzer" - self.stopwords = stopwords - - -class StopwordsTokenFilter(TokenFilter): - """Removes stop words from a token stream. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar stopwords: The list of stopwords. This property and the stopwords list property cannot - both be set. - :vartype stopwords: list[str] - :ivar stopwords_list: A predefined list of stopwords to use. This property and the stopwords - property cannot both be set. Default is English. Known values are: "arabic", "armenian", - "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", - "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", - "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", - "sorani", "spanish", "swedish", "thai", and "turkish". - :vartype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList - :ivar ignore_case: A value indicating whether to ignore case. If true, all words are converted - to lower case first. Default is false. - :vartype ignore_case: bool - :ivar remove_trailing_stop_words: A value indicating whether to ignore the last search term if - it's a stop word. Default is true. - :vartype remove_trailing_stop_words: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "stopwords": {"key": "stopwords", "type": "[str]"}, - "stopwords_list": {"key": "stopwordsList", "type": "str"}, - "ignore_case": {"key": "ignoreCase", "type": "bool"}, - "remove_trailing_stop_words": {"key": "removeTrailing", "type": "bool"}, - } - - def __init__( - self, - *, - name: str, - stopwords: Optional[List[str]] = None, - stopwords_list: Optional[Union[str, "_models.StopwordsList"]] = None, - ignore_case: bool = False, - remove_trailing_stop_words: bool = True, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword stopwords: The list of stopwords. 
This property and the stopwords list property cannot - both be set. - :paramtype stopwords: list[str] - :keyword stopwords_list: A predefined list of stopwords to use. This property and the stopwords - property cannot both be set. Default is English. Known values are: "arabic", "armenian", - "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", - "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", - "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", - "sorani", "spanish", "swedish", "thai", and "turkish". - :paramtype stopwords_list: str or ~azure.search.documents.indexes.models.StopwordsList - :keyword ignore_case: A value indicating whether to ignore case. If true, all words are - converted to lower case first. Default is false. - :paramtype ignore_case: bool - :keyword remove_trailing_stop_words: A value indicating whether to ignore the last search term - if it's a stop word. Default is true. - :paramtype remove_trailing_stop_words: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.StopwordsTokenFilter" - self.stopwords = stopwords - self.stopwords_list = stopwords_list - self.ignore_case = ignore_case - self.remove_trailing_stop_words = remove_trailing_stop_words - - -class SynonymMap(_serialization.Model): - """Represents a synonym map definition. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to server. - - :ivar name: The name of the synonym map. Required. - :vartype name: str - :ivar format: The format of the synonym map. Only the 'solr' format is currently supported. - Required. Default value is "solr". - :vartype format: str - :ivar synonyms: A series of synonym rules in the specified synonym map format. The rules must - be separated by newlines. Required. - :vartype synonyms: str - :ivar encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data. Once you have - encrypted your data, it will always remain encrypted. The search service will ignore attempts - to set this property to null. You can change this property as needed if you want to rotate your - encryption key; Your data will be unaffected. Encryption with customer-managed keys is not - available for free search services, and is only available for paid services created on or after - January 1, 2019. - :vartype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :ivar e_tag: The ETag of the synonym map. - :vartype e_tag: str - """ - - _validation = { - "name": {"required": True}, - "format": {"required": True, "constant": True}, - "synonyms": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "format": {"key": "format", "type": "str"}, - "synonyms": {"key": "synonyms", "type": "str"}, - "encryption_key": {"key": "encryptionKey", "type": "SearchResourceEncryptionKey"}, - "e_tag": {"key": "@odata\\.etag", "type": "str"}, - } - - format = "solr" - - def __init__( - self, - *, - name: str, - synonyms: str, - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = None, - e_tag: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the synonym map. 
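# --- Illustrative sketch, not part of the patch: the StopAnalyzer and StopwordsTokenFilter
# models removed above. On the token filter, `stopwords` and `stopwords_list` are mutually
# exclusive, so only the predefined list is set here.
from azure.search.documents.indexes.models import StopAnalyzer, StopwordsTokenFilter

stop_analyzer = StopAnalyzer(name="my-stop-analyzer", stopwords=["the", "and"])
stop_filter = StopwordsTokenFilter(
    name="my-stopwords",
    stopwords_list="english",   # predefined list; do not also set `stopwords`
    ignore_case=True,
)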
Required. - :paramtype name: str - :keyword synonyms: A series of synonym rules in the specified synonym map format. The rules - must be separated by newlines. Required. - :paramtype synonyms: str - :keyword encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data. Once you have - encrypted your data, it will always remain encrypted. The search service will ignore attempts - to set this property to null. You can change this property as needed if you want to rotate your - encryption key; Your data will be unaffected. Encryption with customer-managed keys is not - available for free search services, and is only available for paid services created on or after - January 1, 2019. - :paramtype encryption_key: ~azure.search.documents.indexes.models.SearchResourceEncryptionKey - :keyword e_tag: The ETag of the synonym map. - :paramtype e_tag: str - """ - super().__init__(**kwargs) - self.name = name - self.synonyms = synonyms - self.encryption_key = encryption_key - self.e_tag = e_tag - - -class SynonymTokenFilter(TokenFilter): - """Matches single or multi-word synonyms in a token stream. This token filter is implemented using - Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar synonyms: A list of synonyms in following one of two formats: 1. incredible, - unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced - with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma - separated list of equivalent words. Set the expand option to change how this list is - interpreted. Required. - :vartype synonyms: list[str] - :ivar ignore_case: A value indicating whether to case-fold input for matching. Default is - false. - :vartype ignore_case: bool - :ivar expand: A value indicating whether all words in the list of synonyms (if => notation is - not used) will map to one another. If true, all words in the list of synonyms (if => notation - is not used) will map to one another. The following list: incredible, unbelievable, fabulous, - amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, - unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, - fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => - incredible. Default is true. - :vartype expand: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "synonyms": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "synonyms": {"key": "synonyms", "type": "[str]"}, - "ignore_case": {"key": "ignoreCase", "type": "bool"}, - "expand": {"key": "expand", "type": "bool"}, - } - - def __init__( - self, *, name: str, synonyms: List[str], ignore_case: bool = False, expand: bool = True, **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. 
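# --- Illustrative sketch, not part of the patch: the SynonymMap model removed above,
# following the generated signature shown here (synonyms as a single newline-separated
# string in the 'solr' format). The hand-written public wrapper may accept a list instead.
from azure.search.documents.indexes.models import SynonymMap

synonym_map = SynonymMap(
    name="my-synonym-map",
    synonyms="USA, United States, United States of America\nWA, Washington",
)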
It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword synonyms: A list of synonyms in following one of two formats: 1. incredible, - unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced - with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma - separated list of equivalent words. Set the expand option to change how this list is - interpreted. Required. - :paramtype synonyms: list[str] - :keyword ignore_case: A value indicating whether to case-fold input for matching. Default is - false. - :paramtype ignore_case: bool - :keyword expand: A value indicating whether all words in the list of synonyms (if => notation - is not used) will map to one another. If true, all words in the list of synonyms (if => - notation is not used) will map to one another. The following list: incredible, unbelievable, - fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, - unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, - fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => - incredible. Default is true. - :paramtype expand: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.SynonymTokenFilter" - self.synonyms = synonyms - self.ignore_case = ignore_case - self.expand = expand - - -class TagScoringFunction(ScoringFunction): - """Defines a function that boosts scores of documents with string values matching a given list of - tags. - - All required parameters must be populated in order to send to server. - - :ivar type: Indicates the type of function to use. Valid values include magnitude, freshness, - distance, and tag. The function type must be lower case. Required. - :vartype type: str - :ivar field_name: The name of the field used as input to the scoring function. Required. - :vartype field_name: str - :ivar boost: A multiplier for the raw score. Must be a positive number not equal to 1.0. - Required. - :vartype boost: float - :ivar interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :vartype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :ivar parameters: Parameter values for the tag scoring function. Required. - :vartype parameters: ~azure.search.documents.indexes.models.TagScoringParameters - """ - - _validation = { - "type": {"required": True}, - "field_name": {"required": True}, - "boost": {"required": True}, - "parameters": {"required": True}, - } - - _attribute_map = { - "type": {"key": "type", "type": "str"}, - "field_name": {"key": "fieldName", "type": "str"}, - "boost": {"key": "boost", "type": "float"}, - "interpolation": {"key": "interpolation", "type": "str"}, - "parameters": {"key": "tag", "type": "TagScoringParameters"}, - } - - def __init__( - self, - *, - field_name: str, - boost: float, - parameters: "_models.TagScoringParameters", - interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword field_name: The name of the field used as input to the scoring function. Required. - :paramtype field_name: str - :keyword boost: A multiplier for the raw score. 
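# --- Illustrative sketch, not part of the patch: the SynonymTokenFilter model removed
# above, showing both rule formats described in its docstring.
from azure.search.documents.indexes.models import SynonymTokenFilter

synonym_filter = SynonymTokenFilter(
    name="my-synonyms",
    synonyms=[
        "incredible, unbelievable, fabulous => amazing",  # rewrite form
        "couch, sofa, settee",                            # equivalence form
    ],
    ignore_case=True,
    expand=True,
)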
Must be a positive number not equal to 1.0. - Required. - :paramtype boost: float - :keyword interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Known values are: "linear", "constant", "quadratic", and - "logarithmic". - :paramtype interpolation: str or - ~azure.search.documents.indexes.models.ScoringFunctionInterpolation - :keyword parameters: Parameter values for the tag scoring function. Required. - :paramtype parameters: ~azure.search.documents.indexes.models.TagScoringParameters - """ - super().__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type: str = "tag" - self.parameters = parameters - - -class TagScoringParameters(_serialization.Model): - """Provides parameter values to a tag scoring function. - - All required parameters must be populated in order to send to server. - - :ivar tags_parameter: The name of the parameter passed in search queries to specify the list of - tags to compare against the target field. Required. - :vartype tags_parameter: str - """ - - _validation = { - "tags_parameter": {"required": True}, - } - - _attribute_map = { - "tags_parameter": {"key": "tagsParameter", "type": "str"}, - } - - def __init__(self, *, tags_parameter: str, **kwargs: Any) -> None: - """ - :keyword tags_parameter: The name of the parameter passed in search queries to specify the list - of tags to compare against the target field. Required. - :paramtype tags_parameter: str - """ - super().__init__(**kwargs) - self.tags_parameter = tags_parameter - - -class TextTranslationSkill(SearchIndexerSkill): - """A skill to translate text from one language to another. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar default_to_language_code: The language code to translate documents into for documents - that don't specify the to language explicitly. Required. 
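# --- Illustrative sketch, not part of the patch: the TagScoringFunction and
# TagScoringParameters models removed above; "mytags" is the placeholder name of the
# query parameter carrying the tag list.
from azure.search.documents.indexes.models import TagScoringFunction, TagScoringParameters

tag_boost = TagScoringFunction(
    field_name="tags",
    boost=2.0,
    interpolation="linear",
    parameters=TagScoringParameters(tags_parameter="mytags"),
)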
Known values are: "af", "ar", "bn", - "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", - "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", - "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", - "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", - "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa", and "is". - :vartype default_to_language_code: str or - ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :ivar default_from_language_code: The language code to translate documents from for documents - that don't specify the from language explicitly. Known values are: "af", "ar", "bn", "bs", - "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", "fi", - "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", - "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", - "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", - "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa", and "is". - :vartype default_from_language_code: str or - ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :ivar suggested_from: The language code to translate documents from when neither the - fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the - automatic language detection is unsuccessful. Default is ``en``. Known values are: "af", "ar", - "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", - "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", - "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", - "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", - "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa", and - "is". 
- :vartype suggested_from: str or - ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - "default_to_language_code": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_to_language_code": {"key": "defaultToLanguageCode", "type": "str"}, - "default_from_language_code": {"key": "defaultFromLanguageCode", "type": "str"}, - "suggested_from": {"key": "suggestedFrom", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - default_to_language_code: Union[str, "_models.TextTranslationSkillLanguage"], - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - default_from_language_code: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = None, - suggested_from: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword default_to_language_code: The language code to translate documents into for documents - that don't specify the to language explicitly. Required. Known values are: "af", "ar", "bn", - "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", - "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", - "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", - "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", - "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa", and "is". - :paramtype default_to_language_code: str or - ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :keyword default_from_language_code: The language code to translate documents from for - documents that don't specify the from language explicitly. 
Known values are: "af", "ar", "bn", - "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", - "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", - "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "pt-br", - "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", - "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa", and "is". - :paramtype default_from_language_code: str or - ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - :keyword suggested_from: The language code to translate documents from when neither the - fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the - automatic language detection is unsuccessful. Default is ``en``. Known values are: "af", "ar", - "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", - "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", - "tlh", "tlh-Latn", "tlh-Piqd", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", - "pt-br", "pt-PT", "otq", "ro", "ru", "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", - "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", "yua", "ga", "kn", "mi", "ml", "pa", and - "is". - :paramtype suggested_from: str or - ~azure.search.documents.indexes.models.TextTranslationSkillLanguage - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Text.TranslationSkill" - self.default_to_language_code = default_to_language_code - self.default_from_language_code = default_from_language_code - self.suggested_from = suggested_from - - -class TextWeights(_serialization.Model): - """Defines weights on index fields for which matches should boost scoring in search queries. - - All required parameters must be populated in order to send to server. - - :ivar weights: The dictionary of per-field weights to boost document scoring. The keys are - field names and the values are the weights for each field. Required. - :vartype weights: dict[str, float] - """ - - _validation = { - "weights": {"required": True}, - } - - _attribute_map = { - "weights": {"key": "weights", "type": "{float}"}, - } - - def __init__(self, *, weights: Dict[str, float], **kwargs: Any) -> None: - """ - :keyword weights: The dictionary of per-field weights to boost document scoring. The keys are - field names and the values are the weights for each field. Required. - :paramtype weights: dict[str, float] - """ - super().__init__(**kwargs) - self.weights = weights - - -class TruncateTokenFilter(TokenFilter): - """Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar length: The length at which terms will be truncated. Default and maximum is 300. 
- :vartype length: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "length": {"key": "length", "type": "int"}, - } - - def __init__(self, *, name: str, length: int = 300, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword length: The length at which terms will be truncated. Default and maximum is 300. - :paramtype length: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.TruncateTokenFilter" - self.length = length - - -class UaxUrlEmailTokenizer(LexicalTokenizer): - """Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. - :vartype odata_type: str - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes or - underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :vartype max_token_length: int - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - "max_token_length": {"maximum": 300}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "max_token_length": {"key": "maxTokenLength", "type": "int"}, - } - - def __init__(self, *, name: str, max_token_length: int = 255, **kwargs: Any) -> None: - """ - :keyword name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :paramtype name: str - :keyword max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :paramtype max_token_length: int - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.UaxUrlEmailTokenizer" - self.max_token_length = max_token_length - - -class UniqueTokenFilter(TokenFilter): - """Filters out tokens with same text as the previous token. This token filter is implemented using - Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar only_on_same_position: A value indicating whether to remove duplicates only at the same - position. Default is false. 
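# --- Illustrative sketch, not part of the patch: the TruncateTokenFilter and
# UaxUrlEmailTokenizer models removed above; both lengths are capped at 300 by the
# declared validation.
from azure.search.documents.indexes.models import TruncateTokenFilter, UaxUrlEmailTokenizer

truncate_filter = TruncateTokenFilter(name="my-truncate", length=100)
url_tokenizer = UaxUrlEmailTokenizer(name="my-url-tokenizer", max_token_length=255)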
- :vartype only_on_same_position: bool - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "only_on_same_position": {"key": "onlyOnSamePosition", "type": "bool"}, - } - - def __init__(self, *, name: str, only_on_same_position: bool = False, **kwargs: Any) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword only_on_same_position: A value indicating whether to remove duplicates only at the - same position. Default is false. - :paramtype only_on_same_position: bool - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.UniqueTokenFilter" - self.only_on_same_position = only_on_same_position - - -class VectorSearch(_serialization.Model): - """Contains configuration options related to vector search. - - :ivar profiles: Defines combinations of configurations to use with vector search. - :vartype profiles: list[~azure.search.documents.indexes.models.VectorSearchProfile] - :ivar algorithms: Contains configuration options specific to the algorithm used during indexing - or querying. - :vartype algorithms: - list[~azure.search.documents.indexes.models.VectorSearchAlgorithmConfiguration] - :ivar vectorizers: Contains configuration options on how to vectorize text vector queries. - :vartype vectorizers: list[~azure.search.documents.indexes.models.VectorSearchVectorizer] - :ivar compressions: Contains configuration options specific to the compression method used - during indexing or querying. - :vartype compressions: list[~azure.search.documents.indexes.models.VectorSearchCompression] - """ - - _attribute_map = { - "profiles": {"key": "profiles", "type": "[VectorSearchProfile]"}, - "algorithms": {"key": "algorithms", "type": "[VectorSearchAlgorithmConfiguration]"}, - "vectorizers": {"key": "vectorizers", "type": "[VectorSearchVectorizer]"}, - "compressions": {"key": "compressions", "type": "[VectorSearchCompression]"}, - } - - def __init__( - self, - *, - profiles: Optional[List["_models.VectorSearchProfile"]] = None, - algorithms: Optional[List["_models.VectorSearchAlgorithmConfiguration"]] = None, - vectorizers: Optional[List["_models.VectorSearchVectorizer"]] = None, - compressions: Optional[List["_models.VectorSearchCompression"]] = None, - **kwargs: Any - ) -> None: - """ - :keyword profiles: Defines combinations of configurations to use with vector search. - :paramtype profiles: list[~azure.search.documents.indexes.models.VectorSearchProfile] - :keyword algorithms: Contains configuration options specific to the algorithm used during - indexing or querying. - :paramtype algorithms: - list[~azure.search.documents.indexes.models.VectorSearchAlgorithmConfiguration] - :keyword vectorizers: Contains configuration options on how to vectorize text vector queries. - :paramtype vectorizers: list[~azure.search.documents.indexes.models.VectorSearchVectorizer] - :keyword compressions: Contains configuration options specific to the compression method used - during indexing or querying. 
- :paramtype compressions: list[~azure.search.documents.indexes.models.VectorSearchCompression] - """ - super().__init__(**kwargs) - self.profiles = profiles - self.algorithms = algorithms - self.vectorizers = vectorizers - self.compressions = compressions - - -class VectorSearchProfile(_serialization.Model): - """Defines a combination of configurations to use with vector search. - - All required parameters must be populated in order to send to server. - - :ivar name: The name to associate with this particular vector search profile. Required. - :vartype name: str - :ivar algorithm_configuration_name: The name of the vector search algorithm configuration that - specifies the algorithm and optional parameters. Required. - :vartype algorithm_configuration_name: str - :ivar vectorizer_name: The name of the vectorization being configured for use with vector - search. - :vartype vectorizer_name: str - :ivar compression_name: The name of the compression method configuration that specifies the - compression method and optional parameters. - :vartype compression_name: str - """ - - _validation = { - "name": {"required": True}, - "algorithm_configuration_name": {"required": True}, - } - - _attribute_map = { - "name": {"key": "name", "type": "str"}, - "algorithm_configuration_name": {"key": "algorithm", "type": "str"}, - "vectorizer_name": {"key": "vectorizer", "type": "str"}, - "compression_name": {"key": "compression", "type": "str"}, - } - - def __init__( - self, - *, - name: str, - algorithm_configuration_name: str, - vectorizer_name: Optional[str] = None, - compression_name: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name to associate with this particular vector search profile. Required. - :paramtype name: str - :keyword algorithm_configuration_name: The name of the vector search algorithm configuration - that specifies the algorithm and optional parameters. Required. - :paramtype algorithm_configuration_name: str - :keyword vectorizer_name: The name of the vectorization being configured for use with vector - search. - :paramtype vectorizer_name: str - :keyword compression_name: The name of the compression method configuration that specifies the - compression method and optional parameters. - :paramtype compression_name: str - """ - super().__init__(**kwargs) - self.name = name - self.algorithm_configuration_name = algorithm_configuration_name - self.vectorizer_name = vectorizer_name - self.compression_name = compression_name - - -class VisionVectorizeSkill(SearchIndexerSkill): - """Allows you to generate a vector embedding for a given image or text input using the Azure AI - Services Vision Vectorize API. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. 
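# --- Illustrative sketch, not part of the patch: the VectorSearch and VectorSearchProfile
# models removed above. HnswAlgorithmConfiguration is assumed from the public model
# surface; its definition is not among the classes shown in this diff.
from azure.search.documents.indexes.models import (
    HnswAlgorithmConfiguration,
    VectorSearch,
    VectorSearchProfile,
)

vector_search = VectorSearch(
    algorithms=[HnswAlgorithmConfiguration(name="my-hnsw")],
    profiles=[
        VectorSearchProfile(
            name="my-profile",
            algorithm_configuration_name="my-hnsw",   # must match an algorithm name above
        )
    ],
)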
- :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar model_version: The version of the model to use when calling the AI Services Vision - service. It will default to the latest available when not specified. Required. - :vartype model_version: str - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - "model_version": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "model_version": {"key": "modelVersion", "type": "str"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - model_version: str, - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword model_version: The version of the model to use when calling the AI Services Vision - service. It will default to the latest available when not specified. Required. - :paramtype model_version: str - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Vision.VectorizeSkill" - self.model_version = model_version - - -class WebApiSkill(SearchIndexerSkill): - """A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call - your custom code. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of skill. Required. - :vartype odata_type: str - :ivar name: The name of the skill which uniquely identifies it within the skillset. 
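# --- Illustrative sketch, not part of the patch: the VisionVectorizeSkill model removed
# above; model_version is required by the declared validation, and "2023-04-15" is only
# an example value. Entry keyword arguments are assumed from the public surface.
from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    VisionVectorizeSkill,
)

vision_skill = VisionVectorizeSkill(
    model_version="2023-04-15",
    inputs=[InputFieldMappingEntry(name="image", source="/document/normalized_images/*")],
    outputs=[OutputFieldMappingEntry(name="vector", target_name="image_vector")],
)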
A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :vartype name: str - :ivar description: The description of the skill which describes the inputs, outputs, and usage - of the skill. - :vartype description: str - :ivar context: Represents the level at which operations take place, such as the document root - or document content (for example, /document or /document/content). The default is /document. - :vartype context: str - :ivar inputs: Inputs of the skills could be a column in the source data set, or the output of - an upstream skill. Required. - :vartype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :ivar outputs: The output of a skill is either a field in a search index, or a value that can - be consumed as an input by another skill. Required. - :vartype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :ivar uri: The url for the Web API. Required. - :vartype uri: str - :ivar http_headers: The headers required to make the http request. - :vartype http_headers: dict[str, str] - :ivar http_method: The method for the http request. - :vartype http_method: str - :ivar timeout: The desired timeout for the request. Default is 30 seconds. - :vartype timeout: ~datetime.timedelta - :ivar batch_size: The desired batch size which indicates number of documents. - :vartype batch_size: int - :ivar degree_of_parallelism: If set, the number of parallel calls that can be made to the Web - API. - :vartype degree_of_parallelism: int - :ivar auth_resource_id: Applies to custom skills that connect to external code in an Azure - function or some other application that provides the transformations. This value should be the - application ID created for the function or app when it was registered with Azure Active - Directory. When specified, the custom skill connects to the function or app using a managed ID - (either system or user-assigned) of the search service and the access token of the function or - app, using this value as the resource id for creating the scope of the access token. - :vartype auth_resource_id: str - :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an - authResourceId is provided and it's not specified, the system-assigned managed identity is - used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. - If set to "none", the value of this property is cleared. 
- :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - "uri": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "uri": {"key": "uri", "type": "str"}, - "http_headers": {"key": "httpHeaders", "type": "{str}"}, - "http_method": {"key": "httpMethod", "type": "str"}, - "timeout": {"key": "timeout", "type": "duration"}, - "batch_size": {"key": "batchSize", "type": "int"}, - "degree_of_parallelism": {"key": "degreeOfParallelism", "type": "int"}, - "auth_resource_id": {"key": "authResourceId", "type": "str"}, - "auth_identity": {"key": "authIdentity", "type": "SearchIndexerDataIdentity"}, - } - - def __init__( - self, - *, - inputs: List["_models.InputFieldMappingEntry"], - outputs: List["_models.OutputFieldMappingEntry"], - uri: str, - name: Optional[str] = None, - description: Optional[str] = None, - context: Optional[str] = None, - http_headers: Optional[Dict[str, str]] = None, - http_method: Optional[str] = None, - timeout: Optional[datetime.timedelta] = None, - batch_size: Optional[int] = None, - degree_of_parallelism: Optional[int] = None, - auth_resource_id: Optional[str] = None, - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the skill which uniquely identifies it within the skillset. A skill - with no name defined will be given a default name of its 1-based index in the skills array, - prefixed with the character '#'. - :paramtype name: str - :keyword description: The description of the skill which describes the inputs, outputs, and - usage of the skill. - :paramtype description: str - :keyword context: Represents the level at which operations take place, such as the document - root or document content (for example, /document or /document/content). The default is - /document. - :paramtype context: str - :keyword inputs: Inputs of the skills could be a column in the source data set, or the output - of an upstream skill. Required. - :paramtype inputs: list[~azure.search.documents.indexes.models.InputFieldMappingEntry] - :keyword outputs: The output of a skill is either a field in a search index, or a value that - can be consumed as an input by another skill. Required. - :paramtype outputs: list[~azure.search.documents.indexes.models.OutputFieldMappingEntry] - :keyword uri: The url for the Web API. Required. - :paramtype uri: str - :keyword http_headers: The headers required to make the http request. - :paramtype http_headers: dict[str, str] - :keyword http_method: The method for the http request. - :paramtype http_method: str - :keyword timeout: The desired timeout for the request. Default is 30 seconds. - :paramtype timeout: ~datetime.timedelta - :keyword batch_size: The desired batch size which indicates number of documents. - :paramtype batch_size: int - :keyword degree_of_parallelism: If set, the number of parallel calls that can be made to the - Web API. 
- :paramtype degree_of_parallelism: int - :keyword auth_resource_id: Applies to custom skills that connect to external code in an Azure - function or some other application that provides the transformations. This value should be the - application ID created for the function or app when it was registered with Azure Active - Directory. When specified, the custom skill connects to the function or app using a managed ID - (either system or user-assigned) of the search service and the access token of the function or - app, using this value as the resource id for creating the scope of the access token. - :paramtype auth_resource_id: str - :keyword auth_identity: The user-assigned managed identity used for outbound connections. If an - authResourceId is provided and it's not specified, the system-assigned managed identity is - used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. - If set to "none", the value of this property is cleared. - :paramtype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - super().__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type: str = "#Microsoft.Skills.Custom.WebApiSkill" - self.uri = uri - self.http_headers = http_headers - self.http_method = http_method - self.timeout = timeout - self.batch_size = batch_size - self.degree_of_parallelism = degree_of_parallelism - self.auth_resource_id = auth_resource_id - self.auth_identity = auth_identity - - -class WebApiVectorizer(VectorSearchVectorizer): - """Specifies a user-defined vectorizer for generating the vector embedding of a query string. - Integration of an external vectorizer is achieved using the custom Web API interface of a - skillset. - - All required parameters must be populated in order to send to server. - - :ivar vectorizer_name: The name to associate with this particular vectorization method. - Required. - :vartype vectorizer_name: str - :ivar kind: The name of the kind of vectorization method being configured for use with vector - search. Required. Known values are: "azureOpenAI", "customWebApi", "aiServicesVision", and - "aml". - :vartype kind: str or ~azure.search.documents.indexes.models.VectorSearchVectorizerKind - :ivar web_api_parameters: Specifies the properties of the user-defined vectorizer. - :vartype web_api_parameters: ~azure.search.documents.indexes.models.WebApiVectorizerParameters - """ - - _validation = { - "vectorizer_name": {"required": True}, - "kind": {"required": True}, - } - - _attribute_map = { - "vectorizer_name": {"key": "name", "type": "str"}, - "kind": {"key": "kind", "type": "str"}, - "web_api_parameters": {"key": "customWebApiParameters", "type": "WebApiVectorizerParameters"}, - } - - def __init__( - self, - *, - vectorizer_name: str, - web_api_parameters: Optional["_models.WebApiVectorizerParameters"] = None, - **kwargs: Any - ) -> None: - """ - :keyword vectorizer_name: The name to associate with this particular vectorization method. - Required. - :paramtype vectorizer_name: str - :keyword web_api_parameters: Specifies the properties of the user-defined vectorizer. 
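# --- Illustrative sketch, not part of the patch: the WebApiSkill model removed above;
# the endpoint URL is a placeholder and the entry keyword arguments are assumed from the
# public surface.
import datetime

from azure.search.documents.indexes.models import (
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
    WebApiSkill,
)

custom_skill = WebApiSkill(
    uri="https://example.com/api/enrich",
    http_method="POST",
    timeout=datetime.timedelta(seconds=90),
    batch_size=10,
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="score", target_name="custom_score")],
)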
- :paramtype web_api_parameters: - ~azure.search.documents.indexes.models.WebApiVectorizerParameters - """ - super().__init__(vectorizer_name=vectorizer_name, **kwargs) - self.kind: str = "customWebApi" - self.web_api_parameters = web_api_parameters - - -class WebApiVectorizerParameters(_serialization.Model): - """Specifies the properties for connecting to a user-defined vectorizer. - - :ivar url: The URI of the Web API providing the vectorizer. - :vartype url: str - :ivar http_headers: The headers required to make the HTTP request. - :vartype http_headers: dict[str, str] - :ivar http_method: The method for the HTTP request. - :vartype http_method: str - :ivar timeout: The desired timeout for the request. Default is 30 seconds. - :vartype timeout: ~datetime.timedelta - :ivar auth_resource_id: Applies to custom endpoints that connect to external code in an Azure - function or some other application that provides the transformations. This value should be the - application ID created for the function or app when it was registered with Azure Active - Directory. When specified, the vectorization connects to the function or app using a managed ID - (either system or user-assigned) of the search service and the access token of the function or - app, using this value as the resource id for creating the scope of the access token. - :vartype auth_resource_id: str - :ivar auth_identity: The user-assigned managed identity used for outbound connections. If an - authResourceId is provided and it's not specified, the system-assigned managed identity is - used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. - If set to "none", the value of this property is cleared. - :vartype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - - _attribute_map = { - "url": {"key": "uri", "type": "str"}, - "http_headers": {"key": "httpHeaders", "type": "{str}"}, - "http_method": {"key": "httpMethod", "type": "str"}, - "timeout": {"key": "timeout", "type": "duration"}, - "auth_resource_id": {"key": "authResourceId", "type": "str"}, - "auth_identity": {"key": "authIdentity", "type": "SearchIndexerDataIdentity"}, - } - - def __init__( - self, - *, - url: Optional[str] = None, - http_headers: Optional[Dict[str, str]] = None, - http_method: Optional[str] = None, - timeout: Optional[datetime.timedelta] = None, - auth_resource_id: Optional[str] = None, - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = None, - **kwargs: Any - ) -> None: - """ - :keyword url: The URI of the Web API providing the vectorizer. - :paramtype url: str - :keyword http_headers: The headers required to make the HTTP request. - :paramtype http_headers: dict[str, str] - :keyword http_method: The method for the HTTP request. - :paramtype http_method: str - :keyword timeout: The desired timeout for the request. Default is 30 seconds. - :paramtype timeout: ~datetime.timedelta - :keyword auth_resource_id: Applies to custom endpoints that connect to external code in an - Azure function or some other application that provides the transformations. This value should - be the application ID created for the function or app when it was registered with Azure Active - Directory. When specified, the vectorization connects to the function or app using a managed ID - (either system or user-assigned) of the search service and the access token of the function or - app, using this value as the resource id for creating the scope of the access token. 
- :paramtype auth_resource_id: str - :keyword auth_identity: The user-assigned managed identity used for outbound connections. If an - authResourceId is provided and it's not specified, the system-assigned managed identity is - used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. - If set to "none", the value of this property is cleared. - :paramtype auth_identity: ~azure.search.documents.indexes.models.SearchIndexerDataIdentity - """ - super().__init__(**kwargs) - self.url = url - self.http_headers = http_headers - self.http_method = http_method - self.timeout = timeout - self.auth_resource_id = auth_resource_id - self.auth_identity = auth_identity - - -class WordDelimiterTokenFilter(TokenFilter): - """Splits words into subwords and performs optional transformations on subword groups. This token - filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to server. - - :ivar odata_type: A URI fragment specifying the type of token filter. Required. - :vartype odata_type: str - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is limited to 128 - characters. Required. - :vartype name: str - :ivar generate_word_parts: A value indicating whether to generate part words. If set, causes - parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is - true. - :vartype generate_word_parts: bool - :ivar generate_number_parts: A value indicating whether to generate number subwords. Default is - true. - :vartype generate_number_parts: bool - :ivar catenate_words: A value indicating whether maximum runs of word parts will be catenated. - For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default is false. - :vartype catenate_words: bool - :ivar catenate_numbers: A value indicating whether maximum runs of number parts will be - catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. - :vartype catenate_numbers: bool - :ivar catenate_all: A value indicating whether all subword parts will be catenated. For - example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. - :vartype catenate_all: bool - :ivar split_on_case_change: A value indicating whether to split words on caseChange. For - example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. - :vartype split_on_case_change: bool - :ivar preserve_original: A value indicating whether original words will be preserved and added - to the subword list. Default is false. - :vartype preserve_original: bool - :ivar split_on_numerics: A value indicating whether to split on numbers. For example, if this - is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. - :vartype split_on_numerics: bool - :ivar stem_english_possessive: A value indicating whether to remove trailing "'s" for each - subword. Default is true. - :vartype stem_english_possessive: bool - :ivar protected_words: A list of tokens to protect from being delimited. 
- :vartype protected_words: list[str] - """ - - _validation = { - "odata_type": {"required": True}, - "name": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "generate_word_parts": {"key": "generateWordParts", "type": "bool"}, - "generate_number_parts": {"key": "generateNumberParts", "type": "bool"}, - "catenate_words": {"key": "catenateWords", "type": "bool"}, - "catenate_numbers": {"key": "catenateNumbers", "type": "bool"}, - "catenate_all": {"key": "catenateAll", "type": "bool"}, - "split_on_case_change": {"key": "splitOnCaseChange", "type": "bool"}, - "preserve_original": {"key": "preserveOriginal", "type": "bool"}, - "split_on_numerics": {"key": "splitOnNumerics", "type": "bool"}, - "stem_english_possessive": {"key": "stemEnglishPossessive", "type": "bool"}, - "protected_words": {"key": "protectedWords", "type": "[str]"}, - } - - def __init__( - self, - *, - name: str, - generate_word_parts: bool = True, - generate_number_parts: bool = True, - catenate_words: bool = False, - catenate_numbers: bool = False, - catenate_all: bool = False, - split_on_case_change: bool = True, - preserve_original: bool = False, - split_on_numerics: bool = True, - stem_english_possessive: bool = True, - protected_words: Optional[List[str]] = None, - **kwargs: Any - ) -> None: - """ - :keyword name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. Required. - :paramtype name: str - :keyword generate_word_parts: A value indicating whether to generate part words. If set, causes - parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is - true. - :paramtype generate_word_parts: bool - :keyword generate_number_parts: A value indicating whether to generate number subwords. Default - is true. - :paramtype generate_number_parts: bool - :keyword catenate_words: A value indicating whether maximum runs of word parts will be - catenated. For example, if this is set to true, "Azure-Search" becomes "AzureSearch". Default - is false. - :paramtype catenate_words: bool - :keyword catenate_numbers: A value indicating whether maximum runs of number parts will be - catenated. For example, if this is set to true, "1-2" becomes "12". Default is false. - :paramtype catenate_numbers: bool - :keyword catenate_all: A value indicating whether all subword parts will be catenated. For - example, if this is set to true, "Azure-Search-1" becomes "AzureSearch1". Default is false. - :paramtype catenate_all: bool - :keyword split_on_case_change: A value indicating whether to split words on caseChange. For - example, if this is set to true, "AzureSearch" becomes "Azure" "Search". Default is true. - :paramtype split_on_case_change: bool - :keyword preserve_original: A value indicating whether original words will be preserved and - added to the subword list. Default is false. - :paramtype preserve_original: bool - :keyword split_on_numerics: A value indicating whether to split on numbers. For example, if - this is set to true, "Azure1Search" becomes "Azure" "1" "Search". Default is true. - :paramtype split_on_numerics: bool - :keyword stem_english_possessive: A value indicating whether to remove trailing "'s" for each - subword. Default is true. - :paramtype stem_english_possessive: bool - :keyword protected_words: A list of tokens to protect from being delimited. 
- :paramtype protected_words: list[str] - """ - super().__init__(name=name, **kwargs) - self.odata_type: str = "#Microsoft.Azure.Search.WordDelimiterTokenFilter" - self.generate_word_parts = generate_word_parts - self.generate_number_parts = generate_number_parts - self.catenate_words = catenate_words - self.catenate_numbers = catenate_numbers - self.catenate_all = catenate_all - self.split_on_case_change = split_on_case_change - self.preserve_original = preserve_original - self.split_on_numerics = split_on_numerics - self.stem_english_possessive = stem_english_possessive - self.protected_words = protected_words diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_patch.py deleted file mode 100644 index f7dd32510333..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/models/_patch.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List - -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py deleted file mode 100644 index 27f37f309348..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -# pylint: disable=wrong-import-position - -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from ._patch import * # pylint: disable=unused-wildcard-import - -from ._data_sources_operations import DataSourcesOperations # type: ignore -from ._indexers_operations import IndexersOperations # type: ignore -from ._skillsets_operations import SkillsetsOperations # type: ignore -from ._synonym_maps_operations import SynonymMapsOperations # type: ignore -from ._indexes_operations import IndexesOperations # type: ignore -from ._aliases_operations import AliasesOperations # type: ignore -from ._search_service_client_operations import SearchServiceClientOperationsMixin # type: ignore - -from ._patch import __all__ as _patch_all -from ._patch import * -from ._patch import patch_sdk as _patch_sdk - -__all__ = [ - "DataSourcesOperations", - "IndexersOperations", - "SkillsetsOperations", - "SynonymMapsOperations", - "IndexesOperations", - "AliasesOperations", - "SearchServiceClientOperationsMixin", -] -__all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore -_patch_sdk() diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py deleted file mode 100644 index 82310d0a8355..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_aliases_operations.py +++ /dev/null @@ -1,756 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload -import urllib.parse - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. 
import models as _models -from .._serialization import Serializer - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/aliases") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_list_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/aliases") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_create_or_update_request( - alias_name: str, - *, - prefer: Union[str, _models.Enum0], - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/aliases('{aliasName}')") - path_format_arguments = { - "aliasName": _SERIALIZER.url("alias_name", alias_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = 
_SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_delete_request( - alias_name: str, - *, - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/aliases('{aliasName}')") - path_format_arguments = { - "aliasName": _SERIALIZER.url("alias_name", alias_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_request(alias_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/aliases('{aliasName}')") - path_format_arguments = { - "aliasName": _SERIALIZER.url("alias_name", alias_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -class AliasesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.SearchServiceClient`'s - :attr:`aliases` attribute. 
- """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - def create( - self, - alias: _models.SearchAlias, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Alias - - :param alias: The definition of the alias to create. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - alias: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Alias - - :param alias: The definition of the alias to create. Required. - :type alias: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create( - self, - alias: Union[_models.SearchAlias, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Alias - - :param alias: The definition of the alias to create. Is either a SearchAlias type or a - IO[bytes] type. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(alias, (IOBase, bytes)): - _content = alias - else: - _json = self._serialize.body(alias, "SearchAlias") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> Iterable["_models.SearchAlias"]: - """Lists all aliases available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Aliases - - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: An iterator like instance of either SearchAlias or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.SearchAlias] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListAliasesResult] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request.method = "GET" - return _request - - def extract_data(pipeline_response): - deserialized = self._deserialize("ListAliasesResult", pipeline_response) - list_of_elem = deserialized.aliases - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @overload - def create_or_update( - self, - alias_name: str, - prefer: Union[str, _models.Enum0], - alias: _models.SearchAlias, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias or updates an alias if it already exists. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Alias - - :param alias_name: The definition of the alias to create or update. Required. - :type alias_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param alias: The definition of the alias to create or update. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_update( - self, - alias_name: str, - prefer: Union[str, _models.Enum0], - alias: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias or updates an alias if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Alias - - :param alias_name: The definition of the alias to create or update. Required. - :type alias_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param alias: The definition of the alias to create or update. Required. - :type alias: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". 
- :paramtype content_type: str - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_update( - self, - alias_name: str, - prefer: Union[str, _models.Enum0], - alias: Union[_models.SearchAlias, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchAlias: - """Creates a new search alias or updates an alias if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Alias - - :param alias_name: The definition of the alias to create or update. Required. - :type alias_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param alias: The definition of the alias to create or update. Is either a SearchAlias type or - a IO[bytes] type. Required. - :type alias: ~azure.search.documents.indexes.models.SearchAlias or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(alias, (IOBase, bytes)): - _content = alias - else: - _json = self._serialize.body(alias, "SearchAlias") - - _request = build_create_or_update_request( - alias_name=alias_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - 
_request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - alias_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a search alias and its associated mapping to an index. This operation is permanent, - with no recovery option. The mapped index is untouched by this operation. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Alias - - :param alias_name: The name of the alias to delete. Required. - :type alias_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - alias_name=alias_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def get( - self, alias_name: str, request_options: 
Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchAlias: - """Retrieves an alias definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Alias - - :param alias_name: The name of the alias to retrieve. Required. - :type alias_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchAlias or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchAlias - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - alias_name=alias_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchAlias", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py deleted file mode 100644 index 828e283ddf46..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_data_sources_operations.py +++ /dev/null @@ -1,750 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
-# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. import models as _models -from .._serialization import Serializer - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_create_or_update_request( - data_source_name: str, - *, - prefer: Union[str, _models.Enum0], - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/datasources('{dataSourceName}')") - path_format_arguments = { - "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if skip_indexer_reset_requirement_for_cache is not None: - _params["ignoreResetRequirements"] = _SERIALIZER.query( - "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" - ) - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_delete_request( - data_source_name: str, - *, - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - 
- # Construct URL - _url = kwargs.pop("template_url", "/datasources('{dataSourceName}')") - path_format_arguments = { - "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_request( - data_source_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/datasources('{dataSourceName}')") - path_format_arguments = { - "dataSourceName": _SERIALIZER.url("data_source_name", data_source_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_list_request( - *, select: Optional[str] = None, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/datasources") - - # Construct parameters - if select is not None: - _params["$select"] = _SERIALIZER.query("select", select, "str") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", 
None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/datasources") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -class DataSourcesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.SearchServiceClient`'s - :attr:`data_sources` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - def create_or_update( - self, - data_source_name: str, - prefer: Union[str, _models.Enum0], - data_source: _models.SearchIndexerDataSource, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource or updates a datasource if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Data-Source - - :param data_source_name: The name of the datasource to create or update. Required. - :type data_source_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param data_source: The definition of the datasource to create or update. Required. - :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_update( - self, - data_source_name: str, - prefer: Union[str, _models.Enum0], - data_source: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource or updates a datasource if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Data-Source - - :param data_source_name: The name of the datasource to create or update. Required. - :type data_source_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param data_source: The definition of the datasource to create or update. Required. - :type data_source: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_update( - self, - data_source_name: str, - prefer: Union[str, _models.Enum0], - data_source: Union[_models.SearchIndexerDataSource, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource or updates a datasource if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Data-Source - - :param data_source_name: The name of the datasource to create or update. Required. - :type data_source_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param data_source: The definition of the datasource to create or update. Is either a - SearchIndexerDataSource type or a IO[bytes] type. Required. 
- :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(data_source, (IOBase, bytes)): - _content = data_source - else: - _json = self._serialize.body(data_source, "SearchIndexerDataSource") - - _request = build_create_or_update_request( - data_source_name=data_source_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - data_source_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: 
Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Data-Source - - :param data_source_name: The name of the datasource to delete. Required. - :type data_source_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - data_source_name=data_source_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def get( - self, data_source_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Retrieves a datasource definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Data-Source - - :param data_source_name: The name of the datasource to retrieve. Required. - :type data_source_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - data_source_name=data_source_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListDataSourcesResult: - """Lists all datasources available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Data-Sources - - :param select: Selects which top-level properties of the data sources to retrieve. Specified as - a comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListDataSourcesResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListDataSourcesResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListDataSourcesResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListDataSourcesResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create( - self, - data_source: _models.SearchIndexerDataSource, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Data-Source - - :param data_source: The definition of the datasource to create. Required. - :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - data_source: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Data-Source - - :param data_source: The definition of the datasource to create. Required. - :type data_source: IO[bytes] - :param request_options: Parameter group. 
Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create( - self, - data_source: Union[_models.SearchIndexerDataSource, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerDataSource: - """Creates a new datasource. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Data-Source - - :param data_source: The definition of the datasource to create. Is either a - SearchIndexerDataSource type or a IO[bytes] type. Required. - :type data_source: ~azure.search.documents.indexes.models.SearchIndexerDataSource or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerDataSource or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSource - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(data_source, (IOBase, bytes)): - _content = data_source - else: - _json = self._serialize.body(data_source, "SearchIndexerDataSource") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerDataSource", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py deleted file mode 100644 index 0b4aabbb85a2..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexers_operations.py +++ /dev/null @@ -1,1226 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. import models as _models -from .._serialization import Serializer - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_reset_request( - indexer_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers('{indexerName}')/search.reset") - path_format_arguments = { - "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_reset_docs_request( - indexer_name: str, *, overwrite: bool = False, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers('{indexerName}')/search.resetdocs") - 
path_format_arguments = { - "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - if overwrite is not None: - _params["overwrite"] = _SERIALIZER.query("overwrite", overwrite, "bool") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_run_request(indexer_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers('{indexerName}')/search.run") - path_format_arguments = { - "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_create_or_update_request( - indexer_name: str, - *, - prefer: Union[str, _models.Enum0], - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers('{indexerName}')") - path_format_arguments = { - "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if skip_indexer_reset_requirement_for_cache is not None: - _params["ignoreResetRequirements"] = _SERIALIZER.query( - "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" - ) - if disable_cache_reprocessing_change_detection is not None: - _params["disableCacheReprocessingChangeDetection"] = _SERIALIZER.query( - "disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, 
"bool" - ) - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_delete_request( - indexer_name: str, - *, - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers('{indexerName}')") - path_format_arguments = { - "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_request(indexer_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers('{indexerName}')") - path_format_arguments = { - "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_list_request( - *, select: Optional[str] = None, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers") - - # Construct parameters - if select is not None: - _params["$select"] = _SERIALIZER.query("select", select, "str") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_status_request( - indexer_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexers('{indexerName}')/search.status") - path_format_arguments = { - "indexerName": _SERIALIZER.url("indexer_name", indexer_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -class IndexersOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.SearchServiceClient`'s - :attr:`indexers` attribute. 
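# A minimal usage sketch, assuming the public SearchIndexerClient wrapper that
# fronts the generated indexer operations defined below (reset, run, and status
# retrieval); the endpoint, key, and indexer name are placeholders.
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes import SearchIndexerClient

indexer_client = SearchIndexerClient(
    "https://<service>.search.windows.net", AzureKeyCredential("<api-key>")
)
indexer_client.reset_indexer("hotels-indexer")   # clears the indexer's change-tracking state
indexer_client.run_indexer("hotels-indexer")     # queues an on-demand execution
status = indexer_client.get_indexer_status("hotels-indexer")
print(status.status, status.last_result.status if status.last_result else None)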
- """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @distributed_trace - def reset( # pylint: disable=inconsistent-return-statements - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> None: - """Resets the change tracking state associated with an indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Reset-Indexer - - :param indexer_name: The name of the indexer to reset. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_reset_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - def reset_docs( - self, - indexer_name: str, - overwrite: bool = False, - request_options: Optional[_models.RequestOptions] = None, - keys_or_ids: Optional[_models.DocumentKeysOrIds] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - .. seealso:: - - https://aka.ms/reset-documents - - :param indexer_name: The name of the indexer to reset documents for. Required. - :type indexer_name: str - :param overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :type overwrite: bool - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :param keys_or_ids: Default value is None. - :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def reset_docs( - self, - indexer_name: str, - overwrite: bool = False, - request_options: Optional[_models.RequestOptions] = None, - keys_or_ids: Optional[IO[bytes]] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - .. seealso:: - - https://aka.ms/reset-documents - - :param indexer_name: The name of the indexer to reset documents for. Required. - :type indexer_name: str - :param overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :type overwrite: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :param keys_or_ids: Default value is None. - :type keys_or_ids: IO[bytes] - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def reset_docs( # pylint: disable=inconsistent-return-statements - self, - indexer_name: str, - overwrite: bool = False, - request_options: Optional[_models.RequestOptions] = None, - keys_or_ids: Optional[Union[_models.DocumentKeysOrIds, IO[bytes]]] = None, - **kwargs: Any - ) -> None: - """Resets specific documents in the datasource to be selectively re-ingested by the indexer. - - .. seealso:: - - https://aka.ms/reset-documents - - :param indexer_name: The name of the indexer to reset documents for. Required. - :type indexer_name: str - :param overwrite: If false, keys or ids will be appended to existing ones. If true, only the - keys or ids in this payload will be queued to be re-ingested. Default value is False. - :type overwrite: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :param keys_or_ids: Is either a DocumentKeysOrIds type or a IO[bytes] type. Default value is - None. 
- :type keys_or_ids: ~azure.search.documents.indexes.models.DocumentKeysOrIds or IO[bytes] - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(keys_or_ids, (IOBase, bytes)): - _content = keys_or_ids - else: - if keys_or_ids is not None: - _json = self._serialize.body(keys_or_ids, "DocumentKeysOrIds") - else: - _json = None - - _request = build_reset_docs_request( - indexer_name=indexer_name, - overwrite=overwrite, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def run( # pylint: disable=inconsistent-return-statements - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> None: - """Runs an indexer on-demand. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Run-Indexer - - :param indexer_name: The name of the indexer to run. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_run_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [202]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @overload - def create_or_update( - self, - indexer_name: str, - prefer: Union[str, _models.Enum0], - indexer: _models.SearchIndexer, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer or updates an indexer if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer_name: The name of the indexer to create or update. Required. - :type indexer_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param indexer: The definition of the indexer to create or update. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. 
Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_update( - self, - indexer_name: str, - prefer: Union[str, _models.Enum0], - indexer: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer or updates an indexer if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer_name: The name of the indexer to create or update. Required. - :type indexer_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param indexer: The definition of the indexer to create or update. Required. - :type indexer: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_update( - self, - indexer_name: str, - prefer: Union[str, _models.Enum0], - indexer: Union[_models.SearchIndexer, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer or updates an indexer if it already exists. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer_name: The name of the indexer to create or update. Required. - :type indexer_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param indexer: The definition of the indexer to create or update. Is either a SearchIndexer - type or a IO[bytes] type. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(indexer, (IOBase, bytes)): - _content = indexer - else: - _json = self._serialize.body(indexer, "SearchIndexer") - - _request = build_create_or_update_request( - indexer_name=indexer_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = 
pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - indexer_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes an indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Indexer - - :param indexer_name: The name of the indexer to delete. Required. - :type indexer_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def get( - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndexer: - """Retrieves an indexer definition. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Indexer - - :param indexer_name: The name of the indexer to retrieve. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListIndexersResult: - """Lists all indexers available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Indexers - - :param select: Selects which top-level properties of the indexers to retrieve. Specified as a - comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListIndexersResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListIndexersResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListIndexersResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListIndexersResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create( - self, - indexer: _models.SearchIndexer, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer: The definition of the indexer to create. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - indexer: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer: The definition of the indexer to create. Required. - :type indexer: IO[bytes] - :param request_options: Parameter group. Default value is None. 
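# A hedged sketch of the indexer create path documented in these overloads,
# again going through the public SearchIndexerClient rather than the generated
# layer; the datasource and target index named below are assumed to exist
# already and are placeholders.
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes import SearchIndexerClient
from azure.search.documents.indexes.models import SearchIndexer

indexer_client = SearchIndexerClient(
    "https://<service>.search.windows.net", AzureKeyCredential("<api-key>")
)
indexer = SearchIndexer(
    name="hotels-indexer",
    data_source_name="hotels-ds",        # existing datasource (placeholder name)
    target_index_name="hotels-index",    # existing index (placeholder name)
)
created = indexer_client.create_indexer(indexer)  # POST /indexers, expects 201
print(created.name)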
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create( - self, - indexer: Union[_models.SearchIndexer, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexer: - """Creates a new indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Indexer - - :param indexer: The definition of the indexer to create. Is either a SearchIndexer type or a - IO[bytes] type. Required. - :type indexer: ~azure.search.documents.indexes.models.SearchIndexer or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexer or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexer - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(indexer, (IOBase, bytes)): - _content = indexer - else: - _json = self._serialize.body(indexer, "SearchIndexer") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexer", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_status( - self, indexer_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndexerStatus: - """Returns the 
current status and execution history of an indexer. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Indexer-Status - - :param indexer_name: The name of the indexer for which to retrieve status. Required. - :type indexer_name: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerStatus or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerStatus - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexerStatus] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_status_request( - indexer_name=indexer_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerStatus", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py deleted file mode 100644 index 2005b00aad20..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_indexes_operations.py +++ /dev/null @@ -1,1059 +0,0 @@ -# pylint: disable=too-many-lines -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. 
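get_status() above wraps a GET against the indexer status endpoint and deserializes a SearchIndexerStatus. A hedged sketch of the corresponding public-client call, useful for polling after a run; all names are placeholders:

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents.indexes import SearchIndexerClient

    client = SearchIndexerClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))

    status = client.get_indexer_status("hotels-indexer")
    print(status.status)                      # overall indexer status
    if status.last_result is not None:        # most recent execution, if any
        print(status.last_result.status, status.last_result.error_message)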
-# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload -import urllib.parse - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.paging import ItemPaged -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. import models as _models -from .._serialization import Serializer - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexes") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_list_request( - *, select: Optional[str] = None, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexes") - - # Construct parameters - if select is not None: - _params["$select"] = _SERIALIZER.query("select", select, "str") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_create_or_update_request( - index_name: str, - *, - prefer: Union[str, _models.Enum0], - allow_index_downtime: Optional[bool] = None, - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = 
case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexes('{indexName}')") - path_format_arguments = { - "indexName": _SERIALIZER.url("index_name", index_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - if allow_index_downtime is not None: - _params["allowIndexDowntime"] = _SERIALIZER.query("allow_index_downtime", allow_index_downtime, "bool") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_delete_request( - index_name: str, - *, - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexes('{indexName}')") - path_format_arguments = { - "indexName": _SERIALIZER.url("index_name", index_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_request(index_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexes('{indexName}')") - path_format_arguments = { - 
"indexName": _SERIALIZER.url("index_name", index_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_statistics_request( - index_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexes('{indexName}')/search.stats") - path_format_arguments = { - "indexName": _SERIALIZER.url("index_name", index_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_analyze_request( - index_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/indexes('{indexName}')/search.analyze") - path_format_arguments = { - "indexName": _SERIALIZER.url("index_name", index_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -class IndexesOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. - - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.SearchServiceClient`'s - :attr:`indexes` attribute. 
- """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - def create( - self, - index: _models.SearchIndex, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Index - - :param index: The definition of the index to create. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - index: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Index - - :param index: The definition of the index to create. Required. - :type index: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create( - self, - index: Union[_models.SearchIndex, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Index - - :param index: The definition of the index to create. Is either a SearchIndex type or a - IO[bytes] type. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(index, (IOBase, bytes)): - _content = index - else: - _json = self._serialize.body(index, "SearchIndex") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> Iterable["_models.SearchIndex"]: - """Lists all indexes available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Indexes - - :param select: Selects which top-level properties of the index definitions to retrieve. - Specified as a comma-separated list of JSON property names, or '*' for all properties. The - default is all properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
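The implementation above serializes the SearchIndex body, sends the POST produced by build_create_request, and treats only HTTP 201 as success. The same operation through the public wrapper, with an illustrative two-field schema:

    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents.indexes import SearchIndexClient
    from azure.search.documents.indexes.models import SearchFieldDataType, SearchIndex, SearchableField, SimpleField

    client = SearchIndexClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))

    index = SearchIndex(
        name="hotels-index",
        fields=[
            SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
            SearchableField(name="description"),   # searchable Edm.String field
        ],
    )
    created = client.create_index(index)  # maps to the generated create() shown above
    print(created.name)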
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: An iterator like instance of either SearchIndex or the result of cls(response) - :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.indexes.models.SearchIndex] - :raises ~azure.core.exceptions.HttpResponseError: - """ - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListIndexesResult] = kwargs.pop("cls", None) - - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - def prepare_request(next_link=None): - if not next_link: - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - else: - # make call to next link with the client's api-version - _parsed_next_link = urllib.parse.urlparse(next_link) - _next_request_params = case_insensitive_dict( - { - key: [urllib.parse.quote(v) for v in value] - for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() - } - ) - _next_request_params["api-version"] = self._config.api_version - _request = HttpRequest( - "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params - ) - path_format_arguments = { - "endpoint": self._serialize.url( - "self._config.endpoint", self._config.endpoint, "str", skip_quote=True - ), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request.method = "GET" - return _request - - def extract_data(pipeline_response): - deserialized = self._deserialize("ListIndexesResult", pipeline_response) - list_of_elem = deserialized.indexes - if cls: - list_of_elem = cls(list_of_elem) # type: ignore - return None, iter(list_of_elem) - - def get_next(next_link=None): - _request = prepare_request(next_link) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - return pipeline_response - - return ItemPaged(get_next, extract_data) - - @overload - def create_or_update( - self, - index_name: str, - prefer: Union[str, _models.Enum0], - index: _models.SearchIndex, - allow_index_downtime: Optional[bool] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index or updates an index if it already exists. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Index - - :param index_name: The definition of the index to create or update. Required. - :type index_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param index: The definition of the index to create or update. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex - :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters - to be added to an index by taking the index offline for at least a few seconds. This - temporarily causes indexing and query requests to fail. Performance and write availability of - the index can be impaired for several minutes after the index is updated, or longer for very - large indexes. Default value is None. - :type allow_index_downtime: bool - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_update( - self, - index_name: str, - prefer: Union[str, _models.Enum0], - index: IO[bytes], - allow_index_downtime: Optional[bool] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index or updates an index if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Index - - :param index_name: The definition of the index to create or update. Required. - :type index_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param index: The definition of the index to create or update. Required. - :type index: IO[bytes] - :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters - to be added to an index by taking the index offline for at least a few seconds. This - temporarily causes indexing and query requests to fail. Performance and write availability of - the index can be impaired for several minutes after the index is updated, or longer for very - large indexes. Default value is None. - :type allow_index_downtime: bool - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. 
The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_update( - self, - index_name: str, - prefer: Union[str, _models.Enum0], - index: Union[_models.SearchIndex, IO[bytes]], - allow_index_downtime: Optional[bool] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndex: - """Creates a new search index or updates an index if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Index - - :param index_name: The definition of the index to create or update. Required. - :type index_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param index: The definition of the index to create or update. Is either a SearchIndex type or - a IO[bytes] type. Required. - :type index: ~azure.search.documents.indexes.models.SearchIndex or IO[bytes] - :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters - to be added to an index by taking the index offline for at least a few seconds. This - temporarily causes indexing and query requests to fail. Performance and write availability of - the index can be impaired for several minutes after the index is updated, or longer for very - large indexes. Default value is None. - :type allow_index_downtime: bool - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. 
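Besides the request body, create_or_update threads through the Prefer header, the optimistic-concurrency headers (If-Match / If-None-Match) and the allowIndexDowntime query flag. A sketch of how the public client surfaces those knobs; the keyword names follow the hand-written SearchIndexClient and should be read as assumptions, not as part of this patch:

    from azure.core import MatchConditions
    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents.indexes import SearchIndexClient
    from azure.search.documents.indexes.models import SearchFieldDataType, SimpleField

    client = SearchIndexClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))

    index = client.get_index("hotels-index")   # carries the current ETag with the definition
    index.fields.append(SimpleField(name="category", type=SearchFieldDataType.String, filterable=True))
    client.create_or_update_index(
        index,
        allow_index_downtime=True,                      # allowIndexDowntime query parameter
        match_condition=MatchConditions.IfNotModified,  # only update if the ETag still matches
    )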
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(index, (IOBase, bytes)): - _content = index - else: - _json = self._serialize.body(index, "SearchIndex") - - _request = build_create_or_update_request( - index_name=index_name, - prefer=prefer, - allow_index_downtime=allow_index_downtime, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - index_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a search index and all the documents it contains. This operation is permanent, with no - recovery option. Make sure you have a master copy of your index definition, data ingestion - code, and a backup of the primary data source in case you need to re-build the index. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Index - - :param index_name: The name of the index to delete. Required. - :type index_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. 
Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def get( - self, index_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndex: - """Retrieves an index definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Index - - :param index_name: The name of the index to retrieve. Required. - :type index_name: str - :param request_options: Parameter group. Default value is None. 
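Note that delete() above accepts both 204 and 404, so deleting an index that is already gone does not raise, and the ETag headers make "delete only if unchanged" possible. A hedged sketch of the public-client equivalent, again treating the match_condition keyword as an assumption from the hand-written wrapper:

    from azure.core import MatchConditions
    from azure.core.credentials import AzureKeyCredential
    from azure.search.documents.indexes import SearchIndexClient

    client = SearchIndexClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))

    index = client.get_index("hotels-index")
    # Only delete if nobody has changed the definition since we read it (If-Match: <etag>).
    client.delete_index(index, match_condition=MatchConditions.IfNotModified)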
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndex or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndex - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndex", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def get_statistics( - self, index_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.GetIndexStatisticsResult: - """Returns statistics for the given index, including a document count and storage usage. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Index-Statistics - - :param index_name: The name of the index for which to retrieve statistics. Required. - :type index_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: GetIndexStatisticsResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.GetIndexStatisticsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.GetIndexStatisticsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_statistics_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("GetIndexStatisticsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def analyze( - self, - index_name: str, - request: _models.AnalyzeRequest, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AnalyzeResult: - """Shows how an analyzer breaks text into tokens. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/test-analyzer - - :param index_name: The name of the index for which to test an analyzer. Required. - :type index_name: str - :param request: The text and analyzer or analysis components to test. Required. - :type request: ~azure.search.documents.indexes.models.AnalyzeRequest - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: AnalyzeResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.AnalyzeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def analyze( - self, - index_name: str, - request: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.AnalyzeResult: - """Shows how an analyzer breaks text into tokens. - - .. 
seealso:: - - https://learn.microsoft.com/rest/api/searchservice/test-analyzer - - :param index_name: The name of the index for which to test an analyzer. Required. - :type index_name: str - :param request: The text and analyzer or analysis components to test. Required. - :type request: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: AnalyzeResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.AnalyzeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def analyze( - self, - index_name: str, - request: Union[_models.AnalyzeRequest, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.AnalyzeResult: - """Shows how an analyzer breaks text into tokens. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/test-analyzer - - :param index_name: The name of the index for which to test an analyzer. Required. - :type index_name: str - :param request: The text and analyzer or analysis components to test. Is either a - AnalyzeRequest type or a IO[bytes] type. Required. - :type request: ~azure.search.documents.indexes.models.AnalyzeRequest or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: AnalyzeResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.AnalyzeResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.AnalyzeResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(request, (IOBase, bytes)): - _content = request - else: - _json = self._serialize.body(request, "AnalyzeRequest") - - _request = build_analyze_request( - index_name=index_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, 
error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("AnalyzeResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_patch.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_patch.py deleted file mode 100644 index f7dd32510333..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_patch.py +++ /dev/null @@ -1,20 +0,0 @@ -# ------------------------------------ -# Copyright (c) Microsoft Corporation. -# Licensed under the MIT License. -# ------------------------------------ -"""Customize generated code here. - -Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize -""" -from typing import List - -__all__: List[str] = [] # Add all objects you want publicly available to users at this package level - - -def patch_sdk(): - """Do not remove from this file. - - `patch_sdk` is a last resort escape hatch that allows you to do customizations - you can't accomplish using the techniques described in - https://aka.ms/azsdk/python/dpcodegen/python/customize - """ diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py deleted file mode 100644 index b007b000d4ef..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_search_service_client_operations.py +++ /dev/null @@ -1,118 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -import sys -from typing import Any, Callable, Dict, Optional, TypeVar - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. 
import models as _models -from .._serialization import Serializer -from .._vendor import SearchServiceClientMixinABC - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_get_service_statistics_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/servicestats") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -class SearchServiceClientOperationsMixin(SearchServiceClientMixinABC): - - @distributed_trace - def get_service_statistics( - self, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchServiceStatistics: - """Gets service level statistics for a search service. - - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchServiceStatistics or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchServiceStatistics - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchServiceStatistics] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_service_statistics_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - 
deserialized = self._deserialize("SearchServiceStatistics", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py deleted file mode 100644 index 7570ce317c06..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_skillsets_operations.py +++ /dev/null @@ -1,941 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. import models as _models -from .._serialization import Serializer - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_create_or_update_request( - skillset_name: str, - *, - prefer: Union[str, _models.Enum0], - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/skillsets('{skillsetName}')") - path_format_arguments = { - "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if skip_indexer_reset_requirement_for_cache is not None: - _params["ignoreResetRequirements"] = _SERIALIZER.query( - "skip_indexer_reset_requirement_for_cache", skip_indexer_reset_requirement_for_cache, "bool" - ) - if disable_cache_reprocessing_change_detection is not None: - _params["disableCacheReprocessingChangeDetection"] = _SERIALIZER.query( - "disable_cache_reprocessing_change_detection", disable_cache_reprocessing_change_detection, "bool" - ) - 
- # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_delete_request( - skillset_name: str, - *, - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/skillsets('{skillsetName}')") - path_format_arguments = { - "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_request( - skillset_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/skillsets('{skillsetName}')") - path_format_arguments = { - "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_list_request( - *, select: Optional[str] = None, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/skillsets") - - # Construct parameters - if select is not None: - _params["$select"] = _SERIALIZER.query("select", select, "str") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/skillsets") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_reset_skills_request( - skillset_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/skillsets('{skillsetName}')/search.resetskills") - path_format_arguments = { - "skillsetName": _SERIALIZER.url("skillset_name", skillset_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -class SkillsetsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. 
- - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.SearchServiceClient`'s - :attr:`skillsets` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - def create_or_update( - self, - skillset_name: str, - prefer: Union[str, _models.Enum0], - skillset: _models.SearchIndexerSkillset, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service or updates the skillset if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/update-skillset - - :param skillset_name: The name of the skillset to create or update. Required. - :type skillset_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param skillset: The skillset containing one or more skills to create or update in a search - service. Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". 
- :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_update( - self, - skillset_name: str, - prefer: Union[str, _models.Enum0], - skillset: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service or updates the skillset if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/update-skillset - - :param skillset_name: The name of the skillset to create or update. Required. - :type skillset_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param skillset: The skillset containing one or more skills to create or update in a search - service. Required. - :type skillset: IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_update( - self, - skillset_name: str, - prefer: Union[str, _models.Enum0], - skillset: Union[_models.SearchIndexerSkillset, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - skip_indexer_reset_requirement_for_cache: Optional[bool] = None, - disable_cache_reprocessing_change_detection: Optional[bool] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service or updates the skillset if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/update-skillset - - :param skillset_name: The name of the skillset to create or update. Required. - :type skillset_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. 
- :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param skillset: The skillset containing one or more skills to create or update in a search - service. Is either a SearchIndexerSkillset type or a IO[bytes] type. Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param skip_indexer_reset_requirement_for_cache: Ignores cache reset requirements. Default - value is None. - :type skip_indexer_reset_requirement_for_cache: bool - :param disable_cache_reprocessing_change_detection: Disables cache reprocessing change - detection. Default value is None. - :type disable_cache_reprocessing_change_detection: bool - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(skillset, (IOBase, bytes)): - _content = skillset - else: - _json = self._serialize.body(skillset, "SearchIndexerSkillset") - - _request = build_create_or_update_request( - skillset_name=skillset_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, - disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise 
HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - skillset_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/delete-skillset - - :param skillset_name: The name of the skillset to delete. Required. - :type skillset_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - skillset_name=skillset_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def get( - self, skillset_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Retrieves a skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/get-skillset - - :param skillset_name: The name of the skillset to retrieve. Required. - :type skillset_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - skillset_name=skillset_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListSkillsetsResult: - """List all skillsets in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/list-skillset - - :param select: Selects which top-level properties of the skillsets to retrieve. Specified as a - comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListSkillsetsResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListSkillsetsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListSkillsetsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListSkillsetsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create( - self, - skillset: _models.SearchIndexerSkillset, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/create-skillset - - :param skillset: The skillset containing one or more skills to create in a search service. - Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - skillset: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/create-skillset - - :param skillset: The skillset containing one or more skills to create in a search service. - Required. 
- :type skillset: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create( - self, - skillset: Union[_models.SearchIndexerSkillset, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SearchIndexerSkillset: - """Creates a new skillset in a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/create-skillset - - :param skillset: The skillset containing one or more skills to create in a search service. Is - either a SearchIndexerSkillset type or a IO[bytes] type. Required. - :type skillset: ~azure.search.documents.indexes.models.SearchIndexerSkillset or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SearchIndexerSkillset or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SearchIndexerSkillset - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(skillset, (IOBase, bytes)): - _content = skillset - else: - _json = self._serialize.body(skillset, "SearchIndexerSkillset") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SearchIndexerSkillset", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # 
type: ignore - - return deserialized # type: ignore - - @overload - def reset_skills( - self, - skillset_name: str, - skill_names: _models.SkillNames, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - .. seealso:: - - https://aka.ms/reset-skills - - :param skillset_name: The name of the skillset to reset. Required. - :type skillset_name: str - :param skill_names: The names of skills to reset. Required. - :type skill_names: ~azure.search.documents.indexes.models.SkillNames - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def reset_skills( - self, - skillset_name: str, - skill_names: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - .. seealso:: - - https://aka.ms/reset-skills - - :param skillset_name: The name of the skillset to reset. Required. - :type skillset_name: str - :param skill_names: The names of skills to reset. Required. - :type skill_names: IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def reset_skills( # pylint: disable=inconsistent-return-statements - self, - skillset_name: str, - skill_names: Union[_models.SkillNames, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Reset an existing skillset in a search service. - - .. seealso:: - - https://aka.ms/reset-skills - - :param skillset_name: The name of the skillset to reset. Required. - :type skillset_name: str - :param skill_names: The names of skills to reset. Is either a SkillNames type or a IO[bytes] - type. Required. - :type skill_names: ~azure.search.documents.indexes.models.SkillNames or IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(skill_names, (IOBase, bytes)): - _content = skill_names - else: - _json = self._serialize.body(skill_names, "SkillNames") - - _request = build_reset_skills_request( - skillset_name=skillset_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py deleted file mode 100644 index febf639c0ba6..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/operations/_synonym_maps_operations.py +++ /dev/null @@ -1,732 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.9.5, generator: @autorest/python@6.26.1) -# Changes may cause incorrect behavior and will be lost if the code is regenerated. -# -------------------------------------------------------------------------- -from io import IOBase -import sys -from typing import Any, Callable, Dict, IO, Optional, TypeVar, Union, overload - -from azure.core.exceptions import ( - ClientAuthenticationError, - HttpResponseError, - ResourceExistsError, - ResourceNotFoundError, - ResourceNotModifiedError, - map_error, -) -from azure.core.pipeline import PipelineResponse -from azure.core.rest import HttpRequest, HttpResponse -from azure.core.tracing.decorator import distributed_trace -from azure.core.utils import case_insensitive_dict - -from .. 
import models as _models -from .._serialization import Serializer - -if sys.version_info >= (3, 9): - from collections.abc import MutableMapping -else: - from typing import MutableMapping # type: ignore -T = TypeVar("T") -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] - -_SERIALIZER = Serializer() -_SERIALIZER.client_side_validation = False - - -def build_create_or_update_request( - synonym_map_name: str, - *, - prefer: Union[str, _models.Enum0], - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/synonymmaps('{synonymMapName}')") - path_format_arguments = { - "synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Prefer"] = _SERIALIZER.header("prefer", prefer, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_delete_request( - synonym_map_name: str, - *, - x_ms_client_request_id: Optional[str] = None, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/synonymmaps('{synonymMapName}')") - path_format_arguments = { - "synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if if_match is not None: - _headers["If-Match"] = _SERIALIZER.header("if_match", if_match, "str") - if if_none_match is not None: - _headers["If-None-Match"] = _SERIALIZER.header("if_none_match", if_none_match, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return 
HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_get_request( - synonym_map_name: str, *, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/synonymmaps('{synonymMapName}')") - path_format_arguments = { - "synonymMapName": _SERIALIZER.url("synonym_map_name", synonym_map_name, "str"), - } - - _url: str = _url.format(**path_format_arguments) # type: ignore - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_list_request( - *, select: Optional[str] = None, x_ms_client_request_id: Optional[str] = None, **kwargs: Any -) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/synonymmaps") - - # Construct parameters - if select is not None: - _params["$select"] = _SERIALIZER.query("select", select, "str") - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) - - -def build_create_request(*, x_ms_client_request_id: Optional[str] = None, **kwargs: Any) -> HttpRequest: - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - accept = _headers.pop("Accept", "application/json") - - # Construct URL - _url = kwargs.pop("template_url", "/synonymmaps") - - # Construct parameters - _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - - # Construct headers - if x_ms_client_request_id is not None: - _headers["x-ms-client-request-id"] = _SERIALIZER.header("x_ms_client_request_id", x_ms_client_request_id, "str") - if content_type is not None: - _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str") - _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") - - return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) - - -class SynonymMapsOperations: - """ - .. warning:: - **DO NOT** instantiate this class directly. 
- - Instead, you should access the following operations through - :class:`~azure.search.documents.indexes.SearchServiceClient`'s - :attr:`synonym_maps` attribute. - """ - - models = _models - - def __init__(self, *args, **kwargs): - input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") - - @overload - def create_or_update( - self, - synonym_map_name: str, - prefer: Union[str, _models.Enum0], - synonym_map: _models.SynonymMap, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map or updates a synonym map if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Synonym-Map - - :param synonym_map_name: The name of the synonym map to create or update. Required. - :type synonym_map_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param synonym_map: The definition of the synonym map to create or update. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create_or_update( - self, - synonym_map_name: str, - prefer: Union[str, _models.Enum0], - synonym_map: IO[bytes], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map or updates a synonym map if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Synonym-Map - - :param synonym_map_name: The name of the synonym map to create or update. Required. - :type synonym_map_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param synonym_map: The definition of the synonym map to create or update. Required. - :type synonym_map: IO[bytes] - :param if_match: Defines the If-Match condition. 
The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create_or_update( - self, - synonym_map_name: str, - prefer: Union[str, _models.Enum0], - synonym_map: Union[_models.SynonymMap, IO[bytes]], - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map or updates a synonym map if it already exists. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Update-Synonym-Map - - :param synonym_map_name: The name of the synonym map to create or update. Required. - :type synonym_map_name: str - :param prefer: For HTTP PUT requests, instructs the service to return the created/updated - resource on success. "return=representation" Required. - :type prefer: str or ~azure.search.documents.indexes.models.Enum0 - :param synonym_map: The definition of the synonym map to create or update. Is either a - SynonymMap type or a IO[bytes] type. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap or IO[bytes] - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(synonym_map, (IOBase, bytes)): - _content = synonym_map - else: - _json = self._serialize.body(synonym_map, "SynonymMap") - - _request = build_create_or_update_request( - synonym_map_name=synonym_map_name, - prefer=prefer, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200, 201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def delete( # pylint: disable=inconsistent-return-statements - self, - synonym_map_name: str, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> None: - """Deletes a synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Delete-Synonym-Map - - :param synonym_map_name: The name of the synonym map to delete. Required. - :type synonym_map_name: str - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. Default value is None. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. Default value is None. - :type if_none_match: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: None or the result of cls(response) - :rtype: None - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[None] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_delete_request( - synonym_map_name=synonym_map_name, - x_ms_client_request_id=_x_ms_client_request_id, - if_match=if_match, - if_none_match=if_none_match, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [204, 404]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - if cls: - return cls(pipeline_response, None, {}) # type: ignore - - @distributed_trace - def get( - self, synonym_map_name: str, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.SynonymMap: - """Retrieves a synonym map definition. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Get-Synonym-Map - - :param synonym_map_name: The name of the synonym map to retrieve. Required. - :type synonym_map_name: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_get_request( - synonym_map_name=synonym_map_name, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @distributed_trace - def list( - self, select: Optional[str] = None, request_options: Optional[_models.RequestOptions] = None, **kwargs: Any - ) -> _models.ListSynonymMapsResult: - """Lists all synonym maps available for a search service. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/List-Synonym-Maps - - :param select: Selects which top-level properties of the synonym maps to retrieve. Specified as - a comma-separated list of JSON property names, or '*' for all properties. The default is all - properties. Default value is None. - :type select: str - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: ListSynonymMapsResult or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.ListSynonymMapsResult - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = kwargs.pop("headers", {}) or {} - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - cls: ClsType[_models.ListSynonymMapsResult] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - - _request = build_list_request( - select=select, - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("ListSynonymMapsResult", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore - - @overload - def create( - self, - synonym_map: _models.SynonymMap, - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Synonym-Map - - :param synonym_map: The definition of the synonym map to create. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for JSON body. - Default value is "application/json". - :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @overload - def create( - self, - synonym_map: IO[bytes], - request_options: Optional[_models.RequestOptions] = None, - *, - content_type: str = "application/json", - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Synonym-Map - - :param synonym_map: The definition of the synonym map to create. Required. - :type synonym_map: IO[bytes] - :param request_options: Parameter group. Default value is None. 
- :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :keyword content_type: Body Parameter content-type. Content type parameter for binary body. - Default value is "application/json". - :paramtype content_type: str - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - - @distributed_trace - def create( - self, - synonym_map: Union[_models.SynonymMap, IO[bytes]], - request_options: Optional[_models.RequestOptions] = None, - **kwargs: Any - ) -> _models.SynonymMap: - """Creates a new synonym map. - - .. seealso:: - - https://learn.microsoft.com/rest/api/searchservice/Create-Synonym-Map - - :param synonym_map: The definition of the synonym map to create. Is either a SynonymMap type or - a IO[bytes] type. Required. - :type synonym_map: ~azure.search.documents.indexes.models.SynonymMap or IO[bytes] - :param request_options: Parameter group. Default value is None. - :type request_options: ~azure.search.documents.indexes.models.RequestOptions - :return: SynonymMap or the result of cls(response) - :rtype: ~azure.search.documents.indexes.models.SynonymMap - :raises ~azure.core.exceptions.HttpResponseError: - """ - error_map: MutableMapping = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError, - 409: ResourceExistsError, - 304: ResourceNotModifiedError, - } - error_map.update(kwargs.pop("error_map", {}) or {}) - - _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) - _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - - api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version)) - content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - - _x_ms_client_request_id = None - if request_options is not None: - _x_ms_client_request_id = request_options.x_ms_client_request_id - content_type = content_type or "application/json" - _json = None - _content = None - if isinstance(synonym_map, (IOBase, bytes)): - _content = synonym_map - else: - _json = self._serialize.body(synonym_map, "SynonymMap") - - _request = build_create_request( - x_ms_client_request_id=_x_ms_client_request_id, - api_version=api_version, - content_type=content_type, - json=_json, - content=_content, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) - - _stream = False - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) - - response = pipeline_response.http_response - - if response.status_code not in [201]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) - raise HttpResponseError(response=response, model=error) - - deserialized = self._deserialize("SynonymMap", pipeline_response.http_response) - - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore - - return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/py.typed b/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/py.typed 
deleted file mode 100644 index e5aff4f83af8..000000000000 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_generated/py.typed +++ /dev/null @@ -1 +0,0 @@ -# Marker file for PEP 561. \ No newline at end of file diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py index 0a6b96137725..f010bfd55bf5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py @@ -12,13 +12,10 @@ from azure.core.paging import ItemPaged from .._api_versions import DEFAULT_VERSION -from ._generated import SearchServiceClient as _SearchServiceClient -from ._utils import ( - get_access_conditions, - normalize_endpoint, -) +from .._generated import SearchClient as _SearchServiceClient +from ._utils import normalize_endpoint from .._headers_mixin import HeadersMixin -from .._utils import get_authentication_policy +from .._utils import DEFAULT_AUDIENCE from .._version import SDK_MONIKER from .._search_client import SearchClient from .models import ( @@ -51,21 +48,19 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCre self._endpoint = normalize_endpoint(endpoint) self._credential = credential self._audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = _SearchServiceClient( - endpoint=endpoint, sdk_moniker=SDK_MONIKER, api_version=self._api_version, **kwargs - ) - else: - self._aad = True - authentication_policy = get_authentication_policy(credential, audience=self._audience) - self._client = _SearchServiceClient( - endpoint=endpoint, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not self._audience: + self._audience = DEFAULT_AUDIENCE + scope = self._audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = _SearchServiceClient( + endpoint=endpoint, + credential=credential, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + credential_scopes=credential_scopes, + **kwargs + ) def __enter__(self): self._client.__enter__() @@ -116,7 +111,9 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - indexes = self._client.indexes.list(cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs) + indexes = self._client.indexes_operations.list( + cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs + ) return cast(ItemPaged[SearchIndex], indexes) @distributed_trace @@ -130,7 +127,7 @@ def list_index_names(self, **kwargs: Any) -> ItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.indexes.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.indexes_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(ItemPaged[str], names) @distributed_trace @@ -153,7 +150,7 @@ def get_index(self, name: str, **kwargs: Any) -> SearchIndex: :caption: Get an index. 
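# --- Editor's note, not part of the patch: a minimal sketch of the credential-scope
# derivation that the __init__ hunk above introduces. ASSUMED_DEFAULT_AUDIENCE stands in
# for the SDK's DEFAULT_AUDIENCE constant and is assumed here to be the public-cloud value
# "https://search.azure.com" purely for illustration. Stripping the trailing slash before
# appending "/.default" makes both audience spellings yield the same AAD scope.
from typing import List, Optional

ASSUMED_DEFAULT_AUDIENCE = "https://search.azure.com"  # illustrative stand-in, not from the patch

def build_credential_scopes(audience: Optional[str]) -> List[str]:
    # Fall back to the default audience, then normalize it into a single AAD scope.
    audience = audience or ASSUMED_DEFAULT_AUDIENCE
    return [audience.rstrip("/") + "/.default"]

assert build_credential_scopes(None) == ["https://search.azure.com/.default"]
assert build_credential_scopes("https://search.azure.com/") == ["https://search.azure.com/.default"]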
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexes.get(name, **kwargs) + result = self._client.indexes_operations.get(name, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -169,8 +166,8 @@ def get_index_statistics(self, index_name: str, **kwargs: Any) -> MutableMapping """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexes.get_statistics(index_name, **kwargs) - return result.as_dict() + result = self._client.indexes_operations.get_statistics(index_name, **kwargs) + return result @distributed_trace def delete_index( @@ -199,13 +196,17 @@ def delete_index( :caption: Delete an index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(index, match_condition) - kwargs.update(access_condition) + if isinstance(index, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: index_name = index.name # type: ignore + etag = index.e_tag # type: ignore except AttributeError: index_name = index - self._client.indexes.delete(index_name=index_name, error_map=error_map, **kwargs) + self._client.indexes_operations.delete( + index_name=index_name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: @@ -228,7 +229,7 @@ def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_index = index._to_generated() # pylint:disable=protected-access - result = self._client.indexes.create(patched_index, **kwargs) + result = self._client.indexes_operations.create(patched_index, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -270,15 +271,14 @@ def create_or_update_index( :caption: Update an index. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(index, match_condition) - kwargs.update(access_condition) patched_index = index._to_generated() # pylint:disable=protected-access - result = self._client.indexes.create_or_update( + result = self._client.indexes_operations.create_or_update( index_name=index.name, index=patched_index, allow_index_downtime=allow_index_downtime, prefer="return=representation", - error_map=error_map, + etag=index.e_tag, + match_condition=match_condition, **kwargs ) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @@ -305,10 +305,8 @@ def analyze_text(self, index_name: str, analyze_request: AnalyzeTextOptions, **k :caption: Analyze text """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexes.analyze( - index_name=index_name, - request=analyze_request._to_analyze_request(), # pylint:disable=protected-access - **kwargs + result = self._client.indexes_operations.analyze( + index_name=index_name, request=analyze_request._to_generated(), **kwargs # pylint:disable=protected-access ) return result @@ -337,10 +335,10 @@ def get_synonym_maps(self, *, select: Optional[List[str]] = None, **kwargs) -> L kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.synonym_maps.list(**kwargs) + result = self._client.synonym_maps_operations.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy # pylint:disable=protected-access - return [SynonymMap._from_generated(x) for x in result.synonym_maps] + return [cast(SynonymMap, SynonymMap._from_generated(x)) for x in result.synonym_maps] @distributed_trace def get_synonym_map_names(self, **kwargs: Any) -> List[str]: @@ -352,7 +350,7 @@ def get_synonym_map_names(self, **kwargs: Any) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.synonym_maps.list(**kwargs) + result = self._client.synonym_maps_operations.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy return [x.name for x in result.synonym_maps] @@ -377,7 +375,7 @@ def get_synonym_map(self, name: str, **kwargs: Any) -> SynonymMap: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.synonym_maps.get(name, **kwargs) + result = self._client.synonym_maps_operations.get(name, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -408,13 +406,17 @@ def delete_synonym_map( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(synonym_map, match_condition) - kwargs.update(access_condition) + if isinstance(synonym_map, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = synonym_map.name # type: ignore + etag = synonym_map.e_tag # type: ignore except AttributeError: name = synonym_map - self._client.synonym_maps.delete(synonym_map_name=name, error_map=error_map, **kwargs) + self._client.synonym_maps_operations.delete( + synonym_map_name=name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace def create_synonym_map(self, synonym_map: SynonymMap, **kwargs: Any) -> SynonymMap: @@ -437,7 +439,7 @@ def create_synonym_map(self, 
synonym_map: SynonymMap, **kwargs: Any) -> SynonymM """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = self._client.synonym_maps.create(patched_synonym_map, **kwargs) + result = self._client.synonym_maps_operations.create(patched_synonym_map, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -460,14 +462,13 @@ def create_or_update_synonym_map( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(synonym_map, match_condition) - kwargs.update(access_condition) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = self._client.synonym_maps.create_or_update( + result = self._client.synonym_maps_operations.create_or_update( synonym_map_name=synonym_map.name, synonym_map=patched_synonym_map, prefer="return=representation", - error_map=error_map, + etag=synonym_map.e_tag, + match_condition=match_condition, **kwargs ) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @@ -481,7 +482,7 @@ def get_service_statistics(self, **kwargs: Any) -> MutableMapping[str, Any]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = self._client.get_service_statistics(**kwargs) - return result.as_dict() + return result @distributed_trace def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> ItemPaged[SearchAlias]: @@ -499,7 +500,7 @@ def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - return cast(ItemPaged[SearchAlias], self._client.aliases.list(**kwargs)) + return cast(ItemPaged[SearchAlias], self._client.aliases_operations.list(**kwargs)) @distributed_trace def list_alias_names(self, **kwargs: Any) -> ItemPaged[str]: @@ -512,7 +513,7 @@ def list_alias_names(self, **kwargs: Any) -> ItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.aliases.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.aliases_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(ItemPaged[str], names) @distributed_trace @@ -526,8 +527,8 @@ def get_alias(self, name: str, **kwargs: Any) -> SearchAlias: :raises: ~azure.core.exceptions.HttpResponseError """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.aliases.get(name, **kwargs) - return result + result = self._client.aliases_operations.get(name, **kwargs) + return cast(SearchAlias, result) @distributed_trace def delete_alias( @@ -556,13 +557,17 @@ def delete_alias( :caption: Deleting an alias. 
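# --- Editor's note, not part of the patch: a hedged end-to-end sketch of what the new
# etag/match_condition plumbing means for callers of this client. The endpoint, API key,
# and resource names ("hotels", "country-names") are placeholders; the methods used
# (get_index, delete_index, get_synonym_map, create_or_update_synonym_map) are the public
# APIs edited in the hunks above.
from azure.core import MatchConditions
from azure.core.credentials import AzureKeyCredential
from azure.core.exceptions import HttpResponseError
from azure.search.documents.indexes import SearchIndexClient

client = SearchIndexClient("https://<service>.search.windows.net", AzureKeyCredential("<api-key>"))

# Conditional delete: pass the fetched model (it carries e_tag), not just the name.
index = client.get_index("hotels")
client.delete_index(index, match_condition=MatchConditions.IfNotModified)

# Optimistic concurrency on update: a 412 means the map changed since it was fetched.
synonym_map = client.get_synonym_map("country-names")
synonym_map.synonyms.append("NL, Netherlands, Holland")
try:
    client.create_or_update_synonym_map(synonym_map, match_condition=MatchConditions.IfNotModified)
except HttpResponseError as exc:
    if exc.status_code == 412:
        pass  # re-fetch and retry in real code
    else:
        raise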
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(alias, match_condition) - kwargs.update(access_condition) + if isinstance(alias, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: alias_name = alias.name # type: ignore + etag = alias.e_tag # type: ignore except AttributeError: alias_name = alias - self._client.aliases.delete(alias_name=alias_name, error_map=error_map, **kwargs) + self._client.aliases_operations.delete( + alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: @@ -584,8 +589,8 @@ def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: :caption: Creating a new alias. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.aliases.create(alias, **kwargs) - return result # pylint:disable=protected-access + result = self._client.aliases_operations.create(alias, **kwargs) + return cast(SearchAlias, result) @distributed_trace def create_or_update_alias( @@ -616,12 +621,15 @@ def create_or_update_alias( :caption: Updating an alias. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(alias, match_condition) - kwargs.update(access_condition) - result = self._client.aliases.create_or_update( - alias_name=alias.name, alias=alias, prefer="return=representation", error_map=error_map, **kwargs + result = self._client.aliases_operations.create_or_update( + alias_name=alias.name, + alias=alias, + prefer="return=representation", + etag=alias.e_tag, + match_condition=match_condition, + **kwargs ) - return result # pylint:disable=protected-access + return cast(SearchAlias, result) @distributed_trace def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs) -> HttpResponse: @@ -634,4 +642,4 @@ def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs) :rtype: ~azure.core.rest.HttpResponse """ request.headers = self._merge_client_headers(request.headers) - return self._client._send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access + return self._client.send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py index 592895702879..bd17f9d64301 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py @@ -9,16 +9,13 @@ from azure.core.credentials import AzureKeyCredential, TokenCredential from azure.core.tracing.decorator import distributed_trace -from ._generated import SearchServiceClient as _SearchServiceClient -from ._generated.models import ( +from .._generated import SearchClient as _SearchServiceClient +from .._generated.models import ( SkillNames, SearchIndexerStatus, DocumentKeysOrIds, ) -from ._utils import ( - get_access_conditions, - normalize_endpoint, -) +from ._utils import normalize_endpoint from .models import ( SearchIndexer, SearchIndexerSkillset, @@ -28,7 +25,7 @@ ) from .._api_versions import DEFAULT_VERSION from 
.._headers_mixin import HeadersMixin -from .._utils import get_authentication_policy +from .._utils import DEFAULT_AUDIENCE from .._version import SDK_MONIKER @@ -53,21 +50,19 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, TokenCre self._endpoint = normalize_endpoint(endpoint) self._credential = credential audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = _SearchServiceClient( - endpoint=endpoint, sdk_moniker=SDK_MONIKER, api_version=self._api_version, **kwargs - ) - else: - self._aad = True - authentication_policy = get_authentication_policy(credential, audience=audience) - self._client = _SearchServiceClient( - endpoint=endpoint, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not audience: + audience = DEFAULT_AUDIENCE + scope = audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = _SearchServiceClient( + endpoint=endpoint, + credential=credential, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + credential_scopes=credential_scopes, + **kwargs + ) def __enter__(self) -> "SearchIndexerClient": self._client.__enter__() @@ -103,7 +98,7 @@ def create_indexer(self, indexer: SearchIndexer, **kwargs: Any) -> SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = self._client.indexers.create(patched_indexer, **kwargs) + result = self._client.indexers_operations.create(patched_indexer, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -131,15 +126,14 @@ def create_or_update_indexer( :rtype: ~azure.search.documents.indexes.models.SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(indexer, match_condition) - kwargs.update(access_condition) name = indexer.name patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = self._client.indexers.create_or_update( + result = self._client.indexers_operations.create_or_update( indexer_name=name, indexer=patched_indexer, prefer="return=representation", - error_map=error_map, + etag=indexer.e_tag, + match_condition=match_condition, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs @@ -165,7 +159,7 @@ def get_indexer(self, name: str, **kwargs: Any) -> SearchIndexer: :caption: Retrieve a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexers.get(name, **kwargs) + result = self._client.indexers_operations.get(name, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -191,7 +185,7 @@ def get_indexers(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.indexers.list(**kwargs) + result = self._client.indexers_operations.list(**kwargs) assert result.indexers is not None # Hint for mypy # pylint:disable=protected-access return [cast(SearchIndexer, 
SearchIndexer._from_generated(index)) for index in result.indexers] @@ -213,7 +207,7 @@ def get_indexer_names(self, **kwargs: Any) -> Sequence[str]: :caption: List all the SearchIndexers """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexers.list(**kwargs) + result = self._client.indexers_operations.list(**kwargs) assert result.indexers is not None # Hint for mypy return [x.name for x in result.indexers] @@ -244,13 +238,15 @@ def delete_indexer( :caption: Delete a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(indexer, match_condition) - kwargs.update(access_condition) + if isinstance(indexer, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = indexer.name # type: ignore + etag = indexer.e_tag # type: ignore except AttributeError: name = indexer - self._client.indexers.delete(name, error_map=error_map, **kwargs) + self._client.indexers_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def run_indexer(self, name: str, **kwargs: Any) -> None: @@ -269,7 +265,7 @@ def run_indexer(self, name: str, **kwargs: Any) -> None: :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - self._client.indexers.run(name, **kwargs) + self._client.indexers_operations.run(name, **kwargs) @distributed_trace def reset_indexer(self, name: str, **kwargs: Any) -> None: @@ -288,7 +284,7 @@ def reset_indexer(self, name: str, **kwargs: Any) -> None: :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - self._client.indexers.reset(name, **kwargs) + self._client.indexers_operations.reset(name, **kwargs) @distributed_trace def reset_documents( @@ -318,7 +314,7 @@ def reset_documents( name = indexer.name # type: ignore except AttributeError: name = indexer - return self._client.indexers.reset_docs(name, overwrite=overwrite, **kwargs) + return self._client.indexers_operations.reset_docs(name, overwrite=overwrite, **kwargs) @distributed_trace def get_indexer_status(self, name: str, **kwargs: Any) -> SearchIndexerStatus: @@ -340,7 +336,7 @@ def get_indexer_status(self, name: str, **kwargs: Any) -> SearchIndexerStatus: :caption: Get a SearchIndexer's status """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return self._client.indexers.get_status(name, **kwargs) + return self._client.indexers_operations.get_status(name, **kwargs) @distributed_trace def create_data_source_connection( @@ -365,7 +361,7 @@ def create_data_source_connection( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) # pylint:disable=protected-access packed_data_source = data_source_connection._to_generated() - result = self._client.data_sources.create(packed_data_source, **kwargs) + result = self._client.data_sources_operations.create(packed_data_source, **kwargs) return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @distributed_trace @@ -389,15 +385,14 @@ def create_or_update_data_source_connection( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(data_source_connection, match_condition) - kwargs.update(access_condition) name = data_source_connection.name 
packed_data_source = data_source_connection._to_generated() # pylint:disable=protected-access - result = self._client.data_sources.create_or_update( + result = self._client.data_sources_operations.create_or_update( data_source_name=name, data_source=packed_data_source, prefer="return=representation", - error_map=error_map, + etag=data_source_connection.e_tag, + match_condition=match_condition, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, **kwargs ) @@ -423,7 +418,7 @@ def get_data_source_connection(self, name: str, **kwargs: Any) -> SearchIndexerD :caption: Retrieve a SearchIndexerDataSourceConnection """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.data_sources.get(name, **kwargs) + result = self._client.data_sources_operations.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @@ -452,10 +447,13 @@ def get_data_source_connections( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.data_sources.list(**kwargs) + result = self._client.data_sources_operations.list(**kwargs) assert result.data_sources is not None # Hint for mypy # pylint:disable=protected-access - return [SearchIndexerDataSourceConnection._from_generated(x) for x in result.data_sources] + return [ + cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(x)) + for x in result.data_sources + ] @distributed_trace def get_data_source_connection_names(self, **kwargs: Any) -> Sequence[str]: @@ -466,7 +464,7 @@ def get_data_source_connection_names(self, **kwargs: Any) -> Sequence[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.data_sources.list(**kwargs) + result = self._client.data_sources_operations.list(**kwargs) assert result.data_sources is not None # Hint for mypy return [x.name for x in result.data_sources] @@ -497,13 +495,17 @@ def delete_data_source_connection( :caption: Delete a SearchIndexerDataSourceConnection """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(data_source_connection, match_condition) - kwargs.update(access_condition) + if isinstance(data_source_connection, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = data_source_connection.name # type: ignore + etag = data_source_connection.e_tag # type: ignore except AttributeError: name = data_source_connection - self._client.data_sources.delete(data_source_name=name, error_map=error_map, **kwargs) + self._client.data_sources_operations.delete( + data_source_name=name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> List[SearchIndexerSkillset]: @@ -522,9 +524,12 @@ def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.skillsets.list(**kwargs) + result = self._client.skillsets_operations.list(**kwargs) assert result.skillsets is not None # Hint for mypy - return [SearchIndexerSkillset._from_generated(skillset) for skillset in 
result.skillsets] + return [ + cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(skillset)) + for skillset in result.skillsets + ] @distributed_trace def get_skillset_names(self, **kwargs: Any) -> List[str]: @@ -536,7 +541,7 @@ def get_skillset_names(self, **kwargs: Any) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.skillsets.list(**kwargs) + result = self._client.skillsets_operations.list(**kwargs) assert result.skillsets is not None # Hint for mypy return [x.name for x in result.skillsets] @@ -551,7 +556,7 @@ def get_skillset(self, name: str, **kwargs: Any) -> SearchIndexerSkillset: :raises: ~azure.core.exceptions.ResourceNotFoundError """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.skillsets.get(name, **kwargs) + result = self._client.skillsets_operations.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(result)) @@ -573,13 +578,15 @@ def delete_skillset( :paramtype match_condition: ~azure.core.MatchConditions """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(skillset, match_condition) - kwargs.update(access_condition) + if isinstance(skillset, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = skillset.name # type: ignore + etag = skillset.e_tag # type: ignore except AttributeError: name = skillset - self._client.skillsets.delete(name, error_map=error_map, **kwargs) + self._client.skillsets_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def create_skillset(self, skillset: SearchIndexerSkillset, **kwargs: Any) -> SearchIndexerSkillset: @@ -595,7 +602,7 @@ def create_skillset(self, skillset: SearchIndexerSkillset, **kwargs: Any) -> Sea _validate_skillset(skillset) skillset_gen = skillset._to_generated() if hasattr(skillset, "_to_generated") else skillset - result = self._client.skillsets.create(skillset_gen, **kwargs) + result = self._client.skillsets_operations.create(skillset_gen, **kwargs) return cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(result)) @distributed_trace @@ -626,16 +633,15 @@ def create_or_update_skillset( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(skillset, match_condition) - kwargs.update(access_condition) _validate_skillset(skillset) skillset_gen = skillset._to_generated() if hasattr(skillset, "_to_generated") else skillset - result = self._client.skillsets.create_or_update( + result = self._client.skillsets_operations.create_or_update( skillset_name=skillset.name, skillset=skillset_gen, prefer="return=representation", - error_map=error_map, + etag=skillset.e_tag, + match_condition=match_condition, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs @@ -660,7 +666,7 @@ def reset_skills(self, skillset: Union[str, SearchIndexerSkillset], skill_names: except AttributeError: name = skillset names = SkillNames(skill_names=skill_names) - return self._client.skillsets.reset_skills(skillset_name=name, skill_names=names, **kwargs) + return self._client.skillsets_operations.reset_skills(skillset_name=name, 
skill_names=names, **kwargs) def _validate_skillset(skillset: SearchIndexerSkillset): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils.py index bfe9a82a8357..00546115065e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_utils.py @@ -3,15 +3,8 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # ------------------------------------------------------------------------- -from typing import Optional, Any, Tuple, Dict +from typing import Optional from azure.core import MatchConditions -from azure.core.exceptions import ( - ClientAuthenticationError, - ResourceExistsError, - ResourceNotFoundError, - ResourceModifiedError, - ResourceNotModifiedError, -) def quote_etag(etag: Optional[str]) -> Optional[str]: @@ -42,33 +35,6 @@ def prep_if_none_match(etag: str, match_condition: MatchConditions) -> Optional[ return None -def get_access_conditions( - model: Any, match_condition: MatchConditions = MatchConditions.Unconditionally -) -> Tuple[Dict[int, Any], Dict[str, Optional[str]]]: - error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} - - if isinstance(model, str): - if match_condition is not MatchConditions.Unconditionally: - raise ValueError("A model must be passed to use access conditions") - return error_map, {} - - try: - if_match = prep_if_match(model.e_tag, match_condition) - if_none_match = prep_if_none_match(model.e_tag, match_condition) - if match_condition == MatchConditions.IfNotModified: - error_map[412] = ResourceModifiedError - if match_condition == MatchConditions.IfModified: - error_map[304] = ResourceNotModifiedError - error_map[412] = ResourceNotModifiedError - if match_condition == MatchConditions.IfPresent: - error_map[412] = ResourceNotFoundError - if match_condition == MatchConditions.IfMissing: - error_map[412] = ResourceExistsError - return error_map, {"if_match": if_match, "if_none_match": if_none_match} - except AttributeError as ex: - raise ValueError("Unable to get e_tag from the model") from ex - - def normalize_endpoint(endpoint): try: if not endpoint.lower().startswith("http"): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py index c71e3e210282..e970ab7228d3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py @@ -12,15 +12,12 @@ from azure.core.tracing.decorator import distributed_trace from azure.core.tracing.decorator_async import distributed_trace_async from azure.core.async_paging import AsyncItemPaged -from .._generated.aio import SearchServiceClient as _SearchServiceClient +from ..._generated.aio import SearchClient as _SearchServiceClient from ...aio._search_client_async import SearchClient -from .._utils import ( - get_access_conditions, - normalize_endpoint, -) +from .._utils import normalize_endpoint from ..._api_versions import DEFAULT_VERSION from ..._headers_mixin import HeadersMixin -from ..._utils import get_authentication_policy +from ..._utils import DEFAULT_AUDIENCE from ..._version import SDK_MONIKER from ..models import ( SearchIndex, @@ -52,21 +49,19 @@ def 
__init__(self, endpoint: str, credential: Union[AzureKeyCredential, AsyncTok self._endpoint = normalize_endpoint(endpoint) self._credential = credential self._audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = _SearchServiceClient( - endpoint=endpoint, sdk_moniker=SDK_MONIKER, api_version=self._api_version, **kwargs - ) - else: - self._aad = True - authentication_policy = get_authentication_policy(credential, audience=self._audience, is_async=True) - self._client = _SearchServiceClient( - endpoint=endpoint, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not self._audience: + self._audience = DEFAULT_AUDIENCE + scope = self._audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = _SearchServiceClient( + endpoint=endpoint, + credential=credential, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + credential_scopes=credential_scopes, + **kwargs + ) async def __aenter__(self) -> "SearchIndexClient": await self._client.__aenter__() # pylint:disable=no-member @@ -115,7 +110,9 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs) -> Async if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - indexes = self._client.indexes.list(cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs) + indexes = self._client.indexes_operations.list( + cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs + ) return cast(AsyncItemPaged[SearchIndex], indexes) @distributed_trace @@ -128,7 +125,7 @@ def list_index_names(self, **kwargs: Any) -> AsyncItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.indexes.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.indexes_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(AsyncItemPaged[str], names) @distributed_trace_async @@ -151,7 +148,7 @@ async def get_index(self, name: str, **kwargs: Any) -> SearchIndex: :caption: Get an index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexes.get(name, **kwargs) + result = await self._client.indexes_operations.get(name, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -166,8 +163,8 @@ async def get_index_statistics(self, index_name: str, **kwargs: Any) -> MutableM :raises: ~azure.core.exceptions.HttpResponseError """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexes.get_statistics(index_name, **kwargs) - return result.as_dict() + result = await self._client.indexes_operations.get_statistics(index_name, **kwargs) + return result @distributed_trace_async async def delete_index( @@ -196,13 +193,17 @@ async def delete_index( :caption: Delete an index. 
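# --- Editor's note, not part of the patch: the aio client mirrors the synchronous changes
# above; a hedged sketch of conditional deletion with it. Endpoint, key, and the index
# name are placeholders.
import asyncio

from azure.core import MatchConditions
from azure.core.credentials import AzureKeyCredential
from azure.search.documents.indexes.aio import SearchIndexClient

async def main() -> None:
    async with SearchIndexClient(
        "https://<service>.search.windows.net", AzureKeyCredential("<api-key>")
    ) as client:
        index = await client.get_index("hotels")
        await client.delete_index(index, match_condition=MatchConditions.IfNotModified)

asyncio.run(main())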
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(index, match_condition) - kwargs.update(access_condition) + if isinstance(index, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: index_name = index.name # type: ignore + etag = index.e_tag # type: ignore except AttributeError: index_name = index - await self._client.indexes.delete(index_name=index_name, error_map=error_map, **kwargs) + await self._client.indexes_operations.delete( + index_name=index_name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace_async async def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: @@ -225,7 +226,7 @@ async def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_index = index._to_generated() # pylint:disable=protected-access - result = await self._client.indexes.create(patched_index, **kwargs) + result = await self._client.indexes_operations.create(patched_index, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -267,15 +268,14 @@ async def create_or_update_index( :caption: Update an index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(index, match_condition) - kwargs.update(access_condition) patched_index = index._to_generated() # pylint:disable=protected-access - result = await self._client.indexes.create_or_update( + result = await self._client.indexes_operations.create_or_update( index_name=index.name, index=patched_index, allow_index_downtime=allow_index_downtime, prefer="return=representation", - error_map=error_map, + etag=index.e_tag, + match_condition=match_condition, **kwargs ) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @@ -302,10 +302,8 @@ async def analyze_text(self, index_name: str, analyze_request: AnalyzeTextOption :caption: Analyze text """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexes.analyze( - index_name=index_name, - request=analyze_request._to_analyze_request(), # pylint:disable=protected-access - **kwargs + result = await self._client.indexes_operations.analyze( + index_name=index_name, request=analyze_request._to_generated(), **kwargs # pylint:disable=protected-access ) return result @@ -334,10 +332,10 @@ async def get_synonym_maps(self, *, select: Optional[List[str]] = None, **kwargs kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.synonym_maps.list(**kwargs) + result = await self._client.synonym_maps_operations.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy # pylint:disable=protected-access - return [SynonymMap._from_generated(x) for x in result.synonym_maps] + return [cast(SynonymMap, SynonymMap._from_generated(x)) for x in result.synonym_maps] @distributed_trace_async async def get_synonym_map_names(self, **kwargs: Any) -> List[str]: @@ -349,7 +347,7 @@ async def get_synonym_map_names(self, **kwargs: Any) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.synonym_maps.list(**kwargs) + result = 
await self._client.synonym_maps_operations.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy return [x.name for x in result.synonym_maps] @@ -374,7 +372,7 @@ async def get_synonym_map(self, name: str, **kwargs: Any) -> SynonymMap: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.synonym_maps.get(name, **kwargs) + result = await self._client.synonym_maps_operations.get(name, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -405,13 +403,17 @@ async def delete_synonym_map( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(synonym_map, match_condition) - kwargs.update(access_condition) + if isinstance(synonym_map, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = synonym_map.name # type: ignore + etag = synonym_map.e_tag # type: ignore except AttributeError: name = synonym_map - await self._client.synonym_maps.delete(synonym_map_name=name, error_map=error_map, **kwargs) + await self._client.synonym_maps_operations.delete( + synonym_map_name=name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace_async async def create_synonym_map(self, synonym_map: SynonymMap, **kwargs: Any) -> SynonymMap: @@ -434,7 +436,7 @@ async def create_synonym_map(self, synonym_map: SynonymMap, **kwargs: Any) -> Sy """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = await self._client.synonym_maps.create(patched_synonym_map, **kwargs) + result = await self._client.synonym_maps_operations.create(patched_synonym_map, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -456,14 +458,13 @@ async def create_or_update_synonym_map( :rtype: ~azure.search.documents.indexes.models.SynonymMap """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(synonym_map, match_condition) - kwargs.update(access_condition) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = await self._client.synonym_maps.create_or_update( + result = await self._client.synonym_maps_operations.create_or_update( synonym_map_name=synonym_map.name, synonym_map=patched_synonym_map, prefer="return=representation", - error_map=error_map, + etag=synonym_map.e_tag, + match_condition=match_condition, **kwargs ) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @@ -477,7 +478,7 @@ async def get_service_statistics(self, **kwargs) -> MutableMapping[str, Any]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.get_service_statistics(**kwargs) - return result.as_dict() + return result @distributed_trace def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs) -> AsyncItemPaged[SearchAlias]: @@ -495,7 +496,7 @@ def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs) -> Async if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - return cast(AsyncItemPaged[SearchAlias], self._client.aliases.list(**kwargs)) + return cast(AsyncItemPaged[SearchAlias], 
self._client.aliases_operations.list(**kwargs)) @distributed_trace def list_alias_names(self, **kwargs) -> AsyncItemPaged[str]: @@ -508,7 +509,7 @@ def list_alias_names(self, **kwargs) -> AsyncItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.aliases.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.aliases_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(AsyncItemPaged[str], names) @distributed_trace_async @@ -522,8 +523,8 @@ async def get_alias(self, name: str, **kwargs) -> SearchAlias: :raises: ~azure.core.exceptions.HttpResponseError """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.aliases.get(name, **kwargs) - return result + result = await self._client.aliases_operations.get(name, **kwargs) + return cast(SearchAlias, result) @distributed_trace_async async def delete_alias( @@ -551,13 +552,17 @@ async def delete_alias( :caption: Delete an alias. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(alias, match_condition) - kwargs.update(access_condition) + if isinstance(alias, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: alias_name = alias.name # type: ignore + etag = alias.e_tag # type: ignore except AttributeError: alias_name = alias - await self._client.aliases.delete(alias_name=alias_name, error_map=error_map, **kwargs) + await self._client.aliases_operations.delete( + alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace_async async def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: @@ -578,8 +583,8 @@ async def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: :caption: Create an alias. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.aliases.create(alias, **kwargs) - return result # pylint:disable=protected-access + result = await self._client.aliases_operations.create(alias, **kwargs) + return cast(SearchAlias, result) @distributed_trace_async async def create_or_update_alias( @@ -609,12 +614,15 @@ async def create_or_update_alias( :caption: Update an alias. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(alias, match_condition) - kwargs.update(access_condition) - result = await self._client.aliases.create_or_update( - alias_name=alias.name, alias=alias, prefer="return=representation", error_map=error_map, **kwargs + result = await self._client.aliases_operations.create_or_update( + alias_name=alias.name, + alias=alias, + prefer="return=representation", + etag=alias.e_tag, + match_condition=match_condition, + **kwargs ) - return result # pylint:disable=protected-access + return cast(SearchAlias, result) @distributed_trace_async async def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs) -> AsyncHttpResponse: @@ -627,4 +635,4 @@ async def send_request(self, request: HttpRequest, *, stream: bool = False, **kw :rtype: ~azure.core.rest.AsyncHttpResponse """ request.headers = self._merge_client_headers(request.headers) - return await self._client._send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access + return await self._client.send_request(request, stream=stream, **kwargs) # pylint:disable=protected-access diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py index c83e536fc788..dc99de543a56 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py @@ -10,20 +10,17 @@ from azure.core.credentials_async import AsyncTokenCredential from azure.core.tracing.decorator_async import distributed_trace_async -from .._generated.aio import SearchServiceClient as _SearchServiceClient -from .._generated.models import ( +from ..._generated.aio import SearchClient as _SearchServiceClient +from ..._generated.models import ( SkillNames, SearchIndexerStatus, DocumentKeysOrIds, ) from ..models import SearchIndexer, SearchIndexerSkillset, SearchIndexerDataSourceConnection -from .._utils import ( - get_access_conditions, - normalize_endpoint, -) +from .._utils import normalize_endpoint from ..._api_versions import DEFAULT_VERSION from ..._headers_mixin import HeadersMixin -from ..._utils import get_authentication_policy +from ..._utils import DEFAULT_AUDIENCE from ..._version import SDK_MONIKER @@ -48,21 +45,19 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, AsyncTok self._endpoint = normalize_endpoint(endpoint) # type: str self._credential = credential audience = kwargs.pop("audience", None) - if isinstance(credential, AzureKeyCredential): - self._aad = False - self._client = _SearchServiceClient( - endpoint=endpoint, sdk_moniker=SDK_MONIKER, api_version=self._api_version, **kwargs - ) - else: - self._aad = True - authentication_policy = get_authentication_policy(credential, audience=audience, is_async=True) - self._client = _SearchServiceClient( - endpoint=endpoint, - authentication_policy=authentication_policy, - sdk_moniker=SDK_MONIKER, - api_version=self._api_version, - **kwargs - ) + if not audience: + audience = DEFAULT_AUDIENCE + scope = audience.rstrip("/") + "/.default" + credential_scopes = [scope] + self._aad = not isinstance(credential, AzureKeyCredential) + self._client = _SearchServiceClient( + endpoint=endpoint, + credential=credential, + sdk_moniker=SDK_MONIKER, + api_version=self._api_version, + 
credential_scopes=credential_scopes, + **kwargs + ) async def __aenter__(self) -> "SearchIndexerClient": await self._client.__aenter__() @@ -98,7 +93,7 @@ async def create_indexer(self, indexer: SearchIndexer, **kwargs: Any) -> SearchI """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = await self._client.indexers.create(patched_indexer, **kwargs) + result = await self._client.indexers_operations.create(patched_indexer, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -126,15 +121,14 @@ async def create_or_update_indexer( :rtype: ~azure.search.documents.indexes.models.SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(indexer, match_condition) - kwargs.update(access_condition) name = indexer.name patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = await self._client.indexers.create_or_update( + result = await self._client.indexers_operations.create_or_update( indexer_name=name, indexer=patched_indexer, prefer="return=representation", - error_map=error_map, + etag=indexer.e_tag, + match_condition=match_condition, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs @@ -160,7 +154,7 @@ async def get_indexer(self, name: str, **kwargs: Any) -> SearchIndexer: :caption: Retrieve a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexers.get(name, **kwargs) + result = await self._client.indexers_operations.get(name, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -186,7 +180,7 @@ async def get_indexers(self, *, select: Optional[List[str]] = None, **kwargs) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.indexers.list(**kwargs) + result = await self._client.indexers_operations.list(**kwargs) assert result.indexers is not None # Hint for mypy # pylint:disable=protected-access return [cast(SearchIndexer, SearchIndexer._from_generated(index)) for index in result.indexers] @@ -199,7 +193,7 @@ async def get_indexer_names(self, **kwargs) -> Sequence[str]: :rtype: list[str] """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexers.list(**kwargs) + result = await self._client.indexers_operations.list(**kwargs) assert result.indexers is not None # Hint for mypy return [x.name for x in result.indexers] @@ -230,13 +224,15 @@ async def delete_indexer( :caption: Delete a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(indexer, match_condition) - kwargs.update(access_condition) + if isinstance(indexer, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = indexer.name # type: ignore + etag = indexer.e_tag # type: ignore except AttributeError: name = indexer - await self._client.indexers.delete(name, error_map=error_map, **kwargs) + await 
self._client.indexers_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace_async async def run_indexer(self, name: str, **kwargs: Any) -> None: @@ -255,7 +251,7 @@ async def run_indexer(self, name: str, **kwargs: Any) -> None: :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - await self._client.indexers.run(name, **kwargs) + await self._client.indexers_operations.run(name, **kwargs) @distributed_trace_async async def reset_indexer(self, name: str, **kwargs: Any) -> None: @@ -274,7 +270,7 @@ async def reset_indexer(self, name: str, **kwargs: Any) -> None: :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - await self._client.indexers.reset(name, **kwargs) + await self._client.indexers_operations.reset(name, **kwargs) @distributed_trace_async async def reset_documents( @@ -304,7 +300,7 @@ async def reset_documents( name = indexer.name # type: ignore except AttributeError: name = indexer - await self._client.indexers.reset_docs(name, overwrite=overwrite, **kwargs) + await self._client.indexers_operations.reset_docs(name, overwrite=overwrite, **kwargs) return @distributed_trace_async @@ -327,7 +323,7 @@ async def get_indexer_status(self, name: str, **kwargs: Any) -> SearchIndexerSta :caption: Get a SearchIndexer's status """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return await self._client.indexers.get_status(name, **kwargs) + return await self._client.indexers_operations.get_status(name, **kwargs) @distributed_trace_async async def create_data_source_connection( @@ -351,7 +347,7 @@ async def create_data_source_connection( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) # pylint:disable=protected-access packed_data_source = data_source_connection._to_generated() - result = await self._client.data_sources.create(packed_data_source, **kwargs) + result = await self._client.data_sources_operations.create(packed_data_source, **kwargs) return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @distributed_trace_async @@ -374,19 +370,15 @@ async def create_or_update_data_source_connection( :rtype: ~azure.search.documents.indexes.models.SearchIndexerDataSourceConnection """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions( - data_source_connection, - match_condition, - ) - kwargs.update(access_condition) name = data_source_connection.name # pylint:disable=protected-access packed_data_source = data_source_connection._to_generated() - result = await self._client.data_sources.create_or_update( + result = await self._client.data_sources_operations.create_or_update( data_source_name=name, data_source=packed_data_source, prefer="return=representation", - error_map=error_map, + etag=data_source_connection.e_tag, + match_condition=match_condition, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, **kwargs ) @@ -419,16 +411,17 @@ async def delete_data_source_connection( :caption: Delete a SearchIndexerDataSourceConnection """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions( - data_source_connection, - match_condition, - ) - kwargs.update(access_condition) + if isinstance(data_source_connection, str) and match_condition is not 
MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = data_source_connection.name # type: ignore + etag = data_source_connection.e_tag # type: ignore except AttributeError: name = data_source_connection - await self._client.data_sources.delete(data_source_name=name, error_map=error_map, **kwargs) + await self._client.data_sources_operations.delete( + data_source_name=name, etag=etag, match_condition=match_condition, **kwargs + ) @distributed_trace_async async def get_data_source_connection( @@ -455,7 +448,7 @@ async def get_data_source_connection( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.data_sources.get(name, **kwargs) + result = await self._client.data_sources_operations.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @@ -476,10 +469,13 @@ async def get_data_source_connections(self, **kwargs: Any) -> Sequence[SearchInd :caption: List all SearchIndexerDataSourceConnections """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.data_sources.list(**kwargs) + result = await self._client.data_sources_operations.list(**kwargs) assert result.data_sources is not None # Hint for mypy # pylint:disable=protected-access - return [SearchIndexerDataSourceConnection._from_generated(x) for x in result.data_sources] + return [ + cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(x)) + for x in result.data_sources + ] @distributed_trace_async async def get_data_source_connection_names(self, **kwargs) -> Sequence[str]: @@ -490,7 +486,7 @@ async def get_data_source_connection_names(self, **kwargs) -> Sequence[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.data_sources.list(**kwargs) + result = await self._client.data_sources_operations.list(**kwargs) assert result.data_sources is not None # Hint for mypy return [x.name for x in result.data_sources] @@ -510,9 +506,12 @@ async def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.skillsets.list(**kwargs) + result = await self._client.skillsets_operations.list(**kwargs) assert result.skillsets is not None # Hint for mypy - return [SearchIndexerSkillset._from_generated(skillset) for skillset in result.skillsets] + return [ + cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(skillset)) + for skillset in result.skillsets + ] @distributed_trace_async async def get_skillset_names(self, **kwargs) -> List[str]: @@ -524,7 +523,7 @@ async def get_skillset_names(self, **kwargs) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.skillsets.list(**kwargs) + result = await self._client.skillsets_operations.list(**kwargs) assert result.skillsets is not None # Hint for mypy return [x.name for x in result.skillsets] @@ -539,7 +538,7 @@ async def get_skillset(self, name: str, **kwargs) -> SearchIndexerSkillset: :raises: ~azure.core.exceptions.ResourceNotFoundError """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.skillsets.get(name, **kwargs) 
+ result = await self._client.skillsets_operations.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(result)) @@ -561,13 +560,15 @@ async def delete_skillset( :paramtype match_condition: ~azure.core.MatchConditions """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(skillset, match_condition) - kwargs.update(access_condition) + if isinstance(skillset, str) and match_condition is not MatchConditions.Unconditionally: + raise ValueError("A model must be passed to use access conditions") + etag = None try: name = skillset.name # type: ignore + etag = skillset.e_tag # type: ignore except AttributeError: name = skillset - await self._client.skillsets.delete(name, error_map=error_map, **kwargs) + await self._client.skillsets_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace_async async def create_skillset(self, skillset: SearchIndexerSkillset, **kwargs: Any) -> SearchIndexerSkillset: @@ -581,7 +582,7 @@ async def create_skillset(self, skillset: SearchIndexerSkillset, **kwargs: Any) """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) skillset_gen = skillset._to_generated() if hasattr(skillset, "_to_generated") else skillset - result = await self._client.skillsets.create(skillset_gen, **kwargs) + result = await self._client.skillsets_operations.create(skillset_gen, **kwargs) return cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(result)) @distributed_trace_async @@ -612,15 +613,14 @@ async def create_or_update_skillset( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map, access_condition = get_access_conditions(skillset, match_condition) - kwargs.update(access_condition) skillset_gen = skillset._to_generated() if hasattr(skillset, "_to_generated") else skillset - result = await self._client.skillsets.create_or_update( + result = await self._client.skillsets_operations.create_or_update( skillset_name=skillset.name, skillset=skillset_gen, prefer="return=representation", - error_map=error_map, + etag=skillset.e_tag, + match_condition=match_condition, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, **kwargs @@ -645,5 +645,5 @@ async def reset_skills(self, skillset: Union[str, SearchIndexerSkillset], skill_ except AttributeError: name = skillset names = SkillNames(skill_names=skill_names) - await self._client.skillsets.reset_skills(skillset_name=name, skill_names=names, **kwargs) + await self._client.skillsets_operations.reset_skills(skillset_name=name, skill_names=names, **kwargs) return diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py index 736e0ecb611c..64080f58809b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py @@ -32,8 +32,7 @@ SearchIndex, ) from . 
import _edm -from ..._generated.models import SuggestOptions -from .._generated.models import ( +from ..._generated.models import ( SearchAlias, AIServicesAccountIdentity, AIServicesAccountKey, @@ -473,7 +472,6 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "StopAnalyzer", "StopwordsList", "StopwordsTokenFilter", - "SuggestOptions", "SynonymMap", "SynonymTokenFilter", "TagScoringFunction", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py index 403504c1ed78..13b5b34db845 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_index.py @@ -3,11 +3,13 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- -from typing import Any, Dict, Union, List, Optional, MutableMapping, Callable, cast +import json +from typing import Any, Dict, Union, List, Optional, cast from typing_extensions import Self -from .._generated import _serialization +from azure.core.exceptions import DeserializationError +from ..._generated._model_base import Model from ._edm import Collection, ComplexType, String -from .._generated.models import ( +from ..._generated.models import ( SearchField as _SearchField, SearchIndex as _SearchIndex, PatternTokenizer as _PatternTokenizer, @@ -37,7 +39,7 @@ __all__ = ("ComplexField", "SearchableField", "SimpleField") -class SearchField(_serialization.Model): +class SearchField(Model): # pylint: disable=too-many-instance-attributes """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. @@ -212,7 +214,7 @@ def __init__( vector_encoding_format: Optional[Union[str, VectorEncodingFormat]] = None, **kwargs ): - super().__init__(**kwargs) + super().__init__() self.name = name self.type = type self.key = key @@ -258,7 +260,7 @@ def _to_generated(self) -> _SearchField: @classmethod def _from_generated(cls, search_field) -> Optional[Self]: - if not search_field: + if search_field is None: return None # pylint:disable=protected-access fields = ( @@ -292,65 +294,53 @@ def _from_generated(cls, search_field) -> Optional[Self]: vector_encoding_format=search_field.vector_encoding_format, ) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_generated().serialize(keep_readonly=keep_readonly, **kwargs) + return json.dumps(self._to_generated().as_dict()) # type: ignore @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a SearchField instance. - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. + :param str data: A JSON str using RestAPI structure. 
:returns: A SearchField instance :raises: DeserializationError if something went wrong """ - return cls._from_generated(_SearchField.deserialize(data, content_type=content_type)) + try: + obj_dict = json.loads(data) + obj = _SearchField(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = _serialization.attribute_transformer, - **kwargs: Any - ) -> MutableMapping[str, Any]: + def as_dict(self, **kwargs: Any) -> Dict[str, Any]: """Return a dict that can be serialized using json.dump. - :param bool keep_readonly: If you want to serialize the readonly attributes - :param Callable key_transformer: A callable that will transform the key of the dict :returns: A dict JSON compatible object :rtype: dict """ - return self._to_generated().as_dict( # type: ignore - keep_readonly=keep_readonly, key_transformer=key_transformer, **kwargs - ) + return self._to_generated().as_dict(**kwargs) # type: ignore @classmethod - def from_dict( # type: ignore - cls, - data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, - ) -> Optional[Self]: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) + def from_dict(cls, data: Any, **kwargs: Any) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument + """Parse a dict return a model. :param dict data: A dict using RestAPI structure - :param Callable key_extractors: A callable that will extract a key from a dict - :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchField instance :rtype: SearchField :raises: DeserializationError if something went wrong """ - return cls._from_generated( - _SearchField.from_dict(data, content_type=content_type, key_extractors=key_extractors) - ) + try: + obj = _SearchField(data) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err def SimpleField( @@ -604,7 +594,7 @@ def ComplexField( return SearchField(**result) -class SearchIndex(_serialization.Model): +class SearchIndex(Model): # pylint: disable=too-many-instance-attributes """Represents a search index definition, which describes the fields and search behavior of an index. @@ -677,7 +667,7 @@ def __init__( e_tag: Optional[str] = None, **kwargs ): - super().__init__(**kwargs) + super().__init__() self.name = name self.fields = fields self.scoring_profiles = scoring_profiles @@ -733,7 +723,7 @@ def _to_generated(self) -> _SearchIndex: @classmethod def _from_generated(cls, search_index) -> Optional[Self]: - if not search_index: + if search_index is None: return None if search_index.analyzers: analyzers = [unpack_analyzer(x) for x in search_index.analyzers] # type: ignore @@ -779,66 +769,56 @@ def _from_generated(cls, search_index) -> Optional[Self]: vector_search=search_index.vector_search, ) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. 
- :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_generated().serialize(keep_readonly=keep_readonly, **kwargs) + return json.dumps(self._to_generated().as_dict()) @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a SearchIndex instance. - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. + :param str data: A JSON str using RestAPI structure. :returns: A SearchIndex instance :rtype: SearchIndex :raises: DeserializationError if something went wrong """ - return cls._from_generated(_SearchIndex.deserialize(data, content_type=content_type)) + try: + obj_dict = json.loads(data) + obj = _SearchIndex(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = _serialization.attribute_transformer, - **kwargs: Any - ) -> MutableMapping[str, Any]: + def as_dict(self, **kwargs: Any) -> Dict[str, Any]: """Return a dict that can be serialized using json.dump. - :param bool keep_readonly: If you want to serialize the readonly attributes - :param Callable key_transformer: A callable that will transform the key of the dict :returns: A dict JSON compatible object :rtype: dict """ - return self._to_generated().as_dict( # type: ignore - keep_readonly=keep_readonly, key_transformer=key_transformer, **kwargs - ) + return self._to_generated().as_dict(**kwargs) # type: ignore @classmethod def from_dict( # type: ignore cls, data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, ) -> Optional[Self]: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) + """Parse a dict return a model. :param dict data: A dict using RestAPI structure - :param Callable key_extractors: A callable that will extract a key from a dict - :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchIndex instance :rtype: SearchIndex :raises: DeserializationError if something went wrong """ - return cls._from_generated( - _SearchIndex.from_dict(data, content_type=content_type, key_extractors=key_extractors) - ) + try: + obj = _SearchIndex(data) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err def pack_search_field(search_field: SearchField) -> _SearchField: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py index 2831c47bbc50..e4e51f110dfb 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/_models.py @@ -4,13 +4,14 @@ # Licensed under the MIT License. 
See License.txt in the project root for # license information. # -------------------------------------------------------------------------- - -from typing import Any, List, Optional, MutableMapping, Dict, Callable +import json +from typing import Any, List, Optional, Dict from enum import Enum from typing_extensions import Self from azure.core import CaseInsensitiveEnumMeta -from .._generated import _serialization -from .._generated.models import ( +from azure.core.exceptions import DeserializationError +from ..._generated._model_base import Model +from ..._generated.models import ( LexicalAnalyzer, LexicalTokenizer, AnalyzeRequest, @@ -43,7 +44,7 @@ DELIMITER = "|" -class SearchIndexerSkillset(_serialization.Model): +class SearchIndexerSkillset(Model): """A list of skills. All required parameters must be populated in order to send to Azure. @@ -88,7 +89,7 @@ def __init__( encryption_key: Optional["SearchResourceEncryptionKey"] = None, **kwargs: Any ) -> None: - super().__init__(**kwargs) + super().__init__() self.name = name self.description = description self.skills = skills @@ -107,6 +108,8 @@ def _to_generated(self): generated_skills.append(skill) assert len(generated_skills) == len(self.skills) encryption_key = getattr(self, "encryption_key", None) + # pylint:disable=protected-access + encryption_key_patched = None if encryption_key is None else encryption_key._to_generated() return _SearchIndexerSkillset( name=getattr(self, "name", None), description=getattr(self, "description", None), @@ -115,14 +118,12 @@ def _to_generated(self): knowledge_store=getattr(self, "knowledge_store", None), index_projection=getattr(self, "index_projection", None), e_tag=getattr(self, "e_tag", None), - encryption_key=( - encryption_key._to_generated() if encryption_key else None # pylint:disable=protected-access - ), + encryption_key=encryption_key_patched, ) @classmethod def _from_generated(cls, skillset) -> Optional[Self]: - if not skillset: + if skillset is None: return None custom_skills = [] for skill in skillset.skills: @@ -140,66 +141,56 @@ def _from_generated(cls, skillset) -> Optional[Self]: kwargs["skills"] = custom_skills return cls(**kwargs) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_generated().serialize(keep_readonly=keep_readonly, **kwargs) # type: ignore + return json.dumps(self._to_generated().as_dict()) # type: ignore @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a SearchIndexerSkillset instance. - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. + :param str data: A JSON str using RestAPI structure. 
:returns: A SearchIndexerSkillset instance :rtype: SearchIndexerSkillset :raises: DeserializationError if something went wrong """ - return cls._from_generated(_SearchIndexerSkillset.deserialize(data, content_type=content_type)) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = _serialization.attribute_transformer, - **kwargs: Any - ) -> MutableMapping[str, Any]: + try: + obj_dict = json.loads(data) + obj = _SearchIndexerSkillset(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err + + def as_dict(self, **kwargs: Any) -> Dict[str, Any]: """Return a dict that can be serialized using json.dump. - :param bool keep_readonly: If you want to serialize the readonly attributes - :param Callable key_transformer: A callable that will transform the key of the dict :returns: A dict JSON compatible object :rtype: dict """ - return self._to_generated().as_dict( # type: ignore - keep_readonly=keep_readonly, key_transformer=key_transformer, **kwargs - ) + return self._to_generated().as_dict(**kwargs) # type: ignore @classmethod def from_dict( # type: ignore cls, data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, ) -> Optional[Self]: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) + """Parse a dict return a model. :param dict data: A dict using RestAPI structure - :param Callable key_extractors: A callable that will extract a key from a dict - :param str content_type: JSON by default, set application/xml if XML. 
:returns: A SearchIndexerSkillset instance :rtype: SearchIndexerSkillset :raises: DeserializationError if something went wrong """ - return cls._from_generated( - _SearchIndexerSkillset.from_dict(data, content_type=content_type, key_extractors=key_extractors) - ) + try: + obj = _SearchIndexerSkillset(data) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err class EntityRecognitionSkillVersion(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -261,28 +252,6 @@ class EntityRecognitionSkill(SearchIndexerSkill): :vartype skill_version: ~azure.search.documents.indexes.models.EntityRecognitionSkillVersion """ - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - "minimum_precision": {"maximum": 1, "minimum": 0}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "categories": {"key": "categories", "type": "[str]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "include_typeless_entities": {"key": "includeTypelessEntities", "type": "bool"}, - "minimum_precision": {"key": "minimumPrecision", "type": "float"}, - "model_version": {"key": "modelVersion", "type": "str"}, - "skill_version": {"key": "skillVersion", "type": "str"}, - } - def __init__(self, **kwargs): # pop skill_version from kwargs to avoid warning in msrest skill_version = kwargs.pop("skill_version", EntityRecognitionSkillVersion.V3) @@ -317,7 +286,7 @@ def _to_generated(self): @classmethod def _from_generated(cls, skill): - if not skill: + if skill is None: return None kwargs = skill.as_dict() if isinstance(skill, _EntityRecognitionSkillV3): @@ -381,25 +350,6 @@ class SentimentSkill(SearchIndexerSkill): :vartype skill_version: ~azure.search.documents.indexes.models.SentimentSkillVersion """ - _validation = { - "odata_type": {"required": True}, - "inputs": {"required": True}, - "outputs": {"required": True}, - } - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "description": {"key": "description", "type": "str"}, - "context": {"key": "context", "type": "str"}, - "inputs": {"key": "inputs", "type": "[InputFieldMappingEntry]"}, - "outputs": {"key": "outputs", "type": "[OutputFieldMappingEntry]"}, - "default_language_code": {"key": "defaultLanguageCode", "type": "str"}, - "include_opinion_mining": {"key": "includeOpinionMining", "type": "bool"}, - "model_version": {"key": "modelVersion", "type": "str"}, - "skill_version": {"key": "skillVersion", "type": "str"}, - } - def __init__(self, **kwargs): # pop skill_version from kwargs to avoid warning in msrest skill_version = kwargs.pop("skill_version", SentimentSkillVersion.V3) @@ -431,7 +381,7 @@ def _to_generated(self): @classmethod def _from_generated(cls, skill): - if not skill: + if skill is None: return None kwargs = skill.as_dict() if isinstance(skill, _SentimentSkillV3): @@ -439,7 +389,7 @@ def _from_generated(cls, skill): return None -class AnalyzeTextOptions(_serialization.Model): +class AnalyzeTextOptions(Model): """Specifies some text and analysis components used to break that text into tokens. 
All required parameters must be populated in order to send to Azure. @@ -491,7 +441,7 @@ def __init__( char_filters: Optional[List[str]] = None, **kwargs ): - super().__init__(**kwargs) + super().__init__() self.text = text self.analyzer_name = analyzer_name self.tokenizer_name = tokenizer_name @@ -499,7 +449,7 @@ def __init__( self.token_filters = token_filters self.char_filters = char_filters - def _to_analyze_request(self): + def _to_generated(self): return AnalyzeRequest( text=self.text, analyzer=self.analyzer_name, @@ -510,8 +460,8 @@ def _to_analyze_request(self): ) @classmethod - def _from_analyze_request(cls, analyze_request) -> Optional[Self]: - if not analyze_request: + def _from_generated(cls, analyze_request) -> Optional[Self]: + if analyze_request is None: return None return cls( text=analyze_request.text, @@ -522,66 +472,54 @@ def _from_analyze_request(cls, analyze_request) -> Optional[Self]: char_filters=analyze_request.char_filters, ) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_analyze_request().serialize(keep_readonly=keep_readonly, **kwargs) # type: ignore + return json.dumps(self._to_generated().as_dict()) # type: ignore @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a AnalyzeTextOptions instance. :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. - :returns: A AnalyzeTextOptions instance + :returns: An AnalyzeTextOptions instance :rtype: AnalyzeTextOptions :raises: DeserializationError if something went wrong """ - return cls._from_analyze_request(AnalyzeRequest.deserialize(data, content_type=content_type)) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = _serialization.attribute_transformer, - **kwargs: Any - ) -> MutableMapping[str, Any]: + try: + obj_dict = json.loads(data) + obj = AnalyzeRequest(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err + + def as_dict(self, **kwargs: Any) -> Dict[str, Any]: """Return a dict that can be serialized using json.dump. - :param bool keep_readonly: If you want to serialize the readonly attributes - :param Callable key_transformer: A callable that will transform the key of the dict :returns: A dict JSON compatible object :rtype: dict """ - return self._to_analyze_request().as_dict( # type: ignore - keep_readonly=keep_readonly, key_transformer=key_transformer, **kwargs - ) + return self._to_generated().as_dict(**kwargs) # type: ignore @classmethod - def from_dict( # type: ignore - cls, - data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, - ) -> Optional[Self]: - """Parse a dict using given key extractor return a model. 
- - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) + def from_dict(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument + """Parse a dict return a model. :param dict data: A dict using RestAPI structure - :param Callable key_extractors: A callable that will extract a key from a dict - :param str content_type: JSON by default, set application/xml if XML. - :returns: A AnalyzeTextOptions instance + :returns: An AnalyzeTextOptions instance :rtype: AnalyzeTextOptions :raises: DeserializationError if something went wrong """ - return cls._from_analyze_request( - AnalyzeRequest.from_dict(data, content_type=content_type, key_extractors=key_extractors) - ) + try: + obj = AnalyzeRequest(data) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err class CustomAnalyzer(LexicalAnalyzer): @@ -616,11 +554,14 @@ class CustomAnalyzer(LexicalAnalyzer): """ def __init__(self, **kwargs): + tokenizer_name = kwargs.pop("tokenizer_name", None) + token_filters = kwargs.pop("token_filters", None) + char_filters = kwargs.pop("char_filters", None) super(CustomAnalyzer, self).__init__(**kwargs) self.odata_type = "#Microsoft.Azure.Search.CustomAnalyzer" - self.tokenizer_name = kwargs["tokenizer_name"] - self.token_filters = kwargs.get("token_filters", None) - self.char_filters = kwargs.get("char_filters", None) + self.tokenizer_name = tokenizer_name + self.token_filters = token_filters + self.char_filters = char_filters def _to_generated(self): return _CustomAnalyzer( @@ -633,7 +574,7 @@ def _to_generated(self): @classmethod def _from_generated(cls, custom_analyzer): - if not custom_analyzer: + if custom_analyzer is None: return None return cls( name=custom_analyzer.name, @@ -667,27 +608,20 @@ class PatternAnalyzer(LexicalAnalyzer): :vartype stopwords: list[str] """ - _validation = {"odata_type": {"required": True}, "name": {"required": True}} - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "lower_case_terms": {"key": "lowercase", "type": "bool"}, - "pattern": {"key": "pattern", "type": "str"}, - "flags": {"key": "flags", "type": "[str]"}, - "stopwords": {"key": "stopwords", "type": "[str]"}, - } - def __init__(self, **kwargs): + lower_case_terms = kwargs.pop("lower_case_terms", True) + pattern = kwargs.pop("pattern", r"\W+") + flags = kwargs.pop("flags", None) + stopwords = kwargs.pop("stopwords", None) super(PatternAnalyzer, self).__init__(**kwargs) self.odata_type = "#Microsoft.Azure.Search.PatternAnalyzer" - self.lower_case_terms = kwargs.get("lower_case_terms", True) - self.pattern = kwargs.get("pattern", r"\W+") - self.flags = kwargs.get("flags", None) - self.stopwords = kwargs.get("stopwords", None) + self.lower_case_terms = lower_case_terms + self.pattern = pattern + self.flags = flags + self.stopwords = stopwords def _to_generated(self): - if not self.flags: + if self.flags is None: flags = None else: flags = DELIMITER.join(self.flags) @@ -701,9 +635,9 @@ def _to_generated(self): @classmethod def _from_generated(cls, pattern_analyzer): - if not pattern_analyzer: + if pattern_analyzer is None: return None - if not pattern_analyzer.flags: + if pattern_analyzer.flags is None: flags = None else: flags = pattern_analyzer.flags.split(DELIMITER) @@ -738,25 +672,18 @@ class 
PatternTokenizer(LexicalTokenizer): :vartype group: int """ - _validation = {"odata_type": {"required": True}, "name": {"required": True}} - - _attribute_map = { - "odata_type": {"key": "@odata\\.type", "type": "str"}, - "name": {"key": "name", "type": "str"}, - "pattern": {"key": "pattern", "type": "str"}, - "flags": {"key": "flags", "type": "[str]"}, - "group": {"key": "group", "type": "int"}, - } - def __init__(self, **kwargs): + pattern = kwargs.pop("pattern", r"\W+") + flags = kwargs.pop("flags", None) + group = kwargs.pop("group", -1) super(PatternTokenizer, self).__init__(**kwargs) self.odata_type = "#Microsoft.Azure.Search.PatternTokenizer" - self.pattern = kwargs.get("pattern", r"\W+") - self.flags = kwargs.get("flags", None) - self.group = kwargs.get("group", -1) + self.pattern = pattern + self.flags = flags + self.group = group def _to_generated(self): - if not self.flags: + if self.flags is None: flags = None else: flags = DELIMITER.join(self.flags) @@ -769,9 +696,9 @@ def _to_generated(self): @classmethod def _from_generated(cls, pattern_tokenizer): - if not pattern_tokenizer: + if pattern_tokenizer is None: return None - if not pattern_tokenizer.flags: + if pattern_tokenizer.flags is None: flags = None else: flags = pattern_tokenizer.flags.split(DELIMITER) @@ -783,7 +710,7 @@ def _from_generated(cls, pattern_tokenizer): ) -class SearchResourceEncryptionKey(_serialization.Model): +class SearchResourceEncryptionKey(Model): """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. @@ -823,7 +750,7 @@ def __init__( identity: Optional[SearchIndexerDataIdentity] = None, **kwargs ): - super().__init__(**kwargs) + super().__init__() self.key_name = key_name self.key_version = key_version self.vault_uri = vault_uri @@ -849,7 +776,7 @@ def _to_generated(self): @classmethod def _from_generated(cls, search_resource_encryption_key) -> Optional[Self]: - if not search_resource_encryption_key: + if search_resource_encryption_key is None: return None if search_resource_encryption_key.access_credentials: application_id = search_resource_encryption_key.access_credentials.application_id @@ -866,70 +793,58 @@ def _from_generated(cls, search_resource_encryption_key) -> Optional[Self]: identity=search_resource_encryption_key.identity, ) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_generated().serialize(keep_readonly=keep_readonly, **kwargs) # type: ignore + return json.dumps(self._to_generated().as_dict()) # type: ignore @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a SearchResourceEncryptionKey instance. - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. + :param str data: A JSON str using RestAPI structure. 
:returns: A SearchResourceEncryptionKey instance :raises: DeserializationError if something went wrong """ - return cls._from_generated( # type: ignore - _SearchResourceEncryptionKey.deserialize(data, content_type=content_type) - ) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = _serialization.attribute_transformer, - **kwargs: Any - ) -> MutableMapping[str, Any]: + try: + obj_dict = json.loads(data) + obj = _SearchResourceEncryptionKey(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err + + def as_dict(self, **kwargs: Any) -> Dict[str, Any]: """Return a dict that can be serialized using json.dump. - :param bool keep_readonly: If you want to serialize the readonly attributes - :param Callable key_transformer: A callable that will transform the key of the dict :returns: A dict JSON compatible object :rtype: dict """ - return self._to_generated().as_dict( # type: ignore - keep_readonly=keep_readonly, key_transformer=key_transformer, **kwargs - ) + return self._to_generated().as_dict(**kwargs) # type: ignore @classmethod def from_dict( # type: ignore cls, data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, ) -> Optional[Self]: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) + """Parse a dict return a model. :param dict data: A dict using RestAPI structure - :param Callable key_extractors: A callable that will extract a key from a dict - :param str content_type: JSON by default, set application/xml if XML. :returns: A SearchResourceEncryptionKey instance :rtype: SearchResourceEncryptionKey :raises: DeserializationError if something went wrong """ - return cls._from_generated( - _SearchResourceEncryptionKey.from_dict(data, content_type=content_type, key_extractors=key_extractors) - ) + try: + obj = _SearchResourceEncryptionKey(data) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err -class SynonymMap(_serialization.Model): +class SynonymMap(Model): """Represents a synonym map definition. Variables are only populated by the server, and will be ignored when sending a request. 
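For context on the serialization change running through the hand-written indexes models in this patch (serialize now returns a plain JSON string built from the generated model's as_dict, while deserialize and from_dict parse JSON strings or dicts directly instead of going through msrest), here is a minimal usage sketch. It assumes only what the patch itself shows; the synonym map name and synonym values are invented for illustration and are not part of this change.

    from azure.search.documents.indexes.models import SynonymMap

    # Build a hand-written model, serialize it to the JSON string shape the
    # service expects, then parse that string back into an equivalent model.
    sm = SynonymMap(name="names-sm", synonyms=["USA, United States, United States of America"])
    payload = sm.serialize()                    # JSON string built via _to_generated().as_dict()
    restored = SynonymMap.deserialize(payload)  # rebuilt through the generated _SynonymMap
    assert restored is not None and restored.name == sm.name
    as_plain_dict = restored.as_dict()          # plain dict suitable for json.dump
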
@@ -968,25 +883,25 @@ def __init__( e_tag: Optional[str] = None, **kwargs ): - super().__init__(**kwargs) + super().__init__() self.name = name self.synonyms = synonyms self.encryption_key = encryption_key self.e_tag = e_tag def _to_generated(self): + # pylint:disable=protected-access + encryption_key = None if self.encryption_key is None else self.encryption_key._to_generated() return _SynonymMap( name=self.name, synonyms="\n".join(self.synonyms), - encryption_key=( - self.encryption_key._to_generated() if self.encryption_key else None # pylint:disable=protected-access - ), + encryption_key=encryption_key, e_tag=self.e_tag, ) @classmethod def _from_generated(cls, synonym_map) -> Optional[Self]: - if not synonym_map: + if synonym_map is None: return None return cls( name=synonym_map.name, @@ -996,69 +911,57 @@ def _from_generated(cls, synonym_map) -> Optional[Self]: e_tag=synonym_map.e_tag, ) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_generated().serialize(keep_readonly=keep_readonly, **kwargs) # type: ignore + return json.dumps(self._to_generated().as_dict()) # type: ignore @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a SynonymMap instance. - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. + :param str data: A JSON str using RestAPI structure. :returns: A SynonymMap instance :rtype: SynonymMap :raises: DeserializationError if something went wrong """ - return cls._from_generated(_SynonymMap.deserialize(data, content_type=content_type)) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = _serialization.attribute_transformer, - **kwargs: Any - ) -> MutableMapping[str, Any]: + try: + obj_dict = json.loads(data) + obj = _SynonymMap(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err + + def as_dict(self, **kwargs: Any) -> Dict[str, Any]: """Return a dict that can be serialized using json.dump. - :param bool keep_readonly: If you want to serialize the readonly attributes - :param Callable key_transformer: A callable that will transform the key of the dict :returns: A dict JSON compatible object :rtype: dict """ - return self._to_generated().as_dict( # type: ignore - keep_readonly=keep_readonly, key_transformer=key_transformer, **kwargs - ) + return self._to_generated().as_dict(**kwargs) # type: ignore @classmethod - def from_dict( # type: ignore - cls, - data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, - ) -> Optional[Self]: - """Parse a dict using given key extractor return a model. 
- - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) + def from_dict(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument + """Parse a dict return a model. :param dict data: A dict using RestAPI structure - :param Callable key_extractors: A callable that will extract a key from a dict - :param str content_type: JSON by default, set application/xml if XML. :returns: A SynonymMap instance :rtype: SynonymMap :raises: DeserializationError if something went wrong """ - return cls._from_generated( - _SynonymMap.from_dict(data, content_type=content_type, key_extractors=key_extractors) - ) + try: + obj = _SynonymMap(data) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err -class SearchIndexerDataSourceConnection(_serialization.Model): +class SearchIndexerDataSourceConnection(Model): """Represents a datasource connection definition, which can be used to configure an indexer. All required parameters must be populated in order to send to Azure. @@ -1113,7 +1016,7 @@ def __init__( encryption_key: Optional[SearchResourceEncryptionKey] = None, **kwargs ): - super().__init__(**kwargs) + super().__init__() self.name = name self.description = description self.type = type @@ -1131,6 +1034,8 @@ def _to_generated(self): else: connection_string = self.connection_string credentials = DataSourceCredentials(connection_string=connection_string) + # pylint:disable=protected-access + encryption_key = None if self.encryption_key is None else self.encryption_key._to_generated() return _SearchIndexerDataSource( name=self.name, description=self.description, @@ -1140,15 +1045,13 @@ def _to_generated(self): data_change_detection_policy=self.data_change_detection_policy, data_deletion_detection_policy=self.data_deletion_detection_policy, e_tag=self.e_tag, - encryption_key=( - self.encryption_key._to_generated() if self.encryption_key else None # pylint: disable=protected-access - ), + encryption_key=encryption_key, identity=self.identity, ) @classmethod def _from_generated(cls, search_indexer_data_source) -> Optional[Self]: - if not search_indexer_data_source: + if search_indexer_data_source is None: return None connection_string = ( search_indexer_data_source.credentials.connection_string if search_indexer_data_source.credentials else None @@ -1172,70 +1075,59 @@ def _from_generated(cls, search_indexer_data_source) -> Optional[Self]: identity=search_indexer_data_source.identity, ) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. 
- :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_generated().serialize(keep_readonly=keep_readonly, **kwargs) # type: ignore + return json.dumps(self._to_generated().as_dict()) # type: ignore @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a SearchIndexerDataSourceConnection instance. - :param str data: A str using RestAPI structure. JSON by default. - :param str content_type: JSON by default, set application/xml if XML. + :param str data: A JSON str using RestAPI structure. :returns: A SearchIndexerDataSourceConnection instance :rtype: SearchIndexerDataSourceConnection :raises: DeserializationError if something went wrong """ - return cls._from_generated(_SearchIndexerDataSource.deserialize(data, content_type=content_type)) - - def as_dict( - self, - keep_readonly: bool = True, - key_transformer: Callable[[str, Dict[str, Any], Any], Any] = _serialization.attribute_transformer, - **kwargs: Any - ) -> MutableMapping[str, Any]: + try: + obj_dict = json.loads(data) + obj = _SearchIndexerDataSource(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err + + def as_dict(self, **kwargs: Any) -> Dict[str, Any]: """Return a dict that can be serialized using json.dump. - :param bool keep_readonly: If you want to serialize the readonly attributes - :param Callable key_transformer: A callable that will transform the key of the dict :returns: A dict JSON compatible object :rtype: dict """ - return self._to_generated().as_dict( # type: ignore - keep_readonly=keep_readonly, key_transformer=key_transformer, **kwargs - ) + return self._to_generated().as_dict(**kwargs) # type: ignore @classmethod - def from_dict( # type: ignore - cls, - data: Any, - key_extractors: Optional[Callable[[str, Dict[str, Any], Any], Any]] = None, - content_type: Optional[str] = None, - ) -> Optional[Self]: - """Parse a dict using given key extractor return a model. - - By default consider key - extractors (rest_key_case_insensitive_extractor, attribute_key_case_insensitive_extractor - and last_rest_key_case_insensitive_extractor) + def from_dict(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument + """Parse a dict return a model. :param dict data: A dict using RestAPI structure - :param Callable key_extractors: A callable that will extract a key from a dict - :param str content_type: JSON by default, set application/xml if XML. 
:returns: A SearchIndexerDataSourceConnection instance :rtype: SearchIndexerDataSourceConnection :raises: DeserializationError if something went wrong """ - return cls._from_generated( - _SearchIndexerDataSource.from_dict(data, content_type=content_type, key_extractors=key_extractors) - ) + try: + obj_dict = json.loads(data) + obj = _SearchIndexerDataSource(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err def pack_analyzer(analyzer): - if not analyzer: + if analyzer is None: return None if isinstance(analyzer, (PatternAnalyzer, CustomAnalyzer)): return analyzer._to_generated() # pylint:disable=protected-access @@ -1243,7 +1135,7 @@ def pack_analyzer(analyzer): def unpack_analyzer(analyzer): - if not analyzer: + if analyzer is None: return None if isinstance(analyzer, _PatternAnalyzer): return PatternAnalyzer._from_generated(analyzer) # pylint:disable=protected-access @@ -1252,7 +1144,7 @@ def unpack_analyzer(analyzer): return analyzer -class SearchIndexer(_serialization.Model): # pylint: disable=too-many-instance-attributes +class SearchIndexer(Model): # pylint: disable=too-many-instance-attributes """Represents an indexer. All required parameters must be populated in order to send to server. @@ -1355,7 +1247,7 @@ def __init__( steps without having to rebuild the index every time. :paramtype cache: ~azure.search.documents.indexes.models.SearchIndexerCache """ - super().__init__(**kwargs) + super().__init__() self.name = name self.description = description self.data_source_name = data_source_name @@ -1371,6 +1263,8 @@ def __init__( self.cache = cache def _to_generated(self): + # pylint:disable=protected-access + encryption_key = None if self.encryption_key is None else self.encryption_key._to_generated() return _SearchIndexer( name=self.name, description=self.description, @@ -1383,15 +1277,13 @@ def _to_generated(self): output_field_mappings=self.output_field_mappings, is_disabled=self.is_disabled, e_tag=self.e_tag, - encryption_key=( - self.encryption_key._to_generated() if self.encryption_key else None # pylint:disable=protected-access - ), + encryption_key=encryption_key, cache=self.cache, ) @classmethod def _from_generated(cls, search_indexer) -> Optional[Self]: - if not search_indexer: + if search_indexer is None: return None return cls( name=search_indexer.name, @@ -1410,22 +1302,27 @@ def _from_generated(cls, search_indexer) -> Optional[Self]: cache=search_indexer.cache, ) - def serialize(self, keep_readonly: bool = False, **kwargs: Any) -> MutableMapping[str, Any]: + def serialize(self, **kwargs: Any) -> str: + # pylint: disable=unused-argument """Return the JSON that would be sent to server from this model. - :param bool keep_readonly: If you want to serialize the readonly attributes - :returns: A dict JSON compatible object - :rtype: dict + :returns: A dict JSON compatible string + :rtype: str """ - return self._to_generated().serialize(keep_readonly=keep_readonly, **kwargs) # type: ignore + return json.dumps(self._to_generated().as_dict()) # type: ignore @classmethod - def deserialize(cls, data: Any, content_type: Optional[str] = None) -> Optional[Self]: # type: ignore + def deserialize(cls, data: Any, **kwargs) -> Optional[Self]: # type: ignore + # pylint: disable=unused-argument """Parse a str using the RestAPI syntax and return a SearchIndexer instance. - :param str data: A str using RestAPI structure. JSON by default. 
- :param str content_type: JSON by default, set application/xml if XML. + :param str data: A JSON str using RestAPI structure. :returns: A SearchIndexer instance :rtype: SearchIndexer :raises: DeserializationError if something went wrong """ - return cls._from_generated(_SearchIndexer.deserialize(data, content_type=content_type)) + try: + obj_dict = json.loads(data) + obj = _SearchIndexer(obj_dict) + return cls._from_generated(obj) + except json.JSONDecodeError as err: + raise DeserializationError("Failed to deserialize data.") from err diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py index b45daf448bd1..2bb19fef6d4b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py @@ -25,6 +25,7 @@ # -------------------------------------------------------------------------- from .._generated.models import ( + AutocompleteItem, AutocompleteMode, HybridCountAndFacetMode, HybridSearch, @@ -45,6 +46,7 @@ SemanticErrorMode, SemanticErrorReason, SemanticSearchResultsType, + SuggestResult, VectorFilterMode, VectorSimilarityThreshold, VectorThreshold, @@ -58,6 +60,7 @@ __all__ = ( + "AutocompleteItem", "AutocompleteMode", "HybridCountAndFacetMode", "HybridSearch", @@ -78,6 +81,7 @@ "SemanticErrorMode", "SemanticErrorReason", "SemanticSearchResultsType", + "SuggestResult", "VectorFilterMode", "VectorSimilarityThreshold", "VectorThreshold", From 4f17e3b207077f9398903695475bdc5508a80c9f Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 8 Nov 2024 14:38:25 -0800 Subject: [PATCH 02/12] update --- .../azure/search/documents/_generated/models/_models.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py index 5dd05a7fcdde..86a408eb62a3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -776,8 +776,7 @@ class AzureMachineLearningParameters(_model_base.Model): :ivar authentication_key: (Required for key authentication) The key for the AML service. :vartype authentication_key: str :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID - of - the AML service. It should be in the format + of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long :vartype resource_id: str :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the @@ -801,8 +800,8 @@ class AzureMachineLearningParameters(_model_base.Model): authentication_key: Optional[str] = rest_field(name="key") """(Required for key authentication) The key for the AML service.""" resource_id: Optional[str] = rest_field(name="resourceId") - """(Required for token authentication). The Azure Resource Manager resource ID of - the AML service. It should be in the format + """(Required for token authentication). The Azure Resource Manager resource ID of the AML service. 
+ It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long""" timeout: Optional[datetime.timedelta] = rest_field() """(Optional) When specified, indicates the timeout for the http client making the From 2d466ea137ae933349f783806fab1b464c8e537b Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 8 Nov 2024 15:23:47 -0800 Subject: [PATCH 03/12] update --- .../azure-search-documents/azure/search/documents/_paging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/_paging.py index 4748451613bc..813c353027ca 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_paging.py @@ -146,7 +146,7 @@ def get_facets(self) -> Optional[Dict]: facets = response.facets if facets is not None and self._facets is None: assert facets.items() is not None # Hint for mypy - self._facets = {k: [x.as_dict() for x in v] for k, v in facets.items()} + self._facets = {k: [x.as_dict() for x in v] for k, v in facets.items()} # type: ignore return self._facets @_ensure_response From 658812f2f45a92235ac940b7e96ae1032901d4a0 Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 8 Nov 2024 15:50:57 -0800 Subject: [PATCH 04/12] updates --- .../documents/_generated/models/_models.py | 4 +- .../async_tests/test_buffered_sender_async.py | 6 +-- .../async_tests/test_search_client_async.py | 4 +- .../tests/search_service_preparer.py | 2 +- .../tests/test_buffered_sender.py | 6 +-- .../tests/test_index_documents_batch.py | 3 -- .../tests/test_models.py | 2 +- .../tests/test_queries.py | 3 -- .../tests/test_regex_flags.py | 2 +- .../tests/test_search_client.py | 38 ++++++++----------- .../tests/test_search_client_search_live.py | 13 ++++--- .../tests/test_search_index_client.py | 4 +- .../tests/test_search_index_client_live.py | 11 +++--- .../tests/test_serialization.py | 10 ++--- 14 files changed, 48 insertions(+), 60 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py index 86a408eb62a3..460c5f4c01df 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -777,7 +777,7 @@ class AzureMachineLearningParameters(_model_base.Model): :vartype authentication_key: str :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long :vartype resource_id: str :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the API call. @@ -951,7 +951,7 @@ class AzureMachineLearningSkill(SearchIndexerSkill, discriminator="#Microsoft.Sk :ivar resource_id: (Required for token authentication). 
The Azure Resource Manager resource ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long :vartype resource_id: str :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the API call. diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py index 3c166e74983c..8c1f8af946d5 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_buffered_sender_async.py @@ -92,7 +92,7 @@ async def test_callback_error(self): async def mock_fail_index_documents(actions, timeout=86400): if len(actions) > 0: result = IndexingResult() - result.key = actions[0].additional_properties.get("id") + result.key = actions[0].get("id") result.status_code = 400 result.succeeded = False self.uploaded = self.uploaded + len(actions) - 1 @@ -113,7 +113,7 @@ async def test_callback_error_on_timeout(self): async def mock_fail_index_documents(actions, timeout=86400): if len(actions) > 0: result = IndexingResult() - result.key = actions[0].additional_properties.get("id") + result.key = actions[0].get("id") result.status_code = 400 result.succeeded = False self.uploaded = self.uploaded + len(actions) - 1 @@ -136,7 +136,7 @@ async def test_callback_progress(self): async def mock_successful_index_documents(actions, timeout=86400): if len(actions) > 0: result = IndexingResult() - result.key = actions[0].additional_properties.get("id") + result.key = actions[0].get("id") result.status_code = 200 result.succeeded = True return [result] diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py index c41691889a96..9d28d911dacf 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py @@ -15,14 +15,14 @@ class TestSearchClientAsync: @await_prepared_test @mock.patch( - "azure.search.documents._generated.aio.operations._documents_operations.DocumentsOperations.search_post" + "azure.search.documents._generated.aio.operations._operations.DocumentsOperationsOperations.search_post" ) async def test_get_count_reset_continuation_token(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = await client.search(search_text="search text") assert result._page_iterator_class is AsyncSearchPageIterator search_result = SearchDocumentsResult() - search_result.results = [SearchResult(additional_properties={"key": "val"})] + search_result.results = [SearchResult({"key": "val"})] mock_search_post.return_value = search_result await result.__anext__() result._first_page_iterator_instance.continuation_token = "fake token" diff --git a/sdk/search/azure-search-documents/tests/search_service_preparer.py b/sdk/search/azure-search-documents/tests/search_service_preparer.py index d4de15e3b660..e594227486b3 100644 --- a/sdk/search/azure-search-documents/tests/search_service_preparer.py +++ 
b/sdk/search/azure-search-documents/tests/search_service_preparer.py @@ -98,7 +98,7 @@ def _set_up_index(service_name, endpoint, cred, schema, index_batch): # optionally load data into the index if index_batch and schema: - batch = IndexBatch.deserialize(index_batch) + batch = IndexBatch(index_batch) client = SearchClient(endpoint, index_name, cred) results = client.index_documents(batch) if not all(result.succeeded for result in results): diff --git a/sdk/search/azure-search-documents/tests/test_buffered_sender.py b/sdk/search/azure-search-documents/tests/test_buffered_sender.py index 5af0b3d5efbf..3bf30d932d0b 100644 --- a/sdk/search/azure-search-documents/tests/test_buffered_sender.py +++ b/sdk/search/azure-search-documents/tests/test_buffered_sender.py @@ -87,7 +87,7 @@ def test_callback_error(self): def mock_fail_index_documents(actions, timeout=86400): if len(actions) > 0: result = IndexingResult() - result.key = actions[0].additional_properties.get("id") + result.key = actions[0].get("id") result.status_code = 400 result.succeeded = False self.uploaded = self.uploaded + len(actions) - 1 @@ -109,7 +109,7 @@ def mock_fail_index_documents(actions, timeout=86400): if len(actions) > 0: result = IndexingResult() - result.key = actions[0].additional_properties.get("id") + result.key = actions[0].get("id") result.status_code = 400 result.succeeded = False self.uploaded = self.uploaded + len(actions) - 1 @@ -131,7 +131,7 @@ def test_callback_progress(self): def mock_successful_index_documents(actions, timeout=86400): if len(actions) > 0: result = IndexingResult() - result.key = actions[0].additional_properties.get("id") + result.key = actions[0].get("id") result.status_code = 200 result.succeeded = True return [result] diff --git a/sdk/search/azure-search-documents/tests/test_index_documents_batch.py b/sdk/search/azure-search-documents/tests/test_index_documents_batch.py index c9d8c488d1ce..d45e331613a1 100644 --- a/sdk/search/azure-search-documents/tests/test_index_documents_batch.py +++ b/sdk/search/azure-search-documents/tests/test_index_documents_batch.py @@ -64,6 +64,3 @@ def test_add_method(self, method_name): assert all(action.action_type == METHOD_MAP[method_name] for action in batch.actions) assert all(type(action) == IndexAction for action in batch.actions) - - expected = ["doc{}".format(i) for i in range(1, 8)] - assert [action.additional_properties for action in batch.actions] == expected diff --git a/sdk/search/azure-search-documents/tests/test_models.py b/sdk/search/azure-search-documents/tests/test_models.py index 299ee69091a6..feb237e740ac 100644 --- a/sdk/search/azure-search-documents/tests/test_models.py +++ b/sdk/search/azure-search-documents/tests/test_models.py @@ -14,7 +14,7 @@ def test_encryption_key_serialization(): - from azure.search.documents.indexes._generated.models import ( + from azure.search.documents._generated.models import ( SearchResourceEncryptionKey as SearchResourceEncryptionKeyGen, ) diff --git a/sdk/search/azure-search-documents/tests/test_queries.py b/sdk/search/azure-search-documents/tests/test_queries.py index dacac2c74ad1..f343cbe159cf 100644 --- a/sdk/search/azure-search-documents/tests/test_queries.py +++ b/sdk/search/azure-search-documents/tests/test_queries.py @@ -68,9 +68,6 @@ def test_repr(self): query = SearchQuery() assert repr(query) == "" - query = SearchQuery(search_text="foo bar", suggester_name="sg") - assert repr(query) == "" - query = SearchQuery(search_text="aaaaabbbbb" * 200) assert len(repr(query)) == 1024 diff --git 
a/sdk/search/azure-search-documents/tests/test_regex_flags.py b/sdk/search/azure-search-documents/tests/test_regex_flags.py index cee471b5d1df..23a378918da1 100644 --- a/sdk/search/azure-search-documents/tests/test_regex_flags.py +++ b/sdk/search/azure-search-documents/tests/test_regex_flags.py @@ -9,7 +9,7 @@ PatternAnalyzer, PatternTokenizer, ) -from azure.search.documents.indexes._generated.models import ( +from azure.search.documents._generated.models import ( PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, ) diff --git a/sdk/search/azure-search-documents/tests/test_search_client.py b/sdk/search/azure-search-documents/tests/test_search_client.py index b8a5613cbf3b..827249522ed0 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_client.py @@ -110,22 +110,20 @@ def test_repr(self): client = SearchClient("endpoint", "index name", CREDENTIAL) assert repr(client) == "".format(repr("endpoint"), repr("index name")) - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.count") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.count") def test_get_document_count(self, mock_count): client = SearchClient("endpoint", "index name", CREDENTIAL) client.get_document_count() assert mock_count.called assert mock_count.call_args[0] == () - assert len(mock_count.call_args[1]) == 1 assert mock_count.call_args[1]["headers"] == client._headers - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.get") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.get") def test_get_document(self, mock_get): client = SearchClient("endpoint", "index name", CREDENTIAL) client.get_document("some_key") assert mock_get.called assert mock_get.call_args[0] == () - assert len(mock_get.call_args[1]) == 3 assert mock_get.call_args[1]["headers"] == client._headers assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == None @@ -135,19 +133,18 @@ def test_get_document(self, mock_get): client.get_document("some_key", selected_fields="foo") assert mock_get.called assert mock_get.call_args[0] == () - assert len(mock_get.call_args[1]) == 3 assert mock_get.call_args[1]["headers"] == client._headers assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == "foo" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.search_post") def test_search_query_argument(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.search(search_text="search text") assert isinstance(result, ItemPaged) assert result._page_iterator_class is SearchPageIterator search_result = SearchDocumentsResult() - search_result.results = [SearchResult(additional_properties={"key": "val"})] + search_result.results = [SearchResult({"key": "val"})] mock_search_post.return_value = search_result assert not mock_search_post.called next(result) @@ -155,7 +152,7 @@ def test_search_query_argument(self, mock_search_post): assert mock_search_post.call_args[0] == () assert mock_search_post.call_args[1]["search_request"].search_text == "search text" - 
@mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.suggest_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.suggest_post") def test_suggest_query_argument(self, mock_suggest_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.suggest(search_text="search text", suggester_name="sg") @@ -170,14 +167,14 @@ def test_suggest_bad_argument(self): client.suggest("bad_query") assert str(e) == "Expected a SuggestQuery for 'query', but got {}".format(repr("bad_query")) - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.search_post") def test_get_count_reset_continuation_token(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.search(search_text="search text") assert isinstance(result, ItemPaged) assert result._page_iterator_class is SearchPageIterator search_result = SearchDocumentsResult() - search_result.results = [SearchResult(additional_properties={"key": "val"})] + search_result.results = [SearchResult({"key": "val"})] mock_search_post.return_value = search_result result.__next__() result._first_page_iterator_instance.continuation_token = "fake token" @@ -185,7 +182,7 @@ def test_get_count_reset_continuation_token(self, mock_search_post): assert not result._first_page_iterator_instance.continuation_token @mock.patch( - "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.autocomplete_post" + "azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.autocomplete_post" ) def test_autocomplete_query_argument(self, mock_autocomplete_post): client = SearchClient("endpoint", "index name", CREDENTIAL) @@ -195,22 +192,20 @@ def test_autocomplete_query_argument(self, mock_autocomplete_post): assert mock_autocomplete_post.call_args[1]["headers"] == client._headers assert mock_autocomplete_post.call_args[1]["autocomplete_request"].search_text == "search text" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.count") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.count") def test_get_document_count_v2020_06_30(self, mock_count): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) client.get_document_count() assert mock_count.called assert mock_count.call_args[0] == () - assert len(mock_count.call_args[1]) == 1 assert mock_count.call_args[1]["headers"] == client._headers - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.get") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.get") def test_get_document_v2020_06_30(self, mock_get): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) client.get_document("some_key") assert mock_get.called assert mock_get.call_args[0] == () - assert len(mock_get.call_args[1]) == 3 assert mock_get.call_args[1]["headers"] == client._headers assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == None @@ -220,19 +215,18 @@ def test_get_document_v2020_06_30(self, mock_get): client.get_document("some_key", selected_fields="foo") 
assert mock_get.called assert mock_get.call_args[0] == () - assert len(mock_get.call_args[1]) == 3 assert mock_get.call_args[1]["headers"] == client._headers assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == "foo" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.search_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.search_post") def test_search_query_argument_v2020_06_30(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) result = client.search(search_text="search text") assert isinstance(result, ItemPaged) assert result._page_iterator_class is SearchPageIterator search_result = SearchDocumentsResult() - search_result.results = [SearchResult(additional_properties={"key": "val"})] + search_result.results = [SearchResult({"key": "val"})] mock_search_post.return_value = search_result assert not mock_search_post.called next(result) @@ -240,7 +234,7 @@ def test_search_query_argument_v2020_06_30(self, mock_search_post): assert mock_search_post.call_args[0] == () assert mock_search_post.call_args[1]["search_request"].search_text == "search text" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.suggest_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.suggest_post") def test_suggest_query_argument_v2020_06_30(self, mock_suggest_post): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) result = client.suggest(search_text="search text", suggester_name="sg") @@ -250,7 +244,7 @@ def test_suggest_query_argument_v2020_06_30(self, mock_suggest_post): assert mock_suggest_post.call_args[1]["suggest_request"].search_text == "search text" @mock.patch( - "azure.search.documents._generated.operations._documents_operations.DocumentsOperations.autocomplete_post" + "azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.autocomplete_post" ) def test_autocomplete_query_argument_v2020_06_30(self, mock_autocomplete_post): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) @@ -280,11 +274,10 @@ def test_add_method(self, arg, method_name): batch = mock_index_documents.call_args[0][0] assert isinstance(batch, IndexDocumentsBatch) assert all(action.action_type == CRUD_METHOD_MAP[method_name] for action in batch.actions) - assert [action.additional_properties for action in batch.actions] == arg assert mock_index_documents.call_args[1]["headers"] == client._headers assert mock_index_documents.call_args[1]["extra"] == "foo" - @mock.patch("azure.search.documents._generated.operations._documents_operations.DocumentsOperations.index") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.index") def test_index_documents(self, mock_index): client = SearchClient("endpoint", "index name", CREDENTIAL) @@ -307,7 +300,6 @@ def test_index_documents(self, mock_index): client.index_documents(batch, extra="foo") assert mock_index.called assert mock_index.call_args[0] == () - assert len(mock_index.call_args[1]) == 4 assert mock_index.call_args[1]["headers"] == client._headers assert mock_index.call_args[1]["extra"] == "foo" diff --git a/sdk/search/azure-search-documents/tests/test_search_client_search_live.py 
b/sdk/search/azure-search-documents/tests/test_search_client_search_live.py index f06d5ef2d07e..58ea0441fdbc 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client_search_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_client_search_live.py @@ -120,11 +120,14 @@ def _test_get_search_facets_result(self, client): def _test_autocomplete(self, client): results = client.autocomplete(search_text="mot", suggester_name="sg") - assert results == [{"text": "motel", "query_plus_text": "motel"}] + assert len(results) == 1 + result = results[0] + assert result.text == "motel" + assert result.query_plus_text == "motel" def _test_suggest(self, client): results = client.suggest(search_text="mot", suggester_name="sg") - assert results == [ - {"hotelId": "2", "text": "Cheapest hotel in town. Infact, a motel."}, - {"hotelId": "9", "text": "Secret Point Motel"}, - ] + assert results[0].text == "Cheapest hotel in town. Infact, a motel." + assert results[0]["hotelId"] == "2" + assert results[1].text == "Secret Point Motel" + assert results[1]["hotelId"] == "9" diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client.py b/sdk/search/azure-search-documents/tests/test_search_index_client.py index ab0c8d28b082..3cf927eeaec9 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client.py @@ -52,7 +52,7 @@ def test_get_search_client_inherit_api_version(self): assert search_client._api_version == ApiVersion.V2020_06_30 @mock.patch( - "azure.search.documents.indexes._generated.operations._search_service_client_operations.SearchServiceClientOperationsMixin.get_service_statistics" + "azure.search.documents._generated.operations._operations.SearchClientOperationsMixin.get_service_statistics" ) def test_get_service_statistics(self, mock_get_stats): client = SearchIndexClient("endpoint", CREDENTIAL) @@ -62,7 +62,7 @@ def test_get_service_statistics(self, mock_get_stats): assert mock_get_stats.call_args[1] == {"headers": client._headers} @mock.patch( - "azure.search.documents.indexes._generated.operations._search_service_client_operations.SearchServiceClientOperationsMixin.get_service_statistics" + "azure.search.documents._generated.operations._operations.SearchClientOperationsMixin.get_service_statistics" ) def test_get_service_statistics_v2020_06_30(self, mock_get_stats): client = SearchIndexClient("endpoint", CREDENTIAL, api_version=ApiVersion.V2020_06_30) diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client_live.py b/sdk/search/azure-search-documents/tests/test_search_index_client_live.py index 149e4d46944c..1742d6698103 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client_live.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client_live.py @@ -41,8 +41,8 @@ def test_search_index_client(self, endpoint, index_name): def _test_get_service_statistics(self, client): result = client.get_service_statistics() - assert isinstance(result, dict) - assert set(result.keys()) == {"counters", "limits"} + assert "counters" in result.keys() + assert "limits" in result.keys() def _test_list_indexes_empty(self, client): result = client.list_indexes() @@ -63,10 +63,9 @@ def _test_get_index(self, client, index_name): def _test_get_index_statistics(self, client, index_name): result = client.get_index_statistics(index_name) - keys = set(result.keys()) - assert "document_count" in keys - assert "storage_size" in keys - assert 
"vector_index_size" in keys + assert result.document_count is not None + assert result.storage_size is not None + assert result.vector_index_size is not None def _test_create_index(self, client, index_name): fields = [ diff --git a/sdk/search/azure-search-documents/tests/test_serialization.py b/sdk/search/azure-search-documents/tests/test_serialization.py index 09a5f04caf32..be9f34ddbd53 100644 --- a/sdk/search/azure-search-documents/tests/test_serialization.py +++ b/sdk/search/azure-search-documents/tests/test_serialization.py @@ -47,12 +47,12 @@ def test_serialize_search_index(): ) search_index_serialized = index.serialize() search_index = SearchIndex.deserialize(search_index_serialized) - assert search_index + assert search_index is not None def test_serialize_search_indexer_skillset(): - COGNITIVE_KEY = ... - COGNITIVE_DESCRIPTION = ... + COGNITIVE_KEY = "KEY" + COGNITIVE_DESCRIPTION = "DESCRIPTION" cognitive_services_account = CognitiveServicesAccountKey(key=COGNITIVE_KEY, description=COGNITIVE_DESCRIPTION) @@ -76,7 +76,7 @@ def test_serialize_search_indexer_skillset(): serialized_skillset = skillset.serialize() skillset = SearchIndexerSkillset.deserialize(serialized_skillset) - assert skillset + assert skillset is not None def test_serialize_search_index_dict(): @@ -105,4 +105,4 @@ def test_serialize_search_index_dict(): ) search_index_serialized_dict = index.as_dict() search_index = SearchIndex.from_dict(search_index_serialized_dict) - assert search_index + assert search_index is not None From 12ad0962178a5e6fcfb4c68f6197c2401de49d2f Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 8 Nov 2024 16:34:40 -0800 Subject: [PATCH 05/12] update --- .../azure/search/documents/aio/_paging.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py index c1a731d30419..1e3a6c446ba3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py @@ -129,7 +129,7 @@ async def get_facets(self) -> Optional[Dict]: facets = response.facets if facets is not None and self._facets is None: assert facets.items() is not None # Hint for mypy - self._facets = {k: [x.as_dict() for x in v] for k, v in facets.items()} + self._facets = {k: [x.as_dict() for x in v] for k, v in facets.items()} # type: ignore return self._facets @_ensure_response From 5de99ebfb9454d042d89da4f1ecd7c932f6b8ce4 Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 8 Nov 2024 16:58:29 -0800 Subject: [PATCH 06/12] update --- .../azure/search/documents/_generated/models/_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py index 460c5f4c01df..3ec945ed91bf 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -7998,7 +7998,7 @@ class SearchIndexerDataUserAssignedIdentity( :ivar resource_id: The fully qualified Azure resource Id of a user assigned managed identity typically in the form - "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" # pylint: disable=line-too-long + 
"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" # pylint: disable=line-too-long that should have been assigned to the search service. Required. :vartype resource_id: str :ivar odata_type: A URI fragment specifying the type of identity. Required. Default value is From a911f360f0fc464dec2eb24ad1465d5a34ee4e35 Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 8 Nov 2024 17:49:19 -0800 Subject: [PATCH 07/12] update --- .../documents/_generated/models/_models.py | 39 ++++++++++--------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py index 3ec945ed91bf..80d9ffbc477b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -776,7 +776,8 @@ class AzureMachineLearningParameters(_model_base.Model): :ivar authentication_key: (Required for key authentication) The key for the AML service. :vartype authentication_key: str :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID - of the AML service. It should be in the format + of + the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long :vartype resource_id: str :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the @@ -800,8 +801,8 @@ class AzureMachineLearningParameters(_model_base.Model): authentication_key: Optional[str] = rest_field(name="key") """(Required for key authentication) The key for the AML service.""" resource_id: Optional[str] = rest_field(name="resourceId") - """(Required for token authentication). The Azure Resource Manager resource ID of the AML service. - It should be in the format + """(Required for token authentication). The Azure Resource Manager resource ID of + the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long""" timeout: Optional[datetime.timedelta] = rest_field() """(Optional) When specified, indicates the timeout for the http client making the @@ -11190,8 +11191,8 @@ class VectorQuery(_model_base.Model): VectorizableImageBinaryQuery, VectorizableImageUrlQuery, VectorizableTextQuery, VectorizedQuery - :ivar k: Number of nearest neighbors to return as top hits. - :vartype k: int + :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. + :vartype k_nearest_neighbors: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. 
:vartype fields: str @@ -11228,7 +11229,7 @@ class VectorQuery(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - k: Optional[int] = rest_field() + k_nearest_neighbors: Optional[int] = rest_field(name="k") """Number of nearest neighbors to return as top hits.""" fields: Optional[str] = rest_field() """Vector Fields of type Collection(Edm.Single) to be included in the vector @@ -11266,7 +11267,7 @@ def __init__( self, *, kind: str, - k: Optional[int] = None, + k_nearest_neighbors: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -11291,8 +11292,8 @@ class VectorizableImageBinaryQuery(VectorQuery, discriminator="imageBinary"): an image that needs to be vectorized is provided. - :ivar k: Number of nearest neighbors to return as top hits. - :vartype k: int + :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. + :vartype k_nearest_neighbors: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :vartype fields: str @@ -11344,7 +11345,7 @@ class VectorizableImageBinaryQuery(VectorQuery, discriminator="imageBinary"): def __init__( self, *, - k: Optional[int] = None, + k_nearest_neighbors: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -11370,8 +11371,8 @@ class VectorizableImageUrlQuery(VectorQuery, discriminator="imageUrl"): image value that needs to be vectorized is provided. - :ivar k: Number of nearest neighbors to return as top hits. - :vartype k: int + :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. + :vartype k_nearest_neighbors: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :vartype fields: str @@ -11421,7 +11422,7 @@ class VectorizableImageUrlQuery(VectorQuery, discriminator="imageUrl"): def __init__( self, *, - k: Optional[int] = None, + k_nearest_neighbors: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -11447,8 +11448,8 @@ class VectorizableTextQuery(VectorQuery, discriminator="text"): be vectorized is provided. - :ivar k: Number of nearest neighbors to return as top hits. - :vartype k: int + :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. + :vartype k_nearest_neighbors: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. :vartype fields: str @@ -11504,7 +11505,7 @@ def __init__( self, *, text: str, - k: Optional[int] = None, + k_nearest_neighbors: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, @@ -11530,8 +11531,8 @@ class VectorizedQuery(VectorQuery, discriminator="vector"): provided. - :ivar k: Number of nearest neighbors to return as top hits. - :vartype k: int + :ivar k_nearest_neighbors: Number of nearest neighbors to return as top hits. + :vartype k_nearest_neighbors: int :ivar fields: Vector Fields of type Collection(Edm.Single) to be included in the vector searched. 
:vartype fields: str @@ -11580,7 +11581,7 @@ def __init__( self, *, vector: List[float], - k: Optional[int] = None, + k_nearest_neighbors: Optional[int] = None, fields: Optional[str] = None, exhaustive: Optional[bool] = None, oversampling: Optional[float] = None, From 9008f6456baa0077484fd99c4fa97f39052cd9ca Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 7 Mar 2025 11:43:19 -0800 Subject: [PATCH 08/12] Update code --- .../search/documents/_generated/_client.py | 78 +- .../documents/_generated/_configuration.py | 8 +- .../documents/_generated/_model_base.py | 78 +- .../documents/_generated/_serialization.py | 2 +- .../documents/_generated/aio/_client.py | 81 +- .../_generated/aio/_configuration.py | 8 +- .../_generated/aio/operations/__init__.py | 28 +- .../_generated/aio/operations/_operations.py | 634 ++-- .../documents/_generated/models/__init__.py | 18 +- .../documents/_generated/models/_enums.py | 34 +- .../documents/_generated/models/_models.py | 3131 +++++++++++------ .../_generated/operations/__init__.py | 28 +- .../_generated/operations/_operations.py | 837 +++-- .../azure/search/documents/_paging.py | 4 +- .../azure/search/documents/_search_client.py | 10 +- .../_search_indexing_buffered_sender.py | 2 +- .../azure/search/documents/aio/_paging.py | 4 +- .../documents/aio/_search_client_async.py | 10 +- .../_search_indexing_buffered_sender_async.py | 2 +- .../documents/indexes/_search_index_client.py | 40 +- .../indexes/_search_indexer_client.py | 24 +- .../indexes/aio/_search_index_client.py | 40 +- .../indexes/aio/_search_indexer_client.py | 24 +- .../documents/indexes/models/__init__.py | 6 +- .../async_tests/test_search_client_async.py | 2 +- .../tests/test_search_client.py | 24 +- 26 files changed, 3189 insertions(+), 1968 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py index 2cb95a31223c..3ee3a0662fca 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_client.py @@ -18,14 +18,14 @@ from ._configuration import SearchClientConfiguration from ._serialization import Deserializer, Serializer from .operations import ( - AliasesOperationsOperations, - DataSourcesOperationsOperations, - DocumentsOperationsOperations, - IndexersOperationsOperations, - IndexesOperationsOperations, + AliasesOperations, + DataSourcesOperations, + DocumentsOperations, + IndexersOperations, + IndexesOperations, SearchClientOperationsMixin, - SkillsetsOperationsOperations, - SynonymMapsOperationsOperations, + SkillsetsOperations, + SynonymMapsOperations, ) if TYPE_CHECKING: @@ -36,30 +36,28 @@ class SearchClient(SearchClientOperationsMixin): # pylint: disable=too-many-ins """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. 
- :ivar data_sources_operations: DataSourcesOperationsOperations operations - :vartype data_sources_operations: - azure.search.documents.operations.DataSourcesOperationsOperations - :ivar indexers_operations: IndexersOperationsOperations operations - :vartype indexers_operations: azure.search.documents.operations.IndexersOperationsOperations - :ivar skillsets_operations: SkillsetsOperationsOperations operations - :vartype skillsets_operations: azure.search.documents.operations.SkillsetsOperationsOperations - :ivar synonym_maps_operations: SynonymMapsOperationsOperations operations - :vartype synonym_maps_operations: - azure.search.documents.operations.SynonymMapsOperationsOperations - :ivar indexes_operations: IndexesOperationsOperations operations - :vartype indexes_operations: azure.search.documents.operations.IndexesOperationsOperations - :ivar aliases_operations: AliasesOperationsOperations operations - :vartype aliases_operations: azure.search.documents.operations.AliasesOperationsOperations - :ivar documents_operations: DocumentsOperationsOperations operations - :vartype documents_operations: azure.search.documents.operations.DocumentsOperationsOperations + :ivar data_sources: DataSourcesOperations operations + :vartype data_sources: azure.search.documents.operations.DataSourcesOperations + :ivar indexers: IndexersOperations operations + :vartype indexers: azure.search.documents.operations.IndexersOperations + :ivar skillsets: SkillsetsOperations operations + :vartype skillsets: azure.search.documents.operations.SkillsetsOperations + :ivar synonym_maps: SynonymMapsOperations operations + :vartype synonym_maps: azure.search.documents.operations.SynonymMapsOperations + :ivar indexes: IndexesOperations operations + :vartype indexes: azure.search.documents.operations.IndexesOperations + :ivar aliases: AliasesOperations operations + :vartype aliases: azure.search.documents.operations.AliasesOperations + :ivar documents: DocumentsOperations operations + :vartype documents: azure.search.documents.operations.DocumentsOperations :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-11-01-preview". Note that overriding this default value may result in unsupported + "2025-03-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ @@ -89,27 +87,13 @@ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCr self._serialize = Serializer() self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.data_sources_operations = DataSourcesOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.indexers_operations = IndexersOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.skillsets_operations = SkillsetsOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.synonym_maps_operations = SynonymMapsOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.indexes_operations = IndexesOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.aliases_operations = AliasesOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.documents_operations = DocumentsOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) + self.data_sources = DataSourcesOperations(self._client, self._config, self._serialize, self._deserialize) + self.indexers = IndexersOperations(self._client, self._config, self._serialize, self._deserialize) + self.skillsets = SkillsetsOperations(self._client, self._config, self._serialize, self._deserialize) + self.synonym_maps = SynonymMapsOperations(self._client, self._config, self._serialize, self._deserialize) + self.indexes = IndexesOperations(self._client, self._config, self._serialize, self._deserialize) + self.aliases = AliasesOperations(self._client, self._config, self._serialize, self._deserialize) + self.documents = DocumentsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> HttpResponse: """Runs the network request through the client's chained policies. diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py index 945878c06c91..4bd8e3d12635 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_configuration.py @@ -25,18 +25,18 @@ class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials.TokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-11-01-preview". Note that overriding this default value may result in unsupported + "2025-03-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ def __init__(self, endpoint: str, credential: Union[AzureKeyCredential, "TokenCredential"], **kwargs: Any) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-01-preview") + api_version: str = kwargs.pop("api_version", "2025-03-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py index e6a2730f9276..3072ee252ed9 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py @@ -373,15 +373,34 @@ def __ne__(self, other: typing.Any) -> bool: return not self.__eq__(other) def keys(self) -> typing.KeysView[str]: + """ + :returns: a set-like object providing a view on D's keys + :rtype: ~typing.KeysView + """ return self._data.keys() def values(self) -> typing.ValuesView[typing.Any]: + """ + :returns: an object providing a view on D's values + :rtype: ~typing.ValuesView + """ return self._data.values() def items(self) -> typing.ItemsView[str, typing.Any]: + """ + :returns: set-like object providing a view on D's items + :rtype: ~typing.ItemsView + """ return self._data.items() def get(self, key: str, default: typing.Any = None) -> typing.Any: + """ + Get the value for key if key is in the dictionary, else default. + :param str key: The key to look up. + :param any default: The value to return if key is not in the dictionary. Defaults to None + :returns: D[k] if k in D, else d. + :rtype: any + """ try: return self[key] except KeyError: @@ -397,17 +416,38 @@ def pop(self, key: str, default: _T) -> _T: ... def pop(self, key: str, default: typing.Any) -> typing.Any: ... def pop(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Removes specified key and return the corresponding value. + :param str key: The key to pop. + :param any default: The value to return if key is not in the dictionary + :returns: The value corresponding to the key. + :rtype: any + :raises KeyError: If key is not found and default is not given. + """ if default is _UNSET: return self._data.pop(key) return self._data.pop(key, default) def popitem(self) -> typing.Tuple[str, typing.Any]: + """ + Removes and returns some (key, value) pair + :returns: The (key, value) pair. + :rtype: tuple + :raises KeyError: if D is empty. + """ return self._data.popitem() def clear(self) -> None: + """ + Remove all items from D. + """ self._data.clear() def update(self, *args: typing.Any, **kwargs: typing.Any) -> None: + """ + Updates D from mapping/iterable E and F. + :param any args: Either a mapping object or an iterable of key-value pairs. + """ self._data.update(*args, **kwargs) @typing.overload @@ -417,6 +457,13 @@ def setdefault(self, key: str, default: None = None) -> None: ... def setdefault(self, key: str, default: typing.Any) -> typing.Any: ... def setdefault(self, key: str, default: typing.Any = _UNSET) -> typing.Any: + """ + Same as calling D.get(k, d), and setting D[k]=d if k not found + :param str key: The key to look up. + :param any default: The value to set if key is not in the dictionary + :returns: D[k] if k in D, else d. 
+ :rtype: any + """ if default is _UNSET: return self._data.setdefault(key) return self._data.setdefault(key, default) @@ -754,7 +801,7 @@ def _get_deserialize_callable_from_annotation( # pylint: disable=too-many-retur except AttributeError: model_name = annotation if module is not None: - annotation = _get_model(module, model_name) + annotation = _get_model(module, model_name) # type: ignore try: if module and _is_model(annotation): @@ -894,6 +941,35 @@ def _deserialize( return _deserialize_with_callable(deserializer, value) +def _failsafe_deserialize( + deserializer: typing.Any, + value: typing.Any, + module: typing.Optional[str] = None, + rf: typing.Optional["_RestField"] = None, + format: typing.Optional[str] = None, +) -> typing.Any: + try: + return _deserialize(deserializer, value, module, rf, format) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + +def _failsafe_deserialize_xml( + deserializer: typing.Any, + value: typing.Any, +) -> typing.Any: + try: + return _deserialize_xml(deserializer, value) + except DeserializationError: + _LOGGER.warning( + "Ran into a deserialization error. Ignoring since this is failsafe deserialization", exc_info=True + ) + return None + + class _RestField: def __init__( self, diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py index 7a0232de5ddc..e2a20b1d534c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py @@ -411,7 +411,7 @@ def from_dict( :param function key_extractors: A key extractor function. :param str content_type: JSON by default, set application/xml if XML. :returns: An instance of this model - :raises DeserializationError: if something went wrong + :raises: DeserializationError if something went wrong :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py index b91435886003..988fcd7aa7ce 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_client.py @@ -18,14 +18,14 @@ from .._serialization import Deserializer, Serializer from ._configuration import SearchClientConfiguration from .operations import ( - AliasesOperationsOperations, - DataSourcesOperationsOperations, - DocumentsOperationsOperations, - IndexersOperationsOperations, - IndexesOperationsOperations, + AliasesOperations, + DataSourcesOperations, + DocumentsOperations, + IndexersOperations, + IndexesOperations, SearchClientOperationsMixin, - SkillsetsOperationsOperations, - SynonymMapsOperationsOperations, + SkillsetsOperations, + SynonymMapsOperations, ) if TYPE_CHECKING: @@ -36,33 +36,28 @@ class SearchClient(SearchClientOperationsMixin): # pylint: disable=too-many-ins """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. 
- :ivar data_sources_operations: DataSourcesOperationsOperations operations - :vartype data_sources_operations: - azure.search.documents.aio.operations.DataSourcesOperationsOperations - :ivar indexers_operations: IndexersOperationsOperations operations - :vartype indexers_operations: - azure.search.documents.aio.operations.IndexersOperationsOperations - :ivar skillsets_operations: SkillsetsOperationsOperations operations - :vartype skillsets_operations: - azure.search.documents.aio.operations.SkillsetsOperationsOperations - :ivar synonym_maps_operations: SynonymMapsOperationsOperations operations - :vartype synonym_maps_operations: - azure.search.documents.aio.operations.SynonymMapsOperationsOperations - :ivar indexes_operations: IndexesOperationsOperations operations - :vartype indexes_operations: azure.search.documents.aio.operations.IndexesOperationsOperations - :ivar aliases_operations: AliasesOperationsOperations operations - :vartype aliases_operations: azure.search.documents.aio.operations.AliasesOperationsOperations - :ivar documents_operations: DocumentsOperationsOperations operations - :vartype documents_operations: - azure.search.documents.aio.operations.DocumentsOperationsOperations + :ivar data_sources: DataSourcesOperations operations + :vartype data_sources: azure.search.documents.aio.operations.DataSourcesOperations + :ivar indexers: IndexersOperations operations + :vartype indexers: azure.search.documents.aio.operations.IndexersOperations + :ivar skillsets: SkillsetsOperations operations + :vartype skillsets: azure.search.documents.aio.operations.SkillsetsOperations + :ivar synonym_maps: SynonymMapsOperations operations + :vartype synonym_maps: azure.search.documents.aio.operations.SynonymMapsOperations + :ivar indexes: IndexesOperations operations + :vartype indexes: azure.search.documents.aio.operations.IndexesOperations + :ivar aliases: AliasesOperations operations + :vartype aliases: azure.search.documents.aio.operations.AliasesOperations + :ivar documents: DocumentsOperations operations + :vartype documents: azure.search.documents.aio.operations.DocumentsOperations :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-11-01-preview". Note that overriding this default value may result in unsupported + "2025-03-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ @@ -94,27 +89,13 @@ def __init__( self._serialize = Serializer() self._deserialize = Deserializer() self._serialize.client_side_validation = False - self.data_sources_operations = DataSourcesOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.indexers_operations = IndexersOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.skillsets_operations = SkillsetsOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.synonym_maps_operations = SynonymMapsOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.indexes_operations = IndexesOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.aliases_operations = AliasesOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) - self.documents_operations = DocumentsOperationsOperations( - self._client, self._config, self._serialize, self._deserialize - ) + self.data_sources = DataSourcesOperations(self._client, self._config, self._serialize, self._deserialize) + self.indexers = IndexersOperations(self._client, self._config, self._serialize, self._deserialize) + self.skillsets = SkillsetsOperations(self._client, self._config, self._serialize, self._deserialize) + self.synonym_maps = SynonymMapsOperations(self._client, self._config, self._serialize, self._deserialize) + self.indexes = IndexesOperations(self._client, self._config, self._serialize, self._deserialize) + self.aliases = AliasesOperations(self._client, self._config, self._serialize, self._deserialize) + self.documents = DocumentsOperations(self._client, self._config, self._serialize, self._deserialize) def send_request( self, request: HttpRequest, *, stream: bool = False, **kwargs: Any diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py index 6d9f5b83d222..bf196f25a63b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/_configuration.py @@ -25,12 +25,12 @@ class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes :param endpoint: Service host. Required. :type endpoint: str - :param credential: Credential used to authenticate requests to the service. Is either a - AzureKeyCredential type or a TokenCredential type. Required. + :param credential: Credential used to authenticate requests to the service. Is either a key + credential type or a token credential type. Required. :type credential: ~azure.core.credentials.AzureKeyCredential or ~azure.core.credentials_async.AsyncTokenCredential :keyword api_version: The API version to use for this operation. Default value is - "2024-11-01-preview". Note that overriding this default value may result in unsupported + "2025-03-01-preview". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str """ @@ -38,7 +38,7 @@ class SearchClientConfiguration: # pylint: disable=too-many-instance-attributes def __init__( self, endpoint: str, credential: Union[AzureKeyCredential, "AsyncTokenCredential"], **kwargs: Any ) -> None: - api_version: str = kwargs.pop("api_version", "2024-11-01-preview") + api_version: str = kwargs.pop("api_version", "2025-03-01-preview") if endpoint is None: raise ValueError("Parameter 'endpoint' must not be None.") diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py index cad45d7952dd..c4716a340135 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/__init__.py @@ -12,13 +12,13 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import DataSourcesOperationsOperations # type: ignore -from ._operations import IndexersOperationsOperations # type: ignore -from ._operations import SkillsetsOperationsOperations # type: ignore -from ._operations import SynonymMapsOperationsOperations # type: ignore -from ._operations import IndexesOperationsOperations # type: ignore -from ._operations import AliasesOperationsOperations # type: ignore -from ._operations import DocumentsOperationsOperations # type: ignore +from ._operations import DataSourcesOperations # type: ignore +from ._operations import IndexersOperations # type: ignore +from ._operations import SkillsetsOperations # type: ignore +from ._operations import SynonymMapsOperations # type: ignore +from ._operations import IndexesOperations # type: ignore +from ._operations import AliasesOperations # type: ignore +from ._operations import DocumentsOperations # type: ignore from ._operations import SearchClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all @@ -26,13 +26,13 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "DataSourcesOperationsOperations", - "IndexersOperationsOperations", - "SkillsetsOperationsOperations", - "SynonymMapsOperationsOperations", - "IndexesOperationsOperations", - "AliasesOperationsOperations", - "DocumentsOperationsOperations", + "DataSourcesOperations", + "IndexersOperations", + "SkillsetsOperations", + "SynonymMapsOperations", + "IndexesOperations", + "AliasesOperations", + "DocumentsOperations", "SearchClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py index a6af0faa4564..b17c26c4aa1d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
@@ -12,7 +12,7 @@ from typing import Any, AsyncIterable, Callable, Dict, IO, List, Literal, Optional, TypeVar, Union, overload import urllib.parse -from azure.core import MatchConditions +from azure.core import AsyncPipelineClient, MatchConditions from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ( ClientAuthenticationError, @@ -32,57 +32,60 @@ from azure.core.utils import case_insensitive_dict from ... import models as _models -from ..._model_base import SdkJSONEncoder, _deserialize +from ..._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from ..._serialization import Deserializer, Serializer from ..._validation import api_version_validation from ...operations._operations import ( - build_aliases_operations_create_or_update_request, - build_aliases_operations_create_request, - build_aliases_operations_delete_request, - build_aliases_operations_get_request, - build_aliases_operations_list_request, - build_data_sources_operations_create_or_update_request, - build_data_sources_operations_create_request, - build_data_sources_operations_delete_request, - build_data_sources_operations_get_request, - build_data_sources_operations_list_request, - build_documents_operations_autocomplete_get_request, - build_documents_operations_autocomplete_post_request, - build_documents_operations_count_request, - build_documents_operations_get_request, - build_documents_operations_index_request, - build_documents_operations_search_get_request, - build_documents_operations_search_post_request, - build_documents_operations_suggest_get_request, - build_documents_operations_suggest_post_request, - build_indexers_operations_create_or_update_request, - build_indexers_operations_create_request, - build_indexers_operations_delete_request, - build_indexers_operations_get_request, - build_indexers_operations_get_status_request, - build_indexers_operations_list_request, - build_indexers_operations_reset_docs_request, - build_indexers_operations_reset_request, - build_indexers_operations_run_request, - build_indexes_operations_analyze_request, - build_indexes_operations_create_or_update_request, - build_indexes_operations_create_request, - build_indexes_operations_delete_request, - build_indexes_operations_get_request, - build_indexes_operations_get_statistics_request, - build_indexes_operations_list_request, + build_aliases_create_or_update_request, + build_aliases_create_request, + build_aliases_delete_request, + build_aliases_get_request, + build_aliases_list_request, + build_data_sources_create_or_update_request, + build_data_sources_create_request, + build_data_sources_delete_request, + build_data_sources_get_request, + build_data_sources_list_request, + build_documents_autocomplete_get_request, + build_documents_autocomplete_post_request, + build_documents_count_request, + build_documents_get_request, + build_documents_index_request, + build_documents_search_get_request, + build_documents_search_post_request, + build_documents_suggest_get_request, + build_documents_suggest_post_request, + build_indexers_create_or_update_request, + build_indexers_create_request, + build_indexers_delete_request, + build_indexers_get_request, + build_indexers_get_status_request, + build_indexers_list_request, + build_indexers_reset_docs_request, + build_indexers_reset_request, + build_indexers_run_request, + build_indexes_analyze_request, + build_indexes_create_or_update_request, + build_indexes_create_request, + build_indexes_delete_request, + 
build_indexes_get_request, + build_indexes_get_statistics_request, + build_indexes_list_request, + build_search_get_index_stats_summary_request, build_search_get_service_statistics_request, - build_skillsets_operations_create_or_update_request, - build_skillsets_operations_create_request, - build_skillsets_operations_delete_request, - build_skillsets_operations_get_request, - build_skillsets_operations_list_request, - build_skillsets_operations_reset_skills_request, - build_synonym_maps_operations_create_or_update_request, - build_synonym_maps_operations_create_request, - build_synonym_maps_operations_delete_request, - build_synonym_maps_operations_get_request, - build_synonym_maps_operations_list_request, + build_skillsets_create_or_update_request, + build_skillsets_create_request, + build_skillsets_delete_request, + build_skillsets_get_request, + build_skillsets_list_request, + build_skillsets_reset_skills_request, + build_synonym_maps_create_or_update_request, + build_synonym_maps_create_request, + build_synonym_maps_delete_request, + build_synonym_maps_get_request, + build_synonym_maps_list_request, ) +from .._configuration import SearchClientConfiguration from .._vendor import SearchClientMixinABC if sys.version_info >= (3, 9): @@ -94,22 +97,22 @@ ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] -class DataSourcesOperationsOperations: +class DataSourcesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.aio.SearchClient`'s - :attr:`data_sources_operations` attribute. + :attr:`data_sources` attribute. """ def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def create_or_update( @@ -215,7 +218,7 @@ async def create_or_update( @distributed_trace_async @api_version_validation( - params_added_on={"2024-11-01-preview": ["skip_indexer_reset_requirement_for_cache"]}, + params_added_on={"2025-03-01-preview": ["skip_indexer_reset_requirement_for_cache"]}, ) async def create_or_update( self, @@ -274,7 +277,7 @@ async def create_or_update( else: _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_data_sources_operations_create_or_update_request( + _request = build_data_sources_create_or_update_request( data_source_name=data_source_name, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, etag=etag, @@ -305,7 +308,7 @@ async def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) 
raise HttpResponseError(response=response, model=error) if _stream: @@ -359,7 +362,7 @@ async def delete( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_data_sources_operations_delete_request( + _request = build_data_sources_delete_request( data_source_name=data_source_name, etag=etag, match_condition=match_condition, @@ -379,9 +382,9 @@ async def delete( response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -410,7 +413,7 @@ async def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndex cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - _request = build_data_sources_operations_get_request( + _request = build_data_sources_get_request( data_source_name=data_source_name, api_version=self._config.api_version, headers=_headers, @@ -428,14 +431,14 @@ async def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndex response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -449,13 +452,13 @@ async def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndex return deserialized # type: ignore @distributed_trace_async - async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListDataSourcesResult: + async def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListDataSourcesResult: """Lists all datasources available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListDataSourcesResult. 
The ListDataSourcesResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListDataSourcesResult :raises ~azure.core.exceptions.HttpResponseError: @@ -473,8 +476,8 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models cls: ClsType[_models.ListDataSourcesResult] = kwargs.pop("cls", None) - _request = build_data_sources_operations_list_request( - _select=_select, + _request = build_data_sources_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -498,7 +501,7 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -593,7 +596,7 @@ async def create( else: _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_data_sources_operations_create_request( + _request = build_data_sources_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -612,14 +615,14 @@ async def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -633,22 +636,22 @@ async def create( return deserialized # type: ignore -class IndexersOperationsOperations: +class IndexersOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.aio.SearchClient`'s - :attr:`indexers_operations` attribute. + :attr:`indexers` attribute. 
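A sketch of the data sources group above, assuming a `client` constructed as in the earlier sketch. The method names, the `select` keyword, and the `etag`/`match_condition` keywords are taken from the diff; the etag access is illustrative only.

```python
# Sketch only: `client` is assumed to be the generated async SearchClient.
from azure.core import MatchConditions


async def manage_data_sources(client) -> None:
    ds = await client.data_sources.get("hotels-ds")
    # list() now takes `select` (no leading underscore).
    names = await client.data_sources.list(select="name")
    # delete() now accepts 404 as well as 204, so deleting a missing
    # data source no longer raises.
    await client.data_sources.delete(
        "hotels-ds",
        etag=ds.get("@odata.etag"),  # illustrative; models are MutableMapping-compatible
        match_condition=MatchConditions.IfNotModified,
    )
    print(names)
```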
""" def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def reset(self, indexer_name: str, **kwargs: Any) -> None: @@ -673,7 +676,7 @@ async def reset(self, indexer_name: str, **kwargs: Any) -> None: cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexers_operations_reset_request( + _request = build_indexers_reset_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -693,7 +696,7 @@ async def reset(self, indexer_name: str, **kwargs: Any) -> None: if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -797,9 +800,9 @@ async def reset_docs( @distributed_trace_async @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "api_version", "overwrite", "client_request_id", @@ -860,7 +863,7 @@ async def reset_docs( else: _content = None - _request = build_indexers_operations_reset_docs_request( + _request = build_indexers_reset_docs_request( indexer_name=indexer_name, overwrite=overwrite, content_type=content_type, @@ -883,7 +886,7 @@ async def reset_docs( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -912,7 +915,7 @@ async def run(self, indexer_name: str, **kwargs: Any) -> None: cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexers_operations_run_request( + _request = build_indexers_run_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -930,9 +933,9 @@ async def run(self, indexer_name: str, **kwargs: Any) -> None: response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -1055,7 +1058,7 @@ async def create_or_update( @distributed_trace_async @api_version_validation( params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "skip_indexer_reset_requirement_for_cache", "disable_cache_reprocessing_change_detection", ] @@ 
-1122,7 +1125,7 @@ async def create_or_update( else: _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexers_operations_create_or_update_request( + _request = build_indexers_create_or_update_request( indexer_name=indexer_name, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, @@ -1154,7 +1157,7 @@ async def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1208,7 +1211,7 @@ async def delete( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexers_operations_delete_request( + _request = build_indexers_delete_request( indexer_name=indexer_name, etag=etag, match_condition=match_condition, @@ -1228,9 +1231,9 @@ async def delete( response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -1259,7 +1262,7 @@ async def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - _request = build_indexers_operations_get_request( + _request = build_indexers_get_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -1277,14 +1280,14 @@ async def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1298,13 +1301,13 @@ async def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: return deserialized # type: ignore @distributed_trace_async - async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListIndexersResult: + async def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListIndexersResult: """Lists all indexers available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListIndexersResult. 
The ListIndexersResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListIndexersResult :raises ~azure.core.exceptions.HttpResponseError: @@ -1322,8 +1325,8 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models cls: ClsType[_models.ListIndexersResult] = kwargs.pop("cls", None) - _request = build_indexers_operations_list_request( - _select=_select, + _request = build_indexers_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1347,7 +1350,7 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1442,7 +1445,7 @@ async def create( else: _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexers_operations_create_request( + _request = build_indexers_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -1461,14 +1464,14 @@ async def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1504,7 +1507,7 @@ async def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIn cls: ClsType[_models.SearchIndexerStatus] = kwargs.pop("cls", None) - _request = build_indexers_operations_get_status_request( + _request = build_indexers_get_status_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -1529,7 +1532,7 @@ async def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIn except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1543,22 +1546,22 @@ async def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIn return deserialized # type: ignore -class SkillsetsOperationsOperations: +class SkillsetsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.aio.SearchClient`'s - :attr:`skillsets_operations` attribute. + :attr:`skillsets` attribute. 
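For the indexers group, a short sketch under the same assumptions: run() now treats 202 as success, delete() tolerates 404, list() takes `select`, and reset_docs() is gated to api-version 2025-03-01-preview by `@api_version_validation`.

```python
# Sketch only: `client` is assumed to be the generated async SearchClient.
async def manage_indexers(client) -> None:
    await client.indexers.run("hotels-indexer")        # 202 Accepted on success
    status = await client.indexers.get_status("hotels-indexer")
    names = await client.indexers.list(select="name")  # `select`, not `_select`
    await client.indexers.reset("hotels-indexer")
    # reset_docs() exists as well but requires api-version 2025-03-01-preview
    # or later per the validation decorator above.
    print(status, names)
```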
""" def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def create_or_update( @@ -1683,7 +1686,7 @@ async def create_or_update( @distributed_trace_async @api_version_validation( params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "skip_indexer_reset_requirement_for_cache", "disable_cache_reprocessing_change_detection", ] @@ -1751,7 +1754,7 @@ async def create_or_update( else: _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_skillsets_operations_create_or_update_request( + _request = build_skillsets_create_or_update_request( skillset_name=skillset_name, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, @@ -1783,7 +1786,7 @@ async def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1837,7 +1840,7 @@ async def delete( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_skillsets_operations_delete_request( + _request = build_skillsets_delete_request( skillset_name=skillset_name, etag=etag, match_condition=match_condition, @@ -1857,9 +1860,9 @@ async def delete( response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -1888,7 +1891,7 @@ async def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerS cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - _request = build_skillsets_operations_get_request( + _request = build_skillsets_get_request( skillset_name=skillset_name, api_version=self._config.api_version, headers=_headers, @@ -1906,14 +1909,14 @@ async def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerS response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = 
_failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1927,13 +1930,13 @@ async def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerS return deserialized # type: ignore @distributed_trace_async - async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSkillsetsResult: + async def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListSkillsetsResult: """List all skillsets in a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListSkillsetsResult. The ListSkillsetsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListSkillsetsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -1951,8 +1954,8 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models cls: ClsType[_models.ListSkillsetsResult] = kwargs.pop("cls", None) - _request = build_skillsets_operations_list_request( - _select=_select, + _request = build_skillsets_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1976,7 +1979,7 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2074,7 +2077,7 @@ async def create( else: _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_skillsets_operations_create_request( + _request = build_skillsets_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2093,14 +2096,14 @@ async def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2180,9 +2183,9 @@ async def reset_skills( @distributed_trace_async @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": ["api_version", "client_request_id", "skillset_name", "content_type", "accept"] + "2025-03-01-preview": ["api_version", "client_request_id", "skillset_name", "content_type", "accept"] }, ) async def reset_skills( @@ -2221,7 +2224,7 @@ async def reset_skills( else: _content = json.dumps(skill_names, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_skillsets_operations_reset_skills_request( + _request = build_skillsets_reset_skills_request( skillset_name=skillset_name, content_type=content_type, 
api_version=self._config.api_version, @@ -2243,29 +2246,29 @@ async def reset_skills( if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: return cls(pipeline_response, None, {}) # type: ignore -class SynonymMapsOperationsOperations: +class SynonymMapsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.aio.SearchClient`'s - :attr:`synonym_maps_operations` attribute. + :attr:`synonym_maps` attribute. """ def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def create_or_update( @@ -2411,7 +2414,7 @@ async def create_or_update( else: _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_synonym_maps_operations_create_or_update_request( + _request = build_synonym_maps_create_or_update_request( synonym_map_name=synonym_map_name, etag=etag, match_condition=match_condition, @@ -2441,7 +2444,7 @@ async def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2495,7 +2498,7 @@ async def delete( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_synonym_maps_operations_delete_request( + _request = build_synonym_maps_delete_request( synonym_map_name=synonym_map_name, etag=etag, match_condition=match_condition, @@ -2515,9 +2518,9 @@ async def delete( response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -2546,7 +2549,7 @@ async def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - _request = build_synonym_maps_operations_get_request( + _request = build_synonym_maps_get_request( synonym_map_name=synonym_map_name, api_version=self._config.api_version, headers=_headers, @@ -2564,14 +2567,14 @@ async def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: response = 
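A corresponding sketch for the skillsets group. The `skill_names` parameter name and the 2025-03-01-preview gating of reset_skills() come from the diff; the reset payload shape below is an assumption, not confirmed by this patch.

```python
# Sketch only: `client` is assumed to be the generated async SearchClient.
async def manage_skillsets(client) -> None:
    skillset = await client.skillsets.get("hotels-skillset")
    listed = await client.skillsets.list(select="name")
    # reset_skills() is version-gated to 2025-03-01-preview; the payload shape
    # here ({"skillNames": [...]}) is assumed for illustration.
    await client.skillsets.reset_skills("hotels-skillset", {"skillNames": ["#1"]})
    print(skillset, listed)
```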
pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2585,13 +2588,13 @@ async def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: return deserialized # type: ignore @distributed_trace_async - async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSynonymMapsResult: + async def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListSynonymMapsResult: """Lists all synonym maps available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListSynonymMapsResult. The ListSynonymMapsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListSynonymMapsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -2609,8 +2612,8 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models cls: ClsType[_models.ListSynonymMapsResult] = kwargs.pop("cls", None) - _request = build_synonym_maps_operations_list_request( - _select=_select, + _request = build_synonym_maps_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -2634,7 +2637,7 @@ async def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2729,7 +2732,7 @@ async def create( else: _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_synonym_maps_operations_create_request( + _request = build_synonym_maps_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2748,14 +2751,14 @@ async def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2769,22 +2772,22 @@ async def create( return deserialized # type: ignore -class IndexesOperationsOperations: +class IndexesOperations: """ .. warning:: **DO NOT** instantiate this class directly. 
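The synonym maps group follows the same pattern: create() now expects 201, get() expects 200, delete() tolerates 404, and list() takes `select`. The payload fields below are shown for illustration only.

```python
# Sketch only: `client` is assumed to be the generated async SearchClient.
async def manage_synonym_maps(client) -> None:
    created = await client.synonym_maps.create(
        {"name": "hotel-synonyms", "format": "solr", "synonyms": "usa, united states"}
    )  # illustrative payload
    fetched = await client.synonym_maps.get("hotel-synonyms")
    names = await client.synonym_maps.list(select="name")
    await client.synonym_maps.delete("hotel-synonyms")  # 404 no longer raises
    print(created, fetched, names)
```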
Instead, you should access the following operations through :class:`~azure.search.documents.aio.SearchClient`'s - :attr:`indexes_operations` attribute. + :attr:`indexes` attribute. """ def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def create( @@ -2866,7 +2869,7 @@ async def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwa else: _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_operations_create_request( + _request = build_indexes_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2885,14 +2888,14 @@ async def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwa response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2906,13 +2909,13 @@ async def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwa return deserialized # type: ignore @distributed_trace - def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.SearchIndex"]: + def list(self, *, select: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.SearchIndex"]: """Lists all indexes available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. 
- :paramtype _select: str + :paramtype select: str :return: An iterator like instance of SearchIndex :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.models.SearchIndex] :raises ~azure.core.exceptions.HttpResponseError: @@ -2933,8 +2936,8 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> AsyncIterable def prepare_request(next_link=None): if not next_link: - _request = build_indexes_operations_list_request( - _select=_select, + _request = build_indexes_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -2986,7 +2989,7 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3169,7 +3172,7 @@ async def create_or_update( else: _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_operations_create_or_update_request( + _request = build_indexes_create_or_update_request( index_name=index_name, allow_index_downtime=allow_index_downtime, etag=etag, @@ -3200,7 +3203,7 @@ async def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3257,7 +3260,7 @@ async def delete( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexes_operations_delete_request( + _request = build_indexes_delete_request( index_name=index_name, etag=etag, match_condition=match_condition, @@ -3277,9 +3280,9 @@ async def delete( response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -3308,7 +3311,7 @@ async def get(self, index_name: str, **kwargs: Any) -> _models.SearchIndex: cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - _request = build_indexes_operations_get_request( + _request = build_indexes_get_request( index_name=index_name, api_version=self._config.api_version, headers=_headers, @@ -3326,14 +3329,14 @@ async def get(self, index_name: str, **kwargs: Any) -> _models.SearchIndex: response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3371,7 +3374,7 @@ async def get_statistics(self, index_name: str, **kwargs: Any) -> _models.GetInd cls: 
ClsType[_models.GetIndexStatisticsResult] = kwargs.pop("cls", None) - _request = build_indexes_operations_get_statistics_request( + _request = build_indexes_get_statistics_request( index_name=index_name, api_version=self._config.api_version, headers=_headers, @@ -3396,7 +3399,7 @@ async def get_statistics(self, index_name: str, **kwargs: Any) -> _models.GetInd except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3499,7 +3502,7 @@ async def analyze( else: _content = json.dumps(request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_operations_analyze_request( + _request = build_indexes_analyze_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -3526,7 +3529,7 @@ async def analyze( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3540,22 +3543,22 @@ async def analyze( return deserialized # type: ignore -class AliasesOperationsOperations: +class AliasesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.aio.SearchClient`'s - :attr:`aliases_operations` attribute. + :attr:`aliases` attribute. """ def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload async def create( @@ -3607,8 +3610,8 @@ async def create( @distributed_trace_async @api_version_validation( - method_added_on="2024-11-01-preview", - params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "content_type", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "content_type", "accept"]}, ) async def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwargs: Any) -> _models.SearchAlias: """Creates a new search alias. 
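For the indexes group, note that list() is a paged operation (it returns an AsyncItemPaged of SearchIndex and is iterated, not awaited). A sketch under the same assumptions; the analyze() payload fields are illustrative.

```python
# Sketch only: `client` is assumed to be the generated async SearchClient.
async def inspect_indexes(client) -> None:
    async for index in client.indexes.list(select="name"):  # paged, no await
        print(index)
    stats = await client.indexes.get_statistics("hotels-index")
    tokens = await client.indexes.analyze(
        "hotels-index",
        {"text": "Quick brown fox", "analyzer": "standard.lucene"},  # assumed payload
    )
    print(stats, tokens)
```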
@@ -3641,7 +3644,7 @@ async def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwa else: _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_aliases_operations_create_request( + _request = build_aliases_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3660,14 +3663,14 @@ async def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwa response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3682,8 +3685,8 @@ async def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwa @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", - params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "accept"]}, ) def list(self, **kwargs: Any) -> AsyncIterable["_models.SearchAlias"]: """Lists all aliases available for a search service. @@ -3708,7 +3711,7 @@ def list(self, **kwargs: Any) -> AsyncIterable["_models.SearchAlias"]: def prepare_request(next_link=None): if not next_link: - _request = build_aliases_operations_list_request( + _request = build_aliases_list_request( api_version=self._config.api_version, headers=_headers, params=_params, @@ -3760,7 +3763,7 @@ async def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -3859,9 +3862,9 @@ async def create_or_update( @distributed_trace_async @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "api_version", "prefer", "client_request_id", @@ -3926,7 +3929,7 @@ async def create_or_update( else: _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_aliases_operations_create_or_update_request( + _request = build_aliases_create_or_update_request( alias_name=alias_name, etag=etag, match_condition=match_condition, @@ -3956,7 +3959,7 @@ async def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3971,9 +3974,9 @@ async def create_or_update( @distributed_trace_async @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ 
"api_version", "client_request_id", "alias_name", @@ -4025,7 +4028,7 @@ async def delete( cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_aliases_operations_delete_request( + _request = build_aliases_delete_request( alias_name=alias_name, etag=etag, match_condition=match_condition, @@ -4045,9 +4048,9 @@ async def delete( response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -4055,8 +4058,8 @@ async def delete( @distributed_trace_async @api_version_validation( - method_added_on="2024-11-01-preview", - params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "alias_name", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "alias_name", "accept"]}, ) async def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: """Retrieves an alias definition. @@ -4080,7 +4083,7 @@ async def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - _request = build_aliases_operations_get_request( + _request = build_aliases_get_request( alias_name=alias_name, api_version=self._config.api_version, headers=_headers, @@ -4098,14 +4101,14 @@ async def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: await response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4119,22 +4122,22 @@ async def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: return deserialized # type: ignore -class DocumentsOperationsOperations: +class DocumentsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.aio.SearchClient`'s - :attr:`documents_operations` attribute. + :attr:`documents` attribute. 
""" def __init__(self, *args, **kwargs) -> None: input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: AsyncPipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace_async async def count(self, index_name: str, **kwargs: Any) -> int: @@ -4159,7 +4162,7 @@ async def count(self, index_name: str, **kwargs: Any) -> int: cls: ClsType[int] = kwargs.pop("cls", None) - _request = build_documents_operations_count_request( + _request = build_documents_count_request( index_name=index_name, api_version=self._config.api_version, headers=_headers, @@ -4184,7 +4187,7 @@ async def count(self, index_name: str, **kwargs: Any) -> int: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4200,7 +4203,7 @@ async def count(self, index_name: str, **kwargs: Any) -> int: @distributed_trace_async @api_version_validation( params_added_on={ - "2024-11-01-preview": ["query_rewrites", "debug", "query_language", "speller", "semantic_fields"] + "2025-03-01-preview": ["query_rewrites", "debug", "query_language", "speller", "semantic_fields"] }, ) async def search_get( @@ -4210,7 +4213,7 @@ async def search_get( search_text: Optional[str] = None, include_total_result_count: Optional[bool] = None, facets: Optional[List[str]] = None, - _filter: Optional[str] = None, + filter: Optional[str] = None, highlight_fields: Optional[List[str]] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, @@ -4223,9 +4226,9 @@ async def search_get( search_mode: Optional[Union[str, _models.SearchMode]] = None, scoring_statistics: Optional[Union[str, _models.ScoringStatistics]] = None, session_id: Optional[str] = None, - _select: Optional[List[str]] = None, - _skip: Optional[int] = None, - _top: Optional[int] = None, + select: Optional[List[str]] = None, + skip: Optional[int] = None, + top: Optional[int] = None, semantic_configuration: Optional[str] = None, semantic_error_handling: Optional[Union[str, _models.SemanticErrorMode]] = None, semantic_max_wait_in_milliseconds: Optional[int] = None, @@ -4256,9 +4259,9 @@ async def search_get( expression contains a field name, optionally followed by a comma-separated list of name:value pairs. Default value is None. :paramtype facets: list[str] - :keyword _filter: The OData $filter expression to apply to the search query. Default value is + :keyword filter: The OData $filter expression to apply to the search query. Default value is None. - :paramtype _filter: str + :paramtype filter: str :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. Default value is None. 
@@ -4322,20 +4325,20 @@ async def search_get( requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. Default value is None. :paramtype session_id: str - :keyword _select: The list of fields to retrieve. If unspecified, all fields marked as + :keyword select: The list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. Default value is None. - :paramtype _select: list[str] - :keyword _skip: The number of search results to skip. This value cannot be greater than + :paramtype select: list[str] + :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use $skip due to this limitation, consider using $orderby on a totally-ordered key and $filter with a range query instead. Default value is None. - :paramtype _skip: int - :keyword _top: The number of search results to retrieve. This can be used in conjunction with + :paramtype skip: int + :keyword top: The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. Default value is None. - :paramtype _top: int + :paramtype top: int :keyword semantic_configuration: The name of the semantic configuration that lists which fields should be used for semantic ranking, captions, highlights, and answers. Default value is None. @@ -4359,7 +4362,7 @@ async def search_get( followed by the ``threshold-`` option after the answers parameter value, such as ``extractive|threshold-0.9``. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe - character '|' followed by the 'count-:code:``', + character '|' followed by the 'count-\\ :code:``', such as 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". Default value is None. :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType @@ -4370,7 +4373,7 @@ async def search_get( can be configured by appending the pipe character ``|`` followed by the ``highlight-`` option, such as ``extractive|highlight-true``. Defaults to ``None``. The maximum character length of captions can be configured by - appending the pipe character '|' followed by the 'count-:code:``', such as 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". Default value is None. 
:paramtype captions: str or ~azure.search.documents.models.QueryCaptionType @@ -4425,12 +4428,12 @@ async def search_get( cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) - _request = build_documents_operations_search_get_request( + _request = build_documents_search_get_request( index_name=index_name, search_text=search_text, include_total_result_count=include_total_result_count, facets=facets, - _filter=_filter, + filter=filter, highlight_fields=highlight_fields, highlight_post_tag=highlight_post_tag, highlight_pre_tag=highlight_pre_tag, @@ -4443,9 +4446,9 @@ async def search_get( search_mode=search_mode, scoring_statistics=scoring_statistics, session_id=session_id, - _select=_select, - _skip=_skip, - _top=_top, + select=select, + skip=skip, + top=top, semantic_configuration=semantic_configuration, semantic_error_handling=semantic_error_handling, semantic_max_wait_in_milliseconds=semantic_max_wait_in_milliseconds, @@ -4480,7 +4483,7 @@ async def search_get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4588,7 +4591,7 @@ async def search_post( else: _content = json.dumps(search_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_search_post_request( + _request = build_documents_search_post_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -4615,7 +4618,7 @@ async def search_post( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4631,7 +4634,7 @@ async def search_post( @distributed_trace_async async def get( self, key: str, index_name: str, *, selected_fields: Optional[List[str]] = None, **kwargs: Any - ) -> Dict[str, Any]: + ) -> _models.LookupDocument: """Retrieves a document from the index. :param key: The key of the document to retrieve. Required. @@ -4642,8 +4645,8 @@ async def get( retrieved will be missing from the returned document. Default value is None. :paramtype selected_fields: list[str] - :return: dict mapping str to any - :rtype: dict[str, any] + :return: LookupDocument. 
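On the documents group, the query keywords lose their leading underscores (`filter`, `select`, `skip`, `top`) and get() now deserializes into a LookupDocument model rather than a plain dict. A sketch under the same assumptions; index, field, and key values are placeholders.

```python
# Sketch only: `client` is assumed to be the generated async SearchClient.
async def query_documents(client) -> None:
    total = await client.documents.count("hotels-index")
    results = await client.documents.search_get(
        "hotels-index",
        search_text="wifi",
        filter="Rating ge 4",            # was _filter
        select=["HotelName", "Rating"],  # was _select
        skip=0,                          # was _skip
        top=10,                          # was _top
    )
    doc = await client.documents.get("1", "hotels-index", selected_fields=["HotelName"])
    print(total, results, doc)  # doc is a LookupDocument (MutableMapping-compatible)
```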
The LookupDocument is compatible with MutableMapping + :rtype: ~azure.search.documents.models.LookupDocument :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -4657,9 +4660,9 @@ async def get( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Dict[str, Any]] = kwargs.pop("cls", None) + cls: ClsType[_models.LookupDocument] = kwargs.pop("cls", None) - _request = build_documents_operations_get_request( + _request = build_documents_get_request( key=key, index_name=index_name, selected_fields=selected_fields, @@ -4686,13 +4689,13 @@ async def get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(Dict[str, Any], response.json()) + deserialized = _deserialize(_models.LookupDocument, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -4706,15 +4709,15 @@ async def suggest_get( *, search_text: str, suggester_name: str, - _filter: Optional[str] = None, + filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, minimum_coverage: Optional[float] = None, order_by: Optional[List[str]] = None, search_fields: Optional[List[str]] = None, - _select: Optional[List[str]] = None, - _top: Optional[int] = None, + select: Optional[List[str]] = None, + top: Optional[int] = None, **kwargs: Any ) -> _models.SuggestDocumentsResult: """Suggests documents in the index that match the given partial query text. @@ -4729,9 +4732,9 @@ async def suggest_get( that's part of the index definition. Required. :paramtype suggester_name: str - :keyword _filter: An OData expression that filters the documents considered for suggestions. + :keyword filter: An OData expression that filters the documents considered for suggestions. Default value is None. - :paramtype _filter: str + :paramtype filter: str :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions query. Default is false. When set to true, the query will find terms even if there's a @@ -4765,14 +4768,14 @@ async def suggest_get( fields must be included in the specified suggester. Default value is None. :paramtype search_fields: list[str] - :keyword _select: The list of fields to retrieve. If unspecified, only the key field will be + :keyword select: The list of fields to retrieve. If unspecified, only the key field will be included in the results. Default value is None. - :paramtype _select: list[str] - :keyword _top: The number of suggestions to retrieve. The value must be a number between 1 and + :paramtype select: list[str] + :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and #. The default is 5. Default value is None. - :paramtype _top: int + :paramtype top: int :return: SuggestDocumentsResult. 
The SuggestDocumentsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.SuggestDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -4790,19 +4793,19 @@ async def suggest_get( cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) - _request = build_documents_operations_suggest_get_request( + _request = build_documents_suggest_get_request( index_name=index_name, search_text=search_text, suggester_name=suggester_name, - _filter=_filter, + filter=filter, use_fuzzy_matching=use_fuzzy_matching, highlight_post_tag=highlight_post_tag, highlight_pre_tag=highlight_pre_tag, minimum_coverage=minimum_coverage, order_by=order_by, search_fields=search_fields, - _select=_select, - _top=_top, + select=select, + top=top, api_version=self._config.api_version, headers=_headers, params=_params, @@ -4826,7 +4829,7 @@ async def suggest_get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4934,7 +4937,7 @@ async def suggest_post( else: _content = json.dumps(suggest_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_suggest_post_request( + _request = build_documents_suggest_post_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -4961,7 +4964,7 @@ async def suggest_post( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5064,7 +5067,7 @@ async def index( else: _content = json.dumps(batch, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_index_request( + _request = build_documents_index_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -5091,7 +5094,7 @@ async def index( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5112,13 +5115,13 @@ async def autocomplete_get( search_text: str, suggester_name: str, autocomplete_mode: Optional[Union[str, _models.AutocompleteMode]] = None, - _filter: Optional[str] = None, + filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, minimum_coverage: Optional[float] = None, search_fields: Optional[List[str]] = None, - _top: Optional[int] = None, + top: Optional[int] = None, **kwargs: Any ) -> _models.AutocompleteResult: """Autocompletes incomplete query terms based on input text and matching terms in @@ -5138,10 +5141,9 @@ async def autocomplete_get( producing auto-completed terms. Known values are: "oneTerm", "twoTerms", and "oneTermWithContext". Default value is None. 
:paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword _filter: An OData expression that filters the documents used to produce completed - terms + :keyword filter: An OData expression that filters the documents used to produce completed terms for the Autocomplete result. Default value is None. - :paramtype _filter: str + :paramtype filter: str :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will find terms even if there's a @@ -5165,9 +5167,9 @@ async def autocomplete_get( terms. Target fields must be included in the specified suggester. Default value is None. :paramtype search_fields: list[str] - :keyword _top: The number of auto-completed terms to retrieve. This must be a value between 1 + :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. Default value is None. - :paramtype _top: int + :paramtype top: int :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.AutocompleteResult :raises ~azure.core.exceptions.HttpResponseError: @@ -5185,18 +5187,18 @@ async def autocomplete_get( cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) - _request = build_documents_operations_autocomplete_get_request( + _request = build_documents_autocomplete_get_request( index_name=index_name, search_text=search_text, suggester_name=suggester_name, autocomplete_mode=autocomplete_mode, - _filter=_filter, + filter=filter, use_fuzzy_matching=use_fuzzy_matching, highlight_post_tag=highlight_post_tag, highlight_pre_tag=highlight_pre_tag, minimum_coverage=minimum_coverage, search_fields=search_fields, - _top=_top, + top=top, api_version=self._config.api_version, headers=_headers, params=_params, @@ -5220,7 +5222,7 @@ async def autocomplete_get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5333,7 +5335,7 @@ async def autocomplete_post( else: _content = json.dumps(autocomplete_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_autocomplete_post_request( + _request = build_documents_autocomplete_post_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -5360,7 +5362,7 @@ async def autocomplete_post( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5421,7 +5423,7 @@ async def get_service_statistics(self, **kwargs: Any) -> _models.SearchServiceSt except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5433,3 +5435,65 @@ async def get_service_statistics(self, 
**kwargs: Any) -> _models.SearchServiceSt
             return cls(pipeline_response, deserialized, {})  # type: ignore
 
         return deserialized  # type: ignore
+
+    @distributed_trace_async
+    @api_version_validation(
+        method_added_on="2025-03-01-preview",
+        params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "accept"]},
+    )
+    async def get_index_stats_summary(self, **kwargs: Any) -> _models.ListIndexStatsSummary:
+        """Retrieves a summary of statistics for all indexes in the search service.
+
+        :return: ListIndexStatsSummary. The ListIndexStatsSummary is compatible with MutableMapping
+        :rtype: ~azure.search.documents.models.ListIndexStatsSummary
+        :raises ~azure.core.exceptions.HttpResponseError:
+        """
+        error_map: MutableMapping = {
+            401: ClientAuthenticationError,
+            404: ResourceNotFoundError,
+            409: ResourceExistsError,
+            304: ResourceNotModifiedError,
+        }
+        error_map.update(kwargs.pop("error_map", {}) or {})
+
+        _headers = kwargs.pop("headers", {}) or {}
+        _params = kwargs.pop("params", {}) or {}
+
+        cls: ClsType[_models.ListIndexStatsSummary] = kwargs.pop("cls", None)
+
+        _request = build_search_get_index_stats_summary_request(
+            api_version=self._config.api_version,
+            headers=_headers,
+            params=_params,
+        )
+        path_format_arguments = {
+            "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
+        }
+        _request.url = self._client.format_url(_request.url, **path_format_arguments)
+
+        _stream = kwargs.pop("stream", False)
+        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
+            _request, stream=_stream, **kwargs
+        )
+
+        response = pipeline_response.http_response
+
+        if response.status_code not in [200]:
+            if _stream:
+                try:
+                    await response.read()  # Load the body in memory and close the socket
+                except (StreamConsumedError, StreamClosedError):
+                    pass
+            map_error(status_code=response.status_code, response=response, error_map=error_map)
+            error = _failsafe_deserialize(_models.ErrorResponse, response.json())
+            raise HttpResponseError(response=response, model=error)
+
+        if _stream:
+            deserialized = response.iter_bytes()
+        else:
+            deserialized = _deserialize(_models.ListIndexStatsSummary, response.json())
+
+        if cls:
+            return cls(pipeline_response, deserialized, {})  # type: ignore
+
+        return deserialized  # type: ignore
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py
index b88c6249b5aa..ef116bba5932 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py
@@ -61,7 +61,6 @@
     DocumentExtractionSkill,
     DocumentIntelligenceLayoutSkill,
     DocumentKeysOrIds,
-    EdgeNGramTokenFilter,
     EdgeNGramTokenFilterV2,
     EdgeNGramTokenizer,
     ElisionTokenFilter,
@@ -87,6 +86,7 @@
     IndexAction,
     IndexBatch,
     IndexDocumentsResult,
+    IndexStatisticsSummary,
     IndexerCurrentState,
     IndexerExecutionResult,
     IndexingParameters,
@@ -97,7 +97,6 @@
     KeepTokenFilter,
     KeyPhraseExtractionSkill,
     KeywordMarkerTokenFilter,
-    KeywordTokenizer,
     KeywordTokenizerV2,
     LanguageDetectionSkill,
     LengthTokenFilter,
@@ -106,11 +105,12 @@
     LexicalTokenizer,
     LimitTokenFilter,
     ListDataSourcesResult,
+    ListIndexStatsSummary,
     ListIndexersResult,
     ListSkillsetsResult,
     ListSynonymMapsResult,
+    LookupDocument,
     LuceneStandardAnalyzer,
-    LuceneStandardTokenizer,
     LuceneStandardTokenizerV2,
MagnitudeScoringFunction, MagnitudeScoringParameters, @@ -118,7 +118,6 @@ MergeSkill, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, - NGramTokenFilter, NGramTokenFilterV2, NGramTokenizer, NativeBlobSoftDeleteDeletionDetectionPolicy, @@ -236,7 +235,7 @@ ) from ._enums import ( # type: ignore - AIStudioModelCatalogName, + AIFoundryModelCatalogName, AutocompleteMode, AzureOpenAIModelName, BlobIndexerDataToExtract, @@ -367,7 +366,6 @@ "DocumentExtractionSkill", "DocumentIntelligenceLayoutSkill", "DocumentKeysOrIds", - "EdgeNGramTokenFilter", "EdgeNGramTokenFilterV2", "EdgeNGramTokenizer", "ElisionTokenFilter", @@ -393,6 +391,7 @@ "IndexAction", "IndexBatch", "IndexDocumentsResult", + "IndexStatisticsSummary", "IndexerCurrentState", "IndexerExecutionResult", "IndexingParameters", @@ -403,7 +402,6 @@ "KeepTokenFilter", "KeyPhraseExtractionSkill", "KeywordMarkerTokenFilter", - "KeywordTokenizer", "KeywordTokenizerV2", "LanguageDetectionSkill", "LengthTokenFilter", @@ -412,11 +410,12 @@ "LexicalTokenizer", "LimitTokenFilter", "ListDataSourcesResult", + "ListIndexStatsSummary", "ListIndexersResult", "ListSkillsetsResult", "ListSynonymMapsResult", + "LookupDocument", "LuceneStandardAnalyzer", - "LuceneStandardTokenizer", "LuceneStandardTokenizerV2", "MagnitudeScoringFunction", "MagnitudeScoringParameters", @@ -424,7 +423,6 @@ "MergeSkill", "MicrosoftLanguageStemmingTokenizer", "MicrosoftLanguageTokenizer", - "NGramTokenFilter", "NGramTokenFilterV2", "NGramTokenizer", "NativeBlobSoftDeleteDeletionDetectionPolicy", @@ -539,7 +537,7 @@ "WebApiVectorizer", "WebApiVectorizerParameters", "WordDelimiterTokenFilter", - "AIStudioModelCatalogName", + "AIFoundryModelCatalogName", "AutocompleteMode", "AzureOpenAIModelName", "BlobIndexerDataToExtract", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py index 90f4f707bc97..ac1c13890df0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py @@ -11,7 +11,7 @@ from azure.core import CaseInsensitiveEnumMeta -class AIStudioModelCatalogName(str, Enum, metaclass=CaseInsensitiveEnumMeta): +class AIFoundryModelCatalogName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """The name of the embedding model from the Azure AI Studio Catalog that will be called. """ @@ -42,13 +42,12 @@ class AutocompleteMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): could include 'medicaid', 'medicare', and 'medicine'.""" TWO_TERMS = "twoTerms" """Matching two-term phrases in the index will be suggested. For example, if the - input is 'medic', the suggested terms could include 'medicare coverage' and - 'medical assistant'.""" + input is 'medic', the suggested terms could include 'medicare coverage' and 'medical + assistant'.""" ONE_TERM_WITH_CONTEXT = "oneTermWithContext" """Completes the last term in a query with two or more terms, where the last two - terms are a phrase that exists in the index. For example, if the input is - 'washington medic', the suggested terms could include 'washington medicaid' and - 'washington medical'.""" + terms are a phrase that exists in the index. 
For example, if the input is 'washington medic', + the suggested terms could include 'washington medicaid' and 'washington medical'.""" class AzureOpenAIModelName(str, Enum, metaclass=CaseInsensitiveEnumMeta): @@ -198,9 +197,7 @@ class DocumentIntelligenceLayoutSkillMarkdownHeaderDepth(str, Enum, metaclass=Ca class DocumentIntelligenceLayoutSkillOutputMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """Controls the cardinality of the output produced by the skill. Default is - 'oneToMany'. - """ + """Controls the cardinality of the output produced by the skill. Default is 'oneToMany'.""" ONE_TO_MANY = "oneToMany" """Specify the deepest markdown header section to parse.""" @@ -290,15 +287,16 @@ class EntityRecognitionSkillLanguage(str, Enum, metaclass=CaseInsensitiveEnumMet class HybridCountAndFacetMode(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Determines whether the count and facets should includes all documents that matched the search query, or only the documents that are retrieved within the - 'maxTextRecallSize' window. The default value is 'countAllResults'. + 'maxTextRecallSize' + window. The default value is 'countAllResults'. """ COUNT_RETRIEVABLE_RESULTS = "countRetrievableResults" """Only include documents that were matched within the 'maxTextRecallSize' retrieval window when computing 'count' and 'facets'.""" COUNT_ALL_RESULTS = "countAllResults" - """Include all documents that were matched by the search query when computing - 'count' and 'facets', regardless of whether or not those documents are within + """Include all documents that were matched by the search query when computing 'count' + and 'facets', regardless of whether or not those documents are within the 'maxTextRecallSize' retrieval window.""" @@ -1451,7 +1449,7 @@ class QueryAnswerType(str, Enum, metaclass=CaseInsensitiveEnumMeta): followed by the ``threshold-`` option after the answers parameter value, such as ``extractive|threshold-0.9``. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe - character '|' followed by the 'count-:code:``', + character '|' followed by the 'count-\\ :code:``', such as 'extractive|maxcharlength-600'. """ @@ -1469,7 +1467,7 @@ class QueryCaptionType(str, Enum, metaclass=CaseInsensitiveEnumMeta): can be configured by appending the pipe character ``|`` followed by the ``highlight-`` option, such as ``extractive|highlight-true``. Defaults to ``None``. The maximum character length of captions can be configured by - appending the pipe character '|' followed by the 'count-:code:``', such as 'extractive|maxcharlength-600'. """ @@ -2635,8 +2633,8 @@ class VectorSearchCompressionKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): values, thereby reducing the overall data size.""" BINARY_QUANTIZATION = "binaryQuantization" """Binary Quantization, a type of compression method. In binary quantization, the - original vectors values are compressed to the narrower binary type by - discretizing and representing each component of a vector using binary values, + original vectors values are compressed to the narrower binary type by discretizing + and representing each component of a vector using binary values, thereby reducing the overall data size.""" @@ -2682,8 +2680,8 @@ class VectorThresholdKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): VECTOR_SIMILARITY = "vectorSimilarity" """The results of the vector query will be filtered based on the vector similarity - metric. 
Note this is the canonical definition of similarity metric, not the - 'distance' version. The threshold direction (larger or smaller) will be chosen + metric. Note this is the canonical definition of similarity metric, not the 'distance' + version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field.""" SEARCH_SCORE = "searchScore" """The results of the vector query will filter based on the '@search.score' value. diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py index 80d9ffbc477b..a7ddf48b28e1 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -1,4 +1,4 @@ -# pylint: disable=too-many-lines +# pylint: disable=line-too-long,useless-suppression,too-many-lines # coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. @@ -40,9 +40,9 @@ class CognitiveServicesAccount(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Description of the Azure AI service resource attached to a skillset.""" @overload @@ -83,14 +83,16 @@ class AIServicesAccountIdentity(CognitiveServicesAccount, discriminator="#Micros :vartype odata_type: str """ - identity: "_models.SearchIndexerDataIdentity" = rest_field() + identity: "_models.SearchIndexerDataIdentity" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. Required.""" - subdomain_url: str = rest_field(name="subdomainUrl") + subdomain_url: str = rest_field(name="subdomainUrl", visibility=["read", "create", "update", "delete", "query"]) """The subdomain url for the corresponding AI Service. Required.""" - odata_type: Literal["#Microsoft.Azure.Search.AIServicesByIdentity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.AIServicesByIdentity"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is \"#Microsoft.Azure.Search.AIServicesByIdentity\".""" @@ -131,11 +133,11 @@ class AIServicesAccountKey(CognitiveServicesAccount, discriminator="#Microsoft.A :vartype odata_type: str """ - key: str = rest_field() + key: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The key used to provision the Azure AI service resource attached to a skillset. 
Required.""" - subdomain_url: str = rest_field(name="subdomainUrl") + subdomain_url: str = rest_field(name="subdomainUrl", visibility=["read", "create", "update", "delete", "query"]) """The subdomain url for the corresponding AI Service. Required.""" - odata_type: Literal["#Microsoft.Azure.Search.AIServicesByKey"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.AIServicesByKey"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is \"#Microsoft.Azure.Search.AIServicesByKey\".""" @@ -180,14 +182,16 @@ class AIServicesVisionParameters(_model_base.Model): :vartype auth_identity: ~azure.search.documents.models.SearchIndexerDataIdentity """ - model_version: str = rest_field(name="modelVersion") + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) """The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. Required.""" - resource_uri: str = rest_field(name="resourceUri") + resource_uri: str = rest_field(name="resourceUri", visibility=["read", "create", "update", "delete", "query"]) """The resource URI of the AI Services resource. Required.""" - api_key: Optional[str] = rest_field(name="apiKey") + api_key: Optional[str] = rest_field(name="apiKey", visibility=["read", "create", "update", "delete", "query"]) """API key of the designated AI Services resource.""" - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + name="authIdentity", visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the @@ -232,9 +236,9 @@ class VectorSearchVectorizer(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - vectorizer_name: str = rest_field(name="name") + vectorizer_name: str = rest_field(name="name", visibility=["read", "create", "update", "delete", "query"]) """The name to associate with this particular vectorization method. Required.""" - kind: str = rest_discriminator(name="kind") + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """Type of VectorSearchVectorizer. Required. Known values are: \"azureOpenAI\", \"customWebApi\", \"aiServicesVision\", and \"aml\".""" @@ -258,8 +262,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AIServicesVisionVectorizer(VectorSearchVectorizer, discriminator="aiServicesVision"): - """Specifies the AI Services Vision parameters for vectorizing a query image or - text. + """Clears the identity property of a datasource. :ivar vectorizer_name: The name to associate with this particular vectorization method. @@ -270,17 +273,17 @@ class AIServicesVisionVectorizer(VectorSearchVectorizer, discriminator="aiServic :vartype ai_services_vision_parameters: ~azure.search.documents.models.AIServicesVisionParameters :ivar kind: The name of the kind of vectorization method being configured for use with - vector search. Required. 
Generate embeddings for an image or text input at query time using - the Azure AI + vector search. Required. Generate embeddings for an image or text input at query time using the + Azure AI Services Vision Vectorize API. :vartype kind: str or ~azure.search.documents.models.AI_SERVICES_VISION """ ai_services_vision_parameters: Optional["_models.AIServicesVisionParameters"] = rest_field( - name="AIServicesVisionParameters" + name="AIServicesVisionParameters", visibility=["read", "create", "update", "delete", "query"] ) """Contains the parameters specific to AI Services Vision embedding vectorization.""" - kind: Literal[VectorSearchVectorizerKind.AI_SERVICES_VISION] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchVectorizerKind.AI_SERVICES_VISION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of vectorization method being configured for use with vector search. Required. Generate embeddings for an image or text input at query time using the Azure AI @@ -308,8 +311,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AnalyzedTokenInfo(_model_base.Model): """Information about a token returned by an analyzer. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar token: The token returned by the analyzer. Required. :vartype token: str @@ -324,18 +325,38 @@ class AnalyzedTokenInfo(_model_base.Model): :vartype position: int """ - token: str = rest_field(visibility=["read"]) + token: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The token returned by the analyzer. Required.""" - start_offset: int = rest_field(name="startOffset", visibility=["read"]) + start_offset: int = rest_field(name="startOffset", visibility=["read", "create", "update", "delete", "query"]) """The index of the first character of the token in the input text. Required.""" - end_offset: int = rest_field(name="endOffset", visibility=["read"]) + end_offset: int = rest_field(name="endOffset", visibility=["read", "create", "update", "delete", "query"]) """The index of the last character of the token in the input text. Required.""" - position: int = rest_field(visibility=["read"]) + position: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other. Required.""" + @overload + def __init__( + self, + *, + token: str, + start_offset: int, + end_offset: int, + position: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class AnalyzeRequest(_model_base.Model): """Specifies some text and analysis components used to break that text into tokens. @@ -383,9 +404,11 @@ class AnalyzeRequest(_model_base.Model): :vartype char_filters: list[str or ~azure.search.documents.models.CharFilterName] """ - text: str = rest_field() + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text to break into tokens. 
Required.""" - analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field() + analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. Known values are: \"ar.microsoft\", \"ar.lucene\", @@ -408,20 +431,28 @@ class AnalyzeRequest(_model_base.Model): \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", \"vi.microsoft\", \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", \"pattern\", \"simple\", \"stop\", and \"whitespace\".""" - tokenizer: Optional[Union[str, "_models.LexicalTokenizerName"]] = rest_field() + tokenizer: Optional[Union[str, "_models.LexicalTokenizerName"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. Known values are: \"classic\", \"edgeNGram\", \"keyword_v2\", \"letter\", \"lowercase\", \"microsoft_language_tokenizer\", \"microsoft_language_stemming_tokenizer\", \"nGram\", \"path_hierarchy_v2\", \"pattern\", \"standard_v2\", \"uax_url_email\", and \"whitespace\".""" - normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = rest_field() + normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The name of the normalizer to use to normalize the given text. Known values are: \"asciifolding\", \"elision\", \"lowercase\", \"standard\", and \"uppercase\".""" - token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field(name="tokenFilters") + token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field( + name="tokenFilters", visibility=["read", "create", "update", "delete", "query"] + ) """An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter.""" - char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field(name="charFilters") + char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field( + name="charFilters", visibility=["read", "create", "update", "delete", "query"] + ) """An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter.""" @@ -456,7 +487,7 @@ class AnalyzeResult(_model_base.Model): :vartype tokens: list[~azure.search.documents.models.AnalyzedTokenInfo] """ - tokens: List["_models.AnalyzedTokenInfo"] = rest_field() + tokens: List["_models.AnalyzedTokenInfo"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The list of tokens returned by the analyzer specified in the request. Required.""" @overload @@ -482,12 +513,11 @@ class TokenFilter(_model_base.Model): You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: AsciiFoldingTokenFilter, CjkBigramTokenFilter, CommonGramTokenFilter, - DictionaryDecompounderTokenFilter, EdgeNGramTokenFilter, EdgeNGramTokenFilterV2, - ElisionTokenFilter, KeepTokenFilter, KeywordMarkerTokenFilter, LengthTokenFilter, - LimitTokenFilter, NGramTokenFilter, NGramTokenFilterV2, PatternCaptureTokenFilter, - PatternReplaceTokenFilter, PhoneticTokenFilter, ShingleTokenFilter, SnowballTokenFilter, - StemmerOverrideTokenFilter, StemmerTokenFilter, StopwordsTokenFilter, SynonymTokenFilter, - TruncateTokenFilter, UniqueTokenFilter, WordDelimiterTokenFilter + DictionaryDecompounderTokenFilter, EdgeNGramTokenFilterV2, ElisionTokenFilter, KeepTokenFilter, + KeywordMarkerTokenFilter, LengthTokenFilter, LimitTokenFilter, NGramTokenFilterV2, + PatternCaptureTokenFilter, PatternReplaceTokenFilter, PhoneticTokenFilter, ShingleTokenFilter, + SnowballTokenFilter, StemmerOverrideTokenFilter, StemmerTokenFilter, StopwordsTokenFilter, + SynonymTokenFilter, TruncateTokenFilter, UniqueTokenFilter, WordDelimiterTokenFilter :ivar odata_type: The discriminator for derived types. Required. Default value is None. @@ -499,9 +529,9 @@ class TokenFilter(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. Required.""" @@ -544,9 +574,11 @@ class AsciiFoldingTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Searc :vartype odata_type: str """ - preserve_original: Optional[bool] = rest_field(name="preserveOriginal") + preserve_original: Optional[bool] = rest_field( + name="preserveOriginal", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether the original token will be kept. Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.AsciiFoldingTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.AsciiFoldingTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.AsciiFoldingTokenFilter\".""" @@ -572,8 +604,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AutocompleteItem(_model_base.Model): """The result of Autocomplete requests. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar text: The completed term. Required. :vartype text: str @@ -581,11 +611,29 @@ class AutocompleteItem(_model_base.Model): :vartype query_plus_text: str """ - text: str = rest_field(visibility=["read"]) + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The completed term. 
Required.""" - query_plus_text: str = rest_field(name="queryPlusText", visibility=["read"]) + query_plus_text: str = rest_field(name="queryPlusText", visibility=["read", "create", "update", "delete", "query"]) """The query along with the completed term. Required.""" + @overload + def __init__( + self, + *, + text: str, + query_plus_text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class AutocompleteRequest(_model_base.Model): """Parameters for fuzzy matching, and other autocomplete query behaviors. @@ -635,41 +683,53 @@ class AutocompleteRequest(_model_base.Model): :vartype top: int """ - search_text: str = rest_field(name="search") + search_text: str = rest_field(name="search", visibility=["read", "create", "update", "delete", "query"]) """The search text on which to base autocomplete results. Required.""" - autocomplete_mode: Optional[Union[str, "_models.AutocompleteMode"]] = rest_field(name="autocompleteMode") + autocomplete_mode: Optional[Union[str, "_models.AutocompleteMode"]] = rest_field( + name="autocompleteMode", visibility=["read", "create", "update", "delete", "query"] + ) """Specifies the mode for Autocomplete. The default is 'oneTerm'. Use 'twoTerms' to get shingles and 'oneTermWithContext' to use the current context while producing auto-completed terms. Known values are: \"oneTerm\", \"twoTerms\", and \"oneTermWithContext\".""" - filter: Optional[str] = rest_field() + filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """An OData expression that filters the documents used to produce completed terms for the Autocomplete result.""" - use_fuzzy_matching: Optional[bool] = rest_field(name="fuzzy") + use_fuzzy_matching: Optional[bool] = rest_field( + name="fuzzy", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will autocomplete terms even if there's a substituted or missing character in the search text. While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy autocomplete queries are slower and consume more resources.""" - highlight_post_tag: Optional[str] = rest_field(name="highlightPostTag") + highlight_post_tag: Optional[str] = rest_field( + name="highlightPostTag", visibility=["read", "create", "update", "delete", "query"] + ) """A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting is disabled.""" - highlight_pre_tag: Optional[str] = rest_field(name="highlightPreTag") + highlight_pre_tag: Optional[str] = rest_field( + name="highlightPreTag", visibility=["read", "create", "update", "delete", "query"] + ) """A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting is disabled.""" - minimum_coverage: Optional[float] = rest_field(name="minimumCoverage") + minimum_coverage: Optional[float] = rest_field( + name="minimumCoverage", visibility=["read", "create", "update", "delete", "query"] + ) """A number between 0 and 100 indicating the percentage of the index that must be covered by an autocomplete query in order for the query to be reported as a success. 
This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80.""" - search_fields: Optional[str] = rest_field(name="searchFields") + search_fields: Optional[str] = rest_field( + name="searchFields", visibility=["read", "create", "update", "delete", "query"] + ) """The comma-separated list of field names to consider when querying for auto-completed terms. Target fields must be included in the specified suggester.""" - suggester_name: str = rest_field(name="suggesterName") + suggester_name: str = rest_field(name="suggesterName", visibility=["read", "create", "update", "delete", "query"]) """The name of the suggester as specified in the suggesters collection that's part of the index definition. Required.""" - top: Optional[int] = rest_field() + top: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5.""" @@ -703,8 +763,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AutocompleteResult(_model_base.Model): """The result of Autocomplete query. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar coverage: A value indicating the percentage of the index that was considered by the autocomplete request, or null if minimumCoverage was not specified in the @@ -714,13 +772,35 @@ class AutocompleteResult(_model_base.Model): :vartype results: list[~azure.search.documents.models.AutocompleteItem] """ - coverage: Optional[float] = rest_field(name="@search.coverage", visibility=["read"]) + coverage: Optional[float] = rest_field( + name="@search.coverage", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating the percentage of the index that was considered by the autocomplete request, or null if minimumCoverage was not specified in the request.""" - results: List["_models.AutocompleteItem"] = rest_field(name="value", visibility=["read"]) + results: List["_models.AutocompleteItem"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The list of returned Autocompleted items. Required.""" + @overload + def __init__( + self, + *, + results: List["_models.AutocompleteItem"], + coverage: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class AzureActiveDirectoryApplicationCredentials(_model_base.Model): # pylint: disable=name-too-long """Credentials of a registered application created for your search service, used @@ -737,12 +817,14 @@ class AzureActiveDirectoryApplicationCredentials(_model_base.Model): # pylint: :vartype application_secret: str """ - application_id: str = rest_field(name="applicationId") + application_id: str = rest_field(name="applicationId", visibility=["read", "create", "update", "delete", "query"]) """An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. 
Required.""" - application_secret: Optional[str] = rest_field(name="applicationSecret") + application_secret: Optional[str] = rest_field( + name="applicationSecret", visibility=["read", "create", "update", "delete", "query"] + ) """The authentication key of the specified AAD application.""" @overload @@ -778,7 +860,7 @@ class AzureMachineLearningParameters(_model_base.Model): :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. :vartype resource_id: str :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the API call. @@ -791,25 +873,31 @@ class AzureMachineLearningParameters(_model_base.Model): "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", "Facebook-DinoV2-Image-Embeddings-ViT-Base", "Facebook-DinoV2-Image-Embeddings-ViT-Giant", "Cohere-embed-v3-english", and "Cohere-embed-v3-multilingual". - :vartype model_name: str or ~azure.search.documents.models.AIStudioModelCatalogName + :vartype model_name: str or ~azure.search.documents.models.AIFoundryModelCatalogName """ - scoring_uri: str = rest_field(name="uri") + scoring_uri: str = rest_field(name="uri", visibility=["read", "create", "update", "delete", "query"]) """(Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. Required.""" - authentication_key: Optional[str] = rest_field(name="key") + authentication_key: Optional[str] = rest_field( + name="key", visibility=["read", "create", "update", "delete", "query"] + ) """(Required for key authentication) The key for the AML service.""" - resource_id: Optional[str] = rest_field(name="resourceId") + resource_id: Optional[str] = rest_field( + name="resourceId", visibility=["read", "create", "update", "delete", "query"] + ) """(Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long""" - timeout: Optional[datetime.timedelta] = rest_field() + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}.""" + timeout: Optional[datetime.timedelta] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """(Optional) When specified, indicates the timeout for the http client making the API call.""" - region: Optional[str] = rest_field() + region: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """(Optional for token authentication). The region the AML service is deployed in.""" - model_name: Optional[Union[str, "_models.AIStudioModelCatalogName"]] = rest_field(name="modelName") + model_name: Optional[Union[str, "_models.AIFoundryModelCatalogName"]] = rest_field( + name="modelName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the embedding model from the Azure AI Studio Catalog that is deployed at the provided endpoint. 
Known values are: \"OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32\", @@ -826,7 +914,7 @@ def __init__( resource_id: Optional[str] = None, timeout: Optional[datetime.timedelta] = None, region: Optional[str] = None, - model_name: Optional[Union[str, "_models.AIStudioModelCatalogName"]] = None, + model_name: Optional[Union[str, "_models.AIFoundryModelCatalogName"]] = None, ) -> None: ... @overload @@ -875,23 +963,27 @@ class SearchIndexerSkill(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" - name: Optional[str] = rest_field() + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The description of the skill which describes the inputs, outputs, and usage of the skill.""" - context: Optional[str] = rest_field() + context: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document.""" - inputs: List["_models.InputFieldMappingEntry"] = rest_field() + inputs: List["_models.InputFieldMappingEntry"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Inputs of the skills could be a column in the source data set, or the output of an upstream skill. Required.""" - outputs: List["_models.OutputFieldMappingEntry"] = rest_field() + outputs: List["_models.OutputFieldMappingEntry"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. Required.""" @@ -952,7 +1044,7 @@ class AzureMachineLearningSkill(SearchIndexerSkill, discriminator="#Microsoft.Sk :ivar resource_id: (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. :vartype resource_id: str :ivar timeout: (Optional) When specified, indicates the timeout for the http client making the API call. @@ -972,29 +1064,35 @@ class AzureMachineLearningSkill(SearchIndexerSkill, discriminator="#Microsoft.Sk :vartype odata_type: str """ - scoring_uri: Optional[str] = rest_field(name="uri") + scoring_uri: Optional[str] = rest_field(name="uri", visibility=["read", "create", "update", "delete", "query"]) """(Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. 
Only the https URI scheme is allowed.""" - authentication_key: Optional[str] = rest_field(name="key") + authentication_key: Optional[str] = rest_field( + name="key", visibility=["read", "create", "update", "delete", "query"] + ) """(Required for key authentication) The key for the AML service.""" - resource_id: Optional[str] = rest_field(name="resourceId") + resource_id: Optional[str] = rest_field( + name="resourceId", visibility=["read", "create", "update", "delete", "query"] + ) """(Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format - subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. # pylint: disable=line-too-long""" - timeout: Optional[datetime.timedelta] = rest_field() + subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}.""" + timeout: Optional[datetime.timedelta] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """(Optional) When specified, indicates the timeout for the http client making the API call.""" - region: Optional[str] = rest_field() + region: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """(Optional for token authentication). The region the AML service is deployed in.""" - degree_of_parallelism: Optional[int] = rest_field(name="degreeOfParallelism") + degree_of_parallelism: Optional[int] = rest_field( + name="degreeOfParallelism", visibility=["read", "create", "update", "delete", "query"] + ) """(Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1.""" - odata_type: Literal["#Microsoft.Skills.Custom.AmlSkill"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Skills.Custom.AmlSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Custom.AmlSkill\".""" @@ -1043,9 +1141,11 @@ class AzureMachineLearningVectorizer(VectorSearchVectorizer, discriminator="aml" :vartype kind: str or ~azure.search.documents.models.AML """ - aml_parameters: Optional["_models.AzureMachineLearningParameters"] = rest_field(name="AMLParameters") + aml_parameters: Optional["_models.AzureMachineLearningParameters"] = rest_field( + name="amlParameters", visibility=["read", "create", "update", "delete", "query"] + ) """Specifies the properties of the AML vectorizer.""" - kind: Literal[VectorSearchVectorizerKind.AML] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchVectorizerKind.AML] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of vectorization method being configured for use with vector search. Required. 
Generate embeddings using an Azure Machine Learning endpoint deployed via the @@ -1113,22 +1213,30 @@ class AzureOpenAIEmbeddingSkill(SearchIndexerSkill, discriminator="#Microsoft.Sk :vartype odata_type: str """ - resource_url: Optional[str] = rest_field(name="resourceUri") + resource_url: Optional[str] = rest_field( + name="resourceUri", visibility=["read", "create", "update", "delete", "query"] + ) """The resource URI of the Azure OpenAI resource.""" - deployment_name: Optional[str] = rest_field(name="deploymentId") + deployment_name: Optional[str] = rest_field( + name="deploymentId", visibility=["read", "create", "update", "delete", "query"] + ) """ID of the Azure OpenAI model deployment on the designated resource.""" - api_key: Optional[str] = rest_field(name="apiKey") + api_key: Optional[str] = rest_field(name="apiKey", visibility=["read", "create", "update", "delete", "query"]) """API key of the designated Azure OpenAI resource.""" - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + name="authIdentity", visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for outbound connections.""" - model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = rest_field(name="modelName") + model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = rest_field( + name="modelName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the embedding model that is deployed at the provided deploymentId path. Known values are: \"text-embedding-ada-002\", \"text-embedding-3-large\", and \"text-embedding-3-small\".""" - dimensions: Optional[int] = rest_field() + dimensions: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models.""" - odata_type: Literal["#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\".""" @@ -1164,8 +1272,8 @@ class AzureOpenAITokenizerParameters(_model_base.Model): """Azure OpenAI Tokenizer parameters. :ivar encoder_model_name: Only applies if the unit is set to azureOpenAITokens. Options include - 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is - 'CL100k_base'. Known values are: "r50k_base", "p50k_base", "p50k_edit", and "cl100k_base". + 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. + Known values are: "r50k_base", "p50k_base", "p50k_edit", and "cl100k_base". :vartype encoder_model_name: str or ~azure.search.documents.models.SplitSkillEncoderModelName :ivar allowed_special_tokens: (Optional) Only applies if the unit is set to azureOpenAITokens. 
This parameter @@ -1174,12 +1282,15 @@ class AzureOpenAITokenizerParameters(_model_base.Model): :vartype allowed_special_tokens: list[str] """ - encoder_model_name: Optional[Union[str, "_models.SplitSkillEncoderModelName"]] = rest_field(name="encoderModelName") + encoder_model_name: Optional[Union[str, "_models.SplitSkillEncoderModelName"]] = rest_field( + name="encoderModelName", visibility=["read", "create", "update", "delete", "query"] + ) """Only applies if the unit is set to azureOpenAITokens. Options include - 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is - 'CL100k_base'. Known values are: \"r50k_base\", \"p50k_base\", \"p50k_edit\", and - \"cl100k_base\".""" - allowed_special_tokens: Optional[List[str]] = rest_field(name="allowedSpecialTokens") + 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. + Known values are: \"r50k_base\", \"p50k_base\", \"p50k_edit\", and \"cl100k_base\".""" + allowed_special_tokens: Optional[List[str]] = rest_field( + name="allowedSpecialTokens", visibility=["read", "create", "update", "delete", "query"] + ) """(Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process.""" @@ -1217,9 +1328,11 @@ class AzureOpenAIVectorizer(VectorSearchVectorizer, discriminator="azureOpenAI") :vartype kind: str or ~azure.search.documents.models.AZURE_OPEN_AI """ - parameters: Optional["_models.AzureOpenAIVectorizerParameters"] = rest_field(name="azureOpenAIParameters") + parameters: Optional["_models.AzureOpenAIVectorizerParameters"] = rest_field( + name="azureOpenAIParameters", visibility=["read", "create", "update", "delete", "query"] + ) """Contains the parameters specific to Azure OpenAI embedding vectorization.""" - kind: Literal[VectorSearchVectorizerKind.AZURE_OPEN_AI] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchVectorizerKind.AZURE_OPEN_AI] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of vectorization method being configured for use with vector search. Required. 
Generate embeddings using an Azure OpenAI resource at query time.""" @@ -1259,15 +1372,23 @@ class AzureOpenAIVectorizerParameters(_model_base.Model): :vartype model_name: str or ~azure.search.documents.models.AzureOpenAIModelName """ - resource_url: Optional[str] = rest_field(name="resourceUri") + resource_url: Optional[str] = rest_field( + name="resourceUri", visibility=["read", "create", "update", "delete", "query"] + ) """The resource URI of the Azure OpenAI resource.""" - deployment_name: Optional[str] = rest_field(name="deploymentId") + deployment_name: Optional[str] = rest_field( + name="deploymentId", visibility=["read", "create", "update", "delete", "query"] + ) """ID of the Azure OpenAI model deployment on the designated resource.""" - api_key: Optional[str] = rest_field(name="apiKey") + api_key: Optional[str] = rest_field(name="apiKey", visibility=["read", "create", "update", "delete", "query"]) """API key of the designated Azure OpenAI resource.""" - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + name="authIdentity", visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for outbound connections.""" - model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = rest_field(name="modelName") + model_name: Optional[Union[str, "_models.AzureOpenAIModelName"]] = rest_field( + name="modelName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the embedding model that is deployed at the provided deploymentId path. Known values are: \"text-embedding-ada-002\", \"text-embedding-3-large\", and \"text-embedding-3-small\".""" @@ -1335,23 +1456,31 @@ class VectorSearchCompression(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - compression_name: str = rest_field(name="name") + compression_name: str = rest_field(name="name", visibility=["read", "create", "update", "delete", "query"]) """The name to associate with this particular configuration. Required.""" - rerank_with_original_vectors: Optional[bool] = rest_field(name="rerankWithOriginalVectors") + rerank_with_original_vectors: Optional[bool] = rest_field( + name="rerankWithOriginalVectors", visibility=["read", "create", "update", "delete", "query"] + ) """If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency.""" - default_oversampling: Optional[float] = rest_field(name="defaultOversampling") + default_oversampling: Optional[float] = rest_field( + name="defaultOversampling", visibility=["read", "create", "update", "delete", "query"] + ) """Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. 
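A comparable sketch for the query-time vectorizer configured through AzureOpenAIVectorizer and AzureOpenAIVectorizerParameters. The vectorizer_name keyword belongs to the VectorSearchVectorizer base class and is assumed here; the remaining keywords mirror the fields in this hunk.

# Illustrative sketch; vectorizer_name and the import path are assumptions.
from azure.search.documents.indexes.models import (
    AzureOpenAIVectorizer,
    AzureOpenAIVectorizerParameters,
)

vectorizer = AzureOpenAIVectorizer(
    vectorizer_name="my-openai-vectorizer",  # base-class field, not shown in this hunk
    parameters=AzureOpenAIVectorizerParameters(
        resource_url="https://my-openai.openai.azure.com",  # hypothetical endpoint
        deployment_name="my-embedding-deployment",          # hypothetical deployment ID
        model_name="text-embedding-ada-002",                 # known value from the docstring above
    ),
)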
Higher values improve recall at the expense of latency.""" - rescoring_options: Optional["_models.RescoringOptions"] = rest_field(name="rescoringOptions") + rescoring_options: Optional["_models.RescoringOptions"] = rest_field( + name="rescoringOptions", visibility=["read", "create", "update", "delete", "query"] + ) """Contains the options for rescoring.""" - truncation_dimension: Optional[int] = rest_field(name="truncationDimension") + truncation_dimension: Optional[int] = rest_field( + name="truncationDimension", visibility=["read", "create", "update", "delete", "query"] + ) """The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search @@ -1359,7 +1488,7 @@ class VectorSearchCompression(_model_base.Model): trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation.""" - kind: str = rest_discriminator(name="kind") + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """Type of VectorSearchCompression. Required. Known values are: \"scalarQuantization\" and \"binaryQuantization\".""" @@ -1421,18 +1550,18 @@ class BinaryQuantizationCompression(VectorSearchCompression, discriminator="bina :ivar kind: The name of the kind of compression method being configured for use with vector search. Required. Binary Quantization, a type of compression method. In binary quantization, the - original vectors values are compressed to the narrower binary type by - discretizing and representing each component of a vector using binary values, + original vectors values are compressed to the narrower binary type by discretizing + and representing each component of a vector using binary values, thereby reducing the overall data size. :vartype kind: str or ~azure.search.documents.models.BINARY_QUANTIZATION """ - kind: Literal[VectorSearchCompressionKind.BINARY_QUANTIZATION] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchCompressionKind.BINARY_QUANTIZATION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of compression method being configured for use with vector search. Required. Binary Quantization, a type of compression method. In binary quantization, the - original vectors values are compressed to the narrower binary type by - discretizing and representing each component of a vector using binary values, + original vectors values are compressed to the narrower binary type by discretizing + and representing each component of a vector using binary values, thereby reducing the overall data size.""" @overload @@ -1472,7 +1601,7 @@ class SimilarityAlgorithm(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" @overload @@ -1510,22 +1639,24 @@ class BM25SimilarityAlgorithm(SimilarityAlgorithm, discriminator="#Microsoft.Azu normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. :vartype b: float - :ivar odata_type: Required. 
Default value is "#Microsoft.Azure.Search.BM25Similarity". + :ivar odata_type: The discriminator for derived types. Required. Default value is + "#Microsoft.Azure.Search.BM25Similarity". :vartype odata_type: str """ - k1: Optional[float] = rest_field() + k1: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.""" - b: Optional[float] = rest_field() + b: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document.""" - odata_type: Literal["#Microsoft.Azure.Search.BM25Similarity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """Required. Default value is \"#Microsoft.Azure.Search.BM25Similarity\".""" + odata_type: Literal["#Microsoft.Azure.Search.BM25Similarity"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The discriminator for derived types. Required. Default value is + \"#Microsoft.Azure.Search.BM25Similarity\".""" @overload def __init__( @@ -1562,9 +1693,9 @@ class CharFilter(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. Required.""" @@ -1609,12 +1740,16 @@ class CjkBigramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.C :vartype odata_type: str """ - ignore_scripts: Optional[List[Union[str, "_models.CjkBigramTokenFilterScripts"]]] = rest_field(name="ignoreScripts") + ignore_scripts: Optional[List[Union[str, "_models.CjkBigramTokenFilterScripts"]]] = rest_field( + name="ignoreScripts", visibility=["read", "create", "update", "delete", "query"] + ) """The scripts to ignore.""" - output_unigrams: Optional[bool] = rest_field(name="outputUnigrams") + output_unigrams: Optional[bool] = rest_field( + name="outputUnigrams", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.CjkBigramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.CjkBigramTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. 
Default value is \"#Microsoft.Azure.Search.CjkBigramTokenFilter\".""" @@ -1645,12 +1780,14 @@ class ClassicSimilarityAlgorithm(SimilarityAlgorithm, discriminator="#Microsoft. that only partially match the searched queries. - :ivar odata_type: Required. Default value is "#Microsoft.Azure.Search.ClassicSimilarity". + :ivar odata_type: The discriminator for derived types. Required. Default value is + "#Microsoft.Azure.Search.ClassicSimilarity". :vartype odata_type: str """ - odata_type: Literal["#Microsoft.Azure.Search.ClassicSimilarity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """Required. Default value is \"#Microsoft.Azure.Search.ClassicSimilarity\".""" + odata_type: Literal["#Microsoft.Azure.Search.ClassicSimilarity"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The discriminator for derived types. Required. Default value is + \"#Microsoft.Azure.Search.ClassicSimilarity\".""" @overload def __init__( @@ -1672,10 +1809,9 @@ class LexicalTokenizer(_model_base.Model): """Base type for tokenizers. You probably want to use the sub-classes and not this class directly. Known sub-classes are: - ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, - MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, - PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, - UaxUrlEmailTokenizer + ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, + MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, + LuceneStandardTokenizerV2, UaxUrlEmailTokenizer :ivar odata_type: The discriminator for derived types. Required. Default value is None. @@ -1687,9 +1823,9 @@ class LexicalTokenizer(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. Required.""" @@ -1731,10 +1867,12 @@ class ClassicTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search. :vartype odata_type: str """ - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters.""" - odata_type: Literal["#Microsoft.Azure.Search.ClassicTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.ClassicTokenizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. 
Default value is \"#Microsoft.Azure.Search.ClassicTokenizer\".""" @@ -1774,9 +1912,9 @@ class CognitiveServicesAccountKey( :vartype odata_type: str """ - key: str = rest_field() + key: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The key used to provision the Azure AI service resource attached to a skillset. Required.""" - odata_type: Literal["#Microsoft.Azure.Search.CognitiveServicesByKey"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.CognitiveServicesByKey"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is \"#Microsoft.Azure.Search.CognitiveServicesByKey\".""" @@ -1824,16 +1962,20 @@ class CommonGramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search. :vartype odata_type: str """ - common_words: List[str] = rest_field(name="commonWords") + common_words: List[str] = rest_field(name="commonWords", visibility=["read", "create", "update", "delete", "query"]) """The set of common words. Required.""" - ignore_case: Optional[bool] = rest_field(name="ignoreCase") + ignore_case: Optional[bool] = rest_field( + name="ignoreCase", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether common words matching will be case insensitive. Default is false.""" - use_query_mode: Optional[bool] = rest_field(name="queryMode") + use_query_mode: Optional[bool] = rest_field( + name="queryMode", visibility=["read", "create", "update", "delete", "query"] + ) """A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.CommonGramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.CommonGramTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.CommonGramTokenFilter\".""" @@ -1886,7 +2028,7 @@ class ConditionalSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Util :vartype odata_type: str """ - odata_type: Literal["#Microsoft.Skills.Util.ConditionalSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Util.ConditionalSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Util.ConditionalSkill\".""" @@ -1928,12 +2070,16 @@ class CorsOptions(_model_base.Model): :vartype max_age_in_seconds: int """ - allowed_origins: List[str] = rest_field(name="allowedOrigins") + allowed_origins: List[str] = rest_field( + name="allowedOrigins", visibility=["read", "create", "update", "delete", "query"] + ) """The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). 
Required.""" - max_age_in_seconds: Optional[int] = rest_field(name="maxAgeInSeconds") + max_age_in_seconds: Optional[int] = rest_field( + name="maxAgeInSeconds", visibility=["read", "create", "update", "delete", "query"] + ) """The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes.""" @@ -1972,9 +2118,9 @@ class LexicalAnalyzer(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. Required.""" @@ -2032,22 +2178,28 @@ class CustomAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.Cus :vartype odata_type: str """ - tokenizer: Union[str, "_models.LexicalTokenizerName"] = rest_field() + tokenizer: Union[str, "_models.LexicalTokenizerName"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. Required. Known values are: \"classic\", \"edgeNGram\", \"keyword_v2\", \"letter\", \"lowercase\", \"microsoft_language_tokenizer\", \"microsoft_language_stemming_tokenizer\", \"nGram\", \"path_hierarchy_v2\", \"pattern\", \"standard_v2\", \"uax_url_email\", and \"whitespace\".""" - token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field(name="tokenFilters") + token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field( + name="tokenFilters", visibility=["read", "create", "update", "delete", "query"] + ) """A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed.""" - char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field(name="charFilters") + char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field( + name="charFilters", visibility=["read", "create", "update", "delete", "query"] + ) """A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed.""" - odata_type: Literal["#Microsoft.Azure.Search.CustomAnalyzer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.CustomAnalyzer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of analyzer. Required. Default value is \"#Microsoft.Azure.Search.CustomAnalyzer\".""" @@ -2132,50 +2284,64 @@ class CustomEntity(_model_base.Model): :vartype aliases: list[~azure.search.documents.models.CustomEntityAlias] """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The top-level entity descriptor. 
Matches in the skill output will be grouped by this name, and it should represent the \"normalized\" form of the text being found. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output.""" - type: Optional[str] = rest_field() + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output.""" - subtype: Optional[str] = rest_field() + subtype: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output.""" - id: Optional[str] = rest_field() + id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output.""" - case_sensitive: Optional[bool] = rest_field(name="caseSensitive") + case_sensitive: Optional[bool] = rest_field( + name="caseSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of \"Microsoft\" could be: microsoft, microSoft, MICROSOFT.""" - accent_sensitive: Optional[bool] = rest_field(name="accentSensitive") + accent_sensitive: Optional[bool] = rest_field( + name="accentSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to accent.""" - fuzzy_edit_distance: Optional[int] = rest_field(name="fuzzyEditDistance") + fuzzy_edit_distance: Optional[int] = rest_field( + name="fuzzyEditDistance", visibility=["read", "create", "update", "delete", "query"] + ) """Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, \"Windows10\" would still match \"Windows\", \"Windows10\" and \"Windows 7\". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do.""" - default_case_sensitive: Optional[bool] = rest_field(name="defaultCaseSensitive") + default_case_sensitive: Optional[bool] = rest_field( + name="defaultCaseSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """Changes the default case sensitivity value for this entity. It be used to change the default value of all aliases caseSensitive values.""" - default_accent_sensitive: Optional[bool] = rest_field(name="defaultAccentSensitive") + default_accent_sensitive: Optional[bool] = rest_field( + name="defaultAccentSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """Changes the default accent sensitivity value for this entity. 
It be used to change the default value of all aliases accentSensitive values.""" - default_fuzzy_edit_distance: Optional[int] = rest_field(name="defaultFuzzyEditDistance") + default_fuzzy_edit_distance: Optional[int] = rest_field( + name="defaultFuzzyEditDistance", visibility=["read", "create", "update", "delete", "query"] + ) """Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values.""" - aliases: Optional[List["_models.CustomEntityAlias"]] = rest_field() + aliases: Optional[List["_models.CustomEntityAlias"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name.""" @@ -2223,13 +2389,19 @@ class CustomEntityAlias(_model_base.Model): :vartype fuzzy_edit_distance: int """ - text: str = rest_field() + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text of the alias. Required.""" - case_sensitive: Optional[bool] = rest_field(name="caseSensitive") + case_sensitive: Optional[bool] = rest_field( + name="caseSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """Determine if the alias is case sensitive.""" - accent_sensitive: Optional[bool] = rest_field(name="accentSensitive") + accent_sensitive: Optional[bool] = rest_field( + name="accentSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """Determine if the alias is accent sensitive.""" - fuzzy_edit_distance: Optional[int] = rest_field(name="fuzzyEditDistance") + fuzzy_edit_distance: Optional[int] = rest_field( + name="fuzzyEditDistance", visibility=["read", "create", "update", "delete", "query"] + ) """Determine the fuzzy edit distance of the alias.""" @overload @@ -2305,27 +2477,37 @@ class CustomEntityLookupSkill(SearchIndexerSkill, discriminator="#Microsoft.Skil """ default_language_code: Optional[Union[str, "_models.CustomEntityLookupSkillLanguage"]] = rest_field( - name="defaultLanguageCode" + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) """A value indicating which language code to use. Default is ``en``. Known values are: \"da\", \"de\", \"en\", \"es\", \"fi\", \"fr\", \"it\", \"ko\", and \"pt\".""" - entities_definition_uri: Optional[str] = rest_field(name="entitiesDefinitionUri") + entities_definition_uri: Optional[str] = rest_field( + name="entitiesDefinitionUri", visibility=["read", "create", "update", "delete", "query"] + ) """Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS.""" - inline_entities_definition: Optional[List["_models.CustomEntity"]] = rest_field(name="inlineEntitiesDefinition") + inline_entities_definition: Optional[List["_models.CustomEntity"]] = rest_field( + name="inlineEntitiesDefinition", visibility=["read", "create", "update", "delete", "query"] + ) """The inline CustomEntity definition.""" - global_default_case_sensitive: Optional[bool] = rest_field(name="globalDefaultCaseSensitive") + global_default_case_sensitive: Optional[bool] = rest_field( + name="globalDefaultCaseSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """A global flag for CaseSensitive. 
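For the custom entity lookup models above, a small sketch of a CustomEntity with aliases; the import path is assumed and the keyword names mirror the fields in this hunk.

from azure.search.documents.indexes.models import CustomEntity, CustomEntityAlias  # assumed path

entity = CustomEntity(
    name="Microsoft",                  # the normalized form reported in the skill output
    description="Software company",    # passthrough metadata echoed with every match
    case_sensitive=False,
    fuzzy_edit_distance=1,             # tolerate one divergent character
    aliases=[
        CustomEntityAlias(text="MSFT", case_sensitive=True),
        CustomEntityAlias(text="Microsft", fuzzy_edit_distance=0),
    ],
)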
If CaseSensitive is not set in CustomEntity, this value will be the default value.""" - global_default_accent_sensitive: Optional[bool] = rest_field(name="globalDefaultAccentSensitive") + global_default_accent_sensitive: Optional[bool] = rest_field( + name="globalDefaultAccentSensitive", visibility=["read", "create", "update", "delete", "query"] + ) """A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value.""" - global_default_fuzzy_edit_distance: Optional[int] = rest_field(name="globalDefaultFuzzyEditDistance") + global_default_fuzzy_edit_distance: Optional[int] = rest_field( + name="globalDefaultFuzzyEditDistance", visibility=["read", "create", "update", "delete", "query"] + ) """A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value.""" - odata_type: Literal["#Microsoft.Skills.Text.CustomEntityLookupSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.CustomEntityLookupSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.CustomEntityLookupSkill\".""" @@ -2373,9 +2555,9 @@ class LexicalNormalizer(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. Required.""" @@ -2424,15 +2606,19 @@ class CustomNormalizer(LexicalNormalizer, discriminator="#Microsoft.Azure.Search :vartype odata_type: str """ - token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field(name="tokenFilters") + token_filters: Optional[List[Union[str, "_models.TokenFilterName"]]] = rest_field( + name="tokenFilters", visibility=["read", "create", "update", "delete", "query"] + ) """A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed.""" - char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field(name="charFilters") + char_filters: Optional[List[Union[str, "_models.CharFilterName"]]] = rest_field( + name="charFilters", visibility=["read", "create", "update", "delete", "query"] + ) """A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed.""" - odata_type: Literal["#Microsoft.Azure.Search.CustomNormalizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.CustomNormalizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of normalizer. Required. 
Default value is \"#Microsoft.Azure.Search.CustomNormalizer\".""" @@ -2468,7 +2654,7 @@ class DataChangeDetectionPolicy(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" @overload @@ -2501,7 +2687,7 @@ class DataDeletionDetectionPolicy(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) """The discriminator for derived types. Required. Default value is None.""" @overload @@ -2532,7 +2718,9 @@ class DataSourceCredentials(_model_base.Model): :vartype connection_string: str """ - connection_string: Optional[str] = rest_field(name="connectionString") + connection_string: Optional[str] = rest_field( + name="connectionString", visibility=["read", "create", "update", "delete", "query"] + ) """The connection string for the datasource. Set to ```` (with brackets) if you don't want the connection string updated. Set to ```` if you want to remove the connection string value from the datasource.""" @@ -2583,7 +2771,7 @@ class DefaultCognitiveServicesAccount( :vartype odata_type: str """ - odata_type: Literal["#Microsoft.Azure.Search.DefaultCognitiveServices"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.DefaultCognitiveServices"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of Azure AI service resource attached to a skillset. Required. Default value is \"#Microsoft.Azure.Search.DefaultCognitiveServices\".""" @@ -2639,21 +2827,29 @@ class DictionaryDecompounderTokenFilter( :vartype odata_type: str """ - word_list: List[str] = rest_field(name="wordList") + word_list: List[str] = rest_field(name="wordList", visibility=["read", "create", "update", "delete", "query"]) """The list of words to match against. Required.""" - min_word_size: Optional[int] = rest_field(name="minWordSize") + min_word_size: Optional[int] = rest_field( + name="minWordSize", visibility=["read", "create", "update", "delete", "query"] + ) """The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300.""" - min_subword_size: Optional[int] = rest_field(name="minSubwordSize") + min_subword_size: Optional[int] = rest_field( + name="minSubwordSize", visibility=["read", "create", "update", "delete", "query"] + ) """The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300.""" - max_subword_size: Optional[int] = rest_field(name="maxSubwordSize") + max_subword_size: Optional[int] = rest_field( + name="maxSubwordSize", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300.""" - only_longest_match: Optional[bool] = rest_field(name="onlyLongestMatch") + only_longest_match: Optional[bool] = rest_field( + name="onlyLongestMatch", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to add only the longest matching subword to the output. 
Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter\".""" @@ -2701,15 +2897,17 @@ class ScoringFunction(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - field_name: str = rest_field(name="fieldName") + field_name: str = rest_field(name="fieldName", visibility=["read", "create", "update", "delete", "query"]) """The name of the field used as input to the scoring function. Required.""" - boost: float = rest_field() + boost: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A multiplier for the raw score. Must be a positive number not equal to 1.0. Required.""" - interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = rest_field() + interpolation: Optional[Union[str, "_models.ScoringFunctionInterpolation"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating how boosting will be interpolated across document scores; defaults to \"Linear\". Known values are: \"linear\", \"constant\", \"quadratic\", and \"logarithmic\".""" - type: str = rest_discriminator(name="type") + type: str = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) """Type of ScoringFunction. Required. Default value is None.""" @overload @@ -2755,9 +2953,11 @@ class DistanceScoringFunction(ScoringFunction, discriminator="distance"): :vartype type: str """ - parameters: "_models.DistanceScoringParameters" = rest_field(name="distance") + parameters: "_models.DistanceScoringParameters" = rest_field( + name="distance", visibility=["read", "create", "update", "delete", "query"] + ) """Parameter values for the distance scoring function. Required.""" - type: Literal["distance"] = rest_discriminator(name="type") # type: ignore + type: Literal["distance"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. Required. Default value is \"distance\".""" @@ -2797,10 +2997,14 @@ class DistanceScoringParameters(_model_base.Model): :vartype boosting_distance: float """ - reference_point_parameter: str = rest_field(name="referencePointParameter") + reference_point_parameter: str = rest_field( + name="referencePointParameter", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the parameter passed in search queries to specify the reference location. Required.""" - boosting_distance: float = rest_field(name="boostingDistance") + boosting_distance: float = rest_field( + name="boostingDistance", visibility=["read", "create", "update", "delete", "query"] + ) """The distance in kilometers from the reference location where the boosting range ends. 
Required.""" @@ -2875,14 +3079,18 @@ class DocumentExtractionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skil :vartype odata_type: str """ - parsing_mode: Optional[str] = rest_field(name="parsingMode") + parsing_mode: Optional[str] = rest_field( + name="parsingMode", visibility=["read", "create", "update", "delete", "query"] + ) """The parsingMode for the skill. Will be set to 'default' if not defined.""" - data_to_extract: Optional[str] = rest_field(name="dataToExtract") - """The type of data to be extracted for the skill. Will be set to - 'contentAndMetadata' if not defined.""" - configuration: Optional[Dict[str, Any]] = rest_field() + data_to_extract: Optional[str] = rest_field( + name="dataToExtract", visibility=["read", "create", "update", "delete", "query"] + ) + """The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not + defined.""" + configuration: Optional[Dict[str, Any]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A dictionary of configurations for the skill.""" - odata_type: Literal["#Microsoft.Skills.Util.DocumentExtractionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Util.DocumentExtractionSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Util.DocumentExtractionSkill\".""" @@ -2950,16 +3158,16 @@ class DocumentIntelligenceLayoutSkill( """ output_mode: Optional[Union[str, "_models.DocumentIntelligenceLayoutSkillOutputMode"]] = rest_field( - name="outputMode" + name="outputMode", visibility=["read", "create", "update", "delete", "query"] ) - """Controls the cardinality of the output produced by the skill. Default is - 'oneToMany'. \"oneToMany\"""" + """Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. + \"oneToMany\"""" markdown_header_depth: Optional[Union[str, "_models.DocumentIntelligenceLayoutSkillMarkdownHeaderDepth"]] = ( - rest_field(name="markdownHeaderDepth") + rest_field(name="markdownHeaderDepth", visibility=["read", "create", "update", "delete", "query"]) ) """The depth of headers in the markdown output. Default is h6. Known values are: \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", and \"h6\".""" - odata_type: Literal["#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. 
Default value is \"#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill\".""" @@ -2998,9 +3206,13 @@ class DocumentKeysOrIds(_model_base.Model): :vartype datasource_document_ids: list[str] """ - document_keys: Optional[List[str]] = rest_field(name="documentKeys") + document_keys: Optional[List[str]] = rest_field( + name="documentKeys", visibility=["read", "create", "update", "delete", "query"] + ) """document keys to be reset.""" - datasource_document_ids: Optional[List[str]] = rest_field(name="datasourceDocumentIds") + datasource_document_ids: Optional[List[str]] = rest_field( + name="datasourceDocumentIds", visibility=["read", "create", "update", "delete", "query"] + ) """datasource document identifiers to be reset.""" @overload @@ -3022,60 +3234,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class EdgeNGramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.EdgeNGramTokenFilter"): - """Generates n-grams of the given size(s) starting from the front or the back of - an input token. This token filter is implemented using Apache Lucene. - - - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and - is limited to 128 characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of - maxGram. - :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. - :vartype max_gram: int - :ivar side: Specifies which side of the input the n-gram should be generated from. Default - is "front". Known values are: "front" and "back". - :vartype side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide - :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value - is "#Microsoft.Azure.Search.EdgeNGramTokenFilter". - :vartype odata_type: str - """ - - min_gram: Optional[int] = rest_field(name="minGram") - """The minimum n-gram length. Default is 1. Must be less than the value of maxGram.""" - max_gram: Optional[int] = rest_field(name="maxGram") - """The maximum n-gram length. Default is 2.""" - side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = rest_field() - """Specifies which side of the input the n-gram should be generated from. Default - is \"front\". Known values are: \"front\" and \"back\".""" - odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """A URI fragment specifying the type of token filter. Required. Default value is - \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\".""" - - @overload - def __init__( - self, - *, - name: str, - min_gram: Optional[int] = None, - max_gram: Optional[int] = None, - side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, odata_type="#Microsoft.Azure.Search.EdgeNGramTokenFilter", **kwargs) - - class EdgeNGramTokenFilterV2(TokenFilter, discriminator="#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"): """Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. 
@@ -3098,15 +3256,17 @@ class EdgeNGramTokenFilterV2(TokenFilter, discriminator="#Microsoft.Azure.Search :vartype odata_type: str """ - min_gram: Optional[int] = rest_field(name="minGram") + min_gram: Optional[int] = rest_field(name="minGram", visibility=["read", "create", "update", "delete", "query"]) """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram.""" - max_gram: Optional[int] = rest_field(name="maxGram") + max_gram: Optional[int] = rest_field(name="maxGram", visibility=["read", "create", "update", "delete", "query"]) """The maximum n-gram length. Default is 2. Maximum is 300.""" - side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = rest_field() + side: Optional[Union[str, "_models.EdgeNGramTokenFilterSide"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Specifies which side of the input the n-gram should be generated from. Default is \"front\". Known values are: \"front\" and \"back\".""" - odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenFilterV2"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\".""" @@ -3152,14 +3312,16 @@ class EdgeNGramTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Searc :vartype odata_type: str """ - min_gram: Optional[int] = rest_field(name="minGram") + min_gram: Optional[int] = rest_field(name="minGram", visibility=["read", "create", "update", "delete", "query"]) """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram.""" - max_gram: Optional[int] = rest_field(name="maxGram") + max_gram: Optional[int] = rest_field(name="maxGram", visibility=["read", "create", "update", "delete", "query"]) """The maximum n-gram length. Default is 2. Maximum is 300.""" - token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = rest_field(name="tokenChars") + token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = rest_field( + name="tokenChars", visibility=["read", "create", "update", "delete", "query"] + ) """Character classes to keep in the tokens.""" - odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.EdgeNGramTokenizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. 
Default value is \"#Microsoft.Azure.Search.EdgeNGramTokenizer\".""" @@ -3200,9 +3362,9 @@ class ElisionTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Eli :vartype odata_type: str """ - articles: Optional[List[str]] = rest_field() + articles: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The set of articles to remove.""" - odata_type: Literal["#Microsoft.Azure.Search.ElisionTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.ElisionTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.ElisionTokenFilter\".""" @@ -3264,17 +3426,23 @@ class EntityLinkingSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Te :vartype odata_type: str """ - default_language_code: Optional[str] = rest_field(name="defaultLanguageCode") + default_language_code: Optional[str] = rest_field( + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating which language code to use. Default is ``en``.""" - minimum_precision: Optional[float] = rest_field(name="minimumPrecision") + minimum_precision: Optional[float] = rest_field( + name="minimumPrecision", visibility=["read", "create", "update", "delete", "query"] + ) """A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included.""" - model_version: Optional[str] = rest_field(name="modelVersion") + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) """The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary.""" - odata_type: Literal["#Microsoft.Skills.Text.V3.EntityLinkingSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.V3.EntityLinkingSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.V3.EntityLinkingSkill\".""" @@ -3348,25 +3516,31 @@ class EntityRecognitionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skill :vartype odata_type: str """ - categories: Optional[List[Union[str, "_models.EntityCategory"]]] = rest_field() + categories: Optional[List[Union[str, "_models.EntityCategory"]]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of entity categories that should be extracted.""" default_language_code: Optional[Union[str, "_models.EntityRecognitionSkillLanguage"]] = rest_field( - name="defaultLanguageCode" + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) """A value indicating which language code to use. Default is ``en``. 
Known values are: \"ar\", \"cs\", \"zh-Hans\", \"zh-Hant\", \"da\", \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"el\", \"hu\", \"it\", \"ja\", \"ko\", \"no\", \"pl\", \"pt-PT\", \"pt-BR\", \"ru\", \"es\", \"sv\", and \"tr\".""" - include_typeless_entities: Optional[bool] = rest_field(name="includeTypelessEntities") + include_typeless_entities: Optional[bool] = rest_field( + name="includeTypelessEntities", visibility=["read", "create", "update", "delete", "query"] + ) """Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced.""" - minimum_precision: Optional[float] = rest_field(name="minimumPrecision") + minimum_precision: Optional[float] = rest_field( + name="minimumPrecision", visibility=["read", "create", "update", "delete", "query"] + ) """A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included.""" - odata_type: Literal["#Microsoft.Skills.Text.EntityRecognitionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.EntityRecognitionSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.EntityRecognitionSkill\".""" @@ -3437,19 +3611,25 @@ class EntityRecognitionSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Ski :vartype odata_type: str """ - categories: Optional[List[str]] = rest_field() + categories: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of entity categories that should be extracted.""" - default_language_code: Optional[str] = rest_field(name="defaultLanguageCode") + default_language_code: Optional[str] = rest_field( + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating which language code to use. Default is ``en``.""" - minimum_precision: Optional[float] = rest_field(name="minimumPrecision") + minimum_precision: Optional[float] = rest_field( + name="minimumPrecision", visibility=["read", "create", "update", "delete", "query"] + ) """A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included.""" - model_version: Optional[str] = rest_field(name="modelVersion") + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) """The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary.""" - odata_type: Literal["#Microsoft.Skills.Text.V3.EntityRecognitionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.V3.EntityRecognitionSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. 
Default value is \"#Microsoft.Skills.Text.V3.EntityRecognitionSkill\".""" @@ -3482,25 +3662,39 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class ErrorAdditionalInfo(_model_base.Model): """The resource management error additional info. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar type: The additional info type. :vartype type: str :ivar info: The additional info. :vartype info: dict[str, str] """ - type: Optional[str] = rest_field(visibility=["read"]) + type: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The additional info type.""" - info: Optional[Dict[str, str]] = rest_field(visibility=["read"]) + info: Optional[Dict[str, str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The additional info.""" + @overload + def __init__( + self, + *, + type: Optional[str] = None, + info: Optional[Dict[str, str]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class ErrorDetail(_model_base.Model): """The error detail. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar code: The error code. :vartype code: str :ivar message: The error message. @@ -3513,19 +3707,42 @@ class ErrorDetail(_model_base.Model): :vartype additional_info: list[~azure.search.documents.models.ErrorAdditionalInfo] """ - code: Optional[str] = rest_field(visibility=["read"]) + code: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The error code.""" - message: Optional[str] = rest_field(visibility=["read"]) + message: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The error message.""" - target: Optional[str] = rest_field(visibility=["read"]) + target: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The error target.""" - details: Optional[List["_models.ErrorDetail"]] = rest_field(visibility=["read"]) + details: Optional[List["_models.ErrorDetail"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The error details.""" additional_info: Optional[List["_models.ErrorAdditionalInfo"]] = rest_field( - name="additionalInfo", visibility=["read"] + name="additionalInfo", visibility=["read", "create", "update", "delete", "query"] ) """The error additional info.""" + @overload + def __init__( + self, + *, + code: Optional[str] = None, + message: Optional[str] = None, + target: Optional[str] = None, + details: Optional[List["_models.ErrorDetail"]] = None, + additional_info: Optional[List["_models.ErrorAdditionalInfo"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class ErrorResponse(_model_base.Model): """Common error response for all Azure Resource Manager APIs to return error @@ -3536,7 +3753,7 @@ class ErrorResponse(_model_base.Model): :vartype error: ~azure.search.documents.models.ErrorDetail """ - error: Optional["_models.ErrorDetail"] = rest_field() + error: Optional["_models.ErrorDetail"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The error object.""" @overload @@ -3573,9 +3790,9 @@ class VectorSearchAlgorithmConfiguration(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name to associate with this particular configuration. Required.""" - kind: str = rest_discriminator(name="kind") + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """Type of VectorSearchAlgorithmConfiguration. Required. Known values are: \"hnsw\" and \"exhaustiveKnn\".""" @@ -3613,9 +3830,11 @@ class ExhaustiveKnnAlgorithmConfiguration(VectorSearchAlgorithmConfiguration, di :vartype kind: str or ~azure.search.documents.models.EXHAUSTIVE_KNN """ - parameters: Optional["_models.ExhaustiveKnnParameters"] = rest_field(name="exhaustiveKnnParameters") + parameters: Optional["_models.ExhaustiveKnnParameters"] = rest_field( + name="exhaustiveKnnParameters", visibility=["read", "create", "update", "delete", "query"] + ) """Contains the parameters specific to exhaustive KNN algorithm.""" - kind: Literal[VectorSearchAlgorithmKind.EXHAUSTIVE_KNN] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchAlgorithmKind.EXHAUSTIVE_KNN] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of algorithm being configured for use with vector search. Required. Exhaustive KNN algorithm which will perform brute-force search.""" @@ -3646,7 +3865,9 @@ class ExhaustiveKnnParameters(_model_base.Model): :vartype metric: str or ~azure.search.documents.models.VectorSearchAlgorithmMetric """ - metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = rest_field() + metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The similarity metric to use for vector comparisons. Known values are: \"cosine\", \"euclidean\", \"dotProduct\", and \"hamming\".""" @@ -3684,7 +3905,7 @@ class FacetResult(_model_base.Model): :vartype facets: dict[str, list[~azure.search.documents.models.FacetResult]] """ - count: Optional[int] = rest_field(visibility=["read"]) + count: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The approximate count of documents falling within the bucket described by this facet.""" facets: Optional[Dict[str, List["_models.FacetResult"]]] = rest_field(name="@search.facets", visibility=["read"]) @@ -3692,6 +3913,23 @@ class FacetResult(_model_base.Model): collection of buckets for each faceted field; null if the query did not contain any nested facets.""" + @overload + def __init__( + self, + *, + count: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. 
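# Illustrative usage sketch for the vector search algorithm models defined
# above; the configuration name "my-exhaustive-knn" and the "cosine" metric are
# example values, and the "kind" discriminator is assumed to be filled in by
# the subclass constructor as in the other discriminated models in this module.
eknn_config = ExhaustiveKnnAlgorithmConfiguration(
    name="my-exhaustive-knn",
    parameters=ExhaustiveKnnParameters(metric="cosine"),
)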
+ :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class FieldMapping(_model_base.Model): """Defines a mapping between a field in a data source and a target field in an @@ -3708,12 +3946,18 @@ class FieldMapping(_model_base.Model): :vartype mapping_function: ~azure.search.documents.models.FieldMappingFunction """ - source_field_name: str = rest_field(name="sourceFieldName") + source_field_name: str = rest_field( + name="sourceFieldName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the field in the data source. Required.""" - target_field_name: Optional[str] = rest_field(name="targetFieldName") + target_field_name: Optional[str] = rest_field( + name="targetFieldName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the target field in the index. Same as the source field name by default.""" - mapping_function: Optional["_models.FieldMappingFunction"] = rest_field(name="mappingFunction") + mapping_function: Optional["_models.FieldMappingFunction"] = rest_field( + name="mappingFunction", visibility=["read", "create", "update", "delete", "query"] + ) """A function to apply to each source field value before indexing.""" @overload @@ -3749,9 +3993,9 @@ class FieldMappingFunction(_model_base.Model): :vartype parameters: dict[str, any] """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the field mapping function. Required.""" - parameters: Optional[Dict[str, Any]] = rest_field() + parameters: Optional[Dict[str, Any]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type.""" @@ -3795,9 +4039,11 @@ class FreshnessScoringFunction(ScoringFunction, discriminator="freshness"): :vartype type: str """ - parameters: "_models.FreshnessScoringParameters" = rest_field(name="freshness") + parameters: "_models.FreshnessScoringParameters" = rest_field( + name="freshness", visibility=["read", "create", "update", "delete", "query"] + ) """Parameter values for the freshness scoring function. Required.""" - type: Literal["freshness"] = rest_discriminator(name="type") # type: ignore + type: Literal["freshness"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. Required. Default value is \"freshness\".""" @@ -3832,7 +4078,9 @@ class FreshnessScoringParameters(_model_base.Model): :vartype boosting_duration: ~datetime.timedelta """ - boosting_duration: datetime.timedelta = rest_field(name="boostingDuration") + boosting_duration: datetime.timedelta = rest_field( + name="boostingDuration", visibility=["read", "create", "update", "delete", "query"] + ) """The expiration period after which boosting will stop for a particular document. Required.""" @overload @@ -3857,8 +4105,6 @@ class GetIndexStatisticsResult(_model_base.Model): """Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar document_count: The number of documents in the index. Required. 
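# Illustrative usage sketch for FieldMapping and FieldMappingFunction above:
# mapping a source column onto an index key field through a mapping function.
# The field names are example values; "base64Encode" is one of the documented
# mapping function names.
key_mapping = FieldMapping(
    source_field_name="metadata_storage_path",
    target_field_name="id",
    mapping_function=FieldMappingFunction(name="base64Encode"),
)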
:vartype document_count: int @@ -3869,13 +4115,34 @@ class GetIndexStatisticsResult(_model_base.Model): :vartype vector_index_size: int """ - document_count: int = rest_field(name="documentCount", visibility=["read"]) + document_count: int = rest_field(name="documentCount", visibility=["read", "create", "update", "delete", "query"]) """The number of documents in the index. Required.""" - storage_size: int = rest_field(name="storageSize", visibility=["read"]) + storage_size: int = rest_field(name="storageSize", visibility=["read", "create", "update", "delete", "query"]) """The amount of storage in bytes consumed by the index. Required.""" - vector_index_size: int = rest_field(name="vectorIndexSize", visibility=["read"]) + vector_index_size: int = rest_field( + name="vectorIndexSize", visibility=["read", "create", "update", "delete", "query"] + ) """The amount of memory in bytes consumed by vectors in the index. Required.""" + @overload + def __init__( + self, + *, + document_count: int, + storage_size: int, + vector_index_size: int, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class HighWaterMarkChangeDetectionPolicy( DataChangeDetectionPolicy, discriminator="#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy" @@ -3891,9 +4158,11 @@ class HighWaterMarkChangeDetectionPolicy( :vartype odata_type: str """ - high_water_mark_column_name: str = rest_field(name="highWaterMarkColumnName") + high_water_mark_column_name: str = rest_field( + name="highWaterMarkColumnName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the high water mark column. Required.""" - odata_type: Literal["#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of data change detection policy. Required. Default value is \"#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy\".""" @@ -3931,9 +4200,11 @@ class HnswAlgorithmConfiguration(VectorSearchAlgorithmConfiguration, discriminat :vartype kind: str or ~azure.search.documents.models.HNSW """ - parameters: Optional["_models.HnswParameters"] = rest_field(name="hnswParameters") + parameters: Optional["_models.HnswParameters"] = rest_field( + name="hnswParameters", visibility=["read", "create", "update", "delete", "query"] + ) """Contains the parameters specific to HNSW algorithm.""" - kind: Literal[VectorSearchAlgorithmKind.HNSW] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchAlgorithmKind.HNSW] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of algorithm being configured for use with vector search. Required. 
HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm.""" @@ -3981,22 +4252,26 @@ class HnswParameters(_model_base.Model): :vartype metric: str or ~azure.search.documents.models.VectorSearchAlgorithmMetric """ - m: Optional[int] = rest_field() + m: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of bi-directional links created for every new element during construction. Increasing this parameter value may improve recall and reduce retrieval times for datasets with high intrinsic dimensionality at the expense of increased memory consumption and longer indexing time.""" - ef_construction: Optional[int] = rest_field(name="efConstruction") + ef_construction: Optional[int] = rest_field( + name="efConstruction", visibility=["read", "create", "update", "delete", "query"] + ) """The size of the dynamic list containing the nearest neighbors, which is used during index time. Increasing this parameter may improve index quality, at the expense of increased indexing time. At a certain point, increasing this parameter leads to diminishing returns.""" - ef_search: Optional[int] = rest_field(name="efSearch") + ef_search: Optional[int] = rest_field(name="efSearch", visibility=["read", "create", "update", "delete", "query"]) """The size of the dynamic list containing the nearest neighbors, which is used during search time. Increasing this parameter may improve search results, at the expense of slower search. At a certain point, increasing this parameter leads to diminishing returns.""" - metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = rest_field() + metric: Optional[Union[str, "_models.VectorSearchAlgorithmMetric"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The similarity metric to use for vector comparisons. Known values are: \"cosine\", \"euclidean\", \"dotProduct\", and \"hamming\".""" @@ -4040,7 +4315,9 @@ class HybridSearch(_model_base.Model): :vartype count_and_facet_mode: str or ~azure.search.documents.models.HybridCountAndFacetMode """ - max_text_recall_size: Optional[int] = rest_field(name="maxTextRecallSize") + max_text_recall_size: Optional[int] = rest_field( + name="maxTextRecallSize", visibility=["read", "create", "update", "delete", "query"] + ) """Determines the maximum number of documents to be retrieved by the text query portion of a hybrid search request. Those documents will be combined with the documents matching the vector queries to produce a single final list of @@ -4048,7 +4325,9 @@ class HybridSearch(_model_base.Model): paging through more documents (using the top and skip parameters), at the cost of higher resource utilization and higher latency. The value needs to be between 1 and 10,000. Default is 1000.""" - count_and_facet_mode: Optional[Union[str, "_models.HybridCountAndFacetMode"]] = rest_field(name="countAndFacetMode") + count_and_facet_mode: Optional[Union[str, "_models.HybridCountAndFacetMode"]] = rest_field( + name="countAndFacetMode", visibility=["read", "create", "update", "delete", "query"] + ) """Determines whether the count and facets should includes all documents that matched the search query, or only the documents that are retrieved within the 'maxTextRecallSize' window. 
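# Illustrative usage sketch for HnswAlgorithmConfiguration and HnswParameters
# above; the numeric values are example values, not service defaults.
hnsw_config = HnswAlgorithmConfiguration(
    name="my-hnsw",
    parameters=HnswParameters(m=4, ef_construction=400, ef_search=500, metric="cosine"),
)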
Known values are: \"countRetrievableResults\" and @@ -4113,7 +4392,7 @@ class ImageAnalysisSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vi """ default_language_code: Optional[Union[str, "_models.ImageAnalysisSkillLanguage"]] = rest_field( - name="defaultLanguageCode" + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) """A value indicating which language code to use. Default is ``en``. Known values are: \"ar\", \"az\", \"bg\", \"bs\", \"ca\", \"cs\", \"cy\", \"da\", \"de\", \"el\", \"en\", \"es\", \"et\", @@ -4121,11 +4400,15 @@ class ImageAnalysisSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vi \"kk\", \"ko\", \"lt\", \"lv\", \"mk\", \"ms\", \"nb\", \"nl\", \"pl\", \"prs\", \"pt-BR\", \"pt\", \"pt-PT\", \"ro\", \"ru\", \"sk\", \"sl\", \"sr-Cyrl\", \"sr-Latn\", \"sv\", \"th\", \"tr\", \"uk\", \"vi\", \"zh\", \"zh-Hans\", and \"zh-Hant\".""" - visual_features: Optional[List[Union[str, "_models.VisualFeature"]]] = rest_field(name="visualFeatures") + visual_features: Optional[List[Union[str, "_models.VisualFeature"]]] = rest_field( + name="visualFeatures", visibility=["read", "create", "update", "delete", "query"] + ) """A list of visual features.""" - details: Optional[List[Union[str, "_models.ImageDetail"]]] = rest_field() + details: Optional[List[Union[str, "_models.ImageDetail"]]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A string indicating which domain-specific details to return.""" - odata_type: Literal["#Microsoft.Skills.Vision.ImageAnalysisSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Vision.ImageAnalysisSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Vision.ImageAnalysisSkill\".""" @@ -4162,7 +4445,9 @@ class IndexAction(_model_base.Model): :vartype action_type: str or ~azure.search.documents.models.IndexActionType """ - action_type: Optional[Union[str, "_models.IndexActionType"]] = rest_field(name="@search.action") + action_type: Optional[Union[str, "_models.IndexActionType"]] = rest_field( + name="@search.action", visibility=["read", "create", "update", "delete", "query"] + ) """The operation to perform on a document in an indexing batch. Known values are: \"upload\", \"merge\", \"mergeOrUpload\", and \"delete\".""" @@ -4193,7 +4478,9 @@ class IndexBatch(_model_base.Model): :vartype actions: list[~azure.search.documents.models.IndexAction] """ - actions: List["_models.IndexAction"] = rest_field(name="value") + actions: List["_models.IndexAction"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The actions in the batch. Required.""" @overload @@ -4218,17 +4505,34 @@ class IndexDocumentsResult(_model_base.Model): """Response containing the status of operations for all documents in the indexing request. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar results: The list of status information for each document in the indexing request. Required. 
:vartype results: list[~azure.search.documents.models.IndexingResult] """ - results: List["_models.IndexingResult"] = rest_field(name="value", visibility=["read"]) + results: List["_models.IndexingResult"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The list of status information for each document in the indexing request. Required.""" + @overload + def __init__( + self, + *, + results: List["_models.IndexingResult"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class IndexerCurrentState(_model_base.Model): """Represents all of the state that defines and dictates the indexer's current @@ -4339,7 +4643,9 @@ class IndexerExecutionResult(_model_base.Model): :vartype final_tracking_state: str """ - status: Union[str, "_models.IndexerExecutionStatus"] = rest_field(visibility=["read"]) + status: Union[str, "_models.IndexerExecutionStatus"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The outcome of this indexer execution. Required. Known values are: \"transientFailure\", \"success\", \"inProgress\", and \"reset\".""" status_detail: Optional[Union[str, "_models.IndexerExecutionStatusDetail"]] = rest_field( @@ -4348,27 +4654,65 @@ class IndexerExecutionResult(_model_base.Model): """The outcome of this indexer execution. \"resetDocs\"""" current_state: Optional["_models.IndexerCurrentState"] = rest_field(name="currentState", visibility=["read"]) """All of the state that defines and dictates the indexer's current execution.""" - error_message: Optional[str] = rest_field(name="errorMessage", visibility=["read"]) + error_message: Optional[str] = rest_field( + name="errorMessage", visibility=["read", "create", "update", "delete", "query"] + ) """The error message indicating the top-level error, if any.""" - start_time: Optional[datetime.datetime] = rest_field(name="startTime", visibility=["read"], format="rfc3339") + start_time: Optional[datetime.datetime] = rest_field( + name="startTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """The start time of this indexer execution.""" - end_time: Optional[datetime.datetime] = rest_field(name="endTime", visibility=["read"], format="rfc3339") + end_time: Optional[datetime.datetime] = rest_field( + name="endTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """The end time of this indexer execution, if the execution has already completed.""" - errors: List["_models.SearchIndexerError"] = rest_field(visibility=["read"]) + errors: List["_models.SearchIndexerError"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The item-level indexing errors. Required.""" - warnings: List["_models.SearchIndexerWarning"] = rest_field(visibility=["read"]) + warnings: List["_models.SearchIndexerWarning"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The item-level indexing warnings. Required.""" - item_count: int = rest_field(name="itemsProcessed", visibility=["read"]) + item_count: int = rest_field(name="itemsProcessed", visibility=["read", "create", "update", "delete", "query"]) """The number of items that were processed during this indexer execution. 
This includes both successfully processed items and items where indexing was attempted but failed. Required.""" - failed_item_count: int = rest_field(name="itemsFailed", visibility=["read"]) + failed_item_count: int = rest_field(name="itemsFailed", visibility=["read", "create", "update", "delete", "query"]) """The number of items that failed to be indexed during this indexer execution. Required.""" - initial_tracking_state: Optional[str] = rest_field(name="initialTrackingState", visibility=["read"]) + initial_tracking_state: Optional[str] = rest_field( + name="initialTrackingState", visibility=["read", "create", "update", "delete", "query"] + ) """Change tracking state with which an indexer execution started.""" - final_tracking_state: Optional[str] = rest_field(name="finalTrackingState", visibility=["read"]) + final_tracking_state: Optional[str] = rest_field( + name="finalTrackingState", visibility=["read", "create", "update", "delete", "query"] + ) """Change tracking state with which an indexer execution finished.""" + @overload + def __init__( + self, + *, + status: Union[str, "_models.IndexerExecutionStatus"], + errors: List["_models.SearchIndexerError"], + warnings: List["_models.SearchIndexerWarning"], + item_count: int, + failed_item_count: int, + error_message: Optional[str] = None, + start_time: Optional[datetime.datetime] = None, + end_time: Optional[datetime.datetime] = None, + initial_tracking_state: Optional[str] = None, + final_tracking_state: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class IndexingParameters(_model_base.Model): """Represents parameters for indexer execution. @@ -4392,17 +4736,23 @@ class IndexingParameters(_model_base.Model): :vartype configuration: ~azure.search.documents.models.IndexingParametersConfiguration """ - batch_size: Optional[int] = rest_field(name="batchSize") + batch_size: Optional[int] = rest_field(name="batchSize", visibility=["read", "create", "update", "delete", "query"]) """The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type.""" - max_failed_items: Optional[int] = rest_field(name="maxFailedItems") + max_failed_items: Optional[int] = rest_field( + name="maxFailedItems", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0.""" - max_failed_items_per_batch: Optional[int] = rest_field(name="maxFailedItemsPerBatch") + max_failed_items_per_batch: Optional[int] = rest_field( + name="maxFailedItemsPerBatch", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0.""" - configuration: Optional["_models.IndexingParametersConfiguration"] = rest_field() + configuration: Optional["_models.IndexingParametersConfiguration"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. 
Each value must be of a primitive type.""" @@ -4517,82 +4867,110 @@ class IndexingParametersConfiguration(_model_base.Model): :vartype query_timeout: str """ - parsing_mode: Optional[Union[str, "_models.BlobIndexerParsingMode"]] = rest_field(name="parsingMode") + parsing_mode: Optional[Union[str, "_models.BlobIndexerParsingMode"]] = rest_field( + name="parsingMode", visibility=["read", "create", "update", "delete", "query"] + ) """Represents the parsing mode for indexing from an Azure blob data source. Known values are: \"default\", \"text\", \"delimitedText\", \"json\", \"jsonArray\", \"jsonLines\", and \"markdown\".""" - excluded_file_name_extensions: Optional[str] = rest_field(name="excludedFileNameExtensions") + excluded_file_name_extensions: Optional[str] = rest_field( + name="excludedFileNameExtensions", visibility=["read", "create", "update", "delete", "query"] + ) """Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude \".png, .mp4\" to skip over those files during indexing.""" - indexed_file_name_extensions: Optional[str] = rest_field(name="indexedFileNameExtensions") + indexed_file_name_extensions: Optional[str] = rest_field( + name="indexedFileNameExtensions", visibility=["read", "create", "update", "delete", "query"] + ) """Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files \".docx, .pptx, .msg\" to specifically include those file types.""" - fail_on_unsupported_content_type: Optional[bool] = rest_field(name="failOnUnsupportedContentType") + fail_on_unsupported_content_type: Optional[bool] = rest_field( + name="failOnUnsupportedContentType", visibility=["read", "create", "update", "delete", "query"] + ) """For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance.""" - fail_on_unprocessable_document: Optional[bool] = rest_field(name="failOnUnprocessableDocument") + fail_on_unprocessable_document: Optional[bool] = rest_field( + name="failOnUnprocessableDocument", visibility=["read", "create", "update", "delete", "query"] + ) """For Azure blobs, set to false if you want to continue indexing if a document fails indexing.""" index_storage_metadata_only_for_oversized_documents: Optional[bool] = rest_field( - name="indexStorageMetadataOnlyForOversizedDocuments" + name="indexStorageMetadataOnlyForOversizedDocuments", visibility=["read", "create", "update", "delete", "query"] ) """For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. 
For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity.""" - delimited_text_headers: Optional[str] = rest_field(name="delimitedTextHeaders") + delimited_text_headers: Optional[str] = rest_field( + name="delimitedTextHeaders", visibility=["read", "create", "update", "delete", "query"] + ) """For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index.""" - delimited_text_delimiter: Optional[str] = rest_field(name="delimitedTextDelimiter") + delimited_text_delimiter: Optional[str] = rest_field( + name="delimitedTextDelimiter", visibility=["read", "create", "update", "delete", "query"] + ) """For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, \"|\").""" - first_line_contains_headers: Optional[bool] = rest_field(name="firstLineContainsHeaders") + first_line_contains_headers: Optional[bool] = rest_field( + name="firstLineContainsHeaders", visibility=["read", "create", "update", "delete", "query"] + ) """For CSV blobs, indicates that the first (non-blank) line of each blob contains headers.""" markdown_parsing_submode: Optional[Union[str, "_models.MarkdownParsingSubmode"]] = rest_field( - name="markdownParsingSubmode" + name="markdownParsingSubmode", visibility=["read", "create", "update", "delete", "query"] ) """Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is ``oneToMany``. Known values are: \"oneToMany\" and \"oneToOne\".""" - markdown_header_depth: Optional[Union[str, "_models.MarkdownHeaderDepth"]] = rest_field(name="markdownHeaderDepth") + markdown_header_depth: Optional[Union[str, "_models.MarkdownHeaderDepth"]] = rest_field( + name="markdownHeaderDepth", visibility=["read", "create", "update", "delete", "query"] + ) """Specifies the max header depth that will be considered while grouping markdown content. Default is ``h6``. Known values are: \"h1\", \"h2\", \"h3\", \"h4\", \"h5\", and \"h6\".""" - document_root: Optional[str] = rest_field(name="documentRoot") + document_root: Optional[str] = rest_field( + name="documentRoot", visibility=["read", "create", "update", "delete", "query"] + ) """For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property.""" - data_to_extract: Optional[Union[str, "_models.BlobIndexerDataToExtract"]] = rest_field(name="dataToExtract") + data_to_extract: Optional[Union[str, "_models.BlobIndexerDataToExtract"]] = rest_field( + name="dataToExtract", visibility=["read", "create", "update", "delete", "query"] + ) """Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when \"imageAction\" is set to a value other than \"none\". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. Known values are: \"storageMetadata\", \"allMetadata\", and \"contentAndMetadata\".""" - image_action: Optional[Union[str, "_models.BlobIndexerImageAction"]] = rest_field(name="imageAction") + image_action: Optional[Union[str, "_models.BlobIndexerImageAction"]] = rest_field( + name="imageAction", visibility=["read", "create", "update", "delete", "query"] + ) """Determines how to process embedded images and image files in Azure blob storage. 
Setting the \"imageAction\" configuration to any value other than \"none\" requires that a skillset also be attached to that indexer. Known values are: \"none\", \"generateNormalizedImages\", and \"generateNormalizedImagePerPage\".""" - allow_skillset_to_read_file_data: Optional[bool] = rest_field(name="allowSkillsetToReadFileData") + allow_skillset_to_read_file_data: Optional[bool] = rest_field( + name="allowSkillsetToReadFileData", visibility=["read", "create", "update", "delete", "query"] + ) """If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill.""" pdf_text_rotation_algorithm: Optional[Union[str, "_models.BlobIndexerPDFTextRotationAlgorithm"]] = rest_field( - name="pdfTextRotationAlgorithm" + name="pdfTextRotationAlgorithm", visibility=["read", "create", "update", "delete", "query"] ) """Determines algorithm for text extraction from PDF files in Azure blob storage. Known values are: \"none\" and \"detectAngles\".""" execution_environment: Optional[Union[str, "_models.IndexerExecutionEnvironment"]] = rest_field( - name="executionEnvironment" + name="executionEnvironment", visibility=["read", "create", "update", "delete", "query"] ) """Specifies the environment in which the indexer should execute. Known values are: \"standard\" and \"private\".""" - query_timeout: Optional[str] = rest_field(name="queryTimeout") + query_timeout: Optional[str] = rest_field( + name="queryTimeout", visibility=["read", "create", "update", "delete", "query"] + ) """Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format \"hh:mm:ss\".""" @@ -4634,8 +5012,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class IndexingResult(_model_base.Model): """Status of an indexing operation for a single document. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar key: The key of a document that was in the indexing request. Required. :vartype key: str @@ -4655,21 +5031,43 @@ class IndexingResult(_model_base.Model): :vartype status_code: int """ - key: str = rest_field(visibility=["read"]) + key: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The key of a document that was in the indexing request. Required.""" - error_message: Optional[str] = rest_field(name="errorMessage", visibility=["read"]) + error_message: Optional[str] = rest_field( + name="errorMessage", visibility=["read", "create", "update", "delete", "query"] + ) """The error message explaining why the indexing operation failed for the document identified by the key; null if indexing succeeded.""" - succeeded: bool = rest_field(name="status", visibility=["read"]) + succeeded: bool = rest_field(name="status", visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether the indexing operation succeeded for the document identified by the key. Required.""" - status_code: int = rest_field(name="statusCode", visibility=["read"]) + status_code: int = rest_field(name="statusCode", visibility=["read", "create", "update", "delete", "query"]) """The status code of the indexing operation. 
Possible values include: 200 for a successful update or delete, 201 for successful document creation, 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. Required.""" + @overload + def __init__( + self, + *, + key: str, + succeeded: bool, + status_code: int, + error_message: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class IndexingSchedule(_model_base.Model): """Represents a schedule for indexer execution. @@ -4681,9 +5079,11 @@ class IndexingSchedule(_model_base.Model): :vartype start_time: ~datetime.datetime """ - interval: datetime.timedelta = rest_field() + interval: datetime.timedelta = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The interval of time between indexer executions. Required.""" - start_time: Optional[datetime.datetime] = rest_field(name="startTime", format="rfc3339") + start_time: Optional[datetime.datetime] = rest_field( + name="startTime", visibility=["read", "create", "update", "delete", "query"], format="rfc3339" + ) """The time when an indexer should start running.""" @overload @@ -4705,6 +5105,33 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) +class IndexStatisticsSummary(_model_base.Model): + """Statistics for a given index. Statistics are collected periodically and are not guaranteed to + always be up-to-date. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar name: The name of the index. Required. + :vartype name: str + :ivar document_count: The number of documents in the index. Required. + :vartype document_count: int + :ivar storage_size: The amount of storage in bytes consumed by the index. Required. + :vartype storage_size: int + :ivar vector_index_size: The amount of memory in bytes consumed by vectors in the index. + :vartype vector_index_size: int + """ + + name: str = rest_field(visibility=["read"]) + """The name of the index. Required.""" + document_count: int = rest_field(name="documentCount", visibility=["read"]) + """The number of documents in the index. Required.""" + storage_size: int = rest_field(name="storageSize", visibility=["read"]) + """The amount of storage in bytes consumed by the index. Required.""" + vector_index_size: Optional[int] = rest_field(name="vectorIndexSize", visibility=["read"]) + """The amount of memory in bytes consumed by vectors in the index.""" + + class InputFieldMappingEntry(_model_base.Model): """Input field mapping for a skill. @@ -4719,13 +5146,17 @@ class InputFieldMappingEntry(_model_base.Model): :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the input. 
Required.""" - source: Optional[str] = rest_field() + source: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The source of the input.""" - source_context: Optional[str] = rest_field(name="sourceContext") + source_context: Optional[str] = rest_field( + name="sourceContext", visibility=["read", "create", "update", "delete", "query"] + ) """The source context used for selecting recursive inputs.""" - inputs: Optional[List["_models.InputFieldMappingEntry"]] = rest_field() + inputs: Optional[List["_models.InputFieldMappingEntry"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The recursive inputs used when creating a complex type.""" @overload @@ -4768,11 +5199,13 @@ class KeepTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.KeepTo :vartype odata_type: str """ - keep_words: List[str] = rest_field(name="keepWords") + keep_words: List[str] = rest_field(name="keepWords", visibility=["read", "create", "update", "delete", "query"]) """The list of words to keep. Required.""" - lower_case_keep_words: Optional[bool] = rest_field(name="keepWordsCase") + lower_case_keep_words: Optional[bool] = rest_field( + name="keepWordsCase", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to lower case all words first. Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.KeepTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.KeepTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.KeepTokenFilter\".""" @@ -4838,19 +5271,23 @@ class KeyPhraseExtractionSkill(SearchIndexerSkill, discriminator="#Microsoft.Ski """ default_language_code: Optional[Union[str, "_models.KeyPhraseExtractionSkillLanguage"]] = rest_field( - name="defaultLanguageCode" + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) """A value indicating which language code to use. Default is ``en``. Known values are: \"da\", \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"it\", \"ja\", \"ko\", \"no\", \"pl\", \"pt-PT\", \"pt-BR\", \"ru\", \"es\", and \"sv\".""" - max_key_phrase_count: Optional[int] = rest_field(name="maxKeyPhraseCount") + max_key_phrase_count: Optional[int] = rest_field( + name="maxKeyPhraseCount", visibility=["read", "create", "update", "delete", "query"] + ) """A number indicating how many key phrases to return. If absent, all identified key phrases will be returned.""" - model_version: Optional[str] = rest_field(name="modelVersion") + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) """The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary.""" - odata_type: Literal["#Microsoft.Skills.Text.KeyPhraseExtractionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.KeyPhraseExtractionSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. 
Default value is \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\".""" @@ -4898,12 +5335,14 @@ class KeywordMarkerTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Sear :vartype odata_type: str """ - keywords: List[str] = rest_field() + keywords: List[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of words to mark as keywords. Required.""" - ignore_case: Optional[bool] = rest_field(name="ignoreCase") + ignore_case: Optional[bool] = rest_field( + name="ignoreCase", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.KeywordMarkerTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.KeywordMarkerTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.KeywordMarkerTokenFilter\".""" @@ -4927,47 +5366,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, odata_type="#Microsoft.Azure.Search.KeywordMarkerTokenFilter", **kwargs) -class KeywordTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.KeywordTokenizer"): - """Emits the entire input as a single token. This tokenizer is implemented using - Apache Lucene. - - - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. Required. - :vartype name: str - :ivar buffer_size: The read buffer size in bytes. Default is 256. - :vartype buffer_size: int - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is - "#Microsoft.Azure.Search.KeywordTokenizer". - :vartype odata_type: str - """ - - buffer_size: Optional[int] = rest_field(name="bufferSize") - """The read buffer size in bytes. Default is 256.""" - odata_type: Literal["#Microsoft.Azure.Search.KeywordTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """A URI fragment specifying the type of tokenizer. Required. Default value is - \"#Microsoft.Azure.Search.KeywordTokenizer\".""" - - @overload - def __init__( - self, - *, - name: str, - buffer_size: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. - :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, odata_type="#Microsoft.Azure.Search.KeywordTokenizer", **kwargs) - - class KeywordTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.KeywordTokenizerV2"): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. @@ -4986,10 +5384,12 @@ class KeywordTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azure.Searc :vartype odata_type: str """ - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Default is 256. Tokens longer than the maximum length are split. 
The maximum token length that can be used is 300 characters.""" - odata_type: Literal["#Microsoft.Azure.Search.KeywordTokenizerV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.KeywordTokenizerV2"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. Default value is \"#Microsoft.Azure.Search.KeywordTokenizerV2\".""" @@ -5050,14 +5450,18 @@ class LanguageDetectionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skill :vartype odata_type: str """ - default_country_hint: Optional[str] = rest_field(name="defaultCountryHint") + default_country_hint: Optional[str] = rest_field( + name="defaultCountryHint", visibility=["read", "create", "update", "delete", "query"] + ) """A country code to use as a hint to the language detection model if it cannot disambiguate the language.""" - model_version: Optional[str] = rest_field(name="modelVersion") + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) """The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary.""" - odata_type: Literal["#Microsoft.Skills.Text.LanguageDetectionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.LanguageDetectionSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.LanguageDetectionSkill\".""" @@ -5104,12 +5508,12 @@ class LengthTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Leng :vartype odata_type: str """ - min_length: Optional[int] = rest_field(name="min") + min_length: Optional[int] = rest_field(name="min", visibility=["read", "create", "update", "delete", "query"]) """The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max.""" - max_length: Optional[int] = rest_field(name="max") + max_length: Optional[int] = rest_field(name="max", visibility=["read", "create", "update", "delete", "query"]) """The maximum length in characters. Default and maximum is 300.""" - odata_type: Literal["#Microsoft.Azure.Search.LengthTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.LengthTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.LengthTokenFilter\".""" @@ -5153,12 +5557,16 @@ class LimitTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Limit :vartype odata_type: str """ - max_token_count: Optional[int] = rest_field(name="maxTokenCount") + max_token_count: Optional[int] = rest_field( + name="maxTokenCount", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum number of tokens to produce. 
Default is 1.""" - consume_all_tokens: Optional[bool] = rest_field(name="consumeAllTokens") + consume_all_tokens: Optional[bool] = rest_field( + name="consumeAllTokens", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.LimitTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.LimitTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.LimitTokenFilter\".""" @@ -5186,98 +5594,133 @@ class ListDataSourcesResult(_model_base.Model): """Response from a List Datasources request. If successful, it includes the full definitions of all datasources. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar data_sources: The datasources in the Search service. Required. :vartype data_sources: list[~azure.search.documents.models.SearchIndexerDataSource] """ - data_sources: List["_models.SearchIndexerDataSource"] = rest_field(name="value", visibility=["read"]) + data_sources: List["_models.SearchIndexerDataSource"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The datasources in the Search service. Required.""" + @overload + def __init__( + self, + *, + data_sources: List["_models.SearchIndexerDataSource"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class ListIndexersResult(_model_base.Model): """Response from a List Indexers request. If successful, it includes the full definitions of all indexers. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar indexers: The indexers in the Search service. Required. :vartype indexers: list[~azure.search.documents.models.SearchIndexer] """ - indexers: List["_models.SearchIndexer"] = rest_field(name="value", visibility=["read"]) + indexers: List["_models.SearchIndexer"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The indexers in the Search service. Required.""" + @overload + def __init__( + self, + *, + indexers: List["_models.SearchIndexer"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + + +class ListIndexStatsSummary(_model_base.Model): + """Response from a request to retrieve stats summary of all indexes. If successful, it includes + the stats of each index in the service. + + Readonly variables are only populated by the server, and will be ignored when sending a request. + + + :ivar indexes_statistics: The Statistics summary of all indexes in the Search service. + Required. 
+ :vartype indexes_statistics: list[~azure.search.documents.models.IndexStatisticsSummary] + """ + + indexes_statistics: List["_models.IndexStatisticsSummary"] = rest_field(name="value", visibility=["read"]) + """The Statistics summary of all indexes in the Search service. Required.""" + class ListSkillsetsResult(_model_base.Model): """Response from a list skillset request. If successful, it includes the full definitions of all skillsets. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar skillsets: The skillsets defined in the Search service. Required. :vartype skillsets: list[~azure.search.documents.models.SearchIndexerSkillset] """ - skillsets: List["_models.SearchIndexerSkillset"] = rest_field(name="value", visibility=["read"]) + skillsets: List["_models.SearchIndexerSkillset"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The skillsets defined in the Search service. Required.""" + @overload + def __init__( + self, + *, + skillsets: List["_models.SearchIndexerSkillset"], + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class ListSynonymMapsResult(_model_base.Model): """Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar synonym_maps: The synonym maps in the Search service. Required. :vartype synonym_maps: list[~azure.search.documents.models.SynonymMap] """ - synonym_maps: List["_models.SynonymMap"] = rest_field(name="value", visibility=["read"]) + synonym_maps: List["_models.SynonymMap"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The synonym maps in the Search service. Required.""" - -class LuceneStandardAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.StandardAnalyzer"): - """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase - filter and stop filter. - - - :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes - or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. Required. - :vartype name: str - :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length - are split. The maximum token length that can be used is 300 characters. - :vartype max_token_length: int - :ivar stopwords: A list of stopwords. - :vartype stopwords: list[str] - :ivar odata_type: A URI fragment specifying the type of analyzer. Required. Default value is - "#Microsoft.Azure.Search.StandardAnalyzer". - :vartype odata_type: str - """ - - max_token_length: Optional[int] = rest_field(name="maxTokenLength") - """The maximum token length. Default is 255. Tokens longer than the maximum length - are split. The maximum token length that can be used is 300 characters.""" - stopwords: Optional[List[str]] = rest_field() - """A list of stopwords.""" - odata_type: Literal["#Microsoft.Azure.Search.StandardAnalyzer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """A URI fragment specifying the type of analyzer. Required. 
Default value is - \"#Microsoft.Azure.Search.StandardAnalyzer\".""" - @overload def __init__( self, *, - name: str, - max_token_length: Optional[int] = None, - stopwords: Optional[List[str]] = None, + synonym_maps: List["_models.SynonymMap"], ) -> None: ... @overload @@ -5288,33 +5731,43 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, odata_type="#Microsoft.Azure.Search.StandardAnalyzer", **kwargs) + super().__init__(*args, **kwargs) -class LuceneStandardTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.StandardTokenizer"): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is - implemented using Apache Lucene. +class LookupDocument(_model_base.Model): + """A document retrieved via a document lookup operation.""" - :ivar name: The name of the tokenizer. It must only contain letters, digits, spaces, dashes +class LuceneStandardAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.StandardAnalyzer"): + """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase + filter and stop filter. + + + :ivar name: The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. Required. :vartype name: str :ivar max_token_length: The maximum token length. Default is 255. Tokens longer than the maximum length - are split. + are split. The maximum token length that can be used is 300 characters. :vartype max_token_length: int - :ivar odata_type: A URI fragment specifying the type of tokenizer. Required. Default value is - "#Microsoft.Azure.Search.StandardTokenizer". + :ivar stopwords: A list of stopwords. + :vartype stopwords: list[str] + :ivar odata_type: A URI fragment specifying the type of analyzer. Required. Default value is + "#Microsoft.Azure.Search.StandardAnalyzer". :vartype odata_type: str """ - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Default is 255. Tokens longer than the maximum length - are split.""" - odata_type: Literal["#Microsoft.Azure.Search.StandardTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """A URI fragment specifying the type of tokenizer. Required. Default value is - \"#Microsoft.Azure.Search.StandardTokenizer\".""" + are split. The maximum token length that can be used is 300 characters.""" + stopwords: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A list of stopwords.""" + odata_type: Literal["#Microsoft.Azure.Search.StandardAnalyzer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """A URI fragment specifying the type of analyzer. Required. Default value is + \"#Microsoft.Azure.Search.StandardAnalyzer\".""" @overload def __init__( @@ -5322,6 +5775,7 @@ def __init__( *, name: str, max_token_length: Optional[int] = None, + stopwords: Optional[List[str]] = None, ) -> None: ... 
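# Illustrative usage sketch for LuceneStandardAnalyzer above: a standard
# analyzer with a raised token length and a small stopword list; the name and
# values are example values.
standard_analyzer = LuceneStandardAnalyzer(
    name="my-standard-analyzer",
    max_token_length=300,
    stopwords=["the", "and"],
)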
@overload @@ -5332,7 +5786,7 @@ def __init__(self, mapping: Mapping[str, Any]) -> None: """ def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, odata_type="#Microsoft.Azure.Search.StandardTokenizer", **kwargs) + super().__init__(*args, odata_type="#Microsoft.Azure.Search.StandardAnalyzer", **kwargs) class LuceneStandardTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.StandardTokenizerV2"): @@ -5353,10 +5807,12 @@ class LuceneStandardTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azur :vartype odata_type: str """ - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters.""" - odata_type: Literal["#Microsoft.Azure.Search.StandardTokenizerV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.StandardTokenizerV2"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. Default value is \"#Microsoft.Azure.Search.StandardTokenizerV2\".""" @@ -5400,9 +5856,11 @@ class MagnitudeScoringFunction(ScoringFunction, discriminator="magnitude"): :vartype type: str """ - parameters: "_models.MagnitudeScoringParameters" = rest_field(name="magnitude") + parameters: "_models.MagnitudeScoringParameters" = rest_field( + name="magnitude", visibility=["read", "create", "update", "delete", "query"] + ) """Parameter values for the magnitude scoring function. Required.""" - type: Literal["magnitude"] = rest_discriminator(name="type") # type: ignore + type: Literal["magnitude"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. Required. Default value is \"magnitude\".""" @@ -5442,11 +5900,17 @@ class MagnitudeScoringParameters(_model_base.Model): :vartype should_boost_beyond_range_by_constant: bool """ - boosting_range_start: float = rest_field(name="boostingRangeStart") + boosting_range_start: float = rest_field( + name="boostingRangeStart", visibility=["read", "create", "update", "delete", "query"] + ) """The field value at which boosting starts. Required.""" - boosting_range_end: float = rest_field(name="boostingRangeEnd") + boosting_range_end: float = rest_field( + name="boostingRangeEnd", visibility=["read", "create", "update", "delete", "query"] + ) """The field value at which boosting ends. 
Required.""" - should_boost_beyond_range_by_constant: Optional[bool] = rest_field(name="constantBoostBeyondRange") + should_boost_beyond_range_by_constant: Optional[bool] = rest_field( + name="constantBoostBeyondRange", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to apply a constant boost for field values beyond the range end value; default is false.""" @@ -5489,10 +5953,10 @@ class MappingCharFilter(CharFilter, discriminator="#Microsoft.Azure.Search.Mappi :vartype odata_type: str """ - mappings: List[str] = rest_field() + mappings: List[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of mappings of the following format: \"a=>b\" (all occurrences of the character \"a\" will be replaced with character \"b\"). Required.""" - odata_type: Literal["#Microsoft.Azure.Search.MappingCharFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.MappingCharFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of char filter. Required. Default value is \"#Microsoft.Azure.Search.MappingCharFilter\".""" @@ -5551,13 +6015,17 @@ class MergeSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.Merge :vartype odata_type: str """ - insert_pre_tag: Optional[str] = rest_field(name="insertPreTag") + insert_pre_tag: Optional[str] = rest_field( + name="insertPreTag", visibility=["read", "create", "update", "delete", "query"] + ) """The tag indicates the start of the merged text. By default, the tag is an empty space.""" - insert_post_tag: Optional[str] = rest_field(name="insertPostTag") + insert_post_tag: Optional[str] = rest_field( + name="insertPostTag", visibility=["read", "create", "update", "delete", "query"] + ) """The tag indicates the end of the merged text. By default, the tag is an empty space.""" - odata_type: Literal["#Microsoft.Skills.Text.MergeSkill"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Skills.Text.MergeSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.MergeSkill\".""" @@ -5619,15 +6087,21 @@ class MicrosoftLanguageStemmingTokenizer( :vartype odata_type: str """ - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255.""" - is_search_tokenizer: Optional[bool] = rest_field(name="isSearchTokenizer") + is_search_tokenizer: Optional[bool] = rest_field( + name="isSearchTokenizer", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. 
Default is false.""" - language: Optional[Union[str, "_models.MicrosoftStemmingTokenizerLanguage"]] = rest_field() + language: Optional[Union[str, "_models.MicrosoftStemmingTokenizerLanguage"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The language to use. The default is English. Known values are: \"arabic\", \"bangla\", \"bulgarian\", \"catalan\", \"croatian\", \"czech\", \"danish\", \"dutch\", \"english\", \"estonian\", \"finnish\", \"french\", \"german\", \"greek\", \"gujarati\", \"hebrew\", @@ -5636,7 +6110,7 @@ class MicrosoftLanguageStemmingTokenizer( \"portuguese\", \"portugueseBrazilian\", \"punjabi\", \"romanian\", \"russian\", \"serbianCyrillic\", \"serbianLatin\", \"slovak\", \"slovenian\", \"spanish\", \"swedish\", \"tamil\", \"telugu\", \"turkish\", \"ukrainian\", and \"urdu\".""" - odata_type: Literal["#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. Default value is \"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer\".""" @@ -5692,15 +6166,21 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azu :vartype odata_type: str """ - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255.""" - is_search_tokenizer: Optional[bool] = rest_field(name="isSearchTokenizer") + is_search_tokenizer: Optional[bool] = rest_field( + name="isSearchTokenizer", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false.""" - language: Optional[Union[str, "_models.MicrosoftTokenizerLanguage"]] = rest_field() + language: Optional[Union[str, "_models.MicrosoftTokenizerLanguage"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The language to use. The default is English. 
Known values are: \"bangla\", \"bulgarian\", \"catalan\", \"chineseSimplified\", \"chineseTraditional\", \"croatian\", \"czech\", \"danish\", \"dutch\", \"english\", \"french\", \"german\", \"greek\", \"gujarati\", \"hindi\", @@ -5709,7 +6189,7 @@ class MicrosoftLanguageTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azu \"portugueseBrazilian\", \"punjabi\", \"romanian\", \"russian\", \"serbianCyrillic\", \"serbianLatin\", \"slovenian\", \"spanish\", \"swedish\", \"tamil\", \"telugu\", \"thai\", \"ukrainian\", \"urdu\", and \"vietnamese\".""" - odata_type: Literal["#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.MicrosoftLanguageTokenizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. Default value is \"#Microsoft.Azure.Search.MicrosoftLanguageTokenizer\".""" @@ -5747,7 +6227,7 @@ class NativeBlobSoftDeleteDeletionDetectionPolicy( :vartype odata_type: str """ - odata_type: Literal["#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of data deletion detection policy. Required. Default value is \"#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy\".""" @@ -5769,53 +6249,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: ) -class NGramTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.NGramTokenFilter"): - """Generates n-grams of the given size(s). This token filter is implemented using - Apache Lucene. - - - :ivar name: The name of the token filter. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and - is limited to 128 characters. Required. - :vartype name: str - :ivar min_gram: The minimum n-gram length. Default is 1. Must be less than the value of - maxGram. - :vartype min_gram: int - :ivar max_gram: The maximum n-gram length. Default is 2. - :vartype max_gram: int - :ivar odata_type: A URI fragment specifying the type of token filter. Required. Default value - is "#Microsoft.Azure.Search.NGramTokenFilter". - :vartype odata_type: str - """ - - min_gram: Optional[int] = rest_field(name="minGram") - """The minimum n-gram length. Default is 1. Must be less than the value of maxGram.""" - max_gram: Optional[int] = rest_field(name="maxGram") - """The maximum n-gram length. Default is 2.""" - odata_type: Literal["#Microsoft.Azure.Search.NGramTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """A URI fragment specifying the type of token filter. Required. Default value is - \"#Microsoft.Azure.Search.NGramTokenFilter\".""" - - @overload - def __init__( - self, - *, - name: str, - min_gram: Optional[int] = None, - max_gram: Optional[int] = None, - ) -> None: ... - - @overload - def __init__(self, mapping: Mapping[str, Any]) -> None: - """ - :param mapping: raw JSON to initialize the model. 
- :type mapping: Mapping[str, Any] - """ - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, odata_type="#Microsoft.Azure.Search.NGramTokenFilter", **kwargs) - - class NGramTokenFilterV2(TokenFilter, discriminator="#Microsoft.Azure.Search.NGramTokenFilterV2"): """Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. @@ -5835,12 +6268,12 @@ class NGramTokenFilterV2(TokenFilter, discriminator="#Microsoft.Azure.Search.NGr :vartype odata_type: str """ - min_gram: Optional[int] = rest_field(name="minGram") + min_gram: Optional[int] = rest_field(name="minGram", visibility=["read", "create", "update", "delete", "query"]) """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram.""" - max_gram: Optional[int] = rest_field(name="maxGram") + max_gram: Optional[int] = rest_field(name="maxGram", visibility=["read", "create", "update", "delete", "query"]) """The maximum n-gram length. Default is 2. Maximum is 300.""" - odata_type: Literal["#Microsoft.Azure.Search.NGramTokenFilterV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.NGramTokenFilterV2"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.NGramTokenFilterV2\".""" @@ -5885,14 +6318,16 @@ class NGramTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search.NG :vartype odata_type: str """ - min_gram: Optional[int] = rest_field(name="minGram") + min_gram: Optional[int] = rest_field(name="minGram", visibility=["read", "create", "update", "delete", "query"]) """The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram.""" - max_gram: Optional[int] = rest_field(name="maxGram") + max_gram: Optional[int] = rest_field(name="maxGram", visibility=["read", "create", "update", "delete", "query"]) """The maximum n-gram length. Default is 2. Maximum is 300.""" - token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = rest_field(name="tokenChars") + token_chars: Optional[List[Union[str, "_models.TokenCharacterKind"]]] = rest_field( + name="tokenChars", visibility=["read", "create", "update", "delete", "query"] + ) """Character classes to keep in the tokens.""" - odata_type: Literal["#Microsoft.Azure.Search.NGramTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.NGramTokenizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. Default value is \"#Microsoft.Azure.Search.NGramTokenizer\".""" @@ -5967,7 +6402,9 @@ class OcrSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vision.OcrSk :vartype odata_type: str """ - default_language_code: Optional[Union[str, "_models.OcrSkillLanguage"]] = rest_field(name="defaultLanguageCode") + default_language_code: Optional[Union[str, "_models.OcrSkillLanguage"]] = rest_field( + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating which language code to use. Default is ``en``. 
Known values are: \"af\", \"sq\", \"anp\", \"ar\", \"ast\", \"awa\", \"az\", \"bfy\", \"eu\", \"be\", \"be-cyrl\", \"be-latn\", \"bho\", \"bi\", \"brx\", \"bs\", \"bra\", \"br\", \"bg\", \"bns\", \"bua\", @@ -5986,13 +6423,17 @@ class OcrSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Vision.OcrSk \"sw\", \"sv\", \"tg\", \"tt\", \"tet\", \"thf\", \"to\", \"tr\", \"tk\", \"tyv\", \"hsb\", \"ur\", \"ug\", \"uz-arab\", \"uz-cyrl\", \"uz\", \"vo\", \"wae\", \"cy\", \"fy\", \"yua\", \"za\", \"zu\", and \"unk\".""" - should_detect_orientation: Optional[bool] = rest_field(name="detectOrientation") + should_detect_orientation: Optional[bool] = rest_field( + name="detectOrientation", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating to turn orientation detection on or not. Default is false.""" - line_ending: Optional[Union[str, "_models.OcrLineEnding"]] = rest_field(name="lineEnding") + line_ending: Optional[Union[str, "_models.OcrLineEnding"]] = rest_field( + name="lineEnding", visibility=["read", "create", "update", "delete", "query"] + ) """Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is \"space\". Known values are: \"space\", \"carriageReturn\", \"lineFeed\", and \"carriageReturnLineFeed\".""" - odata_type: Literal["#Microsoft.Skills.Vision.OcrSkill"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Skills.Vision.OcrSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Vision.OcrSkill\".""" @@ -6031,9 +6472,11 @@ class OutputFieldMappingEntry(_model_base.Model): :vartype target_name: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the output defined by the skill. Required.""" - target_name: Optional[str] = rest_field(name="targetName") + target_name: Optional[str] = rest_field( + name="targetName", visibility=["read", "create", "update", "delete", "query"] + ) """The target name of the output. It is optional and default to name.""" @overload @@ -6081,18 +6524,24 @@ class PathHierarchyTokenizerV2(LexicalTokenizer, discriminator="#Microsoft.Azure :vartype odata_type: str """ - delimiter: Optional[str] = rest_field() + delimiter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The delimiter character to use. Default is \"/\".""" - replacement: Optional[str] = rest_field() + replacement: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value that, if set, replaces the delimiter character. Default is \"/\".""" - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Default and maximum is 300.""" - reverse_token_order: Optional[bool] = rest_field(name="reverse") + reverse_token_order: Optional[bool] = rest_field( + name="reverse", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to generate tokens in reverse order. 
Default is false.""" - number_of_tokens_to_skip: Optional[int] = rest_field(name="skip") + number_of_tokens_to_skip: Optional[int] = rest_field( + name="skip", visibility=["read", "create", "update", "delete", "query"] + ) """The number of initial tokens to skip. Default is 0.""" - odata_type: Literal["#Microsoft.Azure.Search.PathHierarchyTokenizerV2"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.PathHierarchyTokenizerV2"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. Default value is \"#Microsoft.Azure.Search.PathHierarchyTokenizerV2\".""" @@ -6144,17 +6593,21 @@ class PatternAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.Pa :vartype odata_type: str """ - lower_case_terms: Optional[bool] = rest_field(name="lowercase") + lower_case_terms: Optional[bool] = rest_field( + name="lowercase", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether terms should be lower-cased. Default is true.""" - pattern: Optional[str] = rest_field() + pattern: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters.""" - flags: Optional[Union[str, "_models.RegexFlags"]] = rest_field() + flags: Optional[Union[str, "_models.RegexFlags"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Regular expression flags. Known values are: \"CANON_EQ\", \"CASE_INSENSITIVE\", \"COMMENTS\", \"DOTALL\", \"LITERAL\", \"MULTILINE\", \"UNICODE_CASE\", and \"UNIX_LINES\".""" - stopwords: Optional[List[str]] = rest_field() + stopwords: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of stopwords.""" - odata_type: Literal["#Microsoft.Azure.Search.PatternAnalyzer"] = rest_discriminator(name="odataType") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.PatternAnalyzer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of analyzer. Required. Default value is \"#Microsoft.Azure.Search.PatternAnalyzer\".""" @@ -6200,12 +6653,14 @@ class PatternCaptureTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Sea :vartype odata_type: str """ - patterns: List[str] = rest_field() + patterns: List[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of patterns to match against each token. Required.""" - preserve_original: Optional[bool] = rest_field(name="preserveOriginal") + preserve_original: Optional[bool] = rest_field( + name="preserveOriginal", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to return the original token even if one of the patterns matches. Default is true.""" - odata_type: Literal["#Microsoft.Azure.Search.PatternCaptureTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.PatternCaptureTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. 
Default value is \"#Microsoft.Azure.Search.PatternCaptureTokenFilter\".""" @@ -6251,11 +6706,11 @@ class PatternReplaceCharFilter(CharFilter, discriminator="#Microsoft.Azure.Searc :vartype odata_type: str """ - pattern: str = rest_field() + pattern: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A regular expression pattern. Required.""" - replacement: str = rest_field() + replacement: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The replacement text. Required.""" - odata_type: Literal["#Microsoft.Azure.Search.PatternReplaceCharFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.PatternReplaceCharFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of char filter. Required. Default value is \"#Microsoft.Azure.Search.PatternReplaceCharFilter\".""" @@ -6301,11 +6756,11 @@ class PatternReplaceTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Sea :vartype odata_type: str """ - pattern: str = rest_field() + pattern: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A regular expression pattern. Required.""" - replacement: str = rest_field() + replacement: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The replacement text. Required.""" - odata_type: Literal["#Microsoft.Azure.Search.PatternReplaceTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.PatternReplaceTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.PatternReplaceTokenFilter\".""" @@ -6353,17 +6808,19 @@ class PatternTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Search. :vartype odata_type: str """ - pattern: Optional[str] = rest_field() + pattern: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters.""" - flags: Optional[Union[str, "_models.RegexFlags"]] = rest_field() + flags: Optional[Union[str, "_models.RegexFlags"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Regular expression flags. Known values are: \"CANON_EQ\", \"CASE_INSENSITIVE\", \"COMMENTS\", \"DOTALL\", \"LITERAL\", \"MULTILINE\", \"UNICODE_CASE\", and \"UNIX_LINES\".""" - group: Optional[int] = rest_field() + group: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1.""" - odata_type: Literal["#Microsoft.Azure.Search.PatternTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.PatternTokenizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. 
Default value is \"#Microsoft.Azure.Search.PatternTokenizer\".""" @@ -6410,14 +6867,18 @@ class PhoneticTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Ph :vartype odata_type: str """ - encoder: Optional[Union[str, "_models.PhoneticEncoder"]] = rest_field() + encoder: Optional[Union[str, "_models.PhoneticEncoder"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The phonetic encoder to use. Default is \"metaphone\". Known values are: \"metaphone\", \"doubleMetaphone\", \"soundex\", \"refinedSoundex\", \"caverphone1\", \"caverphone2\", \"cologne\", \"nysiis\", \"koelnerPhonetik\", \"haasePhonetik\", and \"beiderMorse\".""" - replace_original_tokens: Optional[bool] = rest_field(name="replace") + replace_original_tokens: Optional[bool] = rest_field( + name="replace", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true.""" - odata_type: Literal["#Microsoft.Azure.Search.PhoneticTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.PhoneticTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.PhoneticTokenFilter\".""" @@ -6492,28 +6953,40 @@ class PIIDetectionSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Tex :vartype odata_type: str """ - default_language_code: Optional[str] = rest_field(name="defaultLanguageCode") + default_language_code: Optional[str] = rest_field( + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating which language code to use. Default is ``en``.""" - minimum_precision: Optional[float] = rest_field(name="minimumPrecision") + minimum_precision: Optional[float] = rest_field( + name="minimumPrecision", visibility=["read", "create", "update", "delete", "query"] + ) """A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included.""" - masking_mode: Optional[Union[str, "_models.PIIDetectionSkillMaskingMode"]] = rest_field(name="maskingMode") + masking_mode: Optional[Union[str, "_models.PIIDetectionSkillMaskingMode"]] = rest_field( + name="maskingMode", visibility=["read", "create", "update", "delete", "query"] + ) """A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. Known values are: \"none\" and \"replace\".""" - mask: Optional[str] = rest_field(name="maskingCharacter") + mask: Optional[str] = rest_field( + name="maskingCharacter", visibility=["read", "create", "update", "delete", "query"] + ) """The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'.""" - model_version: Optional[str] = rest_field(name="modelVersion") + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) """The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. 
We recommend you do not specify this value unless absolutely necessary.""" - pii_categories: Optional[List[str]] = rest_field(name="piiCategories") + pii_categories: Optional[List[str]] = rest_field( + name="piiCategories", visibility=["read", "create", "update", "delete", "query"] + ) """A list of PII entity categories that should be extracted and masked.""" - domain: Optional[str] = rest_field() + domain: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'.""" - odata_type: Literal["#Microsoft.Skills.Text.PIIDetectionSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.PIIDetectionSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.PIIDetectionSkill\".""" @@ -6551,8 +7024,6 @@ class QueryAnswerResult(_model_base.Model): documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar score: The score value represents how relevant the answer is to the query relative to other answers returned for the query. :vartype score: float @@ -6565,25 +7036,43 @@ class QueryAnswerResult(_model_base.Model): :vartype highlights: str """ - score: Optional[float] = rest_field(visibility=["read"]) + score: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The score value represents how relevant the answer is to the query relative to other answers returned for the query.""" - key: Optional[str] = rest_field(visibility=["read"]) + key: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The key of the document the answer was extracted from.""" - text: Optional[str] = rest_field(visibility=["read"]) + text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text passage extracted from the document contents as the answer.""" - highlights: Optional[str] = rest_field(visibility=["read"]) + highlights: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Same text passage as in the Text property with highlighted text phrases most relevant to the query.""" + @overload + def __init__( + self, + *, + score: Optional[float] = None, + key: Optional[str] = None, + text: Optional[str] = None, + highlights: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class QueryCaptionResult(_model_base.Model): """Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type ``semantic``. - Readonly variables are only populated by the server, and will be ignored when sending a request. 
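QueryAnswerResult and QueryCaptionResult are response-only types that surface through the public SearchClient; a minimal sketch, assuming an index that already has a semantic configuration named "my-semantic-config" (the endpoint, key, and query text are placeholders):

from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("<endpoint>", "<index-name>", AzureKeyCredential("<api-key>"))
results = client.search(
    search_text="walkable hotels near live music",
    query_type="semantic",
    semantic_configuration_name="my-semantic-config",
    query_answer="extractive",
    query_caption="extractive",
)
for answer in results.get_answers() or []:  # QueryAnswerResult instances
    print(answer.score, answer.text)
for doc in results:
    # Captions, when returned, ride along on each document under "@search.captions".
    for caption in doc.get("@search.captions") or []:
        print(caption.text, caption.highlights)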
- :ivar text: A representative text passage extracted from the document most relevant to the search query. :vartype text: str @@ -6592,13 +7081,31 @@ class QueryCaptionResult(_model_base.Model): :vartype highlights: str """ - text: Optional[str] = rest_field(visibility=["read"]) + text: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A representative text passage extracted from the document most relevant to the search query.""" - highlights: Optional[str] = rest_field(visibility=["read"]) + highlights: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Same text passage as in the Text property with highlighted phrases most relevant to the query.""" + @overload + def __init__( + self, + *, + text: Optional[str] = None, + highlights: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class QueryResultDocumentRerankerInput(_model_base.Model): """The raw concatenated strings that were sent to the semantic enrichment process. @@ -6715,9 +7222,8 @@ class RescoringOptions(_model_base.Model): of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value - is 1, meaning no oversampling (1x). This parameter can only be set when - 'enableRescoring' is true. Higher values improve recall at the expense of - latency. + is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' + is true. Higher values improve recall at the expense of latency. :vartype default_oversampling: float :ivar rescore_storage_method: Controls the storage method for original vectors. This setting is immutable. Known values are: "preserveOriginals" and "discardOriginals". @@ -6725,19 +7231,22 @@ class RescoringOptions(_model_base.Model): ~azure.search.documents.models.VectorSearchCompressionRescoreStorageMethod """ - enable_rescoring: Optional[bool] = rest_field(name="enableRescoring") + enable_rescoring: Optional[bool] = rest_field( + name="enableRescoring", visibility=["read", "create", "update", "delete", "query"] + ) """If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency.""" - default_oversampling: Optional[float] = rest_field(name="defaultOversampling") + default_oversampling: Optional[float] = rest_field( + name="defaultOversampling", visibility=["read", "create", "update", "delete", "query"] + ) """Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value - is 1, meaning no oversampling (1x). This parameter can only be set when - 'enableRescoring' is true. Higher values improve recall at the expense of - latency.""" + is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' + is true. 
Higher values improve recall at the expense of latency.""" rescore_storage_method: Optional[Union[str, "_models.VectorSearchCompressionRescoreStorageMethod"]] = rest_field( - name="rescoreStorageMethod" + name="rescoreStorageMethod", visibility=["read", "create", "update", "delete", "query"] ) """Controls the storage method for original vectors. This setting is immutable. Known values are: \"preserveOriginals\" and \"discardOriginals\".""" @@ -6772,9 +7281,9 @@ class ResourceCounter(_model_base.Model): :vartype quota: int """ - usage: int = rest_field() + usage: int = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The resource usage amount. Required.""" - quota: Optional[int] = rest_field() + quota: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The resource amount quota.""" @overload @@ -6839,9 +7348,11 @@ class ScalarQuantizationCompression(VectorSearchCompression, discriminator="scal :vartype kind: str or ~azure.search.documents.models.SCALAR_QUANTIZATION """ - parameters: Optional["_models.ScalarQuantizationParameters"] = rest_field(name="scalarQuantizationParameters") + parameters: Optional["_models.ScalarQuantizationParameters"] = rest_field( + name="scalarQuantizationParameters", visibility=["read", "create", "update", "delete", "query"] + ) """Contains the parameters specific to Scalar Quantization.""" - kind: Literal[VectorSearchCompressionKind.SCALAR_QUANTIZATION] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchCompressionKind.SCALAR_QUANTIZATION] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of compression method being configured for use with vector search. Required. Scalar Quantization, a type of compression method. In scalar quantization, the @@ -6881,7 +7392,7 @@ class ScalarQuantizationParameters(_model_base.Model): """ quantized_data_type: Optional[Union[str, "_models.VectorSearchCompressionTarget"]] = rest_field( - name="quantizedDataType" + name="quantizedDataType", visibility=["read", "create", "update", "delete", "query"] ) """The quantized data type of compressed vector values. \"int8\"""" @@ -6921,14 +7432,18 @@ class ScoringProfile(_model_base.Model): :vartype function_aggregation: str or ~azure.search.documents.models.ScoringFunctionAggregation """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the scoring profile. Required.""" - text_weights: Optional["_models.TextWeights"] = rest_field(name="text") + text_weights: Optional["_models.TextWeights"] = rest_field( + name="text", visibility=["read", "create", "update", "delete", "query"] + ) """Parameters that boost scoring based on text matches in certain index fields.""" - functions: Optional[List["_models.ScoringFunction"]] = rest_field() + functions: Optional[List["_models.ScoringFunction"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The collection of functions that influence the scoring of documents.""" function_aggregation: Optional[Union[str, "_models.ScoringFunctionAggregation"]] = rest_field( - name="functionAggregation" + name="functionAggregation", visibility=["read", "create", "update", "delete", "query"] ) """A value indicating how the results of individual scoring functions should be combined. Defaults to \"Sum\". Ignored if there are no scoring functions. 
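A minimal sketch of how a scoring profile built from these parts is usually declared with the public models; the profile name and the "rating"/"hotelName" fields are illustrative assumptions:

from azure.search.documents.indexes.models import (
    MagnitudeScoringFunction,
    MagnitudeScoringParameters,
    ScoringProfile,
    TextWeights,
)

# Boost documents with higher values in a numeric "rating" field and weight
# matches in "hotelName" more heavily than other searchable fields.
profile = ScoringProfile(
    name="boost-by-rating",
    text_weights=TextWeights(weights={"hotelName": 1.5}),
    functions=[
        MagnitudeScoringFunction(
            field_name="rating",
            boost=2.0,
            parameters=MagnitudeScoringParameters(
                boosting_range_start=0,
                boosting_range_end=5,
                should_boost_beyond_range_by_constant=True,
            ),
            interpolation="linear",
        )
    ],
    function_aggregation="sum",
)
# The profile is then passed to SearchIndex(scoring_profiles=[profile], ...).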
Known values are: @@ -6970,11 +7485,11 @@ class SearchAlias(_model_base.Model): :vartype e_tag: str """ -    name: str = rest_field() +    name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the alias. Required.""" -    indexes: List[str] = rest_field() +    indexes: List[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the index this alias maps to. Only one index name may be specified. Required.""" -    e_tag: Optional[str] = rest_field(name="@odata.etag") +    e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the alias.""" @overload @@ -7023,9 +7538,7 @@ class SearchDocumentsResult(_model_base.Model): :vartype debug_info: ~azure.search.documents.models.DebugInfo :ivar next_page_parameters: Continuation JSON payload returned when the query can't return all the -        requested results in a single response. You can use this JSON along with -        @odata.nextLink to formulate another POST Search request to get the next part -        of the search response. +        requested results in a single response. You can use this JSON along with @odata.nextLink to formulate another POST Search request to get the next part of the search response. :vartype next_page_parameters: ~azure.search.documents.models.SearchRequest :ivar results: The sequence of results returned by the query. Required. :vartype results: list[~azure.search.documents.models.SearchResult] :ivar next_link: Continuation URL returned when the query can't return all the requested results in a single response. You can use this URL to formulate another GET or POST Search request to get the next part of the search response. Make sure to use the same verb (GET or POST) as the request that produced this response. :vartype next_link: str :ivar semantic_partial_response_reason: Reason that a partial response was returned for a semantic ranking request. Known values are: "maxWaitExceeded", "capacityOverloaded", and "transient". :vartype semantic_partial_response_reason: str or ~azure.search.documents.models.SemanticErrorReason :ivar semantic_partial_response_type: Type of partial response that was returned for a semantic ranking request. Known values are: "baseResults" and "rerankedResults". :vartype semantic_partial_response_type: str or ~azure.search.documents.models.SemanticSearchResultsType :ivar semantic_query_rewrites_result_type: Type of query rewrite that was used to retrieve documents. "originalQueryOnly" :vartype semantic_query_rewrites_result_type: str or ~azure.search.documents.models.SemanticQueryRewritesResultType """ -    count: Optional[int] = rest_field(name="@odata.count", visibility=["read"]) +    count: Optional[int] = rest_field(name="@odata.count", visibility=["read", "create", "update", "delete", "query"]) """The total count of results found by the search operation, or null if the count was not requested. If present, the count may be greater than the number of results in this response.
This can happen if you use the $top or $skip parameters, or if the query can't return all the requested documents in a single response.""" -    coverage: Optional[float] = rest_field(name="@search.coverage", visibility=["read"]) +    coverage: Optional[float] = rest_field( +        name="@search.coverage", visibility=["read", "create", "update", "delete", "query"] +    ) """A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not specified in the request.""" -    facets: Optional[Dict[str, List["_models.FacetResult"]]] = rest_field(name="@search.facets", visibility=["read"]) +    facets: Optional[Dict[str, List["_models.FacetResult"]]] = rest_field( +        name="@search.facets", visibility=["read", "create", "update", "delete", "query"] +    ) """The facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not include any facet expressions.""" -    answers: Optional[List["_models.QueryAnswerResult"]] = rest_field(name="@search.answers", visibility=["read"]) +    answers: Optional[List["_models.QueryAnswerResult"]] = rest_field( +        name="@search.answers", visibility=["read", "create", "update", "delete", "query"] +    ) """The answers query results for the search operation; null if the answers query parameter was not specified or set to 'none'.""" -    debug_info: Optional["_models.DebugInfo"] = rest_field(name="@search.debugInfo", visibility=["read"]) +    debug_info: Optional["_models.DebugInfo"] = rest_field(name="@search.debug", visibility=["read"]) """Debug information that applies to the search results as a whole.""" next_page_parameters: Optional["_models.SearchRequest"] = rest_field( -        name="@search.nextPageParameters", visibility=["read"] +        name="@search.nextPageParameters", visibility=["read", "create", "update", "delete", "query"] ) """Continuation JSON payload returned when the query can't return all the -        requested results in a single response. You can use this JSON along with -        @odata.nextLink to formulate another POST Search request to get the next part -        of the search response.""" -    results: List["_models.SearchResult"] = rest_field(name="value", visibility=["read"]) +        requested results in a single response. You can use this JSON along with @odata.nextLink to formulate another POST Search request to get the next part of the search response.""" +    results: List["_models.SearchResult"] = rest_field( +        name="value", visibility=["read", "create", "update", "delete", "query"] +    ) """The sequence of results returned by the query. Required.""" -    next_link: Optional[str] = rest_field(name="@odata.nextLink", visibility=["read"]) +    next_link: Optional[str] = rest_field( +        name="@odata.nextLink", visibility=["read", "create", "update", "delete", "query"] +    ) """Continuation URL returned when the query can't return all the requested results in a single response. You can use this URL to formulate another GET or POST Search request to get the next part of the search response. Make sure to use the same verb (GET or POST) as the request that produced this response.""" semantic_partial_response_reason: Optional[Union[str, "_models.SemanticErrorReason"]] = rest_field( -        name="@search.semanticPartialResponseReason", visibility=["read"] +        name="@search.semanticPartialResponseReason", visibility=["read", "create", "update", "delete", "query"] ) """Reason that a partial response was returned for a semantic ranking request.
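Callers do not build SearchDocumentsResult themselves; its fields surface on the pager returned by SearchClient.search. A minimal sketch, assuming an index with a facetable "category" field and a "hotelId" key (all names are placeholders):

from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("<endpoint>", "<index-name>", AzureKeyCredential("<api-key>"))
results = client.search(search_text="spa", include_total_count=True, facets=["category"])

print(results.get_count())     # backed by @odata.count
print(results.get_coverage())  # backed by @search.coverage
print(results.get_facets())    # backed by @search.facets
for doc in results:
    # Continuation (@search.nextPageParameters / @odata.nextLink) is followed automatically.
    print(doc["hotelId"])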
Known values are: \"maxWaitExceeded\", \"capacityOverloaded\", and \"transient\".""" semantic_partial_response_type: Optional[Union[str, "_models.SemanticSearchResultsType"]] = rest_field( - name="@search.semanticPartialResponseType", visibility=["read"] + name="@search.semanticPartialResponseType", visibility=["read", "create", "update", "delete", "query"] ) """Type of partial response that was returned for a semantic ranking request. Known values are: \"baseResults\" and \"rerankedResults\".""" @@ -7097,6 +7618,31 @@ class SearchDocumentsResult(_model_base.Model): ) """Type of query rewrite that was used to retrieve documents. \"originalQueryOnly\"""" + @overload + def __init__( + self, + *, + results: List["_models.SearchResult"], + count: Optional[int] = None, + coverage: Optional[float] = None, + facets: Optional[Dict[str, List["_models.FacetResult"]]] = None, + answers: Optional[List["_models.QueryAnswerResult"]] = None, + next_page_parameters: Optional["_models.SearchRequest"] = None, + next_link: Optional[str] = None, + semantic_partial_response_reason: Optional[Union[str, "_models.SemanticErrorReason"]] = None, + semantic_partial_response_type: Optional[Union[str, "_models.SemanticSearchResultsType"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class SearchField(_model_base.Model): """Represents a field in an index definition, which describes the name, data type, @@ -7273,21 +7819,23 @@ class SearchField(_model_base.Model): :vartype fields: list[~azure.search.documents.models.SearchField] """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the field, which must be unique within the fields collection of the index or parent field. Required.""" - type: Union[str, "_models.SearchFieldDataType"] = rest_field() + type: Union[str, "_models.SearchFieldDataType"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The data type of the field. Required. Known values are: \"Edm.String\", \"Edm.Int32\", \"Edm.Int64\", \"Edm.Double\", \"Edm.Boolean\", \"Edm.DateTimeOffset\", \"Edm.GeographyPoint\", \"Edm.ComplexType\", \"Edm.Single\", \"Edm.Half\", \"Edm.Int16\", \"Edm.SByte\", and \"Edm.Byte\".""" - key: Optional[bool] = rest_field() + key: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields.""" - retrievable: Optional[bool] = rest_field() + retrievable: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible @@ -7296,7 +7844,7 @@ class SearchField(_model_base.Model): this property does not cause any increase in index storage requirements. 
Default is true for simple fields, false for vector fields, and null for complex fields.""" - stored: Optional[bool] = rest_field() + stored: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """An immutable value indicating whether the field will be persisted separately on disk to be returned in a search result. You can disable this option if you don't plan to return the field contents in a search response to save on storage @@ -7307,7 +7855,7 @@ class SearchField(_model_base.Model): new fields, and for non-vector fields, and it must be null for complex fields. Disabling this property will reduce index storage requirements. The default is true for vector fields.""" - searchable: Optional[bool] = rest_field() + searchable: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like \"sunny day\", internally it will be split into @@ -7319,7 +7867,7 @@ class SearchField(_model_base.Model): of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false.""" - filterable: Optional[bool] = rest_field() + filterable: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo @@ -7327,7 +7875,7 @@ class SearchField(_model_base.Model): set such a field f to \"sunny day\", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields.""" - sortable: Optional[bool] = rest_field() + sortable: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether to enable the field to be referenced in $orderby expressions. By default, the search engine sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field @@ -7339,7 +7887,7 @@ class SearchField(_model_base.Model): Complex fields cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields.""" - facetable: Optional[bool] = rest_field() + facetable: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by @@ -7347,7 +7895,9 @@ class SearchField(_model_base.Model): complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields.""" - analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field() + analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The name of the analyzer to use for the field. 
This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the @@ -7371,7 +7921,9 @@ class SearchField(_model_base.Model): \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", \"vi.microsoft\", \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", \"pattern\", \"simple\", \"stop\", and \"whitespace\".""" - search_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field(name="searchAnalyzer") + search_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field( + name="searchAnalyzer", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be @@ -7397,7 +7949,9 @@ class SearchField(_model_base.Model): \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", \"vi.microsoft\", \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", \"pattern\", \"simple\", \"stop\", and \"whitespace\".""" - index_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field(name="indexAnalyzer") + index_analyzer: Optional[Union[str, "_models.LexicalAnalyzerName"]] = rest_field( + name="indexAnalyzer", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This @@ -7423,27 +7977,39 @@ class SearchField(_model_base.Model): \"th.lucene\", \"tr.microsoft\", \"tr.lucene\", \"uk.microsoft\", \"ur.microsoft\", \"vi.microsoft\", \"standard.lucene\", \"standardasciifolding.lucene\", \"keyword\", \"pattern\", \"simple\", \"stop\", and \"whitespace\".""" - normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = rest_field() + normalizer: Optional[Union[str, "_models.LexicalNormalizerName"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. 
Known values are: \"asciifolding\", \"elision\", \"lowercase\", \"standard\", and \"uppercase\".""" - vector_search_dimensions: Optional[int] = rest_field(name="dimensions") + vector_search_dimensions: Optional[int] = rest_field( + name="dimensions", visibility=["read", "create", "update", "delete", "query"] + ) """The dimensionality of the vector field.""" - vector_search_profile_name: Optional[str] = rest_field(name="vectorSearchProfile") + vector_search_profile_name: Optional[str] = rest_field( + name="vectorSearchProfile", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field.""" - vector_encoding_format: Optional[Union[str, "_models.VectorEncodingFormat"]] = rest_field(name="vectorEncoding") + vector_encoding_format: Optional[Union[str, "_models.VectorEncodingFormat"]] = rest_field( + name="vectorEncoding", visibility=["read", "create", "update", "delete", "query"] + ) """The encoding format to interpret the field contents. \"packedBit\"""" - synonym_maps: Optional[List[str]] = rest_field(name="synonymMaps") + synonym_maps: Optional[List[str]] = rest_field( + name="synonymMaps", visibility=["read", "create", "update", "delete", "query"] + ) """A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields.""" - fields: Optional[List["_models.SearchField"]] = rest_field() + fields: Optional[List["_models.SearchField"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields.""" @@ -7537,31 +8103,51 @@ class SearchIndex(_model_base.Model): :vartype e_tag: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the index. Required.""" - fields: List["_models.SearchField"] = rest_field() + fields: List["_models.SearchField"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The fields of the index. Required.""" - scoring_profiles: Optional[List["_models.ScoringProfile"]] = rest_field(name="scoringProfiles") + scoring_profiles: Optional[List["_models.ScoringProfile"]] = rest_field( + name="scoringProfiles", visibility=["read", "create", "update", "delete", "query"] + ) """The scoring profiles for the index.""" - default_scoring_profile: Optional[str] = rest_field(name="defaultScoringProfile") + default_scoring_profile: Optional[str] = rest_field( + name="defaultScoringProfile", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the scoring profile to use if none is specified in the query. 
If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used.""" - cors_options: Optional["_models.CorsOptions"] = rest_field(name="corsOptions") + cors_options: Optional["_models.CorsOptions"] = rest_field( + name="corsOptions", visibility=["read", "create", "update", "delete", "query"] + ) """Options to control Cross-Origin Resource Sharing (CORS) for the index.""" - suggesters: Optional[List["_models.SearchSuggester"]] = rest_field() + suggesters: Optional[List["_models.SearchSuggester"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The suggesters for the index.""" - analyzers: Optional[List["_models.LexicalAnalyzer"]] = rest_field() + analyzers: Optional[List["_models.LexicalAnalyzer"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The analyzers for the index.""" - tokenizers: Optional[List["_models.LexicalTokenizer"]] = rest_field() + tokenizers: Optional[List["_models.LexicalTokenizer"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The tokenizers for the index.""" - token_filters: Optional[List["_models.TokenFilter"]] = rest_field(name="tokenFilters") + token_filters: Optional[List["_models.TokenFilter"]] = rest_field( + name="tokenFilters", visibility=["read", "create", "update", "delete", "query"] + ) """The token filters for the index.""" - char_filters: Optional[List["_models.CharFilter"]] = rest_field(name="charFilters") + char_filters: Optional[List["_models.CharFilter"]] = rest_field( + name="charFilters", visibility=["read", "create", "update", "delete", "query"] + ) """The character filters for the index.""" - normalizers: Optional[List["_models.LexicalNormalizer"]] = rest_field() + normalizers: Optional[List["_models.LexicalNormalizer"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The normalizers for the index.""" - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field( + name="encryptionKey", visibility=["read", "create", "update", "delete", "query"] + ) """A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. @@ -7571,16 +8157,22 @@ class SearchIndex(_model_base.Model): unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019.""" - similarity: Optional["_models.SimilarityAlgorithm"] = rest_field() + similarity: Optional["_models.SimilarityAlgorithm"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. 
If null, the ClassicSimilarity algorithm is used.""" - semantic_search: Optional["_models.SemanticSearch"] = rest_field(name="semantic") + semantic_search: Optional["_models.SemanticSearch"] = rest_field( + name="semantic", visibility=["read", "create", "update", "delete", "query"] + ) """Defines parameters for a search index that influence semantic capabilities.""" - vector_search: Optional["_models.VectorSearch"] = rest_field(name="vectorSearch") + vector_search: Optional["_models.VectorSearch"] = rest_field( + name="vectorSearch", visibility=["read", "create", "update", "delete", "query"] + ) """Contains configuration options related to vector search.""" - e_tag: Optional[str] = rest_field(name="@odata.etag") + e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the index.""" @overload @@ -7664,31 +8256,49 @@ class SearchIndexer(_model_base.Model): :vartype cache: ~azure.search.documents.models.SearchIndexerCache """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the indexer. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The description of the indexer.""" - data_source_name: str = rest_field(name="dataSourceName") + data_source_name: str = rest_field( + name="dataSourceName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the datasource from which this indexer reads data. Required.""" - skillset_name: Optional[str] = rest_field(name="skillsetName") + skillset_name: Optional[str] = rest_field( + name="skillsetName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the skillset executing with this indexer.""" - target_index_name: str = rest_field(name="targetIndexName") + target_index_name: str = rest_field( + name="targetIndexName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the index to which this indexer writes data. 
Required.""" - schedule: Optional["_models.IndexingSchedule"] = rest_field() + schedule: Optional["_models.IndexingSchedule"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The schedule for this indexer.""" - parameters: Optional["_models.IndexingParameters"] = rest_field() + parameters: Optional["_models.IndexingParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Parameters for indexer execution.""" - field_mappings: Optional[List["_models.FieldMapping"]] = rest_field(name="fieldMappings") + field_mappings: Optional[List["_models.FieldMapping"]] = rest_field( + name="fieldMappings", visibility=["read", "create", "update", "delete", "query"] + ) """Defines mappings between fields in the data source and corresponding target fields in the index.""" - output_field_mappings: Optional[List["_models.FieldMapping"]] = rest_field(name="outputFieldMappings") + output_field_mappings: Optional[List["_models.FieldMapping"]] = rest_field( + name="outputFieldMappings", visibility=["read", "create", "update", "delete", "query"] + ) """Output field mappings are applied after enrichment and immediately before indexing.""" - is_disabled: Optional[bool] = rest_field(name="disabled") + is_disabled: Optional[bool] = rest_field( + name="disabled", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether the indexer is disabled. Default is false.""" - e_tag: Optional[str] = rest_field(name="@odata.etag") + e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the indexer.""" - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field( + name="encryptionKey", visibility=["read", "create", "update", "delete", "query"] + ) """A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance @@ -7699,7 +8309,9 @@ class SearchIndexer(_model_base.Model): indexer execution status) will be unaffected. 
Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019.""" - cache: Optional["_models.SearchIndexerCache"] = rest_field() + cache: Optional["_models.SearchIndexerCache"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Adds caching to an enrichment pipeline to allow for incremental modification steps without having to rebuild the index every time.""" @@ -7750,12 +8362,18 @@ class SearchIndexerCache(_model_base.Model): :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity """ - storage_connection_string: Optional[str] = rest_field(name="storageConnectionString") + storage_connection_string: Optional[str] = rest_field( + name="storageConnectionString", visibility=["read", "create", "update", "delete", "query"] + ) """The connection string to the storage account where the cache data will be persisted.""" - enable_reprocessing: Optional[bool] = rest_field(name="enableReprocessing") + enable_reprocessing: Optional[bool] = rest_field( + name="enableReprocessing", visibility=["read", "create", "update", "delete", "query"] + ) """Specifies whether incremental reprocessing is enabled.""" - identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for connections to the enrichment cache. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the @@ -7795,10 +8413,10 @@ class SearchIndexerDataContainer(_model_base.Model): :vartype query: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. Required.""" - query: Optional[str] = rest_field() + query: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources.""" @@ -7828,13 +8446,14 @@ class SearchIndexerDataIdentity(_model_base.Model): SearchIndexerDataNoneIdentity, SearchIndexerDataUserAssignedIdentity - :ivar odata_type: The discriminator for derived types. Required. Default value is None. + :ivar odata_type: A URI fragment specifying the type of identity. Required. Default value is + None. :vartype odata_type: str """ __mapping__: Dict[str, _model_base.Model] = {} - odata_type: str = rest_discriminator(name="@odata.type") - """The discriminator for derived types. Required. Default value is None.""" + odata_type: str = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) + """A URI fragment specifying the type of identity. Required. Default value is None.""" @overload def __init__( @@ -7860,13 +8479,13 @@ class SearchIndexerDataNoneIdentity( """Clears the identity property of a datasource. - :ivar odata_type: A URI fragment specifying the type of identity. Required. Default value is + :ivar odata_type: The discriminator for derived types. Required. Default value is "#Microsoft.Azure.Search.DataNoneIdentity". 
:vartype odata_type: str """ - odata_type: Literal["#Microsoft.Azure.Search.DataNoneIdentity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long - """A URI fragment specifying the type of identity. Required. Default value is + odata_type: Literal["#Microsoft.Azure.Search.DataNoneIdentity"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore + """The discriminator for derived types. Required. Default value is \"#Microsoft.Azure.Search.DataNoneIdentity\".""" @overload @@ -7926,33 +8545,43 @@ class SearchIndexerDataSource(_model_base.Model): :vartype encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the datasource. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The description of the datasource.""" - type: Union[str, "_models.SearchIndexerDataSourceType"] = rest_field() + type: Union[str, "_models.SearchIndexerDataSourceType"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The type of the datasource. Required. Known values are: \"azuresql\", \"cosmosdb\", \"azureblob\", \"azuretable\", \"mysql\", \"adlsgen2\", and \"onelake\".""" - credentials: "_models.DataSourceCredentials" = rest_field() + credentials: "_models.DataSourceCredentials" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Credentials for the datasource. Required.""" - container: "_models.SearchIndexerDataContainer" = rest_field() + container: "_models.SearchIndexerDataContainer" = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The data container for the datasource. Required.""" - identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """An explicit managed identity to use for this datasource. If not specified and the connection string is a managed identity, the system-assigned managed identity is used. If not specified, the value remains unchanged. If \"none\" is specified, the value of this property is cleared.""" data_change_detection_policy: Optional["_models.DataChangeDetectionPolicy"] = rest_field( - name="dataChangeDetectionPolicy" + name="dataChangeDetectionPolicy", visibility=["read", "create", "update", "delete", "query"] ) """The data change detection policy for the datasource.""" data_deletion_detection_policy: Optional["_models.DataDeletionDetectionPolicy"] = rest_field( - name="dataDeletionDetectionPolicy" + name="dataDeletionDetectionPolicy", visibility=["read", "create", "update", "delete", "query"] ) """The data deletion detection policy for the datasource.""" - e_tag: Optional[str] = rest_field(name="@odata.etag") + e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the data source.""" - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field( + name="encryptionKey", visibility=["read", "create", "update", "delete", "query"] + ) """A description of an encryption key that you create in Azure Key Vault. 
This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even @@ -7999,7 +8628,7 @@ class SearchIndexerDataUserAssignedIdentity( :ivar resource_id: The fully qualified Azure resource Id of a user assigned managed identity typically in the form - "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" # pylint: disable=line-too-long + "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. Required. :vartype resource_id: str :ivar odata_type: A URI fragment specifying the type of identity. Required. Default value is @@ -8007,12 +8636,14 @@ class SearchIndexerDataUserAssignedIdentity( :vartype odata_type: str """ - resource_id: str = rest_field(name="userAssignedIdentity") + resource_id: str = rest_field( + name="userAssignedIdentity", visibility=["read", "create", "update", "delete", "query"] + ) """The fully qualified Azure resource Id of a user assigned managed identity typically in the form - \"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId\" # pylint: disable=line-too-long + \"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId\" that should have been assigned to the search service. Required.""" - odata_type: Literal["#Microsoft.Azure.Search.DataUserAssignedIdentity"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.DataUserAssignedIdentity"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of identity. Required. Default value is \"#Microsoft.Azure.Search.DataUserAssignedIdentity\".""" @@ -8037,8 +8668,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SearchIndexerError(_model_base.Model): """Represents an item- or document-level indexing error. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar key: The key of the item for which indexing failed. :vartype key: str @@ -8064,26 +8693,50 @@ class SearchIndexerError(_model_base.Model): :vartype documentation_link: str """ - key: Optional[str] = rest_field(visibility=["read"]) + key: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The key of the item for which indexing failed.""" - error_message: str = rest_field(name="errorMessage", visibility=["read"]) + error_message: str = rest_field(name="errorMessage", visibility=["read", "create", "update", "delete", "query"]) """The message describing the error that occurred while processing the item. Required.""" - status_code: int = rest_field(name="statusCode", visibility=["read"]) + status_code: int = rest_field(name="statusCode", visibility=["read", "create", "update", "delete", "query"]) """The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy. 
Required.""" - name: Optional[str] = rest_field(visibility=["read"]) + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available.""" - details: Optional[str] = rest_field(visibility=["read"]) + details: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Additional, verbose details about the error to assist in debugging the indexer. This may not be always available.""" - documentation_link: Optional[str] = rest_field(name="documentationLink", visibility=["read"]) + documentation_link: Optional[str] = rest_field( + name="documentationLink", visibility=["read", "create", "update", "delete", "query"] + ) """A link to a troubleshooting guide for these classes of errors. This may not be always available.""" + @overload + def __init__( + self, + *, + error_message: str, + status_code: int, + key: Optional[str] = None, + name: Optional[str] = None, + details: Optional[str] = None, + documentation_link: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class SearchIndexerIndexProjection(_model_base.Model): """Definition of additional projections to secondary search indexes. @@ -8096,9 +8749,13 @@ class SearchIndexerIndexProjection(_model_base.Model): :vartype parameters: ~azure.search.documents.models.SearchIndexerIndexProjectionsParameters """ - selectors: List["_models.SearchIndexerIndexProjectionSelector"] = rest_field() + selectors: List["_models.SearchIndexerIndexProjectionSelector"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of projections to be performed to secondary search indexes. Required.""" - parameters: Optional["_models.SearchIndexerIndexProjectionsParameters"] = rest_field() + parameters: Optional["_models.SearchIndexerIndexProjectionsParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type.""" @@ -8141,16 +8798,22 @@ class SearchIndexerIndexProjectionSelector(_model_base.Model): :vartype mappings: list[~azure.search.documents.models.InputFieldMappingEntry] """ - target_index_name: str = rest_field(name="targetIndexName") - """Name of the search index to project to. Must have a key field with the - 'keyword' analyzer set. Required.""" - parent_key_field_name: str = rest_field(name="parentKeyFieldName") + target_index_name: str = rest_field( + name="targetIndexName", visibility=["read", "create", "update", "delete", "query"] + ) + """Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. + Required.""" + parent_key_field_name: str = rest_field( + name="parentKeyFieldName", visibility=["read", "create", "update", "delete", "query"] + ) """Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. 
Required.""" - source_context: str = rest_field(name="sourceContext") + source_context: str = rest_field(name="sourceContext", visibility=["read", "create", "update", "delete", "query"]) """Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. Required.""" - mappings: List["_models.InputFieldMappingEntry"] = rest_field() + mappings: List["_models.InputFieldMappingEntry"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Mappings for the projection, or which source should be mapped to which field in the target index. Required.""" @@ -8184,7 +8847,9 @@ class SearchIndexerIndexProjectionsParameters(_model_base.Model): :vartype projection_mode: str or ~azure.search.documents.models.IndexProjectionMode """ - projection_mode: Optional[Union[str, "_models.IndexProjectionMode"]] = rest_field(name="projectionMode") + projection_mode: Optional[Union[str, "_models.IndexProjectionMode"]] = rest_field( + name="projectionMode", visibility=["read", "create", "update", "delete", "query"] + ) """Defines behavior of the index projections in relation to the rest of the indexer. Known values are: \"skipIndexingParentDocuments\" and \"includeIndexingParentDocuments\".""" @@ -8231,18 +8896,26 @@ class SearchIndexerKnowledgeStore(_model_base.Model): :vartype parameters: ~azure.search.documents.models.SearchIndexerKnowledgeStoreParameters """ - storage_connection_string: str = rest_field(name="storageConnectionString") + storage_connection_string: str = rest_field( + name="storageConnectionString", visibility=["read", "create", "update", "delete", "query"] + ) """The connection string to the storage account projections will be stored in. Required.""" - projections: List["_models.SearchIndexerKnowledgeStoreProjection"] = rest_field() + projections: List["_models.SearchIndexerKnowledgeStoreProjection"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A list of additional projections to perform during indexing. Required.""" - identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for connections to Azure Storage when writing knowledge store projections. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared.""" - parameters: Optional["_models.SearchIndexerKnowledgeStoreParameters"] = rest_field() + parameters: Optional["_models.SearchIndexerKnowledgeStoreParameters"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. 
Each value must be of a primitive type.""" @@ -8282,15 +8955,23 @@ class SearchIndexerKnowledgeStoreProjectionSelector(_model_base.Model): # pylin :vartype inputs: list[~azure.search.documents.models.InputFieldMappingEntry] """ - reference_key_name: Optional[str] = rest_field(name="referenceKeyName") + reference_key_name: Optional[str] = rest_field( + name="referenceKeyName", visibility=["read", "create", "update", "delete", "query"] + ) """Name of reference key to different projection.""" - generated_key_name: Optional[str] = rest_field(name="generatedKeyName") + generated_key_name: Optional[str] = rest_field( + name="generatedKeyName", visibility=["read", "create", "update", "delete", "query"] + ) """Name of generated key to store projection under.""" - source: Optional[str] = rest_field() + source: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Source data to project.""" - source_context: Optional[str] = rest_field(name="sourceContext") + source_context: Optional[str] = rest_field( + name="sourceContext", visibility=["read", "create", "update", "delete", "query"] + ) """Source context for complex projections.""" - inputs: Optional[List["_models.InputFieldMappingEntry"]] = rest_field() + inputs: Optional[List["_models.InputFieldMappingEntry"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Nested inputs for complex projections.""" @overload @@ -8335,7 +9016,9 @@ class SearchIndexerKnowledgeStoreBlobProjectionSelector( :vartype storage_container: str """ - storage_container: str = rest_field(name="storageContainer") + storage_container: str = rest_field( + name="storageContainer", visibility=["read", "create", "update", "delete", "query"] + ) """Blob container to store projections in. 
Required.""" @overload @@ -8457,7 +9140,9 @@ class SearchIndexerKnowledgeStoreParameters(_model_base.Model): :vartype synthesize_generated_key_name: bool """ - synthesize_generated_key_name: Optional[bool] = rest_field(name="synthesizeGeneratedKeyName") + synthesize_generated_key_name: Optional[bool] = rest_field( + name="synthesizeGeneratedKeyName", visibility=["read", "create", "update", "delete", "query"] + ) """Whether or not projections should synthesize a generated key name if one isn't already present.""" @@ -8493,11 +9178,17 @@ class SearchIndexerKnowledgeStoreProjection(_model_base.Model): list[~azure.search.documents.models.SearchIndexerKnowledgeStoreFileProjectionSelector] """ - tables: Optional[List["_models.SearchIndexerKnowledgeStoreTableProjectionSelector"]] = rest_field() + tables: Optional[List["_models.SearchIndexerKnowledgeStoreTableProjectionSelector"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Projections to Azure Table storage.""" - objects: Optional[List["_models.SearchIndexerKnowledgeStoreObjectProjectionSelector"]] = rest_field() + objects: Optional[List["_models.SearchIndexerKnowledgeStoreObjectProjectionSelector"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Projections to Azure Blob storage.""" - files: Optional[List["_models.SearchIndexerKnowledgeStoreFileProjectionSelector"]] = rest_field() + files: Optional[List["_models.SearchIndexerKnowledgeStoreFileProjectionSelector"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Projections to Azure File storage.""" @overload @@ -8540,7 +9231,7 @@ class SearchIndexerKnowledgeStoreTableProjectionSelector( :vartype table_name: str """ - table_name: str = rest_field(name="tableName") + table_name: str = rest_field(name="tableName", visibility=["read", "create", "update", "delete", "query"]) """Name of the Azure table to store projected data in. Required.""" @overload @@ -8569,8 +9260,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SearchIndexerLimits(_model_base.Model): """Represents the limits that can be applied to an indexer. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar max_run_time: The maximum duration that the indexer is permitted to run for one execution. 
:vartype max_run_time: ~datetime.timedelta @@ -8584,17 +9273,40 @@ class SearchIndexerLimits(_model_base.Model): :vartype max_document_content_characters_to_extract: int """ - max_run_time: Optional[datetime.timedelta] = rest_field(name="maxRunTime", visibility=["read"]) + max_run_time: Optional[datetime.timedelta] = rest_field( + name="maxRunTime", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum duration that the indexer is permitted to run for one execution.""" - max_document_extraction_size: Optional[int] = rest_field(name="maxDocumentExtractionSize", visibility=["read"]) + max_document_extraction_size: Optional[int] = rest_field( + name="maxDocumentExtractionSize", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum size of a document, in bytes, which will be considered valid for indexing.""" max_document_content_characters_to_extract: Optional[int] = rest_field( - name="maxDocumentContentCharactersToExtract", visibility=["read"] + name="maxDocumentContentCharactersToExtract", visibility=["read", "create", "update", "delete", "query"] ) """The maximum number of characters that will be extracted from a document picked up for indexing.""" + @overload + def __init__( + self, + *, + max_run_time: Optional[datetime.timedelta] = None, + max_document_extraction_size: Optional[int] = None, + max_document_content_characters_to_extract: Optional[int] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class SearchIndexerSkillset(_model_base.Model): """A list of skills. @@ -8630,22 +9342,30 @@ class SearchIndexerSkillset(_model_base.Model): :vartype encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the skillset. Required.""" - description: Optional[str] = rest_field() + description: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The description of the skillset.""" - skills: List["_models.SearchIndexerSkill"] = rest_field() + skills: List["_models.SearchIndexerSkill"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of skills in the skillset. 
Required.""" - cognitive_services_account: Optional["_models.CognitiveServicesAccount"] = rest_field(name="cognitiveServices") + cognitive_services_account: Optional["_models.CognitiveServicesAccount"] = rest_field( + name="cognitiveServices", visibility=["read", "create", "update", "delete", "query"] + ) """Details about the Azure AI service to be used when running skills.""" - knowledge_store: Optional["_models.SearchIndexerKnowledgeStore"] = rest_field(name="knowledgeStore") + knowledge_store: Optional["_models.SearchIndexerKnowledgeStore"] = rest_field( + name="knowledgeStore", visibility=["read", "create", "update", "delete", "query"] + ) """Definition of additional projections to Azure blob, table, or files, of enriched data.""" - index_projection: Optional["_models.SearchIndexerIndexProjection"] = rest_field(name="indexProjections") + index_projection: Optional["_models.SearchIndexerIndexProjection"] = rest_field( + name="indexProjections", visibility=["read", "create", "update", "delete", "query"] + ) """Definition of additional projections to secondary search index(es).""" - e_tag: Optional[str] = rest_field(name="@odata.etag") + e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the skillset.""" - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field( + name="encryptionKey", visibility=["read", "create", "update", "delete", "query"] + ) """A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can @@ -8685,8 +9405,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SearchIndexerStatus(_model_base.Model): """Represents the current status and execution history of an indexer. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar status: Overall indexer status. Required. Known values are: "unknown", "error", and "running". @@ -8700,21 +9418,43 @@ class SearchIndexerStatus(_model_base.Model): :vartype limits: ~azure.search.documents.models.SearchIndexerLimits """ - status: Union[str, "_models.IndexerStatus"] = rest_field(visibility=["read"]) + status: Union[str, "_models.IndexerStatus"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Overall indexer status. Required. Known values are: \"unknown\", \"error\", and \"running\".""" - last_result: Optional["_models.IndexerExecutionResult"] = rest_field(name="lastResult", visibility=["read"]) + last_result: Optional["_models.IndexerExecutionResult"] = rest_field( + name="lastResult", visibility=["read", "create", "update", "delete", "query"] + ) """The result of the most recent or an in-progress indexer execution.""" - execution_history: List["_models.IndexerExecutionResult"] = rest_field(name="executionHistory", visibility=["read"]) + execution_history: List["_models.IndexerExecutionResult"] = rest_field( + name="executionHistory", visibility=["read", "create", "update", "delete", "query"] + ) """History of the recent indexer executions, sorted in reverse chronological order. 
Required.""" - limits: "_models.SearchIndexerLimits" = rest_field(visibility=["read"]) + limits: "_models.SearchIndexerLimits" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The execution limits for the indexer. Required.""" + @overload + def __init__( + self, + *, + status: Union[str, "_models.IndexerStatus"], + execution_history: List["_models.IndexerExecutionResult"], + limits: "_models.SearchIndexerLimits", + last_result: Optional["_models.IndexerExecutionResult"] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class SearchIndexerWarning(_model_base.Model): """Represents an item-level warning. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar key: The key of the item which generated a warning. :vartype key: str @@ -8734,21 +9474,44 @@ class SearchIndexerWarning(_model_base.Model): :vartype documentation_link: str """ - key: Optional[str] = rest_field(visibility=["read"]) + key: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The key of the item which generated a warning.""" - message: str = rest_field(visibility=["read"]) + message: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The message describing the warning that occurred while processing the item. Required.""" - name: Optional[str] = rest_field(visibility=["read"]) + name: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available.""" - details: Optional[str] = rest_field(visibility=["read"]) + details: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Additional, verbose details about the warning to assist in debugging the indexer. This may not be always available.""" - documentation_link: Optional[str] = rest_field(name="documentationLink", visibility=["read"]) + documentation_link: Optional[str] = rest_field( + name="documentationLink", visibility=["read", "create", "update", "delete", "query"] + ) """A link to a troubleshooting guide for these classes of warnings. This may not be always available.""" + @overload + def __init__( + self, + *, + message: str, + key: Optional[str] = None, + name: Optional[str] = None, + details: Optional[str] = None, + documentation_link: Optional[str] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class SearchRequest(_model_base.Model): """Parameters for filtering, sorting, faceting, paging, and other search query @@ -8902,31 +9665,41 @@ class SearchRequest(_model_base.Model): :vartype hybrid_search: ~azure.search.documents.models.HybridSearch """ - include_total_result_count: Optional[bool] = rest_field(name="count") + include_total_result_count: Optional[bool] = rest_field( + name="count", visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies whether to fetch the total count of results. 
Default is false. Setting this value to true may have a performance impact. Note that the count returned is an approximation.""" - facets: Optional[List[str]] = rest_field() + facets: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The list of facet expressions to apply to the search query. Each facet expression contains a field name, optionally followed by a comma-separated list of name:value pairs.""" - filter: Optional[str] = rest_field() + filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The OData $filter expression to apply to the search query.""" - highlight_fields: Optional[str] = rest_field(name="highlight") + highlight_fields: Optional[str] = rest_field( + name="highlight", visibility=["read", "create", "update", "delete", "query"] + ) """The comma-separated list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting.""" - highlight_post_tag: Optional[str] = rest_field(name="highlightPostTag") + highlight_post_tag: Optional[str] = rest_field( + name="highlightPostTag", visibility=["read", "create", "update", "delete", "query"] + ) """A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is </em>.""" - highlight_pre_tag: Optional[str] = rest_field(name="highlightPreTag") + highlight_pre_tag: Optional[str] = rest_field( + name="highlightPreTag", visibility=["read", "create", "update", "delete", "query"] + ) """A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default is <em>.""" - minimum_coverage: Optional[float] = rest_field(name="minimumCoverage") + minimum_coverage: Optional[float] = rest_field( + name="minimumCoverage", visibility=["read", "create", "update", "delete", "query"] + ) """A number between 0 and 100 indicating the percentage of the index that must be covered by a search query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 100.""" - order_by: Optional[str] = rest_field(name="orderby") + order_by: Optional[str] = rest_field(name="orderby", visibility=["read", "create", "update", "delete", "query"]) """The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed @@ -8934,47 +9707,63 @@ class SearchRequest(_model_base.Model): ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses.""" - query_type: Optional[Union[str, "_models.QueryType"]] = rest_field(name="queryType") + query_type: Optional[Union[str, "_models.QueryType"]] = rest_field( + name="queryType", visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if your query uses the Lucene query syntax. 
Known values are: \"simple\", \"full\", and \"semantic\".""" - scoring_statistics: Optional[Union[str, "_models.ScoringStatistics"]] = rest_field(name="scoringStatistics") + scoring_statistics: Optional[Union[str, "_models.ScoringStatistics"]] = rest_field( + name="scoringStatistics", visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies whether we want to calculate scoring statistics (such as document frequency) globally for more consistent scoring, or locally, for lower latency. The default is 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global scoring statistics can increase latency of search queries. Known values are: \"local\" and \"global\".""" - session_id: Optional[str] = rest_field(name="sessionId") + session_id: Optional[str] = rest_field(name="sessionId", visibility=["read", "create", "update", "delete", "query"]) """A value to be used to create a sticky session, which can help getting more consistent results. As long as the same sessionId is used, a best-effort attempt will be made to target the same replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the load balancing of the requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character.""" - scoring_parameters: Optional[List[str]] = rest_field(name="scoringParameters") + scoring_parameters: Optional[List[str]] = rest_field( + name="scoringParameters", visibility=["read", "create", "update", "delete", "query"] + ) """The list of parameter values to be used in scoring functions (for example, referencePointParameter) using the format name-values. For example, if the scoring profile defines a function with a parameter called 'mylocation' the parameter string would be \"mylocation--122.2,44.8\" (without the quotes).""" - scoring_profile: Optional[str] = rest_field(name="scoringProfile") + scoring_profile: Optional[str] = rest_field( + name="scoringProfile", visibility=["read", "create", "update", "delete", "query"] + ) """The name of a scoring profile to evaluate match scores for matching documents in order to sort the results.""" - debug: Optional[Union[str, "_models.QueryDebugMode"]] = rest_field() + debug: Optional[Union[str, "_models.QueryDebugMode"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Enables a debugging tool that can be used to further explore your reranked results. Known values are: \"disabled\", \"semantic\", \"vector\", \"queryRewrites\", and \"all\".""" - search_text: Optional[str] = rest_field(name="search") + search_text: Optional[str] = rest_field(name="search", visibility=["read", "create", "update", "delete", "query"]) """A full-text search query expression; Use \"*\" or omit this parameter to match all documents.""" - search_fields: Optional[str] = rest_field(name="searchFields") + search_fields: Optional[str] = rest_field( + name="searchFields", visibility=["read", "create", "update", "delete", "query"] + ) """The comma-separated list of field names to which to scope the full-text search. 
When using fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each fielded search expression take precedence over any field names listed in this parameter.""" - search_mode: Optional[Union[str, "_models.SearchMode"]] = rest_field(name="searchMode") + search_mode: Optional[Union[str, "_models.SearchMode"]] = rest_field( + name="searchMode", visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies whether any or all of the search terms must be matched in order to count the document as a match. Known values are: \"any\" and \"all\".""" - query_language: Optional[Union[str, "_models.QueryLanguage"]] = rest_field(name="queryLanguage") + query_language: Optional[Union[str, "_models.QueryLanguage"]] = rest_field( + name="queryLanguage", visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies the language of the search query. Known values are: \"none\", \"en-us\", \"en-gb\", \"en-in\", \"en-ca\", \"en-au\", \"fr-fr\", \"fr-ca\", \"de-de\", \"es-es\", \"es-mx\", \"zh-cn\", \"zh-tw\", \"pt-br\", \"pt-pt\", \"it-it\", \"ja-jp\", \"ko-kr\", @@ -8985,58 +9774,80 @@ class SearchRequest(_model_base.Model): \"et-ee\", \"ca-es\", \"fi-fi\", \"sr-ba\", \"sr-me\", \"sr-rs\", \"sk-sk\", \"nb-no\", \"hy-am\", \"bn-in\", \"eu-es\", \"gl-es\", \"gu-in\", \"he-il\", \"ga-ie\", \"kn-in\", \"ml-in\", \"mr-in\", \"fa-ae\", \"pa-in\", \"te-in\", and \"ur-pk\".""" - speller: Optional[Union[str, "_models.QuerySpellerType"]] = rest_field() + speller: Optional[Union[str, "_models.QuerySpellerType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A value that specified the type of the speller to use to spell-correct individual search query terms. Known values are: \"none\" and \"lexicon\".""" - select: Optional[str] = rest_field() + select: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The comma-separated list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included.""" - skip: Optional[int] = rest_field() + skip: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use skip due to this limitation, consider using orderby on a totally-ordered key and filter with a range query instead.""" - top: Optional[int] = rest_field() + top: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. 
If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results.""" - semantic_configuration: Optional[str] = rest_field(name="semanticConfiguration") + semantic_configuration: Optional[str] = rest_field( + name="semanticConfiguration", visibility=["read", "create", "update", "delete", "query"] + ) """The name of a semantic configuration that will be used when processing documents for queries of type semantic.""" semantic_error_handling: Optional[Union[str, "_models.SemanticErrorMode"]] = rest_field( - name="semanticErrorHandling" + name="semanticErrorHandling", visibility=["read", "create", "update", "delete", "query"] ) """Allows the user to choose whether a semantic call should fail completely (default / current behavior), or to return partial results. Known values are: \"partial\" and \"fail\".""" - semantic_max_wait_in_milliseconds: Optional[int] = rest_field(name="semanticMaxWaitInMilliseconds") + semantic_max_wait_in_milliseconds: Optional[int] = rest_field( + name="semanticMaxWaitInMilliseconds", visibility=["read", "create", "update", "delete", "query"] + ) """Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish processing before the request fails.""" - semantic_query: Optional[str] = rest_field(name="semanticQuery") + semantic_query: Optional[str] = rest_field( + name="semanticQuery", visibility=["read", "create", "update", "delete", "query"] + ) """Allows setting a separate search query that will be solely used for semantic reranking, semantic captions and semantic answers. Is useful for scenarios where there is a need to use different queries between the base retrieval and ranking phase, and the L2 semantic phase.""" - answers: Optional[Union[str, "_models.QueryAnswerType"]] = rest_field() + answers: Optional[Union[str, "_models.QueryAnswerType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies whether answers should be returned as part of the search response. Known values are: \"none\" and \"extractive\".""" - captions: Optional[Union[str, "_models.QueryCaptionType"]] = rest_field() + captions: Optional[Union[str, "_models.QueryCaptionType"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies whether captions should be returned as part of the search response. Known values are: \"none\" and \"extractive\".""" - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field(name="queryRewrites") + query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field( + name="queryRewrites", visibility=["read", "create", "update", "delete", "query"] + ) """A value that specifies whether query rewrites should be generated to augment the search query. 
Known values are: \"none\" and \"generative\".""" - semantic_fields: Optional[str] = rest_field(name="semanticFields") + semantic_fields: Optional[str] = rest_field( + name="semanticFields", visibility=["read", "create", "update", "delete", "query"] + ) """The comma-separated list of field names used for semantic ranking.""" - vector_queries: Optional[List["_models.VectorQuery"]] = rest_field(name="vectorQueries") + vector_queries: Optional[List["_models.VectorQuery"]] = rest_field( + name="vectorQueries", visibility=["read", "create", "update", "delete", "query"] + ) """The query parameters for vector and hybrid search queries.""" - vector_filter_mode: Optional[Union[str, "_models.VectorFilterMode"]] = rest_field(name="vectorFilterMode") + vector_filter_mode: Optional[Union[str, "_models.VectorFilterMode"]] = rest_field( + name="vectorFilterMode", visibility=["read", "create", "update", "delete", "query"] + ) """Determines whether or not filters are applied before or after the vector search is performed. Default is 'preFilter' for new indexes. Known values are: \"postFilter\" and \"preFilter\".""" - hybrid_search: Optional["_models.HybridSearch"] = rest_field(name="hybridSearch") + hybrid_search: Optional["_models.HybridSearch"] = rest_field( + name="hybridSearch", visibility=["read", "create", "update", "delete", "query"] + ) """The query parameters to configure hybrid search behaviors.""" @overload @@ -9118,20 +9929,22 @@ class SearchResourceEncryptionKey(_model_base.Model): :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity """ - key_name: str = rest_field(name="keyVaultKeyName") + key_name: str = rest_field(name="keyVaultKeyName", visibility=["read", "create", "update", "delete", "query"]) """The name of your Azure Key Vault key to be used to encrypt your data at rest. Required.""" - key_version: str = rest_field(name="keyVaultKeyVersion") + key_version: str = rest_field(name="keyVaultKeyVersion", visibility=["read", "create", "update", "delete", "query"]) """The version of your Azure Key Vault key to be used to encrypt your data at rest. Required.""" - vault_uri: str = rest_field(name="keyVaultUri") + vault_uri: str = rest_field(name="keyVaultUri", visibility=["read", "create", "update", "delete", "query"]) """The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be ``https://my-keyvault-name.vault.azure.net``. Required.""" access_credentials: Optional["_models.AzureActiveDirectoryApplicationCredentials"] = rest_field( - name="accessCredentials" + name="accessCredentials", visibility=["read", "create", "update", "delete", "query"] ) """Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead.""" - identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field() + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """An explicit managed identity to use for this encryption key. If not specified and the access credentials property is null, the system-assigned managed identity is used. 
On update to the resource, if the explicit identity is @@ -9188,18 +10001,24 @@ class SearchResult(_model_base.Model): :vartype document_debug_info: list[~azure.search.documents.models.DocumentDebugInfo] """ - score: float = rest_field(name="@search.score", visibility=["read"]) + score: float = rest_field(name="@search.score", visibility=["read", "create", "update", "delete", "query"]) """The relevance score of the document compared to other documents returned by the query. Required.""" - reranker_score: Optional[float] = rest_field(name="@search.rerankerScore", visibility=["read"]) + reranker_score: Optional[float] = rest_field( + name="@search.rerankerScore", visibility=["read", "create", "update", "delete", "query"] + ) """The relevance score computed by the semantic ranker for the top search results. Search results are sorted by the RerankerScore first and then by the Score. RerankerScore is only returned for queries of type 'semantic'.""" - highlights: Optional[Dict[str, List[str]]] = rest_field(name="@search.highlights", visibility=["read"]) + highlights: Optional[Dict[str, List[str]]] = rest_field( + name="@search.highlights", visibility=["read", "create", "update", "delete", "query"] + ) """Text fragments from the document that indicate the matching search terms, organized by each applicable field; null if hit highlighting was not enabled for the query.""" - captions: Optional[List["_models.QueryCaptionResult"]] = rest_field(name="@search.captions", visibility=["read"]) + captions: Optional[List["_models.QueryCaptionResult"]] = rest_field( + name="@search.captions", visibility=["read", "create", "update", "delete", "query"] + ) """Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'.""" @@ -9209,6 +10028,26 @@ class SearchResult(_model_base.Model): """Contains debugging information that can be used to further explore your search results.""" + @overload + def __init__( + self, + *, + score: float, + reranker_score: Optional[float] = None, + highlights: Optional[Dict[str, List[str]]] = None, + captions: Optional[List["_models.QueryCaptionResult"]] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class VectorThreshold(_model_base.Model): """The threshold used for vector queries. @@ -9223,7 +10062,7 @@ class VectorThreshold(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - kind: str = rest_discriminator(name="kind") + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """Type of threshold. Required. Known values are: \"vectorSimilarity\" and \"searchScore\".""" @overload @@ -9261,11 +10100,11 @@ class SearchScoreThreshold(VectorThreshold, discriminator="searchScore"): :vartype kind: str or ~azure.search.documents.models.SEARCH_SCORE """ - value: float = rest_field() + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The threshold will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The threshold direction will be chosen for higher @search.score. 
Required.""" - kind: Literal[VectorThresholdKind.SEARCH_SCORE] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorThresholdKind.SEARCH_SCORE] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The kind of threshold used to filter vector queries. Required. The results of the vector query will filter based on the '@search.score' value. Note this is the @search.score returned as part of the search response. The @@ -9293,6 +10132,8 @@ class SearchServiceCounters(_model_base.Model): """Represents service-level resource counters and quotas. + :ivar alias_counter: Total number of aliases. Required. + :vartype alias_counter: ~azure.search.documents.models.ResourceCounter :ivar document_counter: Total number of documents across all indexes in the service. Required. :vartype document_counter: ~azure.search.documents.models.ResourceCounter :ivar index_counter: Total number of indexes. Required. @@ -9312,27 +10153,48 @@ class SearchServiceCounters(_model_base.Model): :vartype vector_index_size_counter: ~azure.search.documents.models.ResourceCounter """ - document_counter: "_models.ResourceCounter" = rest_field(name="documentCount") + alias_counter: "_models.ResourceCounter" = rest_field( + name="aliasesCount", visibility=["read", "create", "update", "delete", "query"] + ) + """Total number of aliases. Required.""" + document_counter: "_models.ResourceCounter" = rest_field( + name="documentCount", visibility=["read", "create", "update", "delete", "query"] + ) """Total number of documents across all indexes in the service. Required.""" - index_counter: "_models.ResourceCounter" = rest_field(name="indexesCount") + index_counter: "_models.ResourceCounter" = rest_field( + name="indexesCount", visibility=["read", "create", "update", "delete", "query"] + ) """Total number of indexes. Required.""" - indexer_counter: "_models.ResourceCounter" = rest_field(name="indexersCount") + indexer_counter: "_models.ResourceCounter" = rest_field( + name="indexersCount", visibility=["read", "create", "update", "delete", "query"] + ) """Total number of indexers. Required.""" - data_source_counter: "_models.ResourceCounter" = rest_field(name="dataSourcesCount") + data_source_counter: "_models.ResourceCounter" = rest_field( + name="dataSourcesCount", visibility=["read", "create", "update", "delete", "query"] + ) """Total number of data sources. Required.""" - storage_size_counter: "_models.ResourceCounter" = rest_field(name="storageSize") + storage_size_counter: "_models.ResourceCounter" = rest_field( + name="storageSize", visibility=["read", "create", "update", "delete", "query"] + ) """Total size of used storage in bytes. Required.""" - synonym_map_counter: "_models.ResourceCounter" = rest_field(name="synonymMaps") + synonym_map_counter: "_models.ResourceCounter" = rest_field( + name="synonymMaps", visibility=["read", "create", "update", "delete", "query"] + ) """Total number of synonym maps. Required.""" - skillset_counter: "_models.ResourceCounter" = rest_field(name="skillsetCount") + skillset_counter: "_models.ResourceCounter" = rest_field( + name="skillsetCount", visibility=["read", "create", "update", "delete", "query"] + ) """Total number of skillsets. 
Required.""" - vector_index_size_counter: "_models.ResourceCounter" = rest_field(name="vectorIndexSize") + vector_index_size_counter: "_models.ResourceCounter" = rest_field( + name="vectorIndexSize", visibility=["read", "create", "update", "delete", "query"] + ) """Total memory consumption of all vector indexes within the service, in bytes. Required.""" @overload def __init__( self, *, + alias_counter: "_models.ResourceCounter", document_counter: "_models.ResourceCounter", index_counter: "_models.ResourceCounter", indexer_counter: "_models.ResourceCounter", @@ -9374,19 +10236,27 @@ class SearchServiceLimits(_model_base.Model): :vartype max_storage_per_index_in_bytes: int """ - max_fields_per_index: Optional[int] = rest_field(name="maxFieldsPerIndex") + max_fields_per_index: Optional[int] = rest_field( + name="maxFieldsPerIndex", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum allowed fields per index.""" - max_field_nesting_depth_per_index: Optional[int] = rest_field(name="maxFieldNestingDepthPerIndex") + max_field_nesting_depth_per_index: Optional[int] = rest_field( + name="maxFieldNestingDepthPerIndex", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum depth which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3.""" - max_complex_collection_fields_per_index: Optional[int] = rest_field(name="maxComplexCollectionFieldsPerIndex") + max_complex_collection_fields_per_index: Optional[int] = rest_field( + name="maxComplexCollectionFieldsPerIndex", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index.""" max_complex_objects_in_collections_per_document: Optional[int] = rest_field( - name="maxComplexObjectsInCollectionsPerDocument" + name="maxComplexObjectsInCollectionsPerDocument", visibility=["read", "create", "update", "delete", "query"] ) """The maximum number of objects in complex collections allowed per document.""" - max_storage_per_index_in_bytes: Optional[int] = rest_field(name="maxStoragePerIndex") + max_storage_per_index_in_bytes: Optional[int] = rest_field( + name="maxStoragePerIndex", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum amount of storage in bytes allowed per index.""" @overload @@ -9422,9 +10292,9 @@ class SearchServiceStatistics(_model_base.Model): :vartype limits: ~azure.search.documents.models.SearchServiceLimits """ - counters: "_models.SearchServiceCounters" = rest_field() + counters: "_models.SearchServiceCounters" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Service level resource counters. Required.""" - limits: "_models.SearchServiceLimits" = rest_field() + limits: "_models.SearchServiceLimits" = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Service level general limits. Required.""" @overload @@ -9462,12 +10332,16 @@ class SearchSuggester(_model_base.Model): :vartype source_fields: list[str] """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the suggester. Required.""" - search_mode: Literal["analyzingInfixMatching"] = rest_field(name="searchMode") + search_mode: Literal["analyzingInfixMatching"] = rest_field( + name="searchMode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating the capabilities of the suggester. Required. 
Default value is \"analyzingInfixMatching\".""" - source_fields: List[str] = rest_field(name="sourceFields") + source_fields: List[str] = rest_field( + name="sourceFields", visibility=["read", "create", "update", "delete", "query"] + ) """The list of field names to which the suggester applies. Each field must be searchable. Required.""" @@ -9506,9 +10380,11 @@ class SemanticConfiguration(_model_base.Model): :vartype prioritized_fields: ~azure.search.documents.models.SemanticPrioritizedFields """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the semantic configuration. Required.""" - prioritized_fields: "_models.SemanticPrioritizedFields" = rest_field(name="prioritizedFields") + prioritized_fields: "_models.SemanticPrioritizedFields" = rest_field( + name="prioritizedFields", visibility=["read", "create", "update", "delete", "query"] + ) """Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) @@ -9583,7 +10459,7 @@ class SemanticField(_model_base.Model): :vartype field_name: str """ - field_name: str = rest_field(name="fieldName") + field_name: str = rest_field(name="fieldName", visibility=["read", "create", "update", "delete", "query"]) """File name. Required.""" @overload @@ -9626,16 +10502,22 @@ class SemanticPrioritizedFields(_model_base.Model): :vartype keywords_fields: list[~azure.search.documents.models.SemanticField] """ - title_field: Optional["_models.SemanticField"] = rest_field(name="titleField") + title_field: Optional["_models.SemanticField"] = rest_field( + name="titleField", visibility=["read", "create", "update", "delete", "query"] + ) """Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank.""" - content_fields: Optional[List["_models.SemanticField"]] = rest_field(name="prioritizedContentFields") + content_fields: Optional[List["_models.SemanticField"]] = rest_field( + name="prioritizedContentFields", visibility=["read", "create", "update", "delete", "query"] + ) """Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long.""" - keywords_fields: Optional[List["_models.SemanticField"]] = rest_field(name="prioritizedKeywordsFields") + keywords_fields: Optional[List["_models.SemanticField"]] = rest_field( + name="prioritizedKeywordsFields", visibility=["read", "create", "update", "delete", "query"] + ) """Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. 
The order of the fields in the array represents @@ -9673,10 +10555,14 @@ class SemanticSearch(_model_base.Model): :vartype configurations: list[~azure.search.documents.models.SemanticConfiguration] """ - default_configuration_name: Optional[str] = rest_field(name="defaultConfiguration") + default_configuration_name: Optional[str] = rest_field( + name="defaultConfiguration", visibility=["read", "create", "update", "delete", "query"] + ) """Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time.""" - configurations: Optional[List["_models.SemanticConfiguration"]] = rest_field() + configurations: Optional[List["_models.SemanticConfiguration"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The semantic configurations for the index.""" @overload @@ -9730,12 +10616,12 @@ class SentimentSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.S """ default_language_code: Optional[Union[str, "_models.SentimentSkillLanguage"]] = rest_field( - name="defaultLanguageCode" + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) """A value indicating which language code to use. Default is ``en``. Known values are: \"da\", \"nl\", \"en\", \"fi\", \"fr\", \"de\", \"el\", \"it\", \"no\", \"pl\", \"pt-PT\", \"ru\", \"es\", \"sv\", and \"tr\".""" - odata_type: Literal["#Microsoft.Skills.Text.SentimentSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.SentimentSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.SentimentSkill\".""" @@ -9804,17 +10690,23 @@ class SentimentSkillV3(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text :vartype odata_type: str """ - default_language_code: Optional[str] = rest_field(name="defaultLanguageCode") + default_language_code: Optional[str] = rest_field( + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating which language code to use. Default is ``en``.""" - include_opinion_mining: Optional[bool] = rest_field(name="includeOpinionMining") + include_opinion_mining: Optional[bool] = rest_field( + name="includeOpinionMining", visibility=["read", "create", "update", "delete", "query"] + ) """If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false.""" - model_version: Optional[str] = rest_field(name="modelVersion") + model_version: Optional[str] = rest_field( + name="modelVersion", visibility=["read", "create", "update", "delete", "query"] + ) """The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary.""" - odata_type: Literal["#Microsoft.Skills.Text.V3.SentimentSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.V3.SentimentSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. 
Default value is \"#Microsoft.Skills.Text.V3.SentimentSkill\".""" @@ -9871,7 +10763,7 @@ class ShaperSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Util.Shap :vartype odata_type: str """ - odata_type: Literal["#Microsoft.Skills.Util.ShaperSkill"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Skills.Util.ShaperSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Util.ShaperSkill\".""" @@ -9934,25 +10826,37 @@ class ShingleTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Shi :vartype odata_type: str """ - max_shingle_size: Optional[int] = rest_field(name="maxShingleSize") + max_shingle_size: Optional[int] = rest_field( + name="maxShingleSize", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum shingle size. Default and minimum value is 2.""" - min_shingle_size: Optional[int] = rest_field(name="minShingleSize") + min_shingle_size: Optional[int] = rest_field( + name="minShingleSize", visibility=["read", "create", "update", "delete", "query"] + ) """The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize.""" - output_unigrams: Optional[bool] = rest_field(name="outputUnigrams") + output_unigrams: Optional[bool] = rest_field( + name="outputUnigrams", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true.""" - output_unigrams_if_no_shingles: Optional[bool] = rest_field(name="outputUnigramsIfNoShingles") + output_unigrams_if_no_shingles: Optional[bool] = rest_field( + name="outputUnigramsIfNoShingles", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false.""" - token_separator: Optional[str] = rest_field(name="tokenSeparator") + token_separator: Optional[str] = rest_field( + name="tokenSeparator", visibility=["read", "create", "update", "delete", "query"] + ) """The string to use when joining adjacent tokens to form a shingle. Default is a single space (\" \").""" - filter_token: Optional[str] = rest_field(name="filterToken") + filter_token: Optional[str] = rest_field( + name="filterToken", visibility=["read", "create", "update", "delete", "query"] + ) """The string to insert for each position at which there is no token. Default is an underscore (\"_\").""" - odata_type: Literal["#Microsoft.Azure.Search.ShingleTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.ShingleTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. 
Default value is \"#Microsoft.Azure.Search.ShingleTokenFilter\".""" @@ -10013,7 +10917,9 @@ class SkillNames(_model_base.Model): :vartype skill_names: list[str] """ - skill_names: Optional[List[str]] = rest_field(name="skillNames") + skill_names: Optional[List[str]] = rest_field( + name="skillNames", visibility=["read", "create", "update", "delete", "query"] + ) """the names of skills to be reset.""" @overload @@ -10053,12 +10959,14 @@ class SnowballTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Sn :vartype odata_type: str """ - language: Union[str, "_models.SnowballTokenFilterLanguage"] = rest_field() + language: Union[str, "_models.SnowballTokenFilterLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The language to use. Required. Known values are: \"armenian\", \"basque\", \"catalan\", \"danish\", \"dutch\", \"english\", \"finnish\", \"french\", \"german\", \"german2\", \"hungarian\", \"italian\", \"kp\", \"lovins\", \"norwegian\", \"porter\", \"portuguese\", \"romanian\", \"russian\", \"spanish\", \"swedish\", and \"turkish\".""" - odata_type: Literal["#Microsoft.Azure.Search.SnowballTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.SnowballTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.SnowballTokenFilter\".""" @@ -10098,11 +11006,15 @@ class SoftDeleteColumnDeletionDetectionPolicy( :vartype odata_type: str """ - soft_delete_column_name: Optional[str] = rest_field(name="softDeleteColumnName") + soft_delete_column_name: Optional[str] = rest_field( + name="softDeleteColumnName", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the column to use for soft-deletion detection.""" - soft_delete_marker_value: Optional[str] = rest_field(name="softDeleteMarkerValue") + soft_delete_marker_value: Optional[str] = rest_field( + name="softDeleteMarkerValue", visibility=["read", "create", "update", "delete", "query"] + ) """The marker value that identifies an item as deleted.""" - odata_type: Literal["#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of data deletion detection policy. Required. Default value is \"#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy\".""" @@ -10163,8 +11075,8 @@ class SplitSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.Split :vartype page_overlap_length: int :ivar maximum_pages_to_take: Only applicable when textSplitMode is set to 'pages'. If specified, the - SplitSkill will discontinue splitting after processing the first - 'maximumPagesToTake' pages, in order to improve performance when only a few + SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' + pages, in order to improve performance when only a few initial pages are needed from each document. :vartype maximum_pages_to_take: int :ivar unit: Only applies if textSplitMode is set to pages. There are two possible values. 
@@ -10175,8 +11087,7 @@ class SplitSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.Split :ivar azure_open_ai_tokenizer_parameters: Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The - parameters are a valid 'encoderModelName' and an optional - 'allowedSpecialTokens' property. + parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. :vartype azure_open_ai_tokenizer_parameters: ~azure.search.documents.models.AzureOpenAITokenizerParameters :ivar odata_type: A URI fragment specifying the type of skill. Required. Default value is @@ -10184,37 +11095,48 @@ class SplitSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Text.Split :vartype odata_type: str """ - default_language_code: Optional[Union[str, "_models.SplitSkillLanguage"]] = rest_field(name="defaultLanguageCode") + default_language_code: Optional[Union[str, "_models.SplitSkillLanguage"]] = rest_field( + name="defaultLanguageCode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating which language code to use. Default is ``en``. Known values are: \"am\", \"bs\", \"cs\", \"da\", \"de\", \"en\", \"es\", \"et\", \"fi\", \"fr\", \"he\", \"hi\", \"hr\", \"hu\", \"id\", \"is\", \"it\", \"ja\", \"ko\", \"lv\", \"nb\", \"nl\", \"pl\", \"pt\", \"pt-br\", \"ru\", \"sk\", \"sl\", \"sr\", \"sv\", \"tr\", \"ur\", and \"zh\".""" - text_split_mode: Optional[Union[str, "_models.TextSplitMode"]] = rest_field(name="textSplitMode") + text_split_mode: Optional[Union[str, "_models.TextSplitMode"]] = rest_field( + name="textSplitMode", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating which split mode to perform. Known values are: \"pages\" and \"sentences\".""" - maximum_page_length: Optional[int] = rest_field(name="maximumPageLength") + maximum_page_length: Optional[int] = rest_field( + name="maximumPageLength", visibility=["read", "create", "update", "delete", "query"] + ) """The desired maximum page length. Default is 10000.""" - page_overlap_length: Optional[int] = rest_field(name="pageOverlapLength") + page_overlap_length: Optional[int] = rest_field( + name="pageOverlapLength", visibility=["read", "create", "update", "delete", "query"] + ) """Only applicable when textSplitMode is set to 'pages'. If specified, n+1th chunk will start with this number of characters/tokens from the end of the nth chunk.""" - maximum_pages_to_take: Optional[int] = rest_field(name="maximumPagesToTake") + maximum_pages_to_take: Optional[int] = rest_field( + name="maximumPagesToTake", visibility=["read", "create", "update", "delete", "query"] + ) """Only applicable when textSplitMode is set to 'pages'. If specified, the - SplitSkill will discontinue splitting after processing the first - 'maximumPagesToTake' pages, in order to improve performance when only a few + SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' + pages, in order to improve performance when only a few initial pages are needed from each document.""" - unit: Optional[Union[str, "_models.SplitSkillUnit"]] = rest_field() + unit: Optional[Union[str, "_models.SplitSkillUnit"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Only applies if textSplitMode is set to pages. There are two possible values. The choice of the values will decide the length (maximumPageLength and pageOverlapLength) measurement. 
The default is 'characters', which means the length will be measured by character. Known values are: \"characters\" and \"azureOpenAITokens\".""" azure_open_ai_tokenizer_parameters: Optional["_models.AzureOpenAITokenizerParameters"] = rest_field( - name="azureOpenAITokenizerParameters" + name="azureOpenAITokenizerParameters", visibility=["read", "create", "update", "delete", "query"] ) """Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The - parameters are a valid 'encoderModelName' and an optional - 'allowedSpecialTokens' property.""" - odata_type: Literal["#Microsoft.Skills.Text.SplitSkill"] = rest_discriminator(name="@odata.type") # type: ignore + parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property.""" + odata_type: Literal["#Microsoft.Skills.Text.SplitSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.SplitSkill\".""" @@ -10259,7 +11181,7 @@ class SqlIntegratedChangeTrackingPolicy( :vartype odata_type: str """ - odata_type: Literal["#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of data change detection policy. Required. Default value is \"#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy\".""" @@ -10299,10 +11221,10 @@ class StemmerOverrideTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Se :vartype odata_type: str """ - rules: List[str] = rest_field() + rules: List[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of stemming rules in the following format: \"word => stem\", for example: \"ran => run\". Required.""" - odata_type: Literal["#Microsoft.Azure.Search.StemmerOverrideTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.StemmerOverrideTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.StemmerOverrideTokenFilter\".""" @@ -10349,7 +11271,9 @@ class StemmerTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Ste :vartype odata_type: str """ - language: Union[str, "_models.StemmerTokenFilterLanguage"] = rest_field() + language: Union[str, "_models.StemmerTokenFilterLanguage"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """The language to use. Required. 
Known values are: \"arabic\", \"armenian\", \"basque\", \"brazilian\", \"bulgarian\", \"catalan\", \"czech\", \"danish\", \"dutch\", \"dutchKp\", \"english\", \"lightEnglish\", \"minimalEnglish\", \"possessiveEnglish\", \"porter2\", @@ -10361,7 +11285,7 @@ class StemmerTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Ste \"lightPortuguese\", \"minimalPortuguese\", \"portugueseRslp\", \"romanian\", \"russian\", \"lightRussian\", \"spanish\", \"lightSpanish\", \"swedish\", \"lightSwedish\", and \"turkish\".""" - odata_type: Literal["#Microsoft.Azure.Search.StemmerTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.StemmerTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.StemmerTokenFilter\".""" @@ -10400,9 +11324,9 @@ class StopAnalyzer(LexicalAnalyzer, discriminator="#Microsoft.Azure.Search.StopA :vartype odata_type: str """ - stopwords: Optional[List[str]] = rest_field() + stopwords: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of stopwords.""" - odata_type: Literal["#Microsoft.Azure.Search.StopAnalyzer"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Azure.Search.StopAnalyzer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of analyzer. Required. Default value is \"#Microsoft.Azure.Search.StopAnalyzer\".""" @@ -10458,10 +11382,12 @@ class StopwordsTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.S :vartype odata_type: str """ - stopwords: Optional[List[str]] = rest_field() + stopwords: Optional[List[str]] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The list of stopwords. This property and the stopwords list property cannot both be set.""" - stopwords_list: Optional[Union[str, "_models.StopwordsList"]] = rest_field(name="stopwordsList") + stopwords_list: Optional[Union[str, "_models.StopwordsList"]] = rest_field( + name="stopwordsList", visibility=["read", "create", "update", "delete", "query"] + ) """A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. Known values are: \"arabic\", \"armenian\", \"basque\", \"brazilian\", \"bulgarian\", \"catalan\", \"czech\", \"danish\", \"dutch\", \"english\", @@ -10469,13 +11395,17 @@ class StopwordsTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.S \"indonesian\", \"irish\", \"italian\", \"latvian\", \"norwegian\", \"persian\", \"portuguese\", \"romanian\", \"russian\", \"sorani\", \"spanish\", \"swedish\", \"thai\", and \"turkish\".""" - ignore_case: Optional[bool] = rest_field(name="ignoreCase") + ignore_case: Optional[bool] = rest_field( + name="ignoreCase", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to ignore case. If true, all words are converted to lower case first. 
Default is false.""" - remove_trailing_stop_words: Optional[bool] = rest_field(name="removeTrailing") + remove_trailing_stop_words: Optional[bool] = rest_field( + name="removeTrailing", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to ignore the last search term if it's a stop word. Default is true.""" - odata_type: Literal["#Microsoft.Azure.Search.StopwordsTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.StopwordsTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.StopwordsTokenFilter\".""" @@ -10504,8 +11434,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class SuggestDocumentsResult(_model_base.Model): """Response containing suggestion query results from an index. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar results: The sequence of results returned by the query. Required. :vartype results: list[~azure.search.documents.models.SuggestResult] @@ -10514,12 +11442,34 @@ class SuggestDocumentsResult(_model_base.Model): :vartype coverage: float """ - results: List["_models.SuggestResult"] = rest_field(name="value", visibility=["read"]) + results: List["_models.SuggestResult"] = rest_field( + name="value", visibility=["read", "create", "update", "delete", "query"] + ) """The sequence of results returned by the query. Required.""" - coverage: Optional[float] = rest_field(name="@search.coverage", visibility=["read"]) + coverage: Optional[float] = rest_field( + name="@search.coverage", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating the percentage of the index that was included in the query, or null if minimumCoverage was not set in the request.""" + @overload + def __init__( + self, + *, + results: List["_models.SuggestResult"], + coverage: Optional[float] = None, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class SuggestRequest(_model_base.Model): """Parameters for filtering, sorting, fuzzy matching, and other suggestions query @@ -10575,26 +11525,34 @@ class SuggestRequest(_model_base.Model): :vartype top: int """ - filter: Optional[str] = rest_field() + filter: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """An OData expression that filters the documents considered for suggestions.""" - use_fuzzy_matching: Optional[bool] = rest_field(name="fuzzy") + use_fuzzy_matching: Optional[bool] = rest_field( + name="fuzzy", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to use fuzzy matching for the suggestion query. Default is false. When set to true, the query will find suggestions even if there's a substituted or missing character in the search text. 
While this provides a better experience in some scenarios, it comes at a performance cost as fuzzy suggestion searches are slower and consume more resources.""" - highlight_post_tag: Optional[str] = rest_field(name="highlightPostTag") + highlight_post_tag: Optional[str] = rest_field( + name="highlightPostTag", visibility=["read", "create", "update", "delete", "query"] + ) """A string tag that is appended to hit highlights. Must be set with highlightPreTag. If omitted, hit highlighting of suggestions is disabled.""" - highlight_pre_tag: Optional[str] = rest_field(name="highlightPreTag") + highlight_pre_tag: Optional[str] = rest_field( + name="highlightPreTag", visibility=["read", "create", "update", "delete", "query"] + ) """A string tag that is prepended to hit highlights. Must be set with highlightPostTag. If omitted, hit highlighting of suggestions is disabled.""" - minimum_coverage: Optional[float] = rest_field(name="minimumCoverage") + minimum_coverage: Optional[float] = rest_field( + name="minimumCoverage", visibility=["read", "create", "update", "delete", "query"] + ) """A number between 0 and 100 indicating the percentage of the index that must be covered by a suggestion query in order for the query to be reported as a success. This parameter can be useful for ensuring search availability even for services with only one replica. The default is 80.""" - order_by: Optional[str] = rest_field(name="orderby") + order_by: Optional[str] = rest_field(name="orderby", visibility=["read", "create", "update", "delete", "query"]) """The comma-separated list of OData $orderby expressions by which to sort the results. Each expression can be either a field name or a call to either the geo.distance() or the search.score() functions. Each expression can be followed @@ -10602,19 +11560,21 @@ class SuggestRequest(_model_base.Model): ascending order. Ties will be broken by the match scores of documents. If no $orderby is specified, the default sort order is descending by document match score. There can be at most 32 $orderby clauses.""" - search_text: str = rest_field(name="search") + search_text: str = rest_field(name="search", visibility=["read", "create", "update", "delete", "query"]) """The search text to use to suggest documents. Must be at least 1 character, and no more than 100 characters. Required.""" - search_fields: Optional[str] = rest_field(name="searchFields") + search_fields: Optional[str] = rest_field( + name="searchFields", visibility=["read", "create", "update", "delete", "query"] + ) """The comma-separated list of field names to search for the specified search text. Target fields must be included in the specified suggester.""" - select: Optional[str] = rest_field() + select: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The comma-separated list of fields to retrieve. If unspecified, only the key field will be included in the results.""" - suggester_name: str = rest_field(name="suggesterName") + suggester_name: str = rest_field(name="suggesterName", visibility=["read", "create", "update", "delete", "query"]) """The name of the suggester as specified in the suggesters collection that's part of the index definition. Required.""" - top: Optional[int] = rest_field() + top: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The number of suggestions to retrieve. This must be a value between 1 and 100. 
The default is 5.""" @@ -10650,16 +11610,31 @@ class SuggestResult(_model_base.Model): """A result containing a document found by a suggestion query, plus associated metadata. - Readonly variables are only populated by the server, and will be ignored when sending a request. - :ivar text: The text of the suggestion result. Required. :vartype text: str """ - text: str = rest_field(name="@search.text", visibility=["read"]) + text: str = rest_field(name="@search.text", visibility=["read", "create", "update", "delete", "query"]) """The text of the suggestion result. Required.""" + @overload + def __init__( + self, + *, + text: str, + ) -> None: ... + + @overload + def __init__(self, mapping: Mapping[str, Any]) -> None: + """ + :param mapping: raw JSON to initialize the model. + :type mapping: Mapping[str, Any] + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + class SynonymMap(_model_base.Model): """Represents a synonym map definition. @@ -10690,15 +11665,17 @@ class SynonymMap(_model_base.Model): :vartype e_tag: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name of the synonym map. Required.""" - format: Literal["solr"] = rest_field() + format: Literal["solr"] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The format of the synonym map. Only the 'solr' format is currently supported. Required. Default value is \"solr\".""" - synonyms: str = rest_field() + synonyms: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. Required.""" - encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field(name="encryptionKey") + encryption_key: Optional["_models.SearchResourceEncryptionKey"] = rest_field( + name="encryptionKey", visibility=["read", "create", "update", "delete", "query"] + ) """A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. @@ -10708,7 +11685,7 @@ class SynonymMap(_model_base.Model): unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019.""" - e_tag: Optional[str] = rest_field(name="@odata.etag") + e_tag: Optional[str] = rest_field(name="@odata.etag", visibility=["read", "create", "update", "delete", "query"]) """The ETag of the synonym map.""" @overload @@ -10765,15 +11742,17 @@ class SynonymTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Syn :vartype odata_type: str """ - synonyms: List[str] = rest_field() + synonyms: List[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A list of synonyms in following one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. 
Required.""" - ignore_case: Optional[bool] = rest_field(name="ignoreCase") + ignore_case: Optional[bool] = rest_field( + name="ignoreCase", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to case-fold input for matching. Default is false.""" - expand: Optional[bool] = rest_field() + expand: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, all words in the list of synonyms (if => notation is not used) will map to one another. The following list: @@ -10782,7 +11761,7 @@ class SynonymTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Syn If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true.""" - odata_type: Literal["#Microsoft.Azure.Search.SynonymTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.SynonymTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.SynonymTokenFilter\".""" @@ -10829,9 +11808,11 @@ class TagScoringFunction(ScoringFunction, discriminator="tag"): :vartype type: str """ - parameters: "_models.TagScoringParameters" = rest_field(name="tag") + parameters: "_models.TagScoringParameters" = rest_field( + name="tag", visibility=["read", "create", "update", "delete", "query"] + ) """Parameter values for the tag scoring function. Required.""" - type: Literal["tag"] = rest_discriminator(name="type") # type: ignore + type: Literal["tag"] = rest_discriminator(name="type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """Indicates the type of function to use. Valid values include magnitude, freshness, distance, and tag. The function type must be lower case. Required. Default value is \"tag\".""" @@ -10867,7 +11848,7 @@ class TagScoringParameters(_model_base.Model): :vartype tags_parameter: str """ - tags_parameter: str = rest_field(name="tagsParameter") + tags_parameter: str = rest_field(name="tagsParameter", visibility=["read", "create", "update", "delete", "query"]) """The name of the parameter passed in search queries to specify the list of tags to compare against the target field. Required.""" @@ -10960,7 +11941,7 @@ class TextTranslationSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills. """ default_to_language_code: Union[str, "_models.TextTranslationSkillLanguage"] = rest_field( - name="defaultToLanguageCode" + name="defaultToLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) """The language code to translate documents into for documents that don't specify the to language explicitly. Required. Known values are: \"af\", \"ar\", \"bn\", \"bs\", \"bg\", @@ -10972,7 +11953,7 @@ class TextTranslationSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills. 
\"ta\", \"te\", \"th\", \"to\", \"tr\", \"uk\", \"ur\", \"vi\", \"cy\", \"yua\", \"ga\", \"kn\", \"mi\", \"ml\", and \"pa\".""" default_from_language_code: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = rest_field( - name="defaultFromLanguageCode" + name="defaultFromLanguageCode", visibility=["read", "create", "update", "delete", "query"] ) """The language code to translate documents from for documents that don't specify the from language explicitly. Known values are: \"af\", \"ar\", \"bn\", \"bs\", \"bg\", @@ -10983,7 +11964,9 @@ class TextTranslationSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills. \"ro\", \"ru\", \"sm\", \"sr-Cyrl\", \"sr-Latn\", \"sk\", \"sl\", \"es\", \"sv\", \"ty\", \"ta\", \"te\", \"th\", \"to\", \"tr\", \"uk\", \"ur\", \"vi\", \"cy\", \"yua\", \"ga\", \"kn\", \"mi\", \"ml\", and \"pa\".""" - suggested_from: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = rest_field(name="suggestedFrom") + suggested_from: Optional[Union[str, "_models.TextTranslationSkillLanguage"]] = rest_field( + name="suggestedFrom", visibility=["read", "create", "update", "delete", "query"] + ) """The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is ``en``. Known values are: \"af\", \"ar\", @@ -10994,7 +11977,7 @@ class TextTranslationSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills. \"pt-br\", \"pt-PT\", \"otq\", \"ro\", \"ru\", \"sm\", \"sr-Cyrl\", \"sr-Latn\", \"sk\", \"sl\", \"es\", \"sv\", \"ty\", \"ta\", \"te\", \"th\", \"to\", \"tr\", \"uk\", \"ur\", \"vi\", \"cy\", \"yua\", \"ga\", \"kn\", \"mi\", \"ml\", and \"pa\".""" - odata_type: Literal["#Microsoft.Skills.Text.TranslationSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Text.TranslationSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Text.TranslationSkill\".""" @@ -11033,7 +12016,7 @@ class TextWeights(_model_base.Model): :vartype weights: dict[str, float] """ - weights: Dict[str, float] = rest_field() + weights: Dict[str, float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. Required.""" @@ -11071,9 +12054,9 @@ class TruncateTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Tr :vartype odata_type: str """ - length: Optional[int] = rest_field() + length: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The length at which terms will be truncated. Default and maximum is 300.""" - odata_type: Literal["#Microsoft.Azure.Search.TruncateTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.TruncateTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. 
Default value is \"#Microsoft.Azure.Search.TruncateTokenFilter\".""" @@ -11114,10 +12097,12 @@ class UaxUrlEmailTokenizer(LexicalTokenizer, discriminator="#Microsoft.Azure.Sea :vartype odata_type: str """ - max_token_length: Optional[int] = rest_field(name="maxTokenLength") + max_token_length: Optional[int] = rest_field( + name="maxTokenLength", visibility=["read", "create", "update", "delete", "query"] + ) """The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters.""" - odata_type: Literal["#Microsoft.Azure.Search.UaxUrlEmailTokenizer"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.UaxUrlEmailTokenizer"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of tokenizer. Required. Default value is \"#Microsoft.Azure.Search.UaxUrlEmailTokenizer\".""" @@ -11158,10 +12143,12 @@ class UniqueTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Search.Uniq :vartype odata_type: str """ - only_on_same_position: Optional[bool] = rest_field(name="onlyOnSamePosition") + only_on_same_position: Optional[bool] = rest_field( + name="onlyOnSamePosition", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to remove duplicates only at the same position. Default is false.""" - odata_type: Literal["#Microsoft.Azure.Search.UniqueTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.UniqueTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. Default value is \"#Microsoft.Azure.Search.UniqueTokenFilter\".""" @@ -11204,7 +12191,8 @@ class VectorQuery(_model_base.Model): :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + 'rerankWithOriginalVectors' + is true. This parameter is only permitted when a compression method is used on the underlying vector field. :vartype oversampling: float :ivar weight: Relative weight of the vector query when compared to other vector query and/or @@ -11229,21 +12217,24 @@ class VectorQuery(_model_base.Model): """ __mapping__: Dict[str, _model_base.Model] = {} - k_nearest_neighbors: Optional[int] = rest_field(name="k") + k_nearest_neighbors: Optional[int] = rest_field( + name="k", visibility=["read", "create", "update", "delete", "query"] + ) """Number of nearest neighbors to return as top hits.""" - fields: Optional[str] = rest_field() + fields: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Vector Fields of type Collection(Edm.Single) to be included in the vector searched.""" - exhaustive: Optional[bool] = rest_field() + exhaustive: Optional[bool] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """When true, triggers an exhaustive k-nearest neighbor search across all vectors within the vector index. 
Useful for scenarios where exact matches are critical, such as determining ground truth values.""" - oversampling: Optional[float] = rest_field() + oversampling: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + 'rerankWithOriginalVectors' + is true. This parameter is only permitted when a compression method is used on the underlying vector field.""" - weight: Optional[float] = rest_field() + weight: Optional[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """Relative weight of the vector query when compared to other vector query and/or the text query within the same search request. This value is used when combining the results of multiple ranking lists produced by the different @@ -11251,14 +12242,18 @@ class VectorQuery(_model_base.Model): the weight, the higher the documents that matched that query will be in the final ranking. Default is 1.0 and the value needs to be a positive number larger than zero.""" - threshold: Optional["_models.VectorThreshold"] = rest_field() - """The threshold used for vector queries. Note this can only be set if all - 'fields' use the same similarity metric.""" - filter_override: Optional[str] = rest_field(name="filterOverride") + threshold: Optional["_models.VectorThreshold"] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) + """The threshold used for vector queries. Note this can only be set if all 'fields' use the same + similarity metric.""" + filter_override: Optional[str] = rest_field( + name="filterOverride", visibility=["read", "create", "update", "delete", "query"] + ) """The OData filter expression to apply to this specific vector query. If no filter expression is defined at the vector level, the expression defined in the top level filter parameter is used instead.""" - kind: str = rest_discriminator(name="kind") + kind: str = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) """Type of query. Required. Known values are: \"vector\", \"text\", \"imageUrl\", and \"imageBinary\".""" @@ -11305,7 +12300,8 @@ class VectorizableImageBinaryQuery(VectorQuery, discriminator="imageBinary"): :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + 'rerankWithOriginalVectors' + is true. This parameter is only permitted when a compression method is used on the underlying vector field. 
:vartype oversampling: float :ivar weight: Relative weight of the vector query when compared to other vector query and/or @@ -11333,10 +12329,12 @@ class VectorizableImageBinaryQuery(VectorQuery, discriminator="imageBinary"): :vartype kind: str or ~azure.search.documents.models.IMAGE_BINARY """ - base64_image: Optional[str] = rest_field(name="base64Image") + base64_image: Optional[str] = rest_field( + name="base64Image", visibility=["read", "create", "update", "delete", "query"] + ) """The base 64 encoded binary of an image to be vectorized to perform a vector search query.""" - kind: Literal[VectorQueryKind.IMAGE_BINARY] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorQueryKind.IMAGE_BINARY] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The kind of vector query being performed. Required. Vector query where a base 64 encoded binary of an image that needs to be vectorized is provided.""" @@ -11384,7 +12382,8 @@ class VectorizableImageUrlQuery(VectorQuery, discriminator="imageUrl"): :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + 'rerankWithOriginalVectors' + is true. This parameter is only permitted when a compression method is used on the underlying vector field. :vartype oversampling: float :ivar weight: Relative weight of the vector query when compared to other vector query and/or @@ -11411,9 +12410,9 @@ class VectorizableImageUrlQuery(VectorQuery, discriminator="imageUrl"): :vartype kind: str or ~azure.search.documents.models.IMAGE_URL """ - url: Optional[str] = rest_field() + url: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The URL of an image to be vectorized to perform a vector search query.""" - kind: Literal[VectorQueryKind.IMAGE_URL] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorQueryKind.IMAGE_URL] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The kind of vector query being performed. Required. Vector query where an url that represents an image value that needs to be vectorized is provided.""" @@ -11461,7 +12460,8 @@ class VectorizableTextQuery(VectorQuery, discriminator="text"): :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + 'rerankWithOriginalVectors' + is true. This parameter is only permitted when a compression method is used on the underlying vector field. :vartype oversampling: float :ivar weight: Relative weight of the vector query when compared to other vector query and/or @@ -11491,12 +12491,14 @@ class VectorizableTextQuery(VectorQuery, discriminator="text"): :vartype kind: str or ~azure.search.documents.models.TEXT """ - text: str = rest_field() + text: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The text to be vectorized to perform a vector search query. 
Required.""" - query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field(name="queryRewrites") + query_rewrites: Optional[Union[str, "_models.QueryRewritesType"]] = rest_field( + name="queryRewrites", visibility=["read", "create", "update", "delete", "query"] + ) """Can be configured to let a generative model rewrite the query before sending it to be vectorized. Known values are: \"none\" and \"generative\".""" - kind: Literal[VectorQueryKind.TEXT] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorQueryKind.TEXT] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The kind of vector query being performed. Required. Vector query where a text value that needs to be vectorized is provided.""" @@ -11544,7 +12546,8 @@ class VectorizedQuery(VectorQuery, discriminator="vector"): :ivar oversampling: Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter configured in the index definition. It can be set only when - 'rerankWithOriginalVectors' is true. This parameter is only permitted when a + 'rerankWithOriginalVectors' + is true. This parameter is only permitted when a compression method is used on the underlying vector field. :vartype oversampling: float :ivar weight: Relative weight of the vector query when compared to other vector query and/or @@ -11570,9 +12573,9 @@ class VectorizedQuery(VectorQuery, discriminator="vector"): :vartype kind: str or ~azure.search.documents.models.VECTOR """ - vector: List[float] = rest_field() + vector: List[float] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The vector representation of a search query. Required.""" - kind: Literal[VectorQueryKind.VECTOR] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorQueryKind.VECTOR] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The kind of vector query being performed. Required. Vector query where a raw vector value is provided.""" @@ -11602,7 +12605,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class VectorsDebugInfo(_model_base.Model): - """Contains debugging information specific to vector and hybrid search. + """ "Contains debugging information specific to vector and hybrid search."). Readonly variables are only populated by the server, and will be ignored when sending a request. 
@@ -11632,14 +12635,22 @@ class VectorSearch(_model_base.Model): :vartype compressions: list[~azure.search.documents.models.VectorSearchCompression] """ - profiles: Optional[List["_models.VectorSearchProfile"]] = rest_field() + profiles: Optional[List["_models.VectorSearchProfile"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Defines combinations of configurations to use with vector search.""" - algorithms: Optional[List["_models.VectorSearchAlgorithmConfiguration"]] = rest_field() + algorithms: Optional[List["_models.VectorSearchAlgorithmConfiguration"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Contains configuration options specific to the algorithm used during indexing or querying.""" - vectorizers: Optional[List["_models.VectorSearchVectorizer"]] = rest_field() + vectorizers: Optional[List["_models.VectorSearchVectorizer"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Contains configuration options on how to vectorize text vector queries.""" - compressions: Optional[List["_models.VectorSearchCompression"]] = rest_field() + compressions: Optional[List["_models.VectorSearchCompression"]] = rest_field( + visibility=["read", "create", "update", "delete", "query"] + ) """Contains configuration options specific to the compression method used during indexing or querying.""" @@ -11683,14 +12694,20 @@ class VectorSearchProfile(_model_base.Model): :vartype compression_name: str """ - name: str = rest_field() + name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The name to associate with this particular vector search profile. Required.""" - algorithm_configuration_name: str = rest_field(name="algorithm") + algorithm_configuration_name: str = rest_field( + name="algorithm", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. Required.""" - vectorizer_name: Optional[str] = rest_field(name="vectorizer") + vectorizer_name: Optional[str] = rest_field( + name="vectorizer", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the vectorization being configured for use with vector search.""" - compression_name: Optional[str] = rest_field(name="compression") + compression_name: Optional[str] = rest_field( + name="compression", visibility=["read", "create", "update", "delete", "query"] + ) """The name of the compression method configuration that specifies the compression method and optional parameters.""" @@ -11717,8 +12734,8 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class VectorSimilarityThreshold(VectorThreshold, discriminator="vectorSimilarity"): """The results of the vector query will be filtered based on the vector similarity - metric. Note this is the canonical definition of similarity metric, not the - 'distance' version. The threshold direction (larger or smaller) will be chosen + metric. Note this is the canonical definition of similarity metric, not the 'distance' + version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. @@ -11729,22 +12746,22 @@ class VectorSimilarityThreshold(VectorThreshold, discriminator="vectorSimilarity :vartype value: float :ivar kind: The kind of threshold used to filter vector queries. Required. The results of the vector query will be filtered based on the vector similarity - metric. 
Note this is the canonical definition of similarity metric, not the - 'distance' version. The threshold direction (larger or smaller) will be chosen + metric. Note this is the canonical definition of similarity metric, not the 'distance' + version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. :vartype kind: str or ~azure.search.documents.models.VECTOR_SIMILARITY """ - value: float = rest_field() + value: float = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The threshold will filter based on the similarity metric value. Note this is the canonical definition of similarity metric, not the 'distance' version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field. Required.""" - kind: Literal[VectorThresholdKind.VECTOR_SIMILARITY] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorThresholdKind.VECTOR_SIMILARITY] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The kind of threshold used to filter vector queries. Required. The results of the vector query will be filtered based on the vector similarity - metric. Note this is the canonical definition of similarity metric, not the - 'distance' version. The threshold direction (larger or smaller) will be chosen + metric. Note this is the canonical definition of similarity metric, not the 'distance' + version. The threshold direction (larger or smaller) will be chosen automatically according to the metric used by the field.""" @overload @@ -11797,10 +12814,10 @@ class VisionVectorizeSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills. :vartype odata_type: str """ - model_version: str = rest_field(name="modelVersion") + model_version: str = rest_field(name="modelVersion", visibility=["read", "create", "update", "delete", "query"]) """The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. Required.""" - odata_type: Literal["#Microsoft.Skills.Vision.VectorizeSkill"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Skills.Vision.VectorizeSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Vision.VectorizeSkill\".""" @@ -11883,19 +12900,27 @@ class WebApiSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Custom.We :vartype odata_type: str """ - uri: str = rest_field() + uri: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The url for the Web API. Required.""" - http_headers: Optional[Dict[str, str]] = rest_field(name="httpHeaders") + http_headers: Optional[Dict[str, str]] = rest_field( + name="httpHeaders", visibility=["read", "create", "update", "delete", "query"] + ) """The headers required to make the http request.""" - http_method: Optional[str] = rest_field(name="httpMethod") + http_method: Optional[str] = rest_field( + name="httpMethod", visibility=["read", "create", "update", "delete", "query"] + ) """The method for the http request.""" - timeout: Optional[datetime.timedelta] = rest_field() + timeout: Optional[datetime.timedelta] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The desired timeout for the request. 
Default is 30 seconds.""" - batch_size: Optional[int] = rest_field(name="batchSize") + batch_size: Optional[int] = rest_field(name="batchSize", visibility=["read", "create", "update", "delete", "query"]) """The desired batch size which indicates number of documents.""" - degree_of_parallelism: Optional[int] = rest_field(name="degreeOfParallelism") + degree_of_parallelism: Optional[int] = rest_field( + name="degreeOfParallelism", visibility=["read", "create", "update", "delete", "query"] + ) """If set, the number of parallel calls that can be made to the Web API.""" - auth_resource_id: Optional[str] = rest_field(name="authResourceId") + auth_resource_id: Optional[str] = rest_field( + name="authResourceId", visibility=["read", "create", "update", "delete", "query"] + ) """Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with @@ -11903,13 +12928,15 @@ class WebApiSkill(SearchIndexerSkill, discriminator="#Microsoft.Skills.Custom.We function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token.""" - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + name="authIdentity", visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared.""" - odata_type: Literal["#Microsoft.Skills.Custom.WebApiSkill"] = rest_discriminator(name="@odata.type") # type: ignore + odata_type: Literal["#Microsoft.Skills.Custom.WebApiSkill"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of skill. Required. Default value is \"#Microsoft.Skills.Custom.WebApiSkill\".""" @@ -11959,9 +12986,11 @@ class WebApiVectorizer(VectorSearchVectorizer, discriminator="customWebApi"): :vartype kind: str or ~azure.search.documents.models.CUSTOM_WEB_API """ - web_api_parameters: Optional["_models.WebApiVectorizerParameters"] = rest_field(name="customWebApiParameters") + web_api_parameters: Optional["_models.WebApiVectorizerParameters"] = rest_field( + name="customWebApiParameters", visibility=["read", "create", "update", "delete", "query"] + ) """Specifies the properties of the user-defined vectorizer.""" - kind: Literal[VectorSearchVectorizerKind.CUSTOM_WEB_API] = rest_discriminator(name="kind") # type: ignore + kind: Literal[VectorSearchVectorizerKind.CUSTOM_WEB_API] = rest_discriminator(name="kind", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """The name of the kind of vectorization method being configured for use with vector search. Required. 
Generate embeddings using a custom web endpoint at query time.""" @@ -12012,15 +13041,21 @@ class WebApiVectorizerParameters(_model_base.Model): :vartype auth_identity: ~azure.search.documents.models.SearchIndexerDataIdentity """ - url: Optional[str] = rest_field(name="uri") + url: Optional[str] = rest_field(name="uri", visibility=["read", "create", "update", "delete", "query"]) """The URI of the Web API providing the vectorizer.""" - http_headers: Optional[Dict[str, str]] = rest_field(name="httpHeaders") + http_headers: Optional[Dict[str, str]] = rest_field( + name="httpHeaders", visibility=["read", "create", "update", "delete", "query"] + ) """The headers required to make the HTTP request.""" - http_method: Optional[str] = rest_field(name="httpMethod") + http_method: Optional[str] = rest_field( + name="httpMethod", visibility=["read", "create", "update", "delete", "query"] + ) """The method for the HTTP request.""" - timeout: Optional[datetime.timedelta] = rest_field() + timeout: Optional[datetime.timedelta] = rest_field(visibility=["read", "create", "update", "delete", "query"]) """The desired timeout for the request. Default is 30 seconds.""" - auth_resource_id: Optional[str] = rest_field(name="authResourceId") + auth_resource_id: Optional[str] = rest_field( + name="authResourceId", visibility=["read", "create", "update", "delete", "query"] + ) """Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered @@ -12028,7 +13063,9 @@ class WebApiVectorizerParameters(_model_base.Model): function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token.""" - auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field(name="authIdentity") + auth_identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( + name="authIdentity", visibility=["read", "create", "update", "delete", "query"] + ) """The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, @@ -12111,37 +13148,57 @@ class WordDelimiterTokenFilter(TokenFilter, discriminator="#Microsoft.Azure.Sear :vartype odata_type: str """ - generate_word_parts: Optional[bool] = rest_field(name="generateWordParts") + generate_word_parts: Optional[bool] = rest_field( + name="generateWordParts", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to generate part words. If set, causes parts of words to be generated; for example \"AzureSearch\" becomes \"Azure\" \"Search\". Default is true.""" - generate_number_parts: Optional[bool] = rest_field(name="generateNumberParts") + generate_number_parts: Optional[bool] = rest_field( + name="generateNumberParts", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to generate number subwords. Default is true.""" - catenate_words: Optional[bool] = rest_field(name="catenateWords") + catenate_words: Optional[bool] = rest_field( + name="catenateWords", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether maximum runs of word parts will be catenated. 
For example, if this is set to true, \"Azure-Search\" becomes \"AzureSearch\". Default is false.""" - catenate_numbers: Optional[bool] = rest_field(name="catenateNumbers") + catenate_numbers: Optional[bool] = rest_field( + name="catenateNumbers", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, \"1-2\" becomes \"12\". Default is false.""" - catenate_all: Optional[bool] = rest_field(name="catenateAll") + catenate_all: Optional[bool] = rest_field( + name="catenateAll", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether all subword parts will be catenated. For example, if this is set to true, \"Azure-Search-1\" becomes \"AzureSearch1\". Default is false.""" - split_on_case_change: Optional[bool] = rest_field(name="splitOnCaseChange") + split_on_case_change: Optional[bool] = rest_field( + name="splitOnCaseChange", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to split words on caseChange. For example, if this is set to true, \"AzureSearch\" becomes \"Azure\" \"Search\". Default is true.""" - preserve_original: Optional[bool] = rest_field(name="preserveOriginal") + preserve_original: Optional[bool] = rest_field( + name="preserveOriginal", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether original words will be preserved and added to the subword list. Default is false.""" - split_on_numerics: Optional[bool] = rest_field(name="splitOnNumerics") + split_on_numerics: Optional[bool] = rest_field( + name="splitOnNumerics", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to split on numbers. For example, if this is set to true, \"Azure1Search\" becomes \"Azure\" \"1\" \"Search\". Default is true.""" - stem_english_possessive: Optional[bool] = rest_field(name="stemEnglishPossessive") + stem_english_possessive: Optional[bool] = rest_field( + name="stemEnglishPossessive", visibility=["read", "create", "update", "delete", "query"] + ) """A value indicating whether to remove trailing \"'s\" for each subword. Default is true.""" - protected_words: Optional[List[str]] = rest_field(name="protectedWords") + protected_words: Optional[List[str]] = rest_field( + name="protectedWords", visibility=["read", "create", "update", "delete", "query"] + ) """A list of tokens to protect from being delimited.""" - odata_type: Literal["#Microsoft.Azure.Search.WordDelimiterTokenFilter"] = rest_discriminator(name="@odata.type") # type: ignore # pylint: disable=line-too-long + odata_type: Literal["#Microsoft.Azure.Search.WordDelimiterTokenFilter"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore """A URI fragment specifying the type of token filter. Required. 
Default value is \"#Microsoft.Azure.Search.WordDelimiterTokenFilter\".""" diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py index cad45d7952dd..c4716a340135 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/__init__.py @@ -12,13 +12,13 @@ if TYPE_CHECKING: from ._patch import * # pylint: disable=unused-wildcard-import -from ._operations import DataSourcesOperationsOperations # type: ignore -from ._operations import IndexersOperationsOperations # type: ignore -from ._operations import SkillsetsOperationsOperations # type: ignore -from ._operations import SynonymMapsOperationsOperations # type: ignore -from ._operations import IndexesOperationsOperations # type: ignore -from ._operations import AliasesOperationsOperations # type: ignore -from ._operations import DocumentsOperationsOperations # type: ignore +from ._operations import DataSourcesOperations # type: ignore +from ._operations import IndexersOperations # type: ignore +from ._operations import SkillsetsOperations # type: ignore +from ._operations import SynonymMapsOperations # type: ignore +from ._operations import IndexesOperations # type: ignore +from ._operations import AliasesOperations # type: ignore +from ._operations import DocumentsOperations # type: ignore from ._operations import SearchClientOperationsMixin # type: ignore from ._patch import __all__ as _patch_all @@ -26,13 +26,13 @@ from ._patch import patch_sdk as _patch_sdk __all__ = [ - "DataSourcesOperationsOperations", - "IndexersOperationsOperations", - "SkillsetsOperationsOperations", - "SynonymMapsOperationsOperations", - "IndexesOperationsOperations", - "AliasesOperationsOperations", - "DocumentsOperationsOperations", + "DataSourcesOperations", + "IndexersOperations", + "SkillsetsOperations", + "SynonymMapsOperations", + "IndexesOperations", + "AliasesOperations", + "DocumentsOperations", "SearchClientOperationsMixin", ] __all__.extend([p for p in _patch_all if p not in __all__]) # pyright: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py index e382ca31e02d..e40d1c993a7b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py @@ -12,7 +12,7 @@ from typing import Any, Callable, Dict, IO, Iterable, List, Literal, Optional, TypeVar, Union, overload import urllib.parse -from azure.core import MatchConditions +from azure.core import MatchConditions, PipelineClient from azure.core.exceptions import ( ClientAuthenticationError, HttpResponseError, @@ -31,8 +31,9 @@ from azure.core.utils import case_insensitive_dict from .. 
import models as _models -from .._model_base import SdkJSONEncoder, _deserialize -from .._serialization import Serializer +from .._configuration import SearchClientConfiguration +from .._model_base import SdkJSONEncoder, _deserialize, _failsafe_deserialize +from .._serialization import Deserializer, Serializer from .._validation import api_version_validation from .._vendor import SearchClientMixinABC, prep_if_match, prep_if_none_match @@ -48,7 +49,7 @@ _SERIALIZER.client_side_validation = False -def build_data_sources_operations_create_or_update_request( # pylint: disable=name-too-long +def build_data_sources_create_or_update_request( # pylint: disable=name-too-long data_source_name: str, *, skip_indexer_reset_requirement_for_cache: Optional[bool] = None, @@ -61,7 +62,7 @@ def build_data_sources_operations_create_or_update_request( # pylint: disable=n prefer: Literal["return=representation"] = kwargs.pop("prefer") content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -94,7 +95,7 @@ def build_data_sources_operations_create_or_update_request( # pylint: disable=n return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) -def build_data_sources_operations_delete_request( # pylint: disable=name-too-long +def build_data_sources_delete_request( data_source_name: str, *, etag: Optional[str] = None, @@ -104,7 +105,7 @@ def build_data_sources_operations_delete_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -130,13 +131,11 @@ def build_data_sources_operations_delete_request( # pylint: disable=name-too-lo return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_data_sources_operations_get_request( # pylint: disable=name-too-long - data_source_name: str, **kwargs: Any -) -> HttpRequest: +def build_data_sources_get_request(data_source_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -156,13 +155,11 @@ def build_data_sources_operations_get_request( # pylint: disable=name-too-long return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_data_sources_operations_list_request( # pylint: disable=name-too-long - *, _select: Optional[str] = None, **kwargs: Any -) -> HttpRequest: +def build_data_sources_list_request(*, select: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = 
kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -170,8 +167,8 @@ def build_data_sources_operations_list_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if _select is not None: - _params["$select"] = _SERIALIZER.query("select", _select, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -179,12 +176,12 @@ def build_data_sources_operations_list_request( # pylint: disable=name-too-long return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_data_sources_operations_create_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long +def build_data_sources_create_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -201,11 +198,11 @@ def build_data_sources_operations_create_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_reset_request(indexer_name: str, **kwargs: Any) -> HttpRequest: +def build_indexers_reset_request(indexer_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -225,14 +222,14 @@ def build_indexers_operations_reset_request(indexer_name: str, **kwargs: Any) -> return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_reset_docs_request( # pylint: disable=name-too-long +def build_indexers_reset_docs_request( indexer_name: str, *, overwrite: Optional[bool] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -256,11 +253,11 @@ def build_indexers_operations_reset_docs_request( # pylint: disable=name-too-lo return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_run_request(indexer_name: str, **kwargs: Any) -> HttpRequest: +def build_indexers_run_request(indexer_name: str, 
**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -280,7 +277,7 @@ def build_indexers_operations_run_request(indexer_name: str, **kwargs: Any) -> H return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_create_or_update_request( # pylint: disable=name-too-long +def build_indexers_create_or_update_request( indexer_name: str, *, skip_indexer_reset_requirement_for_cache: Optional[bool] = None, @@ -294,7 +291,7 @@ def build_indexers_operations_create_or_update_request( # pylint: disable=name- prefer: Literal["return=representation"] = kwargs.pop("prefer") content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -331,13 +328,13 @@ def build_indexers_operations_create_or_update_request( # pylint: disable=name- return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_delete_request( +def build_indexers_delete_request( indexer_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -363,11 +360,11 @@ def build_indexers_operations_delete_request( return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_get_request(indexer_name: str, **kwargs: Any) -> HttpRequest: +def build_indexers_get_request(indexer_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -387,11 +384,11 @@ def build_indexers_operations_get_request(indexer_name: str, **kwargs: Any) -> H return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_list_request(*, _select: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_indexers_list_request(*, select: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", 
_params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -399,8 +396,8 @@ def build_indexers_operations_list_request(*, _select: Optional[str] = None, **k # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if _select is not None: - _params["$select"] = _SERIALIZER.query("select", _select, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -408,12 +405,12 @@ def build_indexers_operations_list_request(*, _select: Optional[str] = None, **k return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_create_request(**kwargs: Any) -> HttpRequest: +def build_indexers_create_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -430,13 +427,11 @@ def build_indexers_operations_create_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexers_operations_get_status_request( # pylint: disable=name-too-long - indexer_name: str, **kwargs: Any -) -> HttpRequest: +def build_indexers_get_status_request(indexer_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -456,7 +451,7 @@ def build_indexers_operations_get_status_request( # pylint: disable=name-too-lo return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_skillsets_operations_create_or_update_request( # pylint: disable=name-too-long +def build_skillsets_create_or_update_request( skillset_name: str, *, skip_indexer_reset_requirement_for_cache: Optional[bool] = None, @@ -470,7 +465,7 @@ def build_skillsets_operations_create_or_update_request( # pylint: disable=name prefer: Literal["return=representation"] = kwargs.pop("prefer") content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -507,13 +502,13 @@ def build_skillsets_operations_create_or_update_request( # pylint: disable=name return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) -def build_skillsets_operations_delete_request( # pylint: disable=name-too-long +def build_skillsets_delete_request( skillset_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, 
**kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -539,11 +534,11 @@ def build_skillsets_operations_delete_request( # pylint: disable=name-too-long return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_skillsets_operations_get_request(skillset_name: str, **kwargs: Any) -> HttpRequest: +def build_skillsets_get_request(skillset_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -563,11 +558,11 @@ def build_skillsets_operations_get_request(skillset_name: str, **kwargs: Any) -> return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_skillsets_operations_list_request(*, _select: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_skillsets_list_request(*, select: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -575,8 +570,8 @@ def build_skillsets_operations_list_request(*, _select: Optional[str] = None, ** # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if _select is not None: - _params["$select"] = _SERIALIZER.query("select", _select, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -584,12 +579,12 @@ def build_skillsets_operations_list_request(*, _select: Optional[str] = None, ** return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_skillsets_operations_create_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long +def build_skillsets_create_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -606,14 +601,12 @@ def build_skillsets_operations_create_request(**kwargs: Any) -> HttpRequest: # return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_skillsets_operations_reset_skills_request( # pylint: 
disable=name-too-long - skillset_name: str, **kwargs: Any -) -> HttpRequest: +def build_skillsets_reset_skills_request(skillset_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -635,7 +628,7 @@ def build_skillsets_operations_reset_skills_request( # pylint: disable=name-too return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_synonym_maps_operations_create_or_update_request( # pylint: disable=name-too-long +def build_synonym_maps_create_or_update_request( # pylint: disable=name-too-long synonym_map_name: str, *, etag: Optional[str] = None, @@ -647,7 +640,7 @@ def build_synonym_maps_operations_create_or_update_request( # pylint: disable=n prefer: Literal["return=representation"] = kwargs.pop("prefer") content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -676,7 +669,7 @@ def build_synonym_maps_operations_create_or_update_request( # pylint: disable=n return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) -def build_synonym_maps_operations_delete_request( # pylint: disable=name-too-long +def build_synonym_maps_delete_request( synonym_map_name: str, *, etag: Optional[str] = None, @@ -686,7 +679,7 @@ def build_synonym_maps_operations_delete_request( # pylint: disable=name-too-lo _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -712,13 +705,11 @@ def build_synonym_maps_operations_delete_request( # pylint: disable=name-too-lo return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_synonym_maps_operations_get_request( # pylint: disable=name-too-long - synonym_map_name: str, **kwargs: Any -) -> HttpRequest: +def build_synonym_maps_get_request(synonym_map_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -738,13 +729,11 @@ def build_synonym_maps_operations_get_request( # pylint: disable=name-too-long return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_synonym_maps_operations_list_request( # pylint: disable=name-too-long - *, _select: 
Optional[str] = None, **kwargs: Any -) -> HttpRequest: +def build_synonym_maps_list_request(*, select: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -752,8 +741,8 @@ def build_synonym_maps_operations_list_request( # pylint: disable=name-too-long # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if _select is not None: - _params["$select"] = _SERIALIZER.query("select", _select, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -761,12 +750,12 @@ def build_synonym_maps_operations_list_request( # pylint: disable=name-too-long return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_synonym_maps_operations_create_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long +def build_synonym_maps_create_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -783,12 +772,12 @@ def build_synonym_maps_operations_create_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexes_operations_create_request(**kwargs: Any) -> HttpRequest: +def build_indexes_create_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -805,11 +794,11 @@ def build_indexes_operations_create_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexes_operations_list_request(*, _select: Optional[str] = None, **kwargs: Any) -> HttpRequest: +def build_indexes_list_request(*, select: Optional[str] = None, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -817,8 +806,8 @@ def build_indexes_operations_list_request(*, _select: 
Optional[str] = None, **kw # Construct parameters _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") - if _select is not None: - _params["$select"] = _SERIALIZER.query("select", _select, "str") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -826,7 +815,7 @@ def build_indexes_operations_list_request(*, _select: Optional[str] = None, **kw return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexes_operations_create_or_update_request( # pylint: disable=name-too-long +def build_indexes_create_or_update_request( index_name: str, *, allow_index_downtime: Optional[bool] = None, @@ -839,7 +828,7 @@ def build_indexes_operations_create_or_update_request( # pylint: disable=name-t prefer: Literal["return=representation"] = kwargs.pop("prefer") content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -870,13 +859,13 @@ def build_indexes_operations_create_or_update_request( # pylint: disable=name-t return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexes_operations_delete_request( +def build_indexes_delete_request( index_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -902,11 +891,11 @@ def build_indexes_operations_delete_request( return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexes_operations_get_request(index_name: str, **kwargs: Any) -> HttpRequest: +def build_indexes_get_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -926,13 +915,11 @@ def build_indexes_operations_get_request(index_name: str, **kwargs: Any) -> Http return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexes_operations_get_statistics_request( # pylint: disable=name-too-long - index_name: str, **kwargs: Any -) -> HttpRequest: +def build_indexes_get_statistics_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) 
accept = _headers.pop("Accept", "application/json") # Construct URL @@ -952,12 +939,12 @@ def build_indexes_operations_get_statistics_request( # pylint: disable=name-too return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_indexes_operations_analyze_request(index_name: str, **kwargs: Any) -> HttpRequest: +def build_indexes_analyze_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -979,12 +966,12 @@ def build_indexes_operations_analyze_request(index_name: str, **kwargs: Any) -> return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_aliases_operations_create_request(**kwargs: Any) -> HttpRequest: +def build_aliases_create_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1001,11 +988,11 @@ def build_aliases_operations_create_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_aliases_operations_list_request(**kwargs: Any) -> HttpRequest: +def build_aliases_list_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1020,7 +1007,7 @@ def build_aliases_operations_list_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_aliases_operations_create_or_update_request( # pylint: disable=name-too-long +def build_aliases_create_or_update_request( alias_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) @@ -1028,7 +1015,7 @@ def build_aliases_operations_create_or_update_request( # pylint: disable=name-t prefer: Literal["return=representation"] = kwargs.pop("prefer") content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1057,13 +1044,13 @@ def 
build_aliases_operations_create_or_update_request( # pylint: disable=name-t return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs) -def build_aliases_operations_delete_request( +def build_aliases_delete_request( alias_name: str, *, etag: Optional[str] = None, match_condition: Optional[MatchConditions] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1089,11 +1076,11 @@ def build_aliases_operations_delete_request( return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs) -def build_aliases_operations_get_request(alias_name: str, **kwargs: Any) -> HttpRequest: +def build_aliases_get_request(alias_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1113,11 +1100,11 @@ def build_aliases_operations_get_request(alias_name: str, **kwargs: Any) -> Http return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_count_request(index_name: str, **kwargs: Any) -> HttpRequest: +def build_documents_count_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1137,13 +1124,13 @@ def build_documents_operations_count_request(index_name: str, **kwargs: Any) -> return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_search_get_request( # pylint: disable=name-too-long +def build_documents_search_get_request( index_name: str, *, search_text: Optional[str] = None, include_total_result_count: Optional[bool] = None, facets: Optional[List[str]] = None, - _filter: Optional[str] = None, + filter: Optional[str] = None, highlight_fields: Optional[List[str]] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, @@ -1156,9 +1143,9 @@ def build_documents_operations_search_get_request( # pylint: disable=name-too-l search_mode: Optional[Union[str, _models.SearchMode]] = None, scoring_statistics: Optional[Union[str, _models.ScoringStatistics]] = None, session_id: Optional[str] = None, - _select: Optional[List[str]] = None, - _skip: Optional[int] = None, - _top: Optional[int] = None, + select: Optional[List[str]] = None, + skip: Optional[int] = None, + top: Optional[int] = None, semantic_configuration: Optional[str] = None, semantic_error_handling: Optional[Union[str, _models.SemanticErrorMode]] = None, semantic_max_wait_in_milliseconds: Optional[int] = None, 
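The de-underscored builder parameters above (filter, select, skip, top instead of _filter, _select, _skip, _top) line up with keyword arguments already exposed on the public SearchClient, which serialize to the OData query options $filter, $select, $skip, and $top shown in these request builders. A minimal sketch, with placeholder endpoint, key, index, and field names:

from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient(
    endpoint="https://<service>.search.windows.net",
    index_name="<index>",
    credential=AzureKeyCredential("<api-key>"),
)

results = client.search(
    search_text="beach",
    filter="rating ge 4",              # serialized as $filter
    select=["hotelName", "category"],  # serialized as $select
    skip=10,                           # serialized as $skip
    top=5,                             # serialized as $top
)
for result in results:
    print(result["@search.score"])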
@@ -1175,7 +1162,7 @@ def build_documents_operations_search_get_request( # pylint: disable=name-too-l _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1194,8 +1181,8 @@ def build_documents_operations_search_get_request( # pylint: disable=name-too-l _params["$count"] = _SERIALIZER.query("include_total_result_count", include_total_result_count, "bool") if facets is not None: _params["facet"] = [_SERIALIZER.query("facets", q, "str") if q is not None else "" for q in facets] - if _filter is not None: - _params["$filter"] = _SERIALIZER.query("filter", _filter, "str") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if highlight_fields is not None: _params["highlight"] = _SERIALIZER.query("highlight_fields", highlight_fields, "[str]", div=",") if highlight_post_tag is not None: @@ -1222,12 +1209,12 @@ def build_documents_operations_search_get_request( # pylint: disable=name-too-l _params["scoringStatistics"] = _SERIALIZER.query("scoring_statistics", scoring_statistics, "str") if session_id is not None: _params["sessionId"] = _SERIALIZER.query("session_id", session_id, "str") - if _select is not None: - _params["$select"] = _SERIALIZER.query("select", _select, "[str]", div=",") - if _skip is not None: - _params["$skip"] = _SERIALIZER.query("skip", _skip, "int") - if _top is not None: - _params["$top"] = _SERIALIZER.query("top", _top, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if skip is not None: + _params["$skip"] = _SERIALIZER.query("skip", skip, "int") + if top is not None: + _params["$top"] = _SERIALIZER.query("top", top, "int") if semantic_configuration is not None: _params["semanticConfiguration"] = _SERIALIZER.query("semantic_configuration", semantic_configuration, "str") if semantic_error_handling is not None: @@ -1259,14 +1246,12 @@ def build_documents_operations_search_get_request( # pylint: disable=name-too-l return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_search_post_request( # pylint: disable=name-too-long - index_name: str, **kwargs: Any -) -> HttpRequest: +def build_documents_search_post_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1288,13 +1273,13 @@ def build_documents_operations_search_post_request( # pylint: disable=name-too- return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_get_request( +def build_documents_get_request( key: str, index_name: str, *, selected_fields: Optional[List[str]] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = 
case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1317,26 +1302,26 @@ def build_documents_operations_get_request( return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_suggest_get_request( # pylint: disable=name-too-long +def build_documents_suggest_get_request( index_name: str, *, search_text: str, suggester_name: str, - _filter: Optional[str] = None, + filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, minimum_coverage: Optional[float] = None, order_by: Optional[List[str]] = None, search_fields: Optional[List[str]] = None, - _select: Optional[List[str]] = None, - _top: Optional[int] = None, + select: Optional[List[str]] = None, + top: Optional[int] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1351,8 +1336,8 @@ def build_documents_operations_suggest_get_request( # pylint: disable=name-too- _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") _params["search"] = _SERIALIZER.query("search_text", search_text, "str") _params["suggesterName"] = _SERIALIZER.query("suggester_name", suggester_name, "str") - if _filter is not None: - _params["$filter"] = _SERIALIZER.query("filter", _filter, "str") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if use_fuzzy_matching is not None: _params["fuzzy"] = _SERIALIZER.query("use_fuzzy_matching", use_fuzzy_matching, "bool") if highlight_post_tag is not None: @@ -1365,10 +1350,10 @@ def build_documents_operations_suggest_get_request( # pylint: disable=name-too- _params["$orderby"] = _SERIALIZER.query("order_by", order_by, "[str]", div=",") if search_fields is not None: _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") - if _select is not None: - _params["$select"] = _SERIALIZER.query("select", _select, "[str]", div=",") - if _top is not None: - _params["$top"] = _SERIALIZER.query("top", _top, "int") + if select is not None: + _params["$select"] = _SERIALIZER.query("select", select, "[str]", div=",") + if top is not None: + _params["$top"] = _SERIALIZER.query("top", top, "int") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -1376,14 +1361,12 @@ def build_documents_operations_suggest_get_request( # pylint: disable=name-too- return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_suggest_post_request( # pylint: disable=name-too-long - index_name: str, **kwargs: Any -) -> HttpRequest: +def build_documents_suggest_post_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = 
kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1405,12 +1388,12 @@ def build_documents_operations_suggest_post_request( # pylint: disable=name-too return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_index_request(index_name: str, **kwargs: Any) -> HttpRequest: +def build_documents_index_request(index_name: str, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1432,25 +1415,25 @@ def build_documents_operations_index_request(index_name: str, **kwargs: Any) -> return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_autocomplete_get_request( # pylint: disable=name-too-long +def build_documents_autocomplete_get_request( index_name: str, *, search_text: str, suggester_name: str, autocomplete_mode: Optional[Union[str, _models.AutocompleteMode]] = None, - _filter: Optional[str] = None, + filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, minimum_coverage: Optional[float] = None, search_fields: Optional[List[str]] = None, - _top: Optional[int] = None, + top: Optional[int] = None, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1467,8 +1450,8 @@ def build_documents_operations_autocomplete_get_request( # pylint: disable=name _params["suggesterName"] = _SERIALIZER.query("suggester_name", suggester_name, "str") if autocomplete_mode is not None: _params["autocompleteMode"] = _SERIALIZER.query("autocomplete_mode", autocomplete_mode, "str") - if _filter is not None: - _params["$filter"] = _SERIALIZER.query("filter", _filter, "str") + if filter is not None: + _params["$filter"] = _SERIALIZER.query("filter", filter, "str") if use_fuzzy_matching is not None: _params["fuzzy"] = _SERIALIZER.query("use_fuzzy_matching", use_fuzzy_matching, "bool") if highlight_post_tag is not None: @@ -1479,8 +1462,8 @@ def build_documents_operations_autocomplete_get_request( # pylint: disable=name _params["minimumCoverage"] = _SERIALIZER.query("minimum_coverage", minimum_coverage, "float") if search_fields is not None: _params["searchFields"] = _SERIALIZER.query("search_fields", search_fields, "[str]", div=",") - if _top is not None: - _params["$top"] = _SERIALIZER.query("top", _top, "int") + if top is not None: + _params["$top"] = _SERIALIZER.query("top", top, "int") # Construct 
headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") @@ -1488,14 +1471,14 @@ def build_documents_operations_autocomplete_get_request( # pylint: disable=name return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -def build_documents_operations_autocomplete_post_request( # pylint: disable=name-too-long +def build_documents_autocomplete_post_request( # pylint: disable=name-too-long index_name: str, **kwargs: Any ) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1521,7 +1504,7 @@ def build_search_get_service_statistics_request(**kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) - api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2024-11-01-preview")) + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) accept = _headers.pop("Accept", "application/json") # Construct URL @@ -1536,22 +1519,41 @@ def build_search_get_service_statistics_request(**kwargs: Any) -> HttpRequest: return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) -class DataSourcesOperationsOperations: +def build_search_get_index_stats_summary_request(**kwargs: Any) -> HttpRequest: # pylint: disable=name-too-long + _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) + _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) + + api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2025-03-01-preview")) + accept = _headers.pop("Accept", "application/json") + + # Construct URL + _url = "/indexstats" + + # Construct parameters + _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") + + # Construct headers + _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") + + return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs) + + +class DataSourcesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.SearchClient`'s - :attr:`data_sources_operations` attribute. + :attr:`data_sources` attribute. 
""" def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload def create_or_update( @@ -1657,7 +1659,7 @@ def create_or_update( @distributed_trace @api_version_validation( - params_added_on={"2024-11-01-preview": ["skip_indexer_reset_requirement_for_cache"]}, + params_added_on={"2025-03-01-preview": ["skip_indexer_reset_requirement_for_cache"]}, ) def create_or_update( self, @@ -1716,7 +1718,7 @@ def create_or_update( else: _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_data_sources_operations_create_or_update_request( + _request = build_data_sources_create_or_update_request( data_source_name=data_source_name, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, etag=etag, @@ -1747,7 +1749,7 @@ def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1801,7 +1803,7 @@ def delete( # pylint: disable=inconsistent-return-statements cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_data_sources_operations_delete_request( + _request = build_data_sources_delete_request( data_source_name=data_source_name, etag=etag, match_condition=match_condition, @@ -1821,9 +1823,9 @@ def delete( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -1852,7 +1854,7 @@ def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndexerData cls: ClsType[_models.SearchIndexerDataSource] = kwargs.pop("cls", None) - _request = build_data_sources_operations_get_request( + _request = build_data_sources_get_request( data_source_name=data_source_name, api_version=self._config.api_version, headers=_headers, @@ -1870,14 +1872,14 @@ def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndexerData response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, 
response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -1891,13 +1893,13 @@ def get(self, data_source_name: str, **kwargs: Any) -> _models.SearchIndexerData return deserialized # type: ignore @distributed_trace - def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListDataSourcesResult: + def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListDataSourcesResult: """Lists all datasources available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListDataSourcesResult. The ListDataSourcesResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListDataSourcesResult :raises ~azure.core.exceptions.HttpResponseError: @@ -1915,8 +1917,8 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListD cls: ClsType[_models.ListDataSourcesResult] = kwargs.pop("cls", None) - _request = build_data_sources_operations_list_request( - _select=_select, + _request = build_data_sources_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -1940,7 +1942,7 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListD except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2035,7 +2037,7 @@ def create( else: _content = json.dumps(data_source, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_data_sources_operations_create_request( + _request = build_data_sources_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2054,14 +2056,14 @@ def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2075,22 +2077,22 @@ def create( return deserialized # type: ignore -class IndexersOperationsOperations: +class IndexersOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.SearchClient`'s - :attr:`indexers_operations` attribute. + :attr:`indexers` attribute. 
""" def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def reset(self, indexer_name: str, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements @@ -2115,7 +2117,7 @@ def reset(self, indexer_name: str, **kwargs: Any) -> None: # pylint: disable=in cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexers_operations_reset_request( + _request = build_indexers_reset_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -2135,7 +2137,7 @@ def reset(self, indexer_name: str, **kwargs: Any) -> None: # pylint: disable=in if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -2239,9 +2241,9 @@ def reset_docs( @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "api_version", "overwrite", "client_request_id", @@ -2302,7 +2304,7 @@ def reset_docs( # pylint: disable=inconsistent-return-statements else: _content = None - _request = build_indexers_operations_reset_docs_request( + _request = build_indexers_reset_docs_request( indexer_name=indexer_name, overwrite=overwrite, content_type=content_type, @@ -2325,7 +2327,7 @@ def reset_docs( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -2354,7 +2356,7 @@ def run(self, indexer_name: str, **kwargs: Any) -> None: # pylint: disable=inco cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexers_operations_run_request( + _request = build_indexers_run_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -2372,9 +2374,9 @@ def run(self, indexer_name: str, **kwargs: Any) -> None: # pylint: disable=inco response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -2497,7 +2499,7 @@ def create_or_update( @distributed_trace @api_version_validation( 
params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "skip_indexer_reset_requirement_for_cache", "disable_cache_reprocessing_change_detection", ] @@ -2564,7 +2566,7 @@ def create_or_update( else: _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexers_operations_create_or_update_request( + _request = build_indexers_create_or_update_request( indexer_name=indexer_name, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, @@ -2596,7 +2598,7 @@ def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2650,7 +2652,7 @@ def delete( # pylint: disable=inconsistent-return-statements cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexers_operations_delete_request( + _request = build_indexers_delete_request( indexer_name=indexer_name, etag=etag, match_condition=match_condition, @@ -2670,9 +2672,9 @@ def delete( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -2701,7 +2703,7 @@ def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: cls: ClsType[_models.SearchIndexer] = kwargs.pop("cls", None) - _request = build_indexers_operations_get_request( + _request = build_indexers_get_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -2719,14 +2721,14 @@ def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2740,13 +2742,13 @@ def get(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexer: return deserialized # type: ignore @distributed_trace - def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListIndexersResult: + def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListIndexersResult: """Lists all indexers available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListIndexersResult. 
The ListIndexersResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListIndexersResult :raises ~azure.core.exceptions.HttpResponseError: @@ -2764,8 +2766,8 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListI cls: ClsType[_models.ListIndexersResult] = kwargs.pop("cls", None) - _request = build_indexers_operations_list_request( - _select=_select, + _request = build_indexers_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -2789,7 +2791,7 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListI except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2880,7 +2882,7 @@ def create(self, indexer: Union[_models.SearchIndexer, JSON, IO[bytes]], **kwarg else: _content = json.dumps(indexer, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexers_operations_create_request( + _request = build_indexers_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -2899,14 +2901,14 @@ def create(self, indexer: Union[_models.SearchIndexer, JSON, IO[bytes]], **kwarg response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2942,7 +2944,7 @@ def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexerS cls: ClsType[_models.SearchIndexerStatus] = kwargs.pop("cls", None) - _request = build_indexers_operations_get_status_request( + _request = build_indexers_get_status_request( indexer_name=indexer_name, api_version=self._config.api_version, headers=_headers, @@ -2967,7 +2969,7 @@ def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexerS except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -2981,22 +2983,22 @@ def get_status(self, indexer_name: str, **kwargs: Any) -> _models.SearchIndexerS return deserialized # type: ignore -class SkillsetsOperationsOperations: +class SkillsetsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.SearchClient`'s - :attr:`skillsets_operations` attribute. + :attr:`skillsets` attribute. 
""" def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload def create_or_update( @@ -3121,7 +3123,7 @@ def create_or_update( @distributed_trace @api_version_validation( params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "skip_indexer_reset_requirement_for_cache", "disable_cache_reprocessing_change_detection", ] @@ -3189,7 +3191,7 @@ def create_or_update( else: _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_skillsets_operations_create_or_update_request( + _request = build_skillsets_create_or_update_request( skillset_name=skillset_name, skip_indexer_reset_requirement_for_cache=skip_indexer_reset_requirement_for_cache, disable_cache_reprocessing_change_detection=disable_cache_reprocessing_change_detection, @@ -3221,7 +3223,7 @@ def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3275,7 +3277,7 @@ def delete( # pylint: disable=inconsistent-return-statements cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_skillsets_operations_delete_request( + _request = build_skillsets_delete_request( skillset_name=skillset_name, etag=etag, match_condition=match_condition, @@ -3295,9 +3297,9 @@ def delete( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -3326,7 +3328,7 @@ def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerSkillse cls: ClsType[_models.SearchIndexerSkillset] = kwargs.pop("cls", None) - _request = build_skillsets_operations_get_request( + _request = build_skillsets_get_request( skillset_name=skillset_name, api_version=self._config.api_version, headers=_headers, @@ -3344,14 +3346,14 @@ def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerSkillse response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, 
response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3365,13 +3367,13 @@ def get(self, skillset_name: str, **kwargs: Any) -> _models.SearchIndexerSkillse return deserialized # type: ignore @distributed_trace - def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSkillsetsResult: + def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListSkillsetsResult: """List all skillsets in a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListSkillsetsResult. The ListSkillsetsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListSkillsetsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -3389,8 +3391,8 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListS cls: ClsType[_models.ListSkillsetsResult] = kwargs.pop("cls", None) - _request = build_skillsets_operations_list_request( - _select=_select, + _request = build_skillsets_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -3414,7 +3416,7 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListS except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3512,7 +3514,7 @@ def create( else: _content = json.dumps(skillset, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_skillsets_operations_create_request( + _request = build_skillsets_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -3531,14 +3533,14 @@ def create( response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3618,9 +3620,9 @@ def reset_skills( @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": ["api_version", "client_request_id", "skillset_name", "content_type", "accept"] + "2025-03-01-preview": ["api_version", "client_request_id", "skillset_name", "content_type", "accept"] }, ) def reset_skills( # pylint: disable=inconsistent-return-statements @@ -3659,7 +3661,7 @@ def reset_skills( # pylint: disable=inconsistent-return-statements else: _content = json.dumps(skill_names, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_skillsets_operations_reset_skills_request( + _request = 
build_skillsets_reset_skills_request( skillset_name=skillset_name, content_type=content_type, api_version=self._config.api_version, @@ -3681,29 +3683,29 @@ def reset_skills( # pylint: disable=inconsistent-return-statements if response.status_code not in [204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: return cls(pipeline_response, None, {}) # type: ignore -class SynonymMapsOperationsOperations: +class SynonymMapsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.SearchClient`'s - :attr:`synonym_maps_operations` attribute. + :attr:`synonym_maps` attribute. """ def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload def create_or_update( @@ -3849,7 +3851,7 @@ def create_or_update( else: _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_synonym_maps_operations_create_or_update_request( + _request = build_synonym_maps_create_or_update_request( synonym_map_name=synonym_map_name, etag=etag, match_condition=match_condition, @@ -3879,7 +3881,7 @@ def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -3933,7 +3935,7 @@ def delete( # pylint: disable=inconsistent-return-statements cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_synonym_maps_operations_delete_request( + _request = build_synonym_maps_delete_request( synonym_map_name=synonym_map_name, etag=etag, match_condition=match_condition, @@ -3953,9 +3955,9 @@ def delete( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -3984,7 +3986,7 @@ def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: cls: ClsType[_models.SynonymMap] = kwargs.pop("cls", None) - _request = build_synonym_maps_operations_get_request( + _request = build_synonym_maps_get_request( synonym_map_name=synonym_map_name, 
api_version=self._config.api_version, headers=_headers, @@ -4002,14 +4004,14 @@ def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4023,13 +4025,13 @@ def get(self, synonym_map_name: str, **kwargs: Any) -> _models.SynonymMap: return deserialized # type: ignore @distributed_trace - def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListSynonymMapsResult: + def list(self, *, select: Optional[str] = None, **kwargs: Any) -> _models.ListSynonymMapsResult: """Lists all synonym maps available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. - :paramtype _select: str + :paramtype select: str :return: ListSynonymMapsResult. The ListSynonymMapsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.ListSynonymMapsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -4047,8 +4049,8 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListS cls: ClsType[_models.ListSynonymMapsResult] = kwargs.pop("cls", None) - _request = build_synonym_maps_operations_list_request( - _select=_select, + _request = build_synonym_maps_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -4072,7 +4074,7 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> _models.ListS except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4163,7 +4165,7 @@ def create(self, synonym_map: Union[_models.SynonymMap, JSON, IO[bytes]], **kwar else: _content = json.dumps(synonym_map, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_synonym_maps_operations_create_request( + _request = build_synonym_maps_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4182,14 +4184,14 @@ def create(self, synonym_map: Union[_models.SynonymMap, JSON, IO[bytes]], **kwar response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4203,22 +4205,22 @@ def 
create(self, synonym_map: Union[_models.SynonymMap, JSON, IO[bytes]], **kwar return deserialized # type: ignore -class IndexesOperationsOperations: +class IndexesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.SearchClient`'s - :attr:`indexes_operations` attribute. + :attr:`indexes` attribute. """ def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload def create( @@ -4296,7 +4298,7 @@ def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwargs: A else: _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_operations_create_request( + _request = build_indexes_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -4315,14 +4317,14 @@ def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwargs: A response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4336,13 +4338,13 @@ def create(self, index: Union[_models.SearchIndex, JSON, IO[bytes]], **kwargs: A return deserialized # type: ignore @distributed_trace - def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> Iterable["_models.SearchIndex"]: + def list(self, *, select: Optional[str] = None, **kwargs: Any) -> Iterable["_models.SearchIndex"]: """Lists all indexes available for a search service. - :keyword _select: Selects which top-level properties to retrieve. + :keyword select: Selects which top-level properties to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. Default value is None. 
- :paramtype _select: str + :paramtype select: str :return: An iterator like instance of SearchIndex :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.models.SearchIndex] :raises ~azure.core.exceptions.HttpResponseError: @@ -4363,8 +4365,8 @@ def list(self, *, _select: Optional[str] = None, **kwargs: Any) -> Iterable["_mo def prepare_request(next_link=None): if not next_link: - _request = build_indexes_operations_list_request( - _select=_select, + _request = build_indexes_list_request( + select=select, api_version=self._config.api_version, headers=_headers, params=_params, @@ -4416,7 +4418,7 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -4599,7 +4601,7 @@ def create_or_update( else: _content = json.dumps(index, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_operations_create_or_update_request( + _request = build_indexes_create_or_update_request( index_name=index_name, allow_index_downtime=allow_index_downtime, etag=etag, @@ -4630,7 +4632,7 @@ def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4687,7 +4689,7 @@ def delete( # pylint: disable=inconsistent-return-statements cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_indexes_operations_delete_request( + _request = build_indexes_delete_request( index_name=index_name, etag=etag, match_condition=match_condition, @@ -4707,9 +4709,9 @@ def delete( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -4738,7 +4740,7 @@ def get(self, index_name: str, **kwargs: Any) -> _models.SearchIndex: cls: ClsType[_models.SearchIndex] = kwargs.pop("cls", None) - _request = build_indexes_operations_get_request( + _request = build_indexes_get_request( index_name=index_name, api_version=self._config.api_version, headers=_headers, @@ -4756,14 +4758,14 @@ def get(self, index_name: str, **kwargs: Any) -> _models.SearchIndex: response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4801,7 +4803,7 @@ def get_statistics(self, index_name: str, **kwargs: Any) -> _models.GetIndexStat 
cls: ClsType[_models.GetIndexStatisticsResult] = kwargs.pop("cls", None) - _request = build_indexes_operations_get_statistics_request( + _request = build_indexes_get_statistics_request( index_name=index_name, api_version=self._config.api_version, headers=_headers, @@ -4826,7 +4828,7 @@ def get_statistics(self, index_name: str, **kwargs: Any) -> _models.GetIndexStat except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4929,7 +4931,7 @@ def analyze( else: _content = json.dumps(request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_indexes_operations_analyze_request( + _request = build_indexes_analyze_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -4956,7 +4958,7 @@ def analyze( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -4970,22 +4972,22 @@ def analyze( return deserialized # type: ignore -class AliasesOperationsOperations: +class AliasesOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.SearchClient`'s - :attr:`aliases_operations` attribute. + :attr:`aliases` attribute. """ def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @overload def create( @@ -5033,8 +5035,8 @@ def create(self, alias: IO[bytes], *, content_type: str = "application/json", ** @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", - params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "content_type", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "content_type", "accept"]}, ) def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwargs: Any) -> _models.SearchAlias: """Creates a new search alias. 
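The hunks above drop the duplicated "Operations" suffix from the operation-group classes (for example AliasesOperationsOperations becomes AliasesOperations) and shorten the client attributes they are reached through (aliases_operations becomes aliases, and likewise data_sources, indexers, skillsets, synonym_maps and indexes). A minimal, illustrative call-site sketch, not applied by this patch, assuming the tsp-generated SearchClient can be imported from azure.search.documents._generated and is constructed from an endpoint plus an AzureKeyCredential; the service URL, key, alias name and index name are placeholders:

# Sketch only: exercises the renamed `aliases` operation group.
from azure.core.credentials import AzureKeyCredential
from azure.search.documents._generated import SearchClient  # assumed import path for the regenerated client

client = SearchClient(
    endpoint="https://<service>.search.windows.net",  # placeholder endpoint
    credential=AzureKeyCredential("<api-key>"),  # placeholder admin key
)

# create() accepts a SearchAlias model, a JSON-compatible dict, or IO[bytes]; the returned
# SearchAlias follows the same MutableMapping-compatible model pattern as the other models
# in this file, so dict-style access is expected to work.
alias = client.aliases.create({"name": "hotels-alias", "indexes": ["hotels-v1"]})
print(alias["name"])
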
@@ -5067,7 +5069,7 @@ def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwargs: A else: _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_aliases_operations_create_request( + _request = build_aliases_create_request( content_type=content_type, api_version=self._config.api_version, content=_content, @@ -5086,14 +5088,14 @@ def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwargs: A response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [201]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5108,8 +5110,8 @@ def create(self, alias: Union[_models.SearchAlias, JSON, IO[bytes]], **kwargs: A @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", - params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "accept"]}, ) def list(self, **kwargs: Any) -> Iterable["_models.SearchAlias"]: """Lists all aliases available for a search service. @@ -5134,7 +5136,7 @@ def list(self, **kwargs: Any) -> Iterable["_models.SearchAlias"]: def prepare_request(next_link=None): if not next_link: - _request = build_aliases_operations_list_request( + _request = build_aliases_list_request( api_version=self._config.api_version, headers=_headers, params=_params, @@ -5186,7 +5188,7 @@ def get_next(next_link=None): if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) return pipeline_response @@ -5285,9 +5287,9 @@ def create_or_update( @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "api_version", "prefer", "client_request_id", @@ -5352,7 +5354,7 @@ def create_or_update( else: _content = json.dumps(alias, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_aliases_operations_create_or_update_request( + _request = build_aliases_create_or_update_request( alias_name=alias_name, etag=etag, match_condition=match_condition, @@ -5382,7 +5384,7 @@ def create_or_update( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5397,9 +5399,9 @@ def create_or_update( @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", + method_added_on="2025-03-01-preview", params_added_on={ - "2024-11-01-preview": [ + "2025-03-01-preview": [ "api_version", "client_request_id", "alias_name", @@ -5451,7 +5453,7 
@@ def delete( # pylint: disable=inconsistent-return-statements cls: ClsType[None] = kwargs.pop("cls", None) - _request = build_aliases_operations_delete_request( + _request = build_aliases_delete_request( alias_name=alias_name, etag=etag, match_condition=match_condition, @@ -5471,9 +5473,9 @@ def delete( # pylint: disable=inconsistent-return-statements response = pipeline_response.http_response - if response.status_code not in [204]: + if response.status_code not in [204, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if cls: @@ -5481,8 +5483,8 @@ def delete( # pylint: disable=inconsistent-return-statements @distributed_trace @api_version_validation( - method_added_on="2024-11-01-preview", - params_added_on={"2024-11-01-preview": ["api_version", "client_request_id", "alias_name", "accept"]}, + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "alias_name", "accept"]}, ) def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: """Retrieves an alias definition. @@ -5506,7 +5508,7 @@ def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: cls: ClsType[_models.SearchAlias] = kwargs.pop("cls", None) - _request = build_aliases_operations_get_request( + _request = build_aliases_get_request( alias_name=alias_name, api_version=self._config.api_version, headers=_headers, @@ -5524,14 +5526,14 @@ def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: response = pipeline_response.http_response - if response.status_code not in [200, 201]: + if response.status_code not in [200]: if _stream: try: response.read() # Load the body in memory and close the socket except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5545,22 +5547,22 @@ def get(self, alias_name: str, **kwargs: Any) -> _models.SearchAlias: return deserialized # type: ignore -class DocumentsOperationsOperations: +class DocumentsOperations: """ .. warning:: **DO NOT** instantiate this class directly. Instead, you should access the following operations through :class:`~azure.search.documents.SearchClient`'s - :attr:`documents_operations` attribute. + :attr:`documents` attribute. 
""" def __init__(self, *args, **kwargs): input_args = list(args) - self._client = input_args.pop(0) if input_args else kwargs.pop("client") - self._config = input_args.pop(0) if input_args else kwargs.pop("config") - self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer") - self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer") + self._client: PipelineClient = input_args.pop(0) if input_args else kwargs.pop("client") + self._config: SearchClientConfiguration = input_args.pop(0) if input_args else kwargs.pop("config") + self._serialize: Serializer = input_args.pop(0) if input_args else kwargs.pop("serializer") + self._deserialize: Deserializer = input_args.pop(0) if input_args else kwargs.pop("deserializer") @distributed_trace def count(self, index_name: str, **kwargs: Any) -> int: @@ -5585,7 +5587,7 @@ def count(self, index_name: str, **kwargs: Any) -> int: cls: ClsType[int] = kwargs.pop("cls", None) - _request = build_documents_operations_count_request( + _request = build_documents_count_request( index_name=index_name, api_version=self._config.api_version, headers=_headers, @@ -5610,7 +5612,7 @@ def count(self, index_name: str, **kwargs: Any) -> int: except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -5626,7 +5628,7 @@ def count(self, index_name: str, **kwargs: Any) -> int: @distributed_trace @api_version_validation( params_added_on={ - "2024-11-01-preview": ["query_rewrites", "debug", "query_language", "speller", "semantic_fields"] + "2025-03-01-preview": ["query_rewrites", "debug", "query_language", "speller", "semantic_fields"] }, ) def search_get( @@ -5636,7 +5638,7 @@ def search_get( search_text: Optional[str] = None, include_total_result_count: Optional[bool] = None, facets: Optional[List[str]] = None, - _filter: Optional[str] = None, + filter: Optional[str] = None, highlight_fields: Optional[List[str]] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, @@ -5649,9 +5651,9 @@ def search_get( search_mode: Optional[Union[str, _models.SearchMode]] = None, scoring_statistics: Optional[Union[str, _models.ScoringStatistics]] = None, session_id: Optional[str] = None, - _select: Optional[List[str]] = None, - _skip: Optional[int] = None, - _top: Optional[int] = None, + select: Optional[List[str]] = None, + skip: Optional[int] = None, + top: Optional[int] = None, semantic_configuration: Optional[str] = None, semantic_error_handling: Optional[Union[str, _models.SemanticErrorMode]] = None, semantic_max_wait_in_milliseconds: Optional[int] = None, @@ -5682,9 +5684,9 @@ def search_get( expression contains a field name, optionally followed by a comma-separated list of name:value pairs. Default value is None. :paramtype facets: list[str] - :keyword _filter: The OData $filter expression to apply to the search query. Default value is + :keyword filter: The OData $filter expression to apply to the search query. Default value is None. - :paramtype _filter: str + :paramtype filter: str :keyword highlight_fields: The list of field names to use for hit highlights. Only searchable fields can be used for hit highlighting. Default value is None. 
@@ -5748,20 +5750,20 @@ def search_get( requests across replicas and adversely affect the performance of the search service. The value used as sessionId cannot start with a '_' character. Default value is None. :paramtype session_id: str - :keyword _select: The list of fields to retrieve. If unspecified, all fields marked as + :keyword select: The list of fields to retrieve. If unspecified, all fields marked as retrievable in the schema are included. Default value is None. - :paramtype _select: list[str] - :keyword _skip: The number of search results to skip. This value cannot be greater than + :paramtype select: list[str] + :keyword skip: The number of search results to skip. This value cannot be greater than 100,000. If you need to scan documents in sequence, but cannot use $skip due to this limitation, consider using $orderby on a totally-ordered key and $filter with a range query instead. Default value is None. - :paramtype _skip: int - :keyword _top: The number of search results to retrieve. This can be used in conjunction with + :paramtype skip: int + :keyword top: The number of search results to retrieve. This can be used in conjunction with $skip to implement client-side paging of search results. If results are truncated due to server-side paging, the response will include a continuation token that can be used to issue another Search request for the next page of results. Default value is None. - :paramtype _top: int + :paramtype top: int :keyword semantic_configuration: The name of the semantic configuration that lists which fields should be used for semantic ranking, captions, highlights, and answers. Default value is None. @@ -5785,7 +5787,7 @@ def search_get( followed by the ``threshold-`` option after the answers parameter value, such as ``extractive|threshold-0.9``. Default threshold is 0.7. The maximum character length of answers can be configured by appending the pipe - character '|' followed by the 'count-:code:``', + character '|' followed by the 'count-\\ :code:``', such as 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". Default value is None. :paramtype answers: str or ~azure.search.documents.models.QueryAnswerType @@ -5796,7 +5798,7 @@ def search_get( can be configured by appending the pipe character ``|`` followed by the ``highlight-`` option, such as ``extractive|highlight-true``. Defaults to ``None``. The maximum character length of captions can be configured by - appending the pipe character '|' followed by the 'count-:code:``', such as 'extractive|maxcharlength-600'. Known values are: "none" and "extractive". Default value is None. 
:paramtype captions: str or ~azure.search.documents.models.QueryCaptionType @@ -5851,12 +5853,12 @@ def search_get( cls: ClsType[_models.SearchDocumentsResult] = kwargs.pop("cls", None) - _request = build_documents_operations_search_get_request( + _request = build_documents_search_get_request( index_name=index_name, search_text=search_text, include_total_result_count=include_total_result_count, facets=facets, - _filter=_filter, + filter=filter, highlight_fields=highlight_fields, highlight_post_tag=highlight_post_tag, highlight_pre_tag=highlight_pre_tag, @@ -5869,9 +5871,9 @@ def search_get( search_mode=search_mode, scoring_statistics=scoring_statistics, session_id=session_id, - _select=_select, - _skip=_skip, - _top=_top, + select=select, + skip=skip, + top=top, semantic_configuration=semantic_configuration, semantic_error_handling=semantic_error_handling, semantic_max_wait_in_milliseconds=semantic_max_wait_in_milliseconds, @@ -5906,7 +5908,7 @@ def search_get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6014,7 +6016,7 @@ def search_post( else: _content = json.dumps(search_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_search_post_request( + _request = build_documents_search_post_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -6041,7 +6043,7 @@ def search_post( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6057,7 +6059,7 @@ def search_post( @distributed_trace def get( self, key: str, index_name: str, *, selected_fields: Optional[List[str]] = None, **kwargs: Any - ) -> Dict[str, Any]: + ) -> _models.LookupDocument: """Retrieves a document from the index. :param key: The key of the document to retrieve. Required. @@ -6068,8 +6070,8 @@ def get( retrieved will be missing from the returned document. Default value is None. :paramtype selected_fields: list[str] - :return: dict mapping str to any - :rtype: dict[str, any] + :return: LookupDocument. 
The LookupDocument is compatible with MutableMapping + :rtype: ~azure.search.documents.models.LookupDocument :raises ~azure.core.exceptions.HttpResponseError: """ error_map: MutableMapping = { @@ -6083,9 +6085,9 @@ def get( _headers = kwargs.pop("headers", {}) or {} _params = kwargs.pop("params", {}) or {} - cls: ClsType[Dict[str, Any]] = kwargs.pop("cls", None) + cls: ClsType[_models.LookupDocument] = kwargs.pop("cls", None) - _request = build_documents_operations_get_request( + _request = build_documents_get_request( key=key, index_name=index_name, selected_fields=selected_fields, @@ -6112,13 +6114,13 @@ def get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: deserialized = response.iter_bytes() else: - deserialized = _deserialize(Dict[str, Any], response.json()) + deserialized = _deserialize(_models.LookupDocument, response.json()) if cls: return cls(pipeline_response, deserialized, {}) # type: ignore @@ -6132,15 +6134,15 @@ def suggest_get( *, search_text: str, suggester_name: str, - _filter: Optional[str] = None, + filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, minimum_coverage: Optional[float] = None, order_by: Optional[List[str]] = None, search_fields: Optional[List[str]] = None, - _select: Optional[List[str]] = None, - _top: Optional[int] = None, + select: Optional[List[str]] = None, + top: Optional[int] = None, **kwargs: Any ) -> _models.SuggestDocumentsResult: """Suggests documents in the index that match the given partial query text. @@ -6155,9 +6157,9 @@ def suggest_get( that's part of the index definition. Required. :paramtype suggester_name: str - :keyword _filter: An OData expression that filters the documents considered for suggestions. + :keyword filter: An OData expression that filters the documents considered for suggestions. Default value is None. - :paramtype _filter: str + :paramtype filter: str :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the suggestions query. Default is false. When set to true, the query will find terms even if there's a @@ -6191,14 +6193,14 @@ def suggest_get( fields must be included in the specified suggester. Default value is None. :paramtype search_fields: list[str] - :keyword _select: The list of fields to retrieve. If unspecified, only the key field will be + :keyword select: The list of fields to retrieve. If unspecified, only the key field will be included in the results. Default value is None. - :paramtype _select: list[str] - :keyword _top: The number of suggestions to retrieve. The value must be a number between 1 and + :paramtype select: list[str] + :keyword top: The number of suggestions to retrieve. The value must be a number between 1 and #. The default is 5. Default value is None. - :paramtype _top: int + :paramtype top: int :return: SuggestDocumentsResult. 
The SuggestDocumentsResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.SuggestDocumentsResult :raises ~azure.core.exceptions.HttpResponseError: @@ -6216,19 +6218,19 @@ def suggest_get( cls: ClsType[_models.SuggestDocumentsResult] = kwargs.pop("cls", None) - _request = build_documents_operations_suggest_get_request( + _request = build_documents_suggest_get_request( index_name=index_name, search_text=search_text, suggester_name=suggester_name, - _filter=_filter, + filter=filter, use_fuzzy_matching=use_fuzzy_matching, highlight_post_tag=highlight_post_tag, highlight_pre_tag=highlight_pre_tag, minimum_coverage=minimum_coverage, order_by=order_by, search_fields=search_fields, - _select=_select, - _top=_top, + select=select, + top=top, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6252,7 +6254,7 @@ def suggest_get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6360,7 +6362,7 @@ def suggest_post( else: _content = json.dumps(suggest_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_suggest_post_request( + _request = build_documents_suggest_post_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -6387,7 +6389,7 @@ def suggest_post( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6490,7 +6492,7 @@ def index( else: _content = json.dumps(batch, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_index_request( + _request = build_documents_index_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -6517,7 +6519,7 @@ def index( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6538,13 +6540,13 @@ def autocomplete_get( search_text: str, suggester_name: str, autocomplete_mode: Optional[Union[str, _models.AutocompleteMode]] = None, - _filter: Optional[str] = None, + filter: Optional[str] = None, use_fuzzy_matching: Optional[bool] = None, highlight_post_tag: Optional[str] = None, highlight_pre_tag: Optional[str] = None, minimum_coverage: Optional[float] = None, search_fields: Optional[List[str]] = None, - _top: Optional[int] = None, + top: Optional[int] = None, **kwargs: Any ) -> _models.AutocompleteResult: """Autocompletes incomplete query terms based on input text and matching terms in @@ -6564,10 +6566,9 @@ def autocomplete_get( producing auto-completed terms. Known values are: "oneTerm", "twoTerms", and "oneTermWithContext". Default value is None. 
:paramtype autocomplete_mode: str or ~azure.search.documents.models.AutocompleteMode - :keyword _filter: An OData expression that filters the documents used to produce completed - terms + :keyword filter: An OData expression that filters the documents used to produce completed terms for the Autocomplete result. Default value is None. - :paramtype _filter: str + :paramtype filter: str :keyword use_fuzzy_matching: A value indicating whether to use fuzzy matching for the autocomplete query. Default is false. When set to true, the query will find terms even if there's a @@ -6591,9 +6592,9 @@ def autocomplete_get( terms. Target fields must be included in the specified suggester. Default value is None. :paramtype search_fields: list[str] - :keyword _top: The number of auto-completed terms to retrieve. This must be a value between 1 + :keyword top: The number of auto-completed terms to retrieve. This must be a value between 1 and 100. The default is 5. Default value is None. - :paramtype _top: int + :paramtype top: int :return: AutocompleteResult. The AutocompleteResult is compatible with MutableMapping :rtype: ~azure.search.documents.models.AutocompleteResult :raises ~azure.core.exceptions.HttpResponseError: @@ -6611,18 +6612,18 @@ def autocomplete_get( cls: ClsType[_models.AutocompleteResult] = kwargs.pop("cls", None) - _request = build_documents_operations_autocomplete_get_request( + _request = build_documents_autocomplete_get_request( index_name=index_name, search_text=search_text, suggester_name=suggester_name, autocomplete_mode=autocomplete_mode, - _filter=_filter, + filter=filter, use_fuzzy_matching=use_fuzzy_matching, highlight_post_tag=highlight_post_tag, highlight_pre_tag=highlight_pre_tag, minimum_coverage=minimum_coverage, search_fields=search_fields, - _top=_top, + top=top, api_version=self._config.api_version, headers=_headers, params=_params, @@ -6646,7 +6647,7 @@ def autocomplete_get( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6759,7 +6760,7 @@ def autocomplete_post( else: _content = json.dumps(autocomplete_request, cls=SdkJSONEncoder, exclude_readonly=True) # type: ignore - _request = build_documents_operations_autocomplete_post_request( + _request = build_documents_autocomplete_post_request( index_name=index_name, content_type=content_type, api_version=self._config.api_version, @@ -6786,7 +6787,7 @@ def autocomplete_post( except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6847,7 +6848,7 @@ def get_service_statistics(self, **kwargs: Any) -> _models.SearchServiceStatisti except (StreamConsumedError, StreamClosedError): pass map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _deserialize(_models.ErrorResponse, response.json()) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) raise HttpResponseError(response=response, model=error) if _stream: @@ -6859,3 +6860,65 @@ def get_service_statistics(self, **kwargs: Any) -> 
_models.SearchServiceStatisti return cls(pipeline_response, deserialized, {}) # type: ignore return deserialized # type: ignore + + @distributed_trace + @api_version_validation( + method_added_on="2025-03-01-preview", + params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "accept"]}, + ) + def get_index_stats_summary(self, **kwargs: Any) -> _models.ListIndexStatsSummary: + """Gets service level statistics for a search service. + + :return: ListIndexStatsSummary. The ListIndexStatsSummary is compatible with MutableMapping + :rtype: ~azure.search.documents.models.ListIndexStatsSummary + :raises ~azure.core.exceptions.HttpResponseError: + """ + error_map: MutableMapping = { + 401: ClientAuthenticationError, + 404: ResourceNotFoundError, + 409: ResourceExistsError, + 304: ResourceNotModifiedError, + } + error_map.update(kwargs.pop("error_map", {}) or {}) + + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[_models.ListIndexStatsSummary] = kwargs.pop("cls", None) + + _request = build_search_get_index_stats_summary_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) + + _stream = kwargs.pop("stream", False) + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + + response = pipeline_response.http_response + + if response.status_code not in [200]: + if _stream: + try: + response.read() # Load the body in memory and close the socket + except (StreamConsumedError, StreamClosedError): + pass + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) + + if _stream: + deserialized = response.iter_bytes() + else: + deserialized = _deserialize(_models.ListIndexStatsSummary, response.json()) + + if cls: + return cls(pipeline_response, deserialized, {}) # type: ignore + + return deserialized # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/_paging.py index 2039cccfa341..e60e3709602e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_paging.py @@ -135,13 +135,13 @@ def __init__(self, client, index_name, initial_query, kwargs, continuation_token def _get_next_cb(self, continuation_token): if continuation_token is None: - return self._client.documents_operations.search_post( + return self._client.documents.search_post( index_name=self._index_name, search_request=self._initial_query.request, **self._kwargs ) _next_link, next_page_request = unpack_continuation_token(continuation_token) - return self._client.documents_operations.search_post( + return self._client.documents.search_post( index_name=self._index_name, search_request=next_page_request, **self._kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py index 43ae6d8aa76c..d88f5fc0cc8a 100644 --- 
a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py @@ -111,7 +111,7 @@ def get_document_count(self, **kwargs: Any) -> int: :rtype: int """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return int(self._client.documents_operations.count(index_name=self._index_name, **kwargs)) + return int(self._client.documents.count(index_name=self._index_name, **kwargs)) @distributed_trace def get_document(self, key: str, selected_fields: Optional[List[str]] = None, **kwargs: Any) -> Dict: @@ -134,7 +134,7 @@ def get_document(self, key: str, selected_fields: Optional[List[str]] = None, ** :caption: Get a specific document from the search index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.documents_operations.get( + result = self._client.documents.get( index_name=self._index_name, key=key, selected_fields=selected_fields, **kwargs ) return cast(dict, result) @@ -481,7 +481,7 @@ def suggest( query.order_by(order_by) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(SuggestRequest, query.request) - response = self._client.documents_operations.suggest_post( + response = self._client.documents.suggest_post( index_name=self._index_name, suggest_request=request, **kwargs ) assert response.results is not None # Hint for mypy @@ -562,7 +562,7 @@ def autocomplete( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(AutocompleteRequest, query.request) - response = self._client.documents_operations.autocomplete_post( + response = self._client.documents.autocomplete_post( index_name=self._index_name, autocomplete_request=request, **kwargs ) assert response.results is not None # Hint for mypy @@ -702,7 +702,7 @@ def _index_documents_actions(self, actions: List[IndexAction], **kwargs: Any) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = self._client.documents_operations.index( + batch_response = self._client.documents.index( index_name=self._index_name, batch=batch, error_map=error_map, **kwargs ) return cast(List[IndexingResult], batch_response.results) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py b/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py index 6b7c73fe3bf9..4b3fe096db9c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_indexing_buffered_sender.py @@ -273,7 +273,7 @@ def _index_documents_actions(self, actions: List[IndexAction], **kwargs) -> List kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = self._client.documents_operations.index( + batch_response = self._client.documents.index( index_name=self._index_name, batch=batch, error_map=error_map, **kwargs ) return cast(List[IndexingResult], batch_response.results) diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py index a5294602684e..eab8dd64f0fd 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py @@ 
-118,13 +118,13 @@ def __init__(self, client, index_name, initial_query, kwargs, continuation_token async def _get_next_cb(self, continuation_token): if continuation_token is None: - return await self._client.documents_operations.search_post( + return await self._client.documents.search_post( index_name=self._index_name, search_request=self._initial_query.request, **self._kwargs ) _next_link, next_page_request = unpack_continuation_token(continuation_token) - return await self._client.documents_operations.search_post( + return await self._client.documents.search_post( index_name=self._index_name, search_request=next_page_request, **self._kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py index b3521a2b5d37..61f4c42ef19b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py @@ -113,7 +113,7 @@ async def get_document_count(self, **kwargs: Any) -> int: :rtype: int """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return int(await self._client.documents_operations.count(index_name=self._index_name, **kwargs)) + return int(await self._client.documents.count(index_name=self._index_name, **kwargs)) @distributed_trace_async async def get_document(self, key: str, selected_fields: Optional[List[str]] = None, **kwargs: Any) -> Dict: @@ -136,7 +136,7 @@ async def get_document(self, key: str, selected_fields: Optional[List[str]] = No :caption: Get a specific document from the search index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.documents_operations.get( + result = await self._client.documents.get( index_name=self._index_name, key=key, selected_fields=selected_fields, **kwargs ) return cast(dict, result) @@ -480,7 +480,7 @@ async def suggest( query.order_by(order_by) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(SuggestRequest, query.request) - response = await self._client.documents_operations.suggest_post( + response = await self._client.documents.suggest_post( index_name=self._index_name, suggest_request=request, **kwargs ) assert response.results is not None # Hint for mypy @@ -561,7 +561,7 @@ async def autocomplete( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(AutocompleteRequest, query.request) - response = await self._client.documents_operations.autocomplete_post( + response = await self._client.documents.autocomplete_post( index_name=self._index_name, autocomplete_request=request, **kwargs ) assert response.results is not None # Hint for mypy @@ -701,7 +701,7 @@ async def _index_documents_actions(self, actions: List[IndexAction], **kwargs: A kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = await self._client.documents_operations.index( + batch_response = await self._client.documents.index( index_name=self._index_name, batch=batch, error_map=error_map, **kwargs ) return cast(List[IndexingResult], batch_response.results) diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py index 0231539db592..276b72b51f9c 100644 
--- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_indexing_buffered_sender_async.py @@ -276,7 +276,7 @@ async def _index_documents_actions(self, actions: List[IndexAction], **kwargs: A kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) batch = IndexBatch(actions=actions) try: - batch_response = await self._client.documents_operations.index( + batch_response = await self._client.documents.index( index_name=self._index_name, batch=batch, error_map=error_map, **kwargs ) return cast(List[IndexingResult], batch_response.results) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py index 2fec4f0cd2f6..75e4749d175b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py @@ -113,7 +113,7 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - indexes = self._client.indexes_operations.list( + indexes = self._client.indexes.list( cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs ) return cast(ItemPaged[SearchIndex], indexes) @@ -129,7 +129,7 @@ def list_index_names(self, **kwargs: Any) -> ItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.indexes_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.indexes.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(ItemPaged[str], names) @distributed_trace @@ -152,7 +152,7 @@ def get_index(self, name: str, **kwargs: Any) -> SearchIndex: :caption: Get an index. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexes_operations.get(name, **kwargs) + result = self._client.indexes.get(name, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -168,7 +168,7 @@ def get_index_statistics(self, index_name: str, **kwargs: Any) -> MutableMapping """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexes_operations.get_statistics(index_name, **kwargs) + result = self._client.indexes.get_statistics(index_name, **kwargs) return result @distributed_trace @@ -206,7 +206,7 @@ def delete_index( etag = index.e_tag # type: ignore except AttributeError: index_name = index - self._client.indexes_operations.delete( + self._client.indexes.delete( index_name=index_name, etag=etag, match_condition=match_condition, **kwargs ) @@ -231,7 +231,7 @@ def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_index = index._to_generated() # pylint:disable=protected-access - result = self._client.indexes_operations.create(patched_index, **kwargs) + result = self._client.indexes.create(patched_index, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -274,7 +274,7 @@ def create_or_update_index( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_index = index._to_generated() # pylint:disable=protected-access - result = self._client.indexes_operations.create_or_update( + result = self._client.indexes.create_or_update( index_name=index.name, index=patched_index, allow_index_downtime=allow_index_downtime, @@ -307,7 +307,7 @@ def analyze_text(self, index_name: str, analyze_request: AnalyzeTextOptions, **k :caption: Analyze text """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexes_operations.analyze( + result = self._client.indexes.analyze( index_name=index_name, request=analyze_request._to_generated(), **kwargs # pylint:disable=protected-access ) return result @@ -337,7 +337,7 @@ def get_synonym_maps(self, *, select: Optional[List[str]] = None, **kwargs) -> L kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.synonym_maps_operations.list(**kwargs) + result = self._client.synonym_maps.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy # pylint:disable=protected-access return [cast(SynonymMap, SynonymMap._from_generated(x)) for x in result.synonym_maps] @@ -352,7 +352,7 @@ def get_synonym_map_names(self, **kwargs: Any) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.synonym_maps_operations.list(**kwargs) + result = self._client.synonym_maps.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy return [x.name for x in result.synonym_maps] @@ -377,7 +377,7 @@ def get_synonym_map(self, name: str, **kwargs: Any) -> SynonymMap: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.synonym_maps_operations.get(name, **kwargs) + result = self._client.synonym_maps.get(name, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -416,7 +416,7 @@ def delete_synonym_map( etag = 
synonym_map.e_tag # type: ignore except AttributeError: name = synonym_map - self._client.synonym_maps_operations.delete( + self._client.synonym_maps.delete( synonym_map_name=name, etag=etag, match_condition=match_condition, **kwargs ) @@ -441,7 +441,7 @@ def create_synonym_map(self, synonym_map: SynonymMap, **kwargs: Any) -> SynonymM """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = self._client.synonym_maps_operations.create(patched_synonym_map, **kwargs) + result = self._client.synonym_maps.create(patched_synonym_map, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -465,7 +465,7 @@ def create_or_update_synonym_map( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = self._client.synonym_maps_operations.create_or_update( + result = self._client.synonym_maps.create_or_update( synonym_map_name=synonym_map.name, synonym_map=patched_synonym_map, prefer="return=representation", @@ -514,7 +514,7 @@ def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - return cast(ItemPaged[SearchAlias], self._client.aliases_operations.list(**kwargs)) + return cast(ItemPaged[SearchAlias], self._client.aliases.list(**kwargs)) @distributed_trace def list_alias_names(self, **kwargs: Any) -> ItemPaged[str]: @@ -527,7 +527,7 @@ def list_alias_names(self, **kwargs: Any) -> ItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.aliases_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.aliases.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(ItemPaged[str], names) @distributed_trace @@ -541,7 +541,7 @@ def get_alias(self, name: str, **kwargs: Any) -> SearchAlias: :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.aliases_operations.get(name, **kwargs) + result = self._client.aliases.get(name, **kwargs) return cast(SearchAlias, result) @distributed_trace @@ -579,7 +579,7 @@ def delete_alias( etag = alias.e_tag # type: ignore except AttributeError: alias_name = alias - self._client.aliases_operations.delete( + self._client.aliases.delete( alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs ) @@ -603,7 +603,7 @@ def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: :caption: Creating a new alias. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.aliases_operations.create(alias, **kwargs) + result = self._client.aliases.create(alias, **kwargs) return cast(SearchAlias, result) @distributed_trace @@ -635,7 +635,7 @@ def create_or_update_alias( :caption: Updating an alias. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.aliases_operations.create_or_update( + result = self._client.aliases.create_or_update( alias_name=alias.name, alias=alias, prefer="return=representation", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py index 7e3e0325c421..9b04618a691b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py @@ -362,7 +362,7 @@ def create_data_source_connection( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) # pylint:disable=protected-access packed_data_source = data_source_connection._to_generated() - result = self._client.data_sources_operations.create(packed_data_source, **kwargs) + result = self._client.data_sources.create(packed_data_source, **kwargs) return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @distributed_trace @@ -389,7 +389,7 @@ def create_or_update_data_source_connection( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) name = data_source_connection.name packed_data_source = data_source_connection._to_generated() # pylint:disable=protected-access - result = self._client.data_sources_operations.create_or_update( + result = self._client.data_sources.create_or_update( data_source_name=name, data_source=packed_data_source, prefer="return=representation", @@ -420,7 +420,7 @@ def get_data_source_connection(self, name: str, **kwargs: Any) -> SearchIndexerD :caption: Retrieve a SearchIndexerDataSourceConnection """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.data_sources_operations.get(name, **kwargs) + result = self._client.data_sources.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @@ -449,7 +449,7 @@ def get_data_source_connections( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.data_sources_operations.list(**kwargs) + result = self._client.data_sources.list(**kwargs) assert result.data_sources is not None # Hint for mypy # pylint:disable=protected-access return [ @@ -466,7 +466,7 @@ def get_data_source_connection_names(self, **kwargs: Any) -> Sequence[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.data_sources_operations.list(**kwargs) + result = self._client.data_sources.list(**kwargs) assert result.data_sources is not None # Hint for mypy return [x.name for x in result.data_sources] @@ -505,7 +505,7 @@ def delete_data_source_connection( etag = data_source_connection.e_tag # type: ignore except AttributeError: name = data_source_connection - self._client.data_sources_operations.delete( + self._client.data_sources.delete( data_source_name=name, etag=etag, match_condition=match_condition, **kwargs ) @@ -526,7 +526,7 @@ def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.skillsets_operations.list(**kwargs) + result = 
self._client.skillsets.list(**kwargs) assert result.skillsets is not None # Hint for mypy return [ cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(skillset)) @@ -543,7 +543,7 @@ def get_skillset_names(self, **kwargs: Any) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.skillsets_operations.list(**kwargs) + result = self._client.skillsets.list(**kwargs) assert result.skillsets is not None # Hint for mypy return [x.name for x in result.skillsets] @@ -558,7 +558,7 @@ def get_skillset(self, name: str, **kwargs: Any) -> SearchIndexerSkillset: :raises ~azure.core.exceptions.ResourceNotFoundError: If the skillset cannot be found. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.skillsets_operations.get(name, **kwargs) + result = self._client.skillsets.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(result)) @@ -588,7 +588,7 @@ def delete_skillset( etag = skillset.e_tag # type: ignore except AttributeError: name = skillset - self._client.skillsets_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) + self._client.skillsets.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def create_skillset(self, skillset: SearchIndexerSkillset, **kwargs: Any) -> SearchIndexerSkillset: @@ -638,7 +638,7 @@ def create_or_update_skillset( _validate_skillset(skillset) skillset_gen = skillset._to_generated() if hasattr(skillset, "_to_generated") else skillset - result = self._client.skillsets_operations.create_or_update( + result = self._client.skillsets.create_or_update( skillset_name=skillset.name, skillset=skillset_gen, # type: ignore prefer="return=representation", @@ -668,7 +668,7 @@ def reset_skills(self, skillset: Union[str, SearchIndexerSkillset], skill_names: except AttributeError: name = skillset names = SkillNames(skill_names=skill_names) - return self._client.skillsets_operations.reset_skills(skillset_name=name, skill_names=names, **kwargs) + return self._client.skillsets.reset_skills(skillset_name=name, skill_names=names, **kwargs) def _validate_skillset(skillset: SearchIndexerSkillset): diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py index 4d6cea1a079d..edb12b9b222d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py @@ -112,7 +112,7 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs) -> Async if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - indexes = self._client.indexes_operations.list( + indexes = self._client.indexes.list( cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs ) return cast(AsyncItemPaged[SearchIndex], indexes) @@ -127,7 +127,7 @@ def list_index_names(self, **kwargs: Any) -> AsyncItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.indexes_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.indexes.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(AsyncItemPaged[str], names) @distributed_trace_async @@ -150,7 +150,7 @@ async def 
get_index(self, name: str, **kwargs: Any) -> SearchIndex: :caption: Get an index. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexes_operations.get(name, **kwargs) + result = await self._client.indexes.get(name, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -165,7 +165,7 @@ async def get_index_statistics(self, index_name: str, **kwargs: Any) -> MutableM :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexes_operations.get_statistics(index_name, **kwargs) + result = await self._client.indexes.get_statistics(index_name, **kwargs) return result @distributed_trace_async @@ -203,7 +203,7 @@ async def delete_index( etag = index.e_tag # type: ignore except AttributeError: index_name = index - await self._client.indexes_operations.delete( + await self._client.indexes.delete( index_name=index_name, etag=etag, match_condition=match_condition, **kwargs ) @@ -228,7 +228,7 @@ async def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_index = index._to_generated() # pylint:disable=protected-access - result = await self._client.indexes_operations.create(patched_index, **kwargs) + result = await self._client.indexes.create(patched_index, **kwargs) return cast(SearchIndex, SearchIndex._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -271,7 +271,7 @@ async def create_or_update_index( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_index = index._to_generated() # pylint:disable=protected-access - result = await self._client.indexes_operations.create_or_update( + result = await self._client.indexes.create_or_update( index_name=index.name, index=patched_index, allow_index_downtime=allow_index_downtime, @@ -304,7 +304,7 @@ async def analyze_text(self, index_name: str, analyze_request: AnalyzeTextOption :caption: Analyze text """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexes_operations.analyze( + result = await self._client.indexes.analyze( index_name=index_name, request=analyze_request._to_generated(), **kwargs # pylint:disable=protected-access ) return result @@ -334,7 +334,7 @@ async def get_synonym_maps(self, *, select: Optional[List[str]] = None, **kwargs kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.synonym_maps_operations.list(**kwargs) + result = await self._client.synonym_maps.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy # pylint:disable=protected-access return [cast(SynonymMap, SynonymMap._from_generated(x)) for x in result.synonym_maps] @@ -349,7 +349,7 @@ async def get_synonym_map_names(self, **kwargs: Any) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.synonym_maps_operations.list(**kwargs) + result = await self._client.synonym_maps.list(**kwargs) assert result.synonym_maps is not None # Hint for mypy return [x.name for x in result.synonym_maps] @@ -374,7 +374,7 @@ async def get_synonym_map(self, name: str, **kwargs: Any) -> SynonymMap: """ kwargs["headers"] = 
self._merge_client_headers(kwargs.get("headers")) - result = await self._client.synonym_maps_operations.get(name, **kwargs) + result = await self._client.synonym_maps.get(name, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -413,7 +413,7 @@ async def delete_synonym_map( etag = synonym_map.e_tag # type: ignore except AttributeError: name = synonym_map - await self._client.synonym_maps_operations.delete( + await self._client.synonym_maps.delete( synonym_map_name=name, etag=etag, match_condition=match_condition, **kwargs ) @@ -438,7 +438,7 @@ async def create_synonym_map(self, synonym_map: SynonymMap, **kwargs: Any) -> Sy """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = await self._client.synonym_maps_operations.create(patched_synonym_map, **kwargs) + result = await self._client.synonym_maps.create(patched_synonym_map, **kwargs) return cast(SynonymMap, SynonymMap._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -461,7 +461,7 @@ async def create_or_update_synonym_map( """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_synonym_map = synonym_map._to_generated() # pylint:disable=protected-access - result = await self._client.synonym_maps_operations.create_or_update( + result = await self._client.synonym_maps.create_or_update( synonym_map_name=synonym_map.name, synonym_map=patched_synonym_map, prefer="return=representation", @@ -510,7 +510,7 @@ def list_aliases(self, *, select: Optional[List[str]] = None, **kwargs) -> Async if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - return cast(AsyncItemPaged[SearchAlias], self._client.aliases_operations.list(**kwargs)) + return cast(AsyncItemPaged[SearchAlias], self._client.aliases.list(**kwargs)) @distributed_trace def list_alias_names(self, **kwargs) -> AsyncItemPaged[str]: @@ -522,7 +522,7 @@ def list_alias_names(self, **kwargs) -> AsyncItemPaged[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - names = self._client.aliases_operations.list(cls=lambda objs: [x.name for x in objs], **kwargs) + names = self._client.aliases.list(cls=lambda objs: [x.name for x in objs], **kwargs) return cast(AsyncItemPaged[str], names) @distributed_trace_async @@ -536,7 +536,7 @@ async def get_alias(self, name: str, **kwargs) -> SearchAlias: :raises ~azure.core.exceptions.HttpResponseError: If the operation fails. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.aliases_operations.get(name, **kwargs) + result = await self._client.aliases.get(name, **kwargs) return cast(SearchAlias, result) @distributed_trace_async @@ -574,7 +574,7 @@ async def delete_alias( etag = alias.e_tag # type: ignore except AttributeError: alias_name = alias - await self._client.aliases_operations.delete( + await self._client.aliases.delete( alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs ) @@ -598,7 +598,7 @@ async def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: :caption: Create an alias. 
""" kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.aliases_operations.create(alias, **kwargs) + result = await self._client.aliases.create(alias, **kwargs) return cast(SearchAlias, result) @distributed_trace_async @@ -629,7 +629,7 @@ async def create_or_update_alias( :caption: Update an alias. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.aliases_operations.create_or_update( + result = await self._client.aliases.create_or_update( alias_name=alias.name, alias=alias, prefer="return=representation", diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py index 6d41372bcdb1..26aa464a0336 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py @@ -349,7 +349,7 @@ async def create_data_source_connection( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) # pylint:disable=protected-access packed_data_source = data_source_connection._to_generated() - result = await self._client.data_sources_operations.create(packed_data_source, **kwargs) + result = await self._client.data_sources.create(packed_data_source, **kwargs) return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @distributed_trace_async @@ -376,7 +376,7 @@ async def create_or_update_data_source_connection( name = data_source_connection.name # pylint:disable=protected-access packed_data_source = data_source_connection._to_generated() - result = await self._client.data_sources_operations.create_or_update( + result = await self._client.data_sources.create_or_update( data_source_name=name, data_source=packed_data_source, prefer="return=representation", @@ -422,7 +422,7 @@ async def delete_data_source_connection( etag = data_source_connection.e_tag # type: ignore except AttributeError: name = data_source_connection - await self._client.data_sources_operations.delete( + await self._client.data_sources.delete( data_source_name=name, etag=etag, match_condition=match_condition, **kwargs ) @@ -451,7 +451,7 @@ async def get_data_source_connection( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.data_sources_operations.get(name, **kwargs) + result = await self._client.data_sources.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerDataSourceConnection, SearchIndexerDataSourceConnection._from_generated(result)) @@ -472,7 +472,7 @@ async def get_data_source_connections(self, **kwargs: Any) -> Sequence[SearchInd :caption: List all SearchIndexerDataSourceConnections """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.data_sources_operations.list(**kwargs) + result = await self._client.data_sources.list(**kwargs) assert result.data_sources is not None # Hint for mypy # pylint:disable=protected-access return [ @@ -489,7 +489,7 @@ async def get_data_source_connection_names(self, **kwargs) -> Sequence[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.data_sources_operations.list(**kwargs) + result = await self._client.data_sources.list(**kwargs) 
assert result.data_sources is not None # Hint for mypy return [x.name for x in result.data_sources] @@ -509,7 +509,7 @@ async def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.skillsets_operations.list(**kwargs) + result = await self._client.skillsets.list(**kwargs) assert result.skillsets is not None # Hint for mypy return [ cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(skillset)) @@ -526,7 +526,7 @@ async def get_skillset_names(self, **kwargs) -> List[str]: """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.skillsets_operations.list(**kwargs) + result = await self._client.skillsets.list(**kwargs) assert result.skillsets is not None # Hint for mypy return [x.name for x in result.skillsets] @@ -541,7 +541,7 @@ async def get_skillset(self, name: str, **kwargs) -> SearchIndexerSkillset: :raises ~azure.core.exceptions.ResourceNotFoundError: If the skillset doesn't exist. """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.skillsets_operations.get(name, **kwargs) + result = await self._client.skillsets.get(name, **kwargs) # pylint:disable=protected-access return cast(SearchIndexerSkillset, SearchIndexerSkillset._from_generated(result)) @@ -571,7 +571,7 @@ async def delete_skillset( etag = skillset.e_tag # type: ignore except AttributeError: name = skillset - await self._client.skillsets_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) + await self._client.skillsets.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace_async async def create_skillset(self, skillset: SearchIndexerSkillset, **kwargs: Any) -> SearchIndexerSkillset: @@ -618,7 +618,7 @@ async def create_or_update_skillset( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) skillset_gen = skillset._to_generated() if hasattr(skillset, "_to_generated") else skillset - result = await self._client.skillsets_operations.create_or_update( + result = await self._client.skillsets.create_or_update( skillset_name=skillset.name, skillset=skillset_gen, # type: ignore prefer="return=representation", @@ -648,5 +648,5 @@ async def reset_skills(self, skillset: Union[str, SearchIndexerSkillset], skill_ except AttributeError: name = skillset names = SkillNames(skill_names=skill_names) - await self._client.skillsets_operations.reset_skills(skillset_name=name, skill_names=names, **kwargs) + await self._client.skillsets.reset_skills(skillset_name=name, skill_names=names, **kwargs) return diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py index 5da81d7ff270..859f78ac7b85 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py @@ -86,7 +86,7 @@ DocumentExtractionSkill, DocumentIntelligenceLayoutSkill, DocumentKeysOrIds, - EdgeNGramTokenFilter, + EdgeNGramTokenFilterV2 as EdgeNGramTokenFilter, EdgeNGramTokenizer, EdgeNGramTokenFilterSide, ElisionTokenFilter, @@ -133,7 +133,7 @@ LimitTokenFilter, IndexStatisticsSummary, LuceneStandardAnalyzer, - LuceneStandardTokenizer, + LuceneStandardTokenizerV2 as LuceneStandardTokenizer, 
MagnitudeScoringFunction, MagnitudeScoringParameters, MarkdownHeaderDepth, @@ -145,7 +145,7 @@ MicrosoftStemmingTokenizerLanguage, MicrosoftTokenizerLanguage, NativeBlobSoftDeleteDeletionDetectionPolicy, - NGramTokenFilter, + NGramTokenFilterV2 as NGramTokenFilter, NGramTokenizer, OcrLineEnding, OcrSkill, diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py index 9d28d911dacf..ddc702cec6bb 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py @@ -15,7 +15,7 @@ class TestSearchClientAsync: @await_prepared_test @mock.patch( - "azure.search.documents._generated.aio.operations._operations.DocumentsOperationsOperations.search_post" + "azure.search.documents._generated.aio.operations._operations.DocumentsOperations.search_post" ) async def test_get_count_reset_continuation_token(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) diff --git a/sdk/search/azure-search-documents/tests/test_search_client.py b/sdk/search/azure-search-documents/tests/test_search_client.py index 827249522ed0..3d813e9db5ed 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_client.py @@ -110,7 +110,7 @@ def test_repr(self): client = SearchClient("endpoint", "index name", CREDENTIAL) assert repr(client) == "".format(repr("endpoint"), repr("index name")) - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.count") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.count") def test_get_document_count(self, mock_count): client = SearchClient("endpoint", "index name", CREDENTIAL) client.get_document_count() @@ -118,7 +118,7 @@ def test_get_document_count(self, mock_count): assert mock_count.call_args[0] == () assert mock_count.call_args[1]["headers"] == client._headers - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.get") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.get") def test_get_document(self, mock_get): client = SearchClient("endpoint", "index name", CREDENTIAL) client.get_document("some_key") @@ -137,7 +137,7 @@ def test_get_document(self, mock_get): assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == "foo" - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.search_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.search_post") def test_search_query_argument(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.search(search_text="search text") @@ -152,7 +152,7 @@ def test_search_query_argument(self, mock_search_post): assert mock_search_post.call_args[0] == () assert mock_search_post.call_args[1]["search_request"].search_text == "search text" - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.suggest_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.suggest_post") def test_suggest_query_argument(self, mock_suggest_post): client = SearchClient("endpoint", "index name", CREDENTIAL) 
result = client.suggest(search_text="search text", suggester_name="sg") @@ -167,7 +167,7 @@ def test_suggest_bad_argument(self): client.suggest("bad_query") assert str(e) == "Expected a SuggestQuery for 'query', but got {}".format(repr("bad_query")) - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.search_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.search_post") def test_get_count_reset_continuation_token(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.search(search_text="search text") @@ -182,7 +182,7 @@ def test_get_count_reset_continuation_token(self, mock_search_post): assert not result._first_page_iterator_instance.continuation_token @mock.patch( - "azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.autocomplete_post" + "azure.search.documents._generated.operations._operations.DocumentsOperations.autocomplete_post" ) def test_autocomplete_query_argument(self, mock_autocomplete_post): client = SearchClient("endpoint", "index name", CREDENTIAL) @@ -192,7 +192,7 @@ def test_autocomplete_query_argument(self, mock_autocomplete_post): assert mock_autocomplete_post.call_args[1]["headers"] == client._headers assert mock_autocomplete_post.call_args[1]["autocomplete_request"].search_text == "search text" - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.count") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.count") def test_get_document_count_v2020_06_30(self, mock_count): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) client.get_document_count() @@ -200,7 +200,7 @@ def test_get_document_count_v2020_06_30(self, mock_count): assert mock_count.call_args[0] == () assert mock_count.call_args[1]["headers"] == client._headers - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.get") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.get") def test_get_document_v2020_06_30(self, mock_get): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) client.get_document("some_key") @@ -219,7 +219,7 @@ def test_get_document_v2020_06_30(self, mock_get): assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == "foo" - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.search_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.search_post") def test_search_query_argument_v2020_06_30(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) result = client.search(search_text="search text") @@ -234,7 +234,7 @@ def test_search_query_argument_v2020_06_30(self, mock_search_post): assert mock_search_post.call_args[0] == () assert mock_search_post.call_args[1]["search_request"].search_text == "search text" - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.suggest_post") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.suggest_post") def test_suggest_query_argument_v2020_06_30(self, mock_suggest_post): client = SearchClient("endpoint", "index name", CREDENTIAL, 
api_version=ApiVersion.V2020_06_30) result = client.suggest(search_text="search text", suggester_name="sg") @@ -244,7 +244,7 @@ def test_suggest_query_argument_v2020_06_30(self, mock_suggest_post): assert mock_suggest_post.call_args[1]["suggest_request"].search_text == "search text" @mock.patch( - "azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.autocomplete_post" + "azure.search.documents._generated.operations._operations.DocumentsOperations.autocomplete_post" ) def test_autocomplete_query_argument_v2020_06_30(self, mock_autocomplete_post): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) @@ -277,7 +277,7 @@ def test_add_method(self, arg, method_name): assert mock_index_documents.call_args[1]["headers"] == client._headers assert mock_index_documents.call_args[1]["extra"] == "foo" - @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperationsOperations.index") + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.index") def test_index_documents(self, mock_index): client = SearchClient("endpoint", "index name", CREDENTIAL) From 5a2db652c2b9dc35219156ca7793d82fa2e784dd Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 7 Mar 2025 11:47:33 -0800 Subject: [PATCH 09/12] update --- .../azure/search/documents/_generated/_serialization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py index e2a20b1d534c..7a0232de5ddc 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_serialization.py @@ -411,7 +411,7 @@ def from_dict( :param function key_extractors: A key extractor function. :param str content_type: JSON by default, set application/xml if XML. 
:returns: An instance of this model - :raises: DeserializationError if something went wrong + :raises DeserializationError: if something went wrong :rtype: Self """ deserializer = Deserializer(cls._infer_class_models()) From 34dbce0f8c87d38aced6c687add473c5565f2b8c Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 7 Mar 2025 14:59:36 -0800 Subject: [PATCH 10/12] updates --- .../_generated/aio/operations/_operations.py | 102 +++++++++++------- .../documents/_generated/models/__init__.py | 2 - .../documents/_generated/models/_enums.py | 6 +- .../documents/_generated/models/_models.py | 61 ++++++----- .../_generated/operations/_operations.py | 99 ++++++++++------- .../indexes/_search_indexer_client.py | 20 ++-- .../indexes/aio/_search_indexer_client.py | 20 ++-- 7 files changed, 181 insertions(+), 129 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py index b17c26c4aa1d..cfc0523f237f 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/aio/operations/_operations.py @@ -5436,18 +5436,24 @@ async def get_service_statistics(self, **kwargs: Any) -> _models.SearchServiceSt return deserialized # type: ignore - @distributed_trace_async + @distributed_trace @api_version_validation( method_added_on="2025-03-01-preview", params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "accept"]}, ) - async def get_index_stats_summary(self, **kwargs: Any) -> _models.ListIndexStatsSummary: - """Gets service level statistics for a search service. + def get_index_stats_summary(self, **kwargs: Any) -> AsyncIterable["_models.IndexStatisticsSummary"]: + """Retrieves a summary of statistics for all indexes in the search service. - :return: ListIndexStatsSummary. 
The ListIndexStatsSummary is compatible with MutableMapping - :rtype: ~azure.search.documents.models.ListIndexStatsSummary + :return: An iterator like instance of IndexStatisticsSummary + :rtype: + ~azure.core.async_paging.AsyncItemPaged[~azure.search.documents.models.IndexStatisticsSummary] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.IndexStatisticsSummary]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -5456,44 +5462,64 @@ async def get_index_stats_summary(self, **kwargs: Any) -> _models.ListIndexStats } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + def prepare_request(next_link=None): + if not next_link: - cls: ClsType[_models.ListIndexStatsSummary] = kwargs.pop("cls", None) + _request = build_search_get_index_stats_summary_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = build_search_get_index_stats_summary_request( - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + return _request - response = pipeline_response.http_response + async def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.IndexStatisticsSummary], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, AsyncList(list_of_elem) - if response.status_code not in [200]: - if _stream: - try: - await response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response.json()) - raise HttpResponseError(response=response, model=error) + async def get_next(next_link=None): + _request = prepare_request(next_link) - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = 
_deserialize(_models.ListIndexStatsSummary, response.json()) + _stream = False + pipeline_response: PipelineResponse = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) - return deserialized # type: ignore + return pipeline_response + + return AsyncItemPaged(get_next, extract_data) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py index ef116bba5932..52aa7ae56d2a 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/__init__.py @@ -105,7 +105,6 @@ LexicalTokenizer, LimitTokenFilter, ListDataSourcesResult, - ListIndexStatsSummary, ListIndexersResult, ListSkillsetsResult, ListSynonymMapsResult, @@ -410,7 +409,6 @@ "LexicalTokenizer", "LimitTokenFilter", "ListDataSourcesResult", - "ListIndexStatsSummary", "ListIndexersResult", "ListSkillsetsResult", "ListSynonymMapsResult", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py index ac1c13890df0..059ad11677ea 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_enums.py @@ -12,7 +12,7 @@ class AIFoundryModelCatalogName(str, Enum, metaclass=CaseInsensitiveEnumMeta): - """The name of the embedding model from the Azure AI Studio Catalog that will be + """The name of the embedding model from the Azure AI Foundry Catalog that will be called. """ @@ -2459,7 +2459,7 @@ class TokenFilterName(str, Enum, metaclass=CaseInsensitiveEnumMeta): """Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html""" CJK_WIDTH = "cjk_width" - """Normalizes CJK width differences. Folds fullwidth ASCII variants into the + """Normalizes CJK width differences. Folds full-width ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html""" @@ -2672,7 +2672,7 @@ class VectorSearchVectorizerKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): Services Vision Vectorize API.""" AML = "aml" """Generate embeddings using an Azure Machine Learning endpoint deployed via the - Azure AI Studio Model Catalog at query time.""" + Azure AI Foundry Model Catalog at query time.""" class VectorThresholdKind(str, Enum, metaclass=CaseInsensitiveEnumMeta): diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py index a7ddf48b28e1..d2d365f3ab3b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/models/_models.py @@ -74,7 +74,7 @@ class AIServicesAccountIdentity(CognitiveServicesAccount, discriminator="#Micros :ivar identity: The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set - to "none", the value of this property is cleared. Required. + to "none", the value of this property is cleared. :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity :ivar subdomain_url: The subdomain url for the corresponding AI Service. Required. :vartype subdomain_url: str @@ -83,13 +83,13 @@ class AIServicesAccountIdentity(CognitiveServicesAccount, discriminator="#Micros :vartype odata_type: str """ - identity: "_models.SearchIndexerDataIdentity" = rest_field( + identity: Optional["_models.SearchIndexerDataIdentity"] = rest_field( visibility=["read", "create", "update", "delete", "query"] ) """The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set - to \"none\", the value of this property is cleared. Required.""" + to \"none\", the value of this property is cleared.""" subdomain_url: str = rest_field(name="subdomainUrl", visibility=["read", "create", "update", "delete", "query"]) """The subdomain url for the corresponding AI Service. Required.""" odata_type: Literal["#Microsoft.Azure.Search.AIServicesByIdentity"] = rest_discriminator(name="@odata.type", visibility=["read", "create", "update", "delete", "query"]) # type: ignore @@ -100,9 +100,9 @@ class AIServicesAccountIdentity(CognitiveServicesAccount, discriminator="#Micros def __init__( self, *, - identity: "_models.SearchIndexerDataIdentity", subdomain_url: str, description: Optional[str] = None, + identity: Optional["_models.SearchIndexerDataIdentity"] = None, ) -> None: ... @overload @@ -867,7 +867,7 @@ class AzureMachineLearningParameters(_model_base.Model): :vartype timeout: ~datetime.timedelta :ivar region: (Optional for token authentication). The region the AML service is deployed in. :vartype region: str - :ivar model_name: The name of the embedding model from the Azure AI Studio Catalog that is + :ivar model_name: The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. 
Known values are: "OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32", "OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336", @@ -898,7 +898,7 @@ class AzureMachineLearningParameters(_model_base.Model): model_name: Optional[Union[str, "_models.AIFoundryModelCatalogName"]] = rest_field( name="modelName", visibility=["read", "create", "update", "delete", "query"] ) - """The name of the embedding model from the Azure AI Studio Catalog that is + """The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. Known values are: \"OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32\", \"OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336\", @@ -1125,7 +1125,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: class AzureMachineLearningVectorizer(VectorSearchVectorizer, discriminator="aml"): - """Specifies an Azure Machine Learning endpoint deployed via the Azure AI Studio + """Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for generating the vector embedding of a query string. @@ -1137,7 +1137,7 @@ class AzureMachineLearningVectorizer(VectorSearchVectorizer, discriminator="aml" :ivar kind: The name of the kind of vectorization method being configured for use with vector search. Required. Generate embeddings using an Azure Machine Learning endpoint deployed via the - Azure AI Studio Model Catalog at query time. + Azure AI Foundry Model Catalog at query time. :vartype kind: str or ~azure.search.documents.models.AML """ @@ -1149,7 +1149,7 @@ class AzureMachineLearningVectorizer(VectorSearchVectorizer, discriminator="aml" """The name of the kind of vectorization method being configured for use with vector search. Required. Generate embeddings using an Azure Machine Learning endpoint deployed via the - Azure AI Studio Model Catalog at query time.""" + Azure AI Foundry Model Catalog at query time.""" @overload def __init__( @@ -3903,6 +3903,8 @@ class FacetResult(_model_base.Model): collection of buckets for each faceted field; null if the query did not contain any nested facets. :vartype facets: dict[str, list[~azure.search.documents.models.FacetResult]] + :ivar sum: The resulting total sum for the facet when a sum metric is requested. + :vartype sum: int """ count: Optional[int] = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -3912,6 +3914,8 @@ class FacetResult(_model_base.Model): """The nested facet query results for the search operation, organized as a collection of buckets for each faceted field; null if the query did not contain any nested facets.""" + sum: Optional[int] = rest_field(visibility=["read"]) + """The resulting total sum for the facet when a sum metric is requested.""" @overload def __init__( @@ -5654,22 +5658,6 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) -class ListIndexStatsSummary(_model_base.Model): - """Response from a request to retrieve stats summary of all indexes. If successful, it includes - the stats of each index in the service. - - Readonly variables are only populated by the server, and will be ignored when sending a request. - - - :ivar indexes_statistics: The Statistics summary of all indexes in the Search service. - Required. 
- :vartype indexes_statistics: list[~azure.search.documents.models.IndexStatisticsSummary] - """ - - indexes_statistics: List["_models.IndexStatisticsSummary"] = rest_field(name="value", visibility=["read"]) - """The Statistics summary of all indexes in the Search service. Required.""" - - class ListSkillsetsResult(_model_base.Model): """Response from a list skillset request. If successful, it includes the full definitions of all skillsets. @@ -8360,6 +8348,8 @@ class SearchIndexerCache(_model_base.Model): indexer, if the identity is unspecified, the value remains unchanged. If set to "none", the value of this property is cleared. :vartype identity: ~azure.search.documents.models.SearchIndexerDataIdentity + :ivar id: A guid for the SearchIndexerCache. + :vartype id: str """ storage_connection_string: Optional[str] = rest_field( @@ -8379,6 +8369,8 @@ class SearchIndexerCache(_model_base.Model): not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared.""" + id: Optional[str] = rest_field(visibility=["read", "create", "update", "delete", "query"]) + """A guid for the SearchIndexerCache.""" @overload def __init__( @@ -8387,6 +8379,7 @@ def __init__( storage_connection_string: Optional[str] = None, enable_reprocessing: Optional[bool] = None, identity: Optional["_models.SearchIndexerDataIdentity"] = None, + id: Optional[str] = None, # pylint: disable=redefined-builtin ) -> None: ... @overload @@ -9910,7 +9903,7 @@ class SearchResourceEncryptionKey(_model_base.Model): Required. :vartype key_name: str :ivar key_version: The version of your Azure Key Vault key to be used to encrypt your data at - rest. Required. + rest. :vartype key_version: str :ivar vault_uri: The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be @@ -9931,8 +9924,10 @@ class SearchResourceEncryptionKey(_model_base.Model): key_name: str = rest_field(name="keyVaultKeyName", visibility=["read", "create", "update", "delete", "query"]) """The name of your Azure Key Vault key to be used to encrypt your data at rest. Required.""" - key_version: str = rest_field(name="keyVaultKeyVersion", visibility=["read", "create", "update", "delete", "query"]) - """The version of your Azure Key Vault key to be used to encrypt your data at rest. Required.""" + key_version: Optional[str] = rest_field( + name="keyVaultKeyVersion", visibility=["read", "create", "update", "delete", "query"] + ) + """The version of your Azure Key Vault key to be used to encrypt your data at rest.""" vault_uri: str = rest_field(name="keyVaultUri", visibility=["read", "create", "update", "delete", "query"]) """The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be @@ -9956,8 +9951,8 @@ def __init__( self, *, key_name: str, - key_version: str, vault_uri: str, + key_version: Optional[str] = None, access_credentials: Optional["_models.AzureActiveDirectoryApplicationCredentials"] = None, identity: Optional["_models.SearchIndexerDataIdentity"] = None, ) -> None: ... @@ -10378,6 +10373,9 @@ class SemanticConfiguration(_model_base.Model): properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. Required. 
:vartype prioritized_fields: ~azure.search.documents.models.SemanticPrioritizedFields + :ivar flighting_opt_in: Determines how which semantic or query rewrite models to use during + model flighting/upgrades. + :vartype flighting_opt_in: bool """ name: str = rest_field(visibility=["read", "create", "update", "delete", "query"]) @@ -10389,6 +10387,10 @@ class SemanticConfiguration(_model_base.Model): ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. Required.""" + flighting_opt_in: Optional[bool] = rest_field( + name="flightingOptIn", visibility=["read", "create", "update", "delete", "query"] + ) + """Determines how which semantic or query rewrite models to use during model flighting/upgrades.""" @overload def __init__( @@ -10396,6 +10398,7 @@ def __init__( *, name: str, prioritized_fields: "_models.SemanticPrioritizedFields", + flighting_opt_in: Optional[bool] = None, ) -> None: ... @overload diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py index e40d1c993a7b..06f6dbef1d3c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/operations/_operations.py @@ -6866,13 +6866,18 @@ def get_service_statistics(self, **kwargs: Any) -> _models.SearchServiceStatisti method_added_on="2025-03-01-preview", params_added_on={"2025-03-01-preview": ["api_version", "client_request_id", "accept"]}, ) - def get_index_stats_summary(self, **kwargs: Any) -> _models.ListIndexStatsSummary: - """Gets service level statistics for a search service. + def get_index_stats_summary(self, **kwargs: Any) -> Iterable["_models.IndexStatisticsSummary"]: + """Retrieves a summary of statistics for all indexes in the search service. - :return: ListIndexStatsSummary. 
The ListIndexStatsSummary is compatible with MutableMapping - :rtype: ~azure.search.documents.models.ListIndexStatsSummary + :return: An iterator like instance of IndexStatisticsSummary + :rtype: ~azure.core.paging.ItemPaged[~azure.search.documents.models.IndexStatisticsSummary] :raises ~azure.core.exceptions.HttpResponseError: """ + _headers = kwargs.pop("headers", {}) or {} + _params = kwargs.pop("params", {}) or {} + + cls: ClsType[List[_models.IndexStatisticsSummary]] = kwargs.pop("cls", None) + error_map: MutableMapping = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, @@ -6881,44 +6886,64 @@ def get_index_stats_summary(self, **kwargs: Any) -> _models.ListIndexStatsSummar } error_map.update(kwargs.pop("error_map", {}) or {}) - _headers = kwargs.pop("headers", {}) or {} - _params = kwargs.pop("params", {}) or {} + def prepare_request(next_link=None): + if not next_link: - cls: ClsType[_models.ListIndexStatsSummary] = kwargs.pop("cls", None) + _request = build_search_get_index_stats_summary_request( + api_version=self._config.api_version, + headers=_headers, + params=_params, + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _request = build_search_get_index_stats_summary_request( - api_version=self._config.api_version, - headers=_headers, - params=_params, - ) - path_format_arguments = { - "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True), - } - _request.url = self._client.format_url(_request.url, **path_format_arguments) + else: + # make call to next link with the client's api-version + _parsed_next_link = urllib.parse.urlparse(next_link) + _next_request_params = case_insensitive_dict( + { + key: [urllib.parse.quote(v) for v in value] + for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items() + } + ) + _next_request_params["api-version"] = self._config.api_version + _request = HttpRequest( + "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params + ) + path_format_arguments = { + "endpoint": self._serialize.url( + "self._config.endpoint", self._config.endpoint, "str", skip_quote=True + ), + } + _request.url = self._client.format_url(_request.url, **path_format_arguments) - _stream = kwargs.pop("stream", False) - pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access - _request, stream=_stream, **kwargs - ) + return _request - response = pipeline_response.http_response + def extract_data(pipeline_response): + deserialized = pipeline_response.http_response.json() + list_of_elem = _deserialize(List[_models.IndexStatisticsSummary], deserialized["value"]) + if cls: + list_of_elem = cls(list_of_elem) # type: ignore + return None, iter(list_of_elem) - if response.status_code not in [200]: - if _stream: - try: - response.read() # Load the body in memory and close the socket - except (StreamConsumedError, StreamClosedError): - pass - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = _failsafe_deserialize(_models.ErrorResponse, response.json()) - raise HttpResponseError(response=response, model=error) + def get_next(next_link=None): + _request = prepare_request(next_link) - if _stream: - deserialized = response.iter_bytes() - else: - deserialized = _deserialize(_models.ListIndexStatsSummary, response.json()) + 
_stream = False + pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access + _request, stream=_stream, **kwargs + ) + response = pipeline_response.http_response - if cls: - return cls(pipeline_response, deserialized, {}) # type: ignore + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = _failsafe_deserialize(_models.ErrorResponse, response.json()) + raise HttpResponseError(response=response, model=error) - return deserialized # type: ignore + return pipeline_response + + return ItemPaged(get_next, extract_data) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py index 9b04618a691b..43e55ebb83b0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py @@ -99,7 +99,7 @@ def create_indexer(self, indexer: SearchIndexer, **kwargs: Any) -> SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = self._client.indexers_operations.create(patched_indexer, **kwargs) + result = self._client.indexers.create(patched_indexer, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -129,7 +129,7 @@ def create_or_update_indexer( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) name = indexer.name patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = self._client.indexers_operations.create_or_update( + result = self._client.indexers.create_or_update( indexer_name=name, indexer=patched_indexer, prefer="return=representation", @@ -160,7 +160,7 @@ def get_indexer(self, name: str, **kwargs: Any) -> SearchIndexer: :caption: Retrieve a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexers_operations.get(name, **kwargs) + result = self._client.indexers.get(name, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace @@ -186,7 +186,7 @@ def get_indexers(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = self._client.indexers_operations.list(**kwargs) + result = self._client.indexers.list(**kwargs) assert result.indexers is not None # Hint for mypy # pylint:disable=protected-access return [cast(SearchIndexer, SearchIndexer._from_generated(index)) for index in result.indexers] @@ -208,7 +208,7 @@ def get_indexer_names(self, **kwargs: Any) -> Sequence[str]: :caption: List all the SearchIndexers """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = self._client.indexers_operations.list(**kwargs) + result = self._client.indexers.list(**kwargs) assert result.indexers is not None # Hint for mypy return [x.name for x in result.indexers] @@ -247,7 +247,7 @@ def delete_indexer( etag = indexer.e_tag # type: ignore except AttributeError: name = indexer - self._client.indexers_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) + 
self._client.indexers.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def run_indexer(self, name: str, **kwargs: Any) -> None: @@ -266,7 +266,7 @@ def run_indexer(self, name: str, **kwargs: Any) -> None: :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - self._client.indexers_operations.run(name, **kwargs) + self._client.indexers.run(name, **kwargs) @distributed_trace def reset_indexer(self, name: str, **kwargs: Any) -> None: @@ -285,7 +285,7 @@ def reset_indexer(self, name: str, **kwargs: Any) -> None: :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - self._client.indexers_operations.reset(name, **kwargs) + self._client.indexers.reset(name, **kwargs) @distributed_trace def reset_documents( @@ -315,7 +315,7 @@ def reset_documents( name = indexer.name # type: ignore except AttributeError: name = indexer - return self._client.indexers_operations.reset_docs(name, overwrite=overwrite, **kwargs) + return self._client.indexers.reset_docs(name, overwrite=overwrite, **kwargs) @distributed_trace def get_indexer_status(self, name: str, **kwargs: Any) -> SearchIndexerStatus: @@ -337,7 +337,7 @@ def get_indexer_status(self, name: str, **kwargs: Any) -> SearchIndexerStatus: :caption: Get a SearchIndexer's status """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return self._client.indexers_operations.get_status(name, **kwargs) + return self._client.indexers.get_status(name, **kwargs) @distributed_trace def create_data_source_connection( diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py index 26aa464a0336..2fd3c2b7a3d8 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_indexer_client.py @@ -94,7 +94,7 @@ async def create_indexer(self, indexer: SearchIndexer, **kwargs: Any) -> SearchI """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = await self._client.indexers_operations.create(patched_indexer, **kwargs) + result = await self._client.indexers.create(patched_indexer, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -124,7 +124,7 @@ async def create_or_update_indexer( kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) name = indexer.name patched_indexer = indexer._to_generated() # pylint:disable=protected-access - result = await self._client.indexers_operations.create_or_update( + result = await self._client.indexers.create_or_update( indexer_name=name, indexer=patched_indexer, prefer="return=representation", @@ -155,7 +155,7 @@ async def get_indexer(self, name: str, **kwargs: Any) -> SearchIndexer: :caption: Retrieve a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexers_operations.get(name, **kwargs) + result = await self._client.indexers.get(name, **kwargs) return cast(SearchIndexer, SearchIndexer._from_generated(result)) # pylint:disable=protected-access @distributed_trace_async @@ -181,7 +181,7 @@ async def get_indexers(self, 
*, select: Optional[List[str]] = None, **kwargs) -> kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) if select: kwargs["select"] = ",".join(select) - result = await self._client.indexers_operations.list(**kwargs) + result = await self._client.indexers.list(**kwargs) assert result.indexers is not None # Hint for mypy # pylint:disable=protected-access return [cast(SearchIndexer, SearchIndexer._from_generated(index)) for index in result.indexers] @@ -194,7 +194,7 @@ async def get_indexer_names(self, **kwargs) -> Sequence[str]: :rtype: list[str] """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - result = await self._client.indexers_operations.list(**kwargs) + result = await self._client.indexers.list(**kwargs) assert result.indexers is not None # Hint for mypy return [x.name for x in result.indexers] @@ -233,7 +233,7 @@ async def delete_indexer( etag = indexer.e_tag # type: ignore except AttributeError: name = indexer - await self._client.indexers_operations.delete(name, etag=etag, match_condition=match_condition, **kwargs) + await self._client.indexers.delete(name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace_async async def run_indexer(self, name: str, **kwargs: Any) -> None: @@ -252,7 +252,7 @@ async def run_indexer(self, name: str, **kwargs: Any) -> None: :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - await self._client.indexers_operations.run(name, **kwargs) + await self._client.indexers.run(name, **kwargs) @distributed_trace_async async def reset_indexer(self, name: str, **kwargs: Any) -> None: @@ -271,7 +271,7 @@ async def reset_indexer(self, name: str, **kwargs: Any) -> None: :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - await self._client.indexers_operations.reset(name, **kwargs) + await self._client.indexers.reset(name, **kwargs) @distributed_trace_async async def reset_documents( @@ -301,7 +301,7 @@ async def reset_documents( name = indexer.name # type: ignore except AttributeError: name = indexer - await self._client.indexers_operations.reset_docs(name, overwrite=overwrite, **kwargs) + await self._client.indexers.reset_docs(name, overwrite=overwrite, **kwargs) return @distributed_trace_async @@ -324,7 +324,7 @@ async def get_indexer_status(self, name: str, **kwargs: Any) -> SearchIndexerSta :caption: Get a SearchIndexer's status """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - return await self._client.indexers_operations.get_status(name, **kwargs) + return await self._client.indexers.get_status(name, **kwargs) @distributed_trace_async async def create_data_source_connection( From 01178d903198cb5317018dc528408460d66f0a92 Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 7 Mar 2025 15:35:00 -0800 Subject: [PATCH 11/12] update --- .../azure/search/documents/_generated/_model_base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py b/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py index 3072ee252ed9..35c51750ea22 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_generated/_model_base.py @@ -204,7 +204,7 @@ def _deserialize_datetime(attr: typing.Union[str, datetime]) -> datetime: test_utc = 
date_obj.utctimetuple() if test_utc.tm_year > 9999 or test_utc.tm_year < 1: raise OverflowError("Hit max or min date") - return date_obj + return typing.cast(datetime, date_obj) def _deserialize_datetime_rfc7231(attr: typing.Union[str, datetime]) -> datetime: @@ -258,7 +258,7 @@ def _deserialize_time(attr: typing.Union[str, time]) -> time: """ if isinstance(attr, time): return attr - return isodate.parse_time(attr) + return typing.cast(time, isodate.parse_time(attr)) def _deserialize_bytes(attr): @@ -1182,7 +1182,7 @@ def _get_wrapped_element( _get_element(v, exclude_readonly, meta, wrapped_element) else: wrapped_element.text = _get_primitive_type_value(v) - return wrapped_element + return typing.cast(ET.Element, wrapped_element) def _get_primitive_type_value(v) -> str: From ecaf1448ded566108fc5911fafab30fd1d258cf5 Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Fri, 7 Mar 2025 17:23:30 -0800 Subject: [PATCH 12/12] updates --- .../azure/search/documents/_search_client.py | 6 ++---- .../azure/search/documents/aio/_paging.py | 1 + .../search/documents/aio/_search_client_async.py | 2 +- .../documents/indexes/_search_index_client.py | 16 ++++------------ .../documents/indexes/_search_indexer_client.py | 4 +--- .../indexes/aio/_search_index_client.py | 12 +++--------- .../search/documents/indexes/models/__init__.py | 6 +++--- .../async_tests/test_search_client_async.py | 4 +--- .../tests/test_search_client.py | 8 ++------ 9 files changed, 18 insertions(+), 41 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py index d88f5fc0cc8a..0055e5abed8e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_search_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_search_client.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- -from typing import cast, List, Any, Union, Dict, Optional, MutableMapping +from typing import cast, List, Any, Union, Dict, Optional from azure.core.rest import HttpRequest, HttpResponse from azure.core.credentials import AzureKeyCredential, TokenCredential @@ -481,9 +481,7 @@ def suggest( query.order_by(order_by) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) request = cast(SuggestRequest, query.request) - response = self._client.documents.suggest_post( - index_name=self._index_name, suggest_request=request, **kwargs - ) + response = self._client.documents.suggest_post(index_name=self._index_name, suggest_request=request, **kwargs) assert response.results is not None # Hint for mypy results = response.results return results diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py index eab8dd64f0fd..2bb2c4c8bc28 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_paging.py @@ -103,6 +103,7 @@ async def wrapper(self, *args, **kw): class AsyncSearchPageIterator(AsyncPageIterator[ReturnType]): """An iterator of search results.""" + def __init__(self, client, index_name, initial_query, kwargs, continuation_token=None) -> None: super(AsyncSearchPageIterator, self).__init__( get_next=self._get_next_cb, diff --git a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py index 61f4c42ef19b..15df2c03a2c6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/aio/_search_client_async.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. 
# -------------------------------------------------------------------------- -from typing import cast, List, Union, Any, Optional, Dict, MutableMapping +from typing import cast, List, Union, Any, Optional, Dict from azure.core.rest import HttpRequest, AsyncHttpResponse from azure.core.credentials import AzureKeyCredential diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py index 75e4749d175b..b2ce464421fd 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_index_client.py @@ -113,9 +113,7 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - indexes = self._client.indexes.list( - cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs - ) + indexes = self._client.indexes.list(cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs) return cast(ItemPaged[SearchIndex], indexes) @distributed_trace @@ -206,9 +204,7 @@ def delete_index( etag = index.e_tag # type: ignore except AttributeError: index_name = index - self._client.indexes.delete( - index_name=index_name, etag=etag, match_condition=match_condition, **kwargs - ) + self._client.indexes.delete(index_name=index_name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: @@ -416,9 +412,7 @@ def delete_synonym_map( etag = synonym_map.e_tag # type: ignore except AttributeError: name = synonym_map - self._client.synonym_maps.delete( - synonym_map_name=name, etag=etag, match_condition=match_condition, **kwargs - ) + self._client.synonym_maps.delete(synonym_map_name=name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def create_synonym_map(self, synonym_map: SynonymMap, **kwargs: Any) -> SynonymMap: @@ -579,9 +573,7 @@ def delete_alias( etag = alias.e_tag # type: ignore except AttributeError: alias_name = alias - self._client.aliases.delete( - alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs - ) + self._client.aliases.delete(alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py index 43e55ebb83b0..5be093dac169 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_search_indexer_client.py @@ -505,9 +505,7 @@ def delete_data_source_connection( etag = data_source_connection.e_tag # type: ignore except AttributeError: name = data_source_connection - self._client.data_sources.delete( - data_source_name=name, etag=etag, match_condition=match_condition, **kwargs - ) + self._client.data_sources.delete(data_source_name=name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace def get_skillsets(self, *, select: Optional[List[str]] = None, **kwargs: Any) -> List[SearchIndexerSkillset]: diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py index edb12b9b222d..8fb0a0699d7c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/aio/_search_index_client.py @@ -112,9 +112,7 @@ def list_indexes(self, *, select: Optional[List[str]] = None, **kwargs) -> Async if select: kwargs["select"] = ",".join(select) # pylint:disable=protected-access - indexes = self._client.indexes.list( - cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs - ) + indexes = self._client.indexes.list(cls=lambda objs: [SearchIndex._from_generated(x) for x in objs], **kwargs) return cast(AsyncItemPaged[SearchIndex], indexes) @distributed_trace @@ -203,9 +201,7 @@ async def delete_index( etag = index.e_tag # type: ignore except AttributeError: index_name = index - await self._client.indexes.delete( - index_name=index_name, etag=etag, match_condition=match_condition, **kwargs - ) + await self._client.indexes.delete(index_name=index_name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace_async async def create_index(self, index: SearchIndex, **kwargs: Any) -> SearchIndex: @@ -574,9 +570,7 @@ async def delete_alias( etag = alias.e_tag # type: ignore except AttributeError: alias_name = alias - await self._client.aliases.delete( - alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs - ) + await self._client.aliases.delete(alias_name=alias_name, etag=etag, match_condition=match_condition, **kwargs) @distributed_trace_async async def create_alias(self, alias: SearchAlias, **kwargs: Any) -> SearchAlias: diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py index 859f78ac7b85..379261afc6c2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py @@ -329,7 +329,7 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "DocumentExtractionSkill", "DocumentIntelligenceLayoutSkill", "DocumentKeysOrIds", - "EdgeNGramTokenFilter", + "EdgeNGramTokenFilter", # pylint: disable=naming-mismatch "EdgeNGramTokenizer", "ElisionTokenFilter", "EdgeNGramTokenFilterSide", @@ -378,7 +378,7 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "LimitTokenFilter", "IndexStatisticsSummary", "LuceneStandardAnalyzer", - "LuceneStandardTokenizer", + "LuceneStandardTokenizer", # pylint: disable=naming-mismatch "MagnitudeScoringFunction", "MagnitudeScoringParameters", "MarkdownHeaderDepth", @@ -390,7 +390,7 @@ class PathHierarchyTokenizer(PathHierarchyTokenizerV2): "MicrosoftStemmingTokenizerLanguage", "MicrosoftTokenizerLanguage", "NativeBlobSoftDeleteDeletionDetectionPolicy", - "NGramTokenFilter", + "NGramTokenFilter", # pylint: disable=naming-mismatch "NGramTokenizer", "OcrLineEnding", "OcrSkill", diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py index ddc702cec6bb..f55a2b1112da 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py +++ 
b/sdk/search/azure-search-documents/tests/async_tests/test_search_client_async.py @@ -14,9 +14,7 @@ class TestSearchClientAsync: @await_prepared_test - @mock.patch( - "azure.search.documents._generated.aio.operations._operations.DocumentsOperations.search_post" - ) + @mock.patch("azure.search.documents._generated.aio.operations._operations.DocumentsOperations.search_post") async def test_get_count_reset_continuation_token(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = await client.search(search_text="search text") diff --git a/sdk/search/azure-search-documents/tests/test_search_client.py b/sdk/search/azure-search-documents/tests/test_search_client.py index 3d813e9db5ed..bf98f571a5d5 100644 --- a/sdk/search/azure-search-documents/tests/test_search_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_client.py @@ -181,9 +181,7 @@ def test_get_count_reset_continuation_token(self, mock_search_post): result.get_count() assert not result._first_page_iterator_instance.continuation_token - @mock.patch( - "azure.search.documents._generated.operations._operations.DocumentsOperations.autocomplete_post" - ) + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.autocomplete_post") def test_autocomplete_query_argument(self, mock_autocomplete_post): client = SearchClient("endpoint", "index name", CREDENTIAL) result = client.autocomplete(search_text="search text", suggester_name="sg") @@ -243,9 +241,7 @@ def test_suggest_query_argument_v2020_06_30(self, mock_suggest_post): assert mock_suggest_post.call_args[1]["headers"] == client._headers assert mock_suggest_post.call_args[1]["suggest_request"].search_text == "search text" - @mock.patch( - "azure.search.documents._generated.operations._operations.DocumentsOperations.autocomplete_post" - ) + @mock.patch("azure.search.documents._generated.operations._operations.DocumentsOperations.autocomplete_post") def test_autocomplete_query_argument_v2020_06_30(self, mock_autocomplete_post): client = SearchClient("endpoint", "index name", CREDENTIAL, api_version=ApiVersion.V2020_06_30) result = client.autocomplete(search_text="search text", suggester_name="sg")