diff --git a/sdk/search/azure-search-documents/CHANGELOG.md b/sdk/search/azure-search-documents/CHANGELOG.md
index 317ffb4b0d0..c98afa0f794 100644
--- a/sdk/search/azure-search-documents/CHANGELOG.md
+++ b/sdk/search/azure-search-documents/CHANGELOG.md
@@ -1,6 +1,6 @@
# Release History

-## 1.0.0b4 (Unreleased)
+## 1.0.0b4 (2020-06-09)

**Breaking Changes**

@@ -24,7 +24,9 @@ PathHierarchyTokenizerV2 -> PathHierarchyTokenizer
- Renamed DataSource methods to DataSourceConnection #11693
- Autocomplete & suggest methods now take arguments search_text & suggester_name rather than query objects #11747
-Create_or_updates methods does not support partial updates
+- Create_or_update methods do not support partial updates #11800
+- Renamed AnalyzeRequest to AnalyzeTextOptions #11800
+- Renamed the IndexDocumentsBatch add_*_documents methods to add_*_actions #11800

## 1.0.0b3 (2020-05-04)
diff --git a/sdk/search/azure-search-documents/README.md b/sdk/search/azure-search-documents/README.md
index d36170a7698..b9d00d8b7c8 100644
--- a/sdk/search/azure-search-documents/README.md
+++ b/sdk/search/azure-search-documents/README.md
@@ -40,10 +40,11 @@ The above creates a resource with the "Standard" pricing tier. See [choosing a p
In order to interact with the Cognitive Search service you'll need to create an instance of the Search Client class. To make this possible you will need an [api-key of the Cognitive Search service](https://docs.microsoft.com/en-us/azure/search/search-security-api-keys).

-The SDK provides two clients.
+The SDK provides three clients.

1. SearchClient for all document operations.
-2. SearchServiceClient for all CRUD operations on service resources.
+2. SearchIndexClient for all CRUD operations on index resources.
+3. SearchIndexerClient for all CRUD operations on indexer resources.

#### Create a SearchClient

@@ -64,18 +65,33 @@
client = SearchClient(endpoint="<service endpoint>", credential=credential)
```

-#### Create a SearchServiceClient
+#### Create a SearchIndexClient

Once you have the values of the Cognitive Search Service [service endpoint](https://docs.microsoft.com/en-us/azure/search/search-create-service-portal#get-a-key-and-url-endpoint)
-and [api key](https://docs.microsoft.com/en-us/azure/search/search-security-api-keys) you can create the Search Service client:
+and [api key](https://docs.microsoft.com/en-us/azure/search/search-security-api-keys) you can create the Search Index client:

```python
from azure.core.credentials import AzureKeyCredential
-from azure.search.documents import SearchServiceClient
+from azure.search.documents.indexes import SearchIndexClient

credential = AzureKeyCredential("<api key>")

-client = SearchServiceClient(endpoint="<service endpoint>"
+client = SearchIndexClient(endpoint="<service endpoint>",
+                           credential=credential)
+```
+
+#### Create a SearchIndexerClient
+
+Once you have the values of the Cognitive Search Service [service endpoint](https://docs.microsoft.com/en-us/azure/search/search-create-service-portal#get-a-key-and-url-endpoint)
+and [api key](https://docs.microsoft.com/en-us/azure/search/search-security-api-keys) you can create the Search Indexer client:
+
+```python
+from azure.core.credentials import AzureKeyCredential
+from azure.search.documents.indexes import SearchIndexerClient
+
+credential = AzureKeyCredential("<api key>")
+
+client = SearchIndexerClient(endpoint="<service endpoint>",
 credential=credential)
```

@@ -83,7 +99,7 @@
You can use the `SearchClient` you created in the first section above to make a basic search request:

```python
-results = client.search(query="spa")
+results = client.search(search_text="spa")

print("Hotels containing 'spa' in the name (or other fields):")
for result in results:
@@ -100,7 +116,7 @@ source to extract and load data into an index.

There are several types of operations that can be executed against the service:

- **Index management operations** Create, delete, update, or configure a search index. ([API Reference](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-search-documents/latest/azure.search.documents.html#azure.search.documents.SearchIndexesClient), [Service Docs](https://docs.microsoft.com/en-us/rest/api/searchservice/index-operations))
-- **Document operations** Add, update, or delete documents in the index, query the index, or look up specific documents by ID. ([API Reference](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-search-documents/latest/azure.search.documents.html#azure.search.documents.SearchClient), [Service Docs](https://docs.microsoft.com/en-us/rest/api/searchservice/document-operations))
+- **Document operations** Add, update, or delete documents in the index, query the index, or look up specific documents by ID. ([API Reference](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-search-documents/latest/azure.search.documents.html#azure.search.documents.SearchClient), [Service Docs](https://docs.microsoft.com/en-us/rest/api/searchservice/document-operations))
- **Datasource operations** Create, delete, update, or configure data sources for Search Indexers ([API Reference](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-search-documents/latest/azure.search.documents.html#azure.search.documents.SearchDataSourcesClient), [Service Docs](https://docs.microsoft.com/en-us/rest/api/searchservice/indexer-operations))
- **Indexer operations** Automate aspects of an indexing operation by configuring a data source and an indexer that you can schedule or run on demand. This feature is supported for a limited number of data source types. ([API Reference](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-search-documents/latest/azure.search.documents.html#azure.search.documents.SearchIndexersClient), [Service Docs](https://docs.microsoft.com/en-us/rest/api/searchservice/indexer-operations))
- **Skillset operations** Part of a cognitive search workload, a skillset defines a series of enrichment processing steps. A skillset is consumed by an indexer. ([API Reference](https://azuresdkdocs.blob.core.windows.net/$web/python/azure-search-documents/latest/azure.search.documents.html#azure.search.documents.SearchSkillsetsClient), [Service Docs](https://docs.microsoft.com/en-us/rest/api/searchservice/skillset-operations))
@@ -126,7 +142,7 @@ from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient

client = SearchClient("<service endpoint>", "<index name>", AzureKeyCredential("<api key>"))

-results = client.search(query="spa")
+results = client.search(search_text="spa")

print("Hotels containing 'spa' in the name (or other fields):")
for result in results:
@@ -154,12 +170,10 @@ Get search suggestions for related terms, e.g.
find search suggestions for the term "coffee":

```python
from azure.core.credentials import AzureKeyCredential
-from azure.search.documents import SearchClient, SuggestQuery
+from azure.search.documents import SearchClient

client = SearchClient("<service endpoint>", "<index name>", AzureKeyCredential("<api key>"))

-query = SuggestQuery(search_text="coffee", suggester_name="sg")
-
-results = client.suggest(query=query)
+results = client.suggest(search_text="coffee", suggester_name="sg")

print("Search suggestions for 'coffee'")
for result in results:
@@ -172,25 +186,22 @@ for result in results:

```python
from azure.core.credentials import AzureKeyCredential
-from azure.search.documents import SearchServiceClient, CorsOptions, Index, ScoringProfile
-client = SearchServiceClient("<service endpoint>", AzureKeyCredential("<api key>")).get_indexes_client()
+from azure.search.documents.indexes import SearchIndexClient
+from azure.search.documents.indexes.models import ComplexField, CorsOptions, ScoringProfile, SearchFieldDataType, SearchIndex, SearchableField, SimpleField
+client = SearchIndexClient("<service endpoint>", AzureKeyCredential("<api key>"))
name = "hotels"
fields = [
-    {
-        "name": "hotelId",
-        "type": "Edm.String",
-        "key": True,
-        "searchable": False
-    },
-    {
-        "name": "baseRate",
-        "type": "Edm.Double"
-    }
-]
+    SimpleField(name="hotelId", type=SearchFieldDataType.String, key=True),
+    SimpleField(name="baseRate", type=SearchFieldDataType.Double),
+    SearchableField(name="description", type=SearchFieldDataType.String),
+    ComplexField(name="address", fields=[
+        SimpleField(name="streetAddress", type=SearchFieldDataType.String),
+        SimpleField(name="city", type=SearchFieldDataType.String),
+    ])
+]
cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60)
scoring_profiles = []
-index = Index(
+index = SearchIndex(
    name=name,
    fields=fields,
    scoring_profiles=scoring_profiles,
@@ -257,7 +268,7 @@ client = SearchClient("<service endpoint>", "<index name>", AzureKeyCredential("

Similarly, `logging_enable` can enable detailed logging for a single operation, even when it isn't enabled for the client:

```python
-result = client.search(query="spa", logging_enable=True)
+result = client.search(search_text="spa", logging_enable=True)
```

## Next steps
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_internal/_index_documents_batch.py b/sdk/search/azure-search-documents/azure/search/documents/_internal/_index_documents_batch.py
index fd02a58e4b2..dd267efc6af 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_internal/_index_documents_batch.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_internal/_index_documents_batch.py
@@ -36,7 +36,7 @@ def __repr__(self):
        # type: () -> str
        return "<IndexDocumentsBatch [{} actions]>".format(len(self.actions))[:1024]

-    def add_upload_documents(self, *documents):
+    def add_upload_actions(self, *documents):
        # type (Union[List[dict], List[List[dict]]]) -> None
        """Add documents to upload to the Azure search index.

@@ -50,7 +50,7 @@
        """
        self._extend_batch(flatten_args(documents), "upload")

-    def add_delete_documents(self, *documents):
+    def add_delete_actions(self, *documents):
        # type (Union[List[dict], List[List[dict]]]) -> None
        """Add documents to delete to the Azure search index.

@@ -69,7 +69,7 @@
        """
        self._extend_batch(flatten_args(documents), "delete")

-    def add_merge_documents(self, *documents):
+    def add_merge_actions(self, *documents):
        # type (Union[List[dict], List[List[dict]]]) -> None
        """Add documents to merge into existing documents in the Azure search index.
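A minimal sketch of the renamed batch API in use, assuming a hypothetical "hotels" index keyed on `hotelId` (endpoint and key are placeholders). The `add_*_actions` methods only queue actions; the batch is then submitted in a single call:

```python
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import IndexDocumentsBatch, SearchClient

batch = IndexDocumentsBatch()
batch.add_upload_actions({"hotelId": "1", "baseRate": 99.0})   # create or replace
batch.add_merge_actions({"hotelId": "2", "baseRate": 119.0})   # update fields of an existing document
batch.add_delete_actions({"hotelId": "3"})                     # remove by key

client = SearchClient("<service endpoint>", "hotels", AzureKeyCredential("<api key>"))
results = client.index_documents(batch)  # returns a list of IndexingResult
```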
@@ -85,7 +85,7 @@ def add_merge_documents(self, *documents):
        """
        self._extend_batch(flatten_args(documents), "merge")

-    def add_merge_or_upload_documents(self, *documents):
+    def add_merge_or_upload_actions(self, *documents):
        # type (Union[List[dict], List[List[dict]]]) -> None
        """Add documents to merge into existing documents in the Azure search index,
        or upload if they do not yet exist.

diff --git a/sdk/search/azure-search-documents/azure/search/documents/_internal/_search_client.py b/sdk/search/azure-search-documents/azure/search/documents/_internal/_search_client.py
index 30970f9dd23..45694e8b6d0 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_internal/_search_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_internal/_search_client.py
@@ -132,12 +132,12 @@ def get_document(self, key, selected_fields=None, **kwargs):
        return cast(dict, result)

    @distributed_trace
-    def search(self, query, **kwargs):
-        # type: (Union[str, SearchQuery], **Any) -> SearchItemPaged[dict]
+    def search(self, search_text, **kwargs):
+        # type: (str, **Any) -> SearchItemPaged[dict]
        """Search the Azure search index for documents.

-        :param query: An query for searching the index
-        :type documents: str or SearchQuery
+        :param str search_text: A full-text search query expression; Use "*" or omit this parameter to
+            match all documents.
        :rtype: SearchItemPaged[dict]

        .. admonition:: Example:

@@ -167,14 +167,41 @@
            :dedent: 4
            :caption: Get search result facets.
        """
-        if isinstance(query, six.string_types):
-            query = SearchQuery(search_text=query)
-        elif not isinstance(query, SearchQuery):
-            raise TypeError(
-                "Expected a string or SearchQuery for 'query', but got {}".format(
-                    repr(query)
-                )
-            )
+        include_total_result_count = kwargs.pop("include_total_result_count", None)
+        facets = kwargs.pop("facets", None)
+        filter_arg = kwargs.pop("filter", None)
+        highlight_fields = kwargs.pop("highlight_fields", None)
+        highlight_post_tag = kwargs.pop("highlight_post_tag", None)
+        highlight_pre_tag = kwargs.pop("highlight_pre_tag", None)
+        minimum_coverage = kwargs.pop("minimum_coverage", None)
+        order_by = kwargs.pop("order_by", None)
+        query_type = kwargs.pop("query_type", None)
+        scoring_parameters = kwargs.pop("scoring_parameters", None)
+        scoring_profile = kwargs.pop("scoring_profile", None)
+        search_fields = kwargs.pop("search_fields", None)
+        search_mode = kwargs.pop("search_mode", None)
+        select = kwargs.pop("select", None)
+        skip = kwargs.pop("skip", None)
+        top = kwargs.pop("top", None)
+        query = SearchQuery(
+            search_text=search_text,
+            include_total_result_count=include_total_result_count,
+            facets=facets,
+            filter=filter_arg,
+            highlight_fields=highlight_fields,
+            highlight_post_tag=highlight_post_tag,
+            highlight_pre_tag=highlight_pre_tag,
+            minimum_coverage=minimum_coverage,
+            order_by=order_by,
+            query_type=query_type,
+            scoring_parameters=scoring_parameters,
+            scoring_profile=scoring_profile,
+            search_fields=search_fields,
+            search_mode=search_mode,
+            select=select,
+            skip=skip,
+            top=top
+        )

        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        return SearchItemPaged(
@@ -201,7 +228,28 @@ def suggest(self, search_text, suggester_name, **kwargs):
            :dedent: 4
            :caption: Get search suggestions.
""" - query = SuggestQuery(search_text=search_text, suggester_name=suggester_name, **kwargs) + filter_arg = kwargs.pop("filter", None) + use_fuzzy_matching = kwargs.pop("use_fuzzy_matching", None) + highlight_post_tag = kwargs.pop("highlight_post_tag", None) + highlight_pre_tag = kwargs.pop("highlight_pre_tag", None) + minimum_coverage = kwargs.pop("minimum_coverage", None) + order_by = kwargs.pop("order_by", None) + search_fields = kwargs.pop("search_fields", None) + select = kwargs.pop("select", None) + top = kwargs.pop("top", None) + query = SuggestQuery( + search_text=search_text, + suggester_name=suggester_name, + filter=filter_arg, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + order_by=order_by, + search_fields=search_fields, + select=select, + top=top + ) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) response = self._client.documents.suggest_post( @@ -229,7 +277,26 @@ def autocomplete(self, search_text, suggester_name, **kwargs): :dedent: 4 :caption: Get a auto-completions. """ - query = AutocompleteQuery(search_text=search_text, suggester_name=suggester_name, **kwargs) + autocomplete_mode = kwargs.pop("autocomplete_mode", None) + filter_arg = kwargs.pop("filter", None) + use_fuzzy_matching = kwargs.pop("use_fuzzy_matching", None) + highlight_post_tag = kwargs.pop("highlight_post_tag", None) + highlight_pre_tag = kwargs.pop("highlight_pre_tag", None) + minimum_coverage = kwargs.pop("minimum_coverage", None) + search_fields = kwargs.pop("search_fields", None) + top = kwargs.pop("top", None) + query = AutocompleteQuery( + search_text=search_text, + suggester_name=suggester_name, + autocomplete_mode=autocomplete_mode, + filter=filter_arg, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + search_fields=search_fields, + top=top + ) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) response = self._client.documents.autocomplete_post( @@ -260,7 +327,7 @@ def upload_documents(self, documents, **kwargs): :caption: Upload new documents to an index """ batch = IndexDocumentsBatch() - batch.add_upload_documents(documents) + batch.add_upload_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = self.index_documents(batch, **kwargs) @@ -293,7 +360,7 @@ def delete_documents(self, documents, **kwargs): :caption: Delete existing documents to an index """ batch = IndexDocumentsBatch() - batch.add_delete_documents(documents) + batch.add_delete_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = self.index_documents(batch, **kwargs) @@ -322,7 +389,7 @@ def merge_documents(self, documents, **kwargs): :caption: Merge fields into existing documents to an index """ batch = IndexDocumentsBatch() - batch.add_merge_documents(documents) + batch.add_merge_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = self.index_documents(batch, **kwargs) @@ -342,7 +409,7 @@ def merge_or_upload_documents(self, documents, **kwargs): :rtype: List[IndexingResult] """ batch = IndexDocumentsBatch() - batch.add_merge_or_upload_documents(documents) + batch.add_merge_or_upload_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = self.index_documents(batch, **kwargs) diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/_internal/aio/_search_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_internal/aio/_search_client_async.py index 6eac9830aa0..7a2ed34e240 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_internal/aio/_search_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_internal/aio/_search_client_async.py @@ -5,12 +5,10 @@ # -------------------------------------------------------------------------- from typing import cast, List, TYPE_CHECKING -import six - from azure.core.tracing.decorator_async import distributed_trace_async from ._paging import AsyncSearchItemPaged, AsyncSearchPageIterator from .._generated.aio import SearchIndexClient -from .._generated.models import IndexBatch, IndexingResult, SearchRequest +from .._generated.models import IndexBatch, IndexingResult from .._index_documents_batch import IndexDocumentsBatch from .._queries import AutocompleteQuery, SearchQuery, SuggestQuery from ..._headers_mixin import HeadersMixin @@ -104,12 +102,12 @@ async def get_document(self, key, selected_fields=None, **kwargs): return cast(dict, result) @distributed_trace_async - async def search(self, query, **kwargs): - # type: (Union[str, SearchQuery], **Any) -> AsyncSearchItemPaged[dict] + async def search(self, search_text, **kwargs): + # type: (str, **Any) -> AsyncSearchItemPaged[dict] """Search the Azure search index for documents. - :param query: An query for searching the index - :type documents: str or SearchQuery + :param str search_text: A full-text search query expression; Use "*" or omit this parameter to + match all documents. :rtype: AsyncSearchItemPaged[dict] .. admonition:: Example: @@ -139,14 +137,42 @@ async def search(self, query, **kwargs): :dedent: 4 :caption: Get search result facets. 
""" - if isinstance(query, six.string_types): - query = SearchQuery(search_text=query) - elif not isinstance(query, SearchQuery): - raise TypeError( - "Expected a string or SearchQuery for 'query', but got {}".format( - repr(query) - ) - ) + include_total_result_count = kwargs.pop("include_total_result_count", None) + facets = kwargs.pop("facets", None) + filter_arg = kwargs.pop("filter", None) + highlight_fields = kwargs.pop("highlight_fields", None) + highlight_post_tag = kwargs.pop("highlight_post_tag", None) + highlight_pre_tag = kwargs.pop("highlight_pre_tag", None) + minimum_coverage = kwargs.pop("minimum_coverage", None) + order_by = kwargs.pop("order_by", None) + query_type = kwargs.pop("query_type", None) + scoring_parameters = kwargs.pop("scoring_parameters", None) + scoring_profile = kwargs.pop("scoring_profile", None) + search_fields = kwargs.pop("search_fields", None) + search_mode = kwargs.pop("search_mode", None) + select = kwargs.pop("select", None) + skip = kwargs.pop("skip", None) + top = kwargs.pop("top", None) + query = SearchQuery( + search_text=search_text, + include_total_result_count=include_total_result_count, + facets=facets, + filter=filter_arg, + highlight_fields=highlight_fields, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + order_by=order_by, + query_type=query_type, + scoring_parameters=scoring_parameters, + scoring_profile=scoring_profile, + search_fields=search_fields, + search_mode=search_mode, + select=select, + skip=skip, + top=top + ) + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) return AsyncSearchItemPaged( self._client, query, kwargs, page_iterator_class=AsyncSearchPageIterator @@ -154,7 +180,7 @@ async def search(self, query, **kwargs): @distributed_trace_async async def suggest(self, search_text, suggester_name, **kwargs): - # type: (Union[str, SuggestQuery], **Any) -> List[dict] + # type: (str, str, **Any) -> List[dict] """Get search suggestion results from the Azure search index. :param str search_text: Required. The search text to use to suggest documents. Must be at least 1 @@ -172,7 +198,28 @@ async def suggest(self, search_text, suggester_name, **kwargs): :dedent: 4 :caption: Get search suggestions. """ - query = SuggestQuery(search_text=search_text, suggester_name=suggester_name, **kwargs) + filter_arg = kwargs.pop("filter", None) + use_fuzzy_matching = kwargs.pop("use_fuzzy_matching", None) + highlight_post_tag = kwargs.pop("highlight_post_tag", None) + highlight_pre_tag = kwargs.pop("highlight_pre_tag", None) + minimum_coverage = kwargs.pop("minimum_coverage", None) + order_by = kwargs.pop("order_by", None) + search_fields = kwargs.pop("search_fields", None) + select = kwargs.pop("select", None) + top = kwargs.pop("top", None) + query = SuggestQuery( + search_text=search_text, + suggester_name=suggester_name, + filter=filter_arg, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + order_by=order_by, + search_fields=search_fields, + select=select, + top=top + ) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) response = await self._client.documents.suggest_post( @@ -200,7 +247,26 @@ async def autocomplete(self, search_text, suggester_name, **kwargs): :dedent: 4 :caption: Get a auto-completions. 
""" - query = AutocompleteQuery(search_text=search_text, suggester_name=suggester_name, **kwargs) + autocomplete_mode = kwargs.pop("autocomplete_mode", None) + filter_arg = kwargs.pop("filter", None) + use_fuzzy_matching = kwargs.pop("use_fuzzy_matching", None) + highlight_post_tag = kwargs.pop("highlight_post_tag", None) + highlight_pre_tag = kwargs.pop("highlight_pre_tag", None) + minimum_coverage = kwargs.pop("minimum_coverage", None) + search_fields = kwargs.pop("search_fields", None) + top = kwargs.pop("top", None) + query = AutocompleteQuery( + search_text=search_text, + suggester_name=suggester_name, + autocomplete_mode=autocomplete_mode, + filter=filter_arg, + use_fuzzy_matching=use_fuzzy_matching, + highlight_post_tag=highlight_post_tag, + highlight_pre_tag=highlight_pre_tag, + minimum_coverage=minimum_coverage, + search_fields=search_fields, + top=top + ) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) response = await self._client.documents.autocomplete_post( @@ -231,7 +297,7 @@ async def upload_documents(self, documents, **kwargs): :caption: Upload new documents to an index """ batch = IndexDocumentsBatch() - batch.add_upload_documents(documents) + batch.add_upload_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = await self.index_documents(batch, **kwargs) @@ -264,7 +330,7 @@ async def delete_documents(self, documents, **kwargs): :caption: Delete existing documents to an index """ batch = IndexDocumentsBatch() - batch.add_delete_documents(documents) + batch.add_delete_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = await self.index_documents(batch, **kwargs) @@ -293,7 +359,7 @@ async def merge_documents(self, documents, **kwargs): :caption: Merge fields into existing documents to an index """ batch = IndexDocumentsBatch() - batch.add_merge_documents(documents) + batch.add_merge_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = await self.index_documents(batch, **kwargs) @@ -313,7 +379,7 @@ async def merge_or_upload_documents(self, documents, **kwargs): :rtype: List[IndexingResult] """ batch = IndexDocumentsBatch() - batch.add_merge_or_upload_documents(documents) + batch.add_merge_or_upload_actions(documents) kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) results = await self.index_documents(batch, **kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py index 7d555ab22d2..8d2ac52f9c5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_index.py @@ -465,13 +465,13 @@ class SearchIndex(msrest.serialization.Model): 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, - 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'suggesters': {'key': 'suggesters', 'type': '[SearchSuggester]'}, 'analyzers': {'key': 'analyzers', 'type': '[LexicalAnalyzer]'}, 'tokenizers': {'key': 'tokenizers', 'type': '[LexicalTokenizer]'}, 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, 'encryption_key': 
{'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, - 'similarity': {'key': 'similarity', 'type': 'Similarity'}, + 'similarity': {'key': 'similarity', 'type': 'SimilarityAlgorithm'}, 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, } diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py index 8bc71209d88..6da11c7146d 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_models.py @@ -4,7 +4,142 @@ # license information. # -------------------------------------------------------------------------- import msrest.serialization -from ._generated.models import LexicalAnalyzer, LexicalTokenizer +from ._generated.models import ( + LexicalAnalyzer, + LexicalTokenizer, + AnalyzeRequest, + CustomAnalyzer as _CustomAnalyzer, +) + + +class AnalyzeTextOptions(msrest.serialization.Model): + """Specifies some text and analysis components used to break that text into tokens. + + All required parameters must be populated in order to send to Azure. + + :param text: Required. The text to break into tokens. + :type text: str + :param analyzer_name: The name of the analyzer to use to break the given text. If this parameter is + not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are + mutually exclusive. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer_name: str or ~azure.search.documents.indexes.models.LexicalAnalyzerName + :param tokenizer_name: The name of the tokenizer to use to break the given text. If this parameter + is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters + are mutually exclusive. 
Possible values include: "classic", "edgeNGram", "keyword_v2", + "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", + "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". + :type tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :param token_filters: An optional list of token filters to use when breaking the given text. + This parameter can only be set when using the tokenizer parameter. + :type token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :param char_filters: An optional list of character filters to use when breaking the given text. + This parameter can only be set when using the tokenizer parameter. + :type char_filters: list[str] + """ + + _validation = { + 'text': {'required': True}, + } + + _attribute_map = { + 'text': {'key': 'text', 'type': 'str'}, + 'analyzer_name': {'key': 'analyzerName', 'type': 'str'}, + 'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[str]'}, + 'char_filters': {'key': 'charFilters', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(AnalyzeTextOptions, self).__init__(**kwargs) + self.text = kwargs['text'] + self.analyzer_name = kwargs.get('analyzer_name', None) + self.tokenizer_name = kwargs.get('tokenizer_name', None) + self.token_filters = kwargs.get('token_filters', None) + self.char_filters = kwargs.get('char_filters', None) + + def to_analyze_request(self): + return AnalyzeRequest( + text=self.text, + analyzer=self.analyzer_name, + tokenizer=self.tokenizer_name, + token_filters=self.token_filters, + char_filters=self.char_filters + ) + + +class CustomAnalyzer(LexicalAnalyzer): + """Allows you to take control over the process of converting text into indexable/searchable tokens. + It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. + The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens + emitted by the tokenizer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param tokenizer_name: Required. The name of the tokenizer to use to divide continuous text into a + sequence of tokens, such as breaking a sentence into words. Possible values include: "classic", + "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", + "standard_v2", "uax_url_email", "whitespace". + :type tokenizer_name: str or ~azure.search.documents.indexes.models.LexicalTokenizerName + :param token_filters: A list of token filters used to filter out or modify the tokens generated + by a tokenizer. For example, you can specify a lowercase filter that converts all characters to + lowercase. The filters are run in the order in which they are listed. + :type token_filters: list[str or ~azure.search.documents.indexes.models.TokenFilterName] + :param char_filters: A list of character filters used to prepare input text before it is + processed by the tokenizer. 
For instance, they can replace certain characters or symbols. The
+        filters are run in the order in which they are listed.
+    :type char_filters: list[str]
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+        'tokenizer_name': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'tokenizer_name': {'key': 'tokenizerName', 'type': 'str'},
+        'token_filters': {'key': 'tokenFilters', 'type': '[str]'},
+        'char_filters': {'key': 'charFilters', 'type': '[str]'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(CustomAnalyzer, self).__init__(**kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer'
+        self.tokenizer_name = kwargs['tokenizer_name']
+        self.token_filters = kwargs.get('token_filters', None)
+        self.char_filters = kwargs.get('char_filters', None)


class PatternAnalyzer(LexicalAnalyzer):
diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py
index 176c5778096..bb6c2cc46c2 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_search_index_client.py
@@ -24,6 +24,7 @@

 if TYPE_CHECKING:
    # pylint:disable=unused-import,ungrouped-imports
+    from ._models import AnalyzeTextOptions
    from typing import Any, Dict, List, Sequence, Union, Optional
    from azure.core.credentials import AzureKeyCredential

@@ -84,6 +85,20 @@ def list_indexes(self, **kwargs):

        return self._client.indexes.list(cls=lambda objs: [unpack_search_index(x) for x in objs], **kwargs)

+    @distributed_trace
+    def list_index_names(self, **kwargs):
+        # type: (**Any) -> ItemPaged[str]
+        """List the index names in an Azure Search service.
+
+        :return: List of index names
+        :rtype: list[str]
+        :raises: ~azure.core.exceptions.HttpResponseError
+
+        """
+        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
+
+        return self._client.indexes.list(cls=lambda objs: [x.name for x in objs], **kwargs)
+
    @distributed_trace
    def get_index(self, name, **kwargs):
        # type: (str, **Any) -> SearchIndex
@@ -235,13 +250,13 @@ def create_or_update_index(

    @distributed_trace
    def analyze_text(self, index_name, analyze_request, **kwargs):
-        # type: (str, AnalyzeRequest, **Any) -> AnalyzeResult
+        # type: (str, AnalyzeTextOptions, **Any) -> AnalyzeResult
        """Shows how an analyzer breaks text into tokens.

        :param index_name: The name of the index for which to test an analyzer.
        :type index_name: str
        :param analyze_request: The text and analyzer or analysis components to test.
-        :type analyze_request: ~azure.search.documents.AnalyzeRequest
+        :type analyze_request: ~azure.search.documents.indexes.models.AnalyzeTextOptions
        :return: AnalyzeResult
        :rtype: ~azure.search.documents.indexes.models.AnalyzeResult
        :raises: ~azure.core.exceptions.HttpResponseError

@@ -257,7 +272,7 @@
        """
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        result = self._client.indexes.analyze(
-            index_name=index_name, request=analyze_request, **kwargs
+            index_name=index_name, request=analyze_request.to_analyze_request(), **kwargs
        )
        return result

diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py
index 8e7021b8c91..98acd82634c 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/_utils.py
@@ -15,6 +15,7 @@
 )
 from ._generated.models import (
     AzureActiveDirectoryApplicationCredentials,
+    CustomAnalyzer as _CustomAnalyzer,
     DataSourceCredentials,
     SearchIndexerDataSource as _SearchIndexerDataSource,
     SearchResourceEncryptionKey as _SearchResourceEncryptionKey,
@@ -25,6 +26,7 @@
     PatternTokenizer as _PatternTokenizer,
 )
 from ._models import (
+    CustomAnalyzer,
     PatternAnalyzer,
     PatternTokenizer,
     SynonymMap,
@@ -73,7 +75,27 @@ def prep_if_none_match(etag, match_condition):
     return None


-def delistize_flags_for_pattern_analyzer(pattern_analyzer):
+def pack_custom_analyzer(custom_analyzer):
+    # type: (CustomAnalyzer) -> _CustomAnalyzer
+    return _CustomAnalyzer(
+        name=custom_analyzer.name,
+        odata_type=custom_analyzer.odata_type,
+        tokenizer=custom_analyzer.tokenizer_name,
+        token_filters=custom_analyzer.token_filters,
+        char_filters=custom_analyzer.char_filters
+    )
+
+
+def unpack_custom_analyzer(custom_analyzer):
+    # type: (_CustomAnalyzer) -> CustomAnalyzer
+    return CustomAnalyzer(
+        name=custom_analyzer.name,
+        odata_type=custom_analyzer.odata_type,
+        tokenizer_name=custom_analyzer.tokenizer,
+        token_filters=custom_analyzer.token_filters,
+        char_filters=custom_analyzer.char_filters
+    )
+
+
+def pack_pattern_analyzer(pattern_analyzer):
     # type: (PatternAnalyzer) -> _PatternAnalyzer
     if not pattern_analyzer.flags:
         flags = None
@@ -88,8 +110,8 @@
     )


-def listize_flags_for_pattern_analyzer(pattern_analyzer):
-    # type: (PatternAnalyzer) -> PatternAnalyzer
+def unpack_pattern_analyzer(pattern_analyzer):
+    # type: (_PatternAnalyzer) -> PatternAnalyzer
     if not pattern_analyzer.flags:
         flags = None
     else:
@@ -103,7 +125,27 @@
     )


-def delistize_flags_for_pattern_tokenizer(pattern_tokenizer):
+def pack_analyzer(analyzer):
+    if not analyzer:
+        return None
+    if isinstance(analyzer, PatternAnalyzer):
+        return pack_pattern_analyzer(analyzer)
+    if isinstance(analyzer, CustomAnalyzer):
+        return pack_custom_analyzer(analyzer)
+    return analyzer
+
+
+def unpack_analyzer(analyzer):
+    if not analyzer:
+        return None
+    if isinstance(analyzer, _PatternAnalyzer):
+        return unpack_pattern_analyzer(analyzer)
+    if isinstance(analyzer, _CustomAnalyzer):
+        return unpack_custom_analyzer(analyzer)
+    return analyzer
+
+
+def pack_pattern_tokenizer(pattern_tokenizer):
     # type: (PatternTokenizer) -> _PatternTokenizer
     if not pattern_tokenizer.flags:
         flags = None
@@ -117,7 +159,7 @@
     )


-def 
listize_flags_for_pattern_tokenizer(pattern_tokenizer): +def unpack_pattern_tokenizer(pattern_tokenizer): # type: (PatternTokenizer) -> PatternTokenizer if not pattern_tokenizer.flags: flags = None @@ -137,16 +179,14 @@ def pack_search_index(search_index): return None if search_index.analyzers: analyzers = [ - delistize_flags_for_pattern_analyzer(x) # type: ignore - if isinstance(x, PatternAnalyzer) - else x + pack_analyzer(x) # type: ignore for x in search_index.analyzers ] # mypy: ignore else: analyzers = None if search_index.tokenizers: tokenizers = [ - delistize_flags_for_pattern_tokenizer(x) # type: ignore + pack_pattern_tokenizer(x) # type: ignore if isinstance(x, PatternTokenizer) else x for x in search_index.tokenizers @@ -180,16 +220,14 @@ def unpack_search_index(search_index): return None if search_index.analyzers: analyzers = [ - listize_flags_for_pattern_analyzer(x) # type: ignore - if isinstance(x, _PatternAnalyzer) - else x + unpack_analyzer(x) # type: ignore for x in search_index.analyzers ] else: analyzers = None if search_index.tokenizers: tokenizers = [ - listize_flags_for_pattern_tokenizer(x) # type: ignore + unpack_pattern_tokenizer(x) # type: ignore if isinstance(x, _PatternTokenizer) else x for x in search_index.tokenizers diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py index 6ea84468765..49bcaace91e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_index_client.py @@ -24,7 +24,8 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import AnalyzeRequest, AnalyzeResult, SearchIndex + from .._generated.models import AnalyzeResult, SearchIndex + from .._models import AnalyzeTextOptions from typing import Any, Dict, List, Union from azure.core.credentials import AzureKeyCredential @@ -88,6 +89,20 @@ def list_indexes(self, **kwargs): return self._client.indexes.list(cls=lambda objs: [unpack_search_index(x) for x in objs], **kwargs) + @distributed_trace + def list_index_names(self, **kwargs): + # type: (**Any) -> AsyncItemPaged[str] + """List the index names in an Azure Search service. + + :return: List of index names + :rtype: list[str] + :raises: ~azure.core.exceptions.HttpResponseError + + """ + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) + + return self._client.indexes.list(cls=lambda objs: [x.name for x in objs], **kwargs) + @distributed_trace_async async def get_index(self, name, **kwargs): # type: (str, **Any) -> SearchIndex @@ -239,15 +254,15 @@ async def create_or_update_index( @distributed_trace_async async def analyze_text(self, index_name, analyze_request, **kwargs): - # type: (str, AnalyzeRequest, **Any) -> AnalyzeResult + # type: (str, AnalyzeTextOptions, **Any) -> AnalyzeResult """Shows how an analyzer breaks text into tokens. :param index_name: The name of the index for which to test an analyzer. :type index_name: str :param analyze_request: The text and analyzer or analysis components to test. 
-        :type analyze_request: :class:`~azure.search.documents.indexes.models.AnalyzeRequest
+        :type analyze_request: ~azure.search.documents.indexes.models.AnalyzeTextOptions
        :return: AnalyzeResult
-        :rtype: :class:`~azure.search.documents.indexes.models.AnalyzeRequest
+        :rtype: ~azure.search.documents.indexes.models.AnalyzeResult
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:

@@ -261,7 +276,7 @@
        """
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        result = await self._client.indexes.analyze(
-            index_name=index_name, request=analyze_request, **kwargs
+            index_name=index_name, request=analyze_request.to_analyze_request(), **kwargs
        )
        return result

@@ -310,7 +325,7 @@ async def get_synonym_map(self, name, **kwargs):
        :param name: The name of the Synonym Map to get
        :type name: str
        :return: The retrieved Synonym Map
-        :rtype: :class:`~azure.search.documents.indexes.models.SynonymMap
+        :rtype: :class:`~azure.search.documents.indexes.models.SynonymMap`
        :raises: :class:`~azure.core.exceptions.ResourceNotFoundError`

        .. admonition:: Example:

@@ -371,9 +386,9 @@ async def create_synonym_map(self, synonym_map, **kwargs):
        """Create a new Synonym Map in an Azure Search service

        :param synonym_map: The Synonym Map object
-        :type synonym_map: :class:`~azure.search.documents.indexes.models.SynonymMap
+        :type synonym_map: :class:`~azure.search.documents.indexes.models.SynonymMap`
        :return: The created Synonym Map
-        :rtype: :class:`~azure.search.documents.indexes.models.SynonymMap
+        :rtype: :class:`~azure.search.documents.indexes.models.SynonymMap`

        .. admonition:: Example:

@@ -397,11 +412,11 @@ async def create_or_update_synonym_map(self, synonym_map, **kwargs):
        existing one.

        :param synonym_map: The Synonym Map object
-        :type synonym_map: :class:`~azure.search.documents.indexes.models.SynonymMap
+        :type synonym_map: :class:`~azure.search.documents.indexes.models.SynonymMap`
        :keyword match_condition: The match condition to use upon the etag
        :type match_condition: ~azure.core.MatchConditions
        :return: The created or updated Synonym Map
-        :rtype: :class:`~azure.search.documents.indexes.models.SynonymMap
+        :rtype: :class:`~azure.search.documents.indexes.models.SynonymMap`
        """
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))

diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_indexer_client.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_indexer_client.py
index 88101e0c363..d4194179bce 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_indexer_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/_internal/aio/_search_indexer_client.py
@@ -528,11 +528,11 @@ async def create_or_update_skillset(self, skillset, **kwargs):
        existing one.
:param skillset: The SearchIndexerSkillset object to create or update - :type skillset: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset + :type skillset: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset` :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :return: The created or updated SearchIndexerSkillset - :rtype: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset + :rtype: :class:`~azure.search.documents.indexes.models.SearchIndexerSkillset` """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) diff --git a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py index d75447532da..552d5691ef4 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/indexes/models/__init__.py @@ -31,8 +31,8 @@ SimpleField, SearchFieldDataType, ) +from ..._internal._generated.models import SuggestOptions from .._internal._generated.models import ( - AnalyzeRequest, AnalyzeResult, AnalyzedTokenInfo, AsciiFoldingTokenFilter, @@ -42,7 +42,6 @@ CommonGramTokenFilter, ConditionalSkill, CorsOptions, - CustomAnalyzer, DictionaryDecompounderTokenFilter, DistanceScoringFunction, DistanceScoringParameters, @@ -60,7 +59,7 @@ KeepTokenFilter, KeyPhraseExtractionSkill, KeywordMarkerTokenFilter, - KeywordTokenizer, + KeywordTokenizerV2 as KeywordTokenizer, LanguageDetectionSkill, LengthTokenFilter, LexicalAnalyzer, @@ -87,34 +86,50 @@ SearchIndex, SearchIndexer, SearchIndexerDataContainer, + SearchIndexerError, + SearchIndexerLimits, SearchIndexerSkillset, SearchIndexerStatus, ScoringFunction, ScoringProfile, SentimentSkill, + SentimentSkillLanguage, ShaperSkill, ShingleTokenFilter, Similarity as SimilarityAlgorithm, SnowballTokenFilter, + SnowballTokenFilterLanguage, + SoftDeleteColumnDeletionDetectionPolicy, SplitSkill, + SplitSkillLanguage, + SqlIntegratedChangeTrackingPolicy, StemmerOverrideTokenFilter, StemmerTokenFilter, + StemmerTokenFilterLanguage, StopAnalyzer, + StopwordsList, StopwordsTokenFilter, Suggester as SearchSuggester, SynonymTokenFilter, TagScoringFunction, TagScoringParameters, + TextSplitMode, TextTranslationSkill, + TextTranslationSkillLanguage, TextWeights, + TokenCharacterKind, TokenFilter, + TokenFilterName, TruncateTokenFilter, UaxUrlEmailTokenizer, UniqueTokenFilter, WebApiSkill, + VisualFeature, WordDelimiterTokenFilter, ) from .._internal._models import ( + AnalyzeTextOptions, + CustomAnalyzer, PatternAnalyzer, PatternTokenizer, SearchIndexerDataSourceConnection, @@ -124,7 +139,7 @@ __all__ = ( - "AnalyzeRequest", + "AnalyzeTextOptions", "AnalyzeResult", "AnalyzedTokenInfo", "AsciiFoldingTokenFilter", @@ -186,32 +201,47 @@ "SearchIndexer", "SearchIndexerDataContainer", "SearchIndexerDataSourceConnection", + "SearchIndexerError", + "SearchIndexerLimits", "SearchIndexerSkillset", "SearchIndexerStatus", "SearchResourceEncryptionKey", "SearchableField", "SentimentSkill", + "SentimentSkillLanguage", "ShaperSkill", "ShingleTokenFilter", "SimpleField", "SimilarityAlgorithm", "SnowballTokenFilter", + "SnowballTokenFilterLanguage", + "SoftDeleteColumnDeletionDetectionPolicy", "SplitSkill", + "SplitSkillLanguage", + "SqlIntegratedChangeTrackingPolicy", "StemmerOverrideTokenFilter", "StemmerTokenFilter", + "StemmerTokenFilterLanguage", 
"StopAnalyzer", + "StopwordsList", "StopwordsTokenFilter", "SearchSuggester", + "SuggestOptions", "SynonymMap", "SynonymTokenFilter", "TagScoringFunction", "TagScoringParameters", + "TextSplitMode", "TextTranslationSkill", + "TextTranslationSkillLanguage", "TextWeights", + "TokenCharacterKind", "TokenFilter", + "TokenFilterName", "TruncateTokenFilter", "UaxUrlEmailTokenizer", "UniqueTokenFilter", + "VisualFeature", "WebApiSkill", "WordDelimiterTokenFilter", "SearchFieldDataType", diff --git a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py index 4b120c5c396..9a8f5307fbb 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/models/__init__.py @@ -27,7 +27,6 @@ from .._internal import ( IndexAction, IndexingResult, - SearchQuery, odata, ) @@ -35,6 +34,5 @@ __all__ = ( "IndexAction", "IndexingResult", - "SearchQuery", "odata", ) diff --git a/sdk/search/azure-search-documents/samples/async_samples/sample_analyze_text_async.py b/sdk/search/azure-search-documents/samples/async_samples/sample_analyze_text_async.py index 3fbf5e23324..cf14d3383f5 100644 --- a/sdk/search/azure-search-documents/samples/async_samples/sample_analyze_text_async.py +++ b/sdk/search/azure-search-documents/samples/async_samples/sample_analyze_text_async.py @@ -30,11 +30,11 @@ async def simple_analyze_text(): # [START simple_analyze_text_async] from azure.core.credentials import AzureKeyCredential from azure.search.documents.indexes.aio import SearchIndexClient - from azure.search.documents.indexes.models import AnalyzeRequest + from azure.search.documents.indexes.models import AnalyzeTextOptions client = SearchIndexClient(service_endpoint, AzureKeyCredential(key)) - analyze_request = AnalyzeRequest(text="One's ", analyzer="standard.lucene") + analyze_request = AnalyzeTextOptions(text="One's ", analyzer_name="standard.lucene") async with client: result = await client.analyze_text(index_name, analyze_request) diff --git a/sdk/search/azure-search-documents/samples/async_samples/sample_crud_operations_async.py b/sdk/search/azure-search-documents/samples/async_samples/sample_crud_operations_async.py index 577a6893645..895fa975d49 100644 --- a/sdk/search/azure-search-documents/samples/async_samples/sample_crud_operations_async.py +++ b/sdk/search/azure-search-documents/samples/async_samples/sample_crud_operations_async.py @@ -29,7 +29,6 @@ from azure.core.credentials import AzureKeyCredential from azure.search.documents.aio import SearchClient -from azure.search.documents.models import SearchQuery search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key)) diff --git a/sdk/search/azure-search-documents/samples/async_samples/sample_facet_query_async.py b/sdk/search/azure-search-documents/samples/async_samples/sample_facet_query_async.py index 476d2ed795a..82522585818 100644 --- a/sdk/search/azure-search-documents/samples/async_samples/sample_facet_query_async.py +++ b/sdk/search/azure-search-documents/samples/async_samples/sample_facet_query_async.py @@ -32,14 +32,11 @@ async def filter_query(): # [START facet_query_async] from azure.core.credentials import AzureKeyCredential from azure.search.documents.aio import SearchClient - from azure.search.documents.models import SearchQuery search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key)) - query = SearchQuery(search_text="WiFi", 
facets=["Category"], top=0) - async with search_client: - results = await search_client.search(query=query) + results = await search_client.search(search_text="WiFi", facets=["Category"], top=0) facets = await results.get_facets() diff --git a/sdk/search/azure-search-documents/samples/async_samples/sample_filter_query_async.py b/sdk/search/azure-search-documents/samples/async_samples/sample_filter_query_async.py index 0084482c920..8b8e9cd10dd 100644 --- a/sdk/search/azure-search-documents/samples/async_samples/sample_filter_query_async.py +++ b/sdk/search/azure-search-documents/samples/async_samples/sample_filter_query_async.py @@ -32,17 +32,17 @@ async def filter_query(): # [START filter_query_async] from azure.core.credentials import AzureKeyCredential from azure.search.documents.aio import SearchClient - from azure.search.documents.models import SearchQuery search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key)) - query = SearchQuery(search_text="WiFi") - query.filter("Address/StateProvince eq 'FL' and Address/Country eq 'USA'") - query.select("HotelName", "Rating") - query.order_by("Rating desc") - + select = ("HotelName", "Rating") async with search_client: - results = await search_client.search(query=query) + results = await search_client.search( + search_text="WiFi", + filter="Address/StateProvince eq 'FL' and Address/Country eq 'USA'", + select=",".join(select), + order_by="Rating desc" + ) print("Florida hotels containing 'WiFi', sorted by Rating:") async for result in results: diff --git a/sdk/search/azure-search-documents/samples/async_samples/sample_simple_query_async.py b/sdk/search/azure-search-documents/samples/async_samples/sample_simple_query_async.py index 28810821a5f..d87f9a76aa7 100644 --- a/sdk/search/azure-search-documents/samples/async_samples/sample_simple_query_async.py +++ b/sdk/search/azure-search-documents/samples/async_samples/sample_simple_query_async.py @@ -36,7 +36,7 @@ async def simple_text_query(): search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key)) async with search_client: - results = await search_client.search(query="spa") + results = await search_client.search(search_text="spa") print("Hotels containing 'spa' in the name (or other fields):") async for result in results: diff --git a/sdk/search/azure-search-documents/samples/sample_analyze_text.py b/sdk/search/azure-search-documents/samples/sample_analyze_text.py index edf3b43f435..280060a9a74 100644 --- a/sdk/search/azure-search-documents/samples/sample_analyze_text.py +++ b/sdk/search/azure-search-documents/samples/sample_analyze_text.py @@ -29,11 +29,11 @@ def simple_analyze_text(): # [START simple_analyze_text] from azure.core.credentials import AzureKeyCredential from azure.search.documents.indexes import SearchIndexClient - from azure.search.documents.indexes.models import AnalyzeRequest + from azure.search.documents.indexes.models import AnalyzeTextOptions client = SearchIndexClient(service_endpoint, AzureKeyCredential(key)) - analyze_request = AnalyzeRequest(text="One's ", analyzer="standard.lucene") + analyze_request = AnalyzeTextOptions(text="One's ", analyzer_name="standard.lucene") result = client.analyze_text(index_name, analyze_request) print(result.as_dict()) diff --git a/sdk/search/azure-search-documents/samples/sample_facet_query.py b/sdk/search/azure-search-documents/samples/sample_facet_query.py index 4e20ae5a9df..b2252f744b4 100644 --- a/sdk/search/azure-search-documents/samples/sample_facet_query.py +++ 
b/sdk/search/azure-search-documents/samples/sample_facet_query.py @@ -30,13 +30,10 @@ def filter_query(): # [START facet_query] from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient - from azure.search.documents.models import SearchQuery search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key)) - query = SearchQuery(search_text="WiFi", facets=["Category"], top=0) - - results = search_client.search(query=query) + results = search_client.search(search_text="WiFi", facets=["Category"], top=0) facets = results.get_facets() diff --git a/sdk/search/azure-search-documents/samples/sample_filter_query.py b/sdk/search/azure-search-documents/samples/sample_filter_query.py index 58770bd1fe5..1ad1f76fb20 100644 --- a/sdk/search/azure-search-documents/samples/sample_filter_query.py +++ b/sdk/search/azure-search-documents/samples/sample_filter_query.py @@ -30,16 +30,16 @@ def filter_query(): # [START filter_query] from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient - from azure.search.documents.models import SearchQuery search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key)) - query = SearchQuery(search_text="WiFi") - query.filter("Address/StateProvince eq 'FL' and Address/Country eq 'USA'") - query.select("HotelName", "Rating") - query.order_by("Rating desc") - - results = search_client.search(query=query) + select = ("HotelName", "Rating") + results = search_client.search( + search_text="WiFi", + filter="Address/StateProvince eq 'FL' and Address/Country eq 'USA'", + select=",".join(select), + order_by="Rating desc" + ) print("Florida hotels containing 'WiFi', sorted by Rating:") for result in results: diff --git a/sdk/search/azure-search-documents/samples/sample_simple_query.py b/sdk/search/azure-search-documents/samples/sample_simple_query.py index 3948ae446a7..19d9a83b7e9 100644 --- a/sdk/search/azure-search-documents/samples/sample_simple_query.py +++ b/sdk/search/azure-search-documents/samples/sample_simple_query.py @@ -33,7 +33,7 @@ def simple_text_query(): search_client = SearchClient(service_endpoint, index_name, AzureKeyCredential(key)) - results = search_client.search(query="spa") + results = search_client.search(search_text="spa") print("Hotels containing 'spa' in the name (or other fields):") for result in results: diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py index 0bd090623c4..b47e3c78ffc 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_index_live_async.py @@ -24,7 +24,6 @@ from azure.core.exceptions import HttpResponseError from azure.core.credentials import AzureKeyCredential -from azure.search.documents.models import SearchQuery from azure.search.documents.aio import SearchClient TIME_TO_SLEEP = 3 @@ -87,12 +86,12 @@ async def test_get_search_simple(self, api_key, endpoint, index_name, **kwargs): ) async with client: results = [] - async for x in await client.search(query="hotel"): + async for x in await client.search(search_text="hotel"): results.append(x) assert len(results) == 7 results = [] - async for x in await client.search(query="motel"): + async for x in await client.search(search_text="motel"): results.append(x) assert len(results) == 2 @@ -103,14 +102,15 @@ async def test_get_search_filter(self, api_key, 
endpoint, index_name, **kwargs): endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="WiFi") - query.filter("category eq 'Budget'") - query.select("hotelName", "category", "description") - query.order_by("hotelName desc") - async with client: results = [] - async for x in await client.search(query=query): + select = ("hotelName", "category", "description") + async for x in await client.search( + search_text="WiFi", + filter="category eq 'Budget'", + select=",".join(select), + order_by="hotelName desc" + ): results.append(x) assert [x["hotelName"] for x in results] == sorted( [x["hotelName"] for x in results], reverse=True @@ -132,12 +132,10 @@ async def test_get_search_counts(self, api_key, endpoint, index_name, **kwargs): endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="hotel") - results = await client.search(query=query) + results = await client.search(search_text="hotel") assert await results.get_count() is None - query = SearchQuery(search_text="hotel", include_total_result_count=True) - results = await client.search(query=query) + results = await client.search(search_text="hotel", include_total_result_count=True) assert await results.get_count() == 7 @ResourceGroupPreparer(random_name_enabled=True) @@ -147,12 +145,10 @@ async def test_get_search_coverage(self, api_key, endpoint, index_name, **kwargs endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="hotel") - results = await client.search(query=query) + results = await client.search(search_text="hotel") assert await results.get_coverage() is None - query = SearchQuery(search_text="hotel", minimum_coverage=50.0) - results = await client.search(query=query) + results = await client.search(search_text="hotel", minimum_coverage=50.0) cov = await results.get_coverage() assert isinstance(cov, float) assert cov >= 50.0 @@ -166,11 +162,12 @@ async def test_get_search_facets_none( endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="WiFi") - query.select("hotelName", "category", "description") - async with client: - results = await client.search(query=query) + select = ("hotelName", "category", "description") + results = await client.search( + search_text="WiFi", + select=",".join(select) + ) assert await results.get_facets() is None @ResourceGroupPreparer(random_name_enabled=True) @@ -182,11 +179,13 @@ async def test_get_search_facets_result( endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="WiFi", facets=["category"]) - query.select("hotelName", "category", "description") - async with client: - results = await client.search(query=query) + select = ("hotelName", "category", "description") + results = await client.search( + search_text="WiFi", + facets=["category"], + select=",".join(select) + ) assert await results.get_facets() == { "category": [ {"value": "Budget", "count": 4}, diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py index b3d3055fcc1..6ffa76fa12f 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py @@ -20,7 +20,7 @@ from azure.core.exceptions import HttpResponseError from azure.search.documents.indexes.models import( - AnalyzeRequest, + AnalyzeTextOptions, AnalyzeResult, CorsOptions, EntityRecognitionSkill, @@ 
-260,7 +260,7 @@ async def test_create_or_update_indexes_if_unchanged(self, api_key, endpoint, in @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) async def test_analyze_text(self, api_key, endpoint, index_name, **kwargs): client = SearchIndexClient(endpoint, AzureKeyCredential(api_key)) - analyze_request = AnalyzeRequest(text="One's ", analyzer="standard.lucene") + analyze_request = AnalyzeTextOptions(text="One's ", analyzer_name="standard.lucene") result = await client.analyze_text(index_name, analyze_request) assert len(result.tokens) == 2 diff --git a/sdk/search/azure-search-documents/tests/test_index_documents_batch.py b/sdk/search/azure-search-documents/tests/test_index_documents_batch.py index 9fd058fe842..7aa45a76529 100644 --- a/sdk/search/azure-search-documents/tests/test_index_documents_batch.py +++ b/sdk/search/azure-search-documents/tests/test_index_documents_batch.py @@ -10,10 +10,10 @@ from azure.search.documents import IndexDocumentsBatch METHOD_NAMES = [ - "add_upload_documents", - "add_delete_documents", - "add_merge_documents", - "add_merge_or_upload_documents", + "add_upload_actions", + "add_delete_actions", + "add_merge_actions", + "add_merge_or_upload_actions", ] METHOD_MAP = dict(zip(METHOD_NAMES, ["upload", "delete", "merge", "mergeOrUpload"])) diff --git a/sdk/search/azure-search-documents/tests/test_index_live.py b/sdk/search/azure-search-documents/tests/test_index_live.py index 52d04f9e574..4401009e473 100644 --- a/sdk/search/azure-search-documents/tests/test_index_live.py +++ b/sdk/search/azure-search-documents/tests/test_index_live.py @@ -21,7 +21,6 @@ from azure.core.exceptions import HttpResponseError from azure.core.credentials import AzureKeyCredential from azure.search.documents import SearchClient -from azure.search.documents.models import SearchQuery TIME_TO_SLEEP = 3 @@ -62,10 +61,10 @@ def test_get_search_simple(self, api_key, endpoint, index_name, **kwargs): client = SearchClient( endpoint, index_name, AzureKeyCredential(api_key) ) - results = list(client.search(query="hotel")) + results = list(client.search(search_text="hotel")) assert len(results) == 7 - results = list(client.search(query="motel")) + results = list(client.search(search_text="motel")) assert len(results) == 2 @ResourceGroupPreparer(random_name_enabled=True) @@ -75,12 +74,13 @@ def test_get_search_filter(self, api_key, endpoint, index_name, **kwargs): endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="WiFi") - query.filter("category eq 'Budget'") - query.select("hotelName", "category", "description") - query.order_by("hotelName desc") - - results = list(client.search(query=query)) + select = ("hotelName", "category", "description") + results = list(client.search( + search_text="WiFi", + filter="category eq 'Budget'", + select=",".join(select), + order_by="hotelName desc" + )) assert [x["hotelName"] for x in results] == sorted( [x["hotelName"] for x in results], reverse=True ) @@ -101,12 +101,10 @@ def test_get_search_counts(self, api_key, endpoint, index_name, **kwargs): endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="hotel") - results = client.search(query=query) + results = client.search(search_text="hotel") assert results.get_count() is None - query = SearchQuery(search_text="hotel", include_total_result_count=True) - results = client.search(query=query) + results = client.search(search_text="hotel", include_total_result_count=True) assert results.get_count() == 7 
@ResourceGroupPreparer(random_name_enabled=True) @@ -116,12 +114,10 @@ def test_get_search_coverage(self, api_key, endpoint, index_name, **kwargs): endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="hotel") - results = client.search(query=query) + results = client.search(search_text="hotel") assert results.get_coverage() is None - query = SearchQuery(search_text="hotel", minimum_coverage=50.0) - results = client.search(query=query) + results = client.search(search_text="hotel", minimum_coverage=50.0) cov = results.get_coverage() assert isinstance(cov, float) assert cov >= 50.0 @@ -133,10 +129,8 @@ def test_get_search_facets_none(self, api_key, endpoint, index_name, **kwargs): endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="WiFi") - query.select("hotelName", "category", "description") - - results = client.search(query=query) + select = ("hotelName", "category", "description") + results = client.search(search_text="WiFi", select=",".join(select)) assert results.get_facets() is None @ResourceGroupPreparer(random_name_enabled=True) @@ -146,10 +140,11 @@ def test_get_search_facets_result(self, api_key, endpoint, index_name, **kwargs) endpoint, index_name, AzureKeyCredential(api_key) ) - query = SearchQuery(search_text="WiFi", facets=["category"]) - query.select("hotelName", "category", "description") - - results = client.search(query=query) + select = ("hotelName", "category", "description") + results = client.search(search_text="WiFi", + facets=["category"], + select=",".join(select) + ) assert results.get_facets() == { "category": [ {"value": "Budget", "count": 4}, diff --git a/sdk/search/azure-search-documents/tests/test_queries.py b/sdk/search/azure-search-documents/tests/test_queries.py index 0bbca28a1f1..53abf62af37 100644 --- a/sdk/search/azure-search-documents/tests/test_queries.py +++ b/sdk/search/azure-search-documents/tests/test_queries.py @@ -16,8 +16,7 @@ SuggestRequest, ) -from azure.search.documents.models import SearchQuery -from azure.search.documents._internal._queries import AutocompleteQuery, SuggestQuery +from azure.search.documents._internal._queries import AutocompleteQuery, SuggestQuery, SearchQuery class TestAutocompleteQuery(object): @@ -63,7 +62,7 @@ def test_init(self): assert query.request.order_by is None assert query.request.select is None - @mock.patch("azure.search.documents.models.SearchQuery._request_type") + @mock.patch("azure.search.documents._internal._queries.SearchQuery._request_type") def test_kwargs_forwarded(self, mock_request): mock_request.return_value = None SearchQuery(foo=10, bar=20) diff --git a/sdk/search/azure-search-documents/tests/test_search_index_client.py b/sdk/search/azure-search-documents/tests/test_search_index_client.py index 667f4a7b4e4..db4c5031b6f 100644 --- a/sdk/search/azure-search-documents/tests/test_search_index_client.py +++ b/sdk/search/azure-search-documents/tests/test_search_index_client.py @@ -25,10 +25,7 @@ IndexDocumentsBatch, SearchClient, ) -from azure.search.documents.models import ( - SearchQuery, - odata, -) +from azure.search.documents.models import odata CREDENTIAL = AzureKeyCredential(key="test_api_key") @@ -140,15 +137,12 @@ def test_get_document(self, mock_get): assert mock_get.call_args[1]["key"] == "some_key" assert mock_get.call_args[1]["selected_fields"] == "foo" - @pytest.mark.parametrize( - "query", ["search text", SearchQuery(search_text="search text")], ids=repr - ) @mock.patch( 
"azure.search.documents._internal._generated.operations._documents_operations.DocumentsOperations.search_post" ) - def test_search_query_argument(self, mock_search_post, query): + def test_search_query_argument(self, mock_search_post): client = SearchClient("endpoint", "index name", CREDENTIAL) - result = client.search(query) + result = client.search(search_text="search text") assert isinstance(result, ItemPaged) assert result._page_iterator_class is SearchPageIterator search_result = SearchDocumentsResult() @@ -162,14 +156,6 @@ def test_search_query_argument(self, mock_search_post, query): mock_search_post.call_args[1]["search_request"].search_text == "search text" ) - def test_search_bad_argument(self): - client = SearchClient("endpoint", "index name", CREDENTIAL) - with pytest.raises(TypeError) as e: - client.search(10) - assert str(e) == "Expected a SuggestQuery for 'query', but got {}".format( - repr(10) - ) - @mock.patch( "azure.search.documents._internal._generated.operations._documents_operations.DocumentsOperations.suggest_post" ) @@ -252,10 +238,10 @@ def test_index_documents(self, mock_index): client = SearchClient("endpoint", "index name", CREDENTIAL) batch = IndexDocumentsBatch() - batch.add_upload_documents("upload1") - batch.add_delete_documents("delete1", "delete2") - batch.add_merge_documents(["merge1", "merge2", "merge3"]) - batch.add_merge_or_upload_documents("merge_or_upload1") + batch.add_upload_actions("upload1") + batch.add_delete_actions("delete1", "delete2") + batch.add_merge_actions(["merge1", "merge2", "merge3"]) + batch.add_merge_or_upload_actions("merge_or_upload1") client.index_documents(batch, extra="foo") assert mock_index.called diff --git a/sdk/search/azure-search-documents/tests/test_service_live.py b/sdk/search/azure-search-documents/tests/test_service_live.py index dd9599382d2..6b7e7853bde 100644 --- a/sdk/search/azure-search-documents/tests/test_service_live.py +++ b/sdk/search/azure-search-documents/tests/test_service_live.py @@ -17,7 +17,7 @@ from azure.core.credentials import AzureKeyCredential from azure.core.exceptions import HttpResponseError from azure.search.documents.indexes.models import( - AnalyzeRequest, + AnalyzeTextOptions, AnalyzeResult, CorsOptions, EntityRecognitionSkill, @@ -242,7 +242,7 @@ def test_create_or_update_indexes_if_unchanged(self, api_key, endpoint, index_na @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) def test_analyze_text(self, api_key, endpoint, index_name, **kwargs): client = SearchIndexClient(endpoint, AzureKeyCredential(api_key)) - analyze_request = AnalyzeRequest(text="One's ", analyzer="standard.lucene") + analyze_request = AnalyzeTextOptions(text="One's ", analyzer_name="standard.lucene") result = client.analyze_text(index_name, analyze_request) assert len(result.tokens) == 2