Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[textanalytics] remove dummy classes from samples #19441

Merged
merged 2 commits into from
Jun 24, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -196,7 +196,7 @@ def cancel(self, **kwargs): # type: ignore
:start-after: [START analyze_healthcare_entities_with_cancellation]
:end-before: [END analyze_healthcare_entities_with_cancellation]
:language: python
:dedent: 8
:dedent: 4
:caption: Cancel an existing health operation.
"""
polling_interval = kwargs.pop("polling_interval", 5)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,14 +95,14 @@ class TextAnalyticsClient(TextAnalyticsClientBase):
:start-after: [START create_ta_client_with_key]
:end-before: [END create_ta_client_with_key]
:language: python
:dedent: 8
:dedent: 4
:caption: Creating the TextAnalyticsClient with endpoint and API key.

.. literalinclude:: ../samples/sample_authentication.py
:start-after: [START create_ta_client_with_aad]
:end-before: [END create_ta_client_with_aad]
:language: python
:dedent: 8
:dedent: 4
:caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
"""

Expand Down Expand Up @@ -174,7 +174,7 @@ def detect_language( # type: ignore
:start-after: [START detect_language]
:end-before: [END detect_language]
:language: python
:dedent: 8
:dedent: 4
:caption: Detecting language in a batch of documents.
"""
country_hint_arg = kwargs.pop("country_hint", None)
Expand Down Expand Up @@ -256,7 +256,7 @@ def recognize_entities( # type: ignore
:start-after: [START recognize_entities]
:end-before: [END recognize_entities]
:language: python
:dedent: 8
:dedent: 4
:caption: Recognize entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -355,7 +355,7 @@ def recognize_pii_entities( # type: ignore
:start-after: [START recognize_pii_entities]
:end-before: [END recognize_pii_entities]
:language: python
:dedent: 8
:dedent: 4
:caption: Recognize personally identifiable information entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -457,7 +457,7 @@ def recognize_linked_entities( # type: ignore
:start-after: [START recognize_linked_entities]
:end-before: [END recognize_linked_entities]
:language: python
:dedent: 8
:dedent: 4
:caption: Recognize linked entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -559,7 +559,7 @@ def begin_analyze_healthcare_entities( # type: ignore
:start-after: [START analyze_healthcare_entities]
:end-before: [END analyze_healthcare_entities]
:language: python
:dedent: 8
:dedent: 4
:caption: Recognize healthcare entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -667,7 +667,7 @@ def extract_key_phrases( # type: ignore
:start-after: [START extract_key_phrases]
:end-before: [END extract_key_phrases]
:language: python
:dedent: 8
:dedent: 4
:caption: Extract the key phrases in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -759,7 +759,7 @@ def analyze_sentiment( # type: ignore
:start-after: [START analyze_sentiment]
:end-before: [END analyze_sentiment]
:language: python
:dedent: 8
:dedent: 4
:caption: Analyze sentiment in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -874,7 +874,7 @@ def begin_analyze_actions( # type: ignore
:start-after: [START analyze]
:end-before: [END analyze]
:language: python
:dedent: 8
:dedent: 4
:caption: Start a long-running operation to perform a variety of text analysis
actions over a batch of documents.
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -175,7 +175,7 @@ async def cancel( # type: ignore
:start-after: [START analyze_healthcare_entities_with_cancellation_async]
:end-before: [END analyze_healthcare_entities_with_cancellation_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Cancel an existing health operation.
"""
polling_interval = kwargs.pop("polling_interval", 5)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -91,14 +91,14 @@ class TextAnalyticsClient(AsyncTextAnalyticsClientBase):
:start-after: [START create_ta_client_with_key_async]
:end-before: [END create_ta_client_with_key_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Creating the TextAnalyticsClient with endpoint and API key.

.. literalinclude:: ../samples/async_samples/sample_authentication_async.py
:start-after: [START create_ta_client_with_aad_async]
:end-before: [END create_ta_client_with_aad_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Creating the TextAnalyticsClient with endpoint and token credential from Azure Active Directory.
"""

Expand Down Expand Up @@ -172,7 +172,7 @@ async def detect_language( # type: ignore
:start-after: [START detect_language_async]
:end-before: [END detect_language_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Detecting language in a batch of documents.
"""
country_hint_arg = kwargs.pop("country_hint", None)
Expand Down Expand Up @@ -252,7 +252,7 @@ async def recognize_entities( # type: ignore
:start-after: [START recognize_entities_async]
:end-before: [END recognize_entities_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Recognize entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -350,7 +350,7 @@ async def recognize_pii_entities( # type: ignore
:start-after: [START recognize_pii_entities]
:end-before: [END recognize_pii_entities]
:language: python
:dedent: 8
:dedent: 4
:caption: Recognize personally identifiable information entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -450,7 +450,7 @@ async def recognize_linked_entities( # type: ignore
:start-after: [START recognize_linked_entities_async]
:end-before: [END recognize_linked_entities_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Recognize linked entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -537,7 +537,7 @@ async def extract_key_phrases( # type: ignore
:start-after: [START extract_key_phrases_async]
:end-before: [END extract_key_phrases_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Extract the key phrases in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -625,7 +625,7 @@ async def analyze_sentiment( # type: ignore
:start-after: [START analyze_sentiment_async]
:end-before: [END analyze_sentiment_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Analyze sentiment in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -734,7 +734,7 @@ async def begin_analyze_healthcare_entities( # type: ignore
:start-after: [START analyze_healthcare_entities_async]
:end-before: [END analyze_healthcare_entities_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Analyze healthcare entities in a batch of documents.
"""
language_arg = kwargs.pop("language", None)
Expand Down Expand Up @@ -858,7 +858,7 @@ async def begin_analyze_actions( # type: ignore
:start-after: [START analyze_async]
:end-before: [END analyze_async]
:language: python
:dedent: 8
:dedent: 4
:caption: Start a long-running operation to perform a variety of text analysis actions over
a batch of documents.
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,41 +25,38 @@
import asyncio


class AlternativeDocumentInputSampleAsync(object):
    """Demonstrates supplying documents as plain dicts — each carrying its
    own ``country_hint`` — to the async Text Analytics language-detection
    call, then printing the detected language per document.
    """

    async def alternative_document_input(self):
        # Imports are local so the sample is copy-paste self-contained.
        from azure.core.credentials import AzureKeyCredential
        from azure.ai.textanalytics.aio import TextAnalyticsClient

        endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
        key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

        client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

        # Dict-shaped inputs: "id", "country_hint" and "text" keys per document.
        documents = [
            {"id": "0", "country_hint": "US", "text": "I had the best day of my life. I decided to go sky-diving and it made me appreciate my whole life so much more. I developed a deep-connection with my instructor as well."},
            {"id": "1", "country_hint": "GB",
             "text": "This was a waste of my time. The speaker put me to sleep."},
            {"id": "2", "country_hint": "MX", "text": "No tengo dinero ni nada que dar..."},
            {"id": "3", "country_hint": "FR",
             "text": "L'hôtel n'était pas très confortable. L'éclairage était trop sombre."}
        ]

        # Use the client as an async context manager so its transport is
        # closed once the service call has completed.
        async with client:
            results = await client.detect_language(documents)

        for index, document in enumerate(results):
            if document.is_error:
                print(document.id, document.error)
            else:
                language = document.primary_language
                print("Document text: {}".format(documents[index]))
                print("Language detected: {}".format(language.name))
                print("ISO6391 name: {}".format(language.iso6391_name))
                print("Confidence score: {}\n".format(language.confidence_score))
async def sample_alternative_document_input():
    """Run language detection over documents provided as dicts, where every
    document specifies its own country hint, and print each result.
    """
    # Local imports keep the runnable sample self-contained.
    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics.aio import TextAnalyticsClient

    endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
    key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]

    text_analytics_client = TextAnalyticsClient(endpoint=endpoint, credential=AzureKeyCredential(key))

    # Alternative input shape: a list of dicts rather than strings or
    # DetectLanguageInput objects; each dict sets its own "country_hint".
    documents = [
        {"id": "0", "country_hint": "US", "text": "I had the best day of my life. I decided to go sky-diving and it made me appreciate my whole life so much more. I developed a deep-connection with my instructor as well."},
        {"id": "1", "country_hint": "GB",
         "text": "This was a waste of my time. The speaker put me to sleep."},
        {"id": "2", "country_hint": "MX", "text": "No tengo dinero ni nada que dar..."},
        {"id": "3", "country_hint": "FR",
         "text": "L'hôtel n'était pas très confortable. L'éclairage était trop sombre."}
    ]

    # The async with block closes the client's transport after the call.
    async with text_analytics_client:
        detected = await text_analytics_client.detect_language(documents)

    for position, item in enumerate(detected):
        if not item.is_error:
            primary = item.primary_language
            print("Document text: {}".format(documents[position]))
            print("Language detected: {}".format(primary.name))
            print("ISO6391 name: {}".format(primary.iso6391_name))
            print("Confidence score: {}\n".format(primary.confidence_score))
        else:
            print(item.id, item.error)


async def main():
    """Entry coroutine for the sample.

    The diff-merged text instantiated the removed
    ``AlternativeDocumentInputSampleAsync`` class *and* called the new
    module-level coroutine, which would reference a deleted class and run
    the sample twice. Only the new module-level function is awaited.
    """
    await sample_alternative_document_input()


if __name__ == '__main__':
Expand Down
Loading