diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py
index f556a4e7078a..ae7c87de97cd 100644
--- a/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py
+++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/auto_ml/async_client.py
@@ -2561,7 +2561,7 @@ async def sample_list_model_evaluations():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "AutoMlAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):
diff --git a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py
index 3eb6923ed8ec..58d1e846b40c 100644
--- a/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py
+++ b/packages/google-cloud-automl/google/cloud/automl_v1/services/prediction_service/async_client.py
@@ -673,7 +673,7 @@ async def sample_batch_predict():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "PredictionServiceAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):
diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
index 81d796f0da50..070ee491035d 100644
--- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
+++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
@@ -3241,7 +3241,7 @@ async def sample_list_model_evaluations():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "AutoMlAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):
diff --git a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
index 4ac9fe9fe1d7..a1e990817a9e 100644
--- a/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
+++ b/packages/google-cloud-automl/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
@@ -638,7 +638,7 @@ async def sample_batch_predict():
         # Done; return the response.
         return response

-    async def __aenter__(self):
+    async def __aenter__(self) -> "PredictionServiceAsyncClient":
         return self

     async def __aexit__(self, exc_type, exc, tb):
diff --git a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json
index 65712b1173fb..df593972f2f6 100644
--- a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json
+++ b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1.json
@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-automl",
-    "version": "2.11.1"
+    "version": "0.1.0"
   },
   "snippets": [
     {
diff --git a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json
index 8fb28818057e..70c0487411a8 100644
--- a/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json
+++ b/packages/google-cloud-automl/samples/generated_samples/snippet_metadata_google.cloud.automl.v1beta1.json
@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-automl",
-    "version": "2.11.1"
+    "version": "0.1.0"
   },
   "snippets": [
     {
diff --git a/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py b/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py
index e30fdef4ac99..05d528c77262 100644
--- a/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py
+++ b/packages/google-cloud-automl/tests/unit/gapic/automl_v1/test_auto_ml.py
@@ -969,9 +969,6 @@ def test_get_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.get_dataset(request)

@@ -1602,9 +1599,11 @@ async def test_list_datasets_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_datasets(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -1636,9 +1635,6 @@ def test_update_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.update_dataset(request)

@@ -3165,9 +3161,6 @@ def test_get_model(request_type, transport: str = "grpc"):
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )
         response = client.get_model(request)

@@ -3798,9 +3791,11 @@ async def test_list_models_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_models(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -4058,9 +4053,6 @@ def test_update_model(request_type, transport: str = "grpc"):
             dataset_id="dataset_id_value",
             deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )
         response = client.update_model(request)

@@ -5052,9 +5044,6 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )
         response = client.get_model_evaluation(request)

@@ -5730,9 +5719,11 @@ async def test_list_model_evaluations_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_model_evaluations(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -6075,9 +6066,6 @@ def test_get_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -6698,9 +6686,6 @@ def test_update_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -8432,9 +8417,6 @@ def test_get_model_rest(request_type):
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -9328,9 +9310,6 @@ def test_update_model_rest(request_type):
             dataset_id="dataset_id_value",
             deployment_state=gca_model.Model.DeploymentState.DEPLOYED,
             etag="etag_value",
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -10447,9 +10426,6 @@ def test_get_model_evaluation_rest(request_type):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )

         # Wrap the value into a proper Response obj
diff --git a/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
index 9e56fe31f2a0..0c3aa5fc63d6 100644
--- a/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
+++ b/packages/google-cloud-automl/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
@@ -718,9 +718,6 @@ def test_create_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.create_dataset(request)

@@ -999,9 +996,6 @@ def test_get_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.get_dataset(request)

@@ -1632,9 +1626,11 @@ async def test_list_datasets_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_datasets(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -1666,9 +1662,6 @@ def test_update_dataset(request_type, transport: str = "grpc"):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )
         response = client.update_dataset(request)

@@ -3563,9 +3556,11 @@ async def test_list_table_specs_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_table_specs(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -4511,9 +4506,11 @@ async def test_list_column_specs_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_column_specs(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -5058,9 +5055,6 @@ def test_get_model(request_type, transport: str = "grpc"):
             display_name="display_name_value",
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )
         response = client.get_model(request)

@@ -5688,9 +5682,11 @@ async def test_list_models_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_models(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -6931,9 +6927,6 @@ def test_get_model_evaluation(request_type, transport: str = "grpc"):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )
         response = client.get_model_evaluation(request)

@@ -7599,9 +7592,11 @@ async def test_list_model_evaluations_async_pages():
             RuntimeError,
         )
         pages = []
-        async for page_ in (
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
             await client.list_model_evaluations(request={})
-        ).pages:  # pragma: no branch
+        ).pages:
             pages.append(page_)
     for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
         assert page_.raw_page.next_page_token == token
@@ -7660,9 +7655,6 @@ def test_create_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -7977,9 +7969,6 @@ def test_get_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -8611,9 +8600,6 @@ def test_update_dataset_rest(request_type):
             description="description_value",
             example_count=1396,
             etag="etag_value",
-            translation_dataset_metadata=translation.TranslationDatasetMetadata(
-                source_language_code="source_language_code_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -12373,9 +12359,6 @@ def test_get_model_rest(request_type):
             display_name="display_name_value",
             dataset_id="dataset_id_value",
             deployment_state=model.Model.DeploymentState.DEPLOYED,
-            translation_model_metadata=translation.TranslationModelMetadata(
-                base_model="base_model_value"
-            ),
         )

         # Wrap the value into a proper Response obj
@@ -14307,9 +14290,6 @@ def test_get_model_evaluation_rest(request_type):
             annotation_spec_id="annotation_spec_id_value",
             display_name="display_name_value",
             evaluated_example_count=2446,
-            classification_evaluation_metrics=classification.ClassificationEvaluationMetrics(
-                au_prc=0.634
-            ),
         )

         # Wrap the value into a proper Response obj
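The `__aenter__` return annotations above mainly help static type checkers: inside an `async with` block the bound variable is now seen as the concrete client class. A minimal usage sketch, not part of this diff; the project and location in the parent path are placeholders, and Application Default Credentials are assumed:

import asyncio

from google.cloud import automl_v1


async def main() -> None:
    # With __aenter__ annotated to return "AutoMlAsyncClient", type checkers
    # such as mypy infer `client` as AutoMlAsyncClient here.
    async with automl_v1.AutoMlAsyncClient() as client:
        request = automl_v1.ListDatasetsRequest(
            parent="projects/my-project/locations/us-central1"  # placeholder
        )
        # list_datasets returns an async pager; iterate it with `async for`.
        page_result = await client.list_datasets(request=request)
        async for dataset in page_result:
            print(dataset.name)


if __name__ == "__main__":
    asyncio.run(main())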