diff --git a/packages/google-cloud-container/.kokoro/docs/common.cfg b/packages/google-cloud-container/.kokoro/docs/common.cfg
index 859900410818..e092b9425fb8 100644
--- a/packages/google-cloud-container/.kokoro/docs/common.cfg
+++ b/packages/google-cloud-container/.kokoro/docs/common.cfg
@@ -30,7 +30,7 @@ env_vars: {
env_vars: {
key: "V2_STAGING_BUCKET"
- value: "docs-staging-v2-staging"
+ value: "docs-staging-v2"
}
# It will upload the docker image after successful builds.
diff --git a/packages/google-cloud-container/.kokoro/samples/python3.6/common.cfg b/packages/google-cloud-container/.kokoro/samples/python3.6/common.cfg
index f62d4920f1da..c252f67be870 100644
--- a/packages/google-cloud-container/.kokoro/samples/python3.6/common.cfg
+++ b/packages/google-cloud-container/.kokoro/samples/python3.6/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.6"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py36"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-container/.kokoro/test-samples.sh"
diff --git a/packages/google-cloud-container/.kokoro/samples/python3.7/common.cfg b/packages/google-cloud-container/.kokoro/samples/python3.7/common.cfg
index 4463d19855dc..7cacad0d919a 100644
--- a/packages/google-cloud-container/.kokoro/samples/python3.7/common.cfg
+++ b/packages/google-cloud-container/.kokoro/samples/python3.7/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.7"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py37"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-container/.kokoro/test-samples.sh"
diff --git a/packages/google-cloud-container/.kokoro/samples/python3.8/common.cfg b/packages/google-cloud-container/.kokoro/samples/python3.8/common.cfg
index 77b79cba58da..988ed4ff5836 100644
--- a/packages/google-cloud-container/.kokoro/samples/python3.8/common.cfg
+++ b/packages/google-cloud-container/.kokoro/samples/python3.8/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.8"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py38"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-container/.kokoro/test-samples.sh"
diff --git a/packages/google-cloud-container/.kokoro/test-samples.sh b/packages/google-cloud-container/.kokoro/test-samples.sh
index b8cb9cbc7cb0..ab33a23f1d00 100755
--- a/packages/google-cloud-container/.kokoro/test-samples.sh
+++ b/packages/google-cloud-container/.kokoro/test-samples.sh
@@ -28,6 +28,12 @@ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
git checkout $LATEST_RELEASE
fi
+# Exit early if samples directory doesn't exist
+if [ ! -d "./samples" ]; then
+ echo "No tests run. `./samples` not found"
+ exit 0
+fi
+
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
@@ -101,4 +107,4 @@ cd "$ROOT"
# Workaround for Kokoro permissions issue: delete secrets
rm testing/{test-env.sh,client-secrets.json,service-account.json}
-exit "$RTN"
\ No newline at end of file
+exit "$RTN"
diff --git a/packages/google-cloud-container/CODE_OF_CONDUCT.md b/packages/google-cloud-container/CODE_OF_CONDUCT.md
index b3d1f6029849..039f43681204 100644
--- a/packages/google-cloud-container/CODE_OF_CONDUCT.md
+++ b/packages/google-cloud-container/CODE_OF_CONDUCT.md
@@ -1,44 +1,95 @@
-# Contributor Code of Conduct
+# Code of Conduct
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
+## Our Pledge
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/packages/google-cloud-container/CONTRIBUTING.rst b/packages/google-cloud-container/CONTRIBUTING.rst
index ef44b69de39f..f9ddc489c7d0 100644
--- a/packages/google-cloud-container/CONTRIBUTING.rst
+++ b/packages/google-cloud-container/CONTRIBUTING.rst
@@ -80,25 +80,6 @@ We use `nox <https://pypi.org/project/nox/>`__ to instrument our tests.
.. _nox: https://pypi.org/project/nox/
-Note on Editable Installs / Develop Mode
-========================================
-
-- As mentioned previously, using ``setuptools`` in `develop mode`_
- or a ``pip`` `editable install`_ is not possible with this
- library. This is because this library uses `namespace packages`_.
- For context see `Issue #2316`_ and the relevant `PyPA issue`_.
-
- Since ``editable`` / ``develop`` mode can't be used, packages
- need to be installed directly. Hence your changes to the source
- tree don't get incorporated into the **already installed**
- package.
-
-.. _namespace packages: https://www.python.org/dev/peps/pep-0420/
-.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316
-.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12
-.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode
-.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
-
*****************************************
I'm getting weird errors... Can you help?
*****************************************
diff --git a/packages/google-cloud-container/docs/conf.py b/packages/google-cloud-container/docs/conf.py
index 7bb0b3528d3f..bc32a9d7f9e1 100644
--- a/packages/google-cloud-container/docs/conf.py
+++ b/packages/google-cloud-container/docs/conf.py
@@ -39,6 +39,7 @@
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
+ "sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
@@ -348,6 +349,7 @@
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
}
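
With `sphinx.ext.doctest` enabled above, any `>>>` examples embedded in the documentation can be exercised with the doctest builder (`sphinx-build -b doctest`). A hedged, self-contained illustration of the kind of example it checks (the function itself is not part of this library):

```python
def add(a: int, b: int) -> int:
    """Return the sum of two integers.

    Example (verified by the doctest builder once it is enabled):

    >>> add(2, 3)
    5
    """
    return a + b
```
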
diff --git a/packages/google-cloud-container/docs/container_v1/types.rst b/packages/google-cloud-container/docs/container_v1/types.rst
index 6b9225d69228..0767dd6eb140 100644
--- a/packages/google-cloud-container/docs/container_v1/types.rst
+++ b/packages/google-cloud-container/docs/container_v1/types.rst
@@ -3,3 +3,4 @@ Types for Google Container v1 API
.. automodule:: google.cloud.container_v1.types
:members:
+ :show-inheritance:
diff --git a/packages/google-cloud-container/docs/container_v1beta1/types.rst b/packages/google-cloud-container/docs/container_v1beta1/types.rst
index 8c258957bc6e..9dd46e91dd9a 100644
--- a/packages/google-cloud-container/docs/container_v1beta1/types.rst
+++ b/packages/google-cloud-container/docs/container_v1beta1/types.rst
@@ -3,3 +3,4 @@ Types for Google Container v1beta1 API
.. automodule:: google.cloud.container_v1beta1.types
:members:
+ :show-inheritance:
diff --git a/packages/google-cloud-container/google/cloud/container/__init__.py b/packages/google-cloud-container/google/cloud/container/__init__.py
index 216ce9b33e1f..bc6619dc0ed3 100644
--- a/packages/google-cloud-container/google/cloud/container/__init__.py
+++ b/packages/google-cloud-container/google/cloud/container/__init__.py
@@ -36,20 +36,28 @@
from google.cloud.container_v1.types.cluster_service import ClusterAutoscaling
from google.cloud.container_v1.types.cluster_service import ClusterUpdate
from google.cloud.container_v1.types.cluster_service import CompleteIPRotationRequest
+from google.cloud.container_v1.types.cluster_service import ConfigConnectorConfig
from google.cloud.container_v1.types.cluster_service import CreateClusterRequest
from google.cloud.container_v1.types.cluster_service import CreateNodePoolRequest
from google.cloud.container_v1.types.cluster_service import DailyMaintenanceWindow
from google.cloud.container_v1.types.cluster_service import DatabaseEncryption
+from google.cloud.container_v1.types.cluster_service import DefaultSnatStatus
from google.cloud.container_v1.types.cluster_service import DeleteClusterRequest
from google.cloud.container_v1.types.cluster_service import DeleteNodePoolRequest
+from google.cloud.container_v1.types.cluster_service import DnsCacheConfig
from google.cloud.container_v1.types.cluster_service import GetClusterRequest
+from google.cloud.container_v1.types.cluster_service import GetJSONWebKeysRequest
+from google.cloud.container_v1.types.cluster_service import GetJSONWebKeysResponse
from google.cloud.container_v1.types.cluster_service import GetNodePoolRequest
+from google.cloud.container_v1.types.cluster_service import GetOpenIDConfigRequest
+from google.cloud.container_v1.types.cluster_service import GetOpenIDConfigResponse
from google.cloud.container_v1.types.cluster_service import GetOperationRequest
from google.cloud.container_v1.types.cluster_service import GetServerConfigRequest
from google.cloud.container_v1.types.cluster_service import HorizontalPodAutoscaling
from google.cloud.container_v1.types.cluster_service import HttpLoadBalancing
from google.cloud.container_v1.types.cluster_service import IPAllocationPolicy
from google.cloud.container_v1.types.cluster_service import IntraNodeVisibilityConfig
+from google.cloud.container_v1.types.cluster_service import Jwk
from google.cloud.container_v1.types.cluster_service import KubernetesDashboard
from google.cloud.container_v1.types.cluster_service import LegacyAbac
from google.cloud.container_v1.types.cluster_service import ListClustersRequest
@@ -78,13 +86,20 @@
from google.cloud.container_v1.types.cluster_service import NodePoolAutoscaling
from google.cloud.container_v1.types.cluster_service import NodeTaint
from google.cloud.container_v1.types.cluster_service import Operation
+from google.cloud.container_v1.types.cluster_service import OperationProgress
from google.cloud.container_v1.types.cluster_service import PrivateClusterConfig
+from google.cloud.container_v1.types.cluster_service import (
+ PrivateClusterMasterGlobalAccessConfig,
+)
from google.cloud.container_v1.types.cluster_service import RecurringTimeWindow
+from google.cloud.container_v1.types.cluster_service import ReleaseChannel
+from google.cloud.container_v1.types.cluster_service import ReservationAffinity
from google.cloud.container_v1.types.cluster_service import ResourceLimit
from google.cloud.container_v1.types.cluster_service import ResourceUsageExportConfig
from google.cloud.container_v1.types.cluster_service import (
RollbackNodePoolUpgradeRequest,
)
+from google.cloud.container_v1.types.cluster_service import SandboxConfig
from google.cloud.container_v1.types.cluster_service import ServerConfig
from google.cloud.container_v1.types.cluster_service import SetAddonsConfigRequest
from google.cloud.container_v1.types.cluster_service import SetLabelsRequest
@@ -101,6 +116,7 @@
from google.cloud.container_v1.types.cluster_service import SetNodePoolManagementRequest
from google.cloud.container_v1.types.cluster_service import SetNodePoolSizeRequest
from google.cloud.container_v1.types.cluster_service import ShieldedInstanceConfig
+from google.cloud.container_v1.types.cluster_service import ShieldedNodes
from google.cloud.container_v1.types.cluster_service import StartIPRotationRequest
from google.cloud.container_v1.types.cluster_service import StatusCondition
from google.cloud.container_v1.types.cluster_service import TimeWindow
@@ -112,6 +128,8 @@
UsableSubnetworkSecondaryRange,
)
from google.cloud.container_v1.types.cluster_service import VerticalPodAutoscaling
+from google.cloud.container_v1.types.cluster_service import WorkloadIdentityConfig
+from google.cloud.container_v1.types.cluster_service import WorkloadMetadataConfig
__all__ = (
"AcceleratorConfig",
@@ -129,20 +147,28 @@
"ClusterManagerClient",
"ClusterUpdate",
"CompleteIPRotationRequest",
+ "ConfigConnectorConfig",
"CreateClusterRequest",
"CreateNodePoolRequest",
"DailyMaintenanceWindow",
"DatabaseEncryption",
+ "DefaultSnatStatus",
"DeleteClusterRequest",
"DeleteNodePoolRequest",
+ "DnsCacheConfig",
"GetClusterRequest",
+ "GetJSONWebKeysRequest",
+ "GetJSONWebKeysResponse",
"GetNodePoolRequest",
+ "GetOpenIDConfigRequest",
+ "GetOpenIDConfigResponse",
"GetOperationRequest",
"GetServerConfigRequest",
"HorizontalPodAutoscaling",
"HttpLoadBalancing",
"IPAllocationPolicy",
"IntraNodeVisibilityConfig",
+ "Jwk",
"KubernetesDashboard",
"LegacyAbac",
"ListClustersRequest",
@@ -167,11 +193,16 @@
"NodePoolAutoscaling",
"NodeTaint",
"Operation",
+ "OperationProgress",
"PrivateClusterConfig",
+ "PrivateClusterMasterGlobalAccessConfig",
"RecurringTimeWindow",
+ "ReleaseChannel",
+ "ReservationAffinity",
"ResourceLimit",
"ResourceUsageExportConfig",
"RollbackNodePoolUpgradeRequest",
+ "SandboxConfig",
"ServerConfig",
"SetAddonsConfigRequest",
"SetLabelsRequest",
@@ -186,6 +217,7 @@
"SetNodePoolManagementRequest",
"SetNodePoolSizeRequest",
"ShieldedInstanceConfig",
+ "ShieldedNodes",
"StartIPRotationRequest",
"StatusCondition",
"TimeWindow",
@@ -195,4 +227,6 @@
"UsableSubnetwork",
"UsableSubnetworkSecondaryRange",
"VerticalPodAutoscaling",
+ "WorkloadIdentityConfig",
+ "WorkloadMetadataConfig",
)
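
The additions above re-export the new v1 messages from the root `google.cloud.container` package. A minimal usage sketch, assuming the usual proto-plus keyword constructors (the reservation name is hypothetical):

```python
from google.cloud.container import ReservationAffinity

# Consume capacity from one specific zonal reservation, following the
# key/value convention documented in the ReservationAffinity proto below.
affinity = ReservationAffinity(
    consume_reservation_type=ReservationAffinity.Type.SPECIFIC_RESERVATION,
    key="googleapis.com/reservation-name",
    values=["my-reservation"],  # hypothetical reservation name
)
```
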
diff --git a/packages/google-cloud-container/google/cloud/container_v1/__init__.py b/packages/google-cloud-container/google/cloud/container_v1/__init__.py
index 1c3fbcaba55e..ce0ea9ad5be7 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/__init__.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/__init__.py
@@ -29,20 +29,28 @@
from .types.cluster_service import ClusterAutoscaling
from .types.cluster_service import ClusterUpdate
from .types.cluster_service import CompleteIPRotationRequest
+from .types.cluster_service import ConfigConnectorConfig
from .types.cluster_service import CreateClusterRequest
from .types.cluster_service import CreateNodePoolRequest
from .types.cluster_service import DailyMaintenanceWindow
from .types.cluster_service import DatabaseEncryption
+from .types.cluster_service import DefaultSnatStatus
from .types.cluster_service import DeleteClusterRequest
from .types.cluster_service import DeleteNodePoolRequest
+from .types.cluster_service import DnsCacheConfig
from .types.cluster_service import GetClusterRequest
+from .types.cluster_service import GetJSONWebKeysRequest
+from .types.cluster_service import GetJSONWebKeysResponse
from .types.cluster_service import GetNodePoolRequest
+from .types.cluster_service import GetOpenIDConfigRequest
+from .types.cluster_service import GetOpenIDConfigResponse
from .types.cluster_service import GetOperationRequest
from .types.cluster_service import GetServerConfigRequest
from .types.cluster_service import HorizontalPodAutoscaling
from .types.cluster_service import HttpLoadBalancing
from .types.cluster_service import IPAllocationPolicy
from .types.cluster_service import IntraNodeVisibilityConfig
+from .types.cluster_service import Jwk
from .types.cluster_service import KubernetesDashboard
from .types.cluster_service import LegacyAbac
from .types.cluster_service import ListClustersRequest
@@ -67,11 +75,16 @@
from .types.cluster_service import NodePoolAutoscaling
from .types.cluster_service import NodeTaint
from .types.cluster_service import Operation
+from .types.cluster_service import OperationProgress
from .types.cluster_service import PrivateClusterConfig
+from .types.cluster_service import PrivateClusterMasterGlobalAccessConfig
from .types.cluster_service import RecurringTimeWindow
+from .types.cluster_service import ReleaseChannel
+from .types.cluster_service import ReservationAffinity
from .types.cluster_service import ResourceLimit
from .types.cluster_service import ResourceUsageExportConfig
from .types.cluster_service import RollbackNodePoolUpgradeRequest
+from .types.cluster_service import SandboxConfig
from .types.cluster_service import ServerConfig
from .types.cluster_service import SetAddonsConfigRequest
from .types.cluster_service import SetLabelsRequest
@@ -86,6 +99,7 @@
from .types.cluster_service import SetNodePoolManagementRequest
from .types.cluster_service import SetNodePoolSizeRequest
from .types.cluster_service import ShieldedInstanceConfig
+from .types.cluster_service import ShieldedNodes
from .types.cluster_service import StartIPRotationRequest
from .types.cluster_service import StatusCondition
from .types.cluster_service import TimeWindow
@@ -95,6 +109,8 @@
from .types.cluster_service import UsableSubnetwork
from .types.cluster_service import UsableSubnetworkSecondaryRange
from .types.cluster_service import VerticalPodAutoscaling
+from .types.cluster_service import WorkloadIdentityConfig
+from .types.cluster_service import WorkloadMetadataConfig
__all__ = (
@@ -111,20 +127,28 @@
"ClusterAutoscaling",
"ClusterUpdate",
"CompleteIPRotationRequest",
+ "ConfigConnectorConfig",
"CreateClusterRequest",
"CreateNodePoolRequest",
"DailyMaintenanceWindow",
"DatabaseEncryption",
+ "DefaultSnatStatus",
"DeleteClusterRequest",
"DeleteNodePoolRequest",
+ "DnsCacheConfig",
"GetClusterRequest",
+ "GetJSONWebKeysRequest",
+ "GetJSONWebKeysResponse",
"GetNodePoolRequest",
+ "GetOpenIDConfigRequest",
+ "GetOpenIDConfigResponse",
"GetOperationRequest",
"GetServerConfigRequest",
"HorizontalPodAutoscaling",
"HttpLoadBalancing",
"IPAllocationPolicy",
"IntraNodeVisibilityConfig",
+ "Jwk",
"KubernetesDashboard",
"LegacyAbac",
"ListClustersRequest",
@@ -149,11 +173,16 @@
"NodePoolAutoscaling",
"NodeTaint",
"Operation",
+ "OperationProgress",
"PrivateClusterConfig",
+ "PrivateClusterMasterGlobalAccessConfig",
"RecurringTimeWindow",
+ "ReleaseChannel",
+ "ReservationAffinity",
"ResourceLimit",
"ResourceUsageExportConfig",
"RollbackNodePoolUpgradeRequest",
+ "SandboxConfig",
"ServerConfig",
"SetAddonsConfigRequest",
"SetLabelsRequest",
@@ -168,6 +197,7 @@
"SetNodePoolManagementRequest",
"SetNodePoolSizeRequest",
"ShieldedInstanceConfig",
+ "ShieldedNodes",
"StartIPRotationRequest",
"StatusCondition",
"TimeWindow",
@@ -177,5 +207,7 @@
"UsableSubnetwork",
"UsableSubnetworkSecondaryRange",
"VerticalPodAutoscaling",
+ "WorkloadIdentityConfig",
+ "WorkloadMetadataConfig",
"ClusterManagerClient",
)
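
The same surface is mirrored in the versioned `container_v1` package. A hedged sketch wiring one of the new add-on messages into a cluster definition (message and field names come from the proto changes below; the cluster name is illustrative):

```python
from google.cloud import container_v1

# Enable the new NodeLocal DNSCache add-on via AddonsConfig.dns_cache_config.
cluster = container_v1.Cluster(
    name="example-cluster",
    addons_config=container_v1.AddonsConfig(
        dns_cache_config=container_v1.DnsCacheConfig(enabled=True),
    ),
)
```
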
diff --git a/packages/google-cloud-container/google/cloud/container_v1/proto/cluster_service.proto b/packages/google-cloud-container/google/cloud/container_v1/proto/cluster_service.proto
index 6363fc9496da..040173ec22ff 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/proto/cluster_service.proto
+++ b/packages/google-cloud-container/google/cloud/container_v1/proto/cluster_service.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,7 +11,6 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
@@ -65,7 +64,8 @@ service ClusterManager {
// Compute Engine instances.
//
// By default, the cluster is created in the project's
- // [default network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).
+ // [default
+ // network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks).
//
// One firewall is added for the cluster. After cluster creation,
// the Kubelet creates routes for each node to allow the containers
@@ -168,7 +168,11 @@ service ClusterManager {
}
// Sets the locations for a specific cluster.
+ // Deprecated. Use
+ // [projects.locations.clusters.update](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update)
+ // instead.
rpc SetLocations(SetLocationsRequest) returns (Operation) {
+ option deprecated = true;
option (google.api.http) = {
post: "/v1/{name=projects/*/locations/*/clusters/*}:setLocations"
body: "*"
@@ -249,6 +253,7 @@ service ClusterManager {
}
};
option (google.api.method_signature) = "project_id,zone,operation_id";
+ option (google.api.method_signature) = "name";
}
// Cancels the specified operation.
@@ -277,6 +282,16 @@ service ClusterManager {
option (google.api.method_signature) = "name";
}
+ // Gets the public component of the cluster signing keys in
+ // JSON Web Key format.
+ // This API is not yet intended for general use, and is not available for all
+ // clusters.
+ rpc GetJSONWebKeys(GetJSONWebKeysRequest) returns (GetJSONWebKeysResponse) {
+ option (google.api.http) = {
+ get: "/v1/{parent=projects/*/locations/*/clusters/*}/jwks"
+ };
+ }
+
// Lists the node pools for a cluster.
rpc ListNodePools(ListNodePoolsRequest) returns (ListNodePoolsResponse) {
option (google.api.http) = {
@@ -459,11 +474,9 @@ service ClusterManager {
// Parameters that describe the nodes in a cluster.
message NodeConfig {
// The name of a Google Compute Engine [machine
- // type](https://cloud.google.com/compute/docs/machine-types) (e.g.
- // `n1-standard-1`).
+ // type](https://cloud.google.com/compute/docs/machine-types)
//
- // If unspecified, the default machine type is
- // `n1-standard-1`.
+ // If unspecified, the default machine type is `e2-medium`.
string machine_type = 1;
// Size of the disk attached to each node, specified in GB.
@@ -482,41 +495,46 @@ message NodeConfig {
// persistent storage on your nodes.
// * `https://www.googleapis.com/auth/devstorage.read_only` is required for
// communicating with **gcr.io**
- // (the [Google Container Registry](https://cloud.google.com/container-registry/)).
+ // (the [Google Container
+ // Registry](https://cloud.google.com/container-registry/)).
//
// If unspecified, no scopes are added, unless Cloud Logging or Cloud
// Monitoring are enabled, in which case their required scopes will be added.
repeated string oauth_scopes = 3;
- // The Google Cloud Platform Service Account to be used by the node VMs. If
- // no Service Account is specified, the "default" service account is used.
+ // The Google Cloud Platform Service Account to be used by the node VMs.
+ // Specify the email address of the Service Account; otherwise, if no Service
+ // Account is specified, the "default" service account is used.
string service_account = 9;
// The metadata key/value pairs assigned to instances in the cluster.
//
- // Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less than 128 bytes
+ // Keys must conform to the regexp `[a-zA-Z0-9-_]+` and be less than 128 bytes
// in length. These are reflected as part of a URL in the metadata server.
// Additionally, to avoid ambiguity, keys must not conflict with any other
// metadata keys for the project or be one of the reserved keys:
- // "cluster-location"
- // "cluster-name"
- // "cluster-uid"
- // "configure-sh"
- // "containerd-configure-sh"
- // "enable-os-login"
- // "gci-update-strategy"
- // "gci-ensure-gke-docker"
- // "instance-template"
- // "kube-env"
- // "startup-script"
- // "user-data"
- // "disable-address-manager"
- // "windows-startup-script-ps1"
- // "common-psm1"
- // "k8s-node-setup-psm1"
- // "install-ssh-psm1"
- // "user-profile-psm1"
- // "serial-port-logging-enable"
+ // - "cluster-location"
+ // - "cluster-name"
+ // - "cluster-uid"
+ // - "configure-sh"
+ // - "containerd-configure-sh"
+ // - "enable-os-login"
+ // - "gci-ensure-gke-docker"
+ // - "gci-metrics-enabled"
+ // - "gci-update-strategy"
+ // - "instance-template"
+ // - "kube-env"
+ // - "startup-script"
+ // - "user-data"
+ // - "disable-address-manager"
+ // - "windows-startup-script-ps1"
+ // - "common-psm1"
+ // - "k8s-node-setup-psm1"
+ // - "install-ssh-psm1"
+ // - "user-profile-psm1"
+ //
+ // The following keys are reserved for Windows nodes:
+ // - "serial-port-logging-enable"
//
// Values are free-form strings, and only have meaning as interpreted by
// the image running in the instance. The only restriction placed on them is
@@ -563,7 +581,8 @@ message NodeConfig {
// support for GPUs.
repeated AcceleratorConfig accelerators = 11;
- // Type of the disk attached to each node (e.g. 'pd-standard' or 'pd-ssd')
+ // Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or
+ // 'pd-balanced')
//
// If unspecified, the default disk type is 'pd-standard'
string disk_type = 12;
@@ -571,20 +590,47 @@ message NodeConfig {
// Minimum CPU platform to be used by this instance. The instance may be
// scheduled on the specified or newer CPU platform. Applicable values are the
// friendly names of CPU platforms, such as
- // minCpuPlatform: "Intel Haswell"
or
- // minCpuPlatform: "Intel Sandy Bridge"
. For more
+ // `minCpuPlatform: "Intel Haswell"` or
+ // `minCpuPlatform: "Intel Sandy Bridge"`. For more
// information, read [how to specify min CPU
// platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
string min_cpu_platform = 13;
+ // The workload metadata configuration for this node.
+ WorkloadMetadataConfig workload_metadata_config = 14;
+
// List of kubernetes taints to be applied to each node.
//
// For more information, including usage and the valid values, see:
// https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
repeated NodeTaint taints = 15;
+ // Sandbox configuration for this node.
+ SandboxConfig sandbox_config = 17;
+
+ // Setting this field will assign instances of this
+ // pool to run on the specified node group. This is useful for running
+ // workloads on [sole tenant
+ // nodes](https://cloud.google.com/compute/docs/nodes/sole-tenant-nodes).
+ string node_group = 18;
+
+ // The optional reservation affinity. Setting this field will apply
+ // the specified [Zonal Compute
+ // Reservation](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources)
+ // to this node pool.
+ ReservationAffinity reservation_affinity = 19;
+
// Shielded Instance options.
ShieldedInstanceConfig shielded_instance_config = 20;
+
+ //
+ // The Customer Managed Encryption Key used to encrypt the boot disk attached
+ // to each node in the node pool. This should be of the form
+ // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME].
+ // For more information about protecting resources with Cloud KMS Keys please
+ // see:
+ // https://cloud.google.com/compute/docs/disks/customer-managed-encryption
+ string boot_disk_kms_key = 23;
}
// A set of Shielded Instance options.
@@ -605,11 +651,59 @@ message ShieldedInstanceConfig {
bool enable_integrity_monitoring = 2;
}
+// SandboxConfig contains configurations of the sandbox to use for the node.
+message SandboxConfig {
+ // Possible types of sandboxes.
+ enum Type {
+ // Default value. This should not be used.
+ UNSPECIFIED = 0;
+
+ // Run sandbox using gvisor.
+ GVISOR = 1;
+ }
+
+ // Type of the sandbox to use for the node.
+ Type type = 2;
+}
+
+// [ReservationAffinity](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources)
+// is the configuration of the desired reservation from which instances can
+// consume capacity.
+message ReservationAffinity {
+ // Indicates whether to consume capacity from a reservation or not.
+ enum Type {
+ // Default value. This should not be used.
+ UNSPECIFIED = 0;
+
+ // Do not consume from any reserved capacity.
+ NO_RESERVATION = 1;
+
+ // Consume any reservation available.
+ ANY_RESERVATION = 2;
+
+ // Must consume from a specific reservation. Must specify key value fields
+ // for specifying the reservations.
+ SPECIFIC_RESERVATION = 3;
+ }
+
+ // Corresponds to the type of reservation consumption.
+ Type consume_reservation_type = 1;
+
+ // Corresponds to the label key of a reservation resource. To target a
+ // SPECIFIC_RESERVATION by name, specify "googleapis.com/reservation-name" as
+ // the key and specify the name of your reservation as its value.
+ string key = 2;
+
+ // Corresponds to the label value(s) of reservation resource(s).
+ repeated string values = 3;
+}
+
// Kubernetes taint is comprised of three fields: key, value, and effect. Effect
// can only be one of three types: NoSchedule, PreferNoSchedule or NoExecute.
//
-// For more information, including usage and the valid values, see:
-// https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+// See
+// [here](https://kubernetes.io/docs/concepts/configuration/taint-and-toleration)
+// for more information, including usage and the valid values.
message NodeTaint {
// Possible values for Effect in taint.
enum Effect {
@@ -643,13 +737,23 @@ message MasterAuth {
// The username to use for HTTP basic authentication to the master endpoint.
// For clusters v1.6.0 and later, basic authentication can be disabled by
// leaving username unspecified (or setting it to the empty string).
- string username = 1;
+ //
+ // Warning: basic authentication is deprecated, and will be removed in GKE
+ // control plane versions 1.19 and newer. For a list of recommended
+ // authentication methods, see:
+ // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication
+ string username = 1 [deprecated = true];
// The password to use for HTTP basic authentication to the master endpoint.
// Because the master endpoint is open to the Internet, you should create a
// strong password. If a password is provided for cluster creation, username
// must be non-empty.
- string password = 2;
+ //
+ // Warning: basic authentication is deprecated, and will be removed in GKE
+ // control plane versions 1.19 and newer. For a list of recommended
+ // authentication methods, see:
+ // https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication
+ string password = 2 [deprecated = true];
// Configuration for client certificate authentication on the cluster. For
// clusters before v1.12, if no configuration is specified, a client
@@ -702,6 +806,13 @@ message AddonsConfig {
// Configuration for the Cloud Run addon, which allows the user to use a
// managed Knative service.
CloudRunConfig cloud_run_config = 7;
+
+ // Configuration for NodeLocalDNS, a DNS cache running on cluster nodes.
+ DnsCacheConfig dns_cache_config = 8;
+
+ // Configuration for the ConfigConnector add-on, a Kubernetes
+ // extension to manage hosted GCP services through the Kubernetes API.
+ ConfigConnectorConfig config_connector_config = 10;
}
// Configuration options for the HTTP (L7) load balancing controller addon,
@@ -718,8 +829,8 @@ message HttpLoadBalancing {
// has based on the resource usage of the existing pods.
message HorizontalPodAutoscaling {
// Whether the Horizontal Pod Autoscaling feature is enabled in the cluster.
- // When enabled, it ensures that a Heapster pod is running in the cluster,
- // which is also used by the Cloud Monitoring service.
+ // When enabled, it ensures that metrics are collected into Stackdriver
+ // Monitoring.
bool disabled = 1;
}
@@ -737,6 +848,18 @@ message NetworkPolicyConfig {
bool disabled = 1;
}
+// Configuration for NodeLocal DNSCache
+message DnsCacheConfig {
+ // Whether NodeLocal DNSCache is enabled for this cluster.
+ bool enabled = 1;
+}
+
+// Configuration for controlling master global access settings.
+message PrivateClusterMasterGlobalAccessConfig {
+ // Whether the master is accessible globally or not.
+ bool enabled = 1;
+}
+
// Configuration options for private clusters.
message PrivateClusterConfig {
// Whether nodes have internal IP addresses only. If enabled, all nodes are
@@ -758,6 +881,12 @@ message PrivateClusterConfig {
// Output only. The external IP address of this cluster's master endpoint.
string public_endpoint = 5;
+
+ // Output only. The peering name in the customer VPC used by this cluster.
+ string peering_name = 7;
+
+ // Controls master global access settings.
+ PrivateClusterMasterGlobalAccessConfig master_global_access_config = 8;
}
// Configuration for returning group information from authenticators.
@@ -773,8 +902,29 @@ message AuthenticatorGroupsConfig {
// Configuration options for the Cloud Run feature.
message CloudRunConfig {
+ // Load balancer type of ingress service of Cloud Run.
+ enum LoadBalancerType {
+ // Load balancer type for Cloud Run is unspecified.
+ LOAD_BALANCER_TYPE_UNSPECIFIED = 0;
+
+ // Install external load balancer for Cloud Run.
+ LOAD_BALANCER_TYPE_EXTERNAL = 1;
+
+ // Install internal load balancer for Cloud Run.
+ LOAD_BALANCER_TYPE_INTERNAL = 2;
+ }
+
// Whether Cloud Run addon is enabled for this cluster.
bool disabled = 1;
+
+ // Which load balancer type is installed for Cloud Run.
+ LoadBalancerType load_balancer_type = 3;
+}
+
+// Configuration options for the Config Connector add-on.
+message ConfigConnectorConfig {
+ // Whether Config Connector is enabled for this cluster.
+ bool enabled = 1;
}
// Configuration options for the master authorized networks feature. Enabled
@@ -838,6 +988,9 @@ message BinaryAuthorization {
// Configuration for controlling how IPs are allocated in the cluster.
message IPAllocationPolicy {
// Whether alias IPs will be used for pod IPs in the cluster.
+ // This is used in conjunction with use_routes. It cannot
+ // be true if use_routes is true. If both use_ip_aliases and use_routes are
+ // false, then the server picks the default IP allocation mode.
bool use_ip_aliases = 1;
// Whether a new subnetwork will be created automatically for the cluster.
@@ -943,6 +1096,12 @@ message IPAllocationPolicy {
// `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`) to pick a specific range
// to use.
string tpu_ipv4_cidr_block = 13;
+
+ // Whether routes will be used for pod IPs in the cluster.
+ // This is used in conjunction with use_ip_aliases. It cannot be true if
+ // use_ip_aliases is true. If both use_ip_aliases and use_routes are false,
+ // then the server picks the default IP allocation mode
+ bool use_routes = 15;
}
// A Google Kubernetes Engine cluster.
@@ -967,8 +1126,8 @@ message Cluster {
// The STOPPING state indicates the cluster is being deleted.
STOPPING = 4;
- // The ERROR state indicates the cluster may be unusable. Details
- // can be found in the `statusMessage` field.
+ // The ERROR state indicates the cluster is unusable. It will be
+ // automatically deleted. Details can be found in the `statusMessage` field.
ERROR = 5;
// The DEGRADED state indicates the cluster requires user action to restore
@@ -1023,25 +1182,33 @@ message Cluster {
// The logging service the cluster should use to write logs.
// Currently available options:
//
- // * "logging.googleapis.com/kubernetes" - the Google Cloud Logging
- // service with Kubernetes-native resource model
- // * `logging.googleapis.com` - the Google Cloud Logging service.
+ // * `logging.googleapis.com/kubernetes` - The Cloud Logging
+ // service with a Kubernetes-native resource model
+ // * `logging.googleapis.com` - The legacy Cloud Logging service (no longer
+ // available as of GKE 1.15).
// * `none` - no logs will be exported from the cluster.
- // * if left as an empty string,`logging.googleapis.com` will be used.
+ //
+ // If left as an empty string, `logging.googleapis.com/kubernetes` will be
+ // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.
string logging_service = 6;
// The monitoring service the cluster should use to write metrics.
// Currently available options:
//
- // * `monitoring.googleapis.com` - the Google Cloud Monitoring service.
- // * `none` - no metrics will be exported from the cluster.
- // * if left as an empty string, `monitoring.googleapis.com` will be used.
+ // * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring
+ // service with a Kubernetes-native resource model
+ // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no
+ // longer available as of GKE 1.15).
+ // * `none` - No metrics will be exported from the cluster.
+ //
+ // If left as an empty string, `monitoring.googleapis.com/kubernetes` will be
+ // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.
string monitoring_service = 7;
// The name of the Google Compute Engine
- // [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the
- // cluster is connected. If left unspecified, the `default` network
- // will be used.
+ // [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
+ // to which the cluster is connected. If left unspecified, the `default`
+ // network will be used.
string network = 8;
// The IP address range of the container pods in this cluster, in
@@ -1054,8 +1221,8 @@ message Cluster {
AddonsConfig addons_config = 10;
// The name of the Google Compute Engine
- // [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which the
- // cluster is connected.
+ // [subnetwork](https://cloud.google.com/compute/docs/subnetworks) to which
+ // the cluster is connected.
string subnetwork = 11;
// The node pools associated with this cluster.
@@ -1064,8 +1231,16 @@ message Cluster {
repeated NodePool node_pools = 12;
// The list of Google Compute Engine
- // [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes
- // should be located.
+ // [zones](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster's nodes should be located.
+ //
+ // This field provides a default value if
+ // [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations)
+ // are not specified during node pool creation.
+ //
+ // Warning: changing cluster locations will update the
+ // [NodePool.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools#NodePool.FIELDS.locations)
+ // of all node pools and will result in nodes being added and/or removed.
repeated string locations = 13;
// Kubernetes alpha features are enabled on this cluster. This includes alpha
@@ -1128,13 +1303,22 @@ message Cluster {
// Cluster-level Vertical Pod Autoscaling configuration.
VerticalPodAutoscaling vertical_pod_autoscaling = 39;
+ // Shielded Nodes configuration.
+ ShieldedNodes shielded_nodes = 40;
+
+ // Release channel configuration.
+ ReleaseChannel release_channel = 41;
+
+ // Configuration for the use of Kubernetes Service Accounts in GCP IAM
+ // policies.
+ WorkloadIdentityConfig workload_identity_config = 43;
+
// [Output only] Server-defined URL for the resource.
string self_link = 100;
// [Output only] The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field is deprecated, use location instead.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field is deprecated, use location instead.
string zone = 101 [deprecated = true];
// [Output only] The IP address of this cluster's master endpoint.
@@ -1164,7 +1348,7 @@ message Cluster {
string current_master_version = 104;
// [Output only] Deprecated, use
- // [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePools)
+ // [NodePools.version](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters.nodePools)
// instead. The current version of the node software components. If they are
// currently at multiple versions because they're in the process of being
// upgraded, this reflects the minimum version of all nodes.
@@ -1177,9 +1361,10 @@ message Cluster {
// [Output only] The current status of this cluster.
Status status = 107;
- // [Output only] Additional information about the current status of this
+ // [Output only] Deprecated. Use conditions instead.
+ // Additional information about the current status of this
// cluster, if available.
- string status_message = 108;
+ string status_message = 108 [deprecated = true];
// [Output only] The size of the address space on each node for hosting
// containers. This is provisioned from within the `container_ipv4_cidr`
@@ -1206,9 +1391,10 @@ message Cluster {
string expire_time = 113;
// [Output only] The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or
- // [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which
- // the cluster resides.
+ // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)
+ // or
+ // [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)
+ // in which the cluster resides.
string location = 114;
// Enable the ability to use Cloud TPUs in this cluster.
@@ -1243,10 +1429,14 @@ message ClusterUpdate {
// The monitoring service the cluster should use to write metrics.
// Currently available options:
//
- // * "monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring
- // service with Kubernetes-native resource model
- // * "monitoring.googleapis.com" - the Google Cloud Monitoring service
- // * "none" - no metrics will be exported from the cluster
+ // * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring
+ // service with a Kubernetes-native resource model
+ // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no
+ // longer available as of GKE 1.15).
+ // * `none` - No metrics will be exported from the cluster.
+ //
+ // If left as an empty string, `monitoring.googleapis.com/kubernetes` will be
+ // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.
string desired_monitoring_service = 5;
// Configurations for the various addons available to run in the cluster.
@@ -1265,6 +1455,12 @@ message ClusterUpdate {
// Configuration of etcd encryption.
DatabaseEncryption desired_database_encryption = 46;
+ // Configuration for Workload Identity.
+ WorkloadIdentityConfig desired_workload_identity_config = 47;
+
+ // Configuration for Shielded Nodes.
+ ShieldedNodes desired_shielded_nodes = 48;
+
// Autoscaler configuration for the node pool specified in
// desired_node_pool_id. If there is only one pool in the
// cluster and desired_node_pool_id is not provided then
@@ -1272,12 +1468,13 @@ message ClusterUpdate {
NodePoolAutoscaling desired_node_pool_autoscaling = 9;
// The desired list of Google Compute Engine
- // [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes
- // should be located. Changing the locations a cluster is in will result
- // in nodes being either created or removed from the cluster, depending on
- // whether locations are being added or removed.
+ // [zones](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster's nodes should be located.
//
// This list must always include the cluster's primary zone.
+ //
+ // Warning: changing cluster locations will update the locations of all node
+ // pools and will result in nodes being added and/or removed.
repeated string desired_locations = 10;
// The desired configuration options for master authorized networks feature.
@@ -1292,10 +1489,14 @@ message ClusterUpdate {
// The logging service the cluster should use to write logs.
// Currently available options:
//
- // * "logging.googleapis.com/kubernetes" - the Google Cloud Logging
- // service with Kubernetes-native resource model
- // * "logging.googleapis.com" - the Google Cloud Logging service
- // * "none" - no logs will be exported from the cluster
+ // * `logging.googleapis.com/kubernetes` - The Cloud Logging
+ // service with a Kubernetes-native resource model
+ // * `logging.googleapis.com` - The legacy Cloud Logging service (no longer
+ // available as of GKE 1.15).
+ // * `none` - no logs will be exported from the cluster.
+ //
+ // If left as an empty string, `logging.googleapis.com/kubernetes` will be
+ // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.
string desired_logging_service = 19;
// The desired configuration for exporting resource usage.
@@ -1304,9 +1505,18 @@ message ClusterUpdate {
// Cluster-level Vertical Pod Autoscaling configuration.
VerticalPodAutoscaling desired_vertical_pod_autoscaling = 22;
+ // The desired private cluster configuration.
+ PrivateClusterConfig desired_private_cluster_config = 25;
+
// The desired config of Intra-node visibility.
IntraNodeVisibilityConfig desired_intra_node_visibility_config = 26;
+ // The desired status of whether to disable default sNAT for this cluster.
+ DefaultSnatStatus desired_default_snat_status = 28;
+
+ // The desired release channel configuration.
+ ReleaseChannel desired_release_channel = 31;
+
// The Kubernetes version to change the master to.
//
// Users may specify either explicit versions offered by
@@ -1399,9 +1609,8 @@ message Operation {
string name = 1;
// The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation
- // is taking place.
- // This field is deprecated, use location instead.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // operation is taking place. This field is deprecated, use location instead.
string zone = 2 [deprecated = true];
// The operation type.
@@ -1413,8 +1622,8 @@ message Operation {
// Detailed operation progress, if available.
string detail = 8;
- // If an error has occurred, a textual description of the error.
- string status_message = 5;
+ // Output only. If an error has occurred, a textual description of the error.
+ string status_message = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
// Server-defined URL for the resource.
string self_link = 6;
@@ -1423,9 +1632,10 @@ message Operation {
string target_link = 7;
// [Output only] The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) or
- // [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available) in which
- // the cluster resides.
+ // [zone](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)
+ // or
+ // [region](https://cloud.google.com/compute/docs/regions-zones/regions-zones#available)
+ // in which the cluster resides.
string location = 9;
// [Output only] The time the operation started, in
@@ -1436,6 +1646,9 @@ message Operation {
// [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
string end_time = 11;
+ // Output only. [Output only] Progress information for an operation.
+ OperationProgress progress = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+
// Which conditions caused the current cluster state.
repeated StatusCondition cluster_conditions = 13;
@@ -1443,6 +1656,46 @@ message Operation {
repeated StatusCondition nodepool_conditions = 14;
}
+// Information about operation (or operation stage) progress.
+message OperationProgress {
+ // Progress metric is (string, int|float|string) pair.
+ message Metric {
+ // Required. Metric name, e.g., "nodes total", "percent done".
+ string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+ // Strictly one of the values is required.
+ oneof value {
+ // For metrics with integer value.
+ int64 int_value = 2;
+
+ // For metrics with floating point value.
+ double double_value = 3;
+
+ // For metrics with custom values (ratios, visual progress, etc.).
+ string string_value = 4;
+ }
+ }
+
+ // A non-parameterized string describing an operation stage.
+ // Unset for single-stage operations.
+ string name = 1;
+
+ // Status of an operation stage.
+ // Unset for single-stage operations.
+ Operation.Status status = 2;
+
+ // Progress metric bundle, for example:
+ // metrics: [{name: "nodes done", int_value: 15},
+ // {name: "nodes total", int_value: 32}]
+ // or
+ // metrics: [{name: "progress", double_value: 0.56},
+ // {name: "progress scale", double_value: 1.0}]
+ repeated Metric metrics = 3;
+
+ // Substages of an operation or a stage.
+ repeated OperationProgress stages = 4;
+}
+
// CreateClusterRequest creates a cluster.
message CreateClusterRequest {
// Deprecated. The Google Developers Console [project ID or project
@@ -1451,13 +1704,13 @@ message CreateClusterRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the parent field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the parent
+ // field.
string zone = 2 [deprecated = true];
// Required. A [cluster
- // resource](https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters)
+ // resource](https://cloud.google.com/container-engine/reference/rest/v1/projects.locations.clusters)
Cluster cluster = 3 [(google.api.field_behavior) = REQUIRED];
// The parent (project and location) where the cluster will be created.
@@ -1473,9 +1726,9 @@ message GetClusterRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to retrieve.
@@ -1495,9 +1748,9 @@ message UpdateClusterRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1520,9 +1773,9 @@ message UpdateNodePoolRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1553,6 +1806,19 @@ message UpdateNodePoolRequest {
// update. Specified in the format
// `projects/*/locations/*/clusters/*/nodePools/*`.
string name = 8;
+
+ // The desired list of Google Compute Engine
+ // [zones](https://cloud.google.com/compute/docs/zones#available) in which the
+ // node pool's nodes should be located. Changing the locations for a node pool
+ // will result in nodes being either created or removed from the node pool,
+ // depending on whether locations are being added or removed.
+ repeated string locations = 13;
+
+ // The desired workload metadata config for the node pool.
+ WorkloadMetadataConfig workload_metadata_config = 14;
+
+ // Upgrade settings control disruption and speed of the upgrade.
+ NodePool.UpgradeSettings upgrade_settings = 15;
}
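A hedged sketch of an `UpdateNodePoolRequest` exercising the three new fields; the resource name, version strings, and zone list are hypothetical placeholders.

```python
from google.cloud import container_v1

request = container_v1.UpdateNodePoolRequest(
    name="projects/my-project/locations/us-central1/clusters/my-cluster/nodePools/default-pool",
    node_version="1.17.9-gke.600",   # required by the message
    image_type="COS",                # required by the message
    locations=["us-central1-a", "us-central1-b"],
    upgrade_settings=container_v1.NodePool.UpgradeSettings(
        max_surge=2, max_unavailable=1
    ),
)
operation = container_v1.ClusterManagerClient().update_node_pool(request=request)
```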
// SetNodePoolAutoscalingRequest sets the autoscaler settings of a node pool.
@@ -1563,9 +1829,9 @@ message SetNodePoolAutoscalingRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1593,20 +1859,26 @@ message SetLoggingServiceRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
// This field has been deprecated and replaced by the name field.
string cluster_id = 3 [deprecated = true];
- // Required. The logging service the cluster should use to write metrics.
+ // Required. The logging service the cluster should use to write logs.
// Currently available options:
//
- // * "logging.googleapis.com" - the Google Cloud Logging service
- // * "none" - no metrics will be exported from the cluster
+ // * `logging.googleapis.com/kubernetes` - The Cloud Logging
+ // service with a Kubernetes-native resource model
+ // * `logging.googleapis.com` - The legacy Cloud Logging service (no longer
+ // available as of GKE 1.15).
+ // * `none` - no logs will be exported from the cluster.
+ //
+  // If left as an empty string, `logging.googleapis.com/kubernetes` will be
+ // used for GKE 1.14+ or `logging.googleapis.com` for earlier versions.
string logging_service = 4 [(google.api.field_behavior) = REQUIRED];
// The name (project, location, cluster) of the cluster to set logging.
@@ -1622,9 +1894,9 @@ message SetMonitoringServiceRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1634,10 +1906,14 @@ message SetMonitoringServiceRequest {
// Required. The monitoring service the cluster should use to write metrics.
// Currently available options:
//
- // * "monitoring.googleapis.com/kubernetes" - the Google Cloud Monitoring
- // service with Kubernetes-native resource model
- // * "monitoring.googleapis.com" - the Google Cloud Monitoring service
- // * "none" - no metrics will be exported from the cluster
+ // * "monitoring.googleapis.com/kubernetes" - The Cloud Monitoring
+ // service with a Kubernetes-native resource model
+ // * `monitoring.googleapis.com` - The legacy Cloud Monitoring service (no
+ // longer available as of GKE 1.15).
+ // * `none` - No metrics will be exported from the cluster.
+ //
+  // If left as an empty string, `monitoring.googleapis.com/kubernetes` will be
+ // used for GKE 1.14+ or `monitoring.googleapis.com` for earlier versions.
string monitoring_service = 4 [(google.api.field_behavior) = REQUIRED];
// The name (project, location, cluster) of the cluster to set monitoring.
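The defaulting rule documented for both `logging_service` and `monitoring_service` above can be restated in a few lines. This is an illustrative reimplementation of the documented behavior (a hypothetical helper, not library code):

```python
def default_telemetry_service(kind: str, gke_version: tuple) -> str:
    """kind is "logging" or "monitoring"; gke_version is (major, minor)."""
    base = f"{kind}.googleapis.com"
    # Empty field value resolves to the Kubernetes-native variant on 1.14+.
    return f"{base}/kubernetes" if gke_version >= (1, 14) else base

assert default_telemetry_service("logging", (1, 15)) == "logging.googleapis.com/kubernetes"
assert default_telemetry_service("monitoring", (1, 13)) == "monitoring.googleapis.com"
```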
@@ -1653,9 +1929,9 @@ message SetAddonsConfigRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1679,9 +1955,9 @@ message SetLocationsRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1689,10 +1965,10 @@ message SetLocationsRequest {
string cluster_id = 3 [deprecated = true];
// Required. The desired list of Google Compute Engine
- // [zones](https://cloud.google.com/compute/docs/zones#available) in which the cluster's nodes
- // should be located. Changing the locations a cluster is in will result
- // in nodes being either created or removed from the cluster, depending on
- // whether locations are being added or removed.
+ // [zones](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster's nodes should be located. Changing the locations a cluster is in
+ // will result in nodes being either created or removed from the cluster,
+ // depending on whether locations are being added or removed.
//
// This list must always include the cluster's primary zone.
repeated string locations = 4 [(google.api.field_behavior) = REQUIRED];
@@ -1710,9 +1986,9 @@ message UpdateMasterRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1762,9 +2038,9 @@ message SetMasterAuthRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to upgrade.
@@ -1790,9 +2066,9 @@ message DeleteClusterRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to delete.
@@ -1812,9 +2088,9 @@ message ListClustersRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides, or "-" for all zones.
- // This field has been deprecated and replaced by the parent field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides, or "-" for all zones. This field has been deprecated and
+ // replaced by the parent field.
string zone = 2 [deprecated = true];
// The parent (project and location) where the clusters will be listed.
@@ -1842,9 +2118,9 @@ message GetOperationRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The server-assigned `name` of the operation.
@@ -1864,8 +2140,9 @@ message ListOperationsRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for, or `-` for
- // all zones. This field has been deprecated and replaced by the parent field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) to return
+ // operations for, or `-` for all zones. This field has been deprecated and
+ // replaced by the parent field.
string zone = 2 [deprecated = true];
// The parent (project and location) where the operations will be listed.
@@ -1882,8 +2159,9 @@ message CancelOperationRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the operation resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // operation resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The server-assigned `name` of the operation.
@@ -1913,8 +2191,9 @@ message GetServerConfigRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) to return operations for.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) to return
+ // operations for. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// The name (project and location) of the server config to get,
@@ -1924,10 +2203,22 @@ message GetServerConfigRequest {
// Kubernetes Engine service configuration.
message ServerConfig {
+ // ReleaseChannelConfig exposes configuration for a release channel.
+ message ReleaseChannelConfig {
+ // The release channel this configuration applies to.
+ ReleaseChannel.Channel channel = 1;
+
+ // The default version for newly created clusters on the channel.
+ string default_version = 2;
+
+ // List of valid versions for the channel.
+ repeated string valid_versions = 4;
+ }
+
// Version of Kubernetes the service deploys by default.
string default_cluster_version = 1;
- // List of valid node upgrade target versions.
+ // List of valid node upgrade target versions, in descending order.
repeated string valid_node_versions = 3;
// Default image type.
@@ -1936,8 +2227,11 @@ message ServerConfig {
// List of valid image types.
repeated string valid_image_types = 5;
- // List of valid master versions.
+ // List of valid master versions, in descending order.
repeated string valid_master_versions = 6;
+
+ // List of release channel configurations.
+ repeated ReleaseChannelConfig channels = 9;
}
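A brief sketch of inspecting the new `channels` field via `get_server_config`; the project and location are hypothetical.

```python
from google.cloud import container_v1

client = container_v1.ClusterManagerClient()
config = client.get_server_config(name="projects/my-project/locations/us-central1")

# One ReleaseChannelConfig per channel, with its default and valid versions.
for channel_config in config.channels:
    print(channel_config.channel, channel_config.default_version,
          list(channel_config.valid_versions))
```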
// CreateNodePoolRequest creates a node pool for a cluster.
@@ -1948,9 +2242,9 @@ message CreateNodePoolRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the parent field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the parent
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -1974,9 +2268,9 @@ message DeleteNodePoolRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -2001,9 +2295,9 @@ message ListNodePoolsRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the parent field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the parent
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -2023,9 +2317,9 @@ message GetNodePoolRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -2049,6 +2343,40 @@ message GetNodePoolRequest {
// during pod scheduling. They may also be resized up or down, to accommodate
// the workload.
message NodePool {
+ // These upgrade settings control the level of parallelism and the level of
+ // disruption caused by an upgrade.
+ //
+ // maxUnavailable controls the number of nodes that can be simultaneously
+ // unavailable.
+ //
+ // maxSurge controls the number of additional nodes that can be added to the
+ // node pool temporarily for the time of the upgrade to increase the number of
+ // available nodes.
+ //
+ // (maxUnavailable + maxSurge) determines the level of parallelism (how many
+ // nodes are being upgraded at the same time).
+ //
+ // Note: upgrades inevitably introduce some disruption since workloads need to
+ // be moved from old nodes to new, upgraded ones. Even if maxUnavailable=0,
+ // this holds true. (Disruption stays within the limits of
+ // PodDisruptionBudget, if it is configured.)
+ //
+ // Consider a hypothetical node pool with 5 nodes having maxSurge=2,
+ // maxUnavailable=1. This means the upgrade process upgrades 3 nodes
+ // simultaneously. It creates 2 additional (upgraded) nodes, then it brings
+ // down 3 old (not yet upgraded) nodes at the same time. This ensures that
+ // there are always at least 4 nodes available.
+ message UpgradeSettings {
+ // The maximum number of nodes that can be created beyond the current size
+ // of the node pool during the upgrade process.
+ int32 max_surge = 1;
+
+ // The maximum number of nodes that can be simultaneously unavailable during
+ // the upgrade process. A node is considered available if its status is
+ // Ready.
+ int32 max_unavailable = 2;
+ }
+
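The worked example in the comment above, restated as arithmetic:

```python
# Hypothetical pool of 5 nodes with maxSurge=2, maxUnavailable=1.
pool_size, max_surge, max_unavailable = 5, 2, 1

parallelism = max_surge + max_unavailable    # 3 nodes upgraded at the same time
min_available = pool_size - max_unavailable  # at least 4 nodes stay available

assert (parallelism, min_available) == (3, 4)
```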
// The current status of the node pool instance.
enum Status {
// Not set.
@@ -2092,6 +2420,18 @@ message NodePool {
// firewall and routes quota.
int32 initial_node_count = 3;
+ // The list of Google Compute Engine
+ // [zones](https://cloud.google.com/compute/docs/zones#available) in which the
+ // NodePool's nodes should be located.
+ //
+ // If this value is unspecified during node pool creation, the
+ // [Cluster.Locations](https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters#Cluster.FIELDS.locations)
+ // value will be used, instead.
+ //
+ // Warning: changing node pool locations will result in nodes being added
+ // and/or removed.
+ repeated string locations = 13;
+
// [Output only] Server-defined URL for the resource.
string self_link = 100;
@@ -2106,9 +2446,10 @@ message NodePool {
// [Output only] The status of the nodes in this pool instance.
Status status = 103;
- // [Output only] Additional information about the current status of this
+ // [Output only] Deprecated. Use conditions instead.
+ // Additional information about the current status of this
// node pool instance, if available.
- string status_message = 104;
+ string status_message = 104 [deprecated = true];
// Autoscaler configuration for this NodePool. Autoscaler is enabled
// only if a valid configuration is present.
@@ -2126,6 +2467,9 @@ message NodePool {
// [Output only] The pod CIDR block size per node in this node pool.
int32 pod_ipv4_cidr_size = 7;
+
+ // Upgrade settings control disruption and speed of the upgrade.
+ UpgradeSettings upgrade_settings = 107;
}
// NodeManagement defines the set of node management services turned on for the
@@ -2167,7 +2511,7 @@ message MaintenancePolicy {
// A hash identifying the version of this policy, so that updates to fields of
// the policy won't accidentally undo intermediate changes (and so that users
// of the API unaware of some fields won't accidentally remove other fields).
-  // Make a get() request to the cluster to get the current
+ // Make a `get()` request to the cluster to get the current
// resource version and include it with requests to set the policy.
string resource_version = 3;
}
@@ -2209,25 +2553,30 @@ message RecurringTimeWindow {
// end time.
//
// For example, to have something repeat every weekday, you'd use:
- // FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
+ // `FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR`
+ //
// To repeat some window daily (equivalent to the DailyMaintenanceWindow):
- // FREQ=DAILY
+ // `FREQ=DAILY`
+ //
// For the first weekend of every month:
- // FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU
+ // `FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU`
+ //
// This specifies how frequently the window starts. Eg, if you wanted to have
// a 9-5 UTC-4 window every weekday, you'd use something like:
- //
- // start time = 2019-01-01T09:00:00-0400
- // end time = 2019-01-01T17:00:00-0400
- // recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
- //
+ // ```
+ // start time = 2019-01-01T09:00:00-0400
+ // end time = 2019-01-01T17:00:00-0400
+ // recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
+ // ```
+ //
// Windows can span multiple days. Eg, to make the window encompass every
// weekend from midnight Saturday till the last minute of Sunday UTC:
- //
- // start time = 2019-01-05T00:00:00Z
- // end time = 2019-01-07T23:59:00Z
- // recurrence = FREQ=WEEKLY;BYDAY=SA
- //
+ // ```
+ // start time = 2019-01-05T00:00:00Z
+ // end time = 2019-01-07T23:59:00Z
+ // recurrence = FREQ=WEEKLY;BYDAY=SA
+ // ```
+ //
// Note the start and end time's specific dates are largely arbitrary except
// to specify duration of the window and when it first starts.
// The FREQ values of HOURLY, MINUTELY, and SECONDLY are not supported.
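A hedged sketch of the 9-5 UTC-4 weekday window described above, built with the Python client's `RecurringTimeWindow` and `TimeWindow` types (the dates mirror the comment's example):

```python
from datetime import datetime, timedelta, timezone

from google.cloud import container_v1

utc_minus_4 = timezone(timedelta(hours=-4))
window = container_v1.RecurringTimeWindow(
    window=container_v1.TimeWindow(
        start_time=datetime(2019, 1, 1, 9, 0, tzinfo=utc_minus_4),
        end_time=datetime(2019, 1, 1, 17, 0, tzinfo=utc_minus_4),
    ),
    recurrence="FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR",
)
```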
@@ -2257,9 +2606,9 @@ message SetNodePoolManagementRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to update.
@@ -2288,9 +2637,9 @@ message SetNodePoolSizeRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to update.
@@ -2320,9 +2669,9 @@ message RollbackNodePoolUpgradeRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to rollback.
@@ -2361,21 +2710,60 @@ message ClusterAutoscaling {
// created by NAP.
AutoprovisioningNodePoolDefaults autoprovisioning_node_pool_defaults = 4;
- // The list of Google Compute Engine [zones](https://cloud.google.com/compute/docs/zones#available)
- // in which the NodePool's nodes can be created by NAP.
+ // The list of Google Compute Engine
+ // [zones](https://cloud.google.com/compute/docs/zones#available) in which the
+ // NodePool's nodes can be created by NAP.
repeated string autoprovisioning_locations = 5;
}
// AutoprovisioningNodePoolDefaults contains defaults for a node pool created
// by NAP.
message AutoprovisioningNodePoolDefaults {
- // Scopes that are used by NAP when creating node pools. If oauth_scopes are
- // specified, service_account should be empty.
+ // Scopes that are used by NAP when creating node pools.
repeated string oauth_scopes = 1;
- // The Google Cloud Platform Service Account to be used by the node VMs. If
- // service_account is specified, scopes should be empty.
+ // The Google Cloud Platform Service Account to be used by the node VMs.
string service_account = 2;
+
+  // Specifies the upgrade settings for NAP created node pools.
+  NodePool.UpgradeSettings upgrade_settings = 3;
+
+  // Specifies the node management options for NAP created node pools.
+ NodeManagement management = 4;
+
+ // Minimum CPU platform to be used for NAP created node pools.
+ // The instance may be scheduled on the specified or newer CPU platform.
+ // Applicable values are the friendly names of CPU platforms, such as
+ // minCpuPlatform: Intel Haswell or
+ // minCpuPlatform: Intel Sandy Bridge. For more
+ // information, read [how to specify min CPU
+  // platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform).
+  // To unset the min CPU platform field, pass "automatic" as the field value.
+ string min_cpu_platform = 5;
+
+ // Size of the disk attached to each node, specified in GB.
+ // The smallest allowed disk size is 10GB.
+ //
+ // If unspecified, the default disk size is 100GB.
+ int32 disk_size_gb = 6;
+
+ // Type of the disk attached to each node (e.g. 'pd-standard', 'pd-ssd' or
+ // 'pd-balanced')
+ //
+ // If unspecified, the default disk type is 'pd-standard'
+ string disk_type = 7;
+
+ // Shielded Instance options.
+ ShieldedInstanceConfig shielded_instance_config = 8;
+
+ // The Customer Managed Encryption Key used to encrypt the boot disk attached
+ // to each node in the node pool. This should be of the form
+ // projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME].
+ // For more information about protecting resources with Cloud KMS Keys please
+ // see:
+ // https://cloud.google.com/compute/docs/disks/customer-managed-encryption
+ string boot_disk_kms_key = 9;
}
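A sketch of populating the new NAP defaults; all values are hypothetical, and the 100GB/`pd-standard` choices simply restate the documented defaults.

```python
from google.cloud import container_v1

defaults = container_v1.AutoprovisioningNodePoolDefaults(
    oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
    management=container_v1.NodeManagement(auto_upgrade=True, auto_repair=True),
    upgrade_settings=container_v1.NodePool.UpgradeSettings(
        max_surge=1, max_unavailable=0
    ),
    disk_size_gb=100,        # documented default
    disk_type="pd-standard", # documented default
)
```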
// Contains information about amount of some resource in the cluster.
@@ -2419,9 +2807,9 @@ message SetLabelsRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -2435,7 +2823,7 @@ message SetLabelsRequest {
// used to detect conflicts. The fingerprint is initially generated by
// Kubernetes Engine and changes after every request to modify or update
// labels. You must always provide an up-to-date fingerprint hash when
-  // updating or changing labels. Make a get() request to the
+ // updating or changing labels. Make a `get()` request to the
// resource to get the latest fingerprint.
string label_fingerprint = 5 [(google.api.field_behavior) = REQUIRED];
@@ -2453,9 +2841,9 @@ message SetLegacyAbacRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster to update.
@@ -2479,9 +2867,9 @@ message StartIPRotationRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -2504,9 +2892,9 @@ message CompleteIPRotationRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -2528,6 +2916,31 @@ message AcceleratorConfig {
string accelerator_type = 2;
}
+// WorkloadMetadataConfig defines the metadata configuration to expose to
+// workloads on the node pool.
+message WorkloadMetadataConfig {
+ // Mode is the configuration for how to expose metadata to workloads running
+ // on the node.
+ enum Mode {
+ // Not set.
+ MODE_UNSPECIFIED = 0;
+
+ // Expose all Compute Engine metadata to pods.
+ GCE_METADATA = 1;
+
+ // Run the GKE Metadata Server on this node. The GKE Metadata Server exposes
+ // a metadata API to workloads that is compatible with the V1 Compute
+ // Metadata APIs exposed by the Compute Engine and App Engine Metadata
+ // Servers. This feature can only be enabled if Workload Identity is enabled
+ // at the cluster level.
+ GKE_METADATA = 2;
+ }
+
+ // Mode is the configuration for how to expose metadata to workloads running
+ // on the node pool.
+ Mode mode = 2;
+}
+
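A minimal sketch of selecting the GKE metadata server mode; the resulting config plugs into the `UpdateNodePoolRequest.workload_metadata_config` field added above, and per the enum comment it requires Workload Identity at the cluster level.

```python
from google.cloud import container_v1

metadata_config = container_v1.WorkloadMetadataConfig(
    mode=container_v1.WorkloadMetadataConfig.Mode.GKE_METADATA
)
```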
// SetNetworkPolicyRequest enables/disables network policy for a cluster.
message SetNetworkPolicyRequest {
// Deprecated. The Google Developers Console [project ID or project
@@ -2536,9 +2949,9 @@ message SetNetworkPolicyRequest {
string project_id = 1 [deprecated = true];
// Deprecated. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
- // This field has been deprecated and replaced by the name field.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides. This field has been deprecated and replaced by the name
+ // field.
string zone = 2 [deprecated = true];
// Deprecated. The name of the cluster.
@@ -2560,8 +2973,8 @@ message SetMaintenancePolicyRequest {
string project_id = 1 [(google.api.field_behavior) = REQUIRED];
// Required. The name of the Google Compute Engine
- // [zone](https://cloud.google.com/compute/docs/zones#available) in which the cluster
- // resides.
+ // [zone](https://cloud.google.com/compute/docs/zones#available) in which the
+ // cluster resides.
string zone = 2 [(google.api.field_behavior) = REQUIRED];
// Required. The name of the cluster to update.
@@ -2585,7 +2998,8 @@ message StatusCondition {
// UNKNOWN indicates a generic condition.
UNKNOWN = 0;
- // GCE_STOCKOUT indicates a Google Compute Engine stockout.
+ // GCE_STOCKOUT indicates that Google Compute Engine resources are
+ // temporarily unavailable.
GCE_STOCKOUT = 1;
// GKE_SERVICE_ACCOUNT_DELETED indicates that the user deleted their robot
@@ -2614,19 +3028,140 @@ message StatusCondition {
// NetworkConfig reports the relative names of network & subnetwork.
message NetworkConfig {
// Output only. The relative name of the Google Compute Engine
- // [network][google.container.v1.NetworkConfig.network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which
- // the cluster is connected.
- // Example: projects/my-project/global/networks/my-network
+ // [network][google.container.v1.NetworkConfig.network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks)
+ // to which the cluster is connected. Example:
+ // projects/my-project/global/networks/my-network
string network = 1;
// Output only. The relative name of the Google Compute Engine
- // [subnetwork](https://cloud.google.com/compute/docs/vpc) to which the cluster is connected.
- // Example: projects/my-project/regions/us-central1/subnetworks/my-subnet
+ // [subnetwork](https://cloud.google.com/compute/docs/vpc) to which the
+ // cluster is connected. Example:
+ // projects/my-project/regions/us-central1/subnetworks/my-subnet
string subnetwork = 2;
// Whether Intra-node visibility is enabled for this cluster.
// This makes same node pod to pod traffic visible for VPC network.
bool enable_intra_node_visibility = 5;
+
+ // Whether the cluster disables default in-node sNAT rules. In-node sNAT rules
+ // will be disabled when default_snat_status is disabled. When disabled is set
+ // to false, default IP masquerade rules will be applied to the nodes to
+ // prevent sNAT on cluster-internal traffic.
+ DefaultSnatStatus default_snat_status = 7;
+}
+
+// GetOpenIDConfigRequest gets the OIDC discovery document for the
+// cluster. See the OpenID Connect Discovery 1.0 specification for details.
+message GetOpenIDConfigRequest {
+ // The cluster (project, location, cluster id) to get the discovery document
+ // for. Specified in the format `projects/*/locations/*/clusters/*`.
+ string parent = 1;
+}
+
+// GetOpenIDConfigResponse is an OIDC discovery document for the cluster.
+// See the OpenID Connect Discovery 1.0 specification for details.
+message GetOpenIDConfigResponse {
+ // OIDC Issuer.
+ string issuer = 1;
+
+ // JSON Web Key URI.
+ string jwks_uri = 2;
+
+ // Supported response types.
+ repeated string response_types_supported = 3;
+
+ // Supported subject types.
+ repeated string subject_types_supported = 4;
+
+ // Supported ID token signing algorithms.
+ repeated string id_token_signing_alg_values_supported = 5;
+
+ // Supported claims.
+ repeated string claims_supported = 6;
+
+ // Supported grant types.
+ repeated string grant_types = 7;
+}
+
+// GetJSONWebKeysRequest gets the public component of the keys used by the
+// cluster to sign token requests. This will be the jwks_uri for the discovery
+// document returned by getOpenIDConfig. See the OpenID Connect
+// Discovery 1.0 specification for details.
+message GetJSONWebKeysRequest {
+ // The cluster (project, location, cluster id) to get keys for. Specified in
+ // the format `projects/*/locations/*/clusters/*`.
+ string parent = 1;
+}
+
+// Jwk is a JSON Web Key as specified in RFC 7517
+message Jwk {
+ // Key Type.
+ string kty = 1;
+
+ // Algorithm.
+ string alg = 2;
+
+ // Permitted uses for the public keys.
+ string use = 3;
+
+ // Key ID.
+ string kid = 4;
+
+ // Used for RSA keys.
+ string n = 5;
+
+ // Used for RSA keys.
+ string e = 6;
+
+ // Used for ECDSA keys.
+ string x = 7;
+
+ // Used for ECDSA keys.
+ string y = 8;
+
+ // Used for ECDSA keys.
+ string crv = 9;
+}
+
+// GetJSONWebKeysResponse is a valid JSON Web Key Set as specified in RFC 7517.
+message GetJSONWebKeysResponse {
+ // The public component of the keys used by the cluster to sign token
+ // requests.
+ repeated Jwk keys = 1;
+}
+
+// ReleaseChannel indicates which release channel a cluster is
+// subscribed to. Release channels are arranged in order of risk.
+//
+// When a cluster is subscribed to a release channel, Google maintains
+// both the master version and the node version. Node auto-upgrade
+// defaults to true and cannot be disabled.
+message ReleaseChannel {
+ // Possible values for 'channel'.
+ enum Channel {
+ // No channel specified.
+ UNSPECIFIED = 0;
+
+ // RAPID channel is offered on an early access basis for customers who want
+ // to test new releases.
+ //
+ // WARNING: Versions available in the RAPID Channel may be subject to
+ // unresolved issues with no known workaround and are not subject to any
+ // SLAs.
+ RAPID = 1;
+
+ // Clusters subscribed to REGULAR receive versions that are considered GA
+ // quality. REGULAR is intended for production users who want to take
+ // advantage of new features.
+ REGULAR = 2;
+
+ // Clusters subscribed to STABLE receive versions that are known to be
+ // stable and reliable in production.
+ STABLE = 3;
+ }
+
+ // channel specifies which release channel the cluster is subscribed to.
+ Channel channel = 1;
}
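A hedged sketch of subscribing a cluster to the REGULAR channel; it assumes the `Cluster.release_channel` field added alongside this message, and the cluster name is hypothetical.

```python
from google.cloud import container_v1

cluster = container_v1.Cluster(
    name="my-cluster",
    release_channel=container_v1.ReleaseChannel(
        channel=container_v1.ReleaseChannel.Channel.REGULAR
    ),
)
```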
// IntraNodeVisibilityConfig contains the desired config of the intra-node
@@ -2642,6 +3177,13 @@ message MaxPodsConstraint {
int64 max_pods_per_node = 1;
}
+// Configuration for the use of Kubernetes Service Accounts in GCP IAM
+// policies.
+message WorkloadIdentityConfig {
+ // The workload pool to attach all Kubernetes service accounts to.
+ string workload_pool = 2;
+}
+
// Configuration of etcd encryption.
message DatabaseEncryption {
// State of etcd encryption.
@@ -2653,7 +3195,7 @@ message DatabaseEncryption {
ENCRYPTED = 1;
// Secrets in etcd are stored in plain text (at etcd level) - this is
- // unrelated to GCE level full disk encryption.
+ // unrelated to Compute Engine level full disk encryption.
DECRYPTED = 2;
}
@@ -2795,3 +3337,16 @@ message VerticalPodAutoscaling {
// Enables vertical pod autoscaling.
bool enabled = 1;
}
+
+// DefaultSnatStatus contains the desired state of whether default sNAT should
+// be disabled on the cluster.
+message DefaultSnatStatus {
+ // Disables cluster default sNAT rules.
+ bool disabled = 1;
+}
+
+// Configuration of Shielded Nodes feature.
+message ShieldedNodes {
+ // Whether Shielded Nodes features are enabled on all nodes in this cluster.
+ bool enabled = 1;
+}
diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py
index cfe4b9166c0e..5825f4d995f2 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/async_client.py
@@ -44,9 +44,47 @@ class ClusterManagerAsyncClient:
DEFAULT_ENDPOINT = ClusterManagerClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ClusterManagerClient.DEFAULT_MTLS_ENDPOINT
+ common_billing_account_path = staticmethod(
+ ClusterManagerClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ ClusterManagerClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(ClusterManagerClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ ClusterManagerClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ ClusterManagerClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ ClusterManagerClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(ClusterManagerClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ ClusterManagerClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(ClusterManagerClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ ClusterManagerClient.parse_common_location_path
+ )
+
from_service_account_file = ClusterManagerClient.from_service_account_file
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> ClusterManagerTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ ClusterManagerTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
get_transport_class = functools.partial(
type(ClusterManagerClient).get_transport_class, type(ClusterManagerClient)
)
@@ -158,7 +196,8 @@ async def list_clusters(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, parent]):
+ has_flattened_params = any([project_id, zone, parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -268,7 +307,8 @@ async def get_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, name]):
+ has_flattened_params = any([project_id, zone, cluster_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -365,7 +405,7 @@ async def create_cluster(
should not be set.
cluster (:class:`~.cluster_service.Cluster`):
Required. A `cluster
-                resource <https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters>`__
+                resource <https://cloud.google.com/container-engine/reference/rest/v1/projects.locations.clusters>`__
This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -394,7 +434,8 @@ async def create_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster, parent]):
+ has_flattened_params = any([project_id, zone, cluster, parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -509,7 +550,8 @@ async def update_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, update, name]):
+ has_flattened_params = any([project_id, zone, cluster_id, update, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -705,11 +747,19 @@ async def set_logging_service(
should not be set.
logging_service (:class:`str`):
Required. The logging service the cluster should use to
- write metrics. Currently available options:
-
- - "logging.googleapis.com" - the Google Cloud Logging
- service
- - "none" - no metrics will be exported from the cluster
+ write logs. Currently available options:
+
+ - ``logging.googleapis.com/kubernetes`` - The Cloud
+ Logging service with a Kubernetes-native resource
+ model
+ - ``logging.googleapis.com`` - The legacy Cloud Logging
+ service (no longer available as of GKE 1.15).
+ - ``none`` - no logs will be exported from the cluster.
+
+ If left as an empty
+            string, ``logging.googleapis.com/kubernetes`` will be
+ used for GKE 1.14+ or ``logging.googleapis.com`` for
+ earlier versions.
This corresponds to the ``logging_service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -738,9 +788,10 @@ async def set_logging_service(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
+ has_flattened_params = any(
[project_id, zone, cluster_id, logging_service, name]
- ):
+ )
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -830,12 +881,19 @@ async def set_monitoring_service(
Required. The monitoring service the cluster should use
to write metrics. Currently available options:
- - "monitoring.googleapis.com/kubernetes" - the Google
- Cloud Monitoring service with Kubernetes-native
- resource model
- - "monitoring.googleapis.com" - the Google Cloud
- Monitoring service
- - "none" - no metrics will be exported from the cluster
+ - "monitoring.googleapis.com/kubernetes" - The Cloud
+ Monitoring service with a Kubernetes-native resource
+ model
+ - ``monitoring.googleapis.com`` - The legacy Cloud
+ Monitoring service (no longer available as of GKE
+ 1.15).
+ - ``none`` - No metrics will be exported from the
+ cluster.
+
+ If left as an empty
+            string, ``monitoring.googleapis.com/kubernetes`` will
+ be used for GKE 1.14+ or ``monitoring.googleapis.com``
+ for earlier versions.
This corresponds to the ``monitoring_service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -864,9 +922,10 @@ async def set_monitoring_service(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
+ has_flattened_params = any(
[project_id, zone, cluster_id, monitoring_service, name]
- ):
+ )
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -984,9 +1043,8 @@ async def set_addons_config(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, addons_config, name]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, addons_config, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1041,7 +1099,9 @@ async def set_locations(
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
- r"""Sets the locations for a specific cluster.
+ r"""Sets the locations for a specific cluster. Deprecated. Use
+        `projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update>`__
+ instead.
Args:
request (:class:`~.cluster_service.SetLocationsRequest`):
@@ -1110,7 +1170,8 @@ async def set_locations(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, locations, name]):
+ has_flattened_params = any([project_id, zone, cluster_id, locations, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1127,11 +1188,12 @@ async def set_locations(
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
- if locations is not None:
- request.locations = locations
if name is not None:
request.name = name
+ if locations:
+ request.locations.extend(locations)
+
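The switch from assignment to `extend` mutates the repeated field in place; together with the truthy check above it also leaves the request untouched when an empty list is passed. An illustration with hypothetical zones:

```python
from google.cloud import container_v1

request = container_v1.SetLocationsRequest()
request.locations.extend(["us-central1-a", "us-central1-b"])  # append in place
```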
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
@@ -1239,9 +1301,8 @@ async def update_master(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, master_version, name]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, master_version, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1416,7 +1477,8 @@ async def delete_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, name]):
+ has_flattened_params = any([project_id, zone, cluster_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1515,7 +1577,8 @@ async def list_operations(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone]):
+ has_flattened_params = any([project_id, zone])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1566,6 +1629,7 @@ async def get_operation(
project_id: str = None,
zone: str = None,
operation_id: str = None,
+ name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
@@ -1600,6 +1664,13 @@ async def get_operation(
This corresponds to the ``operation_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
+ name (:class:`str`):
+ The name (project, location, operation id) of the
+ operation to get. Specified in the format
+ ``projects/*/locations/*/operations/*``.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
@@ -1618,7 +1689,8 @@ async def get_operation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, operation_id]):
+ has_flattened_params = any([project_id, zone, operation_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1635,6 +1707,8 @@ async def get_operation(
request.zone = zone
if operation_id is not None:
request.operation_id = operation_id
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -1723,7 +1797,8 @@ async def cancel_operation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, operation_id, name]):
+ has_flattened_params = any([project_id, zone, operation_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1819,7 +1894,8 @@ async def get_server_config(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, name]):
+ has_flattened_params = any([project_id, zone, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1865,6 +1941,64 @@ async def get_server_config(
# Done; return the response.
return response
+ async def get_json_web_keys(
+ self,
+ request: cluster_service.GetJSONWebKeysRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.GetJSONWebKeysResponse:
+ r"""Gets the public component of the cluster signing keys
+ in JSON Web Key format.
+ This API is not yet intended for general use, and is not
+ available for all clusters.
+
+ Args:
+ request (:class:`~.cluster_service.GetJSONWebKeysRequest`):
+ The request object. GetJSONWebKeysRequest gets the
+ public component of the keys used by the cluster to sign
+ token requests. This will be the jwks_uri for the
+                discovery document returned by getOpenIDConfig. See the
+ OpenID Connect Discovery 1.0 specification for details.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cluster_service.GetJSONWebKeysResponse:
+ GetJSONWebKeysResponse is a valid
+                JSON Web Key Set as specified in RFC
+                7517
+
+ """
+ # Create or coerce a protobuf request object.
+
+ request = cluster_service.GetJSONWebKeysRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_json_web_keys,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
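A hedged sketch of calling the new RPC from asyncio; the cluster name is hypothetical, and per the docstring the API is not yet intended for general use.

```python
import asyncio

from google.cloud import container_v1


async def main():
    client = container_v1.ClusterManagerAsyncClient()
    response = await client.get_json_web_keys(
        request=container_v1.GetJSONWebKeysRequest(
            parent="projects/my-project/locations/us-central1/clusters/my-cluster"
        )
    )
    for jwk in response.keys:
        print(jwk.kid, jwk.kty, jwk.alg)


asyncio.run(main())
```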
async def list_node_pools(
self,
request: cluster_service.ListNodePoolsRequest = None,
@@ -1930,7 +2064,8 @@ async def list_node_pools(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, parent]):
+ has_flattened_params = any([project_id, zone, cluster_id, parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2059,9 +2194,8 @@ async def get_node_pool(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, node_pool_id, name]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2184,9 +2318,8 @@ async def create_node_pool(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, node_pool, parent]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool, parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2304,9 +2437,8 @@ async def delete_node_pool(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, node_pool_id, name]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2438,9 +2570,8 @@ async def rollback_node_pool_upgrade(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, node_pool_id, name]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2666,7 +2797,8 @@ async def set_legacy_abac(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, enabled, name]):
+ has_flattened_params = any([project_id, zone, cluster_id, enabled, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2776,7 +2908,8 @@ async def start_ip_rotation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, name]):
+ has_flattened_params = any([project_id, zone, cluster_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2883,7 +3016,8 @@ async def complete_ip_rotation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, name]):
+ has_flattened_params = any([project_id, zone, cluster_id, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -3050,9 +3184,8 @@ async def set_network_policy(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, network_policy, name]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, network_policy, name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -3165,9 +3298,10 @@ async def set_maintenance_policy(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
+ has_flattened_params = any(
[project_id, zone, cluster_id, maintenance_policy, name]
- ):
+ )
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py
index 3a0b89135010..ab4b7c165001 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/client.py
@@ -19,10 +19,10 @@
from distutils import util
import os
import re
-from typing import Callable, Dict, Sequence, Tuple, Type, Union
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
@@ -130,12 +130,80 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> ClusterManagerTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ ClusterManagerTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
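These path helpers are static methods, so they work without a constructed client; a quick sketch of building and parsing one of the common resource names:

    from google.cloud.container_v1 import ClusterManagerClient

    path = ClusterManagerClient.common_location_path("my-proj", "us-central1")
    # -> "projects/my-proj/locations/us-central1"
    parsed = ClusterManagerClient.parse_common_location_path(path)
    # -> {"project": "my-proj", "location": "us-central1"}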
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
- transport: Union[str, ClusterManagerTransport] = None,
- client_options: ClientOptions = None,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, ClusterManagerTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the cluster manager client.
@@ -149,8 +217,8 @@ def __init__(
transport (Union[str, ~.ClusterManagerTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
+ client_options (client_options_lib.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
@@ -165,10 +233,10 @@ def __init__(
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -176,9 +244,9 @@ def __init__(
creation failed for any reason.
"""
if isinstance(client_options, dict):
- client_options = ClientOptions.from_dict(client_options)
+ client_options = client_options_lib.from_dict(client_options)
if client_options is None:
- client_options = ClientOptions.ClientOptions()
+ client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
@@ -501,7 +569,7 @@ def create_cluster(
should not be set.
cluster (:class:`~.cluster_service.Cluster`):
Required. A `cluster
- resource `__
+ resource `__
This corresponds to the ``cluster`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -849,11 +917,19 @@ def set_logging_service(
should not be set.
logging_service (:class:`str`):
Required. The logging service the cluster should use to
- write metrics. Currently available options:
-
- - "logging.googleapis.com" - the Google Cloud Logging
- service
- - "none" - no metrics will be exported from the cluster
+ write logs. Currently available options:
+
+ - ``logging.googleapis.com/kubernetes`` - The Cloud
+ Logging service with a Kubernetes-native resource
+ model
+ - ``logging.googleapis.com`` - The legacy Cloud Logging
+ service (no longer available as of GKE 1.15).
+ - ``none`` - no logs will be exported from the cluster.
+
+ If left as an empty
+ string,\ ``logging.googleapis.com/kubernetes`` will be
+ used for GKE 1.14+ or ``logging.googleapis.com`` for
+ earlier versions.
This corresponds to the ``logging_service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
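A hedged sketch of switching a cluster to the Kubernetes-native logging backend described above (`name` is one of the flattened fields this method accepts; identifiers hypothetical):

    from google.cloud import container_v1

    client = container_v1.ClusterManagerClient()
    operation = client.set_logging_service(
        name="projects/my-proj/locations/us-central1/clusters/my-cluster",
        logging_service="logging.googleapis.com/kubernetes",
    )
    print(operation.status)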
@@ -976,12 +1052,19 @@ def set_monitoring_service(
Required. The monitoring service the cluster should use
to write metrics. Currently available options:
- - "monitoring.googleapis.com/kubernetes" - the Google
- Cloud Monitoring service with Kubernetes-native
- resource model
- - "monitoring.googleapis.com" - the Google Cloud
- Monitoring service
- - "none" - no metrics will be exported from the cluster
+ - "monitoring.googleapis.com/kubernetes" - The Cloud
+ Monitoring service with a Kubernetes-native resource
+ model
+ - ``monitoring.googleapis.com`` - The legacy Cloud
+ Monitoring service (no longer available as of GKE
+ 1.15).
+ - ``none`` - No metrics will be exported from the
+ cluster.
+
+ If left as an empty
+ string,\ ``monitoring.googleapis.com/kubernetes`` will
+ be used for GKE 1.14+ or ``monitoring.googleapis.com``
+ for earlier versions.
This corresponds to the ``monitoring_service`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
@@ -1189,7 +1272,9 @@ def set_locations(
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> cluster_service.Operation:
- r"""Sets the locations for a specific cluster.
+ r"""Sets the locations for a specific cluster. Deprecated. Use
+ `projects.locations.clusters.update `__
+ instead.
Args:
request (:class:`~.cluster_service.SetLocationsRequest`):
@@ -1281,11 +1366,12 @@ def set_locations(
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
- if locations is not None:
- request.locations = locations
if name is not None:
request.name = name
+ if locations:
+ request.locations.extend(locations)
+
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_locations]
@@ -1705,6 +1791,7 @@ def get_operation(
project_id: str = None,
zone: str = None,
operation_id: str = None,
+ name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
@@ -1739,6 +1826,13 @@ def get_operation(
This corresponds to the ``operation_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
+ name (:class:`str`):
+ The name (project, location, operation id) of the
+ operation to get. Specified in the format
+ ``projects/*/locations/*/operations/*``.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
@@ -1757,7 +1851,7 @@ def get_operation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- has_flattened_params = any([project_id, zone, operation_id])
+ has_flattened_params = any([project_id, zone, operation_id, name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
@@ -1780,6 +1874,8 @@ def get_operation(
request.zone = zone
if operation_id is not None:
request.operation_id = operation_id
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
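With the new `name` field, an operation can now be fetched by resource name alone; a short sketch (identifiers hypothetical):

    from google.cloud import container_v1

    client = container_v1.ClusterManagerClient()
    op = client.get_operation(
        name="projects/my-proj/locations/us-central1/operations/operation-123"
    )
    print(op.status, op.detail)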
@@ -1994,6 +2090,65 @@ def get_server_config(
# Done; return the response.
return response
+ def get_json_web_keys(
+ self,
+ request: cluster_service.GetJSONWebKeysRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.GetJSONWebKeysResponse:
+ r"""Gets the public component of the cluster signing keys
+ in JSON Web Key format.
+ This API is not yet intended for general use, and is not
+ available for all clusters.
+
+ Args:
+ request (:class:`~.cluster_service.GetJSONWebKeysRequest`):
+ The request object. GetJSONWebKeysRequest gets the
+ public component of the keys used by the cluster to sign
+ token requests. This will be the jwks_uri for the
+ discovery document returned by getOpenIDConfig. See the
+ OpenID Connect Discovery 1.0 specification for details.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cluster_service.GetJSONWebKeysResponse:
+ GetJSONWebKeysResponse is a valid
+ JSON Web Key Set as specified in RFC
+ 7517
+
+ """
+ # Create or coerce a protobuf request object.
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.GetJSONWebKeysRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cluster_service.GetJSONWebKeysRequest):
+ request = cluster_service.GetJSONWebKeysRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_json_web_keys]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
def list_node_pools(
self,
request: cluster_service.ListNodePoolsRequest = None,
diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py
index 73b54881904b..9f79bd3f1e3d 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/base.py
@@ -220,6 +220,9 @@ def _prep_wrapped_messages(self, client_info):
default_timeout=20.0,
client_info=client_info,
),
+ self.get_json_web_keys: gapic_v1.method.wrap_method(
+ self.get_json_web_keys, default_timeout=None, client_info=client_info,
+ ),
self.list_node_pools: gapic_v1.method.wrap_method(
self.list_node_pools,
default_retry=retries.Retry(
@@ -491,6 +494,18 @@ def get_server_config(
]:
raise NotImplementedError()
+ @property
+ def get_json_web_keys(
+ self,
+ ) -> typing.Callable[
+ [cluster_service.GetJSONWebKeysRequest],
+ typing.Union[
+ cluster_service.GetJSONWebKeysResponse,
+ typing.Awaitable[cluster_service.GetJSONWebKeysResponse],
+ ],
+ ]:
+ raise NotImplementedError()
+
@property
def list_node_pools(
self,
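For context, `gapic_v1.method.wrap_method` is what applies the per-RPC defaults registered above: `get_json_web_keys` gets no default timeout, while the retried methods get both a retry policy and a timeout. A standalone sketch of the mechanism (the echo function is a stand-in for a real RPC):

    from google.api_core import gapic_v1
    from google.api_core import retry as retries

    def echo(request, timeout=None, metadata=()):
        return request

    wrapped = gapic_v1.method.wrap_method(
        echo,
        default_retry=retries.Retry(initial=0.1, maximum=60.0, multiplier=1.3),
        default_timeout=20.0,
    )
    # Callers inherit the defaults unless they pass retry=/timeout= per call.
    wrapped("hello")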
diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py
index c738e64a8dfe..984a9db36e3b 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc.py
@@ -90,10 +90,10 @@ def __init__(
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -102,6 +102,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -109,6 +111,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -145,6 +148,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
@@ -222,12 +226,8 @@ def create_channel(
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Return the channel from cache.
return self._grpc_channel
@property
@@ -493,7 +493,9 @@ def set_locations(
) -> Callable[[cluster_service.SetLocationsRequest], cluster_service.Operation]:
r"""Return a callable for the set locations method over gRPC.
- Sets the locations for a specific cluster.
+ Sets the locations for a specific cluster. Deprecated. Use
+ `projects.locations.clusters.update `__
+ instead.
Returns:
Callable[[~.SetLocationsRequest],
@@ -713,6 +715,37 @@ def get_server_config(
)
return self._stubs["get_server_config"]
+ @property
+ def get_json_web_keys(
+ self,
+ ) -> Callable[
+ [cluster_service.GetJSONWebKeysRequest], cluster_service.GetJSONWebKeysResponse
+ ]:
+ r"""Return a callable for the get json web keys method over gRPC.
+
+ Gets the public component of the cluster signing keys
+ in JSON Web Key format.
+ This API is not yet intended for general use, and is not
+ available for all clusters.
+
+ Returns:
+ Callable[[~.GetJSONWebKeysRequest],
+ ~.GetJSONWebKeysResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_json_web_keys" not in self._stubs:
+ self._stubs["get_json_web_keys"] = self.grpc_channel.unary_unary(
+ "/google.container.v1.ClusterManager/GetJSONWebKeys",
+ request_serializer=cluster_service.GetJSONWebKeysRequest.serialize,
+ response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize,
+ )
+ return self._stubs["get_json_web_keys"]
+
@property
def list_node_pools(
self,
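The new property follows the transport's lazy stub-caching pattern: build the gRPC callable on first access, then reuse it. The pattern in isolation (a sketch; `channel` is any `grpc.Channel`):

    class LazyStubs:
        def __init__(self, channel):
            self._channel = channel
            self._stubs = {}

        def unary(self, path, request_serializer, response_deserializer):
            # Create the callable once per RPC path and cache it.
            if path not in self._stubs:
                self._stubs[path] = self._channel.unary_unary(
                    path,
                    request_serializer=request_serializer,
                    response_deserializer=response_deserializer,
                )
            return self._stubs[path]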
diff --git a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py
index e53f51f9834e..6a66a1ba3a66 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py
@@ -147,6 +147,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -154,6 +156,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -190,6 +193,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
@@ -508,7 +512,9 @@ def set_locations(
]:
r"""Return a callable for the set locations method over gRPC.
- Sets the locations for a specific cluster.
+ Sets the locations for a specific cluster. Deprecated. Use
+ `projects.locations.clusters.update `__
+ instead.
Returns:
Callable[[~.SetLocationsRequest],
@@ -738,6 +744,38 @@ def get_server_config(
)
return self._stubs["get_server_config"]
+ @property
+ def get_json_web_keys(
+ self,
+ ) -> Callable[
+ [cluster_service.GetJSONWebKeysRequest],
+ Awaitable[cluster_service.GetJSONWebKeysResponse],
+ ]:
+ r"""Return a callable for the get json web keys method over gRPC.
+
+ Gets the public component of the cluster signing keys
+ in JSON Web Key format.
+ This API is not yet intended for general use, and is not
+ available for all clusters.
+
+ Returns:
+ Callable[[~.GetJSONWebKeysRequest],
+ Awaitable[~.GetJSONWebKeysResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_json_web_keys" not in self._stubs:
+ self._stubs["get_json_web_keys"] = self.grpc_channel.unary_unary(
+ "/google.container.v1.ClusterManager/GetJSONWebKeys",
+ request_serializer=cluster_service.GetJSONWebKeysRequest.serialize,
+ response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize,
+ )
+ return self._stubs["get_json_web_keys"]
+
@property
def list_node_pools(
self,
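On the asyncio transport the same stub returns an awaitable, so the RPC surfaces on `ClusterManagerAsyncClient` as a coroutine; a hedged sketch (parent value hypothetical, and the RPC is documented above as not yet for general use):

    import asyncio

    from google.cloud import container_v1

    async def main():
        client = container_v1.ClusterManagerAsyncClient()
        response = await client.get_json_web_keys(
            request=container_v1.GetJSONWebKeysRequest(
                parent="projects/my-proj/locations/us-central1/clusters/my-cluster"
            )
        )
        for jwk in response.keys:
            print(jwk.kid, jwk.alg)

    asyncio.run(main())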
diff --git a/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py b/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py
index 1ab798870449..d35b91681f22 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/types/__init__.py
@@ -18,6 +18,8 @@
from .cluster_service import (
NodeConfig,
ShieldedInstanceConfig,
+ SandboxConfig,
+ ReservationAffinity,
NodeTaint,
MasterAuth,
ClientCertificateConfig,
@@ -26,9 +28,12 @@
HorizontalPodAutoscaling,
KubernetesDashboard,
NetworkPolicyConfig,
+ DnsCacheConfig,
+ PrivateClusterMasterGlobalAccessConfig,
PrivateClusterConfig,
AuthenticatorGroupsConfig,
CloudRunConfig,
+ ConfigConnectorConfig,
MasterAuthorizedNetworksConfig,
LegacyAbac,
NetworkPolicy,
@@ -37,6 +42,7 @@
Cluster,
ClusterUpdate,
Operation,
+ OperationProgress,
CreateClusterRequest,
GetClusterRequest,
UpdateClusterRequest,
@@ -82,12 +88,20 @@
StartIPRotationRequest,
CompleteIPRotationRequest,
AcceleratorConfig,
+ WorkloadMetadataConfig,
SetNetworkPolicyRequest,
SetMaintenancePolicyRequest,
StatusCondition,
NetworkConfig,
+ GetOpenIDConfigRequest,
+ GetOpenIDConfigResponse,
+ GetJSONWebKeysRequest,
+ Jwk,
+ GetJSONWebKeysResponse,
+ ReleaseChannel,
IntraNodeVisibilityConfig,
MaxPodsConstraint,
+ WorkloadIdentityConfig,
DatabaseEncryption,
ListUsableSubnetworksRequest,
ListUsableSubnetworksResponse,
@@ -95,12 +109,16 @@
UsableSubnetwork,
ResourceUsageExportConfig,
VerticalPodAutoscaling,
+ DefaultSnatStatus,
+ ShieldedNodes,
)
__all__ = (
"NodeConfig",
"ShieldedInstanceConfig",
+ "SandboxConfig",
+ "ReservationAffinity",
"NodeTaint",
"MasterAuth",
"ClientCertificateConfig",
@@ -109,9 +127,12 @@
"HorizontalPodAutoscaling",
"KubernetesDashboard",
"NetworkPolicyConfig",
+ "DnsCacheConfig",
+ "PrivateClusterMasterGlobalAccessConfig",
"PrivateClusterConfig",
"AuthenticatorGroupsConfig",
"CloudRunConfig",
+ "ConfigConnectorConfig",
"MasterAuthorizedNetworksConfig",
"LegacyAbac",
"NetworkPolicy",
@@ -120,6 +141,7 @@
"Cluster",
"ClusterUpdate",
"Operation",
+ "OperationProgress",
"CreateClusterRequest",
"GetClusterRequest",
"UpdateClusterRequest",
@@ -165,12 +187,20 @@
"StartIPRotationRequest",
"CompleteIPRotationRequest",
"AcceleratorConfig",
+ "WorkloadMetadataConfig",
"SetNetworkPolicyRequest",
"SetMaintenancePolicyRequest",
"StatusCondition",
"NetworkConfig",
+ "GetOpenIDConfigRequest",
+ "GetOpenIDConfigResponse",
+ "GetJSONWebKeysRequest",
+ "Jwk",
+ "GetJSONWebKeysResponse",
+ "ReleaseChannel",
"IntraNodeVisibilityConfig",
"MaxPodsConstraint",
+ "WorkloadIdentityConfig",
"DatabaseEncryption",
"ListUsableSubnetworksRequest",
"ListUsableSubnetworksResponse",
@@ -178,4 +208,6 @@
"UsableSubnetwork",
"ResourceUsageExportConfig",
"VerticalPodAutoscaling",
+ "DefaultSnatStatus",
+ "ShieldedNodes",
)
diff --git a/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py b/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py
index 010546851071..0706e5018c11 100644
--- a/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py
+++ b/packages/google-cloud-container/google/cloud/container_v1/types/cluster_service.py
@@ -26,6 +26,8 @@
manifest={
"NodeConfig",
"ShieldedInstanceConfig",
+ "SandboxConfig",
+ "ReservationAffinity",
"NodeTaint",
"MasterAuth",
"ClientCertificateConfig",
@@ -34,9 +36,12 @@
"HorizontalPodAutoscaling",
"KubernetesDashboard",
"NetworkPolicyConfig",
+ "DnsCacheConfig",
+ "PrivateClusterMasterGlobalAccessConfig",
"PrivateClusterConfig",
"AuthenticatorGroupsConfig",
"CloudRunConfig",
+ "ConfigConnectorConfig",
"MasterAuthorizedNetworksConfig",
"LegacyAbac",
"NetworkPolicy",
@@ -45,6 +50,7 @@
"Cluster",
"ClusterUpdate",
"Operation",
+ "OperationProgress",
"CreateClusterRequest",
"GetClusterRequest",
"UpdateClusterRequest",
@@ -90,12 +96,20 @@
"StartIPRotationRequest",
"CompleteIPRotationRequest",
"AcceleratorConfig",
+ "WorkloadMetadataConfig",
"SetNetworkPolicyRequest",
"SetMaintenancePolicyRequest",
"StatusCondition",
"NetworkConfig",
+ "GetOpenIDConfigRequest",
+ "GetOpenIDConfigResponse",
+ "GetJSONWebKeysRequest",
+ "Jwk",
+ "GetJSONWebKeysResponse",
+ "ReleaseChannel",
"IntraNodeVisibilityConfig",
"MaxPodsConstraint",
+ "WorkloadIdentityConfig",
"DatabaseEncryption",
"ListUsableSubnetworksRequest",
"ListUsableSubnetworksResponse",
@@ -103,6 +117,8 @@
"UsableSubnetwork",
"ResourceUsageExportConfig",
"VerticalPodAutoscaling",
+ "DefaultSnatStatus",
+ "ShieldedNodes",
},
)
@@ -114,10 +130,8 @@ class NodeConfig(proto.Message):
machine_type (str):
The name of a Google Compute Engine `machine
type `__
- (e.g. ``n1-standard-1``).
- If unspecified, the default machine type is
- ``n1-standard-1``.
+ If unspecified, the default machine type is ``e2-medium``.
disk_size_gb (int):
Size of the disk attached to each node,
specified in GB. The smallest allowed disk size
@@ -142,25 +156,43 @@ class NodeConfig(proto.Message):
scopes will be added.
service_account (str):
The Google Cloud Platform Service Account to
- be used by the node VMs. If no Service Account
- is specified, the "default" service account is
- used.
+ be used by the node VMs. Specify the email
+ address of the Service Account; otherwise, if no
+ Service Account is specified, the "default"
+ service account is used.
metadata (Sequence[~.cluster_service.NodeConfig.MetadataEntry]):
The metadata key/value pairs assigned to instances in the
cluster.
- Keys must conform to the regexp [a-zA-Z0-9-_]+ and be less
- than 128 bytes in length. These are reflected as part of a
- URL in the metadata server. Additionally, to avoid
+ Keys must conform to the regexp ``[a-zA-Z0-9-_]+`` and be
+ less than 128 bytes in length. These are reflected as part
+ of a URL in the metadata server. Additionally, to avoid
ambiguity, keys must not conflict with any other metadata
keys for the project or be one of the reserved keys:
- "cluster-location" "cluster-name" "cluster-uid"
- "configure-sh" "containerd-configure-sh" "enable-os-login"
- "gci-update-strategy" "gci-ensure-gke-docker"
- "instance-template" "kube-env" "startup-script" "user-data"
- "disable-address-manager" "windows-startup-script-ps1"
- "common-psm1" "k8s-node-setup-psm1" "install-ssh-psm1"
- "user-profile-psm1" "serial-port-logging-enable"
+
+ - "cluster-location"
+ - "cluster-name"
+ - "cluster-uid"
+ - "configure-sh"
+ - "containerd-configure-sh"
+ - "enable-os-login"
+ - "gci-ensure-gke-docker"
+ - "gci-metrics-enabled"
+ - "gci-update-strategy"
+ - "instance-template"
+ - "kube-env"
+ - "startup-script"
+ - "user-data"
+ - "disable-address-manager"
+ - "windows-startup-script-ps1"
+ - "common-psm1"
+ - "k8s-node-setup-psm1"
+ - "install-ssh-psm1"
+ - "user-profile-psm1"
+
+ The following keys are reserved for Windows nodes:
+
+ - "serial-port-logging-enable"
Values are free-form strings, and only have meaning as
interpreted by the image running in the instance. The only
@@ -214,17 +246,21 @@ class NodeConfig(proto.Message):
more information about support for GPUs.
disk_type (str):
Type of the disk attached to each node (e.g.
- 'pd-standard' or 'pd-ssd')
+ 'pd-standard', 'pd-ssd' or 'pd-balanced')
+
If unspecified, the default disk type is 'pd-standard'
min_cpu_platform (str):
Minimum CPU platform to be used by this instance. The
instance may be scheduled on the specified or newer CPU
platform. Applicable values are the friendly names of CPU
- platforms, such as minCpuPlatform: "Intel Haswell" or
- minCpuPlatform: "Intel Sandy Bridge". For more information,
- read `how to specify min CPU
+ platforms, such as ``minCpuPlatform: "Intel Haswell"`` or
+ ``minCpuPlatform: "Intel Sandy Bridge"``. For more
+ information, read `how to specify min CPU
platform `__
+ workload_metadata_config (~.cluster_service.WorkloadMetadataConfig):
+ The workload metadata configuration for this
+ node.
taints (Sequence[~.cluster_service.NodeTaint]):
List of kubernetes taints to be applied to
each node.
@@ -232,8 +268,28 @@ class NodeConfig(proto.Message):
valid values, see:
https://kubernetes.io/docs/concepts/configuration/taint-
and-toleration/
+ sandbox_config (~.cluster_service.SandboxConfig):
+ Sandbox configuration for this node.
+ node_group (str):
+ Setting this field will assign instances of this pool to run
+ on the specified node group. This is useful for running
+ workloads on `sole tenant
+ nodes `__.
+ reservation_affinity (~.cluster_service.ReservationAffinity):
+ The optional reservation affinity. Setting this field will
+ apply the specified `Zonal Compute
+ Reservation `__
+ to this node pool.
shielded_instance_config (~.cluster_service.ShieldedInstanceConfig):
Shielded Instance options.
+ boot_disk_kms_key (str):
+ The Customer Managed Encryption Key used to encrypt the boot
+ disk attached to each node in the node pool. This should be
+ of the form
+ projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME].
+ For more information about protecting resources with Cloud
+ KMS Keys please see:
+ https://cloud.google.com/compute/docs/disks/customer-managed-encryption
"""
machine_type = proto.Field(proto.STRING, number=1)
@@ -264,12 +320,26 @@ class NodeConfig(proto.Message):
min_cpu_platform = proto.Field(proto.STRING, number=13)
+ workload_metadata_config = proto.Field(
+ proto.MESSAGE, number=14, message="WorkloadMetadataConfig",
+ )
+
taints = proto.RepeatedField(proto.MESSAGE, number=15, message="NodeTaint",)
+ sandbox_config = proto.Field(proto.MESSAGE, number=17, message="SandboxConfig",)
+
+ node_group = proto.Field(proto.STRING, number=18)
+
+ reservation_affinity = proto.Field(
+ proto.MESSAGE, number=19, message="ReservationAffinity",
+ )
+
shielded_instance_config = proto.Field(
proto.MESSAGE, number=20, message="ShieldedInstanceConfig",
)
+ boot_disk_kms_key = proto.Field(proto.STRING, number=23)
+
class ShieldedInstanceConfig(proto.Message):
r"""A set of Shielded Instance options.
@@ -299,13 +369,66 @@ class ShieldedInstanceConfig(proto.Message):
enable_integrity_monitoring = proto.Field(proto.BOOL, number=2)
+class SandboxConfig(proto.Message):
+ r"""SandboxConfig contains configurations of the sandbox to use
+ for the node.
+
+ Attributes:
+ type_ (~.cluster_service.SandboxConfig.Type):
+ Type of the sandbox to use for the node.
+ """
+
+ class Type(proto.Enum):
+ r"""Possible types of sandboxes."""
+ UNSPECIFIED = 0
+ GVISOR = 1
+
+ type_ = proto.Field(proto.ENUM, number=2, enum=Type,)
+
+
+class ReservationAffinity(proto.Message):
+ r"""`ReservationAffinity `__
+ is the configuration of the desired reservation from which instances can
+ consume capacity.
+
+ Attributes:
+ consume_reservation_type (~.cluster_service.ReservationAffinity.Type):
+ Corresponds to the type of reservation
+ consumption.
+ key (str):
+ Corresponds to the label key of a reservation resource. To
+ target a SPECIFIC_RESERVATION by name, specify
+ "googleapis.com/reservation-name" as the key and specify the
+ name of your reservation as its value.
+ values (Sequence[str]):
+ Corresponds to the label value(s) of
+ reservation resource(s).
+ """
+
+ class Type(proto.Enum):
+ r"""Indicates whether to consume capacity from a reservation or
+ not.
+ """
+ UNSPECIFIED = 0
+ NO_RESERVATION = 1
+ ANY_RESERVATION = 2
+ SPECIFIC_RESERVATION = 3
+
+ consume_reservation_type = proto.Field(proto.ENUM, number=1, enum=Type,)
+
+ key = proto.Field(proto.STRING, number=2)
+
+ values = proto.RepeatedField(proto.STRING, number=3)
+
+
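A short sketch of pinning node capacity to a named reservation, following the SPECIFIC_RESERVATION note above (all identifiers hypothetical):

    from google.cloud.container_v1.types import NodeConfig, ReservationAffinity

    config = NodeConfig(
        machine_type="e2-medium",
        reservation_affinity=ReservationAffinity(
            consume_reservation_type=ReservationAffinity.Type.SPECIFIC_RESERVATION,
            key="googleapis.com/reservation-name",
            values=["my-reservation"],
        ),
    )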
class NodeTaint(proto.Message):
- r"""Kubernetes taint is comprised of three fields: key, value,
- and effect. Effect can only be one of three types: NoSchedule,
+ r"""Kubernetes taint is comprised of three fields: key, value, and
+ effect. Effect can only be one of three types: NoSchedule,
PreferNoSchedule or NoExecute.
- For more information, including usage and the valid values, see:
- https://kubernetes.io/docs/concepts/configuration/taint-and-
- toleration/
+
+ See
+ `here `__
+ for more information, including usage and the valid values.
Attributes:
key (str):
@@ -342,6 +465,12 @@ class MasterAuth(proto.Message):
clusters v1.6.0 and later, basic authentication
can be disabled by leaving username unspecified
(or setting it to the empty string).
+ Warning: basic authentication is deprecated, and
+ will be removed in GKE control plane versions
+ 1.19 and newer. For a list of recommended
+ authentication methods, see:
+ https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication
password (str):
The password to use for HTTP basic
authentication to the master endpoint. Because
@@ -349,6 +478,13 @@ class MasterAuth(proto.Message):
should create a strong password. If a password
is provided for cluster creation, username must
be non-empty.
+
+ Warning: basic authentication is deprecated, and
+ will be removed in GKE control plane versions
+ 1.19 and newer. For a list of recommended
+ authentication methods, see:
+ https://cloud.google.com/kubernetes-engine/docs/how-to/api-server-authentication
client_certificate_config (~.cluster_service.ClientCertificateConfig):
Configuration for client certificate
authentication on the cluster. For clusters
@@ -425,6 +561,13 @@ class AddonsConfig(proto.Message):
Configuration for the Cloud Run addon, which
allows the user to use a managed Knative
service.
+ dns_cache_config (~.cluster_service.DnsCacheConfig):
+ Configuration for NodeLocalDNS, a DNS cache
+ running on cluster nodes.
+ config_connector_config (~.cluster_service.ConfigConnectorConfig):
+ Configuration for the ConfigConnector add-on,
+ a Kubernetes extension to manage hosted GCP
+ services through the Kubernetes API
"""
http_load_balancing = proto.Field(
@@ -445,6 +588,12 @@ class AddonsConfig(proto.Message):
cloud_run_config = proto.Field(proto.MESSAGE, number=7, message="CloudRunConfig",)
+ dns_cache_config = proto.Field(proto.MESSAGE, number=8, message="DnsCacheConfig",)
+
+ config_connector_config = proto.Field(
+ proto.MESSAGE, number=10, message="ConfigConnectorConfig",
+ )
+
class HttpLoadBalancing(proto.Message):
r"""Configuration options for the HTTP (L7) load balancing
@@ -472,9 +621,8 @@ class HorizontalPodAutoscaling(proto.Message):
disabled (bool):
Whether the Horizontal Pod Autoscaling
feature is enabled in the cluster. When enabled,
- it ensures that a Heapster pod is running in the
- cluster, which is also used by the Cloud
- Monitoring service.
+ it ensures that metrics are collected into
+ Stackdriver Monitoring.
"""
disabled = proto.Field(proto.BOOL, number=1)
@@ -506,6 +654,30 @@ class NetworkPolicyConfig(proto.Message):
disabled = proto.Field(proto.BOOL, number=1)
+class DnsCacheConfig(proto.Message):
+ r"""Configuration for NodeLocal DNSCache
+
+ Attributes:
+ enabled (bool):
+ Whether NodeLocal DNSCache is enabled for
+ this cluster.
+ """
+
+ enabled = proto.Field(proto.BOOL, number=1)
+
+
+class PrivateClusterMasterGlobalAccessConfig(proto.Message):
+ r"""Configuration for controlling master global access settings.
+
+ Attributes:
+ enabled (bool):
+ Whether the cluster master is accessible
+ globally or not.
+ """
+
+ enabled = proto.Field(proto.BOOL, number=1)
+
+
class PrivateClusterConfig(proto.Message):
r"""Configuration options for private clusters.
@@ -531,6 +703,11 @@ class PrivateClusterConfig(proto.Message):
public_endpoint (str):
Output only. The external IP address of this
cluster's master endpoint.
+ peering_name (str):
+ Output only. The peering name in the customer
+ VPC used by this cluster.
+ master_global_access_config (~.cluster_service.PrivateClusterMasterGlobalAccessConfig):
+ Controls master global access settings.
"""
enable_private_nodes = proto.Field(proto.BOOL, number=1)
@@ -543,6 +720,12 @@ class PrivateClusterConfig(proto.Message):
public_endpoint = proto.Field(proto.STRING, number=5)
+ peering_name = proto.Field(proto.STRING, number=7)
+
+ master_global_access_config = proto.Field(
+ proto.MESSAGE, number=8, message="PrivateClusterMasterGlobalAccessConfig",
+ )
+
class AuthenticatorGroupsConfig(proto.Message):
r"""Configuration for returning group information from
@@ -570,10 +753,33 @@ class CloudRunConfig(proto.Message):
disabled (bool):
Whether Cloud Run addon is enabled for this
cluster.
+ load_balancer_type (~.cluster_service.CloudRunConfig.LoadBalancerType):
+ Which load balancer type is installed for
+ Cloud Run.
"""
+ class LoadBalancerType(proto.Enum):
+ r"""Load balancer type of ingress service of Cloud Run."""
+ LOAD_BALANCER_TYPE_UNSPECIFIED = 0
+ LOAD_BALANCER_TYPE_EXTERNAL = 1
+ LOAD_BALANCER_TYPE_INTERNAL = 2
+
disabled = proto.Field(proto.BOOL, number=1)
+ load_balancer_type = proto.Field(proto.ENUM, number=3, enum=LoadBalancerType,)
+
+
+class ConfigConnectorConfig(proto.Message):
+ r"""Configuration options for the Config Connector add-on.
+
+ Attributes:
+ enabled (bool):
+ Whether Config Connector is enabled for this
+ cluster.
+ """
+
+ enabled = proto.Field(proto.BOOL, number=1)
+
class MasterAuthorizedNetworksConfig(proto.Message):
r"""Configuration options for the master authorized networks
@@ -670,8 +876,11 @@ class IPAllocationPolicy(proto.Message):
Attributes:
use_ip_aliases (bool):
- Whether alias IPs will be used for pod IPs in
- the cluster.
+ Whether alias IPs will be used for pod IPs in the cluster.
+ This is used in conjunction with use_routes. It cannot be
+ true if use_routes is true. If both use_ip_aliases and
+ use_routes are false, then the server picks the default IP
+ allocation mode.
create_subnetwork (bool):
Whether a new subnetwork will be created automatically for
the cluster.
@@ -773,6 +982,12 @@ class IPAllocationPolicy(proto.Message):
notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private
networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``,
``192.168.0.0/16``) to pick a specific range to use.
+ use_routes (bool):
+ Whether routes will be used for pod IPs in the cluster. This
+ is used in conjunction with use_ip_aliases. It cannot be
+ true if use_ip_aliases is true. If both use_ip_aliases and
+ use_routes are false, then the server picks the default IP
+ allocation mode.
"""
use_ip_aliases = proto.Field(proto.BOOL, number=1)
@@ -799,6 +1014,8 @@ class IPAllocationPolicy(proto.Message):
tpu_ipv4_cidr_block = proto.Field(proto.STRING, number=13)
+ use_routes = proto.Field(proto.BOOL, number=15)
+
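As the field docs above state, the two allocation modes are mutually exclusive; a tiny sketch:

    from google.cloud.container_v1.types import IPAllocationPolicy

    vpc_native = IPAllocationPolicy(use_ip_aliases=True)  # alias-IP mode
    routes_based = IPAllocationPolicy(use_routes=True)    # routes-based mode
    # Setting both to True is rejected by the server; the generated client
    # does not validate the combination locally.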
class Cluster(proto.Message):
r"""A Google Kubernetes Engine cluster.
@@ -850,22 +1067,31 @@ class Cluster(proto.Message):
The logging service the cluster should use to write logs.
Currently available options:
- - "logging.googleapis.com/kubernetes" - the Google Cloud
- Logging service with Kubernetes-native resource model
- - ``logging.googleapis.com`` - the Google Cloud Logging
- service.
+ - ``logging.googleapis.com/kubernetes`` - The Cloud Logging
+ service with a Kubernetes-native resource model
+ - ``logging.googleapis.com`` - The legacy Cloud Logging
+ service (no longer available as of GKE 1.15).
- ``none`` - no logs will be exported from the cluster.
- - if left as an empty string,\ ``logging.googleapis.com``
- will be used.
+
+ If left as an empty
+ string,\ ``logging.googleapis.com/kubernetes`` will be used
+ for GKE 1.14+ or ``logging.googleapis.com`` for earlier
+ versions.
monitoring_service (str):
The monitoring service the cluster should use to write
metrics. Currently available options:
- - ``monitoring.googleapis.com`` - the Google Cloud
- Monitoring service.
- - ``none`` - no metrics will be exported from the cluster.
- - if left as an empty string, ``monitoring.googleapis.com``
- will be used.
+ - "monitoring.googleapis.com/kubernetes" - The Cloud
+ Monitoring service with a Kubernetes-native resource
+ model
+ - ``monitoring.googleapis.com`` - The legacy Cloud
+ Monitoring service (no longer available as of GKE 1.15).
+ - ``none`` - No metrics will be exported from the cluster.
+
+ If left as an empty
+ string,\ ``monitoring.googleapis.com/kubernetes`` will be
+ used for GKE 1.14+ or ``monitoring.googleapis.com`` for
+ earlier versions.
network (str):
The name of the Google Compute Engine
`network `__
@@ -893,6 +1119,15 @@ class Cluster(proto.Message):
The list of Google Compute Engine
`zones `__
in which the cluster's nodes should be located.
+
+ This field provides a default value if
+ `NodePool.Locations `__
+ are not specified during node pool creation.
+
+ Warning: changing cluster locations will update the
+ `NodePool.Locations `__
+ of all node pools and will result in nodes being added
+ and/or removed.
enable_kubernetes_alpha (bool):
Kubernetes alpha features are enabled on this
cluster. This includes alpha API groups (e.g.
@@ -948,6 +1183,13 @@ class Cluster(proto.Message):
vertical_pod_autoscaling (~.cluster_service.VerticalPodAutoscaling):
Cluster-level Vertical Pod Autoscaling
configuration.
+ shielded_nodes (~.cluster_service.ShieldedNodes):
+ Shielded Nodes configuration.
+ release_channel (~.cluster_service.ReleaseChannel):
+ Release channel configuration.
+ workload_identity_config (~.cluster_service.WorkloadIdentityConfig):
+ Configuration for the use of Kubernetes
+ Service Accounts in GCP IAM policies.
self_link (str):
[Output only] Server-defined URL for the resource.
zone (str):
@@ -985,7 +1227,7 @@ class Cluster(proto.Message):
endpoint.
current_node_version (str):
[Output only] Deprecated, use
- `NodePools.version `__
+ `NodePools.version `__
instead. The current version of the node software
components. If they are currently at multiple versions
because they're in the process of being upgraded, this
@@ -997,8 +1239,9 @@ class Cluster(proto.Message):
status (~.cluster_service.Cluster.Status):
[Output only] The current status of this cluster.
status_message (str):
- [Output only] Additional information about the current
- status of this cluster, if available.
+ [Output only] Deprecated. Use conditions instead. Additional
+ information about the current status of this cluster, if
+ available.
node_ipv4_cidr_size (int):
[Output only] The size of the address space on each node for
hosting containers. This is provisioned from within the
@@ -1056,9 +1299,9 @@ class Status(proto.Enum):
initial_node_count = proto.Field(proto.INT32, number=3)
- node_config = proto.Field(proto.MESSAGE, number=4, message=NodeConfig,)
+ node_config = proto.Field(proto.MESSAGE, number=4, message="NodeConfig",)
- master_auth = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,)
+ master_auth = proto.Field(proto.MESSAGE, number=5, message="MasterAuth",)
logging_service = proto.Field(proto.STRING, number=6)
@@ -1068,7 +1311,7 @@ class Status(proto.Enum):
cluster_ipv4_cidr = proto.Field(proto.STRING, number=9)
- addons_config = proto.Field(proto.MESSAGE, number=10, message=AddonsConfig,)
+ addons_config = proto.Field(proto.MESSAGE, number=10, message="AddonsConfig",)
subnetwork = proto.Field(proto.STRING, number=11)
@@ -1082,16 +1325,16 @@ class Status(proto.Enum):
label_fingerprint = proto.Field(proto.STRING, number=16)
- legacy_abac = proto.Field(proto.MESSAGE, number=18, message=LegacyAbac,)
+ legacy_abac = proto.Field(proto.MESSAGE, number=18, message="LegacyAbac",)
- network_policy = proto.Field(proto.MESSAGE, number=19, message=NetworkPolicy,)
+ network_policy = proto.Field(proto.MESSAGE, number=19, message="NetworkPolicy",)
ip_allocation_policy = proto.Field(
- proto.MESSAGE, number=20, message=IPAllocationPolicy,
+ proto.MESSAGE, number=20, message="IPAllocationPolicy",
)
master_authorized_networks_config = proto.Field(
- proto.MESSAGE, number=22, message=MasterAuthorizedNetworksConfig,
+ proto.MESSAGE, number=22, message="MasterAuthorizedNetworksConfig",
)
maintenance_policy = proto.Field(
@@ -1099,7 +1342,7 @@ class Status(proto.Enum):
)
binary_authorization = proto.Field(
- proto.MESSAGE, number=24, message=BinaryAuthorization,
+ proto.MESSAGE, number=24, message="BinaryAuthorization",
)
autoscaling = proto.Field(proto.MESSAGE, number=26, message="ClusterAutoscaling",)
@@ -1115,11 +1358,11 @@ class Status(proto.Enum):
)
authenticator_groups_config = proto.Field(
- proto.MESSAGE, number=34, message=AuthenticatorGroupsConfig,
+ proto.MESSAGE, number=34, message="AuthenticatorGroupsConfig",
)
private_cluster_config = proto.Field(
- proto.MESSAGE, number=37, message=PrivateClusterConfig,
+ proto.MESSAGE, number=37, message="PrivateClusterConfig",
)
database_encryption = proto.Field(
@@ -1130,6 +1373,14 @@ class Status(proto.Enum):
proto.MESSAGE, number=39, message="VerticalPodAutoscaling",
)
+ shielded_nodes = proto.Field(proto.MESSAGE, number=40, message="ShieldedNodes",)
+
+ release_channel = proto.Field(proto.MESSAGE, number=41, message="ReleaseChannel",)
+
+ workload_identity_config = proto.Field(
+ proto.MESSAGE, number=43, message="WorkloadIdentityConfig",
+ )
+
self_link = proto.Field(proto.STRING, number=100)
zone = proto.Field(proto.STRING, number=101)
@@ -1193,11 +1444,17 @@ class ClusterUpdate(proto.Message):
The monitoring service the cluster should use to write
metrics. Currently available options:
- - "monitoring.googleapis.com/kubernetes" - the Google Cloud
- Monitoring service with Kubernetes-native resource model
- - "monitoring.googleapis.com" - the Google Cloud Monitoring
- service
- - "none" - no metrics will be exported from the cluster
+ - "monitoring.googleapis.com/kubernetes" - The Cloud
+ Monitoring service with a Kubernetes-native resource
+ model
+ - ``monitoring.googleapis.com`` - The legacy Cloud
+ Monitoring service (no longer available as of GKE 1.15).
+ - ``none`` - No metrics will be exported from the cluster.
+
+ If left as an empty
+ string,\ ``monitoring.googleapis.com/kubernetes`` will be
+ used for GKE 1.14+ or ``monitoring.googleapis.com`` for
+ earlier versions.
desired_addons_config (~.cluster_service.AddonsConfig):
Configurations for the various addons
available to run in the cluster.
@@ -1211,6 +1468,10 @@ class ClusterUpdate(proto.Message):
"desired_node_pool" field as well.
desired_database_encryption (~.cluster_service.DatabaseEncryption):
Configuration of etcd encryption.
+ desired_workload_identity_config (~.cluster_service.WorkloadIdentityConfig):
+ Configuration for Workload Identity.
+ desired_shielded_nodes (~.cluster_service.ShieldedNodes):
+ Configuration for Shielded Nodes.
desired_node_pool_autoscaling (~.cluster_service.NodePoolAutoscaling):
Autoscaler configuration for the node pool specified in
desired_node_pool_id. If there is only one pool in the
@@ -1219,12 +1480,13 @@ class ClusterUpdate(proto.Message):
desired_locations (Sequence[str]):
The desired list of Google Compute Engine
`zones `__
- in which the cluster's nodes should be located. Changing the
- locations a cluster is in will result in nodes being either
- created or removed from the cluster, depending on whether
- locations are being added or removed.
+ in which the cluster's nodes should be located.
This list must always include the cluster's primary zone.
+
+ Warning: changing cluster locations will update the
+ locations of all node pools and will result in nodes being
+ added and/or removed.
desired_master_authorized_networks_config (~.cluster_service.MasterAuthorizedNetworksConfig):
The desired configuration options for master
authorized networks feature.
@@ -1237,19 +1499,31 @@ class ClusterUpdate(proto.Message):
The logging service the cluster should use to write logs.
Currently available options:
- - "logging.googleapis.com/kubernetes" - the Google Cloud
- Logging service with Kubernetes-native resource model
- - "logging.googleapis.com" - the Google Cloud Logging
- service
- - "none" - no logs will be exported from the cluster
+ - ``logging.googleapis.com/kubernetes`` - The Cloud Logging
+ service with a Kubernetes-native resource model
+ - ``logging.googleapis.com`` - The legacy Cloud Logging
+ service (no longer available as of GKE 1.15).
+ - ``none`` - no logs will be exported from the cluster.
+
+ If left as an empty
+ string,\ ``logging.googleapis.com/kubernetes`` will be used
+ for GKE 1.14+ or ``logging.googleapis.com`` for earlier
+ versions.
desired_resource_usage_export_config (~.cluster_service.ResourceUsageExportConfig):
The desired configuration for exporting
resource usage.
desired_vertical_pod_autoscaling (~.cluster_service.VerticalPodAutoscaling):
Cluster-level Vertical Pod Autoscaling
configuration.
+ desired_private_cluster_config (~.cluster_service.PrivateClusterConfig):
+ The desired private cluster configuration.
desired_intra_node_visibility_config (~.cluster_service.IntraNodeVisibilityConfig):
The desired config of Intra-node visibility.
+ desired_default_snat_status (~.cluster_service.DefaultSnatStatus):
+ The desired status of whether to disable
+ default sNAT for this cluster.
+ desired_release_channel (~.cluster_service.ReleaseChannel):
+ The desired release channel configuration.
desired_master_version (str):
The Kubernetes version to change the master
to.
@@ -1269,7 +1543,9 @@ class ClusterUpdate(proto.Message):
desired_monitoring_service = proto.Field(proto.STRING, number=5)
- desired_addons_config = proto.Field(proto.MESSAGE, number=6, message=AddonsConfig,)
+ desired_addons_config = proto.Field(
+ proto.MESSAGE, number=6, message="AddonsConfig",
+ )
desired_node_pool_id = proto.Field(proto.STRING, number=7)
@@ -1279,6 +1555,14 @@ class ClusterUpdate(proto.Message):
proto.MESSAGE, number=46, message="DatabaseEncryption",
)
+ desired_workload_identity_config = proto.Field(
+ proto.MESSAGE, number=47, message="WorkloadIdentityConfig",
+ )
+
+ desired_shielded_nodes = proto.Field(
+ proto.MESSAGE, number=48, message="ShieldedNodes",
+ )
+
desired_node_pool_autoscaling = proto.Field(
proto.MESSAGE, number=9, message="NodePoolAutoscaling",
)
@@ -1286,7 +1570,7 @@ class ClusterUpdate(proto.Message):
desired_locations = proto.RepeatedField(proto.STRING, number=10)
desired_master_authorized_networks_config = proto.Field(
- proto.MESSAGE, number=12, message=MasterAuthorizedNetworksConfig,
+ proto.MESSAGE, number=12, message="MasterAuthorizedNetworksConfig",
)
desired_cluster_autoscaling = proto.Field(
@@ -1294,7 +1578,7 @@ class ClusterUpdate(proto.Message):
)
desired_binary_authorization = proto.Field(
- proto.MESSAGE, number=16, message=BinaryAuthorization,
+ proto.MESSAGE, number=16, message="BinaryAuthorization",
)
desired_logging_service = proto.Field(proto.STRING, number=19)
@@ -1307,10 +1591,22 @@ class ClusterUpdate(proto.Message):
proto.MESSAGE, number=22, message="VerticalPodAutoscaling",
)
+ desired_private_cluster_config = proto.Field(
+ proto.MESSAGE, number=25, message="PrivateClusterConfig",
+ )
+
desired_intra_node_visibility_config = proto.Field(
proto.MESSAGE, number=26, message="IntraNodeVisibilityConfig",
)
+ desired_default_snat_status = proto.Field(
+ proto.MESSAGE, number=28, message="DefaultSnatStatus",
+ )
+
+ desired_release_channel = proto.Field(
+ proto.MESSAGE, number=31, message="ReleaseChannel",
+ )
+
desired_master_version = proto.Field(proto.STRING, number=100)
@@ -1334,8 +1630,8 @@ class Operation(proto.Message):
detail (str):
Detailed operation progress, if available.
status_message (str):
- If an error has occurred, a textual
- description of the error.
+ Output only. If an error has occurred, a
+ textual description of the error.
self_link (str):
Server-defined URL for the resource.
target_link (str):
@@ -1355,6 +1651,9 @@ class Operation(proto.Message):
[Output only] The time the operation completed, in
`RFC3339 `__ text
format.
+ progress (~.cluster_service.OperationProgress):
+ Output only. [Output only] Progress information for an
+ operation.
cluster_conditions (Sequence[~.cluster_service.StatusCondition]):
Which conditions caused the current cluster
state.
@@ -1413,6 +1712,8 @@ class Type(proto.Enum):
end_time = proto.Field(proto.STRING, number=11)
+ progress = proto.Field(proto.MESSAGE, number=12, message="OperationProgress",)
+
cluster_conditions = proto.RepeatedField(
proto.MESSAGE, number=13, message="StatusCondition",
)
@@ -1422,6 +1723,59 @@ class Type(proto.Enum):
)
+class OperationProgress(proto.Message):
+ r"""Information about operation (or operation stage) progress.
+
+ Attributes:
+ name (str):
+ A non-parameterized string describing an
+ operation stage. Unset for single-stage
+ operations.
+ status (~.cluster_service.Operation.Status):
+ Status of an operation stage.
+ Unset for single-stage operations.
+ metrics (Sequence[~.cluster_service.OperationProgress.Metric]):
+ Progress metric bundle, for example: metrics: [{name: "nodes
+ done", int_value: 15}, {name: "nodes total", int_value: 32}]
+ or metrics: [{name: "progress", double_value: 0.56}, {name:
+ "progress scale", double_value: 1.0}]
+ stages (Sequence[~.cluster_service.OperationProgress]):
+ Substages of an operation or a stage.
+ """
+
+ class Metric(proto.Message):
+ r"""Progress metric is (string, int|float|string) pair.
+
+ Attributes:
+ name (str):
+ Required. Metric name, e.g., "nodes total",
+ "percent done".
+ int_value (int):
+ For metrics with integer value.
+ double_value (float):
+ For metrics with floating point value.
+ string_value (str):
+ For metrics with custom values (ratios,
+ visual progress, etc.).
+ """
+
+ name = proto.Field(proto.STRING, number=1)
+
+ int_value = proto.Field(proto.INT64, number=2, oneof="value")
+
+ double_value = proto.Field(proto.DOUBLE, number=3, oneof="value")
+
+ string_value = proto.Field(proto.STRING, number=4, oneof="value")
+
+ name = proto.Field(proto.STRING, number=1)
+
+ status = proto.Field(proto.ENUM, number=2, enum="Operation.Status",)
+
+ metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric,)
+
+ stages = proto.RepeatedField(proto.MESSAGE, number=4, message="OperationProgress",)
+
+
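`Metric` keeps its value in a proto ``oneof`` named "value", so exactly one of the three value fields is ever populated; a sketch of inspecting it with proto-plus (values illustrative):

    from google.cloud.container_v1.types import OperationProgress

    metric = OperationProgress.Metric(name="nodes done", int_value=15)
    which = OperationProgress.Metric.pb(metric).WhichOneof("value")
    print(metric.name, which, getattr(metric, which))  # nodes done int_value 15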
class CreateClusterRequest(proto.Message):
r"""CreateClusterRequest creates a cluster.
@@ -1439,7 +1793,7 @@ class CreateClusterRequest(proto.Message):
and replaced by the parent field.
cluster (~.cluster_service.Cluster):
Required. A `cluster
- resource `__
+ resource `__
parent (str):
The parent (project and location) where the cluster will be
created. Specified in the format ``projects/*/locations/*``.
@@ -1449,7 +1803,7 @@ class CreateClusterRequest(proto.Message):
zone = proto.Field(proto.STRING, number=2)
- cluster = proto.Field(proto.MESSAGE, number=3, message=Cluster,)
+ cluster = proto.Field(proto.MESSAGE, number=3, message="Cluster",)
parent = proto.Field(proto.STRING, number=5)
@@ -1521,7 +1875,7 @@ class UpdateClusterRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
- update = proto.Field(proto.MESSAGE, number=4, message=ClusterUpdate,)
+ update = proto.Field(proto.MESSAGE, number=4, message="ClusterUpdate",)
name = proto.Field(proto.STRING, number=5)
@@ -1571,6 +1925,19 @@ class UpdateNodePoolRequest(proto.Message):
The name (project, location, cluster, node pool) of the node
pool to update. Specified in the format
``projects/*/locations/*/clusters/*/nodePools/*``.
+ locations (Sequence[str]):
+ The desired list of Google Compute Engine
+ `zones `__
+ in which the node pool's nodes should be located. Changing
+ the locations for a node pool will result in nodes being
+ either created or removed from the node pool, depending on
+ whether locations are being added or removed.
+ workload_metadata_config (~.cluster_service.WorkloadMetadataConfig):
+ The desired workload metadata config for the
+ node pool.
+ upgrade_settings (~.cluster_service.NodePool.UpgradeSettings):
+ Upgrade settings control disruption and speed
+ of the upgrade.
"""
project_id = proto.Field(proto.STRING, number=1)
@@ -1587,6 +1954,16 @@ class UpdateNodePoolRequest(proto.Message):
name = proto.Field(proto.STRING, number=8)
+ locations = proto.RepeatedField(proto.STRING, number=13)
+
+ workload_metadata_config = proto.Field(
+ proto.MESSAGE, number=14, message="WorkloadMetadataConfig",
+ )
+
+ upgrade_settings = proto.Field(
+ proto.MESSAGE, number=15, message="NodePool.UpgradeSettings",
+ )
+
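
Illustrative use of the three fields added to UpdateNodePoolRequest above; resource names and versions are placeholders, and node_version/image_type are pre-existing required fields on this request that the hunk does not show.

from google.cloud.container_v1.types import cluster_service

request = cluster_service.UpdateNodePoolRequest(
    name=(
        "projects/my-project/locations/us-central1"
        "/clusters/my-cluster/nodePools/default-pool"
    ),
    node_version="1.17.9-gke.600",  # pre-existing field, still required
    image_type="COS",               # pre-existing field, still required
    locations=["us-central1-a", "us-central1-b"],
    workload_metadata_config=cluster_service.WorkloadMetadataConfig(
        mode=cluster_service.WorkloadMetadataConfig.Mode.GKE_METADATA,
    ),
    upgrade_settings=cluster_service.NodePool.UpgradeSettings(
        max_surge=1, max_unavailable=0,
    ),
)
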
class SetNodePoolAutoscalingRequest(proto.Message):
r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of
@@ -1656,11 +2033,18 @@ class SetLoggingServiceRequest(proto.Message):
replaced by the name field.
logging_service (str):
Required. The logging service the cluster should use to
- write metrics. Currently available options:
+ write logs. Currently available options:
- - "logging.googleapis.com" - the Google Cloud Logging
- service
- - "none" - no metrics will be exported from the cluster
+ - ``logging.googleapis.com/kubernetes`` - The Cloud Logging
+ service with a Kubernetes-native resource model
+ - ``logging.googleapis.com`` - The legacy Cloud Logging
+ service (no longer available as of GKE 1.15).
+ - ``none`` - no logs will be exported from the cluster.
+
+ If left as an empty
+ string,\ ``logging.googleapis.com/kubernetes`` will be used
+ for GKE 1.14+ or ``logging.googleapis.com`` for earlier
+ versions.
name (str):
The name (project, location, cluster) of the cluster to set
logging. Specified in the format
@@ -1702,11 +2086,17 @@ class SetMonitoringServiceRequest(proto.Message):
Required. The monitoring service the cluster should use to
write metrics. Currently available options:
- - "monitoring.googleapis.com/kubernetes" - the Google Cloud
- Monitoring service with Kubernetes-native resource model
- - "monitoring.googleapis.com" - the Google Cloud Monitoring
- service
- - "none" - no metrics will be exported from the cluster
+ - "monitoring.googleapis.com/kubernetes" - The Cloud
+ Monitoring service with a Kubernetes-native resource
+ model
+ - ``monitoring.googleapis.com`` - The legacy Cloud
+ Monitoring service (no longer available as of GKE 1.15).
+ - ``none`` - No metrics will be exported from the cluster.
+
+ If left as an empty
+ string,\ ``monitoring.googleapis.com/kubernetes`` will be
+ used for GKE 1.14+ or ``monitoring.googleapis.com`` for
+ earlier versions.
name (str):
The name (project, location, cluster) of the cluster to set
monitoring. Specified in the format
@@ -1759,7 +2149,7 @@ class SetAddonsConfigRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
- addons_config = proto.Field(proto.MESSAGE, number=4, message=AddonsConfig,)
+ addons_config = proto.Field(proto.MESSAGE, number=4, message="AddonsConfig",)
name = proto.Field(proto.STRING, number=6)
@@ -1903,7 +2293,7 @@ class Action(proto.Enum):
action = proto.Field(proto.ENUM, number=4, enum=Action,)
- update = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,)
+ update = proto.Field(proto.MESSAGE, number=5, message="MasterAuth",)
name = proto.Field(proto.STRING, number=7)
@@ -1982,7 +2372,7 @@ class ListClustersResponse(proto.Message):
clusters returned may be missing those zones.
"""
- clusters = proto.RepeatedField(proto.MESSAGE, number=1, message=Cluster,)
+ clusters = proto.RepeatedField(proto.MESSAGE, number=1, message="Cluster",)
missing_zones = proto.RepeatedField(proto.STRING, number=2)
@@ -2098,7 +2488,7 @@ class ListOperationsResponse(proto.Message):
operations from those zones.
"""
- operations = proto.RepeatedField(proto.MESSAGE, number=1, message=Operation,)
+ operations = proto.RepeatedField(proto.MESSAGE, number=1, message="Operation",)
missing_zones = proto.RepeatedField(proto.STRING, number=2)
@@ -2138,15 +2528,40 @@ class ServerConfig(proto.Message):
Version of Kubernetes the service deploys by
default.
valid_node_versions (Sequence[str]):
- List of valid node upgrade target versions.
+ List of valid node upgrade target versions,
+ in descending order.
default_image_type (str):
Default image type.
valid_image_types (Sequence[str]):
List of valid image types.
valid_master_versions (Sequence[str]):
- List of valid master versions.
+ List of valid master versions, in descending
+ order.
+ channels (Sequence[~.cluster_service.ServerConfig.ReleaseChannelConfig]):
+ List of release channel configurations.
"""
+ class ReleaseChannelConfig(proto.Message):
+ r"""ReleaseChannelConfig exposes configuration for a release
+ channel.
+
+ Attributes:
+ channel (~.cluster_service.ReleaseChannel.Channel):
+ The release channel this configuration
+ applies to.
+ default_version (str):
+ The default version for newly created
+ clusters on the channel.
+ valid_versions (Sequence[str]):
+ List of valid versions for the channel.
+ """
+
+ channel = proto.Field(proto.ENUM, number=1, enum="ReleaseChannel.Channel",)
+
+ default_version = proto.Field(proto.STRING, number=2)
+
+ valid_versions = proto.RepeatedField(proto.STRING, number=4)
+
default_cluster_version = proto.Field(proto.STRING, number=1)
valid_node_versions = proto.RepeatedField(proto.STRING, number=3)
@@ -2157,6 +2572,10 @@ class ServerConfig(proto.Message):
valid_master_versions = proto.RepeatedField(proto.STRING, number=6)
+ channels = proto.RepeatedField(
+ proto.MESSAGE, number=9, message=ReleaseChannelConfig,
+ )
+
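
A sketch of reading the new channels field off a ServerConfig; the client construction and resource name are assumptions.

from google.cloud import container_v1

client = container_v1.ClusterManagerClient()
config = client.get_server_config(
    request={"name": "projects/my-project/locations/us-central1"}
)
for channel_config in config.channels:
    # Each entry pairs a release channel with its default and valid versions.
    print(channel_config.channel, channel_config.default_version)
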
class CreateNodePoolRequest(proto.Message):
r"""CreateNodePoolRequest creates a node pool for a cluster.
@@ -2330,6 +2749,17 @@ class NodePool(proto.Message):
quota `__ is
sufficient for this number of instances. You must also have
available firewall and routes quota.
+ locations (Sequence[str]):
+ The list of Google Compute Engine
+ `zones `__
+ in which the NodePool's nodes should be located.
+
+ If this value is unspecified during node pool creation, the
+ `Cluster.Locations `__
+ value will be used instead.
+
+ Warning: changing node pool locations will result in nodes
+ being added and/or removed.
self_link (str):
[Output only] Server-defined URL for the resource.
version (str):
@@ -2341,8 +2771,9 @@ class NodePool(proto.Message):
status (~.cluster_service.NodePool.Status):
[Output only] The status of the nodes in this pool instance.
status_message (str):
- [Output only] Additional information about the current
- status of this node pool instance, if available.
+ [Output only] Deprecated. Use conditions instead. Additional
+ information about the current status of this node pool
+ instance, if available.
autoscaling (~.cluster_service.NodePoolAutoscaling):
Autoscaler configuration for this NodePool.
Autoscaler is enabled only if a valid
@@ -2360,6 +2791,9 @@ class NodePool(proto.Message):
pod_ipv4_cidr_size (int):
[Output only] The pod CIDR block size per node in this node
pool.
+ upgrade_settings (~.cluster_service.NodePool.UpgradeSettings):
+ Upgrade settings control disruption and speed
+ of the upgrade.
"""
class Status(proto.Enum):
@@ -2372,12 +2806,56 @@ class Status(proto.Enum):
STOPPING = 5
ERROR = 6
+ class UpgradeSettings(proto.Message):
+ r"""These upgrade settings control the level of parallelism and
+ the level of disruption caused by an upgrade.
+
+ maxUnavailable controls the number of nodes that can be
+ simultaneously unavailable.
+
+ maxSurge controls the number of additional nodes that can be
+ added to the node pool temporarily for the time of the upgrade
+ to increase the number of available nodes.
+
+ (maxUnavailable + maxSurge) determines the level of parallelism
+ (how many nodes are being upgraded at the same time).
+
+ Note: upgrades inevitably introduce some disruption since
+ workloads need to be moved from old nodes to new, upgraded ones.
+ Even if maxUnavailable=0, this holds true. (Disruption stays
+ within the limits of PodDisruptionBudget, if it is configured.)
+
+ Consider a hypothetical node pool with 5 nodes having
+ maxSurge=2, maxUnavailable=1. This means the upgrade process
+ upgrades 3 nodes simultaneously. It creates 2 additional
+ (upgraded) nodes, then it brings down 3 old (not yet upgraded)
+ nodes at the same time. This ensures that there are always at
+ least 4 nodes available.
+
+ Attributes:
+ max_surge (int):
+ The maximum number of nodes that can be
+ created beyond the current size of the node pool
+ during the upgrade process.
+ max_unavailable (int):
+ The maximum number of nodes that can be
+ simultaneously unavailable during the upgrade
+ process. A node is considered available if its
+ status is Ready.
+ """
+
+ max_surge = proto.Field(proto.INT32, number=1)
+
+ max_unavailable = proto.Field(proto.INT32, number=2)
+
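
The parallelism arithmetic from the docstring above, restated as a minimal sketch:

from google.cloud.container_v1.types import cluster_service

settings = cluster_service.NodePool.UpgradeSettings(max_surge=2, max_unavailable=1)
# Upgraded in parallel: up to (maxSurge + maxUnavailable) nodes at a time.
assert settings.max_surge + settings.max_unavailable == 3
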
name = proto.Field(proto.STRING, number=1)
- config = proto.Field(proto.MESSAGE, number=2, message=NodeConfig,)
+ config = proto.Field(proto.MESSAGE, number=2, message="NodeConfig",)
initial_node_count = proto.Field(proto.INT32, number=3)
+ locations = proto.RepeatedField(proto.STRING, number=13)
+
self_link = proto.Field(proto.STRING, number=100)
version = proto.Field(proto.STRING, number=101)
@@ -2402,6 +2880,8 @@ class Status(proto.Enum):
pod_ipv4_cidr_size = proto.Field(proto.INT32, number=7)
+ upgrade_settings = proto.Field(proto.MESSAGE, number=107, message=UpgradeSettings,)
+
class NodeManagement(proto.Message):
r"""NodeManagement defines the set of node management services
@@ -2463,14 +2943,13 @@ class MaintenancePolicy(proto.Message):
Specifies the maintenance window in which
maintenance may be performed.
resource_version (str):
- A hash identifying the version of this
- policy, so that updates to fields of the policy
- won't accidentally undo intermediate changes
- (and so that users of the API unaware of some
- fields won't accidentally remove other fields).
- Make a get()
- request to the cluster
- to get the current resource version and include
- it with requests to set the policy.
+ A hash identifying the version of this policy, so that
+ updates to fields of the policy won't accidentally undo
+ intermediate changes (and so that users of the API unaware
+ of some fields won't accidentally remove other fields). Make
+ a ``get()`` request to the cluster to get the current
+ resource version and include it with requests to set the
+ policy.
"""
window = proto.Field(proto.MESSAGE, number=1, message="MaintenanceWindow",)
@@ -2535,41 +3014,46 @@ class RecurringTimeWindow(proto.Message):
The window of the first recurrence.
recurrence (str):
An RRULE
- (https://tools.ietf.org/html/rfc5545#section-3.8.5.3)
- for how this window reccurs. They go on for the
- span of time between the start and end time.
-
- For example, to have something repeat every
- weekday, you'd use:
- FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
- To
- repeat some window daily (equivalent to the
- DailyMaintenanceWindow):
- FREQ=DAILY
+ (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for
+ how this window recurs. They go on for the span of time
+ between the start and end time.
+
+ For example, to have something repeat every weekday, you'd
+ use: ``FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR``
+
+ To repeat some window daily (equivalent to the
+ DailyMaintenanceWindow): ``FREQ=DAILY``
+
For the first weekend of every month:
- FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU
- This specifies how frequently the window starts.
- Eg, if you wanted to have a 9-5 UTC-4 window
- every weekday, you'd use something like:
- start time = 2019-01-01T09:00:00-0400
- end time = 2019-01-01T17:00:00-0400
- recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
-
- Windows can span multiple days. Eg, to make the
- window encompass every weekend from midnight
- Saturday till the last minute of Sunday UTC:
-
- start time = 2019-01-05T00:00:00Z
- end time = 2019-01-07T23:59:00Z
- recurrence = FREQ=WEEKLY;BYDAY=SA
-
- Note the start and end time's specific dates are
- largely arbitrary except to specify duration of
- the window and when it first starts. The FREQ
- values of HOURLY, MINUTELY, and SECONDLY are not
- supported.
- """
-
- window = proto.Field(proto.MESSAGE, number=1, message=TimeWindow,)
+ ``FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU``
+
+ This specifies how frequently the window starts. E.g., if you
+ wanted to have a 9-5 UTC-4 window every weekday, you'd use
+ something like:
+
+ ::
+
+ start time = 2019-01-01T09:00:00-0400
+ end time = 2019-01-01T17:00:00-0400
+ recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
+
+ Windows can span multiple days. E.g., to make the window
+ encompass every weekend from midnight Saturday till the last
+ minute of Sunday UTC:
+
+ ::
+
+ start time = 2019-01-05T00:00:00Z
+ end time = 2019-01-07T23:59:00Z
+ recurrence = FREQ=WEEKLY;BYDAY=SA
+
+ Note the start and end time's specific dates are largely
+ arbitrary except to specify duration of the window and when
+ it first starts. The FREQ values of HOURLY, MINUTELY, and
+ SECONDLY are not supported.
+ """
+
+ window = proto.Field(proto.MESSAGE, number=1, message="TimeWindow",)
recurrence = proto.Field(proto.STRING, number=2)
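
A hedged example of the 9-5 UTC-4 weekday window described above; proto-plus accepts datetime values for the Timestamp fields on TimeWindow.

import datetime
from google.cloud.container_v1.types import cluster_service

window = cluster_service.RecurringTimeWindow(
    window=cluster_service.TimeWindow(
        # 2019-01-01T09:00:00-04:00 and 2019-01-01T17:00:00-04:00, in UTC.
        start_time=datetime.datetime(2019, 1, 1, 13, 0, tzinfo=datetime.timezone.utc),
        end_time=datetime.datetime(2019, 1, 1, 21, 0, tzinfo=datetime.timezone.utc),
    ),
    recurrence="FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR",
)
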
@@ -2637,7 +3121,7 @@ class SetNodePoolManagementRequest(proto.Message):
node_pool_id = proto.Field(proto.STRING, number=4)
- management = proto.Field(proto.MESSAGE, number=5, message=NodeManagement,)
+ management = proto.Field(proto.MESSAGE, number=5, message="NodeManagement",)
name = proto.Field(proto.STRING, number=7)
@@ -2738,7 +3222,7 @@ class ListNodePoolsResponse(proto.Message):
A list of node pools for a cluster.
"""
- node_pools = proto.RepeatedField(proto.MESSAGE, number=1, message=NodePool,)
+ node_pools = proto.RepeatedField(proto.MESSAGE, number=1, message="NodePool",)
class ClusterAutoscaling(proto.Message):
@@ -2782,18 +3266,72 @@ class AutoprovisioningNodePoolDefaults(proto.Message):
Attributes:
oauth_scopes (Sequence[str]):
- Scopes that are used by NAP when creating node pools. If
- oauth_scopes are specified, service_account should be empty.
+ Scopes that are used by NAP when creating
+ node pools.
service_account (str):
- The Google Cloud Platform Service Account to be used by the
- node VMs. If service_account is specified, scopes should be
- empty.
+ The Google Cloud Platform Service Account to
+ be used by the node VMs.
+ upgrade_settings (~.cluster_service.NodePool.UpgradeSettings):
+ Specifies the upgrade settings for NAP
+ created node pools.
+ management (~.cluster_service.NodeManagement):
+ Specifies the node management options for NAP
+ created node pools.
+ min_cpu_platform (str):
+ Minimum CPU platform to be used for NAP created node pools.
+ The instance may be scheduled on the specified or newer CPU
+ platform. Applicable values are the friendly names of CPU
+ platforms, such as minCpuPlatform: Intel Haswell or
+ minCpuPlatform: Intel Sandy Bridge. For more information,
+ read `how to specify min CPU
+ platform `__
+ To unset the min CPU platform field, pass "automatic" as
+ the field value.
+ disk_size_gb (int):
+ Size of the disk attached to each node,
+ specified in GB. The smallest allowed disk size
+ is 10GB.
+ If unspecified, the default disk size is 100GB.
+ disk_type (str):
+ Type of the disk attached to each node (e.g.
+ 'pd-standard', 'pd-ssd' or 'pd-balanced')
+
+ If unspecified, the default disk type is
+ 'pd-standard'.
+ shielded_instance_config (~.cluster_service.ShieldedInstanceConfig):
+ Shielded Instance options.
+ boot_disk_kms_key (str):
+ The Customer Managed Encryption Key used to encrypt the boot
+ disk attached to each node in the node pool. This should be
+ of the form
+ projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME].
+ For more information about protecting resources with Cloud
+ KMS Keys please see:
+ https://cloud.google.com/compute/docs/disks/customer-managed-encryption
"""
oauth_scopes = proto.RepeatedField(proto.STRING, number=1)
service_account = proto.Field(proto.STRING, number=2)
+ upgrade_settings = proto.Field(
+ proto.MESSAGE, number=3, message="NodePool.UpgradeSettings",
+ )
+
+ management = proto.Field(proto.MESSAGE, number=4, message="NodeManagement",)
+
+ min_cpu_platform = proto.Field(proto.STRING, number=5)
+
+ disk_size_gb = proto.Field(proto.INT32, number=6)
+
+ disk_type = proto.Field(proto.STRING, number=7)
+
+ shielded_instance_config = proto.Field(
+ proto.MESSAGE, number=8, message="ShieldedInstanceConfig",
+ )
+
+ boot_disk_kms_key = proto.Field(proto.STRING, number=9)
+
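
Illustrative NAP defaults combining several of the fields added above; the service account and values are placeholders.

from google.cloud.container_v1.types import cluster_service

defaults = cluster_service.AutoprovisioningNodePoolDefaults(
    service_account="nap-nodes@my-project.iam.gserviceaccount.com",
    min_cpu_platform="Intel Haswell",
    disk_size_gb=100,
    disk_type="pd-ssd",
    upgrade_settings=cluster_service.NodePool.UpgradeSettings(
        max_surge=1, max_unavailable=0,
    ),
)
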
class ResourceLimit(proto.Message):
r"""Contains information about amount of some resource in the
@@ -2870,14 +3408,12 @@ class SetLabelsRequest(proto.Message):
resource_labels (Sequence[~.cluster_service.SetLabelsRequest.ResourceLabelsEntry]):
Required. The labels to set for that cluster.
label_fingerprint (str):
- Required. The fingerprint of the previous set
- of labels for this resource, used to detect
- conflicts. The fingerprint is initially
- generated by Kubernetes Engine and changes after
- every request to modify or update labels. You
- must always provide an up-to-date fingerprint
- hash when updating or changing labels. Make a
- get()
- request to the resource to
+ Required. The fingerprint of the previous set of labels for
+ this resource, used to detect conflicts. The fingerprint is
+ initially generated by Kubernetes Engine and changes after
+ every request to modify or update labels. You must always
+ provide an up-to-date fingerprint hash when updating or
+ changing labels. Make a ``get()`` request to the resource to
get the latest fingerprint.
name (str):
The name (project, location, cluster id) of the cluster to
@@ -3032,6 +3568,27 @@ class AcceleratorConfig(proto.Message):
accelerator_type = proto.Field(proto.STRING, number=2)
+class WorkloadMetadataConfig(proto.Message):
+ r"""WorkloadMetadataConfig defines the metadata configuration to
+ expose to workloads on the node pool.
+
+ Attributes:
+ mode (~.cluster_service.WorkloadMetadataConfig.Mode):
+ Mode is the configuration for how to expose
+ metadata to workloads running on the node pool.
+ """
+
+ class Mode(proto.Enum):
+ r"""Mode is the configuration for how to expose metadata to
+ workloads running on the node.
+ """
+ MODE_UNSPECIFIED = 0
+ GCE_METADATA = 1
+ GKE_METADATA = 2
+
+ mode = proto.Field(proto.ENUM, number=2, enum=Mode,)
+
+
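
Sketch: opting a node pool's workloads into the GKE metadata server via the Mode enum above.

from google.cloud.container_v1.types import cluster_service

metadata_config = cluster_service.WorkloadMetadataConfig(
    mode=cluster_service.WorkloadMetadataConfig.Mode.GKE_METADATA,
)
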
class SetNetworkPolicyRequest(proto.Message):
r"""SetNetworkPolicyRequest enables/disables network policy for a
cluster.
@@ -3067,7 +3624,7 @@ class SetNetworkPolicyRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
- network_policy = proto.Field(proto.MESSAGE, number=4, message=NetworkPolicy,)
+ network_policy = proto.Field(proto.MESSAGE, number=4, message="NetworkPolicy",)
name = proto.Field(proto.STRING, number=6)
@@ -3104,7 +3661,7 @@ class SetMaintenancePolicyRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
maintenance_policy = proto.Field(
- proto.MESSAGE, number=4, message=MaintenancePolicy,
+ proto.MESSAGE, number=4, message="MaintenancePolicy",
)
name = proto.Field(proto.STRING, number=5)
@@ -3156,6 +3713,12 @@ class NetworkConfig(proto.Message):
Whether Intra-node visibility is enabled for
this cluster. This makes same node pod to pod
traffic visible for VPC network.
+ default_snat_status (~.cluster_service.DefaultSnatStatus):
+ Whether the cluster disables default in-node sNAT rules.
+ In-node sNAT rules will be disabled when default_snat_status
+ is disabled. When disabled is set to false, default IP
+ masquerade rules will be applied to the nodes to prevent
+ sNAT on cluster internal traffic.
"""
network = proto.Field(proto.STRING, number=1)
@@ -3164,6 +3727,157 @@ class NetworkConfig(proto.Message):
enable_intra_node_visibility = proto.Field(proto.BOOL, number=5)
+ default_snat_status = proto.Field(
+ proto.MESSAGE, number=7, message="DefaultSnatStatus",
+ )
+
+
+class GetOpenIDConfigRequest(proto.Message):
+ r"""GetOpenIDConfigRequest gets the OIDC discovery document for
+ the cluster. See the OpenID Connect Discovery 1.0 specification
+ for details.
+
+ Attributes:
+ parent (str):
+ The cluster (project, location, cluster id) to get the
+ discovery document for. Specified in the format
+ ``projects/*/locations/*/clusters/*``.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+
+class GetOpenIDConfigResponse(proto.Message):
+ r"""GetOpenIDConfigResponse is an OIDC discovery document for the
+ cluster. See the OpenID Connect Discovery 1.0 specification for
+ details.
+
+ Attributes:
+ issuer (str):
+ OIDC Issuer.
+ jwks_uri (str):
+ JSON Web Key URI.
+ response_types_supported (Sequence[str]):
+ Supported response types.
+ subject_types_supported (Sequence[str]):
+ Supported subject types.
+ id_token_signing_alg_values_supported (Sequence[str]):
+ Supported ID Token signing algorithms.
+ claims_supported (Sequence[str]):
+ Supported claims.
+ grant_types (Sequence[str]):
+ Supported grant types.
+ """
+
+ issuer = proto.Field(proto.STRING, number=1)
+
+ jwks_uri = proto.Field(proto.STRING, number=2)
+
+ response_types_supported = proto.RepeatedField(proto.STRING, number=3)
+
+ subject_types_supported = proto.RepeatedField(proto.STRING, number=4)
+
+ id_token_signing_alg_values_supported = proto.RepeatedField(proto.STRING, number=5)
+
+ claims_supported = proto.RepeatedField(proto.STRING, number=6)
+
+ grant_types = proto.RepeatedField(proto.STRING, number=7)
+
+
+class GetJSONWebKeysRequest(proto.Message):
+ r"""GetJSONWebKeysRequest gets the public component of the keys used by
+ the cluster to sign token requests. This will be the jwks_uri for
+ the discovery document returned by getOpenIDConfig. See the OpenID
+ Connect Discovery 1.0 specification for details.
+
+ Attributes:
+ parent (str):
+ The cluster (project, location, cluster id) to get keys for.
+ Specified in the format
+ ``projects/*/locations/*/clusters/*``.
+ """
+
+ parent = proto.Field(proto.STRING, number=1)
+
+
+class Jwk(proto.Message):
+ r"""Jwk is a JSON Web Key as specified in RFC 7517
+
+ Attributes:
+ kty (str):
+ Key Type.
+ alg (str):
+ Algorithm.
+ use (str):
+ Permitted uses for the public keys.
+ kid (str):
+ Key ID.
+ n (str):
+ Used for RSA keys.
+ e (str):
+ Used for RSA keys.
+ x (str):
+ Used for ECDSA keys.
+ y (str):
+ Used for ECDSA keys.
+ crv (str):
+ Used for ECDSA keys.
+ """
+
+ kty = proto.Field(proto.STRING, number=1)
+
+ alg = proto.Field(proto.STRING, number=2)
+
+ use = proto.Field(proto.STRING, number=3)
+
+ kid = proto.Field(proto.STRING, number=4)
+
+ n = proto.Field(proto.STRING, number=5)
+
+ e = proto.Field(proto.STRING, number=6)
+
+ x = proto.Field(proto.STRING, number=7)
+
+ y = proto.Field(proto.STRING, number=8)
+
+ crv = proto.Field(proto.STRING, number=9)
+
+
+class GetJSONWebKeysResponse(proto.Message):
+ r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as
+ specified in RFC 7517.
+
+ Attributes:
+ keys (Sequence[~.cluster_service.Jwk]):
+ The public component of the keys used by the
+ cluster to sign token requests.
+ """
+
+ keys = proto.RepeatedField(proto.MESSAGE, number=1, message="Jwk",)
+
+
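
A hedged sketch of fetching the keys described by Jwk and GetJSONWebKeysResponse, assuming the matching GetJSONWebKeys RPC is exposed on the client; names are placeholders.

from google.cloud import container_v1

client = container_v1.ClusterManagerClient()
response = client.get_json_web_keys(
    request={"parent": "projects/my-project/locations/us-central1/clusters/my-cluster"}
)
for jwk in response.keys:
    # kid identifies the key; kty and alg describe its type and algorithm.
    print(jwk.kid, jwk.kty, jwk.alg)
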
+class ReleaseChannel(proto.Message):
+ r"""ReleaseChannel indicates which release channel a cluster is
+ subscribed to. Release channels are arranged in order of risk.
+ When a cluster is subscribed to a release channel, Google
+ maintains both the master version and the node version. Node
+ auto-upgrade defaults to true and cannot be disabled.
+
+ Attributes:
+ channel (~.cluster_service.ReleaseChannel.Channel):
+ channel specifies which release channel the
+ cluster is subscribed to.
+ """
+
+ class Channel(proto.Enum):
+ r"""Possible values for 'channel'."""
+ UNSPECIFIED = 0
+ RAPID = 1
+ REGULAR = 2
+ STABLE = 3
+
+ channel = proto.Field(proto.ENUM, number=1, enum=Channel,)
+
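
Sketch: subscribing a cluster to the REGULAR channel. This assumes Cluster exposes a release_channel field, which is present in the full proto but not shown in this hunk.

from google.cloud.container_v1.types import cluster_service

cluster = cluster_service.Cluster(
    name="my-cluster",
    release_channel=cluster_service.ReleaseChannel(
        channel=cluster_service.ReleaseChannel.Channel.REGULAR,
    ),
)
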
class IntraNodeVisibilityConfig(proto.Message):
r"""IntraNodeVisibilityConfig contains the desired config of the
@@ -3190,6 +3904,19 @@ class MaxPodsConstraint(proto.Message):
max_pods_per_node = proto.Field(proto.INT64, number=1)
+class WorkloadIdentityConfig(proto.Message):
+ r"""Configuration for the use of Kubernetes Service Accounts in
+ GCP IAM policies.
+
+ Attributes:
+ workload_pool (str):
+ The workload pool to attach all Kubernetes
+ service accounts to.
+ """
+
+ workload_pool = proto.Field(proto.STRING, number=2)
+
+
class DatabaseEncryption(proto.Message):
r"""Configuration of etcd encryption.
@@ -3341,7 +4068,7 @@ class UsableSubnetwork(proto.Message):
ip_cidr_range = proto.Field(proto.STRING, number=3)
secondary_ip_ranges = proto.RepeatedField(
- proto.MESSAGE, number=4, message=UsableSubnetworkSecondaryRange,
+ proto.MESSAGE, number=4, message="UsableSubnetworkSecondaryRange",
)
status_message = proto.Field(proto.STRING, number=5)
@@ -3412,4 +4139,28 @@ class VerticalPodAutoscaling(proto.Message):
enabled = proto.Field(proto.BOOL, number=1)
+class DefaultSnatStatus(proto.Message):
+ r"""DefaultSnatStatus contains the desired state of whether
+ default sNAT should be disabled on the cluster.
+
+ Attributes:
+ disabled (bool):
+ Disables cluster default sNAT rules.
+ """
+
+ disabled = proto.Field(proto.BOOL, number=1)
+
+
+class ShieldedNodes(proto.Message):
+ r"""Configuration of Shielded Nodes feature.
+
+ Attributes:
+ enabled (bool):
+ Whether Shielded Nodes features are enabled
+ on all nodes in this cluster.
+ """
+
+ enabled = proto.Field(proto.BOOL, number=1)
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py
index 5767ac0756f9..649846931117 100644
--- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py
+++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/async_client.py
@@ -44,9 +44,47 @@ class ClusterManagerAsyncClient:
DEFAULT_ENDPOINT = ClusterManagerClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ClusterManagerClient.DEFAULT_MTLS_ENDPOINT
+ common_billing_account_path = staticmethod(
+ ClusterManagerClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ ClusterManagerClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(ClusterManagerClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ ClusterManagerClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ ClusterManagerClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ ClusterManagerClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(ClusterManagerClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ ClusterManagerClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(ClusterManagerClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ ClusterManagerClient.parse_common_location_path
+ )
+
from_service_account_file = ClusterManagerClient.from_service_account_file
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> ClusterManagerTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ ClusterManagerTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
get_transport_class = functools.partial(
type(ClusterManagerClient).get_transport_class, type(ClusterManagerClient)
)
@@ -150,7 +188,8 @@ async def list_clusters(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone]):
+ has_flattened_params = any([project_id, zone])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -175,7 +214,7 @@ async def list_clusters(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -251,7 +290,8 @@ async def get_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id]):
+ has_flattened_params = any([project_id, zone, cluster_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -278,7 +318,7 @@ async def get_cluster(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -368,7 +408,8 @@ async def create_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster]):
+ has_flattened_params = any([project_id, zone, cluster])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -474,7 +515,8 @@ async def update_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, update]):
+ has_flattened_params = any([project_id, zone, cluster_id, update])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -694,7 +736,8 @@ async def set_logging_service(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, logging_service]):
+ has_flattened_params = any([project_id, zone, cluster_id, logging_service])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -806,9 +849,8 @@ async def set_monitoring_service(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, monitoring_service]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, monitoring_service])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -917,7 +959,8 @@ async def set_addons_config(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, addons_config]):
+ has_flattened_params = any([project_id, zone, cluster_id, addons_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1032,7 +1075,8 @@ async def set_locations(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, locations]):
+ has_flattened_params = any([project_id, zone, cluster_id, locations])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1049,8 +1093,9 @@ async def set_locations(
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
- if locations is not None:
- request.locations = locations
+
+ if locations:
+ request.locations.extend(locations)
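
Repeated proto-plus fields behave like mutable sequences, which is what the extend() above relies on; a minimal sketch with placeholder values:

from google.cloud.container_v1beta1.types import cluster_service

request = cluster_service.SetLocationsRequest(cluster_id="my-cluster")
request.locations.extend(["us-central1-a", "us-central1-b"])
assert list(request.locations) == ["us-central1-a", "us-central1-b"]
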
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -1152,7 +1197,8 @@ async def update_master(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, master_version]):
+ has_flattened_params = any([project_id, zone, cluster_id, master_version])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1318,7 +1364,8 @@ async def delete_cluster(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id]):
+ has_flattened_params = any([project_id, zone, cluster_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1345,7 +1392,7 @@ async def delete_cluster(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -1416,7 +1463,8 @@ async def list_operations(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone]):
+ has_flattened_params = any([project_id, zone])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1441,7 +1489,7 @@ async def list_operations(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -1520,7 +1568,8 @@ async def get_operation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, operation_id]):
+ has_flattened_params = any([project_id, zone, operation_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1547,7 +1596,7 @@ async def get_operation(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -1618,7 +1667,8 @@ async def cancel_operation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, operation_id]):
+ has_flattened_params = any([project_id, zone, operation_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1706,7 +1756,8 @@ async def get_server_config(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone]):
+ has_flattened_params = any([project_id, zone])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1731,7 +1782,7 @@ async def get_server_config(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -1808,7 +1859,8 @@ async def list_node_pools(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id]):
+ has_flattened_params = any([project_id, zone, cluster_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1835,7 +1887,7 @@ async def list_node_pools(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -1929,7 +1981,8 @@ async def get_node_pool(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, node_pool_id]):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1958,7 +2011,7 @@ async def get_node_pool(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -2043,7 +2096,8 @@ async def create_node_pool(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, node_pool]):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2152,7 +2206,8 @@ async def delete_node_pool(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, node_pool_id]):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2181,7 +2236,7 @@ async def delete_node_pool(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -2274,7 +2329,8 @@ async def rollback_node_pool_upgrade(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, node_pool_id]):
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2391,9 +2447,10 @@ async def set_node_pool_management(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
+ has_flattened_params = any(
[project_id, zone, cluster_id, node_pool_id, management]
- ):
+ )
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2522,9 +2579,10 @@ async def set_labels(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
+ has_flattened_params = any(
[project_id, zone, cluster_id, resource_labels, label_fingerprint]
- ):
+ )
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2541,11 +2599,12 @@ async def set_labels(
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
- if resource_labels is not None:
- request.resource_labels = resource_labels
if label_fingerprint is not None:
request.label_fingerprint = label_fingerprint
+ if resource_labels:
+ request.resource_labels.update(resource_labels)
+
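
The map-field counterpart of the locations change: resource_labels is a proto map, so flattened labels are merged with dict-style update(). A sketch with placeholder values:

from google.cloud.container_v1beta1.types import cluster_service

request = cluster_service.SetLabelsRequest(cluster_id="my-cluster")
request.resource_labels.update({"env": "prod", "team": "infra"})
assert dict(request.resource_labels) == {"env": "prod", "team": "infra"}
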
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
@@ -2635,7 +2694,8 @@ async def set_legacy_abac(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, enabled]):
+ has_flattened_params = any([project_id, zone, cluster_id, enabled])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2736,7 +2796,8 @@ async def start_ip_rotation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id]):
+ has_flattened_params = any([project_id, zone, cluster_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2834,7 +2895,8 @@ async def complete_ip_rotation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id]):
+ has_flattened_params = any([project_id, zone, cluster_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2992,7 +3054,8 @@ async def set_network_policy(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([project_id, zone, cluster_id, network_policy]):
+ has_flattened_params = any([project_id, zone, cluster_id, network_policy])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -3095,9 +3158,8 @@ async def set_maintenance_policy(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any(
- [project_id, zone, cluster_id, maintenance_policy]
- ):
+ has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -3181,7 +3243,8 @@ async def list_usable_subnetworks(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -3204,7 +3267,7 @@ async def list_usable_subnetworks(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -3268,7 +3331,8 @@ async def list_locations(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -3291,7 +3355,7 @@ async def list_locations(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py
index afd63ed95b40..31d42dea80b2 100644
--- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py
+++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/client.py
@@ -19,10 +19,10 @@
from distutils import util
import os
import re
-from typing import Callable, Dict, Sequence, Tuple, Type, Union
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
@@ -130,12 +130,80 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> ClusterManagerTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ ClusterManagerTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
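
Round-tripping the new helpers (this relies on the named groups in the regexes above); values are placeholders.

from google.cloud.container_v1beta1 import ClusterManagerClient

path = ClusterManagerClient.common_location_path("my-project", "us-central1")
assert path == "projects/my-project/locations/us-central1"
assert ClusterManagerClient.parse_common_location_path(path) == {
    "project": "my-project",
    "location": "us-central1",
}
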
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
- transport: Union[str, ClusterManagerTransport] = None,
- client_options: ClientOptions = None,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, ClusterManagerTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the cluster manager client.
@@ -149,8 +217,8 @@ def __init__(
transport (Union[str, ~.ClusterManagerTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
+ client_options (client_options_lib.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
@@ -165,10 +233,10 @@ def __init__(
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -176,9 +244,9 @@ def __init__(
creation failed for any reason.
"""
if isinstance(client_options, dict):
- client_options = ClientOptions.from_dict(client_options)
+ client_options = client_options_lib.from_dict(client_options)
if client_options is None:
- client_options = ClientOptions.ClientOptions()
+ client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
@@ -1203,8 +1271,9 @@ def set_locations(
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
- if locations is not None:
- request.locations = locations
+
+ if locations:
+ request.locations.extend(locations)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -2668,11 +2737,12 @@ def set_labels(
request.zone = zone
if cluster_id is not None:
request.cluster_id = cluster_id
- if resource_labels is not None:
- request.resource_labels = resource_labels
if label_fingerprint is not None:
request.label_fingerprint = label_fingerprint
+ if resource_labels:
+ request.resource_labels.update(resource_labels)
+
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.set_labels]
diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py
index 975e5f7ce7ed..e021648f72de 100644
--- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py
+++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py
@@ -112,7 +112,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -125,7 +125,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -172,7 +172,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -185,7 +185,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -198,7 +198,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -214,7 +214,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -227,7 +227,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -240,7 +240,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -256,7 +256,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -304,7 +304,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
@@ -317,7 +317,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=20.0,
diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py
index f03d06b8df3f..9642b7b7292d 100644
--- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py
+++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py
@@ -90,10 +90,10 @@ def __init__(
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -102,6 +102,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -109,6 +111,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -145,6 +148,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
@@ -222,12 +226,8 @@ def create_channel(
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Return the channel from cache.
return self._grpc_channel
@property
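(Editor's note: the change above records whichever SSL channel credentials the transport ends up using — None when a pre-built channel is passed in, the mTLS credentials when api_mtls_endpoint is taken, and the caller-supplied object otherwise. A minimal sketch of the caller-facing side, assuming application default credentials are available; the host value and variable names are illustrative.)

import grpc
from google.cloud.container_v1beta1.services.cluster_manager.transports.grpc import (
    ClusterManagerGrpcTransport,
)

# Build TLS credentials from the system trust store and hand them to the
# transport; after this change the transport retains the same object on
# self._ssl_channel_credentials instead of discarding it.
ssl_creds = grpc.ssl_channel_credentials()
transport = ClusterManagerGrpcTransport(
    host="container.googleapis.com",
    ssl_channel_credentials=ssl_creds,
)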
diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py
index cca74eaa69bd..309593bbd4dc 100644
--- a/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py
+++ b/packages/google-cloud-container/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py
@@ -147,6 +147,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -154,6 +156,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -190,6 +193,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
diff --git a/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py b/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py
index 545933c25535..8248233b1e62 100644
--- a/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py
+++ b/packages/google-cloud-container/google/cloud/container_v1beta1/types/cluster_service.py
@@ -1146,9 +1146,9 @@ class Status(proto.Enum):
initial_node_count = proto.Field(proto.INT32, number=3)
- node_config = proto.Field(proto.MESSAGE, number=4, message=NodeConfig,)
+ node_config = proto.Field(proto.MESSAGE, number=4, message="NodeConfig",)
- master_auth = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,)
+ master_auth = proto.Field(proto.MESSAGE, number=5, message="MasterAuth",)
logging_service = proto.Field(proto.STRING, number=6)
@@ -1158,7 +1158,7 @@ class Status(proto.Enum):
cluster_ipv4_cidr = proto.Field(proto.STRING, number=9)
- addons_config = proto.Field(proto.MESSAGE, number=10, message=AddonsConfig,)
+ addons_config = proto.Field(proto.MESSAGE, number=10, message="AddonsConfig",)
subnetwork = proto.Field(proto.STRING, number=11)
@@ -1172,16 +1172,16 @@ class Status(proto.Enum):
label_fingerprint = proto.Field(proto.STRING, number=16)
- legacy_abac = proto.Field(proto.MESSAGE, number=18, message=LegacyAbac,)
+ legacy_abac = proto.Field(proto.MESSAGE, number=18, message="LegacyAbac",)
- network_policy = proto.Field(proto.MESSAGE, number=19, message=NetworkPolicy,)
+ network_policy = proto.Field(proto.MESSAGE, number=19, message="NetworkPolicy",)
ip_allocation_policy = proto.Field(
- proto.MESSAGE, number=20, message=IPAllocationPolicy,
+ proto.MESSAGE, number=20, message="IPAllocationPolicy",
)
master_authorized_networks_config = proto.Field(
- proto.MESSAGE, number=22, message=MasterAuthorizedNetworksConfig,
+ proto.MESSAGE, number=22, message="MasterAuthorizedNetworksConfig",
)
maintenance_policy = proto.Field(
@@ -1189,11 +1189,11 @@ class Status(proto.Enum):
)
binary_authorization = proto.Field(
- proto.MESSAGE, number=24, message=BinaryAuthorization,
+ proto.MESSAGE, number=24, message="BinaryAuthorization",
)
pod_security_policy_config = proto.Field(
- proto.MESSAGE, number=25, message=PodSecurityPolicyConfig,
+ proto.MESSAGE, number=25, message="PodSecurityPolicyConfig",
)
autoscaling = proto.Field(proto.MESSAGE, number=26, message="ClusterAutoscaling",)
@@ -1213,11 +1213,11 @@ class Status(proto.Enum):
)
authenticator_groups_config = proto.Field(
- proto.MESSAGE, number=34, message=AuthenticatorGroupsConfig,
+ proto.MESSAGE, number=34, message="AuthenticatorGroupsConfig",
)
private_cluster_config = proto.Field(
- proto.MESSAGE, number=37, message=PrivateClusterConfig,
+ proto.MESSAGE, number=37, message="PrivateClusterConfig",
)
vertical_pod_autoscaling = proto.Field(
@@ -1370,7 +1370,9 @@ class ClusterUpdate(proto.Message):
desired_monitoring_service = proto.Field(proto.STRING, number=5)
- desired_addons_config = proto.Field(proto.MESSAGE, number=6, message=AddonsConfig,)
+ desired_addons_config = proto.Field(
+ proto.MESSAGE, number=6, message="AddonsConfig",
+ )
desired_node_pool_id = proto.Field(proto.STRING, number=7)
@@ -1383,11 +1385,11 @@ class ClusterUpdate(proto.Message):
desired_locations = proto.RepeatedField(proto.STRING, number=10)
desired_master_authorized_networks_config = proto.Field(
- proto.MESSAGE, number=12, message=MasterAuthorizedNetworksConfig,
+ proto.MESSAGE, number=12, message="MasterAuthorizedNetworksConfig",
)
desired_pod_security_policy_config = proto.Field(
- proto.MESSAGE, number=14, message=PodSecurityPolicyConfig,
+ proto.MESSAGE, number=14, message="PodSecurityPolicyConfig",
)
desired_cluster_autoscaling = proto.Field(
@@ -1395,7 +1397,7 @@ class ClusterUpdate(proto.Message):
)
desired_binary_authorization = proto.Field(
- proto.MESSAGE, number=16, message=BinaryAuthorization,
+ proto.MESSAGE, number=16, message="BinaryAuthorization",
)
desired_logging_service = proto.Field(proto.STRING, number=19)
@@ -1573,7 +1575,7 @@ class Metric(proto.Message):
name = proto.Field(proto.STRING, number=1)
- status = proto.Field(proto.ENUM, number=2, enum=Operation.Status,)
+ status = proto.Field(proto.ENUM, number=2, enum="Operation.Status",)
metrics = proto.RepeatedField(proto.MESSAGE, number=3, message=Metric,)
@@ -1607,7 +1609,7 @@ class CreateClusterRequest(proto.Message):
zone = proto.Field(proto.STRING, number=2)
- cluster = proto.Field(proto.MESSAGE, number=3, message=Cluster,)
+ cluster = proto.Field(proto.MESSAGE, number=3, message="Cluster",)
parent = proto.Field(proto.STRING, number=5)
@@ -1679,7 +1681,7 @@ class UpdateClusterRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
- update = proto.Field(proto.MESSAGE, number=4, message=ClusterUpdate,)
+ update = proto.Field(proto.MESSAGE, number=4, message="ClusterUpdate",)
name = proto.Field(proto.STRING, number=5)
@@ -1919,7 +1921,7 @@ class SetAddonsConfigRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
- addons_config = proto.Field(proto.MESSAGE, number=4, message=AddonsConfig,)
+ addons_config = proto.Field(proto.MESSAGE, number=4, message="AddonsConfig",)
name = proto.Field(proto.STRING, number=6)
@@ -2063,7 +2065,7 @@ class Action(proto.Enum):
action = proto.Field(proto.ENUM, number=4, enum=Action,)
- update = proto.Field(proto.MESSAGE, number=5, message=MasterAuth,)
+ update = proto.Field(proto.MESSAGE, number=5, message="MasterAuth",)
name = proto.Field(proto.STRING, number=7)
@@ -2142,7 +2144,7 @@ class ListClustersResponse(proto.Message):
clusters returned may be missing those zones.
"""
- clusters = proto.RepeatedField(proto.MESSAGE, number=1, message=Cluster,)
+ clusters = proto.RepeatedField(proto.MESSAGE, number=1, message="Cluster",)
missing_zones = proto.RepeatedField(proto.STRING, number=2)
@@ -2258,7 +2260,7 @@ class ListOperationsResponse(proto.Message):
operations from those zones.
"""
- operations = proto.RepeatedField(proto.MESSAGE, number=1, message=Operation,)
+ operations = proto.RepeatedField(proto.MESSAGE, number=1, message="Operation",)
missing_zones = proto.RepeatedField(proto.STRING, number=2)
@@ -2534,7 +2536,7 @@ class Status(proto.Enum):
name = proto.Field(proto.STRING, number=1)
- config = proto.Field(proto.MESSAGE, number=2, message=NodeConfig,)
+ config = proto.Field(proto.MESSAGE, number=2, message="NodeConfig",)
initial_node_count = proto.Field(proto.INT32, number=3)
@@ -2723,7 +2725,7 @@ class RecurringTimeWindow(proto.Message):
supported.
"""
- window = proto.Field(proto.MESSAGE, number=1, message=TimeWindow,)
+ window = proto.Field(proto.MESSAGE, number=1, message="TimeWindow",)
recurrence = proto.Field(proto.STRING, number=2)
@@ -2787,7 +2789,7 @@ class SetNodePoolManagementRequest(proto.Message):
node_pool_id = proto.Field(proto.STRING, number=4)
- management = proto.Field(proto.MESSAGE, number=5, message=NodeManagement,)
+ management = proto.Field(proto.MESSAGE, number=5, message="NodeManagement",)
name = proto.Field(proto.STRING, number=7)
@@ -2888,7 +2890,7 @@ class ListNodePoolsResponse(proto.Message):
A list of node pools for a cluster.
"""
- node_pools = proto.RepeatedField(proto.MESSAGE, number=1, message=NodePool,)
+ node_pools = proto.RepeatedField(proto.MESSAGE, number=1, message="NodePool",)
class ClusterAutoscaling(proto.Message):
@@ -3239,7 +3241,7 @@ class SetNetworkPolicyRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
- network_policy = proto.Field(proto.MESSAGE, number=4, message=NetworkPolicy,)
+ network_policy = proto.Field(proto.MESSAGE, number=4, message="NetworkPolicy",)
name = proto.Field(proto.STRING, number=6)
@@ -3276,7 +3278,7 @@ class SetMaintenancePolicyRequest(proto.Message):
cluster_id = proto.Field(proto.STRING, number=3)
maintenance_policy = proto.Field(
- proto.MESSAGE, number=4, message=MaintenancePolicy,
+ proto.MESSAGE, number=4, message="MaintenancePolicy",
)
name = proto.Field(proto.STRING, number=5)
@@ -3324,7 +3326,7 @@ class Location(proto.Message):
recommended for GKE cluster scheduling.
Attributes:
- type (~.cluster_service.Location.LocationType):
+ type_ (~.cluster_service.Location.LocationType):
Contains the type of location this Location
is for. Regional or Zonal.
name (str):
@@ -3341,7 +3343,7 @@ class LocationType(proto.Enum):
ZONE = 1
REGION = 2
- type = proto.Field(proto.ENUM, number=1, enum=LocationType,)
+ type_ = proto.Field(proto.ENUM, number=1, enum=LocationType,)
name = proto.Field(proto.STRING, number=2)
@@ -3530,7 +3532,7 @@ class UsableSubnetwork(proto.Message):
ip_cidr_range = proto.Field(proto.STRING, number=3)
secondary_ip_ranges = proto.RepeatedField(
- proto.MESSAGE, number=4, message=UsableSubnetworkSecondaryRange,
+ proto.MESSAGE, number=4, message="UsableSubnetworkSecondaryRange",
)
status_message = proto.Field(proto.STRING, number=5)
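(Editor's note: every hunk in this file swaps a direct class reference such as message=NodeConfig for its name as a string, message="NodeConfig". proto-plus resolves string references by name, so a field can point at a message defined later in the module. A minimal sketch of the pattern, with hypothetical message names:)

import proto

class PoolSketch(proto.Message):
    # "ConfigSketch" is resolved by name, so it may be defined further down.
    config = proto.Field(proto.MESSAGE, number=1, message="ConfigSketch")

class ConfigSketch(proto.Message):
    machine_type = proto.Field(proto.STRING, number=1)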
diff --git a/packages/google-cloud-container/noxfile.py b/packages/google-cloud-container/noxfile.py
index 810c95413e06..548b398c168f 100644
--- a/packages/google-cloud-container/noxfile.py
+++ b/packages/google-cloud-container/noxfile.py
@@ -28,7 +28,7 @@
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
-UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
@nox.session(python=DEFAULT_PYTHON_VERSION)
@@ -72,7 +72,9 @@ def default(session):
# Install all test dependencies, then install this package in-place.
session.install("asyncmock", "pytest-asyncio")
- session.install("mock", "pytest", "pytest-cov")
+ session.install(
+ "mock", "pytest", "pytest-cov",
+ )
session.install("-e", ".")
# Run py.test against the unit tests.
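(Editor's note: with 3.9 added to UNIT_TEST_PYTHON_VERSIONS, nox runs the same unit session once per listed interpreter. An abbreviated sketch of how such a list is consumed; the session body is trimmed to the essentials shown in the hunk above.)

import nox

UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]

@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
    # One virtualenv per listed interpreter; each gets the same dependencies.
    session.install(
        "mock", "pytest", "pytest-cov",
    )
    session.install("-e", ".")
    session.run("py.test", "tests/unit")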
diff --git a/packages/google-cloud-container/scripts/fixup_container_v1_keywords.py b/packages/google-cloud-container/scripts/fixup_container_v1_keywords.py
index 1c87db6015be..25973d5eec21 100644
--- a/packages/google-cloud-container/scripts/fixup_container_v1_keywords.py
+++ b/packages/google-cloud-container/scripts/fixup_container_v1_keywords.py
@@ -1,3 +1,4 @@
+#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
@@ -47,6 +48,7 @@ class containerCallTransformer(cst.CSTTransformer):
'delete_cluster': ('project_id', 'zone', 'cluster_id', 'name', ),
'delete_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ),
'get_cluster': ('project_id', 'zone', 'cluster_id', 'name', ),
+ 'get_json_web_keys': ('parent', ),
'get_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ),
'get_operation': ('project_id', 'zone', 'operation_id', 'name', ),
'get_server_config': ('project_id', 'zone', 'name', ),
@@ -70,7 +72,7 @@ class containerCallTransformer(cst.CSTTransformer):
'start_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', 'rotate_credentials', ),
'update_cluster': ('update', 'project_id', 'zone', 'cluster_id', 'name', ),
'update_master': ('master_version', 'project_id', 'zone', 'cluster_id', 'name', ),
- 'update_node_pool': ('node_version', 'image_type', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ),
+ 'update_node_pool': ('node_version', 'image_type', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', 'locations', 'workload_metadata_config', 'upgrade_settings', ),
}
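(Editor's note: the widened tuple teaches the fixup script about the three parameters update_node_pool gained. The script uses libcst to fold recognized parameters into the new request= dict while leaving control arguments such as retry, timeout, and metadata as keywords. Roughly, with placeholder values:)

# before: client.update_node_pool(project_id="p", zone="z",
#                                 cluster_id="c", node_pool_id="np")
# after:  client.update_node_pool(request={
#             "project_id": "p", "zone": "z",
#             "cluster_id": "c", "node_pool_id": "np",
#         })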
diff --git a/packages/google-cloud-container/scripts/fixup_container_v1beta1_keywords.py b/packages/google-cloud-container/scripts/fixup_container_v1beta1_keywords.py
index 0c1ba06e9fa1..f06d07fdb9b9 100644
--- a/packages/google-cloud-container/scripts/fixup_container_v1beta1_keywords.py
+++ b/packages/google-cloud-container/scripts/fixup_container_v1beta1_keywords.py
@@ -1,3 +1,4 @@
+#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
diff --git a/packages/google-cloud-container/synth.metadata b/packages/google-cloud-container/synth.metadata
index 8e1254cb138e..08d174afe75a 100644
--- a/packages/google-cloud-container/synth.metadata
+++ b/packages/google-cloud-container/synth.metadata
@@ -4,14 +4,22 @@
"git": {
"name": ".",
"remote": "https://github.com/googleapis/python-container.git",
- "sha": "e9e9e85f3177009648c1f77e242b6925b1f0428e"
+ "sha": "2f3ba9672930d7698bc8808decfce90646723a82"
+ }
+ },
+ {
+ "git": {
+ "name": "googleapis",
+ "remote": "https://github.com/googleapis/googleapis.git",
+ "sha": "3ac5ef0436d8dfeb2ca0091dc7fa8012da1c85af",
+ "internalRef": "342835449"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "dba48bb9bc6959c232bec9150ac6313b608fe7bd"
+ "sha": "6542bd723403513626f61642fc02ddca528409aa"
}
}
],
@@ -34,5 +42,112 @@
"generator": "bazel"
}
}
+ ],
+ "generatedFiles": [
+ ".flake8",
+ ".github/CONTRIBUTING.md",
+ ".github/ISSUE_TEMPLATE/bug_report.md",
+ ".github/ISSUE_TEMPLATE/feature_request.md",
+ ".github/ISSUE_TEMPLATE/support_request.md",
+ ".github/PULL_REQUEST_TEMPLATE.md",
+ ".github/release-please.yml",
+ ".github/snippet-bot.yml",
+ ".gitignore",
+ ".kokoro/build.sh",
+ ".kokoro/continuous/common.cfg",
+ ".kokoro/continuous/continuous.cfg",
+ ".kokoro/docker/docs/Dockerfile",
+ ".kokoro/docker/docs/fetch_gpg_keys.sh",
+ ".kokoro/docs/common.cfg",
+ ".kokoro/docs/docs-presubmit.cfg",
+ ".kokoro/docs/docs.cfg",
+ ".kokoro/populate-secrets.sh",
+ ".kokoro/presubmit/common.cfg",
+ ".kokoro/presubmit/presubmit.cfg",
+ ".kokoro/publish-docs.sh",
+ ".kokoro/release.sh",
+ ".kokoro/release/common.cfg",
+ ".kokoro/release/release.cfg",
+ ".kokoro/samples/lint/common.cfg",
+ ".kokoro/samples/lint/continuous.cfg",
+ ".kokoro/samples/lint/periodic.cfg",
+ ".kokoro/samples/lint/presubmit.cfg",
+ ".kokoro/samples/python3.6/common.cfg",
+ ".kokoro/samples/python3.6/continuous.cfg",
+ ".kokoro/samples/python3.6/periodic.cfg",
+ ".kokoro/samples/python3.6/presubmit.cfg",
+ ".kokoro/samples/python3.7/common.cfg",
+ ".kokoro/samples/python3.7/continuous.cfg",
+ ".kokoro/samples/python3.7/periodic.cfg",
+ ".kokoro/samples/python3.7/presubmit.cfg",
+ ".kokoro/samples/python3.8/common.cfg",
+ ".kokoro/samples/python3.8/continuous.cfg",
+ ".kokoro/samples/python3.8/periodic.cfg",
+ ".kokoro/samples/python3.8/presubmit.cfg",
+ ".kokoro/test-samples.sh",
+ ".kokoro/trampoline.sh",
+ ".kokoro/trampoline_v2.sh",
+ ".trampolinerc",
+ "CODE_OF_CONDUCT.md",
+ "CONTRIBUTING.rst",
+ "LICENSE",
+ "MANIFEST.in",
+ "docs/_static/custom.css",
+ "docs/_templates/layout.html",
+ "docs/conf.py",
+ "docs/container_v1/services.rst",
+ "docs/container_v1/types.rst",
+ "docs/container_v1beta1/services.rst",
+ "docs/container_v1beta1/types.rst",
+ "docs/multiprocessing.rst",
+ "google/cloud/container/__init__.py",
+ "google/cloud/container/py.typed",
+ "google/cloud/container_v1/__init__.py",
+ "google/cloud/container_v1/proto/cluster_service.proto",
+ "google/cloud/container_v1/py.typed",
+ "google/cloud/container_v1/services/__init__.py",
+ "google/cloud/container_v1/services/cluster_manager/__init__.py",
+ "google/cloud/container_v1/services/cluster_manager/async_client.py",
+ "google/cloud/container_v1/services/cluster_manager/client.py",
+ "google/cloud/container_v1/services/cluster_manager/pagers.py",
+ "google/cloud/container_v1/services/cluster_manager/transports/__init__.py",
+ "google/cloud/container_v1/services/cluster_manager/transports/base.py",
+ "google/cloud/container_v1/services/cluster_manager/transports/grpc.py",
+ "google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py",
+ "google/cloud/container_v1/types/__init__.py",
+ "google/cloud/container_v1/types/cluster_service.py",
+ "google/cloud/container_v1beta1/__init__.py",
+ "google/cloud/container_v1beta1/proto/cluster_service.proto",
+ "google/cloud/container_v1beta1/py.typed",
+ "google/cloud/container_v1beta1/services/__init__.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/__init__.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/async_client.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/client.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/pagers.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/transports/__init__.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/transports/base.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py",
+ "google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py",
+ "google/cloud/container_v1beta1/types/__init__.py",
+ "google/cloud/container_v1beta1/types/cluster_service.py",
+ "noxfile.py",
+ "renovate.json",
+ "samples/AUTHORING_GUIDE.md",
+ "samples/CONTRIBUTING.md",
+ "scripts/decrypt-secrets.sh",
+ "scripts/fixup_container_v1_keywords.py",
+ "scripts/fixup_container_v1beta1_keywords.py",
+ "scripts/readme-gen/readme_gen.py",
+ "scripts/readme-gen/templates/README.tmpl.rst",
+ "scripts/readme-gen/templates/auth.tmpl.rst",
+ "scripts/readme-gen/templates/auth_api_key.tmpl.rst",
+ "scripts/readme-gen/templates/install_deps.tmpl.rst",
+ "scripts/readme-gen/templates/install_portaudio.tmpl.rst",
+ "setup.cfg",
+ "testing/.gitignore",
+ "tests/unit/gapic/container_v1/__init__.py",
+ "tests/unit/gapic/container_v1/test_cluster_manager.py",
+ "tests/unit/gapic/container_v1beta1/__init__.py",
+ "tests/unit/gapic/container_v1beta1/test_cluster_manager.py"
]
}
\ No newline at end of file
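(Editor's note: the test hunks below track two surface changes at once — the private client._transport attribute becomes the public client.transport property, and each async test takes a request_type parameter so a *_from_dict variant can reuse it. A minimal sketch of the shared mocking pattern, assuming the v1 client and anonymous credentials; response fields are trimmed.)

import mock
from google.auth import credentials
from google.cloud import container_v1
from google.cloud.container_v1.types import cluster_service

client = container_v1.ClusterManagerClient(
    credentials=credentials.AnonymousCredentials(),
)

# Patch the bound gRPC method on the public transport property, fake the
# response, and assert the client surfaces the expected type.
with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
    call.return_value = cluster_service.ListClustersResponse(
        missing_zones=["missing_zones_value"],
    )
    response = client.list_clusters()

assert isinstance(response, cluster_service.ListClustersResponse)
assert response.missing_zones == ["missing_zones_value"]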
diff --git a/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py b/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py
index ff61986a4efc..04ced1bd37bc 100644
--- a/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py
+++ b/packages/google-cloud-container/tests/unit/gapic/container_v1/test_cluster_manager.py
@@ -95,12 +95,12 @@ def test_cluster_manager_client_from_service_account_file(client_class):
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
- assert client._transport._host == "container.googleapis.com:443"
+ assert client.transport._host == "container.googleapis.com:443"
def test_cluster_manager_client_get_transport_class():
@@ -450,7 +450,7 @@ def test_list_clusters(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_clusters), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse(
missing_zones=["missing_zones_value"],
@@ -465,6 +465,7 @@ def test_list_clusters(
assert args[0] == cluster_service.ListClustersRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.ListClustersResponse)
assert response.missing_zones == ["missing_zones_value"]
@@ -475,19 +476,19 @@ def test_list_clusters_from_dict():
@pytest.mark.asyncio
-async def test_list_clusters_async(transport: str = "grpc_asyncio"):
+async def test_list_clusters_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.ListClustersRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListClustersRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_clusters), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListClustersResponse(missing_zones=["missing_zones_value"],)
@@ -499,7 +500,7 @@ async def test_list_clusters_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListClustersResponse)
@@ -507,6 +508,11 @@ async def test_list_clusters_async(transport: str = "grpc_asyncio"):
assert response.missing_zones == ["missing_zones_value"]
+@pytest.mark.asyncio
+async def test_list_clusters_async_from_dict():
+ await test_list_clusters_async(request_type=dict)
+
+
def test_list_clusters_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -516,7 +522,7 @@ def test_list_clusters_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_clusters), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
call.return_value = cluster_service.ListClustersResponse()
client.list_clusters(request)
@@ -541,9 +547,7 @@ async def test_list_clusters_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_clusters), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListClustersResponse()
)
@@ -564,7 +568,7 @@ def test_list_clusters_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_clusters), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse()
@@ -605,9 +609,7 @@ async def test_list_clusters_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_clusters), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse()
@@ -659,7 +661,7 @@ def test_get_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster(
name="name_value",
@@ -701,6 +703,7 @@ def test_get_cluster(
assert args[0] == cluster_service.GetClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Cluster)
assert response.name == "name_value"
@@ -765,19 +768,19 @@ def test_get_cluster_from_dict():
@pytest.mark.asyncio
-async def test_get_cluster_async(transport: str = "grpc_asyncio"):
+async def test_get_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Cluster(
@@ -818,7 +821,7 @@ async def test_get_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Cluster)
@@ -880,6 +883,11 @@ async def test_get_cluster_async(transport: str = "grpc_asyncio"):
assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value"
+@pytest.mark.asyncio
+async def test_get_cluster_async_from_dict():
+ await test_get_cluster_async(request_type=dict)
+
+
def test_get_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -889,7 +897,7 @@ def test_get_cluster_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
call.return_value = cluster_service.Cluster()
client.get_cluster(request)
@@ -914,9 +922,7 @@ async def test_get_cluster_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Cluster()
)
@@ -937,7 +943,7 @@ def test_get_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster()
@@ -984,9 +990,7 @@ async def test_get_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster()
@@ -1044,7 +1048,7 @@ def test_create_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -1069,6 +1073,7 @@ def test_create_cluster(
assert args[0] == cluster_service.CreateClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1099,19 +1104,19 @@ def test_create_cluster_from_dict():
@pytest.mark.asyncio
-async def test_create_cluster_async(transport: str = "grpc_asyncio"):
+async def test_create_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.CreateClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CreateClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -1135,7 +1140,7 @@ async def test_create_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CreateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1163,6 +1168,11 @@ async def test_create_cluster_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_create_cluster_async_from_dict():
+ await test_create_cluster_async(request_type=dict)
+
+
def test_create_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1172,7 +1182,7 @@ def test_create_cluster_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.create_cluster(request)
@@ -1197,9 +1207,7 @@ async def test_create_cluster_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -1220,7 +1228,7 @@ def test_create_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1267,9 +1275,7 @@ async def test_create_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1327,7 +1333,7 @@ def test_update_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -1352,6 +1358,7 @@ def test_update_cluster(
assert args[0] == cluster_service.UpdateClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1382,19 +1389,19 @@ def test_update_cluster_from_dict():
@pytest.mark.asyncio
-async def test_update_cluster_async(transport: str = "grpc_asyncio"):
+async def test_update_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.UpdateClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.UpdateClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -1418,7 +1425,7 @@ async def test_update_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.UpdateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1446,6 +1453,11 @@ async def test_update_cluster_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_update_cluster_async_from_dict():
+ await test_update_cluster_async(request_type=dict)
+
+
def test_update_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1455,7 +1467,7 @@ def test_update_cluster_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_cluster(request)
@@ -1480,9 +1492,7 @@ async def test_update_cluster_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -1503,7 +1513,7 @@ def test_update_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1560,9 +1570,7 @@ async def test_update_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1630,9 +1638,7 @@ def test_update_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -1657,6 +1663,7 @@ def test_update_node_pool(
assert args[0] == cluster_service.UpdateNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1687,19 +1694,19 @@ def test_update_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_update_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_update_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.UpdateNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.UpdateNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -1723,7 +1730,7 @@ async def test_update_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.UpdateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1751,6 +1758,11 @@ async def test_update_node_pool_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_update_node_pool_async_from_dict():
+ await test_update_node_pool_async(request_type=dict)
+
+
def test_update_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1760,9 +1772,7 @@ def test_update_node_pool_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_node_pool(request)
@@ -1787,9 +1797,7 @@ async def test_update_node_pool_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -1819,7 +1827,7 @@ def test_set_node_pool_autoscaling(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -1845,6 +1853,7 @@ def test_set_node_pool_autoscaling(
assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1875,18 +1884,21 @@ def test_set_node_pool_autoscaling_from_dict():
@pytest.mark.asyncio
-async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"):
+async def test_set_node_pool_autoscaling_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetNodePoolAutoscalingRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNodePoolAutoscalingRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -1911,7 +1923,7 @@ async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1939,6 +1951,11 @@ async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_node_pool_autoscaling_async_from_dict():
+ await test_set_node_pool_autoscaling_async(request_type=dict)
+
+
def test_set_node_pool_autoscaling_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1949,7 +1966,7 @@ def test_set_node_pool_autoscaling_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -1976,7 +1993,7 @@ async def test_set_node_pool_autoscaling_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -2007,7 +2024,7 @@ def test_set_logging_service(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -2033,6 +2050,7 @@ def test_set_logging_service(
assert args[0] == cluster_service.SetLoggingServiceRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2063,18 +2081,21 @@ def test_set_logging_service_from_dict():
@pytest.mark.asyncio
-async def test_set_logging_service_async(transport: str = "grpc_asyncio"):
+async def test_set_logging_service_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetLoggingServiceRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLoggingServiceRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2099,7 +2120,7 @@ async def test_set_logging_service_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLoggingServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -2127,6 +2148,11 @@ async def test_set_logging_service_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_logging_service_async_from_dict():
+ await test_set_logging_service_async(request_type=dict)
+
+
def test_set_logging_service_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -2137,7 +2163,7 @@ def test_set_logging_service_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -2164,7 +2190,7 @@ async def test_set_logging_service_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -2187,7 +2213,7 @@ def test_set_logging_service_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2240,7 +2266,7 @@ async def test_set_logging_service_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2304,7 +2330,7 @@ def test_set_monitoring_service(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -2330,6 +2356,7 @@ def test_set_monitoring_service(
assert args[0] == cluster_service.SetMonitoringServiceRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2360,18 +2387,21 @@ def test_set_monitoring_service_from_dict():
@pytest.mark.asyncio
-async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"):
+async def test_set_monitoring_service_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetMonitoringServiceRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetMonitoringServiceRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2396,7 +2426,7 @@ async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetMonitoringServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -2424,6 +2454,11 @@ async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_monitoring_service_async_from_dict():
+ await test_set_monitoring_service_async(request_type=dict)
+
+
def test_set_monitoring_service_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -2434,7 +2469,7 @@ def test_set_monitoring_service_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -2461,7 +2496,7 @@ async def test_set_monitoring_service_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -2484,7 +2519,7 @@ def test_set_monitoring_service_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2537,7 +2572,7 @@ async def test_set_monitoring_service_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2601,7 +2636,7 @@ def test_set_addons_config(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -2627,6 +2662,7 @@ def test_set_addons_config(
assert args[0] == cluster_service.SetAddonsConfigRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2657,18 +2693,20 @@ def test_set_addons_config_from_dict():
@pytest.mark.asyncio
-async def test_set_addons_config_async(transport: str = "grpc_asyncio"):
+async def test_set_addons_config_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetAddonsConfigRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetAddonsConfigRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2693,7 +2731,7 @@ async def test_set_addons_config_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetAddonsConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -2721,6 +2759,11 @@ async def test_set_addons_config_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_addons_config_async_from_dict():
+ await test_set_addons_config_async(request_type=dict)
+
+
def test_set_addons_config_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -2731,7 +2774,7 @@ def test_set_addons_config_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -2758,7 +2801,7 @@ async def test_set_addons_config_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -2781,7 +2824,7 @@ def test_set_addons_config_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2840,7 +2883,7 @@ async def test_set_addons_config_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2909,7 +2952,7 @@ def test_set_locations(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -2934,6 +2977,7 @@ def test_set_locations(
assert args[0] == cluster_service.SetLocationsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2964,19 +3008,19 @@ def test_set_locations_from_dict():
@pytest.mark.asyncio
-async def test_set_locations_async(transport: str = "grpc_asyncio"):
+async def test_set_locations_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetLocationsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLocationsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -3000,7 +3044,7 @@ async def test_set_locations_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLocationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -3028,6 +3072,11 @@ async def test_set_locations_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_locations_async_from_dict():
+ await test_set_locations_async(request_type=dict)
+
+
def test_set_locations_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3037,7 +3086,7 @@ def test_set_locations_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_locations(request)
@@ -3062,9 +3111,7 @@ async def test_set_locations_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3085,7 +3132,7 @@ def test_set_locations_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3136,9 +3183,7 @@ async def test_set_locations_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3200,7 +3245,7 @@ def test_update_master(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_master), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -3225,6 +3270,7 @@ def test_update_master(
assert args[0] == cluster_service.UpdateMasterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -3255,19 +3301,19 @@ def test_update_master_from_dict():
@pytest.mark.asyncio
-async def test_update_master_async(transport: str = "grpc_asyncio"):
+async def test_update_master_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.UpdateMasterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.UpdateMasterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_master), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -3291,7 +3337,7 @@ async def test_update_master_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.UpdateMasterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -3319,6 +3365,11 @@ async def test_update_master_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_update_master_async_from_dict():
+ await test_update_master_async(request_type=dict)
+
+
def test_update_master_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3328,7 +3379,7 @@ def test_update_master_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_master), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_master(request)
@@ -3353,9 +3404,7 @@ async def test_update_master_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_master), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3376,7 +3425,7 @@ def test_update_master_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_master), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3427,9 +3476,7 @@ async def test_update_master_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_master), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3491,7 +3538,7 @@ def test_set_master_auth(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -3516,6 +3563,7 @@ def test_set_master_auth(
assert args[0] == cluster_service.SetMasterAuthRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -3546,19 +3594,19 @@ def test_set_master_auth_from_dict():
@pytest.mark.asyncio
-async def test_set_master_auth_async(transport: str = "grpc_asyncio"):
+async def test_set_master_auth_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetMasterAuthRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetMasterAuthRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_master_auth), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -3582,7 +3630,7 @@ async def test_set_master_auth_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetMasterAuthRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -3610,6 +3658,11 @@ async def test_set_master_auth_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_master_auth_async_from_dict():
+ await test_set_master_auth_async(request_type=dict)
+
+
def test_set_master_auth_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3619,7 +3672,7 @@ def test_set_master_auth_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_master_auth(request)
@@ -3644,9 +3697,7 @@ async def test_set_master_auth_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_master_auth), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3675,7 +3726,7 @@ def test_delete_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -3700,6 +3751,7 @@ def test_delete_cluster(
assert args[0] == cluster_service.DeleteClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -3730,19 +3782,19 @@ def test_delete_cluster_from_dict():
@pytest.mark.asyncio
-async def test_delete_cluster_async(transport: str = "grpc_asyncio"):
+async def test_delete_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.DeleteClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.DeleteClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -3766,7 +3818,7 @@ async def test_delete_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.DeleteClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -3794,6 +3846,11 @@ async def test_delete_cluster_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_delete_cluster_async_from_dict():
+ await test_delete_cluster_async(request_type=dict)
+
+
def test_delete_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3803,7 +3860,7 @@ def test_delete_cluster_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.delete_cluster(request)
@@ -3828,9 +3885,7 @@ async def test_delete_cluster_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3851,7 +3906,7 @@ def test_delete_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3898,9 +3953,7 @@ async def test_delete_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3958,7 +4011,7 @@ def test_list_operations(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_operations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse(
missing_zones=["missing_zones_value"],
@@ -3973,6 +4026,7 @@ def test_list_operations(
assert args[0] == cluster_service.ListOperationsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.ListOperationsResponse)
assert response.missing_zones == ["missing_zones_value"]
@@ -3983,19 +4037,19 @@ def test_list_operations_from_dict():
@pytest.mark.asyncio
-async def test_list_operations_async(transport: str = "grpc_asyncio"):
+async def test_list_operations_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.ListOperationsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListOperationsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_operations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListOperationsResponse(
@@ -4009,7 +4063,7 @@ async def test_list_operations_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListOperationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListOperationsResponse)
@@ -4017,6 +4071,11 @@ async def test_list_operations_async(transport: str = "grpc_asyncio"):
assert response.missing_zones == ["missing_zones_value"]
+@pytest.mark.asyncio
+async def test_list_operations_async_from_dict():
+ await test_list_operations_async(request_type=dict)
+
+
def test_list_operations_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4026,7 +4085,7 @@ def test_list_operations_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_operations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
call.return_value = cluster_service.ListOperationsResponse()
client.list_operations(request)
@@ -4051,9 +4110,7 @@ async def test_list_operations_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_operations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListOperationsResponse()
)
@@ -4074,7 +4131,7 @@ def test_list_operations_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_operations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse()
@@ -4112,9 +4169,7 @@ async def test_list_operations_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_operations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse()
@@ -4163,7 +4218,7 @@ def test_get_operation(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_operation), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -4188,6 +4243,7 @@ def test_get_operation(
assert args[0] == cluster_service.GetOperationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -4218,19 +4274,19 @@ def test_get_operation_from_dict():
@pytest.mark.asyncio
-async def test_get_operation_async(transport: str = "grpc_asyncio"):
+async def test_get_operation_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetOperationRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetOperationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -4254,7 +4310,7 @@ async def test_get_operation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetOperationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -4282,6 +4338,11 @@ async def test_get_operation_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_get_operation_async_from_dict():
+ await test_get_operation_async(request_type=dict)
+
+
def test_get_operation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4291,7 +4352,7 @@ def test_get_operation_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_operation), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
call.return_value = cluster_service.Operation()
client.get_operation(request)
@@ -4316,9 +4377,7 @@ async def test_get_operation_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -4339,7 +4398,7 @@ def test_get_operation_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_operation), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -4349,6 +4408,7 @@ def test_get_operation_flattened():
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
+ name="name_value",
)
# Establish that the underlying call was made with the expected
@@ -4362,6 +4422,8 @@ def test_get_operation_flattened():
assert args[0].operation_id == "operation_id_value"
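+ # The flattened signature now also accepts the name field, alongside the
+ # legacy project_id/zone/operation_id identifiers.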
+ assert args[0].name == "name_value"
+
def test_get_operation_flattened_error():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4374,6 +4436,7 @@ def test_get_operation_flattened_error():
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
+ name="name_value",
)
@@ -4382,9 +4445,7 @@ async def test_get_operation_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -4397,6 +4458,7 @@ async def test_get_operation_flattened_async():
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
+ name="name_value",
)
# Establish that the underlying call was made with the expected
@@ -4410,6 +4472,8 @@ async def test_get_operation_flattened_async():
assert args[0].operation_id == "operation_id_value"
+ assert args[0].name == "name_value"
+
@pytest.mark.asyncio
async def test_get_operation_flattened_error_async():
@@ -4423,6 +4487,7 @@ async def test_get_operation_flattened_error_async():
project_id="project_id_value",
zone="zone_value",
operation_id="operation_id_value",
+ name="name_value",
)
@@ -4438,9 +4503,7 @@ def test_cancel_operation(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -4461,19 +4524,19 @@ def test_cancel_operation_from_dict():
@pytest.mark.asyncio
-async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+async def test_cancel_operation_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.CancelOperationRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CancelOperationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
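+ # CancelOperation maps to google.protobuf.Empty, so the client surfaces
+ # None; resolve the fake call to None accordingly.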
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
@@ -4483,12 +4546,17 @@ async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CancelOperationRequest()
# Establish that the response is the type that we expect.
assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async_from_dict():
+ await test_cancel_operation_async(request_type=dict)
+
+
def test_cancel_operation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4498,9 +4566,7 @@ def test_cancel_operation_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
call.return_value = None
client.cancel_operation(request)
@@ -4525,9 +4591,7 @@ async def test_cancel_operation_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_operation(request)
@@ -4546,9 +4610,7 @@ def test_cancel_operation_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -4595,9 +4657,7 @@ async def test_cancel_operation_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -4654,7 +4714,7 @@ def test_get_server_config(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig(
@@ -4674,6 +4734,7 @@ def test_get_server_config(
assert args[0] == cluster_service.GetServerConfigRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.ServerConfig)
assert response.default_cluster_version == "default_cluster_version_value"
@@ -4692,18 +4753,20 @@ def test_get_server_config_from_dict():
@pytest.mark.asyncio
-async def test_get_server_config_async(transport: str = "grpc_asyncio"):
+async def test_get_server_config_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetServerConfigRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetServerConfigRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -4722,7 +4785,7 @@ async def test_get_server_config_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetServerConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ServerConfig)
@@ -4738,6 +4801,11 @@ async def test_get_server_config_async(transport: str = "grpc_asyncio"):
assert response.valid_master_versions == ["valid_master_versions_value"]
+@pytest.mark.asyncio
+async def test_get_server_config_async_from_dict():
+ await test_get_server_config_async(request_type=dict)
+
+
def test_get_server_config_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4748,7 +4816,7 @@ def test_get_server_config_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
call.return_value = cluster_service.ServerConfig()
@@ -4775,7 +4843,7 @@ async def test_get_server_config_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ServerConfig()
@@ -4798,7 +4866,7 @@ def test_get_server_config_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig()
@@ -4841,7 +4909,7 @@ async def test_get_server_config_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig()
@@ -4882,8 +4950,8 @@ async def test_get_server_config_flattened_error_async():
)
-def test_list_node_pools(
- transport: str = "grpc", request_type=cluster_service.ListNodePoolsRequest
+def test_get_json_web_keys(
+ transport: str = "grpc", request_type=cluster_service.GetJSONWebKeysRequest
):
client = ClusterManagerClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
@@ -4894,55 +4962,189 @@ def test_list_node_pools(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call:
+ with mock.patch.object(
+ type(client.transport.get_json_web_keys), "__call__"
+ ) as call:
# Designate an appropriate return value for the call.
- call.return_value = cluster_service.ListNodePoolsResponse()
+ call.return_value = cluster_service.GetJSONWebKeysResponse()
- response = client.list_node_pools(request)
+ response = client.get_json_web_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.ListNodePoolsRequest()
+ assert args[0] == cluster_service.GetJSONWebKeysRequest()
# Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.ListNodePoolsResponse)
+ assert isinstance(response, cluster_service.GetJSONWebKeysResponse)
-def test_list_node_pools_from_dict():
- test_list_node_pools(request_type=dict)
+
+def test_get_json_web_keys_from_dict():
+ test_get_json_web_keys(request_type=dict)
@pytest.mark.asyncio
-async def test_list_node_pools_async(transport: str = "grpc_asyncio"):
+async def test_get_json_web_keys_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetJSONWebKeysRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListNodePoolsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_node_pools), "__call__"
+ type(client.transport.get_json_web_keys), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- cluster_service.ListNodePoolsResponse()
+ cluster_service.GetJSONWebKeysResponse()
)
- response = await client.list_node_pools(request)
+ response = await client.get_json_web_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetJSONWebKeysRequest()
# Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.ListNodePoolsResponse)
+ assert isinstance(response, cluster_service.GetJSONWebKeysResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_json_web_keys_async_from_dict():
+ await test_get_json_web_keys_async(request_type=dict)
+
+
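+# GetJSONWebKeys is new in this surface. Its response message has no scalar
+# fields to assert on, so the tests above check only the request type; the
+# tests below verify that routing metadata (x-goog-request-params) is sent.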
+def test_get_json_web_keys_field_headers():
+ client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = cluster_service.GetJSONWebKeysRequest()
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_json_web_keys), "__call__"
+ ) as call:
+ call.return_value = cluster_service.GetJSONWebKeysResponse()
+
+ client.get_json_web_keys(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_json_web_keys_field_headers_async():
+ client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = cluster_service.GetJSONWebKeysRequest()
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_json_web_keys), "__call__"
+ ) as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ cluster_service.GetJSONWebKeysResponse()
+ )
+
+ await client.get_json_web_keys(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
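+# test_list_node_pools and its async variant are re-emitted here, after the
+# new GetJSONWebKeys tests, rewritten to the request_type pattern used
+# throughout this file.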
+def test_list_node_pools(
+ transport: str = "grpc", request_type=cluster_service.ListNodePoolsRequest
+):
+ client = ClusterManagerClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cluster_service.ListNodePoolsResponse()
+
+ response = client.list_node_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cluster_service.ListNodePoolsRequest()
+
+ # Establish that the response is the type that we expect.
+
+ assert isinstance(response, cluster_service.ListNodePoolsResponse)
+
+
+def test_list_node_pools_from_dict():
+ test_list_node_pools(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.ListNodePoolsRequest
+):
+ client = ClusterManagerAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ cluster_service.ListNodePoolsResponse()
+ )
+
+ response = await client.list_node_pools(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cluster_service.ListNodePoolsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.ListNodePoolsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_async_from_dict():
+ await test_list_node_pools_async(request_type=dict)
def test_list_node_pools_field_headers():
@@ -4954,7 +5156,7 @@ def test_list_node_pools_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
call.return_value = cluster_service.ListNodePoolsResponse()
client.list_node_pools(request)
@@ -4979,9 +5181,7 @@ async def test_list_node_pools_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_node_pools), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListNodePoolsResponse()
)
@@ -5002,7 +5202,7 @@ def test_list_node_pools_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListNodePoolsResponse()
@@ -5049,9 +5249,7 @@ async def test_list_node_pools_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_node_pools), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListNodePoolsResponse()
@@ -5109,11 +5307,12 @@ def test_get_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool(
name="name_value",
initial_node_count=1911,
+ locations=["locations_value"],
self_link="self_link_value",
version="version_value",
instance_group_urls=["instance_group_urls_value"],
@@ -5131,12 +5330,15 @@ def test_get_node_pool(
assert args[0] == cluster_service.GetNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.NodePool)
assert response.name == "name_value"
assert response.initial_node_count == 1911
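+ # NodePool now exposes a locations field; verify that it round-trips.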
+ assert response.locations == ["locations_value"]
+
assert response.self_link == "self_link_value"
assert response.version == "version_value"
@@ -5155,24 +5357,25 @@ def test_get_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_get_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_get_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.NodePool(
name="name_value",
initial_node_count=1911,
+ locations=["locations_value"],
self_link="self_link_value",
version="version_value",
instance_group_urls=["instance_group_urls_value"],
@@ -5188,7 +5391,7 @@ async def test_get_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.NodePool)
@@ -5197,6 +5400,8 @@ async def test_get_node_pool_async(transport: str = "grpc_asyncio"):
assert response.initial_node_count == 1911
+ assert response.locations == ["locations_value"]
+
assert response.self_link == "self_link_value"
assert response.version == "version_value"
@@ -5210,6 +5415,11 @@ async def test_get_node_pool_async(transport: str = "grpc_asyncio"):
assert response.pod_ipv4_cidr_size == 1856
+@pytest.mark.asyncio
+async def test_get_node_pool_async_from_dict():
+ await test_get_node_pool_async(request_type=dict)
+
+
def test_get_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -5219,7 +5429,7 @@ def test_get_node_pool_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
call.return_value = cluster_service.NodePool()
client.get_node_pool(request)
@@ -5244,9 +5454,7 @@ async def test_get_node_pool_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.NodePool()
)
@@ -5267,7 +5475,7 @@ def test_get_node_pool_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool()
@@ -5318,9 +5526,7 @@ async def test_get_node_pool_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool()
@@ -5382,9 +5588,7 @@ def test_create_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -5409,6 +5613,7 @@ def test_create_node_pool(
assert args[0] == cluster_service.CreateNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -5439,19 +5644,19 @@ def test_create_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_create_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_create_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.CreateNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CreateNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -5475,7 +5680,7 @@ async def test_create_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CreateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -5503,6 +5708,11 @@ async def test_create_node_pool_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_create_node_pool_async_from_dict():
+ await test_create_node_pool_async(request_type=dict)
+
+
def test_create_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -5512,9 +5722,7 @@ def test_create_node_pool_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.create_node_pool(request)
@@ -5539,9 +5747,7 @@ async def test_create_node_pool_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -5562,9 +5768,7 @@ def test_create_node_pool_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5615,9 +5819,7 @@ async def test_create_node_pool_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5679,9 +5881,7 @@ def test_delete_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -5706,6 +5906,7 @@ def test_delete_node_pool(
assert args[0] == cluster_service.DeleteNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -5736,19 +5937,19 @@ def test_delete_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_delete_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_delete_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.DeleteNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.DeleteNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -5772,7 +5973,7 @@ async def test_delete_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.DeleteNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -5800,6 +6001,11 @@ async def test_delete_node_pool_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_delete_node_pool_async_from_dict():
+ await test_delete_node_pool_async(request_type=dict)
+
+
def test_delete_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -5809,9 +6015,7 @@ def test_delete_node_pool_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.delete_node_pool(request)
@@ -5836,9 +6040,7 @@ async def test_delete_node_pool_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -5859,9 +6061,7 @@ def test_delete_node_pool_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5912,9 +6112,7 @@ async def test_delete_node_pool_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5977,7 +6175,7 @@ def test_rollback_node_pool_upgrade(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -6003,6 +6201,7 @@ def test_rollback_node_pool_upgrade(
assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6033,18 +6232,21 @@ def test_rollback_node_pool_upgrade_from_dict():
@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio"):
+async def test_rollback_node_pool_upgrade_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.RollbackNodePoolUpgradeRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.RollbackNodePoolUpgradeRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -6069,7 +6271,7 @@ async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio")
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -6097,6 +6299,11 @@ async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio")
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_async_from_dict():
+ await test_rollback_node_pool_upgrade_async(request_type=dict)
+
+
def test_rollback_node_pool_upgrade_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -6107,7 +6314,7 @@ def test_rollback_node_pool_upgrade_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -6134,7 +6341,7 @@ async def test_rollback_node_pool_upgrade_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -6157,7 +6364,7 @@ def test_rollback_node_pool_upgrade_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6210,7 +6417,7 @@ async def test_rollback_node_pool_upgrade_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6274,7 +6481,7 @@ def test_set_node_pool_management(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -6300,6 +6507,7 @@ def test_set_node_pool_management(
assert args[0] == cluster_service.SetNodePoolManagementRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6330,18 +6538,21 @@ def test_set_node_pool_management_from_dict():
@pytest.mark.asyncio
-async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"):
+async def test_set_node_pool_management_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetNodePoolManagementRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNodePoolManagementRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -6366,7 +6577,7 @@ async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNodePoolManagementRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -6394,6 +6605,11 @@ async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_node_pool_management_async_from_dict():
+ await test_set_node_pool_management_async(request_type=dict)
+
+
def test_set_node_pool_management_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -6404,7 +6620,7 @@ def test_set_node_pool_management_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -6431,7 +6647,7 @@ async def test_set_node_pool_management_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -6461,7 +6677,7 @@ def test_set_labels(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_labels), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -6486,6 +6702,7 @@ def test_set_labels(
assert args[0] == cluster_service.SetLabelsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6516,19 +6733,19 @@ def test_set_labels_from_dict():
@pytest.mark.asyncio
-async def test_set_labels_async(transport: str = "grpc_asyncio"):
+async def test_set_labels_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetLabelsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLabelsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_labels), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -6552,7 +6769,7 @@ async def test_set_labels_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLabelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -6580,6 +6797,11 @@ async def test_set_labels_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_labels_async_from_dict():
+ await test_set_labels_async(request_type=dict)
+
+
def test_set_labels_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -6589,7 +6811,7 @@ def test_set_labels_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_labels), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_labels(request)
@@ -6614,9 +6836,7 @@ async def test_set_labels_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_labels), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
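The `*_field_headers` tests being rewritten here verify that routing fields from the request are echoed into gRPC metadata as an `x-goog-request-params` header. A condensed sketch of the assertion they build up to (import paths are assumed; the metadata key is the standard GAPIC routing header):

from unittest import mock

from google.auth import credentials
from google.cloud.container_v1 import ClusterManagerClient  # assumed import path
from google.cloud.container_v1.types import cluster_service

client = ClusterManagerClient(credentials=credentials.AnonymousCredentials())
request = cluster_service.SetLabelsRequest()
request.name = "name/value"

with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
    call.return_value = cluster_service.Operation()
    client.set_labels(request)
    # The client should attach a routing header derived from request.name.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]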
@@ -6645,7 +6865,7 @@ def test_set_legacy_abac(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -6670,6 +6890,7 @@ def test_set_legacy_abac(
assert args[0] == cluster_service.SetLegacyAbacRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6700,19 +6921,19 @@ def test_set_legacy_abac_from_dict():
@pytest.mark.asyncio
-async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"):
+async def test_set_legacy_abac_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetLegacyAbacRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLegacyAbacRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_legacy_abac), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -6736,7 +6957,7 @@ async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLegacyAbacRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -6764,6 +6985,11 @@ async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_legacy_abac_async_from_dict():
+ await test_set_legacy_abac_async(request_type=dict)
+
+
def test_set_legacy_abac_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -6773,7 +6999,7 @@ def test_set_legacy_abac_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_legacy_abac(request)
@@ -6798,9 +7024,7 @@ async def test_set_legacy_abac_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_legacy_abac), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -6821,7 +7045,7 @@ def test_set_legacy_abac_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6872,9 +7096,7 @@ async def test_set_legacy_abac_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_legacy_abac), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6937,7 +7159,7 @@ def test_start_ip_rotation(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -6963,6 +7185,7 @@ def test_start_ip_rotation(
assert args[0] == cluster_service.StartIPRotationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6993,18 +7216,20 @@ def test_start_ip_rotation_from_dict():
@pytest.mark.asyncio
-async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"):
+async def test_start_ip_rotation_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.StartIPRotationRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.StartIPRotationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7029,7 +7254,7 @@ async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.StartIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7057,6 +7282,11 @@ async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_start_ip_rotation_async_from_dict():
+ await test_start_ip_rotation_async(request_type=dict)
+
+
def test_start_ip_rotation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7067,7 +7297,7 @@ def test_start_ip_rotation_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7094,7 +7324,7 @@ async def test_start_ip_rotation_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7117,7 +7347,7 @@ def test_start_ip_rotation_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7166,7 +7396,7 @@ async def test_start_ip_rotation_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7226,7 +7456,7 @@ def test_complete_ip_rotation(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -7252,6 +7482,7 @@ def test_complete_ip_rotation(
assert args[0] == cluster_service.CompleteIPRotationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -7282,18 +7513,21 @@ def test_complete_ip_rotation_from_dict():
@pytest.mark.asyncio
-async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"):
+async def test_complete_ip_rotation_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.CompleteIPRotationRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CompleteIPRotationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7318,7 +7552,7 @@ async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CompleteIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7346,6 +7580,11 @@ async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_complete_ip_rotation_async_from_dict():
+ await test_complete_ip_rotation_async(request_type=dict)
+
+
def test_complete_ip_rotation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7356,7 +7595,7 @@ def test_complete_ip_rotation_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7383,7 +7622,7 @@ async def test_complete_ip_rotation_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7406,7 +7645,7 @@ def test_complete_ip_rotation_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7455,7 +7694,7 @@ async def test_complete_ip_rotation_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7515,7 +7754,7 @@ def test_set_node_pool_size(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -7541,6 +7780,7 @@ def test_set_node_pool_size(
assert args[0] == cluster_service.SetNodePoolSizeRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -7571,18 +7811,20 @@ def test_set_node_pool_size_from_dict():
@pytest.mark.asyncio
-async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"):
+async def test_set_node_pool_size_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetNodePoolSizeRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNodePoolSizeRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7607,7 +7849,7 @@ async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNodePoolSizeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7635,6 +7877,11 @@ async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_node_pool_size_async_from_dict():
+ await test_set_node_pool_size_async(request_type=dict)
+
+
def test_set_node_pool_size_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7645,7 +7892,7 @@ def test_set_node_pool_size_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7672,7 +7919,7 @@ async def test_set_node_pool_size_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7703,7 +7950,7 @@ def test_set_network_policy(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -7729,6 +7976,7 @@ def test_set_network_policy(
assert args[0] == cluster_service.SetNetworkPolicyRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -7759,18 +8007,21 @@ def test_set_network_policy_from_dict():
@pytest.mark.asyncio
-async def test_set_network_policy_async(transport: str = "grpc_asyncio"):
+async def test_set_network_policy_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetNetworkPolicyRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNetworkPolicyRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7795,7 +8046,7 @@ async def test_set_network_policy_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNetworkPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7823,6 +8074,11 @@ async def test_set_network_policy_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_network_policy_async_from_dict():
+ await test_set_network_policy_async(request_type=dict)
+
+
def test_set_network_policy_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7833,7 +8089,7 @@ def test_set_network_policy_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7860,7 +8116,7 @@ async def test_set_network_policy_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7883,7 +8139,7 @@ def test_set_network_policy_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7942,7 +8198,7 @@ async def test_set_network_policy_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -8012,7 +8268,7 @@ def test_set_maintenance_policy(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -8038,6 +8294,7 @@ def test_set_maintenance_policy(
assert args[0] == cluster_service.SetMaintenancePolicyRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -8068,18 +8325,21 @@ def test_set_maintenance_policy_from_dict():
@pytest.mark.asyncio
-async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"):
+async def test_set_maintenance_policy_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetMaintenancePolicyRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetMaintenancePolicyRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -8104,7 +8364,7 @@ async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetMaintenancePolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -8132,6 +8392,11 @@ async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_maintenance_policy_async_from_dict():
+ await test_set_maintenance_policy_async(request_type=dict)
+
+
def test_set_maintenance_policy_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -8142,7 +8407,7 @@ def test_set_maintenance_policy_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -8169,7 +8434,7 @@ async def test_set_maintenance_policy_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -8192,7 +8457,7 @@ def test_set_maintenance_policy_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -8263,7 +8528,7 @@ async def test_set_maintenance_policy_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -8345,7 +8610,7 @@ def test_list_usable_subnetworks(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListUsableSubnetworksResponse(
@@ -8361,6 +8626,7 @@ def test_list_usable_subnetworks(
assert args[0] == cluster_service.ListUsableSubnetworksRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListUsableSubnetworksPager)
assert response.next_page_token == "next_page_token_value"
@@ -8371,18 +8637,21 @@ def test_list_usable_subnetworks_from_dict():
@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"):
+async def test_list_usable_subnetworks_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.ListUsableSubnetworksRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListUsableSubnetworksRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -8397,7 +8666,7 @@ async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListUsableSubnetworksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager)
@@ -8405,6 +8674,11 @@ async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_async_from_dict():
+ await test_list_usable_subnetworks_async(request_type=dict)
+
+
def test_list_usable_subnetworks_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -8415,7 +8689,7 @@ def test_list_usable_subnetworks_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
call.return_value = cluster_service.ListUsableSubnetworksResponse()
@@ -8442,7 +8716,7 @@ async def test_list_usable_subnetworks_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListUsableSubnetworksResponse()
@@ -8465,7 +8739,7 @@ def test_list_usable_subnetworks_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -8511,7 +8785,7 @@ def test_list_usable_subnetworks_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -8549,7 +8823,7 @@ async def test_list_usable_subnetworks_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks),
+ type(client.transport.list_usable_subnetworks),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -8594,7 +8868,7 @@ async def test_list_usable_subnetworks_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks),
+ type(client.transport.list_usable_subnetworks),
"__call__",
new_callable=mock.AsyncMock,
) as call:
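The pager tests above set `call.side_effect` to a series of responses so each successive RPC invocation returns the next page; iterating the pager then walks pages transparently. A small stdlib sketch of that technique with illustrative page shapes:

from unittest import mock

# Each call to the mocked RPC pops the next canned page.
rpc = mock.Mock(side_effect=[
    {"items": [1, 2], "next_page_token": "abc"},
    {"items": [3], "next_page_token": ""},
])

def iterate_pages(rpc, request):
    # Minimal pager loop: re-issue the request with each page token.
    while True:
        page = rpc(request)
        yield page
        if not page["next_page_token"]:
            break
        request = {**request, "page_token": page["next_page_token"]}

pages = list(iterate_pages(rpc, {"parent": "projects/p"}))
assert [i for p in pages for i in p["items"]] == [1, 2, 3]
assert rpc.call_count == 2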
@@ -8666,7 +8940,7 @@ def test_transport_instance():
credentials=credentials.AnonymousCredentials(),
)
client = ClusterManagerClient(transport=transport)
- assert client._transport is transport
+ assert client.transport is transport
def test_transport_get_channel():
@@ -8702,7 +8976,7 @@ def test_transport_adc(transport_class):
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
- assert isinstance(client._transport, transports.ClusterManagerGrpcTransport,)
+ assert isinstance(client.transport, transports.ClusterManagerGrpcTransport,)
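A recurring theme of this diff is replacing the private `client._transport` (and the doubly-private `client._client._transport` on the async surface) with a public `transport` property. For the patching in these tests to keep working, the async property must hand back the same transport object the wrapped sync client holds. A sketch of that delegation, assuming the async client wraps a sync-style inner client:

class InnerClient:
    def __init__(self, transport):
        self._transport = transport

class AsyncClientSketch:
    """Async facade that reuses the inner client's transport."""

    def __init__(self, transport):
        self._client = InnerClient(transport)

    @property
    def transport(self):
        # Public accessor; tests can patch type(client.transport.rpc_name)
        # without reaching through private attributes.
        return self._client._transport

t = object()
assert AsyncClientSketch(t).transport is t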
def test_cluster_manager_base_transport_error():
@@ -8744,6 +9018,7 @@ def test_cluster_manager_base_transport():
"get_operation",
"cancel_operation",
"get_server_config",
+ "get_json_web_keys",
"list_node_pools",
"get_node_pool",
"create_node_pool",
@@ -8826,7 +9101,7 @@ def test_cluster_manager_host_no_port():
api_endpoint="container.googleapis.com"
),
)
- assert client._transport._host == "container.googleapis.com:443"
+ assert client.transport._host == "container.googleapis.com:443"
def test_cluster_manager_host_with_port():
@@ -8836,7 +9111,7 @@ def test_cluster_manager_host_with_port():
api_endpoint="container.googleapis.com:8000"
),
)
- assert client._transport._host == "container.googleapis.com:8000"
+ assert client.transport._host == "container.googleapis.com:8000"
def test_cluster_manager_grpc_transport_channel():
@@ -8848,6 +9123,7 @@ def test_cluster_manager_grpc_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None

def test_cluster_manager_grpc_asyncio_transport_channel():
@@ -8859,6 +9135,7 @@ def test_cluster_manager_grpc_asyncio_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
@@ -8906,6 +9183,7 @@ def test_cluster_manager_transport_channel_mtls_with_client_cert_source(
quota_project_id=None,
)
assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
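The new `_ssl_channel_credentials` assertions check that the transport remembers the SSL credentials its channel was built with (`None` for plain channels, the client-cert credentials for mTLS). A sketch of the bookkeeping being asserted, assuming the transport simply stores whatever was passed at construction:

class TransportSketch:
    def __init__(self, ssl_channel_credentials=None):
        # Remember the SSL credentials used (if any) so callers and tests
        # can verify how the channel was secured.
        self._ssl_channel_credentials = ssl_channel_credentials

plain = TransportSketch()
assert plain._ssl_channel_credentials is None

mock_ssl_cred = object()
mtls = TransportSketch(ssl_channel_credentials=mock_ssl_cred)
assert mtls._ssl_channel_credentials is mock_ssl_cred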
@pytest.mark.parametrize(
@@ -8948,6 +9226,107 @@ def test_cluster_manager_transport_channel_mtls_with_adc(transport_class):
assert transport.grpc_channel == mock_grpc_channel
+def test_common_billing_account_path():
+ billing_account = "squid"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = ClusterManagerClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "clam",
+ }
+ path = ClusterManagerClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "whelk"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = ClusterManagerClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "octopus",
+ }
+ path = ClusterManagerClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "oyster"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = ClusterManagerClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "nudibranch",
+ }
+ path = ClusterManagerClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "cuttlefish"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = ClusterManagerClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "mussel",
+ }
+ path = ClusterManagerClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "winkle"
+ location = "nautilus"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = ClusterManagerClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ }
+ path = ClusterManagerClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_location_path(path)
+ assert expected == actual
+
+
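The new `common_*_path` / `parse_common_*_path` helpers added above are format/parse pairs over resource path templates, and each test asserts the pair round-trips. A minimal sketch of such a pair; the template and regex shapes are reconstructed to match the standard generated form, not copied from this diff:

import re

def common_location_path(project: str, location: str) -> str:
    return "projects/{project}/locations/{location}".format(
        project=project, location=location,
    )

def parse_common_location_path(path: str) -> dict:
    # Non-greedy named groups mirror the generated parsers.
    m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    return m.groupdict() if m else {}

expected = {"project": "scallop", "location": "abalone"}
path = common_location_path(**expected)
assert parse_common_location_path(path) == expected  # round-trip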
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
diff --git a/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py b/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py
index 941e6d01db73..4f883f880f06 100644
--- a/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py
+++ b/packages/google-cloud-container/tests/unit/gapic/container_v1beta1/test_cluster_manager.py
@@ -97,12 +97,12 @@ def test_cluster_manager_client_from_service_account_file(client_class):
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
- assert client._transport._host == "container.googleapis.com:443"
+ assert client.transport._host == "container.googleapis.com:443"
def test_cluster_manager_client_get_transport_class():
@@ -452,7 +452,7 @@ def test_list_clusters(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_clusters), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse(
missing_zones=["missing_zones_value"],
@@ -467,6 +467,7 @@ def test_list_clusters(
assert args[0] == cluster_service.ListClustersRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.ListClustersResponse)
assert response.missing_zones == ["missing_zones_value"]
@@ -477,19 +478,19 @@ def test_list_clusters_from_dict():
@pytest.mark.asyncio
-async def test_list_clusters_async(transport: str = "grpc_asyncio"):
+async def test_list_clusters_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.ListClustersRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListClustersRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_clusters), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListClustersResponse(missing_zones=["missing_zones_value"],)
@@ -501,7 +502,7 @@ async def test_list_clusters_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListClustersRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListClustersResponse)
@@ -509,6 +510,11 @@ async def test_list_clusters_async(transport: str = "grpc_asyncio"):
assert response.missing_zones == ["missing_zones_value"]
+@pytest.mark.asyncio
+async def test_list_clusters_async_from_dict():
+ await test_list_clusters_async(request_type=dict)
+
+
def test_list_clusters_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -518,7 +524,7 @@ def test_list_clusters_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_clusters), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
call.return_value = cluster_service.ListClustersResponse()
client.list_clusters(request)
@@ -543,9 +549,7 @@ async def test_list_clusters_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_clusters), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListClustersResponse()
)
@@ -566,7 +570,7 @@ def test_list_clusters_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_clusters), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse()
@@ -604,9 +608,7 @@ async def test_list_clusters_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_clusters), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_clusters), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListClustersResponse()
@@ -655,7 +657,7 @@ def test_get_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster(
name="name_value",
@@ -699,6 +701,7 @@ def test_get_cluster(
assert args[0] == cluster_service.GetClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Cluster)
assert response.name == "name_value"
@@ -767,19 +770,19 @@ def test_get_cluster_from_dict():
@pytest.mark.asyncio
-async def test_get_cluster_async(transport: str = "grpc_asyncio"):
+async def test_get_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Cluster(
@@ -822,7 +825,7 @@ async def test_get_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Cluster)
@@ -888,6 +891,11 @@ async def test_get_cluster_async(transport: str = "grpc_asyncio"):
assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value"
+@pytest.mark.asyncio
+async def test_get_cluster_async_from_dict():
+ await test_get_cluster_async(request_type=dict)
+
+
def test_get_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -897,7 +905,7 @@ def test_get_cluster_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
call.return_value = cluster_service.Cluster()
client.get_cluster(request)
@@ -922,9 +930,7 @@ async def test_get_cluster_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Cluster()
)
@@ -945,7 +951,7 @@ def test_get_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster()
@@ -988,9 +994,7 @@ async def test_get_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Cluster()
@@ -1044,7 +1048,7 @@ def test_create_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -1069,6 +1073,7 @@ def test_create_cluster(
assert args[0] == cluster_service.CreateClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1099,19 +1104,19 @@ def test_create_cluster_from_dict():
@pytest.mark.asyncio
-async def test_create_cluster_async(transport: str = "grpc_asyncio"):
+async def test_create_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.CreateClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CreateClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -1135,7 +1140,7 @@ async def test_create_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CreateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1163,6 +1168,11 @@ async def test_create_cluster_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_create_cluster_async_from_dict():
+ await test_create_cluster_async(request_type=dict)
+
+
def test_create_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1172,7 +1182,7 @@ def test_create_cluster_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.create_cluster(request)
@@ -1197,9 +1207,7 @@ async def test_create_cluster_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -1220,7 +1228,7 @@ def test_create_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1263,9 +1271,7 @@ async def test_create_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1319,7 +1325,7 @@ def test_update_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -1344,6 +1350,7 @@ def test_update_cluster(
assert args[0] == cluster_service.UpdateClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1374,19 +1381,19 @@ def test_update_cluster_from_dict():
@pytest.mark.asyncio
-async def test_update_cluster_async(transport: str = "grpc_asyncio"):
+async def test_update_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.UpdateClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.UpdateClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -1410,7 +1417,7 @@ async def test_update_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.UpdateClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1438,6 +1445,11 @@ async def test_update_cluster_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_update_cluster_async_from_dict():
+ await test_update_cluster_async(request_type=dict)
+
+
def test_update_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1447,7 +1459,7 @@ def test_update_cluster_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_cluster(request)
@@ -1472,9 +1484,7 @@ async def test_update_cluster_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -1495,7 +1505,7 @@ def test_update_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1548,9 +1558,7 @@ async def test_update_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -1614,9 +1622,7 @@ def test_update_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -1641,6 +1647,7 @@ def test_update_node_pool(
assert args[0] == cluster_service.UpdateNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1671,19 +1678,19 @@ def test_update_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_update_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_update_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.UpdateNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.UpdateNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -1707,7 +1714,7 @@ async def test_update_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.UpdateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1735,6 +1742,11 @@ async def test_update_node_pool_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_update_node_pool_async_from_dict():
+ await test_update_node_pool_async(request_type=dict)
+
+
def test_update_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1744,9 +1756,7 @@ def test_update_node_pool_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_node_pool(request)
@@ -1771,9 +1781,7 @@ async def test_update_node_pool_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -1803,7 +1811,7 @@ def test_set_node_pool_autoscaling(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -1829,6 +1837,7 @@ def test_set_node_pool_autoscaling(
assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -1859,18 +1868,21 @@ def test_set_node_pool_autoscaling_from_dict():
@pytest.mark.asyncio
-async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"):
+async def test_set_node_pool_autoscaling_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetNodePoolAutoscalingRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNodePoolAutoscalingRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -1895,7 +1907,7 @@ async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -1923,6 +1935,11 @@ async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_node_pool_autoscaling_async_from_dict():
+ await test_set_node_pool_autoscaling_async(request_type=dict)
+
+
def test_set_node_pool_autoscaling_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -1933,7 +1950,7 @@ def test_set_node_pool_autoscaling_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -1960,7 +1977,7 @@ async def test_set_node_pool_autoscaling_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_autoscaling), "__call__"
+ type(client.transport.set_node_pool_autoscaling), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -1991,7 +2008,7 @@ def test_set_logging_service(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -2017,6 +2034,7 @@ def test_set_logging_service(
assert args[0] == cluster_service.SetLoggingServiceRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2047,18 +2065,21 @@ def test_set_logging_service_from_dict():
@pytest.mark.asyncio
-async def test_set_logging_service_async(transport: str = "grpc_asyncio"):
+async def test_set_logging_service_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetLoggingServiceRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLoggingServiceRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2083,7 +2104,7 @@ async def test_set_logging_service_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLoggingServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -2111,6 +2132,11 @@ async def test_set_logging_service_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_logging_service_async_from_dict():
+ await test_set_logging_service_async(request_type=dict)
+
+
def test_set_logging_service_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -2121,7 +2147,7 @@ def test_set_logging_service_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -2148,7 +2174,7 @@ async def test_set_logging_service_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -2171,7 +2197,7 @@ def test_set_logging_service_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2220,7 +2246,7 @@ async def test_set_logging_service_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_logging_service), "__call__"
+ type(client.transport.set_logging_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2280,7 +2306,7 @@ def test_set_monitoring_service(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -2306,6 +2332,7 @@ def test_set_monitoring_service(
assert args[0] == cluster_service.SetMonitoringServiceRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2336,18 +2363,21 @@ def test_set_monitoring_service_from_dict():
@pytest.mark.asyncio
-async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"):
+async def test_set_monitoring_service_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetMonitoringServiceRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetMonitoringServiceRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2372,7 +2402,7 @@ async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetMonitoringServiceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -2400,6 +2430,11 @@ async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_monitoring_service_async_from_dict():
+ await test_set_monitoring_service_async(request_type=dict)
+
+
def test_set_monitoring_service_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -2410,7 +2445,7 @@ def test_set_monitoring_service_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -2437,7 +2472,7 @@ async def test_set_monitoring_service_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -2460,7 +2495,7 @@ def test_set_monitoring_service_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2509,7 +2544,7 @@ async def test_set_monitoring_service_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_monitoring_service), "__call__"
+ type(client.transport.set_monitoring_service), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2569,7 +2604,7 @@ def test_set_addons_config(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -2595,6 +2630,7 @@ def test_set_addons_config(
assert args[0] == cluster_service.SetAddonsConfigRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2625,18 +2661,20 @@ def test_set_addons_config_from_dict():
@pytest.mark.asyncio
-async def test_set_addons_config_async(transport: str = "grpc_asyncio"):
+async def test_set_addons_config_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetAddonsConfigRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetAddonsConfigRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2661,7 +2699,7 @@ async def test_set_addons_config_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetAddonsConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -2689,6 +2727,11 @@ async def test_set_addons_config_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_addons_config_async_from_dict():
+ await test_set_addons_config_async(request_type=dict)
+
+
def test_set_addons_config_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -2699,7 +2742,7 @@ def test_set_addons_config_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -2726,7 +2769,7 @@ async def test_set_addons_config_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -2749,7 +2792,7 @@ def test_set_addons_config_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2804,7 +2847,7 @@ async def test_set_addons_config_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_addons_config), "__call__"
+ type(client.transport.set_addons_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -2869,7 +2912,7 @@ def test_set_locations(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -2894,6 +2937,7 @@ def test_set_locations(
assert args[0] == cluster_service.SetLocationsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -2924,19 +2968,19 @@ def test_set_locations_from_dict():
@pytest.mark.asyncio
-async def test_set_locations_async(transport: str = "grpc_asyncio"):
+async def test_set_locations_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetLocationsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLocationsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -2960,7 +3004,7 @@ async def test_set_locations_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLocationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -2988,6 +3032,11 @@ async def test_set_locations_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_locations_async_from_dict():
+ await test_set_locations_async(request_type=dict)
+
+
def test_set_locations_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -2997,7 +3046,7 @@ def test_set_locations_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_locations(request)
@@ -3022,9 +3071,7 @@ async def test_set_locations_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3045,7 +3092,7 @@ def test_set_locations_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3092,9 +3139,7 @@ async def test_set_locations_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3152,7 +3197,7 @@ def test_update_master(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_master), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -3177,6 +3222,7 @@ def test_update_master(
assert args[0] == cluster_service.UpdateMasterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -3207,19 +3253,19 @@ def test_update_master_from_dict():
@pytest.mark.asyncio
-async def test_update_master_async(transport: str = "grpc_asyncio"):
+async def test_update_master_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.UpdateMasterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.UpdateMasterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_master), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -3243,7 +3289,7 @@ async def test_update_master_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.UpdateMasterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -3271,6 +3317,11 @@ async def test_update_master_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_update_master_async_from_dict():
+ await test_update_master_async(request_type=dict)
+
+
def test_update_master_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3280,7 +3331,7 @@ def test_update_master_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_master), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
call.return_value = cluster_service.Operation()
client.update_master(request)
@@ -3305,9 +3356,7 @@ async def test_update_master_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_master), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3328,7 +3377,7 @@ def test_update_master_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_master), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3375,9 +3424,7 @@ async def test_update_master_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_master), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_master), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3435,7 +3482,7 @@ def test_set_master_auth(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -3460,6 +3507,7 @@ def test_set_master_auth(
assert args[0] == cluster_service.SetMasterAuthRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -3490,19 +3538,19 @@ def test_set_master_auth_from_dict():
@pytest.mark.asyncio
-async def test_set_master_auth_async(transport: str = "grpc_asyncio"):
+async def test_set_master_auth_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetMasterAuthRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetMasterAuthRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_master_auth), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -3526,7 +3574,7 @@ async def test_set_master_auth_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetMasterAuthRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -3554,6 +3602,11 @@ async def test_set_master_auth_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_master_auth_async_from_dict():
+ await test_set_master_auth_async(request_type=dict)
+
+
def test_set_master_auth_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3563,7 +3616,7 @@ def test_set_master_auth_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_master_auth(request)
@@ -3588,9 +3641,7 @@ async def test_set_master_auth_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_master_auth), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_master_auth), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3619,7 +3670,7 @@ def test_delete_cluster(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -3644,6 +3695,7 @@ def test_delete_cluster(
assert args[0] == cluster_service.DeleteClusterRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -3674,19 +3726,19 @@ def test_delete_cluster_from_dict():
@pytest.mark.asyncio
-async def test_delete_cluster_async(transport: str = "grpc_asyncio"):
+async def test_delete_cluster_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.DeleteClusterRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.DeleteClusterRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -3710,7 +3762,7 @@ async def test_delete_cluster_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.DeleteClusterRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -3738,6 +3790,11 @@ async def test_delete_cluster_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_delete_cluster_async_from_dict():
+ await test_delete_cluster_async(request_type=dict)
+
+
def test_delete_cluster_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3747,7 +3804,7 @@ def test_delete_cluster_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
call.return_value = cluster_service.Operation()
client.delete_cluster(request)
@@ -3772,9 +3829,7 @@ async def test_delete_cluster_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -3795,7 +3850,7 @@ def test_delete_cluster_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3838,9 +3893,7 @@ async def test_delete_cluster_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_cluster), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_cluster), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -3894,7 +3947,7 @@ def test_list_operations(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_operations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse(
missing_zones=["missing_zones_value"],
@@ -3909,6 +3962,7 @@ def test_list_operations(
assert args[0] == cluster_service.ListOperationsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.ListOperationsResponse)
assert response.missing_zones == ["missing_zones_value"]
@@ -3919,19 +3973,19 @@ def test_list_operations_from_dict():
@pytest.mark.asyncio
-async def test_list_operations_async(transport: str = "grpc_asyncio"):
+async def test_list_operations_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.ListOperationsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListOperationsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_operations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListOperationsResponse(
@@ -3945,7 +3999,7 @@ async def test_list_operations_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListOperationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListOperationsResponse)
@@ -3953,6 +4007,11 @@ async def test_list_operations_async(transport: str = "grpc_asyncio"):
assert response.missing_zones == ["missing_zones_value"]
+@pytest.mark.asyncio
+async def test_list_operations_async_from_dict():
+ await test_list_operations_async(request_type=dict)
+
+
def test_list_operations_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -3962,7 +4021,7 @@ def test_list_operations_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_operations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
call.return_value = cluster_service.ListOperationsResponse()
client.list_operations(request)
@@ -3987,9 +4046,7 @@ async def test_list_operations_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_operations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListOperationsResponse()
)
@@ -4010,7 +4067,7 @@ def test_list_operations_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_operations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse()
@@ -4048,9 +4105,7 @@ async def test_list_operations_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_operations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_operations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListOperationsResponse()
@@ -4099,7 +4154,7 @@ def test_get_operation(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_operation), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -4124,6 +4179,7 @@ def test_get_operation(
assert args[0] == cluster_service.GetOperationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -4154,19 +4210,19 @@ def test_get_operation_from_dict():
@pytest.mark.asyncio
-async def test_get_operation_async(transport: str = "grpc_asyncio"):
+async def test_get_operation_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetOperationRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetOperationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -4190,7 +4246,7 @@ async def test_get_operation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetOperationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -4218,6 +4274,11 @@ async def test_get_operation_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_get_operation_async_from_dict():
+ await test_get_operation_async(request_type=dict)
+
+
def test_get_operation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4227,7 +4288,7 @@ def test_get_operation_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_operation), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
call.return_value = cluster_service.Operation()
client.get_operation(request)
@@ -4252,9 +4313,7 @@ async def test_get_operation_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -4275,7 +4334,7 @@ def test_get_operation_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_operation), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -4318,9 +4377,7 @@ async def test_get_operation_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -4374,9 +4431,7 @@ def test_cancel_operation(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -4397,19 +4452,19 @@ def test_cancel_operation_from_dict():
@pytest.mark.asyncio
-async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
+async def test_cancel_operation_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.CancelOperationRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CancelOperationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
@@ -4419,12 +4474,17 @@ async def test_cancel_operation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CancelOperationRequest()
# Establish that the response is the type that we expect.
assert response is None
+@pytest.mark.asyncio
+async def test_cancel_operation_async_from_dict():
+ await test_cancel_operation_async(request_type=dict)
+
+
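`cancel_operation` maps to an RPC whose response is `google.protobuf.Empty`, so the client surfaces `None`; accordingly the async fake wraps `None` rather than a message, as the hunks above show:

    call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
    response = await client.cancel_operation(request)
    assert response is None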
def test_cancel_operation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4434,9 +4494,7 @@ def test_cancel_operation_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
call.return_value = None
client.cancel_operation(request)
@@ -4461,9 +4519,7 @@ async def test_cancel_operation_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.cancel_operation(request)
@@ -4482,9 +4538,7 @@ def test_cancel_operation_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -4527,9 +4581,7 @@ async def test_cancel_operation_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_operation), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_operation), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -4582,7 +4634,7 @@ def test_get_server_config(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig(
@@ -4602,6 +4654,7 @@ def test_get_server_config(
assert args[0] == cluster_service.GetServerConfigRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.ServerConfig)
assert response.default_cluster_version == "default_cluster_version_value"
@@ -4620,18 +4673,20 @@ def test_get_server_config_from_dict():
@pytest.mark.asyncio
-async def test_get_server_config_async(transport: str = "grpc_asyncio"):
+async def test_get_server_config_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetServerConfigRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetServerConfigRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -4650,7 +4705,7 @@ async def test_get_server_config_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetServerConfigRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ServerConfig)
@@ -4666,6 +4721,11 @@ async def test_get_server_config_async(transport: str = "grpc_asyncio"):
assert response.valid_master_versions == ["valid_master_versions_value"]
+@pytest.mark.asyncio
+async def test_get_server_config_async_from_dict():
+ await test_get_server_config_async(request_type=dict)
+
+
def test_get_server_config_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4676,7 +4736,7 @@ def test_get_server_config_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
call.return_value = cluster_service.ServerConfig()
@@ -4703,7 +4763,7 @@ async def test_get_server_config_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ServerConfig()
@@ -4726,7 +4786,7 @@ def test_get_server_config_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig()
@@ -4766,7 +4826,7 @@ async def test_get_server_config_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_server_config), "__call__"
+ type(client.transport.get_server_config), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ServerConfig()
@@ -4816,7 +4876,7 @@ def test_list_node_pools(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListNodePoolsResponse()
@@ -4829,6 +4889,7 @@ def test_list_node_pools(
assert args[0] == cluster_service.ListNodePoolsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.ListNodePoolsResponse)
@@ -4837,19 +4898,19 @@ def test_list_node_pools_from_dict():
@pytest.mark.asyncio
-async def test_list_node_pools_async(transport: str = "grpc_asyncio"):
+async def test_list_node_pools_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.ListNodePoolsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListNodePoolsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_node_pools), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListNodePoolsResponse()
@@ -4861,12 +4922,17 @@ async def test_list_node_pools_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListNodePoolsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListNodePoolsResponse)
+@pytest.mark.asyncio
+async def test_list_node_pools_async_from_dict():
+ await test_list_node_pools_async(request_type=dict)
+
+
def test_list_node_pools_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -4876,7 +4942,7 @@ def test_list_node_pools_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
call.return_value = cluster_service.ListNodePoolsResponse()
client.list_node_pools(request)
@@ -4901,9 +4967,7 @@ async def test_list_node_pools_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_node_pools), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListNodePoolsResponse()
)
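For context, the `*_field_headers` tests in these hunks pin down request routing: the client is expected to mirror fields such as `request.parent` into call metadata so the backend can route by resource. The header key below is an assumption based on how these generated clients typically behave, not something shown in this diff:

    # Hypothetical reconstruction of the routing-header behavior under test.
    def routing_metadata(request):
        return (("x-goog-request-params", "parent=%s" % request["parent"]),)

    metadata = routing_metadata({"parent": "parent/value"})
    assert ("x-goog-request-params", "parent=parent/value") in metadata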
@@ -4924,7 +4988,7 @@ def test_list_node_pools_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListNodePoolsResponse()
@@ -4967,9 +5031,7 @@ async def test_list_node_pools_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_node_pools), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_node_pools), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListNodePoolsResponse()
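The `*_flattened` tests exercise the convenience signatures that accept individual keyword arguments instead of a request object, and passing both is an error. A rough sketch of that calling convention, with a hypothetical method name:

    def list_node_pools(request=None, *, parent=None):
        # Flattened arguments and an explicit request are mutually exclusive.
        if request is not None and parent is not None:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")
        return request if request is not None else {"parent": parent}

    assert list_node_pools(parent="projects/p")["parent"] == "projects/p"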
@@ -5023,7 +5085,7 @@ def test_get_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool(
name="name_value",
@@ -5045,6 +5107,7 @@ def test_get_node_pool(
assert args[0] == cluster_service.GetNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.NodePool)
assert response.name == "name_value"
@@ -5069,19 +5132,19 @@ def test_get_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_get_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_get_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.GetNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.GetNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.NodePool(
@@ -5102,7 +5165,7 @@ async def test_get_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.GetNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.NodePool)
@@ -5124,6 +5187,11 @@ async def test_get_node_pool_async(transport: str = "grpc_asyncio"):
assert response.pod_ipv4_cidr_size == 1856
+@pytest.mark.asyncio
+async def test_get_node_pool_async_from_dict():
+ await test_get_node_pool_async(request_type=dict)
+
+
def test_get_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -5133,7 +5201,7 @@ def test_get_node_pool_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
call.return_value = cluster_service.NodePool()
client.get_node_pool(request)
@@ -5158,9 +5226,7 @@ async def test_get_node_pool_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.NodePool()
)
@@ -5181,7 +5247,7 @@ def test_get_node_pool_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool()
@@ -5228,9 +5294,7 @@ async def test_get_node_pool_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.NodePool()
@@ -5288,9 +5352,7 @@ def test_create_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -5315,6 +5377,7 @@ def test_create_node_pool(
assert args[0] == cluster_service.CreateNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -5345,19 +5408,19 @@ def test_create_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_create_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_create_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.CreateNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CreateNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -5381,7 +5444,7 @@ async def test_create_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CreateNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -5409,6 +5472,11 @@ async def test_create_node_pool_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_create_node_pool_async_from_dict():
+ await test_create_node_pool_async(request_type=dict)
+
+
def test_create_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -5418,9 +5486,7 @@ def test_create_node_pool_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.create_node_pool(request)
@@ -5445,9 +5511,7 @@ async def test_create_node_pool_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -5468,9 +5532,7 @@ def test_create_node_pool_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5517,9 +5579,7 @@ async def test_create_node_pool_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5577,9 +5637,7 @@ def test_delete_node_pool(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -5604,6 +5662,7 @@ def test_delete_node_pool(
assert args[0] == cluster_service.DeleteNodePoolRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -5634,19 +5693,19 @@ def test_delete_node_pool_from_dict():
@pytest.mark.asyncio
-async def test_delete_node_pool_async(transport: str = "grpc_asyncio"):
+async def test_delete_node_pool_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.DeleteNodePoolRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.DeleteNodePoolRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -5670,7 +5729,7 @@ async def test_delete_node_pool_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.DeleteNodePoolRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -5698,6 +5757,11 @@ async def test_delete_node_pool_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_delete_node_pool_async_from_dict():
+ await test_delete_node_pool_async(request_type=dict)
+
+
def test_delete_node_pool_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -5707,9 +5771,7 @@ def test_delete_node_pool_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
call.return_value = cluster_service.Operation()
client.delete_node_pool(request)
@@ -5734,9 +5796,7 @@ async def test_delete_node_pool_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -5757,9 +5817,7 @@ def test_delete_node_pool_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5806,9 +5864,7 @@ async def test_delete_node_pool_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_node_pool), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_node_pool), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -5867,7 +5923,7 @@ def test_rollback_node_pool_upgrade(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -5893,6 +5949,7 @@ def test_rollback_node_pool_upgrade(
assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -5923,18 +5980,21 @@ def test_rollback_node_pool_upgrade_from_dict():
@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio"):
+async def test_rollback_node_pool_upgrade_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.RollbackNodePoolUpgradeRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.RollbackNodePoolUpgradeRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -5959,7 +6019,7 @@ async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio")
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -5987,6 +6047,11 @@ async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio")
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_async_from_dict():
+ await test_rollback_node_pool_upgrade_async(request_type=dict)
+
+
def test_rollback_node_pool_upgrade_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -5997,7 +6062,7 @@ def test_rollback_node_pool_upgrade_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -6024,7 +6089,7 @@ async def test_rollback_node_pool_upgrade_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -6047,7 +6112,7 @@ def test_rollback_node_pool_upgrade_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6096,7 +6161,7 @@ async def test_rollback_node_pool_upgrade_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.rollback_node_pool_upgrade), "__call__"
+ type(client.transport.rollback_node_pool_upgrade), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6156,7 +6221,7 @@ def test_set_node_pool_management(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -6182,6 +6247,7 @@ def test_set_node_pool_management(
assert args[0] == cluster_service.SetNodePoolManagementRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6212,18 +6278,21 @@ def test_set_node_pool_management_from_dict():
@pytest.mark.asyncio
-async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"):
+async def test_set_node_pool_management_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetNodePoolManagementRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNodePoolManagementRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -6248,7 +6317,7 @@ async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNodePoolManagementRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -6276,6 +6345,11 @@ async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_node_pool_management_async_from_dict():
+ await test_set_node_pool_management_async(request_type=dict)
+
+
def test_set_node_pool_management_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -6286,7 +6360,7 @@ def test_set_node_pool_management_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -6313,7 +6387,7 @@ async def test_set_node_pool_management_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -6336,7 +6410,7 @@ def test_set_node_pool_management_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6389,7 +6463,7 @@ async def test_set_node_pool_management_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_management), "__call__"
+ type(client.transport.set_node_pool_management), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6452,7 +6526,7 @@ def test_set_labels(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_labels), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -6477,6 +6551,7 @@ def test_set_labels(
assert args[0] == cluster_service.SetLabelsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6507,19 +6582,19 @@ def test_set_labels_from_dict():
@pytest.mark.asyncio
-async def test_set_labels_async(transport: str = "grpc_asyncio"):
+async def test_set_labels_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetLabelsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLabelsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_labels), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -6543,7 +6618,7 @@ async def test_set_labels_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLabelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -6571,6 +6646,11 @@ async def test_set_labels_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_labels_async_from_dict():
+ await test_set_labels_async(request_type=dict)
+
+
def test_set_labels_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -6580,7 +6660,7 @@ def test_set_labels_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_labels), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_labels(request)
@@ -6605,9 +6685,7 @@ async def test_set_labels_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_labels), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -6628,7 +6706,7 @@ def test_set_labels_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_labels), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6679,9 +6757,7 @@ async def test_set_labels_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_labels), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_labels), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6743,7 +6819,7 @@ def test_set_legacy_abac(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
name="name_value",
@@ -6768,6 +6844,7 @@ def test_set_legacy_abac(
assert args[0] == cluster_service.SetLegacyAbacRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -6798,19 +6875,19 @@ def test_set_legacy_abac_from_dict():
@pytest.mark.asyncio
-async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"):
+async def test_set_legacy_abac_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetLegacyAbacRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetLegacyAbacRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_legacy_abac), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation(
@@ -6834,7 +6911,7 @@ async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetLegacyAbacRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -6862,6 +6939,11 @@ async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_legacy_abac_async_from_dict():
+ await test_set_legacy_abac_async(request_type=dict)
+
+
def test_set_legacy_abac_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -6871,7 +6953,7 @@ def test_set_legacy_abac_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
call.return_value = cluster_service.Operation()
client.set_legacy_abac(request)
@@ -6896,9 +6978,7 @@ async def test_set_legacy_abac_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_legacy_abac), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
)
@@ -6919,7 +6999,7 @@ def test_set_legacy_abac_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -6966,9 +7046,7 @@ async def test_set_legacy_abac_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.set_legacy_abac), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.set_legacy_abac), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7027,7 +7105,7 @@ def test_start_ip_rotation(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -7053,6 +7131,7 @@ def test_start_ip_rotation(
assert args[0] == cluster_service.StartIPRotationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -7083,18 +7162,20 @@ def test_start_ip_rotation_from_dict():
@pytest.mark.asyncio
-async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"):
+async def test_start_ip_rotation_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.StartIPRotationRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.StartIPRotationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7119,7 +7200,7 @@ async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.StartIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7147,6 +7228,11 @@ async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_start_ip_rotation_async_from_dict():
+ await test_start_ip_rotation_async(request_type=dict)
+
+
def test_start_ip_rotation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7157,7 +7243,7 @@ def test_start_ip_rotation_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7184,7 +7270,7 @@ async def test_start_ip_rotation_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7207,7 +7293,7 @@ def test_start_ip_rotation_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7252,7 +7338,7 @@ async def test_start_ip_rotation_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.start_ip_rotation), "__call__"
+ type(client.transport.start_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7308,7 +7394,7 @@ def test_complete_ip_rotation(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -7334,6 +7420,7 @@ def test_complete_ip_rotation(
assert args[0] == cluster_service.CompleteIPRotationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -7364,18 +7451,21 @@ def test_complete_ip_rotation_from_dict():
@pytest.mark.asyncio
-async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"):
+async def test_complete_ip_rotation_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.CompleteIPRotationRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.CompleteIPRotationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7400,7 +7490,7 @@ async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.CompleteIPRotationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7428,6 +7518,11 @@ async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_complete_ip_rotation_async_from_dict():
+ await test_complete_ip_rotation_async(request_type=dict)
+
+
def test_complete_ip_rotation_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7438,7 +7533,7 @@ def test_complete_ip_rotation_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7465,7 +7560,7 @@ async def test_complete_ip_rotation_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7488,7 +7583,7 @@ def test_complete_ip_rotation_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7533,7 +7628,7 @@ async def test_complete_ip_rotation_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.complete_ip_rotation), "__call__"
+ type(client.transport.complete_ip_rotation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -7589,7 +7684,7 @@ def test_set_node_pool_size(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -7615,6 +7710,7 @@ def test_set_node_pool_size(
assert args[0] == cluster_service.SetNodePoolSizeRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -7645,18 +7741,20 @@ def test_set_node_pool_size_from_dict():
@pytest.mark.asyncio
-async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"):
+async def test_set_node_pool_size_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.SetNodePoolSizeRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNodePoolSizeRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7681,7 +7779,7 @@ async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNodePoolSizeRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7709,6 +7807,11 @@ async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_node_pool_size_async_from_dict():
+ await test_set_node_pool_size_async(request_type=dict)
+
+
def test_set_node_pool_size_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7719,7 +7822,7 @@ def test_set_node_pool_size_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7746,7 +7849,7 @@ async def test_set_node_pool_size_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_node_pool_size), "__call__"
+ type(client.transport.set_node_pool_size), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7777,7 +7880,7 @@ def test_set_network_policy(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -7803,6 +7906,7 @@ def test_set_network_policy(
assert args[0] == cluster_service.SetNetworkPolicyRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -7833,18 +7937,21 @@ def test_set_network_policy_from_dict():
@pytest.mark.asyncio
-async def test_set_network_policy_async(transport: str = "grpc_asyncio"):
+async def test_set_network_policy_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetNetworkPolicyRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetNetworkPolicyRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -7869,7 +7976,7 @@ async def test_set_network_policy_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetNetworkPolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -7897,6 +8004,11 @@ async def test_set_network_policy_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_network_policy_async_from_dict():
+ await test_set_network_policy_async(request_type=dict)
+
+
def test_set_network_policy_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -7907,7 +8019,7 @@ def test_set_network_policy_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -7934,7 +8046,7 @@ async def test_set_network_policy_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -7957,7 +8069,7 @@ def test_set_network_policy_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -8012,7 +8124,7 @@ async def test_set_network_policy_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_network_policy), "__call__"
+ type(client.transport.set_network_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -8078,7 +8190,7 @@ def test_set_maintenance_policy(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation(
@@ -8104,6 +8216,7 @@ def test_set_maintenance_policy(
assert args[0] == cluster_service.SetMaintenancePolicyRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, cluster_service.Operation)
assert response.name == "name_value"
@@ -8134,18 +8247,21 @@ def test_set_maintenance_policy_from_dict():
@pytest.mark.asyncio
-async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"):
+async def test_set_maintenance_policy_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.SetMaintenancePolicyRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.SetMaintenancePolicyRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -8170,7 +8286,7 @@ async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.SetMaintenancePolicyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.Operation)
@@ -8198,6 +8314,11 @@ async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"):
assert response.end_time == "end_time_value"
+@pytest.mark.asyncio
+async def test_set_maintenance_policy_async_from_dict():
+ await test_set_maintenance_policy_async(request_type=dict)
+
+
def test_set_maintenance_policy_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -8208,7 +8329,7 @@ def test_set_maintenance_policy_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
call.return_value = cluster_service.Operation()
@@ -8235,7 +8356,7 @@ async def test_set_maintenance_policy_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.Operation()
@@ -8258,7 +8379,7 @@ def test_set_maintenance_policy_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -8325,7 +8446,7 @@ async def test_set_maintenance_policy_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.set_maintenance_policy), "__call__"
+ type(client.transport.set_maintenance_policy), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.Operation()
@@ -8403,7 +8524,7 @@ def test_list_usable_subnetworks(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListUsableSubnetworksResponse(
@@ -8419,6 +8540,7 @@ def test_list_usable_subnetworks(
assert args[0] == cluster_service.ListUsableSubnetworksRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListUsableSubnetworksPager)
assert response.next_page_token == "next_page_token_value"
@@ -8429,18 +8551,21 @@ def test_list_usable_subnetworks_from_dict():
@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"):
+async def test_list_usable_subnetworks_async(
+ transport: str = "grpc_asyncio",
+ request_type=cluster_service.ListUsableSubnetworksRequest,
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListUsableSubnetworksRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -8455,7 +8580,7 @@ async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListUsableSubnetworksRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager)
@@ -8463,6 +8588,11 @@ async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_async_from_dict():
+ await test_list_usable_subnetworks_async(request_type=dict)
+
+
def test_list_usable_subnetworks_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
@@ -8473,7 +8603,7 @@ def test_list_usable_subnetworks_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
call.return_value = cluster_service.ListUsableSubnetworksResponse()
@@ -8500,7 +8630,7 @@ async def test_list_usable_subnetworks_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListUsableSubnetworksResponse()
@@ -8523,7 +8653,7 @@ def test_list_usable_subnetworks_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListUsableSubnetworksResponse()
@@ -8557,7 +8687,7 @@ async def test_list_usable_subnetworks_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListUsableSubnetworksResponse()
@@ -8594,7 +8724,7 @@ def test_list_usable_subnetworks_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -8640,7 +8770,7 @@ def test_list_usable_subnetworks_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_usable_subnetworks), "__call__"
+ type(client.transport.list_usable_subnetworks), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -8678,7 +8808,7 @@ async def test_list_usable_subnetworks_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks),
+ type(client.transport.list_usable_subnetworks),
"__call__",
new_callable=mock.AsyncMock,
) as call:
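The pager tests here feed the mocked stub a sequence of pages through `call.side_effect` and let the pager walk `next_page_token` until it is empty; `new_callable=mock.AsyncMock` is needed on the async variants so the patched `__call__` is awaitable. The token-following loop, reduced to plain Python with made-up page data:

    from unittest import mock

    fetch = mock.Mock(side_effect=[
        {"subnetworks": ["a", "b"], "next_page_token": "t1"},
        {"subnetworks": ["c"], "next_page_token": ""},
    ])

    results, token = [], None
    while True:
        page = fetch(page_token=token)
        results.extend(page["subnetworks"])
        token = page["next_page_token"]
        if not token:
            break

    assert results == ["a", "b", "c"]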
@@ -8723,7 +8853,7 @@ async def test_list_usable_subnetworks_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_usable_subnetworks),
+ type(client.transport.list_usable_subnetworks),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -8771,7 +8901,7 @@ def test_list_locations(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListLocationsResponse(
next_page_token="next_page_token_value",
@@ -8786,6 +8916,9 @@ def test_list_locations(
assert args[0] == cluster_service.ListLocationsRequest()
# Establish that the response is the type that we expect.
+
+ assert response.raw_page is response
+
assert isinstance(response, cluster_service.ListLocationsResponse)
assert response.next_page_token == "next_page_token_value"
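`ListLocations` is not paginated by the generator, so its response doubles as its own single page, and the new `assert response.raw_page is response` pins that contract down. A sketch of the property that makes the assertion hold (the implementation shown is assumed, only the property itself is confirmed by the diff):

    class ListLocationsResponse:
        def __init__(self, next_page_token=""):
            self.next_page_token = next_page_token

        @property
        def raw_page(self):
            # The unpaginated response acts as its own single page.
            return self

    response = ListLocationsResponse()
    assert response.raw_page is response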
@@ -8796,19 +8929,19 @@ def test_list_locations_from_dict():
@pytest.mark.asyncio
-async def test_list_locations_async(transport: str = "grpc_asyncio"):
+async def test_list_locations_async(
+ transport: str = "grpc_asyncio", request_type=cluster_service.ListLocationsRequest
+):
client = ClusterManagerAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = cluster_service.ListLocationsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListLocationsResponse(
@@ -8822,7 +8955,7 @@ async def test_list_locations_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == cluster_service.ListLocationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, cluster_service.ListLocationsResponse)
@@ -8830,6 +8963,11 @@ async def test_list_locations_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_locations_async_from_dict():
+ await test_list_locations_async(request_type=dict)
+
+
def test_list_locations_field_headers():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
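The `*_async_from_dict` additions follow a single pattern visible throughout this diff: the async test body takes a `request_type` parameter, and a one-line wrapper re-runs it with `request_type=dict`, proving the client accepts a plain dict wherever it accepts the request proto. A stripped-down sketch of the pattern (all names hypothetical):

    import asyncio

    class FakeRequest:
        """Hypothetical stand-in for a generated request message."""

        def __init__(self, mapping=None):
            self.mapping = dict(mapping or {})

    async def run_case(request_type=FakeRequest):
        # Both FakeRequest() and dict() construct an empty request here.
        request = request_type()
        assert request is not None

    async def run_case_from_dict():
        await run_case(request_type=dict)

    asyncio.run(run_case_from_dict())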
@@ -8839,7 +8977,7 @@ def test_list_locations_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
call.return_value = cluster_service.ListLocationsResponse()
client.list_locations(request)
@@ -8864,9 +9002,7 @@ async def test_list_locations_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
cluster_service.ListLocationsResponse()
)
@@ -8887,7 +9023,7 @@ def test_list_locations_flattened():
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_locations), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListLocationsResponse()
@@ -8919,9 +9055,7 @@ async def test_list_locations_flattened_async():
client = ClusterManagerAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_locations), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_locations), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = cluster_service.ListLocationsResponse()
@@ -8988,7 +9122,7 @@ def test_transport_instance():
credentials=credentials.AnonymousCredentials(),
)
client = ClusterManagerClient(transport=transport)
- assert client._transport is transport
+ assert client.transport is transport
def test_transport_get_channel():
@@ -9024,7 +9158,7 @@ def test_transport_adc(transport_class):
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),)
- assert isinstance(client._transport, transports.ClusterManagerGrpcTransport,)
+ assert isinstance(client.transport, transports.ClusterManagerGrpcTransport,)
def test_cluster_manager_base_transport_error():
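The mechanical `client._transport` → `client.transport` edits throughout this file track a client-surface change: the transport is now exposed as a public read-only property instead of a private attribute, so tests (and users) no longer reach through `_client._transport`. A minimal sketch of such an accessor (hypothetical class; the real property is generated on `ClusterManagerClient`):

    class FakeClient:
        """Hypothetical client exposing its transport read-only."""

        def __init__(self, transport):
            self._transport = transport

        @property
        def transport(self):
            # Public, read-only view of the underlying transport.
            return self._transport

    client = FakeClient(transport="grpc-transport")
    assert client.transport == "grpc-transport"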
@@ -9149,7 +9283,7 @@ def test_cluster_manager_host_no_port():
api_endpoint="container.googleapis.com"
),
)
- assert client._transport._host == "container.googleapis.com:443"
+ assert client.transport._host == "container.googleapis.com:443"
def test_cluster_manager_host_with_port():
@@ -9159,7 +9293,7 @@ def test_cluster_manager_host_with_port():
api_endpoint="container.googleapis.com:8000"
),
)
- assert client._transport._host == "container.googleapis.com:8000"
+ assert client.transport._host == "container.googleapis.com:8000"
def test_cluster_manager_grpc_transport_channel():
@@ -9171,6 +9305,7 @@ def test_cluster_manager_grpc_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
def test_cluster_manager_grpc_asyncio_transport_channel():
@@ -9182,6 +9317,7 @@ def test_cluster_manager_grpc_asyncio_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
@@ -9229,6 +9365,7 @@ def test_cluster_manager_transport_channel_mtls_with_client_cert_source(
quota_project_id=None,
)
assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
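The new `_ssl_channel_credentials` assertions indicate that the gRPC transport now remembers the SSL credentials it was built with: `None` for plain channels, and the mTLS credentials otherwise. A hedged sketch of what the tests exercise (the class and constructor are simplified stand-ins, not the generated transport):

    class FakeGrpcTransport:
        """Hypothetical transport recording its channel credentials."""

        def __init__(self, host, ssl_channel_credentials=None):
            self._host = host
            # Kept so an mTLS channel can later be rebuilt with the
            # same credentials.
            self._ssl_channel_credentials = ssl_channel_credentials

    transport = FakeGrpcTransport("squid.clam.whelk:443")
    assert transport._ssl_channel_credentials is None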
@@ -9271,6 +9408,107 @@ def test_cluster_manager_transport_channel_mtls_with_adc(transport_class):
assert transport.grpc_channel == mock_grpc_channel
+def test_common_billing_account_path():
+ billing_account = "squid"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = ClusterManagerClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "clam",
+ }
+ path = ClusterManagerClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "whelk"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = ClusterManagerClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "octopus",
+ }
+ path = ClusterManagerClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "oyster"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = ClusterManagerClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "nudibranch",
+ }
+ path = ClusterManagerClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "cuttlefish"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = ClusterManagerClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "mussel",
+ }
+ path = ClusterManagerClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "winkle"
+ location = "nautilus"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = ClusterManagerClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ }
+ path = ClusterManagerClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = ClusterManagerClient.parse_common_location_path(path)
+ assert expected == actual
+
+
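The block of `common_*_path` tests added above covers classmethod helpers, shared by all generated clients, that format common resource names and parse them back. A self-contained sketch of one format/parse round-trip (the named-group regex mirrors how such parse helpers are typically implemented; treat it as an assumption, not the generated code):

    import re

    def common_folder_path(folder: str) -> str:
        return "folders/{folder}".format(folder=folder)

    def parse_common_folder_path(path: str) -> dict:
        # Invert the template with a named-group regex.
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    # The construction is reversible, as the tests assert.
    assert parse_common_folder_path(common_folder_path("octopus")) == {"folder": "octopus"}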
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()