diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index a3da1b0d4c..dd98abbdee 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 -# created: 2023-08-02T10:53:29.114535628Z + digest: sha256:08e34975760f002746b1d8c86fdc90660be45945ee6d9db914d1508acdf9a547 +# created: 2023-10-09T14:06:13.397766266Z diff --git a/.gitignore b/.gitignore index b4243ced74..d083ea1ddc 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,7 @@ docs.metadata # Virtual environment env/ +venv/ # Test logs coverage.xml diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 029bd342de..0332d3267e 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -113,30 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.3 \ - --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ - --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ - --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ - --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ - --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ - --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ - --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ - --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ - --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ - --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ - --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ - --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ - --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ - --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ - --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ - --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ - --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ - --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ - --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ - --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ - --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ - --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ - --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de +cryptography==41.0.4 \ + --hash=sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67 \ + --hash=sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311 \ + --hash=sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8 \ + --hash=sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13 \ + --hash=sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143 \ + 
--hash=sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f \ + --hash=sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829 \ + --hash=sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd \ + --hash=sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397 \ + --hash=sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac \ + --hash=sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d \ + --hash=sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a \ + --hash=sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839 \ + --hash=sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e \ + --hash=sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6 \ + --hash=sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9 \ + --hash=sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860 \ + --hash=sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca \ + --hash=sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91 \ + --hash=sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d \ + --hash=sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714 \ + --hash=sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb \ + --hash=sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f # via # gcp-releasetool # secretstorage @@ -382,6 +382,7 @@ protobuf==3.20.3 \ # gcp-docuploader # gcp-releasetool # google-api-core + # googleapis-common-protos pyasn1==0.4.8 \ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba @@ -466,9 +467,9 @@ typing-extensions==4.4.0 \ --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via -r requirements.in -urllib3==1.26.12 \ - --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ - --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997 +urllib3==1.26.17 \ + --hash=sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21 \ + --hash=sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b # via # requests # twine diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 19409cbd37..6a8e169506 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: end-of-file-fixer - id: check-yaml - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 23.7.0 hooks: - id: black - repo: https://github.com/pycqa/flake8 diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py index 5aaedde91c..07fe33ae45 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py @@ -3183,7 +3183,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: - r"""Call the cancel operation method over HTTP. 
Args: @@ -3258,7 +3257,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> None: - r"""Call the delete operation method over HTTP. Args: @@ -3333,7 +3331,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operations_pb2.Operation: - r"""Call the get operation method over HTTP. Args: @@ -3412,7 +3409,6 @@ def __call__( timeout: Optional[float] = None, metadata: Sequence[Tuple[str, str]] = (), ) -> operations_pb2.ListOperationsResponse: - r"""Call the list operations method over HTTP. Args: diff --git a/google/cloud/spanner_admin_instance_v1/__init__.py b/google/cloud/spanner_admin_instance_v1/__init__.py index bf1893144c..e92a5768ad 100644 --- a/google/cloud/spanner_admin_instance_v1/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/__init__.py @@ -22,6 +22,7 @@ from .services.instance_admin import InstanceAdminAsyncClient from .types.common import OperationProgress +from .types.spanner_instance_admin import AutoscalingConfig from .types.spanner_instance_admin import CreateInstanceConfigMetadata from .types.spanner_instance_admin import CreateInstanceConfigRequest from .types.spanner_instance_admin import CreateInstanceMetadata @@ -46,6 +47,7 @@ __all__ = ( "InstanceAdminAsyncClient", + "AutoscalingConfig", "CreateInstanceConfigMetadata", "CreateInstanceConfigRequest", "CreateInstanceMetadata", diff --git a/google/cloud/spanner_admin_instance_v1/types/__init__.py b/google/cloud/spanner_admin_instance_v1/types/__init__.py index 3ee4fcb10a..b4eaac8066 100644 --- a/google/cloud/spanner_admin_instance_v1/types/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/types/__init__.py @@ -17,6 +17,7 @@ OperationProgress, ) from .spanner_instance_admin import ( + AutoscalingConfig, CreateInstanceConfigMetadata, CreateInstanceConfigRequest, CreateInstanceMetadata, @@ -42,6 +43,7 @@ __all__ = ( "OperationProgress", + "AutoscalingConfig", "CreateInstanceConfigMetadata", "CreateInstanceConfigRequest", "CreateInstanceMetadata", diff --git a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py index 394e799d05..b4c18b85f2 100644 --- a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py +++ b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py @@ -30,6 +30,7 @@ manifest={ "ReplicaInfo", "InstanceConfig", + "AutoscalingConfig", "Instance", "ListInstanceConfigsRequest", "ListInstanceConfigsResponse", @@ -297,6 +298,116 @@ class State(proto.Enum): ) +class AutoscalingConfig(proto.Message): + r"""Autoscaling config for an instance. + + Attributes: + autoscaling_limits (google.cloud.spanner_admin_instance_v1.types.AutoscalingConfig.AutoscalingLimits): + Required. Autoscaling limits for an instance. + autoscaling_targets (google.cloud.spanner_admin_instance_v1.types.AutoscalingConfig.AutoscalingTargets): + Required. The autoscaling targets for an + instance. + """ + + class AutoscalingLimits(proto.Message): + r"""The autoscaling limits for the instance. Users can define the + minimum and maximum compute capacity allocated to the instance, and + the autoscaler will only scale within that range. Users can either + use nodes or processing units to specify the limits, but should use + the same unit to set both the min_limit and max_limit. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. 
+ Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + min_nodes (int): + Minimum number of nodes allocated to the + instance. If set, this number should be greater + than or equal to 1. + + This field is a member of `oneof`_ ``min_limit``. + min_processing_units (int): + Minimum number of processing units allocated + to the instance. If set, this number should be + multiples of 1000. + + This field is a member of `oneof`_ ``min_limit``. + max_nodes (int): + Maximum number of nodes allocated to the instance. If set, + this number should be greater than or equal to min_nodes. + + This field is a member of `oneof`_ ``max_limit``. + max_processing_units (int): + Maximum number of processing units allocated to the + instance. If set, this number should be multiples of 1000 + and be greater than or equal to min_processing_units. + + This field is a member of `oneof`_ ``max_limit``. + """ + + min_nodes: int = proto.Field( + proto.INT32, + number=1, + oneof="min_limit", + ) + min_processing_units: int = proto.Field( + proto.INT32, + number=2, + oneof="min_limit", + ) + max_nodes: int = proto.Field( + proto.INT32, + number=3, + oneof="max_limit", + ) + max_processing_units: int = proto.Field( + proto.INT32, + number=4, + oneof="max_limit", + ) + + class AutoscalingTargets(proto.Message): + r"""The autoscaling targets for an instance. + + Attributes: + high_priority_cpu_utilization_percent (int): + Required. The target high priority cpu utilization + percentage that the autoscaler should be trying to achieve + for the instance. This number is on a scale from 0 (no + utilization) to 100 (full utilization). The valid range is + [10, 90] inclusive. + storage_utilization_percent (int): + Required. The target storage utilization percentage that the + autoscaler should be trying to achieve for the instance. + This number is on a scale from 0 (no utilization) to 100 + (full utilization). The valid range is [10, 100] inclusive. + """ + + high_priority_cpu_utilization_percent: int = proto.Field( + proto.INT32, + number=1, + ) + storage_utilization_percent: int = proto.Field( + proto.INT32, + number=2, + ) + + autoscaling_limits: AutoscalingLimits = proto.Field( + proto.MESSAGE, + number=1, + message=AutoscalingLimits, + ) + autoscaling_targets: AutoscalingTargets = proto.Field( + proto.MESSAGE, + number=2, + message=AutoscalingTargets, + ) + + class Instance(proto.Message): r"""An isolated set of Cloud Spanner resources on which databases can be hosted. @@ -325,8 +436,13 @@ class Instance(proto.Message): node_count (int): The number of nodes allocated to this instance. At most one of either node_count or processing_units should be present - in the message. This may be zero in API responses for - instances that are not yet in state ``READY``. + in the message. + + Users can set the node_count field to specify the target + number of nodes allocated to the instance. + + This may be zero in API responses for instances that are not + yet in state ``READY``. See `the documentation `__ @@ -334,12 +450,23 @@ class Instance(proto.Message): processing_units (int): The number of processing units allocated to this instance. At most one of processing_units or node_count should be - present in the message. This may be zero in API responses - for instances that are not yet in state ``READY``. + present in the message. 
+ + Users can set the processing_units field to specify the + target number of processing units allocated to the instance. + + This may be zero in API responses for instances that are not + yet in state ``READY``. See `the documentation `__ for more information about nodes and processing units. + autoscaling_config (google.cloud.spanner_admin_instance_v1.types.AutoscalingConfig): + Optional. The autoscaling configuration. Autoscaling is + enabled if this field is set. When autoscaling is enabled, + node_count and processing_units are treated as OUTPUT_ONLY + fields and reflect the current compute capacity allocated to + the instance. state (google.cloud.spanner_admin_instance_v1.types.Instance.State): Output only. The current instance state. For [CreateInstance][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstance], @@ -424,6 +551,11 @@ class State(proto.Enum): proto.INT32, number=9, ) + autoscaling_config: "AutoscalingConfig" = proto.Field( + proto.MESSAGE, + number=17, + message="AutoscalingConfig", + ) state: State = proto.Field( proto.ENUM, number=6, diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 1d211f7d6d..eee34361b3 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -648,7 +648,6 @@ def execute_partitioned_dml( def execute_pdml(): with SessionCheckout(self._pool) as session: - txn = api.begin_transaction( session=session.name, options=txn_options, metadata=metadata ) diff --git a/google/cloud/spanner_v1/gapic_metadata.json b/google/cloud/spanner_v1/gapic_metadata.json index ea51736a55..f5957c633a 100644 --- a/google/cloud/spanner_v1/gapic_metadata.json +++ b/google/cloud/spanner_v1/gapic_metadata.json @@ -15,6 +15,11 @@ "batch_create_sessions" ] }, + "BatchWrite": { + "methods": [ + "batch_write" + ] + }, "BeginTransaction": { "methods": [ "begin_transaction" @@ -95,6 +100,11 @@ "batch_create_sessions" ] }, + "BatchWrite": { + "methods": [ + "batch_write" + ] + }, "BeginTransaction": { "methods": [ "begin_transaction" @@ -175,6 +185,11 @@ "batch_create_sessions" ] }, + "BatchWrite": { + "methods": [ + "batch_write" + ] + }, "BeginTransaction": { "methods": [ "begin_transaction" diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py index 977970ce7e..7c2e950793 100644 --- a/google/cloud/spanner_v1/services/spanner/async_client.py +++ b/google/cloud/spanner_v1/services/spanner/async_client.py @@ -1973,6 +1973,143 @@ async def sample_partition_read(): # Done; return the response. return response + def batch_write( + self, + request: Optional[Union[spanner.BatchWriteRequest, dict]] = None, + *, + session: Optional[str] = None, + mutation_groups: Optional[ + MutableSequence[spanner.BatchWriteRequest.MutationGroup] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[spanner.BatchWriteResponse]]: + r"""Batches the supplied mutation groups in a collection + of efficient transactions. All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. 
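Editor note: the new AutoscalingConfig message and the Instance.autoscaling_config field it plugs into are enough to create a self-scaling instance. A minimal sketch of doing so, assuming application-default credentials; the project, instance, and instance-config names are hypothetical:

    from google.cloud import spanner_admin_instance_v1

    def create_autoscaled_instance():
        client = spanner_admin_instance_v1.InstanceAdminClient()

        autoscaling_config = spanner_admin_instance_v1.AutoscalingConfig(
            autoscaling_limits=spanner_admin_instance_v1.AutoscalingConfig.AutoscalingLimits(
                min_nodes=1,  # oneof min_limit; must be >= 1
                max_nodes=3,  # oneof max_limit; must be >= min_nodes
            ),
            autoscaling_targets=spanner_admin_instance_v1.AutoscalingConfig.AutoscalingTargets(
                high_priority_cpu_utilization_percent=65,  # valid range [10, 90]
                storage_utilization_percent=95,  # valid range [10, 100]
            ),
        )

        operation = client.create_instance(
            request=spanner_admin_instance_v1.CreateInstanceRequest(
                parent="projects/my-project",  # hypothetical
                instance_id="my-autoscaled-instance",  # hypothetical
                instance=spanner_admin_instance_v1.Instance(
                    config="projects/my-project/instanceConfigs/regional-us-central1",
                    display_name="Autoscaled instance",
                    # With autoscaling enabled, node_count and processing_units
                    # become OUTPUT_ONLY and report current capacity.
                    autoscaling_config=autoscaling_config,
                ),
            )
        )
        return operation.result()  # block until the long-running operation finishes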
+ The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import spanner_v1 + + async def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerAsyncClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = await client.batch_write(request=request) + + # Handle the response + async for response in stream: + print(response) + + Args: + request (Optional[Union[google.cloud.spanner_v1.types.BatchWriteRequest, dict]]): + The request object. The request for + [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + session (:class:`str`): + Required. The session in which the + batch request is to be run. + + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutation_groups (:class:`MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]`): + Required. The groups of mutations to + be applied. + + This corresponds to the ``mutation_groups`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.spanner_v1.types.BatchWriteResponse]: + The result of applying a batch of + mutations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([session, mutation_groups]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = spanner.BatchWriteRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if session is not None: + request.session = session + if mutation_groups: + request.mutation_groups.extend(mutation_groups) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
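Editor note: the docstring's idempotency recommendation can be followed by building groups from insert_or_update writes rather than plain inserts, so a replayed group converges to the same row state instead of failing with "already exists". A sketch at the generated-client level, using the types this patch exports; the table and column names are hypothetical:

    from google.protobuf import struct_pb2

    from google.cloud.spanner_v1.types import BatchWriteRequest, Mutation

    def row(*values):
        # Spanner represents INT64 values as strings inside proto Value messages.
        return struct_pb2.ListValue(
            values=[struct_pb2.Value(string_value=str(v)) for v in values]
        )

    idempotent_group = BatchWriteRequest.MutationGroup(
        mutations=[
            Mutation(
                insert_or_update=Mutation.Write(  # replay-safe, unlike insert
                    table="Singers",  # hypothetical table
                    columns=["SingerId", "FirstName"],
                    values=[row(1, "Marc")],
                )
            )
        ]
    )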
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.batch_write, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def __aenter__(self) -> "SpannerAsyncClient": return self diff --git a/google/cloud/spanner_v1/services/spanner/client.py b/google/cloud/spanner_v1/services/spanner/client.py index 59dc4f222c..03907a1b0b 100644 --- a/google/cloud/spanner_v1/services/spanner/client.py +++ b/google/cloud/spanner_v1/services/spanner/client.py @@ -2119,6 +2119,143 @@ def sample_partition_read(): # Done; return the response. return response + def batch_write( + self, + request: Optional[Union[spanner.BatchWriteRequest, dict]] = None, + *, + session: Optional[str] = None, + mutation_groups: Optional[ + MutableSequence[spanner.BatchWriteRequest.MutationGroup] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[spanner.BatchWriteResponse]: + r"""Batches the supplied mutation groups in a collection + of efficient transactions. All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. + The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import spanner_v1 + + def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = client.batch_write(request=request) + + # Handle the response + for response in stream: + print(response) + + Args: + request (Union[google.cloud.spanner_v1.types.BatchWriteRequest, dict]): + The request object. The request for + [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + session (str): + Required. 
The session in which the + batch request is to be run. + + This corresponds to the ``session`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutation_groups (MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]): + Required. The groups of mutations to + be applied. + + This corresponds to the ``mutation_groups`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]: + The result of applying a batch of + mutations. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([session, mutation_groups]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a spanner.BatchWriteRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, spanner.BatchWriteRequest): + request = spanner.BatchWriteRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if session is not None: + request.session = session + if mutation_groups is not None: + request.mutation_groups = mutation_groups + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.batch_write] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("session", request.session),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def __enter__(self) -> "SpannerClient": return self diff --git a/google/cloud/spanner_v1/services/spanner/transports/base.py b/google/cloud/spanner_v1/services/spanner/transports/base.py index 668191c5f2..27006d8fbc 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/base.py +++ b/google/cloud/spanner_v1/services/spanner/transports/base.py @@ -322,6 +322,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=30.0, client_info=client_info, ), + self.batch_write: gapic_v1.method.wrap_method( + self.batch_write, + default_timeout=3600.0, + client_info=client_info, + ), } def close(self): @@ -473,6 +478,15 @@ def partition_read( ]: raise NotImplementedError() + @property + def batch_write( + self, + ) -> Callable[ + [spanner.BatchWriteRequest], + Union[spanner.BatchWriteResponse, Awaitable[spanner.BatchWriteResponse]], + ]: + raise NotImplementedError() + @property def kind(self) -> str: raise NotImplementedError() diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc.py b/google/cloud/spanner_v1/services/spanner/transports/grpc.py index 7236f0ed27..86d9ba4133 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc.py @@ -755,6 +755,50 @@ def partition_read( ) return self._stubs["partition_read"] + @property + def batch_write( + self, + ) -> Callable[[spanner.BatchWriteRequest], spanner.BatchWriteResponse]: + r"""Return a callable for the batch write method over gRPC. + + Batches the supplied mutation groups in a collection + of efficient transactions. All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. + The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + Returns: + Callable[[~.BatchWriteRequest], + ~.BatchWriteResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
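Editor note: the transport wiring above gives batch_write a 3600-second default timeout; callers can still override the timeout (and retry policy) per call. A minimal sketch, assuming `client` and `request` are constructed as in the generated samples:

    def batch_write_with_short_timeout(client, request):
        # Overrides the 3600s default_timeout configured in
        # _prep_wrapped_messages; 120 seconds is an illustrative value.
        return list(client.batch_write(request=request, timeout=120.0))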
+ if "batch_write" not in self._stubs: + self._stubs["batch_write"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/BatchWrite", + request_serializer=spanner.BatchWriteRequest.serialize, + response_deserializer=spanner.BatchWriteResponse.deserialize, + ) + return self._stubs["batch_write"] + def close(self): self.grpc_channel.close() diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py index 62a975c319..d0755e3a67 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py @@ -771,6 +771,50 @@ def partition_read( ) return self._stubs["partition_read"] + @property + def batch_write( + self, + ) -> Callable[[spanner.BatchWriteRequest], Awaitable[spanner.BatchWriteResponse]]: + r"""Return a callable for the batch write method over gRPC. + + Batches the supplied mutation groups in a collection + of efficient transactions. All mutations in a group are + committed atomically. However, mutations across groups + can be committed non-atomically in an unspecified order + and thus, they must be independent of each other. + Partial failure is possible, i.e., some groups may have + been committed successfully, while some may have failed. + The results of individual batches are streamed into the + response as the batches are applied. + + BatchWrite requests are not replay protected, meaning + that each mutation group may be applied more than once. + Replays of non-idempotent mutations may have undesirable + effects. For example, replays of an insert mutation may + produce an already exists error or if you use generated + or commit timestamp-based keys, it may result in + additional rows being added to the mutation's table. We + recommend structuring your mutation groups to be + idempotent to avoid this issue. + + Returns: + Callable[[~.BatchWriteRequest], + Awaitable[~.BatchWriteResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "batch_write" not in self._stubs: + self._stubs["batch_write"] = self.grpc_channel.unary_stream( + "/google.spanner.v1.Spanner/BatchWrite", + request_serializer=spanner.BatchWriteRequest.serialize, + response_deserializer=spanner.BatchWriteResponse.deserialize, + ) + return self._stubs["batch_write"] + def close(self): return self.grpc_channel.close() diff --git a/google/cloud/spanner_v1/services/spanner/transports/rest.py b/google/cloud/spanner_v1/services/spanner/transports/rest.py index d7157886a5..5e32bfaf2a 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/rest.py +++ b/google/cloud/spanner_v1/services/spanner/transports/rest.py @@ -78,6 +78,14 @@ def post_batch_create_sessions(self, response): logging.log(f"Received response: {response}") return response + def pre_batch_write(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_batch_write(self, response): + logging.log(f"Received response: {response}") + return response + def pre_begin_transaction(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -211,6 +219,27 @@ def post_batch_create_sessions( """ return response + def pre_batch_write( + self, request: spanner.BatchWriteRequest, metadata: Sequence[Tuple[str, str]] + ) -> Tuple[spanner.BatchWriteRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for batch_write + + Override in a subclass to manipulate the request or metadata + before they are sent to the Spanner server. + """ + return request, metadata + + def post_batch_write( + self, response: rest_streaming.ResponseIterator + ) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for batch_write + + Override in a subclass to manipulate the response + after it is returned by the Spanner server but before + it is returned to user code. + """ + return response + def pre_begin_transaction( self, request: spanner.BeginTransactionRequest, @@ -681,6 +710,101 @@ def __call__( resp = self._interceptor.post_batch_create_sessions(resp) return resp + class _BatchWrite(SpannerRestStub): + def __hash__(self): + return hash("BatchWrite") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: spanner.BatchWriteRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> rest_streaming.ResponseIterator: + r"""Call the batch write method over HTTP. + + Args: + request (~.spanner.BatchWriteRequest): + The request object. The request for + [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.spanner.BatchWriteResponse: + The result of applying a batch of + mutations. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:batchWrite", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_batch_write(request, metadata) + pb_request = spanner.BatchWriteRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, spanner.BatchWriteResponse) + resp = self._interceptor.post_batch_write(resp) + return resp + class _BeginTransaction(SpannerRestStub): def __hash__(self): return hash("BeginTransaction") @@ -2056,6 +2180,14 @@ def batch_create_sessions( # In C++ this would require a dynamic_cast return self._BatchCreateSessions(self._session, self._host, self._interceptor) # type: ignore + @property + def batch_write( + self, + ) -> Callable[[spanner.BatchWriteRequest], spanner.BatchWriteResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._BatchWrite(self._session, self._host, self._interceptor) # type: ignore + @property def begin_transaction( self, diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index 256e72511b..b25af53805 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -441,7 +441,6 @@ def _delay_until_retry(exc, deadline, attempts): delay = _get_retry_delay(cause, attempts) if delay is not None: - if now + delay > deadline: raise diff --git a/google/cloud/spanner_v1/types/__init__.py b/google/cloud/spanner_v1/types/__init__.py index df0960d9d9..f4f619f6c4 100644 --- a/google/cloud/spanner_v1/types/__init__.py +++ b/google/cloud/spanner_v1/types/__init__.py @@ -36,6 +36,8 @@ from .spanner import ( BatchCreateSessionsRequest, BatchCreateSessionsResponse, + BatchWriteRequest, + BatchWriteResponse, BeginTransactionRequest, CommitRequest, CreateSessionRequest, @@ -81,6 +83,8 @@ "ResultSetStats", "BatchCreateSessionsRequest", "BatchCreateSessionsResponse", + "BatchWriteRequest", + "BatchWriteResponse", "BeginTransactionRequest", "CommitRequest", "CreateSessionRequest", diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py index 310cf8e31f..dfd83ac165 100644 --- a/google/cloud/spanner_v1/types/spanner.py +++ b/google/cloud/spanner_v1/types/spanner.py @@ -53,6 +53,8 @@ "BeginTransactionRequest", "CommitRequest", "RollbackRequest", + "BatchWriteRequest", + "BatchWriteResponse", }, ) @@ -1329,4 +1331,83 @@ class RollbackRequest(proto.Message): ) +class BatchWriteRequest(proto.Message): + r"""The request for [BatchWrite][google.spanner.v1.Spanner.BatchWrite]. + + Attributes: + session (str): + Required. The session in which the batch + request is to be run. + request_options (google.cloud.spanner_v1.types.RequestOptions): + Common options for this request. + mutation_groups (MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]): + Required. The groups of mutations to be + applied. + """ + + class MutationGroup(proto.Message): + r"""A group of mutations to be committed together. Related + mutations should be placed in a group. For example, two + mutations inserting rows with the same primary key prefix in + both parent and child tables are related. + + Attributes: + mutations (MutableSequence[google.cloud.spanner_v1.types.Mutation]): + Required. The mutations in this group. + """ + + mutations: MutableSequence[mutation.Mutation] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=mutation.Mutation, + ) + + session: str = proto.Field( + proto.STRING, + number=1, + ) + request_options: "RequestOptions" = proto.Field( + proto.MESSAGE, + number=3, + message="RequestOptions", + ) + mutation_groups: MutableSequence[MutationGroup] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=MutationGroup, + ) + + +class BatchWriteResponse(proto.Message): + r"""The result of applying a batch of mutations. + + Attributes: + indexes (MutableSequence[int]): + The mutation groups applied in this batch. The values index + into the ``mutation_groups`` field in the corresponding + ``BatchWriteRequest``. + status (google.rpc.status_pb2.Status): + An ``OK`` status indicates success. Any other status + indicates a failure. + commit_timestamp (google.protobuf.timestamp_pb2.Timestamp): + The commit timestamp of the transaction that applied this + batch. Present if ``status`` is ``OK``, absent otherwise. 
+ """ + + indexes: MutableSequence[int] = proto.RepeatedField( + proto.INT32, + number=1, + ) + status: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=2, + message=status_pb2.Status, + ) + commit_timestamp: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/noxfile.py b/noxfile.py index 95fe0d2365..e1677c220b 100644 --- a/noxfile.py +++ b/noxfile.py @@ -17,22 +17,24 @@ # Generated by synthtool. DO NOT EDIT! from __future__ import absolute_import + import os import pathlib import re import shutil +from typing import Dict, List import warnings import nox FLAKE8_VERSION = "flake8==6.1.0" -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" +BLACK_VERSION = "black[jupyter]==23.7.0" +ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", @@ -40,25 +42,25 @@ "pytest-cov", "pytest-asyncio", ] -UNIT_TEST_EXTERNAL_DEPENDENCIES = [] -UNIT_TEST_LOCAL_DEPENDENCIES = [] -UNIT_TEST_DEPENDENCIES = [] -UNIT_TEST_EXTRAS = [] -UNIT_TEST_EXTRAS_BY_PYTHON = {} - -SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -SYSTEM_TEST_STANDARD_DEPENDENCIES = [ +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8"] +SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ "mock", "pytest", "google-cloud-testutils", ] -SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [] -SYSTEM_TEST_LOCAL_DEPENDENCIES = [] -SYSTEM_TEST_DEPENDENCIES = [] -SYSTEM_TEST_EXTRAS = [ +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [ "tracing", ] -SYSTEM_TEST_EXTRAS_BY_PYTHON = {} +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() @@ -71,6 +73,7 @@ "lint_setup_py", "blacken", "docs", + "format", ] # Error if a python version is missing @@ -210,7 +213,6 @@ def unit(session): def install_systemtest_dependencies(session, *constraints): - # Use pre-release gRPC for system tests. # Exclude version 1.52.0rc1 which has a known issue. 
# See https://github.com/grpc/grpc/issues/32163 diff --git a/samples/generated_samples/snippet_metadata_google.spanner.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.v1.json index a8e8be3ae3..4384d19e2a 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.v1.json @@ -180,6 +180,175 @@ ], "title": "spanner_v1_generated_spanner_batch_create_sessions_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.spanner_v1.SpannerAsyncClient", + "shortName": "SpannerAsyncClient" + }, + "fullName": "google.cloud.spanner_v1.SpannerAsyncClient.batch_write", + "method": { + "fullName": "google.spanner.v1.Spanner.BatchWrite", + "service": { + "fullName": "google.spanner.v1.Spanner", + "shortName": "Spanner" + }, + "shortName": "BatchWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_v1.types.BatchWriteRequest" + }, + { + "name": "session", + "type": "str" + }, + { + "name": "mutation_groups", + "type": "MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]", + "shortName": "batch_write" + }, + "description": "Sample for BatchWrite", + "file": "spanner_v1_generated_spanner_batch_write_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "spanner_v1_generated_Spanner_BatchWrite_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_spanner_batch_write_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.spanner_v1.SpannerClient", + "shortName": "SpannerClient" + }, + "fullName": "google.cloud.spanner_v1.SpannerClient.batch_write", + "method": { + "fullName": "google.spanner.v1.Spanner.BatchWrite", + "service": { + "fullName": "google.spanner.v1.Spanner", + "shortName": "Spanner" + }, + "shortName": "BatchWrite" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_v1.types.BatchWriteRequest" + }, + { + "name": "session", + "type": "str" + }, + { + "name": "mutation_groups", + "type": "MutableSequence[google.cloud.spanner_v1.types.BatchWriteRequest.MutationGroup]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]", + "shortName": "batch_write" + }, + "description": "Sample for BatchWrite", + "file": "spanner_v1_generated_spanner_batch_write_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "spanner_v1_generated_Spanner_BatchWrite_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + 
"start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 52, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 53, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_spanner_batch_write_sync.py" + }, { "canonical": true, "clientMethod": { diff --git a/samples/generated_samples/spanner_v1_generated_spanner_batch_write_async.py b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_async.py new file mode 100644 index 0000000000..39352562b1 --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner + + +# [START spanner_v1_generated_Spanner_BatchWrite_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_v1 + + +async def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerAsyncClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = await client.batch_write(request=request) + + # Handle the response + async for response in stream: + print(response) + +# [END spanner_v1_generated_Spanner_BatchWrite_async] diff --git a/samples/generated_samples/spanner_v1_generated_spanner_batch_write_sync.py b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_sync.py new file mode 100644 index 0000000000..4ee88b0cd6 --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_spanner_batch_write_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for BatchWrite +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner + + +# [START spanner_v1_generated_Spanner_BatchWrite_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_v1 + + +def sample_batch_write(): + # Create a client + client = spanner_v1.SpannerClient() + + # Initialize request argument(s) + mutation_groups = spanner_v1.MutationGroup() + mutation_groups.mutations.insert.table = "table_value" + + request = spanner_v1.BatchWriteRequest( + session="session_value", + mutation_groups=mutation_groups, + ) + + # Make the request + stream = client.batch_write(request=request) + + # Handle the response + for response in stream: + print(response) + +# [END spanner_v1_generated_Spanner_BatchWrite_sync] diff --git a/scripts/fixup_spanner_v1_keywords.py b/scripts/fixup_spanner_v1_keywords.py index df4d3501f2..b1ba4084df 100644 --- a/scripts/fixup_spanner_v1_keywords.py +++ b/scripts/fixup_spanner_v1_keywords.py @@ -40,6 +40,7 @@ class spannerCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'batch_create_sessions': ('database', 'session_count', 'session_template', ), + 'batch_write': ('session', 'mutation_groups', 'request_options', ), 'begin_transaction': ('session', 'options', 'request_options', ), 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', 'return_commit_stats', 'request_options', ), 'create_session': ('database', 'session', ), diff --git a/setup.py b/setup.py index 7f72131638..1738eed2ea 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,7 @@ dependencies = [ "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "google-cloud-core >= 1.4.1, < 3.0dev", + "google-cloud-core >= 1.4.4, < 3.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", "sqlparse >= 0.4.4", diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt index cddc7be6e5..165814fd90 100644 --- a/testing/constraints-3.7.txt +++ b/testing/constraints-3.7.txt @@ -5,7 +5,7 @@ # e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", # Then this file should have google-cloud-foo==1.14.0 google-api-core==1.34.0 -google-cloud-core==1.4.1 +google-cloud-core==1.4.4 grpc-google-iam-v1==0.12.4 libcst==0.2.5 proto-plus==1.22.0 diff --git a/tests/system/_sample_data.py b/tests/system/_sample_data.py index a7f3b80a86..2398442aff 100644 --- a/tests/system/_sample_data.py +++ b/tests/system/_sample_data.py @@ -70,7 +70,6 @@ def _check_row_data(row_data, expected, recurse_into_lists=True): def _check_cell_data(found_cell, expected_cell, recurse_into_lists=True): - if isinstance(found_cell, datetime_helpers.DatetimeWithNanoseconds): 
_assert_timestamp(expected_cell, found_cell) diff --git a/tests/system/conftest.py b/tests/system/conftest.py index fdeab14c8f..b297d1f2ad 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -119,7 +119,6 @@ def instance_configs(spanner_client): configs = list(_helpers.retry_503(spanner_client.list_instance_configs)()) if not _helpers.USE_EMULATOR: - # Defend against back-end returning configs for regions we aren't # actually allowed to use. configs = [config for config in configs if "-us-" in config.name] diff --git a/tests/system/test_dbapi.py b/tests/system/test_dbapi.py index cb5a11e89d..29617ad614 100644 --- a/tests/system/test_dbapi.py +++ b/tests/system/test_dbapi.py @@ -64,7 +64,6 @@ def clear_table(transaction): @pytest.fixture(scope="function") def dbapi_database(raw_database): - raw_database.run_in_transaction(clear_table) yield raw_database diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py index 4438b1459e..4a2ce5f495 100644 --- a/tests/system/test_session_api.py +++ b/tests/system/test_session_api.py @@ -306,7 +306,6 @@ def assert_span_attributes( def _make_attributes(db_instance, **kwargs): - attributes = { "db.type": "spanner", "db.url": "spanner.googleapis.com", @@ -1099,7 +1098,6 @@ def test_transaction_batch_update_w_parent_span( ) def unit_of_work(transaction): - status, row_counts = transaction.batch_update( [insert_statement, update_statement, delete_statement] ) @@ -1303,7 +1301,6 @@ def _row_data(max_index): def _set_up_table(database, row_count): - sd = _sample_data def _unit_of_work(transaction): @@ -1430,7 +1427,6 @@ def test_multiuse_snapshot_read_isolation_read_timestamp(sessions_database): with sessions_database.snapshot( read_timestamp=committed, multi_use=True ) as read_ts: - before = list(read_ts.read(sd.TABLE, sd.COLUMNS, sd.ALL)) sd._check_row_data(before, all_data_rows) @@ -1452,7 +1448,6 @@ def test_multiuse_snapshot_read_isolation_exact_staleness(sessions_database): delta = datetime.timedelta(microseconds=1000) with sessions_database.snapshot(exact_staleness=delta, multi_use=True) as exact: - before = list(exact.read(sd.TABLE, sd.COLUMNS, sd.ALL)) sd._check_row_data(before, all_data_rows) @@ -1958,7 +1953,6 @@ def test_multiuse_snapshot_execute_sql_isolation_strong(sessions_database): all_data_rows = list(_row_data(row_count)) with sessions_database.snapshot(multi_use=True) as strong: - before = list(strong.execute_sql(sd.SQL)) sd._check_row_data(before, all_data_rows) @@ -2018,7 +2012,6 @@ def test_invalid_type(sessions_database): def test_execute_sql_select_1(sessions_database): - sessions_database.snapshot(multi_use=True) # Hello, world query @@ -2188,7 +2181,6 @@ def test_execute_sql_w_bytes_bindings(sessions_database, database_dialect): def test_execute_sql_w_timestamp_bindings(sessions_database, database_dialect): - timestamp_1 = datetime_helpers.DatetimeWithNanoseconds( 1989, 1, 17, 17, 59, 12, nanosecond=345612789 ) @@ -2475,7 +2467,6 @@ def test_execute_sql_w_query_param_struct(sessions_database, not_postgres): def test_execute_sql_returning_transfinite_floats(sessions_database, not_postgres): - with sessions_database.snapshot(multi_use=True) as snapshot: # Query returning -inf, +inf, NaN as column values rows = list( @@ -2550,7 +2541,6 @@ def details(self): def _check_batch_status(status_code, expected=code_pb2.OK): if status_code != expected: - _status_code_to_grpc_status_code = { member.value[0]: member for member in grpc.StatusCode } diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py 
b/tests/unit/gapic/spanner_v1/test_spanner.py index 8bf8407724..7f593f1953 100644 --- a/tests/unit/gapic/spanner_v1/test_spanner.py +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -3857,6 +3857,292 @@ async def test_partition_read_field_headers_async(): ) in kw["metadata"] +@pytest.mark.parametrize( + "request_type", + [ + spanner.BatchWriteRequest, + dict, + ], +) +def test_batch_write(request_type, transport: str = "grpc"): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([spanner.BatchWriteResponse()]) + response = client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == spanner.BatchWriteRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, spanner.BatchWriteResponse) + + +def test_batch_write_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + client.batch_write() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == spanner.BatchWriteRequest() + + +@pytest.mark.asyncio +async def test_batch_write_async( + transport: str = "grpc_asyncio", request_type=spanner.BatchWriteRequest +): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[spanner.BatchWriteResponse()] + ) + response = await client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == spanner.BatchWriteRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, spanner.BatchWriteResponse) + + +@pytest.mark.asyncio +async def test_batch_write_async_from_dict(): + await test_batch_write_async(request_type=dict) + + +def test_batch_write_field_headers(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
+ request = spanner.BatchWriteRequest() + + request.session = "session_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + call.return_value = iter([spanner.BatchWriteResponse()]) + client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "session=session_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_batch_write_field_headers_async(): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = spanner.BatchWriteRequest() + + request.session = "session_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock( + side_effect=[spanner.BatchWriteResponse()] + ) + await client.batch_write(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "session=session_value", + ) in kw["metadata"] + + +def test_batch_write_flattened(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([spanner.BatchWriteResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.batch_write( + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].session + mock_val = "session_value" + assert arg == mock_val + arg = args[0].mutation_groups + mock_val = [ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ] + assert arg == mock_val + + +def test_batch_write_flattened_error(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.batch_write( + spanner.BatchWriteRequest(), + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + +@pytest.mark.asyncio +async def test_batch_write_flattened_async(): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.batch_write), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = iter([spanner.BatchWriteResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.batch_write( + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].session + mock_val = "session_value" + assert arg == mock_val + arg = args[0].mutation_groups + mock_val = [ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ] + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_batch_write_flattened_error_async(): + client = SpannerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.batch_write( + spanner.BatchWriteRequest(), + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + @pytest.mark.parametrize( "request_type", [ @@ -7695,6 +7981,315 @@ def test_partition_read_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + spanner.BatchWriteRequest, + dict, + ], +) +def test_batch_write_rest(request_type): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = { + "session": "projects/sample1/instances/sample2/databases/sample3/sessions/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = spanner.BatchWriteResponse( + indexes=[752], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = spanner.BatchWriteResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.batch_write(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner.BatchWriteResponse) + assert response.indexes == [752] + + +def test_batch_write_rest_required_fields(request_type=spanner.BatchWriteRequest): + transport_class = transports.SpannerRestTransport + + request_init = {} + request_init["session"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_write._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["session"] = "session_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).batch_write._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "session" in jsonified_request + assert jsonified_request["session"] == "session_value" + + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = spanner.BatchWriteResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = spanner.BatchWriteResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.batch_write(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_batch_write_rest_unset_required_fields(): + transport = transports.SpannerRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.batch_write._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "session", + "mutationGroups", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_batch_write_rest_interceptors(null_interceptor): + transport = transports.SpannerRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.SpannerRestInterceptor(), + ) + client = SpannerClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.SpannerRestInterceptor, "post_batch_write" + ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "pre_batch_write" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = spanner.BatchWriteRequest.pb(spanner.BatchWriteRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = spanner.BatchWriteResponse.to_json( + spanner.BatchWriteResponse() + ) + req.return_value._content = "[{}]".format(req.return_value._content) + + request = spanner.BatchWriteRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = spanner.BatchWriteResponse() + + client.batch_write( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_batch_write_rest_bad_request( + transport: str = "rest", request_type=spanner.BatchWriteRequest +): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = { + "session": "projects/sample1/instances/sample2/databases/sample3/sessions/sample4" + } + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.batch_write(request) + + +def test_batch_write_rest_flattened(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = spanner.BatchWriteResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "session": "projects/sample1/instances/sample2/databases/sample3/sessions/sample4" + } + + # get truthy value for each flattened field + mock_args = dict( + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = spanner.BatchWriteResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + with mock.patch.object(response_value, "iter_content") as iter_content: + iter_content.return_value = iter(json_return_value) + client.batch_write(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{session=projects/*/instances/*/databases/*/sessions/*}:batchWrite" + % client.transport._host, + args[1], + ) + + +def test_batch_write_rest_flattened_error(transport: str = "rest"): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.batch_write( + spanner.BatchWriteRequest(), + session="session_value", + mutation_groups=[ + spanner.BatchWriteRequest.MutationGroup( + mutations=[ + mutation.Mutation( + insert=mutation.Mutation.Write(table="table_value") + ) + ] + ) + ], + ) + + +def test_batch_write_rest_error(): + client = SpannerClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. 
transport = transports.SpannerGrpcTransport( @@ -7849,6 +8444,7 @@ def test_spanner_base_transport(): "rollback", "partition_query", "partition_read", + "batch_write", ) for method in methods: with pytest.raises(NotImplementedError): @@ -8161,6 +8757,9 @@ def test_spanner_client_transport_session_collision(transport_name): session1 = client1.transport.partition_read._session session2 = client2.transport.partition_read._session assert session1 != session2 + session1 = client1.transport.batch_write._session + session2 = client2.transport.batch_write._session + assert session1 != session2 def test_spanner_grpc_transport_channel(): diff --git a/tests/unit/spanner_dbapi/test_cursor.py b/tests/unit/spanner_dbapi/test_cursor.py index f744fc769f..46a093b109 100644 --- a/tests/unit/spanner_dbapi/test_cursor.py +++ b/tests/unit/spanner_dbapi/test_cursor.py @@ -20,7 +20,6 @@ class TestCursor(unittest.TestCase): - INSTANCE = "test-instance" DATABASE = "test-database" @@ -917,7 +916,6 @@ def test_fetchone_retry_aborted(self, mock_client): with mock.patch( "google.cloud.spanner_dbapi.connection.Connection.retry_transaction" ) as retry_mock: - cursor.fetchone() retry_mock.assert_called_with() @@ -948,7 +946,6 @@ def test_fetchone_retry_aborted_statements(self, mock_client): "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row], ResultsChecksum()), ) as run_mock: - cursor.fetchone() run_mock.assert_called_with(statement, retried=True) @@ -982,7 +979,6 @@ def test_fetchone_retry_aborted_statements_checksums_mismatch(self, mock_client) "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row2], ResultsChecksum()), ) as run_mock: - with self.assertRaises(RetryAborted): cursor.fetchone() @@ -1007,7 +1003,6 @@ def test_fetchall_retry_aborted(self, mock_client): with mock.patch( "google.cloud.spanner_dbapi.connection.Connection.retry_transaction" ) as retry_mock: - cursor.fetchall() retry_mock.assert_called_with() @@ -1071,7 +1066,6 @@ def test_fetchall_retry_aborted_statements_checksums_mismatch(self, mock_client) "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row2], ResultsChecksum()), ) as run_mock: - with self.assertRaises(RetryAborted): cursor.fetchall() @@ -1096,7 +1090,6 @@ def test_fetchmany_retry_aborted(self, mock_client): with mock.patch( "google.cloud.spanner_dbapi.connection.Connection.retry_transaction" ) as retry_mock: - cursor.fetchmany() retry_mock.assert_called_with() @@ -1127,7 +1120,6 @@ def test_fetchmany_retry_aborted_statements(self, mock_client): "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row], ResultsChecksum()), ) as run_mock: - cursor.fetchmany(len(row)) run_mock.assert_called_with(statement, retried=True) @@ -1161,7 +1153,6 @@ def test_fetchmany_retry_aborted_statements_checksums_mismatch(self, mock_client "google.cloud.spanner_dbapi.connection.Connection.run_statement", return_value=([row2], ResultsChecksum()), ) as run_mock: - with self.assertRaises(RetryAborted): cursor.fetchmany(len(row)) diff --git a/tests/unit/spanner_dbapi/test_parse_utils.py b/tests/unit/spanner_dbapi/test_parse_utils.py index ddd1d5572a..887f984c2c 100644 --- a/tests/unit/spanner_dbapi/test_parse_utils.py +++ b/tests/unit/spanner_dbapi/test_parse_utils.py @@ -20,7 +20,6 @@ class TestParseUtils(unittest.TestCase): - skip_condition = sys.version_info[0] < 3 skip_message = "Subtests are not supported in Python 2" @@ -112,7 +111,7 @@ def test_sql_pyformat_args_to_spanner(self): 
("SELECT * from t WHERE id=10", {"f1": "app", "f2": "name"}), ), ] - for ((sql_in, params), sql_want) in cases: + for (sql_in, params), sql_want in cases: with self.subTest(sql=sql_in): got_sql, got_named_args = sql_pyformat_args_to_spanner(sql_in, params) want_sql, want_named_args = sql_want diff --git a/tests/unit/spanner_dbapi/test_parser.py b/tests/unit/spanner_dbapi/test_parser.py index dd99f6fa4b..25f51591c2 100644 --- a/tests/unit/spanner_dbapi/test_parser.py +++ b/tests/unit/spanner_dbapi/test_parser.py @@ -17,7 +17,6 @@ class TestParser(unittest.TestCase): - skip_condition = sys.version_info[0] < 3 skip_message = "Subtests are not supported in Python 2" diff --git a/tests/unit/spanner_dbapi/test_types.py b/tests/unit/spanner_dbapi/test_types.py index 8c9dbe6c2b..375dc31853 100644 --- a/tests/unit/spanner_dbapi/test_types.py +++ b/tests/unit/spanner_dbapi/test_types.py @@ -18,7 +18,6 @@ class TestTypes(unittest.TestCase): - TICKS = 1572822862.9782631 + timezone # Sun 03 Nov 2019 23:14:22 UTC def test__date_from_ticks(self): diff --git a/tests/unit/spanner_dbapi/test_utils.py b/tests/unit/spanner_dbapi/test_utils.py index 76c347d402..fadbca1a09 100644 --- a/tests/unit/spanner_dbapi/test_utils.py +++ b/tests/unit/spanner_dbapi/test_utils.py @@ -17,7 +17,6 @@ class TestUtils(unittest.TestCase): - skip_condition = sys.version_info[0] < 3 skip_message = "Subtests are not supported in Python 2" diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py index 0199d44033..856816628f 100644 --- a/tests/unit/test_batch.py +++ b/tests/unit/test_batch.py @@ -32,7 +32,6 @@ class _BaseTest(unittest.TestCase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -426,7 +425,6 @@ class _Database(object): class _FauxSpannerAPI: - _create_instance_conflict = False _instance_not_found = False _committed = None diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index ed79271a96..049ee1124f 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -29,7 +29,6 @@ class _CredentialsWithScopes( class TestClient(unittest.TestCase): - PROJECT = "PROJECT" PATH = "projects/%s" % (PROJECT,) CONFIGURATION_NAME = "config-name" diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 5a6abf8084..bd368eed11 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -49,7 +49,6 @@ class _CredentialsWithScopes( class _BaseTest(unittest.TestCase): - PROJECT_ID = "project-id" PARENT = "projects/" + PROJECT_ID INSTANCE_ID = "instance-id" @@ -148,14 +147,12 @@ def test_ctor_w_route_to_leader_disbled(self): self.assertFalse(database._route_to_leader_enabled) def test_ctor_w_ddl_statements_non_string(self): - with self.assertRaises(ValueError): self._make_one( self.DATABASE_ID, instance=object(), ddl_statements=[object()] ) def test_ctor_w_ddl_statements_w_create_database(self): - with self.assertRaises(ValueError): self._make_one( self.DATABASE_ID, @@ -365,7 +362,6 @@ def test_default_leader(self): self.assertEqual(database.default_leader, default_leader) def test_spanner_api_property_w_scopeless_creds(self): - client = _Client() client_info = client._client_info = mock.Mock() client_options = client._client_options = mock.Mock() @@ -2744,7 +2740,6 @@ def put(self, session): class _Session(object): - _rows = () _created = False _transaction = None diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py index 0a7dbccb81..20064e7e88 100644 --- 
a/tests/unit/test_instance.py +++ b/tests/unit/test_instance.py @@ -17,7 +17,6 @@ class TestInstance(unittest.TestCase): - PROJECT = "project" PARENT = "projects/" + PROJECT INSTANCE_ID = "instance-id" @@ -1031,7 +1030,6 @@ def __eq__(self, other): class _FauxInstanceAdminAPI(object): - _create_instance_conflict = False _instance_not_found = False _rpc_error = False diff --git a/tests/unit/test_keyset.py b/tests/unit/test_keyset.py index a7bad4070d..8fc743e075 100644 --- a/tests/unit/test_keyset.py +++ b/tests/unit/test_keyset.py @@ -205,7 +205,6 @@ def test_ctor_w_ranges(self): self.assertEqual(keyset.ranges, [range_1, range_2]) def test_ctor_w_all_and_keys(self): - with self.assertRaises(ValueError): self._make_one(all_=True, keys=[["key1"], ["key2"]]) diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py index 58665634de..23ed3e7251 100644 --- a/tests/unit/test_pool.py +++ b/tests/unit/test_pool.py @@ -913,7 +913,6 @@ def _make_transaction(*args, **kw): @total_ordering class _Session(object): - _transaction = None def __init__(self, database, exists=True, transaction=None): @@ -1004,7 +1003,6 @@ def session(self, **kwargs): class _Queue(object): - _size = 1 def __init__(self, *items): @@ -1035,5 +1033,4 @@ def put_nowait(self, item, **kwargs): class _Pool(_Queue): - _database = None diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 3125e33f21..0bb02ebdc7 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -37,7 +37,6 @@ def time(self): class TestSession(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py index 5d2afb4fe6..0010877396 100644 --- a/tests/unit/test_snapshot.py +++ b/tests/unit/test_snapshot.py @@ -56,7 +56,6 @@ def _getTargetClass(self): def _makeDerived(self, session): class _Derived(self._getTargetClass()): - _transaction_id = None _multi_use = False @@ -514,7 +513,6 @@ def test_iteration_w_multiple_span_creation(self): class Test_SnapshotBase(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -533,7 +531,6 @@ def _make_one(self, session): def _makeDerived(self, session): class _Derived(self._getTargetClass()): - _transaction_id = None _multi_use = False @@ -1358,7 +1355,6 @@ def test_partition_query_ok_w_timeout_and_retry_params(self): class TestSnapshot(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID diff --git a/tests/unit/test_spanner.py b/tests/unit/test_spanner.py index e4cd1e84cd..8c04e1142d 100644 --- a/tests/unit/test_spanner.py +++ b/tests/unit/test_spanner.py @@ -88,7 +88,6 @@ class TestTransaction(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -344,7 +343,6 @@ def _read_helper( self.assertEqual(result_set.stats, stats_pb) def _read_helper_expected_request(self, partition=None, begin=True, count=0): - if begin is True: expected_transaction = TransactionSelector( begin=TransactionOptions(read_write=TransactionOptions.ReadWrite()) @@ -939,7 +937,6 @@ def __init__(self): class _Session(object): - _transaction = None def __init__(self, database=None, name=TestTransaction.SESSION_NAME): diff --git a/tests/unit/test_streamed.py 
b/tests/unit/test_streamed.py index 2714ddfb45..85dcb40026 100644 --- a/tests/unit/test_streamed.py +++ b/tests/unit/test_streamed.py @@ -973,7 +973,6 @@ def test___iter___w_existing_rows_read(self): class _MockCancellableIterator(object): - cancel_calls = 0 def __init__(self, *values): @@ -987,7 +986,6 @@ def __next__(self): # pragma: NO COVER Py3k class TestStreamedResultSet_JSON_acceptance_tests(unittest.TestCase): - _json_tests = None def _getTargetClass(self): @@ -1006,7 +1004,7 @@ def _load_json_test(self, test_name): filename = os.path.join(dirname, "streaming-read-acceptance-test.json") raw = _parse_streaming_read_acceptance_tests(filename) tests = self.__class__._json_tests = {} - for (name, partial_result_sets, results) in raw: + for name, partial_result_sets, results in raw: tests[name] = partial_result_sets, results return self.__class__._json_tests[test_name] diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index 85359dac19..ffcffa115e 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -42,7 +42,6 @@ class TestTransaction(OpenTelemetryBase): - PROJECT_ID = "project-id" INSTANCE_ID = "instance-id" INSTANCE_NAME = "projects/" + PROJECT_ID + "/instances/" + INSTANCE_ID @@ -910,7 +909,6 @@ def __init__(self): class _Session(object): - _transaction = None def __init__(self, database=None, name=TestTransaction.SESSION_NAME): @@ -919,7 +917,6 @@ def __init__(self, database=None, name=TestTransaction.SESSION_NAME): class _FauxSpannerAPI(object): - _committed = None def __init__(self, **kwargs):
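
The gRPC and REST suites added above exercise a new BatchWrite RPC end to end: a session plus a list of MutationGroup messages goes in, and a stream of BatchWriteResponse messages comes back, one per applied group. A minimal usage sketch against a live backend, assuming only the client surface these tests mock (SpannerClient.batch_write with the flattened session/mutation_groups fields); the database path, table, and column values are hypothetical:

    from google.cloud.spanner_v1.services.spanner import SpannerClient
    from google.cloud.spanner_v1.types import mutation, spanner

    client = SpannerClient()  # picks up Application Default Credentials

    # Each MutationGroup commits atomically on its own; groups are applied
    # independently of one another, which is what distinguishes BatchWrite
    # from a single Commit.
    group = spanner.BatchWriteRequest.MutationGroup(
        mutations=[
            mutation.Mutation(
                insert=mutation.Mutation.Write(
                    table="Singers",  # hypothetical table
                    columns=["SingerId", "FirstName"],
                    values=[["1", "Marc"]],  # INT64 values are JSON-encoded as strings
                )
            )
        ]
    )

    # batch_write is server-streaming: iterate to observe per-group results.
    session = "projects/p/instances/i/databases/d/sessions/s"  # hypothetical path
    for response in client.batch_write(session=session, mutation_groups=[group]):
        print(response.indexes, response.status)

Passing a full BatchWriteRequest together with the flattened fields raises ValueError, which is exactly what test_batch_write_flattened_error and its async twin assert.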
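
The REST variants stage their fake payloads as "[{}]".format(json_return_value) and mock iter_content because a server-streaming method over REST is delivered as a single JSON array of messages, parsed incrementally from the chunked body. A sketch of that mechanism in isolation, assuming google.api_core.rest_streaming.ResponseIterator (the helper the generated REST transport relies on); the canned payload is hypothetical:

    from unittest import mock

    import requests
    from google.api_core import rest_streaming
    from google.cloud.spanner_v1.types import spanner

    # Two stream elements, framed the way a streaming REST server sends
    # them: a JSON array whose elements are individual messages.
    payload = '[{"indexes": [0]}, {"indexes": [1]}]'

    response = requests.Response()
    response.status_code = 200
    response._content = payload.encode("utf-8")

    # Feeding the body one character at a time (as the tests above do with
    # iter(json_return_value)) shows that each message is yielded as soon
    # as its array element is complete.
    with mock.patch.object(response, "iter_content", return_value=iter(payload)):
        stream = rest_streaming.ResponseIterator(response, spanner.BatchWriteResponse)
        for message in stream:
            print(list(message.indexes))  # [0], then [1]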