diff --git a/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py index 9811ead1f0c5..97c46e01ee52 100644 --- a/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py +++ b/spanner/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py @@ -169,9 +169,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -213,14 +214,14 @@ def list_databases(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_databases(parent, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_databases(parent).pages: ... for element in page: ... # process element ... pass Args: - parent (str): Required. The instance whose databases should be listed. - Values are of the form ``projects//instances/``. + parent (str): Required. The instance whose databases should be listed. Values are of + the form ``projects//instances/``. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -285,14 +286,12 @@ def create_database(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Creates a new Cloud Spanner database and starts to prepare it for serving. - The returned ``long-running operation`` will - have a name of the format ``/operations/`` and - can be used to track preparation of the database. The - ``metadata`` field type is - ``CreateDatabaseMetadata``. The - ``response`` field type is - ``Database``, if successful. + Creates a new Cloud Spanner database and starts to prepare it for + serving. The returned ``long-running operation`` will have a name of the + format ``/operations/`` and can be used to + track preparation of the database. The ``metadata`` field type is + ``CreateDatabaseMetadata``. The ``response`` field type is ``Database``, + if successful. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -301,7 +300,7 @@ def create_database(self, >>> >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``create_statement``: + >>> # TODO: Initialize `create_statement`: >>> create_statement = '' >>> >>> response = client.create_database(parent, create_statement) @@ -319,10 +318,10 @@ def create_database(self, parent (str): Required. The name of the instance that will serve the new database. Values are of the form ``projects//instances/``. create_statement (str): Required. A ``CREATE DATABASE`` statement, which specifies the ID of the - new database. The database ID must conform to the regular expression - ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be between 2 and 30 characters in length. - If the database ID is a reserved word or if it contains a hyphen, the - database ID must be enclosed in backticks. + new database. The database ID must conform to the regular expression + ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be between 2 and 30 characters in + length. 
If the database ID is a reserved word or if it contains a + hyphen, the database ID must be enclosed in backticks (`````). extra_statements (list[str]): An optional list of DDL statements to run inside the newly created database. Statements can create tables, indexes, etc. These statements execute atomically with the creation of the database: @@ -435,11 +434,10 @@ def update_database_ddl(self, """ Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The returned - ``long-running operation`` will have a name of - the format ``/operations/`` and can be used to - track execution of the schema change(s). The - ``metadata`` field type is - ``UpdateDatabaseDdlMetadata``. The operation has no response. + ``long-running operation`` will have a name of the format + ``/operations/`` and can be used to track + execution of the schema change(s). The ``metadata`` field type is + ``UpdateDatabaseDdlMetadata``. The operation has no response. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -448,7 +446,7 @@ def update_database_ddl(self, >>> >>> database = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') >>> - >>> # TODO: Initialize ``statements``: + >>> # TODO: Initialize `statements`: >>> statements = [] >>> >>> response = client.update_database_ddl(database, statements) @@ -465,25 +463,21 @@ def update_database_ddl(self, Args: database (str): Required. The database to update. statements (list[str]): DDL statements to be applied to the database. - operation_id (str): If empty, the new update request is assigned an - automatically-generated operation ID. Otherwise, ``operation_id`` - is used to construct the name of the resulting - ``Operation``. - - Specifying an explicit operation ID simplifies determining - whether the statements were executed in the event that the - ``UpdateDatabaseDdl`` call is replayed, - or the return value is otherwise lost: the ``database`` and - ``operation_id`` fields can be combined to form the - ``name`` of the resulting - ``longrunning.Operation``: ``/operations/``. - - ``operation_id`` should be unique within the database, and must be - a valid identifier: ``[a-z][a-z0-9_]*``. Note that - automatically-generated operation IDs always begin with an - underscore. If the named operation already exists, - ``UpdateDatabaseDdl`` returns - ``ALREADY_EXISTS``. + operation_id (str): If empty, the new update request is assigned an automatically-generated + operation ID. Otherwise, ``operation_id`` is used to construct the name + of the resulting ``Operation``. + + Specifying an explicit operation ID simplifies determining whether the + statements were executed in the event that the ``UpdateDatabaseDdl`` + call is replayed, or the return value is otherwise lost: the + ``database`` and ``operation_id`` fields can be combined to form the + ``name`` of the resulting ``longrunning.Operation``: + ``/operations/``. + + ``operation_id`` should be unique within the database, and must be a + valid identifier: ``[a-z][a-z0-9_]*``. Note that automatically-generated + operation IDs always begin with an underscore. If the named operation + already exists, ``UpdateDatabaseDdl`` returns ``ALREADY_EXISTS``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -587,8 +581,8 @@ def get_database_ddl(self, metadata=None): """ Returns the schema of a Cloud Spanner database as a list of formatted - DDL statements. 
This method does not show pending schema updates, those may - be queried using the ``Operations`` API. + DDL statements. This method does not show pending schema updates, those + may be queried using the ``Operations`` API. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -656,7 +650,7 @@ def set_iam_policy(self, >>> >>> resource = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') >>> - >>> # TODO: Initialize ``policy``: + >>> # TODO: Initialize `policy`: >>> policy = {} >>> >>> response = client.set_iam_policy(resource, policy) @@ -665,10 +659,11 @@ def set_iam_policy(self, resource (str): REQUIRED: The resource for which the policy is being specified. ``resource`` is usually specified as a path. For example, a Project resource is specified as ``projects/{project}``. - policy (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The size of - the policy is limited to a few 10s of KB. An empty policy is a + policy (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The + size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_admin_database_v1.types.Policy` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -774,12 +769,13 @@ def test_iam_permissions(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Returns permissions that the caller has on the specified database resource. + Returns permissions that the caller has on the specified database + resource. - Attempting this RPC on a non-existent Cloud Spanner database will result in - a NOT_FOUND error if the user has ``spanner.databases.list`` permission on - the containing Cloud Spanner instance. Otherwise returns an empty set of - permissions. + Attempting this RPC on a non-existent Cloud Spanner database will result + in a NOT\_FOUND error if the user has ``spanner.databases.list`` + permission on the containing Cloud Spanner instance. Otherwise returns + an empty set of permissions. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -788,7 +784,7 @@ def test_iam_permissions(self, >>> >>> resource = client.database_path('[PROJECT]', '[INSTANCE]', '[DATABASE]') >>> - >>> # TODO: Initialize ``permissions``: + >>> # TODO: Initialize `permissions`: >>> permissions = [] >>> >>> response = client.test_iam_permissions(resource, permissions) @@ -799,8 +795,8 @@ def test_iam_permissions(self, resource is specified as ``projects/{project}``. permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see - `IAM Overview `_. + information see `IAM + Overview `__. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. 
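The one behavioural change in this file is the ``client_info`` handling near the top: passing ``None`` now yields a fresh ``ClientInfo`` carrying the GAPIC version instead of mutating the shared ``DEFAULT_CLIENT_INFO``, and a caller-supplied ``ClientInfo`` has its ``gapic_version`` stamped in place. A minimal doctest-style sketch of the caller side, assuming application code that wants to report its own library version (the ``client_library_version`` string is illustrative):

>>> from google.api_core.gapic_v1.client_info import ClientInfo
>>> from google.cloud import spanner_admin_database_v1
>>>
>>> # A fresh ClientInfo per client; the constructor fills in gapic_version
>>> # on the supplied object rather than touching DEFAULT_CLIENT_INFO.
>>> info = ClientInfo(client_library_version='my-app/1.0')
>>> client = spanner_admin_database_v1.DatabaseAdminClient(client_info=info)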
diff --git a/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py b/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py index 956d1c5ca04a..76a3db8a77e3 100644 --- a/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py +++ b/spanner/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py @@ -119,14 +119,12 @@ def list_databases(self): def create_database(self): """Return the gRPC stub for {$apiMethod.name}. - Creates a new Cloud Spanner database and starts to prepare it for serving. - The returned ``long-running operation`` will - have a name of the format ``/operations/`` and - can be used to track preparation of the database. The - ``metadata`` field type is - ``CreateDatabaseMetadata``. The - ``response`` field type is - ``Database``, if successful. + Creates a new Cloud Spanner database and starts to prepare it for + serving. The returned ``long-running operation`` will have a name of the + format ``/operations/`` and can be used to + track preparation of the database. The ``metadata`` field type is + ``CreateDatabaseMetadata``. The ``response`` field type is ``Database``, + if successful. Returns: Callable: A callable which accepts the appropriate @@ -154,11 +152,10 @@ def update_database_ddl(self): Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The returned - ``long-running operation`` will have a name of - the format ``/operations/`` and can be used to - track execution of the schema change(s). The - ``metadata`` field type is - ``UpdateDatabaseDdlMetadata``. The operation has no response. + ``long-running operation`` will have a name of the format + ``/operations/`` and can be used to track + execution of the schema change(s). The ``metadata`` field type is + ``UpdateDatabaseDdlMetadata``. The operation has no response. Returns: Callable: A callable which accepts the appropriate @@ -185,8 +182,8 @@ def get_database_ddl(self): """Return the gRPC stub for {$apiMethod.name}. Returns the schema of a Cloud Spanner database as a list of formatted - DDL statements. This method does not show pending schema updates, those may - be queried using the ``Operations`` API. + DDL statements. This method does not show pending schema updates, those + may be queried using the ``Operations`` API. Returns: Callable: A callable which accepts the appropriate @@ -233,12 +230,13 @@ def get_iam_policy(self): def test_iam_permissions(self): """Return the gRPC stub for {$apiMethod.name}. - Returns permissions that the caller has on the specified database resource. + Returns permissions that the caller has on the specified database + resource. - Attempting this RPC on a non-existent Cloud Spanner database will result in - a NOT_FOUND error if the user has ``spanner.databases.list`` permission on - the containing Cloud Spanner instance. Otherwise returns an empty set of - permissions. + Attempting this RPC on a non-existent Cloud Spanner database will result + in a NOT\_FOUND error if the user has ``spanner.databases.list`` + permission on the containing Cloud Spanner instance. Otherwise returns + an empty set of permissions. 
Returns: Callable: A callable which accepts the appropriate diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py index 0374e89566b7..24a3156e44e4 100644 --- a/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py +++ b/spanner/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py @@ -53,7 +53,7 @@ class InstanceAdminClient(object): modify and list instances. Instances are dedicated Cloud Spanner serving and storage resources to be used by Cloud Spanner databases. - Each instance has a \"configuration\", which dictates where the + Each instance has a "configuration", which dictates where the serving resources for the Cloud Spanner instance are located (e.g., US-central, Europe). Configurations are created by Google based on resource availability. @@ -193,9 +193,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -237,7 +238,7 @@ def list_instance_configs(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_instance_configs(parent, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_instance_configs(parent).pages: ... for element in page: ... # process element ... pass @@ -321,8 +322,8 @@ def get_instance_config(self, >>> response = client.get_instance_config(name) Args: - name (str): Required. The name of the requested instance configuration. Values are of - the form ``projects//instanceConfigs/``. + name (str): Required. The name of the requested instance configuration. Values are + of the form ``projects//instanceConfigs/``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -385,7 +386,7 @@ def list_instances(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_instances(parent, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_instances(parent).pages: ... for element in page: ... # process element ... pass @@ -401,22 +402,21 @@ def list_instances(self, filter_ (str): An expression for filtering the results of the request. Filter rules are case insensitive. The fields eligible for filtering are: - * ``name`` - * ``display_name`` - * ``labels.key`` where key is the name of a label + - ``name`` + - ``display_name`` + - ``labels.key`` where key is the name of a label Some examples of using filters are: - * ``name:*`` --> The instance has a name. - * ``name:Howl`` --> The instance's name contains the string \"howl\". - * ``name:HOWL`` --> Equivalent to above. - * ``NAME:howl`` --> Equivalent to above. - * ``labels.env:*`` --> The instance has the label \"env\". - * ``labels.env:dev`` --> The instance has the label \"env\" - and the value of the label contains the string \"dev\". - * ``name:howl labels.env:dev`` --> The instance's name - contains \"howl\" and it has the label \"env\" with - its value containing \"dev\". + - ``name:*`` --> The instance has a name. 
+ - ``name:Howl`` --> The instance's name contains the string "howl". + - ``name:HOWL`` --> Equivalent to above. + - ``NAME:howl`` --> Equivalent to above. + - ``labels.env:*`` --> The instance has the label "env". + - ``labels.env:dev`` --> The instance has the label "env" and the value + of the label contains the string "dev". + - ``name:howl labels.env:dev`` --> The instance's name contains "howl" + and it has the label "env" with its value containing "dev". retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -532,39 +532,36 @@ def create_instance(self, metadata=None): """ Creates an instance and begins preparing it to begin serving. The - returned ``long-running operation`` - can be used to track the progress of preparing the new - instance. The instance name is assigned by the caller. If the - named instance already exists, ``CreateInstance`` returns + returned ``long-running operation`` can be used to track the progress of + preparing the new instance. The instance name is assigned by the caller. + If the named instance already exists, ``CreateInstance`` returns ``ALREADY_EXISTS``. Immediately upon completion of this request: - * The instance is readable via the API, with all requested attributes - but no allocated resources. Its state is `CREATING`. + - The instance is readable via the API, with all requested attributes + but no allocated resources. Its state is ``CREATING``. Until completion of the returned operation: - * Cancelling the operation renders the instance immediately unreadable - via the API. - * The instance can be deleted. - * All other attempts to modify the instance are rejected. + - Cancelling the operation renders the instance immediately unreadable + via the API. + - The instance can be deleted. + - All other attempts to modify the instance are rejected. Upon completion of the returned operation: - * Billing for all successfully-allocated resources begins (some types - may have lower than the requested levels). - * Databases can be created in the instance. - * The instance's allocated resource levels are readable via the API. - * The instance's state becomes ``READY``. + - Billing for all successfully-allocated resources begins (some types + may have lower than the requested levels). + - Databases can be created in the instance. + - The instance's allocated resource levels are readable via the API. + - The instance's state becomes ``READY``. - The returned ``long-running operation`` will - have a name of the format ``/operations/`` and - can be used to track creation of the instance. The - ``metadata`` field type is - ``CreateInstanceMetadata``. - The ``response`` field type is - ``Instance``, if successful. + The returned ``long-running operation`` will have a name of the format + ``/operations/`` and can be used to track + creation of the instance. The ``metadata`` field type is + ``CreateInstanceMetadata``. The ``response`` field type is ``Instance``, + if successful. 
Example: >>> from google.cloud import spanner_admin_instance_v1 @@ -573,10 +570,10 @@ def create_instance(self, >>> >>> parent = client.project_path('[PROJECT]') >>> - >>> # TODO: Initialize ``instance_id``: + >>> # TODO: Initialize `instance_id`: >>> instance_id = '' >>> - >>> # TODO: Initialize ``instance``: + >>> # TODO: Initialize `instance`: >>> instance = {} >>> >>> response = client.create_instance(parent, instance_id, instance) @@ -591,13 +588,14 @@ def create_instance(self, >>> metadata = response.metadata() Args: - parent (str): Required. The name of the project in which to create the instance. Values - are of the form ``projects/``. - instance_id (str): Required. The ID of the instance to create. Valid identifiers are of the - form ``[a-z][-a-z0-9]*[a-z0-9]`` and must be between 6 and 30 characters in - length. - instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to create. The name may be omitted, but if + parent (str): Required. The name of the project in which to create the instance. + Values are of the form ``projects/``. + instance_id (str): Required. The ID of the instance to create. Valid identifiers are of the + form ``[a-z][-a-z0-9]*[a-z0-9]`` and must be between 6 and 30 characters + in length. + instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to create. The name may be omitted, but if specified must be ``/instances/``. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -651,43 +649,39 @@ def update_instance(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Updates an instance, and begins allocating or releasing resources - as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track the - progress of updating the instance. If the named instance does not + Updates an instance, and begins allocating or releasing resources as + requested. The returned ``long-running operation`` can be used to track + the progress of updating the instance. If the named instance does not exist, returns ``NOT_FOUND``. Immediately upon completion of this request: - * For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. + - For resource types for which a decrease in the instance's allocation + has been requested, billing is based on the newly-requested level. Until completion of the returned operation: - * Cancelling the operation sets its metadata's - [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], - and begins restoring resources to their pre-request values. - The operation is guaranteed to succeed at undoing all resource - changes, after which point it terminates with a `CANCELLED` status. - * All other attempts to modify the instance are rejected. - * Reading the instance via the API continues to give the pre-request - resource levels. + - Cancelling the operation sets its metadata's ``cancel_time``, and + begins restoring resources to their pre-request values. The operation + is guaranteed to succeed at undoing all resource changes, after which + point it terminates with a ``CANCELLED`` status. + - All other attempts to modify the instance are rejected. 
+ - Reading the instance via the API continues to give the pre-request + resource levels. Upon completion of the returned operation: - * Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - * All newly-reserved resources are available for serving the instance's - tables. - * The instance's new resource levels are readable via the API. + - Billing begins for all successfully-allocated resources (some types + may have lower than the requested levels). + - All newly-reserved resources are available for serving the instance's + tables. + - The instance's new resource levels are readable via the API. - The returned ``long-running operation`` will - have a name of the format ``/operations/`` and - can be used to track the instance modification. The - ``metadata`` field type is - ``UpdateInstanceMetadata``. - The ``response`` field type is - ``Instance``, if successful. + The returned ``long-running operation`` will have a name of the format + ``/operations/`` and can be used to track + the instance modification. The ``metadata`` field type is + ``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``, + if successful. Authorization requires ``spanner.instances.update`` permission on resource ``name``. @@ -697,10 +691,10 @@ def update_instance(self, >>> >>> client = spanner_admin_instance_v1.InstanceAdminClient() >>> - >>> # TODO: Initialize ``instance``: + >>> # TODO: Initialize `instance`: >>> instance = {} >>> - >>> # TODO: Initialize ``field_mask``: + >>> # TODO: Initialize `field_mask`: >>> field_mask = {} >>> >>> response = client.update_instance(instance, field_mask) @@ -716,13 +710,19 @@ def update_instance(self, Args: instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to update, which must always include the instance - name. Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. + name. Otherwise, only fields mentioned in + [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field\_mask] + need be included. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` - field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated. - The field mask must always be specified; this prevents any future fields in - [][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know - about them. + field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in + [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] + should be updated. The field mask must always be specified; this + prevents any future fields in + [][google.spanner.admin.instance.v1.Instance] from being erased + accidentally by clients that do not know about them. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -778,13 +778,13 @@ def delete_instance(self, Immediately upon completion of the request: - * Billing ceases for all of the instance's reserved resources. + - Billing ceases for all of the instance's reserved resources. 
Soon afterward: - * The instance and *all of its databases* immediately and - irrevocably disappear from the API. All data in the databases - is permanently deleted. + - The instance and *all of its databases* immediately and irrevocably + disappear from the API. All data in the databases is permanently + deleted. Example: >>> from google.cloud import spanner_admin_instance_v1 @@ -849,7 +849,7 @@ def set_iam_policy(self, >>> >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``policy``: + >>> # TODO: Initialize `policy`: >>> policy = {} >>> >>> response = client.set_iam_policy(resource, policy) @@ -858,10 +858,11 @@ def set_iam_policy(self, resource (str): REQUIRED: The resource for which the policy is being specified. ``resource`` is usually specified as a path. For example, a Project resource is specified as ``projects/{project}``. - policy (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The size of - the policy is limited to a few 10s of KB. An empty policy is a + policy (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The + size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_admin_instance_v1.types.Policy` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -907,8 +908,8 @@ def get_iam_policy(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Gets the access control policy for an instance resource. Returns an empty - policy if an instance exists but does not have a policy set. + Gets the access control policy for an instance resource. Returns an + empty policy if an instance exists but does not have a policy set. Authorization requires ``spanner.instances.getIamPolicy`` on ``resource``. @@ -967,12 +968,13 @@ def test_iam_permissions(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Returns permissions that the caller has on the specified instance resource. + Returns permissions that the caller has on the specified instance + resource. - Attempting this RPC on a non-existent Cloud Spanner instance resource will - result in a NOT_FOUND error if the user has ``spanner.instances.list`` - permission on the containing Google Cloud Project. Otherwise returns an - empty set of permissions. + Attempting this RPC on a non-existent Cloud Spanner instance resource + will result in a NOT\_FOUND error if the user has + ``spanner.instances.list`` permission on the containing Google Cloud + Project. Otherwise returns an empty set of permissions. Example: >>> from google.cloud import spanner_admin_instance_v1 @@ -981,7 +983,7 @@ def test_iam_permissions(self, >>> >>> resource = client.instance_path('[PROJECT]', '[INSTANCE]') >>> - >>> # TODO: Initialize ``permissions``: + >>> # TODO: Initialize `permissions`: >>> permissions = [] >>> >>> response = client.test_iam_permissions(resource, permissions) @@ -992,8 +994,8 @@ def test_iam_permissions(self, resource is specified as ``projects/{project}``. permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more - information see - `IAM Overview `_. 
+ information see `IAM + Overview `__. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. diff --git a/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py b/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py index 5f84d91d8453..2bc915d05239 100644 --- a/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py +++ b/spanner/google/cloud/spanner_admin_instance_v1/gapic/transports/instance_admin_grpc_transport.py @@ -159,39 +159,36 @@ def create_instance(self): """Return the gRPC stub for {$apiMethod.name}. Creates an instance and begins preparing it to begin serving. The - returned ``long-running operation`` - can be used to track the progress of preparing the new - instance. The instance name is assigned by the caller. If the - named instance already exists, ``CreateInstance`` returns + returned ``long-running operation`` can be used to track the progress of + preparing the new instance. The instance name is assigned by the caller. + If the named instance already exists, ``CreateInstance`` returns ``ALREADY_EXISTS``. Immediately upon completion of this request: - * The instance is readable via the API, with all requested attributes - but no allocated resources. Its state is `CREATING`. + - The instance is readable via the API, with all requested attributes + but no allocated resources. Its state is ``CREATING``. Until completion of the returned operation: - * Cancelling the operation renders the instance immediately unreadable - via the API. - * The instance can be deleted. - * All other attempts to modify the instance are rejected. + - Cancelling the operation renders the instance immediately unreadable + via the API. + - The instance can be deleted. + - All other attempts to modify the instance are rejected. Upon completion of the returned operation: - * Billing for all successfully-allocated resources begins (some types - may have lower than the requested levels). - * Databases can be created in the instance. - * The instance's allocated resource levels are readable via the API. - * The instance's state becomes ``READY``. + - Billing for all successfully-allocated resources begins (some types + may have lower than the requested levels). + - Databases can be created in the instance. + - The instance's allocated resource levels are readable via the API. + - The instance's state becomes ``READY``. - The returned ``long-running operation`` will - have a name of the format ``/operations/`` and - can be used to track creation of the instance. The - ``metadata`` field type is - ``CreateInstanceMetadata``. - The ``response`` field type is - ``Instance``, if successful. + The returned ``long-running operation`` will have a name of the format + ``/operations/`` and can be used to track + creation of the instance. The ``metadata`` field type is + ``CreateInstanceMetadata``. The ``response`` field type is ``Instance``, + if successful. Returns: Callable: A callable which accepts the appropriate @@ -204,43 +201,39 @@ def create_instance(self): def update_instance(self): """Return the gRPC stub for {$apiMethod.name}. - Updates an instance, and begins allocating or releasing resources - as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track the - progress of updating the instance. 
If the named instance does not + Updates an instance, and begins allocating or releasing resources as + requested. The returned ``long-running operation`` can be used to track + the progress of updating the instance. If the named instance does not exist, returns ``NOT_FOUND``. Immediately upon completion of this request: - * For resource types for which a decrease in the instance's allocation - has been requested, billing is based on the newly-requested level. + - For resource types for which a decrease in the instance's allocation + has been requested, billing is based on the newly-requested level. Until completion of the returned operation: - * Cancelling the operation sets its metadata's - [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins - restoring resources to their pre-request values. The operation - is guaranteed to succeed at undoing all resource changes, - after which point it terminates with a `CANCELLED` status. - * All other attempts to modify the instance are rejected. - * Reading the instance via the API continues to give the pre-request - resource levels. + - Cancelling the operation sets its metadata's ``cancel_time``, and + begins restoring resources to their pre-request values. The operation + is guaranteed to succeed at undoing all resource changes, after which + point it terminates with a ``CANCELLED`` status. + - All other attempts to modify the instance are rejected. + - Reading the instance via the API continues to give the pre-request + resource levels. Upon completion of the returned operation: - * Billing begins for all successfully-allocated resources (some types - may have lower than the requested levels). - * All newly-reserved resources are available for serving the instance's - tables. - * The instance's new resource levels are readable via the API. + - Billing begins for all successfully-allocated resources (some types + may have lower than the requested levels). + - All newly-reserved resources are available for serving the instance's + tables. + - The instance's new resource levels are readable via the API. - The returned ``long-running operation`` will - have a name of the format ``/operations/`` and - can be used to track the instance modification. The - ``metadata`` field type is - ``UpdateInstanceMetadata``. - The ``response`` field type is - ``Instance``, if successful. + The returned ``long-running operation`` will have a name of the format + ``/operations/`` and can be used to track + the instance modification. The ``metadata`` field type is + ``UpdateInstanceMetadata``. The ``response`` field type is ``Instance``, + if successful. Authorization requires ``spanner.instances.update`` permission on resource ``name``. @@ -260,13 +253,13 @@ def delete_instance(self): Immediately upon completion of the request: - * Billing ceases for all of the instance's reserved resources. + - Billing ceases for all of the instance's reserved resources. Soon afterward: - * The instance and *all of its databases* immediately and - irrevocably disappear from the API. All data in the databases - is permanently deleted. + - The instance and *all of its databases* immediately and irrevocably + disappear from the API. All data in the databases is permanently + deleted. Returns: Callable: A callable which accepts the appropriate @@ -296,8 +289,8 @@ def set_iam_policy(self): def get_iam_policy(self): """Return the gRPC stub for {$apiMethod.name}. - Gets the access control policy for an instance resource. 
Returns an empty - policy if an instance exists but does not have a policy set. + Gets the access control policy for an instance resource. Returns an + empty policy if an instance exists but does not have a policy set. Authorization requires ``spanner.instances.getIamPolicy`` on ``resource``. @@ -313,12 +306,13 @@ def get_iam_policy(self): def test_iam_permissions(self): """Return the gRPC stub for {$apiMethod.name}. - Returns permissions that the caller has on the specified instance resource. + Returns permissions that the caller has on the specified instance + resource. - Attempting this RPC on a non-existent Cloud Spanner instance resource will - result in a NOT_FOUND error if the user has ``spanner.instances.list`` - permission on the containing Google Cloud Project. Otherwise returns an - empty set of permissions. + Attempting this RPC on a non-existent Cloud Spanner instance resource + will result in a NOT\_FOUND error if the user has + ``spanner.instances.list`` permission on the containing Google Cloud + Project. Otherwise returns an empty set of permissions. Returns: Callable: A callable which accepts the appropriate diff --git a/spanner/google/cloud/spanner_v1/gapic/enums.py b/spanner/google/cloud/spanner_v1/gapic/enums.py index 3a7d0b7b58a1..9d4d8ea399dc 100644 --- a/spanner/google/cloud/spanner_v1/gapic/enums.py +++ b/spanner/google/cloud/spanner_v1/gapic/enums.py @@ -20,10 +20,10 @@ class NullValue(enum.IntEnum): """ - ``NullValue`` is a singleton enumeration to represent the null value for the - ``Value`` type union. + ``NullValue`` is a singleton enumeration to represent the null value for + the ``Value`` type union. - The JSON representation for ``NullValue`` is JSON ``null``. + The JSON representation for ``NullValue`` is JSON ``null``. Attributes: NULL_VALUE (int): Null value. @@ -33,36 +33,35 @@ class NullValue(enum.IntEnum): class TypeCode(enum.IntEnum): """ - ``TypeCode`` is used as part of ``Type`` to - indicate the type of a Cloud Spanner value. + ``TypeCode`` is used as part of ``Type`` to indicate the type of a Cloud + Spanner value. Each legal value of a type can be encoded to or decoded from a JSON value, using the encodings described below. All Cloud Spanner values can - be ``null``, regardless of type; ``null``s are always encoded as a JSON - ``null``. + be ``null``, regardless of type; ``null``\ s are always encoded as a + JSON ``null``. Attributes: TYPE_CODE_UNSPECIFIED (int): Not specified. BOOL (int): Encoded as JSON ``true`` or ``false``. INT64 (int): Encoded as ``string``, in decimal format. - FLOAT64 (int): Encoded as ``number``, or the strings ``\"NaN\"``, ``\"Infinity\"``, or - ``\"-Infinity\"``. - TIMESTAMP (int): Encoded as ``string`` in RFC 3339 timestamp format. The time zone - must be present, and must be ``\"Z\"``. - - If the schema has the column option - ``allow_commit_timestamp=true``, the placeholder string - ``\"spanner.commit_timestamp()\"`` can be used to instruct the system - to insert the commit timestamp associated with the transaction - commit. + FLOAT64 (int): Encoded as ``number``, or the strings ``"NaN"``, ``"Infinity"``, or + ``"-Infinity"``. + TIMESTAMP (int): Encoded as ``string`` in RFC 3339 timestamp format. The time zone must + be present, and must be ``"Z"``. + + If the schema has the column option ``allow_commit_timestamp=true``, the + placeholder string ``"spanner.commit_timestamp()"`` can be used to + instruct the system to insert the commit timestamp associated with the + transaction commit. 
DATE (int): Encoded as ``string`` in RFC 3339 date format. STRING (int): Encoded as ``string``. BYTES (int): Encoded as a base64-encoded ``string``, as described in RFC 4648, section 4. - ARRAY (int): Encoded as ``list``, where the list elements are represented - according to ``array_element_type``. + ARRAY (int): Encoded as ``list``, where the list elements are represented according + to ``array_element_type``. STRUCT (int): Encoded as ``list``, where list element ``i`` is represented according - to [struct_type.fields[i]][google.spanner.v1.StructType.fields]. + to [struct\_type.fields[i]][google.spanner.v1.StructType.fields]. """ TYPE_CODE_UNSPECIFIED = 0 BOOL = 1 @@ -79,8 +78,8 @@ class TypeCode(enum.IntEnum): class PlanNode(object): class Kind(enum.IntEnum): """ - The kind of ``PlanNode``. Distinguishes between the two different kinds of - nodes that can appear in a query plan. + The kind of ``PlanNode``. Distinguishes between the two different kinds + of nodes that can appear in a query plan. Attributes: KIND_UNSPECIFIED (int): Not specified. diff --git a/spanner/google/cloud/spanner_v1/gapic/spanner_client.py b/spanner/google/cloud/spanner_v1/gapic/spanner_client.py index be7dc587d7be..3c64c4f72175 100644 --- a/spanner/google/cloud/spanner_v1/gapic/spanner_client.py +++ b/spanner/google/cloud/spanner_v1/gapic/spanner_client.py @@ -170,9 +170,10 @@ def __init__(self, ) if client_info is None: - client_info = ( - google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO) - client_info.gapic_version = _GAPIC_LIBRARY_VERSION + client_info = google.api_core.gapic_v1.client_info.ClientInfo( + gapic_version=_GAPIC_LIBRARY_VERSION, ) + else: + client_info.gapic_version = _GAPIC_LIBRARY_VERSION self._client_info = client_info # Parse out the default settings for retry and timeout for each RPC @@ -196,25 +197,23 @@ def create_session(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Creates a new session. A session can be used to perform - transactions that read and/or modify data in a Cloud Spanner database. - Sessions are meant to be reused for many consecutive - transactions. + Creates a new session. A session can be used to perform transactions + that read and/or modify data in a Cloud Spanner database. Sessions are + meant to be reused for many consecutive transactions. - Sessions can only execute one transaction at a time. To execute - multiple concurrent read-write/write-only transactions, create - multiple sessions. Note that standalone reads and queries use a - transaction internally, and count toward the one transaction - limit. + Sessions can only execute one transaction at a time. To execute multiple + concurrent read-write/write-only transactions, create multiple sessions. + Note that standalone reads and queries use a transaction internally, and + count toward the one transaction limit. Cloud Spanner limits the number of sessions that can exist at any given time; thus, it is a good idea to delete idle and/or unneeded sessions. - Aside from explicit deletes, Cloud Spanner can delete sessions for which no - operations are sent for more than an hour. If a session is deleted, + Aside from explicit deletes, Cloud Spanner can delete sessions for which + no operations are sent for more than an hour. If a session is deleted, requests to it return ``NOT_FOUND``. Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., ``\"SELECT 1\"``. + periodically, e.g., ``"SELECT 1"``. 
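The keep-alive guidance above can be made concrete; a minimal doctest-style sketch, assuming ``client`` is an instantiated ``SpannerClient`` and ``database`` is a fully-qualified database path:

>>> session = client.create_session(database)
>>>
>>> # Any trivial statement keeps an otherwise idle session alive.
>>> client.execute_sql(session.name, 'SELECT 1')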
Example: >>> from google.cloud import spanner_v1 @@ -228,6 +227,7 @@ def create_session(self, Args: database (str): Required. The database in which the new session is created. session (Union[dict, ~google.cloud.spanner_v1.types.Session]): The session to create. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Session` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -274,8 +274,7 @@ def get_session(self, metadata=None): """ Gets a session. Returns ``NOT_FOUND`` if the session does not exist. - This is mainly useful for determining whether a session is still - alive. + This is mainly useful for determining whether a session is still alive. Example: >>> from google.cloud import spanner_v1 @@ -347,7 +346,7 @@ def list_sessions(self, >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_sessions(database, options=CallOptions(page_token=INITIAL_PAGE)): + >>> for page in client.list_sessions(database).pages: ... for element in page: ... # process element ... pass @@ -362,15 +361,13 @@ def list_sessions(self, filter_ (str): An expression for filtering the results of the request. Filter rules are case insensitive. The fields eligible for filtering are: - * ``labels.key`` where key is the name of a label + - ``labels.key`` where key is the name of a label Some examples of using filters are: - * ``labels.env:*`` --> The session has the label \"env\". - * ``labels.env:dev`` --> The session has the label \"env\" and the value of - :: - - the label contains the string \"dev\". + - ``labels.env:*`` --> The session has the label "env". + - ``labels.env:dev`` --> The session has the label "env" and the value + of the label contains the string "dev". retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -488,13 +485,13 @@ def execute_sql(self, metadata=None): """ Executes an SQL statement, returning all results in a single reply. This - method cannot be used to return a result set larger than 10 MiB; - if the query yields more data than that, the query fails with - a ``FAILED_PRECONDITION`` error. + method cannot be used to return a result set larger than 10 MiB; if the + query yields more data than that, the query fails with a + ``FAILED_PRECONDITION`` error. Operations inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from - the beginning. See ``Transaction`` for more details. + this occurs, the application should restart the transaction from the + beginning. See ``Transaction`` for more details. Larger result sets can be fetched in streaming fashion by calling ``ExecuteStreamingSql`` instead. @@ -506,7 +503,7 @@ def execute_sql(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``sql``: + >>> # TODO: Initialize `sql`: >>> sql = '' >>> >>> response = client.execute_sql(session, sql) @@ -527,47 +524,49 @@ def execute_sql(self, either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing PartitionedDml transaction ID. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL string can contain parameter placeholders. 
A parameter - placeholder consists of ``'@'`` followed by the parameter - name. Parameter names consist of any combination of letters, - numbers, and underscores. + placeholder consists of ``'@'`` followed by the parameter name. + Parameter names consist of any combination of letters, numbers, and + underscores. - Parameters can appear anywhere that a literal value is expected. The same - parameter name can be used more than once, for example: - ``\"WHERE id > @msg_id AND id < @msg_id + 100\"`` + Parameters can appear anywhere that a literal value is expected. The + same parameter name can be used more than once, for example: + ``"WHERE id > @msg_id AND id < @msg_id + 100"`` It is an error to execute an SQL statement with unbound parameters. - Parameter values are specified using ``params``, which is a JSON - object whose keys are parameter names, and whose values are the - corresponding parameter values. + Parameter values are specified using ``params``, which is a JSON object + whose keys are parameter names, and whose values are the corresponding + parameter values. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Struct` param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type - from a JSON value. For example, values of type ``BYTES`` and values - of type ``STRING`` both appear in ``params`` as JSON strings. + from a JSON value. For example, values of type ``BYTES`` and values of + type ``STRING`` both appear in ``params`` as JSON strings. + + In these cases, ``param_types`` can be used to specify the exact SQL + type for some or all of the SQL statement parameters. See the definition + of ``Type`` for more information about SQL types. - In these cases, ``param_types`` can be used to specify the exact - SQL type for some or all of the SQL statement parameters. See the - definition of ``Type`` for more information - about SQL types. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Type` resume_token (bytes): If this request is resuming a previously interrupted SQL statement execution, ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this - enables the new SQL statement execution to resume where the last one left - off. The rest of the request parameters must exactly match the - request that yielded this token. + ``PartialResultSet`` yielded before the interruption. Doing this enables + the new SQL statement execution to resume where the last one left off. + The rest of the request parameters must exactly match the request that + yielded this token. query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in - ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can only - be set to ``QueryMode.NORMAL``. + ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can + only be set to ``QueryMode.NORMAL``. partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionQuery(). There must be an exact - match for the values of fields common to this message and the - PartitionQueryRequest message used to create this partition_token. + previously created using PartitionQuery(). 
There must be an exact match + for the values of fields common to this message and the + PartitionQueryRequest message used to create this partition\_token. seqno (long): A per-transaction sequence number used to identify this request. This makes each request idempotent such that if the request is received multiple times, at most one will succeed. @@ -635,11 +634,10 @@ def execute_streaming_sql(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Like ``ExecuteSql``, except returns the result - set as a stream. Unlike ``ExecuteSql``, there - is no limit on the size of the returned result set. However, no - individual row in the result set can exceed 100 MiB, and no - column value can exceed 10 MiB. + Like ``ExecuteSql``, except returns the result set as a stream. Unlike + ``ExecuteSql``, there is no limit on the size of the returned result + set. However, no individual row in the result set can exceed 100 MiB, + and no column value can exceed 10 MiB. Example: >>> from google.cloud import spanner_v1 @@ -648,7 +646,7 @@ def execute_streaming_sql(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``sql``: + >>> # TODO: Initialize `sql`: >>> sql = '' >>> >>> for element in client.execute_streaming_sql(session, sql): @@ -671,47 +669,49 @@ def execute_streaming_sql(self, either supply an existing transaction ID or begin a new transaction. Partitioned DML requires an existing PartitionedDml transaction ID. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL string can contain parameter placeholders. A parameter - placeholder consists of ``'@'`` followed by the parameter - name. Parameter names consist of any combination of letters, - numbers, and underscores. + placeholder consists of ``'@'`` followed by the parameter name. + Parameter names consist of any combination of letters, numbers, and + underscores. - Parameters can appear anywhere that a literal value is expected. The same - parameter name can be used more than once, for example: - ``\"WHERE id > @msg_id AND id < @msg_id + 100\"`` + Parameters can appear anywhere that a literal value is expected. The + same parameter name can be used more than once, for example: + ``"WHERE id > @msg_id AND id < @msg_id + 100"`` It is an error to execute an SQL statement with unbound parameters. - Parameter values are specified using ``params``, which is a JSON - object whose keys are parameter names, and whose values are the - corresponding parameter values. + Parameter values are specified using ``params``, which is a JSON object + whose keys are parameter names, and whose values are the corresponding + parameter values. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Struct` param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type - from a JSON value. For example, values of type ``BYTES`` and values - of type ``STRING`` both appear in ``params`` as JSON strings. + from a JSON value. For example, values of type ``BYTES`` and values of + type ``STRING`` both appear in ``params`` as JSON strings. + + In these cases, ``param_types`` can be used to specify the exact SQL + type for some or all of the SQL statement parameters. 
See the definition + of ``Type`` for more information about SQL types. - In these cases, ``param_types`` can be used to specify the exact - SQL type for some or all of the SQL statement parameters. See the - definition of ``Type`` for more information - about SQL types. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Type` resume_token (bytes): If this request is resuming a previously interrupted SQL statement execution, ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this - enables the new SQL statement execution to resume where the last one left - off. The rest of the request parameters must exactly match the - request that yielded this token. + ``PartialResultSet`` yielded before the interruption. Doing this enables + the new SQL statement execution to resume where the last one left off. + The rest of the request parameters must exactly match the request that + yielded this token. query_mode (~google.cloud.spanner_v1.types.QueryMode): Used to control the amount of debugging information returned in - ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can only - be set to ``QueryMode.NORMAL``. + ``ResultSetStats``. If ``partition_token`` is set, ``query_mode`` can + only be set to ``QueryMode.NORMAL``. partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionQuery(). There must be an exact - match for the values of fields common to this message and the - PartitionQueryRequest message used to create this partition_token. + previously created using PartitionQuery(). There must be an exact match + for the values of fields common to this message and the + PartitionQueryRequest message used to create this partition\_token. seqno (long): A per-transaction sequence number used to identify this request. This makes each request idempotent such that if the request is received multiple times, at most one will succeed. @@ -781,16 +781,14 @@ def read(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Reads rows from the database using key lookups and scans, as a - simple key/value style alternative to - ``ExecuteSql``. This method cannot be used to - return a result set larger than 10 MiB; if the read matches more - data than that, the read fails with a ``FAILED_PRECONDITION`` - error. + Reads rows from the database using key lookups and scans, as a simple + key/value style alternative to ``ExecuteSql``. This method cannot be + used to return a result set larger than 10 MiB; if the read matches more + data than that, the read fails with a ``FAILED_PRECONDITION`` error. - Reads inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from - the beginning. See ``Transaction`` for more details. + Reads inside read-write transactions might return ``ABORTED``. If this + occurs, the application should restart the transaction from the + beginning. See ``Transaction`` for more details. Larger result sets can be yielded in streaming fashion by calling ``StreamingRead`` instead. 
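The ``params``/``param_types`` contract spelled out in the ``ExecuteSql`` docstrings above pairs naturally with a concrete binding. A minimal doctest-style sketch, assuming ``client`` and ``session`` as in the surrounding examples; the ``Messages`` table and ``@msg_id`` parameter are illustrative, and ``INT64`` values are passed as decimal strings:

>>> from google.protobuf import struct_pb2
>>> from google.cloud.spanner_v1.proto import type_pb2
>>>
>>> sql = 'SELECT * FROM Messages WHERE id > @msg_id AND id < @msg_id + 100'
>>> # Parameter values ride in a protobuf Struct keyed by parameter name.
>>> params = struct_pb2.Struct(
...     fields={'msg_id': struct_pb2.Value(string_value='1000')})
>>> # param_types pins the SQL type where JSON alone is ambiguous.
>>> param_types = {'msg_id': type_pb2.Type(code=type_pb2.INT64)}
>>>
>>> response = client.execute_sql(
...     session, sql, params=params, param_types=param_types)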
@@ -802,13 +800,13 @@ def read(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``table``: + >>> # TODO: Initialize `table`: >>> table = '' >>> - >>> # TODO: Initialize ``columns``: + >>> # TODO: Initialize `columns`: >>> columns = [] >>> - >>> # TODO: Initialize ``key_set``: + >>> # TODO: Initialize `key_set`: >>> key_set = {} >>> >>> response = client.read(session, table, columns, key_set) @@ -816,42 +814,43 @@ def read(self, Args: session (str): Required. The session in which the read should be performed. table (str): Required. The name of the table in the database to be read. - columns (list[str]): The columns of ``table`` to be returned for each row matching - this request. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` names the - primary keys of the rows in ``table`` to be yielded, unless ``index`` - is present. If ``index`` is present, then ``key_set`` instead names - index keys in ``index``. - - If the ``partition_token`` field is empty, rows are yielded - in table primary key order (if ``index`` is empty) or index key order - (if ``index`` is non-empty). If the ``partition_token`` field is not - empty, rows will be yielded in an unspecified order. - - It is not an error for the ``key_set`` to name rows that do not - exist in the database. Read yields nothing for nonexistent rows. + columns (list[str]): The columns of ``table`` to be returned for each row matching this + request. + key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` + names the primary keys of the rows in ``table`` to be yielded, unless + ``index`` is present. If ``index`` is present, then ``key_set`` instead + names index keys in ``index``. + + If the ``partition_token`` field is empty, rows are yielded in table + primary key order (if ``index`` is empty) or index key order (if + ``index`` is non-empty). If the ``partition_token`` field is not empty, + rows will be yielded in an unspecified order. + + It is not an error for the ``key_set`` to name rows that do not exist in + the database. Read yields nothing for nonexistent rows. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.KeySet` transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. This index is - used instead of the table primary key when interpreting ``key_set`` - and sorting result rows. See ``key_set`` for further information. - limit (long): If greater than zero, only the first ``limit`` rows are yielded. If ``limit`` - is zero, the default is no limit. A limit cannot be specified if - ``partition_token`` is set. + index (str): If non-empty, the name of an index on ``table``. This index is used + instead of the table primary key when interpreting ``key_set`` and + sorting result rows. See ``key_set`` for further information. + limit (long): If greater than zero, only the first ``limit`` rows are yielded. If + ``limit`` is zero, the default is no limit. 
A limit cannot be specified + if ``partition_token`` is set. resume_token (bytes): If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this - enables the new read to resume where the last read left off. The - rest of the request parameters must exactly match the request - that yielded this token. + ``resume_token`` should be copied from the last ``PartialResultSet`` + yielded before the interruption. Doing this enables the new read to + resume where the last read left off. The rest of the request parameters + must exactly match the request that yielded this token. partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionRead(). There must be an exact - match for the values of fields common to this message and the - PartitionReadRequest message used to create this partition_token. + previously created using PartitionRead(). There must be an exact match + for the values of fields common to this message and the + PartitionReadRequest message used to create this partition\_token. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -909,11 +908,10 @@ def streaming_read(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Like ``Read``, except returns the result set as a - stream. Unlike ``Read``, there is no limit on the - size of the returned result set. However, no individual row in - the result set can exceed 100 MiB, and no column value can exceed - 10 MiB. + Like ``Read``, except returns the result set as a stream. Unlike + ``Read``, there is no limit on the size of the returned result set. + However, no individual row in the result set can exceed 100 MiB, and no + column value can exceed 10 MiB. Example: >>> from google.cloud import spanner_v1 @@ -922,13 +920,13 @@ def streaming_read(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``table``: + >>> # TODO: Initialize `table`: >>> table = '' >>> - >>> # TODO: Initialize ``columns``: + >>> # TODO: Initialize `columns`: >>> columns = [] >>> - >>> # TODO: Initialize ``key_set``: + >>> # TODO: Initialize `key_set`: >>> key_set = {} >>> >>> for element in client.streaming_read(session, table, columns, key_set): @@ -938,42 +936,43 @@ def streaming_read(self, Args: session (str): Required. The session in which the read should be performed. table (str): Required. The name of the table in the database to be read. - columns (list[str]): The columns of ``table`` to be returned for each row matching - this request. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` names the - primary keys of the rows in ``table`` to be yielded, unless ``index`` - is present. If ``index`` is present, then ``key_set`` instead names - index keys in ``index``. - - If the ``partition_token`` field is empty, rows are yielded - in table primary key order (if ``index`` is empty) or index key order - (if ``index`` is non-empty). If the ``partition_token`` field is not - empty, rows will be yielded in an unspecified order. - - It is not an error for the ``key_set`` to name rows that do not - exist in the database. Read yields nothing for nonexistent rows. 
+ columns (list[str]): The columns of ``table`` to be returned for each row matching this + request. + key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` + names the primary keys of the rows in ``table`` to be yielded, unless + ``index`` is present. If ``index`` is present, then ``key_set`` instead + names index keys in ``index``. + + If the ``partition_token`` field is empty, rows are yielded in table + primary key order (if ``index`` is empty) or index key order (if + ``index`` is non-empty). If the ``partition_token`` field is not empty, + rows will be yielded in an unspecified order. + + It is not an error for the ``key_set`` to name rows that do not exist in + the database. Read yields nothing for nonexistent rows. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.KeySet` transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): The transaction to use. If none is provided, the default is a temporary read-only transaction with strong concurrency. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. This index is - used instead of the table primary key when interpreting ``key_set`` - and sorting result rows. See ``key_set`` for further information. - limit (long): If greater than zero, only the first ``limit`` rows are yielded. If ``limit`` - is zero, the default is no limit. A limit cannot be specified if - ``partition_token`` is set. + index (str): If non-empty, the name of an index on ``table``. This index is used + instead of the table primary key when interpreting ``key_set`` and + sorting result rows. See ``key_set`` for further information. + limit (long): If greater than zero, only the first ``limit`` rows are yielded. If + ``limit`` is zero, the default is no limit. A limit cannot be specified + if ``partition_token`` is set. resume_token (bytes): If this request is resuming a previously interrupted read, - ``resume_token`` should be copied from the last - ``PartialResultSet`` yielded before the interruption. Doing this - enables the new read to resume where the last read left off. The - rest of the request parameters must exactly match the request - that yielded this token. + ``resume_token`` should be copied from the last ``PartialResultSet`` + yielded before the interruption. Doing this enables the new read to + resume where the last read left off. The rest of the request parameters + must exactly match the request that yielded this token. partition_token (bytes): If present, results will be restricted to the specified partition - previously created using PartitionRead(). There must be an exact - match for the values of fields common to this message and the - PartitionReadRequest message used to create this partition_token. + previously created using PartitionRead(). There must be an exact match + for the values of fields common to this message and the + PartitionReadRequest message used to create this partition\_token. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. @@ -1025,9 +1024,8 @@ def begin_transaction(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Begins a new transaction. 
This step can often be skipped: - ``Read``, ``ExecuteSql`` and - ``Commit`` can begin a new transaction as a + Begins a new transaction. This step can often be skipped: ``Read``, + ``ExecuteSql`` and ``Commit`` can begin a new transaction as a side-effect. Example: @@ -1037,7 +1035,7 @@ def begin_transaction(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``options_``: + >>> # TODO: Initialize `options_`: >>> options_ = {} >>> >>> response = client.begin_transaction(session, options_) @@ -1045,6 +1043,7 @@ def begin_transaction(self, Args: session (str): Required. The session in which the transaction runs. options_ (Union[dict, ~google.cloud.spanner_v1.types.TransactionOptions]): Required. Options for the new transaction. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -1094,14 +1093,14 @@ def commit(self, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None): """ - Commits a transaction. The request includes the mutations to be - applied to rows in the database. + Commits a transaction. The request includes the mutations to be applied + to rows in the database. - ``Commit`` might return an ``ABORTED`` error. This can occur at any time; - commonly, the cause is conflicts with concurrent - transactions. However, it can also happen for a variety of other - reasons. If ``Commit`` returns ``ABORTED``, the caller should re-attempt - the transaction from the beginning, re-using the same session. + ``Commit`` might return an ``ABORTED`` error. This can occur at any + time; commonly, the cause is conflicts with concurrent transactions. + However, it can also happen for a variety of other reasons. If + ``Commit`` returns ``ABORTED``, the caller should re-attempt the + transaction from the beginning, re-using the same session. Example: >>> from google.cloud import spanner_v1 @@ -1110,7 +1109,7 @@ def commit(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``mutations``: + >>> # TODO: Initialize `mutations`: >>> mutations = [] >>> >>> response = client.commit(session, mutations) @@ -1120,18 +1119,18 @@ def commit(self, mutations (list[Union[dict, ~google.cloud.spanner_v1.types.Mutation]]): The mutations to be executed when this transaction commits. All mutations are applied atomically, in the order they appear in this list. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Mutation` transaction_id (bytes): Commit a previously-started transaction. - single_use_transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionOptions]): Execute mutations in a temporary transaction. Note that unlike - commit of a previously-started transaction, commit with a - temporary transaction is non-idempotent. That is, if the - ``CommitRequest`` is sent to Cloud Spanner more than once (for - instance, due to retries in the application, or in the - transport library), it is possible that the mutations are + single_use_transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionOptions]): Execute mutations in a temporary transaction. Note that unlike commit of + a previously-started transaction, commit with a temporary transaction is + non-idempotent. 
That is, if the ``CommitRequest`` is sent to Cloud + Spanner more than once (for instance, due to retries in the application, + or in the transport library), it is possible that the mutations are executed more than once. If this is undesirable, use - ``BeginTransaction`` and - ``Commit`` instead. + ``BeginTransaction`` and ``Commit`` instead. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -1187,13 +1186,12 @@ def rollback(self, metadata=None): """ Rolls back a transaction, releasing any locks it holds. It is a good - idea to call this for any transaction that includes one or more - ``Read`` or ``ExecuteSql`` requests and - ultimately decides not to commit. + idea to call this for any transaction that includes one or more ``Read`` + or ``ExecuteSql`` requests and ultimately decides not to commit. - ``Rollback`` returns ``OK`` if it successfully aborts the transaction, the - transaction was already aborted, or the transaction is not - found. ``Rollback`` never returns ``ABORTED``. + ``Rollback`` returns ``OK`` if it successfully aborts the transaction, + the transaction was already aborted, or the transaction is not found. + ``Rollback`` never returns ``ABORTED``. Example: >>> from google.cloud import spanner_v1 @@ -1202,7 +1200,7 @@ def rollback(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``transaction_id``: + >>> # TODO: Initialize `transaction_id`: >>> transaction_id = b'' >>> >>> client.rollback(session, transaction_id) @@ -1255,16 +1253,16 @@ def partition_query(self, metadata=None): """ Creates a set of partition tokens that can be used to execute a query - operation in parallel. Each of the returned partition tokens can be used - by ``ExecuteStreamingSql`` to specify a subset - of the query result to read. The same session and read-only transaction - must be used by the PartitionQueryRequest used to create the - partition tokens and the ExecuteSqlRequests that use the partition tokens. + operation in parallel. Each of the returned partition tokens can be used + by ``ExecuteStreamingSql`` to specify a subset of the query result to + read. The same session and read-only transaction must be used by the + PartitionQueryRequest used to create the partition tokens and the + ExecuteSqlRequests that use the partition tokens. - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, and - the whole operation must be restarted from the beginning. + Partition tokens become invalid when the session used to create them is + deleted, is idle for too long, begins a new transaction, or becomes too + old. When any of these happen, it is not possible to resume the query, + and the whole operation must be restarted from the beginning. Example: >>> from google.cloud import spanner_v1 @@ -1273,7 +1271,7 @@ def partition_query(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``sql``: + >>> # TODO: Initialize `sql`: >>> sql = '' >>> >>> response = client.partition_query(session, sql) @@ -1282,45 +1280,48 @@ def partition_query(self, session (str): Required. The session used to create the partitions. 
sql (str): The query request to generate partitions for. The request will fail if the query is not root partitionable. The query plan of a root - partitionable query has a single distributed union operator. A distributed - union operator conceptually divides one or more tables into multiple - splits, remotely evaluates a subquery independently on each split, and - then unions all results. - - This must not contain DML commands, such as INSERT, UPDATE, or - DELETE. Use ``ExecuteStreamingSql`` with a - PartitionedDml transaction for large, partition-friendly DML operations. + partitionable query has a single distributed union operator. A + distributed union operator conceptually divides one or more tables into + multiple splits, remotely evaluates a subquery independently on each + split, and then unions all results. + + This must not contain DML commands, such as INSERT, UPDATE, or DELETE. + Use ``ExecuteStreamingSql`` with a PartitionedDml transaction for large, + partition-friendly DML operations. transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use transactions are not. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` params (Union[dict, ~google.cloud.spanner_v1.types.Struct]): The SQL query string can contain parameter placeholders. A parameter - placeholder consists of ``'@'`` followed by the parameter - name. Parameter names consist of any combination of letters, - numbers, and underscores. + placeholder consists of ``'@'`` followed by the parameter name. + Parameter names consist of any combination of letters, numbers, and + underscores. - Parameters can appear anywhere that a literal value is expected. The same - parameter name can be used more than once, for example: - ``\"WHERE id > @msg_id AND id < @msg_id + 100\"`` + Parameters can appear anywhere that a literal value is expected. The + same parameter name can be used more than once, for example: + ``"WHERE id > @msg_id AND id < @msg_id + 100"`` It is an error to execute an SQL query with unbound parameters. - Parameter values are specified using ``params``, which is a JSON - object whose keys are parameter names, and whose values are the - corresponding parameter values. + Parameter values are specified using ``params``, which is a JSON object + whose keys are parameter names, and whose values are the corresponding + parameter values. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Struct` param_types (dict[str -> Union[dict, ~google.cloud.spanner_v1.types.Type]]): It is not always possible for Cloud Spanner to infer the right SQL type - from a JSON value. For example, values of type ``BYTES`` and values - of type ``STRING`` both appear in ``params`` as JSON strings. + from a JSON value. For example, values of type ``BYTES`` and values of + type ``STRING`` both appear in ``params`` as JSON strings. + + In these cases, ``param_types`` can be used to specify the exact SQL + type for some or all of the SQL query parameters. See the definition of + ``Type`` for more information about SQL types. - In these cases, ``param_types`` can be used to specify the exact - SQL type for some or all of the SQL query parameters. See the - definition of ``Type`` for more information - about SQL types. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Type` partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.PartitionOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used @@ -1377,18 +1378,19 @@ def partition_read(self, metadata=None): """ Creates a set of partition tokens that can be used to execute a read - operation in parallel. Each of the returned partition tokens can be used - by ``StreamingRead`` to specify a subset of the read - result to read. The same session and read-only transaction must be used by - the PartitionReadRequest used to create the partition tokens and the - ReadRequests that use the partition tokens. There are no ordering + operation in parallel. Each of the returned partition tokens can be used + by ``StreamingRead`` to specify a subset of the read result to read. The + same session and read-only transaction must be used by the + PartitionReadRequest used to create the partition tokens and the + ReadRequests that use the partition tokens. There are no ordering guarantees on rows returned among the returned partition tokens, or even - within each individual StreamingRead call issued with a partition_token. + within each individual StreamingRead call issued with a + partition\_token. - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, and - the whole operation must be restarted from the beginning. + Partition tokens become invalid when the session used to create them is + deleted, is idle for too long, begins a new transaction, or becomes too + old. When any of these happen, it is not possible to resume the read, + and the whole operation must be restarted from the beginning. Example: >>> from google.cloud import spanner_v1 @@ -1397,10 +1399,10 @@ def partition_read(self, >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> - >>> # TODO: Initialize ``table``: + >>> # TODO: Initialize `table`: >>> table = '' >>> - >>> # TODO: Initialize ``key_set``: + >>> # TODO: Initialize `key_set`: >>> key_set = {} >>> >>> response = client.partition_read(session, table, key_set) @@ -1408,25 +1410,28 @@ def partition_read(self, Args: session (str): Required. The session used to create the partitions. table (str): Required. The name of the table in the database to be read. - key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` names the - primary keys of the rows in ``table`` to be yielded, unless ``index`` - is present. If ``index`` is present, then ``key_set`` instead names - index keys in ``index``. + key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` + names the primary keys of the rows in ``table`` to be yielded, unless + ``index`` is present. If ``index`` is present, then ``key_set`` instead + names index keys in ``index``. + + It is not an error for the ``key_set`` to name rows that do not exist in + the database. Read yields nothing for nonexistent rows. 
- It is not an error for the ``key_set`` to name rows that do not - exist in the database. Read yields nothing for nonexistent rows. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.KeySet` transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use transactions are not. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` - index (str): If non-empty, the name of an index on ``table``. This index is - used instead of the table primary key when interpreting ``key_set`` - and sorting result rows. See ``key_set`` for further information. - columns (list[str]): The columns of ``table`` to be returned for each row matching - this request. + index (str): If non-empty, the name of an index on ``table``. This index is used + instead of the table primary key when interpreting ``key_set`` and + sorting result rows. See ``key_set`` for further information. + columns (list[str]): The columns of ``table`` to be returned for each row matching this + request. partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created. + If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.PartitionOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used diff --git a/spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py b/spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py index b6d2fe623eff..91350c81eac0 100644 --- a/spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py +++ b/spanner/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py @@ -107,25 +107,23 @@ def create_channel(cls, def create_session(self): """Return the gRPC stub for {$apiMethod.name}. - Creates a new session. A session can be used to perform - transactions that read and/or modify data in a Cloud Spanner database. - Sessions are meant to be reused for many consecutive - transactions. + Creates a new session. A session can be used to perform transactions + that read and/or modify data in a Cloud Spanner database. Sessions are + meant to be reused for many consecutive transactions. - Sessions can only execute one transaction at a time. To execute - multiple concurrent read-write/write-only transactions, create - multiple sessions. Note that standalone reads and queries use a - transaction internally, and count toward the one transaction - limit. + Sessions can only execute one transaction at a time. To execute multiple + concurrent read-write/write-only transactions, create multiple sessions. + Note that standalone reads and queries use a transaction internally, and + count toward the one transaction limit. Cloud Spanner limits the number of sessions that can exist at any given time; thus, it is a good idea to delete idle and/or unneeded sessions. - Aside from explicit deletes, Cloud Spanner can delete sessions for which no - operations are sent for more than an hour. If a session is deleted, + Aside from explicit deletes, Cloud Spanner can delete sessions for which + no operations are sent for more than an hour. If a session is deleted, requests to it return ``NOT_FOUND``. Idle sessions can be kept alive by sending a trivial SQL query - periodically, e.g., ``\"SELECT 1\"``. 
+ periodically, e.g., ``"SELECT 1"``. Returns: Callable: A callable which accepts the appropriate @@ -139,8 +137,7 @@ def get_session(self): """Return the gRPC stub for {$apiMethod.name}. Gets a session. Returns ``NOT_FOUND`` if the session does not exist. - This is mainly useful for determining whether a session is still - alive. + This is mainly useful for determining whether a session is still alive. Returns: Callable: A callable which accepts the appropriate @@ -180,13 +177,13 @@ def execute_sql(self): """Return the gRPC stub for {$apiMethod.name}. Executes an SQL statement, returning all results in a single reply. This - method cannot be used to return a result set larger than 10 MiB; - if the query yields more data than that, the query fails with - a ``FAILED_PRECONDITION`` error. + method cannot be used to return a result set larger than 10 MiB; if the + query yields more data than that, the query fails with a + ``FAILED_PRECONDITION`` error. Operations inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from - the beginning. See ``Transaction`` for more details. + this occurs, the application should restart the transaction from the + beginning. See ``Transaction`` for more details. Larger result sets can be fetched in streaming fashion by calling ``ExecuteStreamingSql`` instead. @@ -202,11 +199,10 @@ def execute_sql(self): def execute_streaming_sql(self): """Return the gRPC stub for {$apiMethod.name}. - Like ``ExecuteSql``, except returns the result - set as a stream. Unlike ``ExecuteSql``, there - is no limit on the size of the returned result set. However, no - individual row in the result set can exceed 100 MiB, and no - column value can exceed 10 MiB. + Like ``ExecuteSql``, except returns the result set as a stream. Unlike + ``ExecuteSql``, there is no limit on the size of the returned result + set. However, no individual row in the result set can exceed 100 MiB, + and no column value can exceed 10 MiB. Returns: Callable: A callable which accepts the appropriate @@ -219,16 +215,14 @@ def execute_streaming_sql(self): def read(self): """Return the gRPC stub for {$apiMethod.name}. - Reads rows from the database using key lookups and scans, as a - simple key/value style alternative to - ``ExecuteSql``. This method cannot be used to - return a result set larger than 10 MiB; if the read matches more - data than that, the read fails with a ``FAILED_PRECONDITION`` - error. + Reads rows from the database using key lookups and scans, as a simple + key/value style alternative to ``ExecuteSql``. This method cannot be + used to return a result set larger than 10 MiB; if the read matches more + data than that, the read fails with a ``FAILED_PRECONDITION`` error. - Reads inside read-write transactions might return ``ABORTED``. If - this occurs, the application should restart the transaction from - the beginning. See ``Transaction`` for more details. + Reads inside read-write transactions might return ``ABORTED``. If this + occurs, the application should restart the transaction from the + beginning. See ``Transaction`` for more details. Larger result sets can be yielded in streaming fashion by calling ``StreamingRead`` instead. @@ -244,11 +238,10 @@ def read(self): def streaming_read(self): """Return the gRPC stub for {$apiMethod.name}. - Like ``Read``, except returns the result set as a - stream. Unlike ``Read``, there is no limit on the - size of the returned result set. 
However, no individual row in - the result set can exceed 100 MiB, and no column value can exceed - 10 MiB. + Like ``Read``, except returns the result set as a stream. Unlike + ``Read``, there is no limit on the size of the returned result set. + However, no individual row in the result set can exceed 100 MiB, and no + column value can exceed 10 MiB. Returns: Callable: A callable which accepts the appropriate @@ -261,9 +254,8 @@ def streaming_read(self): def begin_transaction(self): """Return the gRPC stub for {$apiMethod.name}. - Begins a new transaction. This step can often be skipped: - ``Read``, ``ExecuteSql`` and - ``Commit`` can begin a new transaction as a + Begins a new transaction. This step can often be skipped: ``Read``, + ``ExecuteSql`` and ``Commit`` can begin a new transaction as a side-effect. Returns: @@ -277,14 +269,14 @@ def begin_transaction(self): def commit(self): """Return the gRPC stub for {$apiMethod.name}. - Commits a transaction. The request includes the mutations to be - applied to rows in the database. + Commits a transaction. The request includes the mutations to be applied + to rows in the database. - ``Commit`` might return an ``ABORTED`` error. This can occur at any time; - commonly, the cause is conflicts with concurrent - transactions. However, it can also happen for a variety of other - reasons. If ``Commit`` returns ``ABORTED``, the caller should re-attempt - the transaction from the beginning, re-using the same session. + ``Commit`` might return an ``ABORTED`` error. This can occur at any + time; commonly, the cause is conflicts with concurrent transactions. + However, it can also happen for a variety of other reasons. If + ``Commit`` returns ``ABORTED``, the caller should re-attempt the + transaction from the beginning, re-using the same session. Returns: Callable: A callable which accepts the appropriate @@ -298,13 +290,12 @@ def rollback(self): """Return the gRPC stub for {$apiMethod.name}. Rolls back a transaction, releasing any locks it holds. It is a good - idea to call this for any transaction that includes one or more - ``Read`` or ``ExecuteSql`` requests and - ultimately decides not to commit. + idea to call this for any transaction that includes one or more ``Read`` + or ``ExecuteSql`` requests and ultimately decides not to commit. - ``Rollback`` returns ``OK`` if it successfully aborts the transaction, the - transaction was already aborted, or the transaction is not - found. ``Rollback`` never returns ``ABORTED``. + ``Rollback`` returns ``OK`` if it successfully aborts the transaction, + the transaction was already aborted, or the transaction is not found. + ``Rollback`` never returns ``ABORTED``. Returns: Callable: A callable which accepts the appropriate @@ -318,16 +309,16 @@ def partition_query(self): """Return the gRPC stub for {$apiMethod.name}. Creates a set of partition tokens that can be used to execute a query - operation in parallel. Each of the returned partition tokens can be used - by ``ExecuteStreamingSql`` to specify a subset - of the query result to read. The same session and read-only transaction - must be used by the PartitionQueryRequest used to create the - partition tokens and the ExecuteSqlRequests that use the partition tokens. + operation in parallel. Each of the returned partition tokens can be used + by ``ExecuteStreamingSql`` to specify a subset of the query result to + read. 
The same session and read-only transaction must be used by the + PartitionQueryRequest used to create the partition tokens and the + ExecuteSqlRequests that use the partition tokens. - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the query, and - the whole operation must be restarted from the beginning. + Partition tokens become invalid when the session used to create them is + deleted, is idle for too long, begins a new transaction, or becomes too + old. When any of these happen, it is not possible to resume the query, + and the whole operation must be restarted from the beginning. Returns: Callable: A callable which accepts the appropriate @@ -341,18 +332,19 @@ def partition_read(self): """Return the gRPC stub for {$apiMethod.name}. Creates a set of partition tokens that can be used to execute a read - operation in parallel. Each of the returned partition tokens can be used - by ``StreamingRead`` to specify a subset of the read - result to read. The same session and read-only transaction must be used by - the PartitionReadRequest used to create the partition tokens and the - ReadRequests that use the partition tokens. There are no ordering + operation in parallel. Each of the returned partition tokens can be used + by ``StreamingRead`` to specify a subset of the read result to read. The + same session and read-only transaction must be used by the + PartitionReadRequest used to create the partition tokens and the + ReadRequests that use the partition tokens. There are no ordering guarantees on rows returned among the returned partition tokens, or even - within each individual StreamingRead call issued with a partition_token. + within each individual StreamingRead call issued with a + partition\_token. - Partition tokens become invalid when the session used to create them - is deleted, is idle for too long, begins a new transaction, or becomes too - old. When any of these happen, it is not possible to resume the read, and - the whole operation must be restarted from the beginning. + Partition tokens become invalid when the session used to create them is + deleted, is idle for too long, begins a new transaction, or becomes too + old. When any of these happen, it is not possible to resume the read, + and the whole operation must be restarted from the beginning. Returns: Callable: A callable which accepts the appropriate
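The partitioning methods above are meant to be combined with the streaming calls. A minimal sketch (not taken from this change) of the fan-out pattern the docstrings describe, assuming a strong read-only transaction and an illustrative query; in practice each partition token would be handed to a separate worker, all reusing the same session and transaction::

    from google.cloud import spanner_v1

    client = spanner_v1.SpannerClient()
    session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')

    # Partition tokens are only valid for the session and read-only
    # transaction that created them, so begin one explicitly and reuse it.
    txn = client.begin_transaction(session, {'read_only': {'strong': True}})
    selector = {'id': txn.id}

    sql = 'SELECT id, msg FROM messages'
    response = client.partition_query(session, sql, transaction=selector)

    for partition in response.partitions:
        # Hand each token to ``ExecuteStreamingSql``; the other request fields
        # must exactly match the PartitionQueryRequest that created the token.
        for partial_result_set in client.execute_streaming_sql(
                session,
                sql,
                transaction=selector,
                partition_token=partition.partition_token):
            print(partial_result_set)

The same shape applies to ``partition_read`` with ``streaming_read``, with the caveat from the docstring that rows arrive in no particular order across (or within) the partitioned calls.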