From 3a2e52eccc40593b195e7ff58152dbe222c65817 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi Date: Thu, 8 Feb 2024 17:51:22 +0100 Subject: [PATCH] Generate SDK --- .codegen/_openapi_sha | 2 +- .gitattributes | 2 +- databricks/sdk/service/catalog.py | 195 ++++++--- databricks/sdk/service/compute.py | 97 ++++- databricks/sdk/service/files.py | 158 +++++++- databricks/sdk/service/jobs.py | 192 ++++++++- databricks/sdk/service/pipelines.py | 65 ++- databricks/sdk/service/settings.py | 375 +++++++++++++++--- databricks/sdk/service/sql.py | 8 +- databricks/sdk/service/vectorsearch.py | 16 +- databricks/sdk/service/workspace.py | 18 +- docs/account/catalog/metastores.rst | 2 +- docs/account/settings/settings.rst | 20 +- docs/dbdataclasses/catalog.rst | 4 + docs/dbdataclasses/compute.rst | 8 + docs/dbdataclasses/files.rst | 8 + docs/dbdataclasses/jobs.rst | 20 + docs/dbdataclasses/pipelines.rst | 4 + docs/dbdataclasses/settings.rst | 26 +- docs/workspace/catalog/connections.rst | 10 +- docs/workspace/catalog/lakehouse_monitors.rst | 81 ++++ docs/workspace/catalog/metastores.rst | 6 +- docs/workspace/catalog/registered_models.rst | 4 +- docs/workspace/catalog/schemas.rst | 4 +- docs/workspace/catalog/volumes.rst | 4 +- docs/workspace/compute/clusters.rst | 24 +- docs/workspace/iam/current_user.rst | 4 +- docs/workspace/pipelines/pipelines.rst | 16 - .../settings/credentials_manager.rst | 4 +- docs/workspace/settings/settings.rst | 82 +++- docs/workspace/sql/dashboards.rst | 3 + docs/workspace/sql/query_history.rst | 4 +- .../vectorsearch/vector_search_indexes.rst | 4 +- .../change_owner_clusters_api_integration.py | 2 +- .../create_clusters_api_integration.py | 2 +- .../delete_clusters_api_integration.py | 2 +- .../clusters/edit_clusters_api_integration.py | 2 +- .../events_clusters_api_integration.py | 2 +- .../clusters/get_clusters_api_integration.py | 2 +- .../clusters/pin_clusters_api_integration.py | 2 +- .../resize_clusters_api_integration.py | 2 +- .../restart_clusters_api_integration.py | 2 +- ..._spark_version_clusters_api_integration.py | 2 +- .../start_clusters_api_integration.py | 2 +- .../unpin_clusters_api_integration.py | 2 +- examples/connections/get_connections.py | 3 +- examples/connections/update_connections.py | 3 +- examples/metastores/update_metastores.py | 2 +- examples/r/wait_catalog_workspace_bindings.py | 5 + 49 files changed, 1238 insertions(+), 269 deletions(-) create mode 100755 examples/r/wait_catalog_workspace_bindings.py diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index f705ffea6..2f884a290 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -e05401ed5dd4974c5333d737ec308a7d451f749f \ No newline at end of file +6b897bc95b23abed8b9f5eff0e6b8ec034046180 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index d36f22eeb..209ba7dec 100755 --- a/.gitattributes +++ b/.gitattributes @@ -190,6 +190,7 @@ examples/queries/create_queries.py linguist-generated=true examples/queries/get_queries.py linguist-generated=true examples/queries/update_queries.py linguist-generated=true examples/query_history/list_sql_query_history.py linguist-generated=true +examples/r/wait_catalog_workspace_bindings.py linguist-generated=true examples/recipients/create_recipients.py linguist-generated=true examples/recipients/get_recipients.py linguist-generated=true examples/recipients/list_recipients.py linguist-generated=true @@ -285,7 +286,6 @@ examples/workspace/list_workspace_integration.py linguist-generated=true 
examples/workspace_assignment/list_workspace_assignment_on_aws.py linguist-generated=true examples/workspace_assignment/update_workspace_assignment_on_aws.py linguist-generated=true examples/workspace_bindings/get_catalog_workspace_bindings.py linguist-generated=true -examples/workspace_bindings/update_catalog_workspace_bindings.py linguist-generated=true examples/workspace_conf/get_status_repos.py linguist-generated=true examples/workspaces/create_workspaces.py linguist-generated=true examples/workspaces/get_workspaces.py linguist-generated=true diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index 5c240c62e..578d0bbe6 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -784,6 +784,7 @@ class ConnectionInfoSecurableKind(Enum): class ConnectionType(Enum): """The type of connection.""" + BIGQUERY = 'BIGQUERY' DATABRICKS = 'DATABRICKS' MYSQL = 'MYSQL' POSTGRESQL = 'POSTGRESQL' @@ -3136,6 +3137,53 @@ def from_dict(cls, d: Dict[str, any]) -> MonitorNotificationsConfig: return cls(on_failure=_from_dict(d, 'on_failure', MonitorDestinations)) +@dataclass +class MonitorRefreshInfo: + end_time_ms: Optional[int] = None + """The time at which the refresh ended, in epoch milliseconds.""" + + message: Optional[str] = None + """An optional message to give insight into the current state of the job (e.g. FAILURE messages).""" + + refresh_id: Optional[int] = None + """The ID of the refresh.""" + + start_time_ms: Optional[int] = None + """The time at which the refresh started, in epoch milliseconds.""" + + state: Optional[MonitorRefreshInfoState] = None + """The current state of the refresh.""" + + def as_dict(self) -> dict: + """Serializes the MonitorRefreshInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.end_time_ms is not None: body['end_time_ms'] = self.end_time_ms + if self.message is not None: body['message'] = self.message + if self.refresh_id is not None: body['refresh_id'] = self.refresh_id + if self.start_time_ms is not None: body['start_time_ms'] = self.start_time_ms + if self.state is not None: body['state'] = self.state.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> MonitorRefreshInfo: + """Deserializes the MonitorRefreshInfo from a dictionary.""" + return cls(end_time_ms=d.get('end_time_ms', None), + message=d.get('message', None), + refresh_id=d.get('refresh_id', None), + start_time_ms=d.get('start_time_ms', None), + state=_enum(d, 'state', MonitorRefreshInfoState)) + + +class MonitorRefreshInfoState(Enum): + """The current state of the refresh.""" + + CANCELED = 'CANCELED' + FAILED = 'FAILED' + PENDING = 'PENDING' + RUNNING = 'RUNNING' + SUCCESS = 'SUCCESS' + + @dataclass class MonitorTimeSeriesProfileType: granularities: Optional[List[str]] = None @@ -4115,9 +4163,6 @@ class UpdateConnection: options: Dict[str, str] """A map of key-value properties attached to the securable.""" - name: Optional[str] = None - """Name of the connection.""" - name_arg: Optional[str] = None """Name of the connection.""" @@ -4130,7 +4175,6 @@ class UpdateConnection: def as_dict(self) -> dict: """Serializes the UpdateConnection into a dictionary suitable for use as a JSON request body.""" body = {} - if self.name is not None: body['name'] = self.name if self.name_arg is not None: body['name_arg'] = self.name_arg if self.new_name is not None: body['new_name'] = self.new_name if self.options: body['options'] = self.options @@ -4140,8 +4184,7 @@ def as_dict(self) -> dict: 
@classmethod def from_dict(cls, d: Dict[str, any]) -> UpdateConnection: """Deserializes the UpdateConnection from a dictionary.""" - return cls(name=d.get('name', None), - name_arg=d.get('name_arg', None), + return cls(name_arg=d.get('name_arg', None), new_name=d.get('new_name', None), options=d.get('options', None), owner=d.get('owner', None)) @@ -4251,9 +4294,6 @@ class UpdateMetastore: id: Optional[str] = None """Unique ID of the metastore.""" - name: Optional[str] = None - """The user-specified name of the metastore.""" - new_name: Optional[str] = None """New name for the metastore.""" @@ -4276,7 +4316,6 @@ def as_dict(self) -> dict: 'delta_sharing_recipient_token_lifetime_in_seconds'] = self.delta_sharing_recipient_token_lifetime_in_seconds if self.delta_sharing_scope is not None: body['delta_sharing_scope'] = self.delta_sharing_scope.value if self.id is not None: body['id'] = self.id - if self.name is not None: body['name'] = self.name if self.new_name is not None: body['new_name'] = self.new_name if self.owner is not None: body['owner'] = self.owner if self.privilege_model_version is not None: @@ -4293,7 +4332,6 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateMetastore: 'delta_sharing_recipient_token_lifetime_in_seconds', None), delta_sharing_scope=_enum(d, 'delta_sharing_scope', UpdateMetastoreDeltaSharingScope), id=d.get('id', None), - name=d.get('name', None), new_name=d.get('new_name', None), owner=d.get('owner', None), privilege_model_version=d.get('privilege_model_version', None), @@ -4475,9 +4513,6 @@ class UpdateRegisteredModelRequest: full_name: Optional[str] = None """The three-level (fully qualified) name of the registered model""" - name: Optional[str] = None - """The name of the registered model""" - new_name: Optional[str] = None """New name for the registered model.""" @@ -4489,7 +4524,6 @@ def as_dict(self) -> dict: body = {} if self.comment is not None: body['comment'] = self.comment if self.full_name is not None: body['full_name'] = self.full_name - if self.name is not None: body['name'] = self.name if self.new_name is not None: body['new_name'] = self.new_name if self.owner is not None: body['owner'] = self.owner return body @@ -4499,7 +4533,6 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateRegisteredModelRequest: """Deserializes the UpdateRegisteredModelRequest from a dictionary.""" return cls(comment=d.get('comment', None), full_name=d.get('full_name', None), - name=d.get('name', None), new_name=d.get('new_name', None), owner=d.get('owner', None)) @@ -4515,9 +4548,6 @@ class UpdateSchema: full_name: Optional[str] = None """Full name of the schema.""" - name: Optional[str] = None - """Name of schema, relative to parent catalog.""" - new_name: Optional[str] = None """New name for the schema.""" @@ -4534,7 +4564,6 @@ def as_dict(self) -> dict: if self.enable_predictive_optimization is not None: body['enable_predictive_optimization'] = self.enable_predictive_optimization.value if self.full_name is not None: body['full_name'] = self.full_name - if self.name is not None: body['name'] = self.name if self.new_name is not None: body['new_name'] = self.new_name if self.owner is not None: body['owner'] = self.owner if self.properties: body['properties'] = self.properties @@ -4547,7 +4576,6 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateSchema: enable_predictive_optimization=_enum(d, 'enable_predictive_optimization', EnablePredictiveOptimization), full_name=d.get('full_name', None), - name=d.get('name', None), new_name=d.get('new_name', None), owner=d.get('owner', 
None), properties=d.get('properties', None)) @@ -4635,9 +4663,6 @@ class UpdateVolumeRequestContent: full_name_arg: Optional[str] = None """The three-level (fully qualified) name of the volume""" - name: Optional[str] = None - """The name of the volume""" - new_name: Optional[str] = None """New name for the volume.""" @@ -4649,7 +4674,6 @@ def as_dict(self) -> dict: body = {} if self.comment is not None: body['comment'] = self.comment if self.full_name_arg is not None: body['full_name_arg'] = self.full_name_arg - if self.name is not None: body['name'] = self.name if self.new_name is not None: body['new_name'] = self.new_name if self.owner is not None: body['owner'] = self.owner return body @@ -4659,7 +4683,6 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateVolumeRequestContent: """Deserializes the UpdateVolumeRequestContent from a dictionary.""" return cls(comment=d.get('comment', None), full_name_arg=d.get('full_name_arg', None), - name=d.get('name', None), new_name=d.get('new_name', None), owner=d.get('owner', None)) @@ -5638,7 +5661,6 @@ def update(self, name_arg: str, options: Dict[str, str], *, - name: Optional[str] = None, new_name: Optional[str] = None, owner: Optional[str] = None) -> ConnectionInfo: """Update a connection. @@ -5649,8 +5671,6 @@ def update(self, Name of the connection. :param options: Dict[str,str] A map of key-value properties attached to the securable. - :param name: str (optional) - Name of the connection. :param new_name: str (optional) New name for the connection. :param owner: str (optional) @@ -5659,7 +5679,6 @@ def update(self, :returns: :class:`ConnectionInfo` """ body = {} - if name is not None: body['name'] = name if new_name is not None: body['new_name'] = new_name if options is not None: body['options'] = options if owner is not None: body['owner'] = owner @@ -6129,6 +6148,31 @@ class LakehouseMonitorsAPI: def __init__(self, api_client): self._api = api_client + def cancel_refresh(self, full_name: str, refresh_id: str): + """Cancel refresh. + + Cancel an active monitor refresh for the given refresh ID. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + :param refresh_id: str + ID of the refresh. + + + """ + + headers = {} + self._api.do('POST', + f'/api/2.1/unity-catalog/tables/{full_name}/monitor/refreshes/{refresh_id}/cancel', + headers=headers) + def create(self, full_name: str, assets_dir: str, @@ -6263,6 +6307,81 @@ def get(self, full_name: str) -> MonitorInfo: res = self._api.do('GET', f'/api/2.1/unity-catalog/tables/{full_name}/monitor', headers=headers) return MonitorInfo.from_dict(res) + def get_refresh(self, full_name: str, refresh_id: str) -> MonitorRefreshInfo: + """Get refresh. + + Gets info about a specific monitor refresh using the given refresh ID. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. 
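Taken together, the monitor refresh endpoints added to LakehouseMonitorsAPI in this file — cancel_refresh above, plus get_refresh, list_refreshes and run_refresh below — cover the full refresh lifecycle. A minimal sketch, assuming a hypothetical Unity Catalog table main.default.my_table that already has a monitor and that the workspace client exposes the service as w.lakehouse_monitors:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.catalog import MonitorRefreshInfoState

w = WorkspaceClient()
table = "main.default.my_table"  # hypothetical table with an existing monitor

# Queue a metric refresh and fetch its state by ID (refresh_id is an int in the
# response body but a string in the path parameter, hence the str() conversion).
queued = w.lakehouse_monitors.run_refresh(full_name=table)
info = w.lakehouse_monitors.get_refresh(full_name=table, refresh_id=str(queued.refresh_id))

# Walk the most recent refreshes (up to 25) and cancel anything still in flight.
for r in w.lakehouse_monitors.list_refreshes(full_name=table):
    if r.state in (MonitorRefreshInfoState.PENDING, MonitorRefreshInfoState.RUNNING):
        w.lakehouse_monitors.cancel_refresh(full_name=table, refresh_id=str(r.refresh_id))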
+ + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + :param refresh_id: str + ID of the refresh. + + :returns: :class:`MonitorRefreshInfo` + """ + + headers = {'Accept': 'application/json', } + res = self._api.do('GET', + f'/api/2.1/unity-catalog/tables/{full_name}/monitor/refreshes/{refresh_id}', + headers=headers) + return MonitorRefreshInfo.from_dict(res) + + def list_refreshes(self, full_name: str) -> Iterator[MonitorRefreshInfo]: + """List refreshes. + + Gets an array containing the history of the most recent refreshes (up to 25) for this table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + + :returns: Iterator over :class:`MonitorRefreshInfo` + """ + + headers = {'Accept': 'application/json', } + res = self._api.do('GET', + f'/api/2.1/unity-catalog/tables/{full_name}/monitor/refreshes', + headers=headers) + return [MonitorRefreshInfo.from_dict(v) for v in res] + + def run_refresh(self, full_name: str) -> MonitorRefreshInfo: + """Queue a metric refresh for a monitor. + + Queues a metric refresh on the monitor for the specified table. The refresh will execute in the + background. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + + :returns: :class:`MonitorRefreshInfo` + """ + + headers = {'Accept': 'application/json', } + res = self._api.do('POST', + f'/api/2.1/unity-catalog/tables/{full_name}/monitor/refreshes', + headers=headers) + return MonitorRefreshInfo.from_dict(res) + def update(self, full_name: str, assets_dir: str, @@ -6514,7 +6633,6 @@ def update(self, delta_sharing_organization_name: Optional[str] = None, delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None, delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope] = None, - name: Optional[str] = None, new_name: Optional[str] = None, owner: Optional[str] = None, privilege_model_version: Optional[str] = None, @@ -6533,8 +6651,6 @@ def update(self, The lifetime of delta sharing recipient token in seconds. :param delta_sharing_scope: :class:`UpdateMetastoreDeltaSharingScope` (optional) The scope of Delta Sharing enabled for the metastore. - :param name: str (optional) - The user-specified name of the metastore. :param new_name: str (optional) New name for the metastore. 
:param owner: str (optional) @@ -6553,7 +6669,6 @@ def update(self, body[ 'delta_sharing_recipient_token_lifetime_in_seconds'] = delta_sharing_recipient_token_lifetime_in_seconds if delta_sharing_scope is not None: body['delta_sharing_scope'] = delta_sharing_scope.value - if name is not None: body['name'] = name if new_name is not None: body['new_name'] = new_name if owner is not None: body['owner'] = owner if privilege_model_version is not None: body['privilege_model_version'] = privilege_model_version @@ -6963,7 +7078,6 @@ def update(self, full_name: str, *, comment: Optional[str] = None, - name: Optional[str] = None, new_name: Optional[str] = None, owner: Optional[str] = None) -> RegisteredModelInfo: """Update a Registered Model. @@ -6980,8 +7094,6 @@ def update(self, The three-level (fully qualified) name of the registered model :param comment: str (optional) The comment attached to the registered model - :param name: str (optional) - The name of the registered model :param new_name: str (optional) New name for the registered model. :param owner: str (optional) @@ -6991,7 +7103,6 @@ def update(self, """ body = {} if comment is not None: body['comment'] = comment - if name is not None: body['name'] = name if new_name is not None: body['new_name'] = new_name if owner is not None: body['owner'] = owner headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } @@ -7120,7 +7231,6 @@ def update(self, *, comment: Optional[str] = None, enable_predictive_optimization: Optional[EnablePredictiveOptimization] = None, - name: Optional[str] = None, new_name: Optional[str] = None, owner: Optional[str] = None, properties: Optional[Dict[str, str]] = None) -> SchemaInfo: @@ -7137,8 +7247,6 @@ def update(self, User-provided free-form text description. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) Whether predictive optimization should be enabled for this object and objects under it. - :param name: str (optional) - Name of schema, relative to parent catalog. :param new_name: str (optional) New name for the schema. :param owner: str (optional) @@ -7152,7 +7260,6 @@ def update(self, if comment is not None: body['comment'] = comment if enable_predictive_optimization is not None: body['enable_predictive_optimization'] = enable_predictive_optimization.value - if name is not None: body['name'] = name if new_name is not None: body['new_name'] = new_name if owner is not None: body['owner'] = owner if properties is not None: body['properties'] = properties @@ -7917,7 +8024,6 @@ def update(self, full_name_arg: str, *, comment: Optional[str] = None, - name: Optional[str] = None, new_name: Optional[str] = None, owner: Optional[str] = None) -> VolumeInfo: """Update a Volume. @@ -7934,8 +8040,6 @@ def update(self, The three-level (fully qualified) name of the volume :param comment: str (optional) The comment attached to the volume - :param name: str (optional) - The name of the volume :param new_name: str (optional) New name for the volume. 
:param owner: str (optional) @@ -7945,7 +8049,6 @@ def update(self, """ body = {} if comment is not None: body['comment'] = comment - if name is not None: body['name'] = name if new_name is not None: body['new_name'] = new_name if owner is not None: body['owner'] = owner headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index efa4c54b8..91ff63a30 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -65,15 +65,33 @@ def from_dict(cls, d: Dict[str, any]) -> AddInstanceProfile: @dataclass -class AutoScale: - min_workers: int - """The minimum number of workers to which the cluster can scale down when underutilized. It is also - the initial number of workers the cluster will have after creation.""" +class Adlsgen2Info: + destination: str + """abfss destination, e.g. + `abfss://@.dfs.core.windows.net/`.""" + + def as_dict(self) -> dict: + """Serializes the Adlsgen2Info into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.destination is not None: body['destination'] = self.destination + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> Adlsgen2Info: + """Deserializes the Adlsgen2Info from a dictionary.""" + return cls(destination=d.get('destination', None)) - max_workers: int + +@dataclass +class AutoScale: + max_workers: Optional[int] = None """The maximum number of workers to which the cluster can scale up when overloaded. Note that `max_workers` must be strictly greater than `min_workers`.""" + min_workers: Optional[int] = None + """The minimum number of workers to which the cluster can scale down when underutilized. It is also + the initial number of workers the cluster will have after creation.""" + def as_dict(self) -> dict: """Serializes the AutoScale into a dictionary suitable for use as a JSON request body.""" body = {} @@ -2213,7 +2231,7 @@ class DataSecurityMode(Enum): @dataclass class DbfsStorageInfo: - destination: Optional[str] = None + destination: str """dbfs destination, e.g. `dbfs:/my/path`""" def as_dict(self) -> dict: @@ -2957,6 +2975,18 @@ class GcpAttributes: [GCP documentation]: https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds""" + use_preemptible_executors: Optional[bool] = None + """This field determines whether the spark executors will be scheduled to run on preemptible VMs + (when set to true) versus standard compute engine VMs (when set to false; default). Note: Soon + to be deprecated, use the availability field instead.""" + + zone_id: Optional[str] = None + """Identifier for the availability zone in which the cluster resides. This can be one of the + following: - "HA" => High availability, spread nodes across availability zones for a Databricks + deployment region [default] - "AUTO" => Databricks picks an availability zone to schedule the + cluster on. 
- A GCP availability zone => Pick One of the available zones for (machine type + + region) from https://cloud.google.com/compute/docs/regions-zones.""" + def as_dict(self) -> dict: """Serializes the GcpAttributes into a dictionary suitable for use as a JSON request body.""" body = {} @@ -2965,6 +2995,9 @@ def as_dict(self) -> dict: if self.google_service_account is not None: body['google_service_account'] = self.google_service_account if self.local_ssd_count is not None: body['local_ssd_count'] = self.local_ssd_count + if self.use_preemptible_executors is not None: + body['use_preemptible_executors'] = self.use_preemptible_executors + if self.zone_id is not None: body['zone_id'] = self.zone_id return body @classmethod @@ -2973,7 +3006,9 @@ def from_dict(cls, d: Dict[str, any]) -> GcpAttributes: return cls(availability=_enum(d, 'availability', GcpAvailability), boot_disk_size=d.get('boot_disk_size', None), google_service_account=d.get('google_service_account', None), - local_ssd_count=d.get('local_ssd_count', None)) + local_ssd_count=d.get('local_ssd_count', None), + use_preemptible_executors=d.get('use_preemptible_executors', None), + zone_id=d.get('zone_id', None)) class GcpAvailability(Enum): @@ -2985,6 +3020,23 @@ class GcpAvailability(Enum): PREEMPTIBLE_WITH_FALLBACK_GCP = 'PREEMPTIBLE_WITH_FALLBACK_GCP' +@dataclass +class GcsStorageInfo: + destination: str + """GCS destination/URI, e.g. `gs://my-bucket/some-prefix`""" + + def as_dict(self) -> dict: + """Serializes the GcsStorageInfo into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.destination is not None: body['destination'] = self.destination + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> GcsStorageInfo: + """Deserializes the GcsStorageInfo from a dictionary.""" + return cls(destination=d.get('destination', None)) + + @dataclass class GetClusterPermissionLevelsResponse: permission_levels: Optional[List[ClusterPermissionsDescription]] = None @@ -3538,6 +3590,10 @@ class InitScriptExecutionDetailsStatus(Enum): @dataclass class InitScriptInfo: + abfss: Optional[Adlsgen2Info] = None + """destination needs to be provided. e.g. `{ "abfss" : { "destination" : + "abfss://@.dfs.core.windows.net/" } }""" + dbfs: Optional[DbfsStorageInfo] = None """destination needs to be provided. e.g. `{ "dbfs" : { "destination" : "dbfs:/home/cluster_log" } }`""" @@ -3546,6 +3602,9 @@ class InitScriptInfo: """destination needs to be provided. e.g. `{ "file" : { "destination" : "file:/my/local/file.sh" } }`""" + gcs: Optional[GcsStorageInfo] = None + """destination needs to be provided. e.g. `{ "gcs": { "destination": "gs://my-bucket/file.sh" } }`""" + s3: Optional[S3StorageInfo] = None """destination and either the region or endpoint need to be provided. e.g. 
`{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : "us-west-2" } }` Cluster iam role is used to @@ -3563,8 +3622,10 @@ class InitScriptInfo: def as_dict(self) -> dict: """Serializes the InitScriptInfo into a dictionary suitable for use as a JSON request body.""" body = {} + if self.abfss: body['abfss'] = self.abfss.as_dict() if self.dbfs: body['dbfs'] = self.dbfs.as_dict() if self.file: body['file'] = self.file.as_dict() + if self.gcs: body['gcs'] = self.gcs.as_dict() if self.s3: body['s3'] = self.s3.as_dict() if self.volumes: body['volumes'] = self.volumes.as_dict() if self.workspace: body['workspace'] = self.workspace.as_dict() @@ -3573,8 +3634,10 @@ def as_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, any]) -> InitScriptInfo: """Deserializes the InitScriptInfo from a dictionary.""" - return cls(dbfs=_from_dict(d, 'dbfs', DbfsStorageInfo), + return cls(abfss=_from_dict(d, 'abfss', Adlsgen2Info), + dbfs=_from_dict(d, 'dbfs', DbfsStorageInfo), file=_from_dict(d, 'file', LocalFileInfo), + gcs=_from_dict(d, 'gcs', GcsStorageInfo), s3=_from_dict(d, 's3', S3StorageInfo), volumes=_from_dict(d, 'volumes', VolumesStorageInfo), workspace=_from_dict(d, 'workspace', WorkspaceStorageInfo)) @@ -4428,7 +4491,7 @@ class ListSortOrder(Enum): @dataclass class LocalFileInfo: - destination: Optional[str] = None + destination: str """local file destination, e.g. `file:/my/local/file.sh`""" def as_dict(self) -> dict: @@ -5027,6 +5090,11 @@ class RuntimeEngine(Enum): @dataclass class S3StorageInfo: + destination: str + """S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using cluster + iam role, please make sure you set cluster iam role and the role has write access to the + destination. Please also note that you cannot use AWS keys to deliver logs.""" + canned_acl: Optional[str] = None """(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`. If `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on @@ -5036,11 +5104,6 @@ class S3StorageInfo: for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to read the logs.""" - destination: Optional[str] = None - """S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using cluster - iam role, please make sure you set cluster iam role and the role has write access to the - destination. Please also note that you cannot use AWS keys to deliver logs.""" - enable_encryption: Optional[bool] = None """(Optional) Flag to enable server side encryption, `false` by default.""" @@ -5371,7 +5434,7 @@ def from_dict(cls, d: Dict[str, any]) -> UnpinCluster: @dataclass class VolumesStorageInfo: - destination: Optional[str] = None + destination: str """Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`""" def as_dict(self) -> dict: @@ -5388,7 +5451,7 @@ def from_dict(cls, d: Dict[str, any]) -> VolumesStorageInfo: @dataclass class WorkloadType: - clients: Optional[ClientsTypes] = None + clients: ClientsTypes """defined what type of clients can use the cluster. E.g. Notebooks, Jobs""" def as_dict(self) -> dict: @@ -5405,7 +5468,7 @@ def from_dict(cls, d: Dict[str, any]) -> WorkloadType: @dataclass class WorkspaceStorageInfo: - destination: Optional[str] = None + destination: str """workspace files destination, e.g. 
`/Users/user1@databricks.com/my-init.sh`""" def as_dict(self) -> dict: diff --git a/databricks/sdk/service/files.py b/databricks/sdk/service/files.py index c1c3c184b..1878f748f 100755 --- a/databricks/sdk/service/files.py +++ b/databricks/sdk/service/files.py @@ -112,6 +112,43 @@ def from_dict(cls, d: Dict[str, any]) -> Delete: return cls(path=d.get('path', None), recursive=d.get('recursive', None)) +@dataclass +class DirectoryEntry: + file_size: Optional[int] = None + """The length of the file in bytes. This field is omitted for directories.""" + + is_directory: Optional[bool] = None + """True if the path is a directory.""" + + last_modified: Optional[int] = None + """Last modification time of given file in milliseconds since unix epoch.""" + + name: Optional[str] = None + """The name of the file or directory.""" + + path: Optional[str] = None + """The absolute path of the file or directory.""" + + def as_dict(self) -> dict: + """Serializes the DirectoryEntry into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.file_size is not None: body['file_size'] = self.file_size + if self.is_directory is not None: body['is_directory'] = self.is_directory + if self.last_modified is not None: body['last_modified'] = self.last_modified + if self.name is not None: body['name'] = self.name + if self.path is not None: body['path'] = self.path + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> DirectoryEntry: + """Deserializes the DirectoryEntry from a dictionary.""" + return cls(file_size=d.get('file_size', None), + is_directory=d.get('is_directory', None), + last_modified=d.get('last_modified', None), + name=d.get('name', None), + path=d.get('path', None)) + + @dataclass class DownloadResponse: contents: Optional[BinaryIO] = None @@ -149,6 +186,28 @@ def from_dict(cls, d: Dict[str, any]) -> FileInfo: path=d.get('path', None)) +@dataclass +class ListDirectoryResponse: + contents: Optional[List[DirectoryEntry]] = None + """Array of DirectoryEntry.""" + + next_page_token: Optional[str] = None + """A token, which can be sent as `page_token` to retrieve the next page.""" + + def as_dict(self) -> dict: + """Serializes the ListDirectoryResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.contents: body['contents'] = [v.as_dict() for v in self.contents] + if self.next_page_token is not None: body['next_page_token'] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> ListDirectoryResponse: + """Deserializes the ListDirectoryResponse from a dictionary.""" + return cls(contents=_repeated_dict(d, 'contents', DirectoryEntry), + next_page_token=d.get('next_page_token', None)) + + @dataclass class ListStatusResponse: files: Optional[List[FileInfo]] = None @@ -505,59 +564,114 @@ class FilesAPI: def __init__(self, api_client): self._api = api_client + def create_directory(self, directory_path: str): + """Create a directory. + + Creates an empty directory. If called on an existing directory, the API returns a success response. + + :param directory_path: str + The absolute path of a directory. + + + """ + + headers = {} + self._api.do('PUT', f'/api/2.0/fs/directories{directory_path}', headers=headers) + def delete(self, file_path: str): - """Delete a file or directory. + """Delete a file. - Deletes a file or directory. + Deletes a file. :param file_path: str - The absolute path of the file or directory in DBFS. + The absolute path of the file. 
""" headers = {} - self._api.do('DELETE', f'/api/2.0/fs/files/{file_path}', headers=headers) + self._api.do('DELETE', f'/api/2.0/fs/files{file_path}', headers=headers) + + def delete_directory(self, directory_path: str): + """Delete a directory. + + Deletes an empty directory. If the directory is not empty, the API returns a HTTP 400 error. + + :param directory_path: str + The absolute path of a directory. + + + """ + + headers = {} + self._api.do('DELETE', f'/api/2.0/fs/directories{directory_path}', headers=headers) def download(self, file_path: str) -> DownloadResponse: """Download a file. - Downloads a file of up to 2 GiB. + Downloads a file of up to 5 GiB. :param file_path: str - The absolute path of the file or directory in DBFS. + The absolute path of the file. :returns: :class:`DownloadResponse` """ headers = {'Accept': 'application/octet-stream', } - res = self._api.do('GET', f'/api/2.0/fs/files/{file_path}', headers=headers, raw=True) + res = self._api.do('GET', f'/api/2.0/fs/files{file_path}', headers=headers, raw=True) return DownloadResponse(contents=res) - def get_status(self, path: str) -> FileInfo: - """Get file or directory status. - - Returns the status of a file or directory. - - :param path: str - The absolute path of the file or directory in the Files API, omitting the initial slash. - - :returns: :class:`FileInfo` + def list_directory_contents(self, + directory_path: str, + *, + page_size: Optional[int] = None, + page_token: Optional[str] = None) -> Iterator[DirectoryEntry]: + """List directory contents. + + Returns the contents of a directory. If there is no directory at the specified path, the API returns a + HTTP 404 error. + + :param directory_path: str + The absolute path of a directory. + :param page_size: int (optional) + The maximum number of directory entries to return. The API may return fewer than this value. + Receiving fewer results does not imply there are no more results. As long as the response contains a + next_page_token, there may be more results. + + If unspecified, at most 1000 directory entries will be returned. The maximum value is 1000. Values + above 1000 will be coerced to 1000. + :param page_token: str (optional) + A page token, received from a previous `list` call. Provide this to retrieve the subsequent page. + When paginating, all other parameters provided to `list` must match the call that provided the page + token. + + :returns: Iterator over :class:`DirectoryEntry` """ query = {} - if path is not None: query['path'] = path + if page_size is not None: query['page_size'] = page_size + if page_token is not None: query['page_token'] = page_token headers = {'Accept': 'application/json', } - res = self._api.do('GET', '/api/2.0/fs/get-status', query=query, headers=headers) - return FileInfo.from_dict(res) + + while True: + json = self._api.do('GET', + f'/api/2.0/fs/directories{directory_path}', + query=query, + headers=headers) + if 'contents' in json: + for v in json['contents']: + yield DirectoryEntry.from_dict(v) + if 'next_page_token' not in json or not json['next_page_token']: + return + query['page_token'] = json['next_page_token'] def upload(self, file_path: str, contents: BinaryIO, *, overwrite: Optional[bool] = None): """Upload a file. - Uploads a file of up to 2 GiB. + Uploads a file of up to 5 GiB. :param file_path: str - The absolute path of the file or directory in DBFS. + The absolute path of the file. :param contents: BinaryIO :param overwrite: bool (optional) If true, an existing file will be overwritten. 
@@ -568,4 +682,4 @@ def upload(self, file_path: str, contents: BinaryIO, *, overwrite: Optional[bool query = {} if overwrite is not None: query['overwrite'] = overwrite headers = {'Content-Type': 'application/octet-stream', } - self._api.do('PUT', f'/api/2.0/fs/files/{file_path}', query=query, headers=headers, data=contents) + self._api.do('PUT', f'/api/2.0/fs/files{file_path}', query=query, headers=headers, data=contents) diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index c9453ae04..bdc5b8a20 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -676,13 +676,22 @@ class DbtTask: specified. If no warehouse_id is specified and this folder is unset, the root directory is used.""" project_directory: Optional[str] = None - """Optional (relative) path to the project directory, if no value is provided, the root of the git - repository is used.""" + """Path to the project directory. Optional for Git sourced tasks, in which case if no value is + provided, the root of the Git repository is used.""" schema: Optional[str] = None """Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used.""" + source: Optional[Source] = None + """Optional location type of the project directory. When set to `WORKSPACE`, the project will be + retrieved from the local workspace. When set to `GIT`, the project will be + retrieved from a Git repository defined in `git_source`. If the value is empty, the task will + use `GIT` if `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: Project is located in workspace. * `GIT`: Project is located in + cloud Git provider.""" + warehouse_id: Optional[str] = None """ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the @@ -696,6 +705,7 @@ def as_dict(self) -> dict: if self.profiles_directory is not None: body['profiles_directory'] = self.profiles_directory if self.project_directory is not None: body['project_directory'] = self.project_directory if self.schema is not None: body['schema'] = self.schema + if self.source is not None: body['source'] = self.source.value if self.warehouse_id is not None: body['warehouse_id'] = self.warehouse_id return body @@ -707,6 +717,7 @@ def from_dict(cls, d: Dict[str, any]) -> DbtTask: profiles_directory=d.get('profiles_directory', None), project_directory=d.get('project_directory', None), schema=d.get('schema', None), + source=_enum(d, 'source', Source), warehouse_id=d.get('warehouse_id', None)) @@ -771,8 +782,8 @@ class FileArrivalTriggerConfiguration: time the trigger fired. The minimum allowed value is 60 seconds""" url: Optional[str] = None - """URL to be monitored for file arrivals. The path must point to the root or a subpath of the - external location.""" + """The storage location to monitor for file arrivals. 
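Since the file-arrival trigger URL now accepts Unity Catalog volume paths as well as external location URLs, a trigger configuration can be sketched as follows; the volume path is a placeholder and the object would be passed as the trigger argument of jobs.create or jobs.update:

from databricks.sdk.service import jobs

# Hypothetical trigger watching a volume sub-path for new files.
trigger = jobs.TriggerSettings(
    pause_status=jobs.PauseStatus.UNPAUSED,
    file_arrival=jobs.FileArrivalTriggerConfiguration(
        url="/Volumes/main/default/my_volume/landing/",
        min_time_between_triggers_seconds=60,
    ),
)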
The value must point to the root or a subpath + of an external location URL or the root or subpath of a Unity Catalog volume.""" wait_after_last_change_seconds: Optional[int] = None """If set, the trigger starts a run only after no file activity has occurred for the specified @@ -797,6 +808,117 @@ def from_dict(cls, d: Dict[str, any]) -> FileArrivalTriggerConfiguration: wait_after_last_change_seconds=d.get('wait_after_last_change_seconds', None)) +@dataclass +class ForEachStats: + error_message_stats: Optional[ForEachTaskErrorMessageStats] = None + """Sample of 3 most common error messages occurred during the iteration.""" + + task_run_stats: Optional[ForEachTaskTaskRunStats] = None + """Describes stats of the iteration. Only latest retries are considered.""" + + def as_dict(self) -> dict: + """Serializes the ForEachStats into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.error_message_stats: body['error_message_stats'] = self.error_message_stats.as_dict() + if self.task_run_stats: body['task_run_stats'] = self.task_run_stats.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> ForEachStats: + """Deserializes the ForEachStats from a dictionary.""" + return cls(error_message_stats=_from_dict(d, 'error_message_stats', ForEachTaskErrorMessageStats), + task_run_stats=_from_dict(d, 'task_run_stats', ForEachTaskTaskRunStats)) + + +@dataclass +class ForEachTask: + inputs: str + """Array for task to iterate on. This can be a JSON string or a reference to an array parameter.""" + + task: Task + + concurrency: Optional[int] = None + """Controls the number of active iterations task runs. Default is 100 (maximal value).""" + + def as_dict(self) -> dict: + """Serializes the ForEachTask into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.concurrency is not None: body['concurrency'] = self.concurrency + if self.inputs is not None: body['inputs'] = self.inputs + if self.task: body['task'] = self.task.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> ForEachTask: + """Deserializes the ForEachTask from a dictionary.""" + return cls(concurrency=d.get('concurrency', None), + inputs=d.get('inputs', None), + task=_from_dict(d, 'task', Task)) + + +@dataclass +class ForEachTaskErrorMessageStats: + count: Optional[str] = None + """Describes the count of such error message encountered during the iterations.""" + + error_message: Optional[str] = None + """Describes the error message occured during the iterations.""" + + def as_dict(self) -> dict: + """Serializes the ForEachTaskErrorMessageStats into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.count is not None: body['count'] = self.count + if self.error_message is not None: body['error_message'] = self.error_message + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> ForEachTaskErrorMessageStats: + """Deserializes the ForEachTaskErrorMessageStats from a dictionary.""" + return cls(count=d.get('count', None), error_message=d.get('error_message', None)) + + +@dataclass +class ForEachTaskTaskRunStats: + active_iterations: Optional[int] = None + """Describes the iteration runs having an active lifecycle state or an active run sub state.""" + + completed_iterations: Optional[int] = None + """Describes the number of failed and succeeded iteration runs.""" + + failed_iterations: Optional[int] = None + """Describes the number of failed iteration runs.""" + + scheduled_iterations: 
Optional[int] = None + """Describes the number of iteration runs that have been scheduled.""" + + succeeded_iterations: Optional[int] = None + """Describes the number of succeeded iteration runs.""" + + total_iterations: Optional[int] = None + """Describes the length of the list of items to iterate over.""" + + def as_dict(self) -> dict: + """Serializes the ForEachTaskTaskRunStats into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.active_iterations is not None: body['active_iterations'] = self.active_iterations + if self.completed_iterations is not None: body['completed_iterations'] = self.completed_iterations + if self.failed_iterations is not None: body['failed_iterations'] = self.failed_iterations + if self.scheduled_iterations is not None: body['scheduled_iterations'] = self.scheduled_iterations + if self.succeeded_iterations is not None: body['succeeded_iterations'] = self.succeeded_iterations + if self.total_iterations is not None: body['total_iterations'] = self.total_iterations + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> ForEachTaskTaskRunStats: + """Deserializes the ForEachTaskTaskRunStats from a dictionary.""" + return cls(active_iterations=d.get('active_iterations', None), + completed_iterations=d.get('completed_iterations', None), + failed_iterations=d.get('failed_iterations', None), + scheduled_iterations=d.get('scheduled_iterations', None), + succeeded_iterations=d.get('succeeded_iterations', None), + total_iterations=d.get('total_iterations', None)) + + class Format(Enum): MULTI_TASK = 'MULTI_TASK' @@ -2531,6 +2653,36 @@ class RunConditionTaskOp(Enum): NOT_EQUAL = 'NOT_EQUAL' +@dataclass +class RunForEachTask: + concurrency: Optional[int] = None + """Controls the number of active iterations task runs. Default is 100 (maximal value).""" + + inputs: Optional[str] = None + """Array for task to iterate on. This can be a JSON string or a reference to an array parameter.""" + + stats: Optional[ForEachStats] = None + + task: Optional[Task] = None + + def as_dict(self) -> dict: + """Serializes the RunForEachTask into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.concurrency is not None: body['concurrency'] = self.concurrency + if self.inputs is not None: body['inputs'] = self.inputs + if self.stats: body['stats'] = self.stats.as_dict() + if self.task: body['task'] = self.task.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> RunForEachTask: + """Deserializes the RunForEachTask from a dictionary.""" + return cls(concurrency=d.get('concurrency', None), + inputs=d.get('inputs', None), + stats=_from_dict(d, 'stats', ForEachStats), + task=_from_dict(d, 'task', Task)) + + class RunIf(Enum): """An optional value indicating the condition that determines whether the task should be run once its dependencies have been completed. When omitted, defaults to `ALL_SUCCESS`. @@ -3056,6 +3208,9 @@ class RunTask: When running jobs on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability.""" + for_each_task: Optional[RunForEachTask] = None + """If for_each_task, indicates that this task must execute the nested task within it.""" + git_source: Optional[GitSource] = None """An optional specification for a remote Git repository containing the source code used by tasks. Version-controlled source code is supported by notebook, dbt, Python script, and SQL File tasks. 
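The new ForEachTask wraps a regular task and runs it once per element of inputs (a JSON array literal or a reference to an array parameter), with concurrency bounding how many iterations run in parallel. A hedged sketch of a one-time run; the notebook path and cluster ID are placeholders:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()

run_waiter = w.jobs.submit(
    run_name="for-each-demo",
    tasks=[
        jobs.SubmitTask(
            task_key="fan_out",
            for_each_task=jobs.ForEachTask(
                inputs='["us-east-1", "us-west-2", "eu-west-1"]',
                concurrency=2,  # at most two iterations at a time
                task=jobs.Task(
                    task_key="fan_out_iteration",
                    existing_cluster_id="0123-456789-abcdefgh",  # placeholder cluster ID
                    notebook_task=jobs.NotebookTask(
                        notebook_path="/Users/someone@example.com/process_region"),  # placeholder notebook
                ),
            ),
        )
    ],
)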
@@ -3159,6 +3314,7 @@ def as_dict(self) -> dict: if self.end_time is not None: body['end_time'] = self.end_time if self.execution_duration is not None: body['execution_duration'] = self.execution_duration if self.existing_cluster_id is not None: body['existing_cluster_id'] = self.existing_cluster_id + if self.for_each_task: body['for_each_task'] = self.for_each_task.as_dict() if self.git_source: body['git_source'] = self.git_source.as_dict() if self.libraries: body['libraries'] = [v.as_dict() for v in self.libraries] if self.new_cluster: body['new_cluster'] = self.new_cluster.as_dict() @@ -3193,6 +3349,7 @@ def from_dict(cls, d: Dict[str, any]) -> RunTask: end_time=d.get('end_time', None), execution_duration=d.get('execution_duration', None), existing_cluster_id=d.get('existing_cluster_id', None), + for_each_task=_from_dict(d, 'for_each_task', RunForEachTask), git_source=_from_dict(d, 'git_source', GitSource), libraries=_repeated_dict(d, 'libraries', compute.Library), new_cluster=_from_dict(d, 'new_cluster', compute.ClusterSpec), @@ -3662,18 +3819,29 @@ def from_dict(cls, d: Dict[str, any]) -> SqlTaskDashboard: @dataclass class SqlTaskFile: path: str - """Relative path of the SQL file in the remote Git repository.""" + """Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for + workspace paths.""" + + source: Optional[Source] = None + """Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved + from the local workspace. When set to `GIT`, the SQL file will be retrieved from a + Git repository defined in `git_source`. If the value is empty, the task will use `GIT` if + `git_source` is defined and `WORKSPACE` otherwise. + + * `WORKSPACE`: SQL file is located in workspace. * `GIT`: SQL file is located in + cloud Git provider.""" def as_dict(self) -> dict: """Serializes the SqlTaskFile into a dictionary suitable for use as a JSON request body.""" body = {} if self.path is not None: body['path'] = self.path + if self.source is not None: body['source'] = self.source.value return body @classmethod def from_dict(cls, d: Dict[str, any]) -> SqlTaskFile: """Deserializes the SqlTaskFile from a dictionary.""" - return cls(path=d.get('path', None)) + return cls(path=d.get('path', None), source=_enum(d, 'source', Source)) @dataclass @@ -3847,6 +4015,10 @@ class SubmitTask: to manually restart the cluster if it stops responding. 
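SqlTaskFile and DbtTask gain the same source switch already used by notebook tasks: WORKSPACE paths are absolute, while GIT paths are relative to the repository defined in git_source. A sketch with the warehouse ID and paths as placeholders:

from databricks.sdk.service import jobs

sql_from_workspace = jobs.SqlTask(
    warehouse_id="abcdef1234567890",  # placeholder warehouse ID
    file=jobs.SqlTaskFile(path="/Users/someone@example.com/queries/report.sql",
                          source=jobs.Source.WORKSPACE),
)

# dbt projects get the same switch for the project directory.
dbt_from_workspace = jobs.DbtTask(
    commands=["dbt run"],
    project_directory="/Workspace/Repos/someone@example.com/dbt-project",  # placeholder path
    source=jobs.Source.WORKSPACE,
)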
We suggest running jobs on new clusters for greater reliability.""" + for_each_task: Optional[ForEachTask] = None + """If for_each_task, indicates that this must execute the nested task within it for the inputs + provided.""" + health: Optional[JobsHealthRules] = None """An optional set of health rules that can be defined for this job.""" @@ -3920,6 +4092,7 @@ def as_dict(self) -> dict: if self.depends_on: body['depends_on'] = [v.as_dict() for v in self.depends_on] if self.email_notifications: body['email_notifications'] = self.email_notifications.as_dict() if self.existing_cluster_id is not None: body['existing_cluster_id'] = self.existing_cluster_id + if self.for_each_task: body['for_each_task'] = self.for_each_task.as_dict() if self.health: body['health'] = self.health.as_dict() if self.libraries: body['libraries'] = [v.as_dict() for v in self.libraries] if self.new_cluster: body['new_cluster'] = self.new_cluster.as_dict() @@ -3945,6 +4118,7 @@ def from_dict(cls, d: Dict[str, any]) -> SubmitTask: depends_on=_repeated_dict(d, 'depends_on', TaskDependency), email_notifications=_from_dict(d, 'email_notifications', JobEmailNotifications), existing_cluster_id=d.get('existing_cluster_id', None), + for_each_task=_from_dict(d, 'for_each_task', ForEachTask), health=_from_dict(d, 'health', JobsHealthRules), libraries=_repeated_dict(d, 'libraries', compute.Library), new_cluster=_from_dict(d, 'new_cluster', compute.ClusterSpec), @@ -4002,6 +4176,10 @@ class Task: to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability.""" + for_each_task: Optional[ForEachTask] = None + """If for_each_task, indicates that this must execute the nested task within it for the inputs + provided.""" + health: Optional[JobsHealthRules] = None """An optional set of health rules that can be defined for this job.""" @@ -4098,6 +4276,7 @@ def as_dict(self) -> dict: if self.description is not None: body['description'] = self.description if self.email_notifications: body['email_notifications'] = self.email_notifications.as_dict() if self.existing_cluster_id is not None: body['existing_cluster_id'] = self.existing_cluster_id + if self.for_each_task: body['for_each_task'] = self.for_each_task.as_dict() if self.health: body['health'] = self.health.as_dict() if self.job_cluster_key is not None: body['job_cluster_key'] = self.job_cluster_key if self.libraries: body['libraries'] = [v.as_dict() for v in self.libraries] @@ -4131,6 +4310,7 @@ def from_dict(cls, d: Dict[str, any]) -> Task: description=d.get('description', None), email_notifications=_from_dict(d, 'email_notifications', TaskEmailNotifications), existing_cluster_id=d.get('existing_cluster_id', None), + for_each_task=_from_dict(d, 'for_each_task', ForEachTask), health=_from_dict(d, 'health', JobsHealthRules), job_cluster_key=d.get('job_cluster_key', None), libraries=_repeated_dict(d, 'libraries', compute.Library), diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index b27b5cbe0..2dc12f1a0 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -791,7 +791,7 @@ class PipelineCluster: apply_policy_default_values: Optional[bool] = None """Note: This field won't be persisted. Only API users will check this field.""" - autoscale: Optional[compute.AutoScale] = None + autoscale: Optional[PipelineClusterAutoscale] = None """Parameters needed in order to automatically scale clusters up and down based on load. 
Note: autoscaling works best with DB runtime versions 3.0 or later.""" @@ -914,7 +914,7 @@ def as_dict(self) -> dict: def from_dict(cls, d: Dict[str, any]) -> PipelineCluster: """Deserializes the PipelineCluster from a dictionary.""" return cls(apply_policy_default_values=d.get('apply_policy_default_values', None), - autoscale=_from_dict(d, 'autoscale', compute.AutoScale), + autoscale=_from_dict(d, 'autoscale', PipelineClusterAutoscale), aws_attributes=_from_dict(d, 'aws_attributes', compute.AwsAttributes), azure_attributes=_from_dict(d, 'azure_attributes', compute.AzureAttributes), cluster_log_conf=_from_dict(d, 'cluster_log_conf', compute.ClusterLogConf), @@ -933,6 +933,48 @@ def from_dict(cls, d: Dict[str, any]) -> PipelineCluster: ssh_public_keys=d.get('ssh_public_keys', None)) +@dataclass +class PipelineClusterAutoscale: + min_workers: int + """The minimum number of workers the cluster can scale down to when underutilized. It is also the + initial number of workers the cluster will have after creation.""" + + max_workers: int + """The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` + must be strictly greater than `min_workers`.""" + + mode: Optional[PipelineClusterAutoscaleMode] = None + """Databricks Enhanced Autoscaling optimizes cluster utilization by automatically allocating + cluster resources based on workload volume, with minimal impact to the data processing latency + of your pipelines. Enhanced Autoscaling is available for `updates` clusters only. The legacy + autoscaling feature is used for `maintenance` clusters.""" + + def as_dict(self) -> dict: + """Serializes the PipelineClusterAutoscale into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.max_workers is not None: body['max_workers'] = self.max_workers + if self.min_workers is not None: body['min_workers'] = self.min_workers + if self.mode is not None: body['mode'] = self.mode.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> PipelineClusterAutoscale: + """Deserializes the PipelineClusterAutoscale from a dictionary.""" + return cls(max_workers=d.get('max_workers', None), + min_workers=d.get('min_workers', None), + mode=_enum(d, 'mode', PipelineClusterAutoscaleMode)) + + +class PipelineClusterAutoscaleMode(Enum): + """Databricks Enhanced Autoscaling optimizes cluster utilization by automatically allocating + cluster resources based on workload volume, with minimal impact to the data processing latency + of your pipelines. Enhanced Autoscaling is available for `updates` clusters only. The legacy + autoscaling feature is used for `maintenance` clusters.""" + + ENHANCED = 'ENHANCED' + LEGACY = 'LEGACY' + + @dataclass class PipelineEvent: error: Optional[ErrorDetail] = None @@ -1976,25 +2018,6 @@ def list_updates(self, res = self._api.do('GET', f'/api/2.0/pipelines/{pipeline_id}/updates', query=query, headers=headers) return ListUpdatesResponse.from_dict(res) - def reset(self, pipeline_id: str) -> Wait[GetPipelineResponse]: - """Reset a pipeline. - - Resets a pipeline. - - :param pipeline_id: str - - :returns: - Long-running operation waiter for :class:`GetPipelineResponse`. - See :method:wait_get_pipeline_running for more details. 
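Pipeline clusters now use the pipelines-specific autoscale type defined above instead of compute.AutoScale, adding the ENHANCED/LEGACY mode switch. A hedged sketch of creating a pipeline with Enhanced Autoscaling; the notebook path is a placeholder:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import pipelines

w = WorkspaceClient()

created = w.pipelines.create(
    name="autoscaling-demo",
    libraries=[pipelines.PipelineLibrary(
        notebook=pipelines.NotebookLibrary(path="/Users/someone@example.com/dlt_pipeline"))],
    clusters=[pipelines.PipelineCluster(
        label="default",
        autoscale=pipelines.PipelineClusterAutoscale(
            min_workers=1,
            max_workers=5,
            mode=pipelines.PipelineClusterAutoscaleMode.ENHANCED,
        ),
    )],
)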
- """ - - headers = {'Accept': 'application/json', } - self._api.do('POST', f'/api/2.0/pipelines/{pipeline_id}/reset', headers=headers) - return Wait(self.wait_get_pipeline_running, pipeline_id=pipeline_id) - - def reset_and_wait(self, pipeline_id: str, timeout=timedelta(minutes=20)) -> GetPipelineResponse: - return self.reset(pipeline_id=pipeline_id).result(timeout=timeout) - def set_permissions( self, pipeline_id: str, diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index e542c250f..df351fa36 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -266,36 +266,40 @@ def from_dict(cls, d: Dict[str, any]) -> DefaultNamespaceSetting: @dataclass -class DeleteDefaultWorkspaceNamespaceResponse: +class DeleteDefaultNamespaceSettingResponse: + """The etag is returned.""" + etag: str """etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read - -> update pattern to perform setting updates in order to avoid race conditions. That is, get an - etag from a GET request, and pass it with the PATCH request to identify the setting version you - are updating.""" + -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + an etag from a GET request, and pass it with the DELETE request to identify the rule set version + you are deleting.""" def as_dict(self) -> dict: - """Serializes the DeleteDefaultWorkspaceNamespaceResponse into a dictionary suitable for use as a JSON request body.""" + """Serializes the DeleteDefaultNamespaceSettingResponse into a dictionary suitable for use as a JSON request body.""" body = {} if self.etag is not None: body['etag'] = self.etag return body @classmethod - def from_dict(cls, d: Dict[str, any]) -> DeleteDefaultWorkspaceNamespaceResponse: - """Deserializes the DeleteDefaultWorkspaceNamespaceResponse from a dictionary.""" + def from_dict(cls, d: Dict[str, any]) -> DeleteDefaultNamespaceSettingResponse: + """Deserializes the DeleteDefaultNamespaceSettingResponse from a dictionary.""" return cls(etag=d.get('etag', None)) @dataclass class DeletePersonalComputeSettingResponse: + """The etag is returned.""" + etag: str """etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read - -> update pattern to perform setting updates in order to avoid race conditions. That is, get an - etag from a GET request, and pass it with the PATCH request to identify the setting version you - are updating.""" + -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + an etag from a GET request, and pass it with the DELETE request to identify the rule set version + you are deleting.""" def as_dict(self) -> dict: """Serializes the DeletePersonalComputeSettingResponse into a dictionary suitable for use as a JSON request body.""" @@ -309,8 +313,34 @@ def from_dict(cls, d: Dict[str, any]) -> DeletePersonalComputeSettingResponse: return cls(etag=d.get('etag', None)) +@dataclass +class DeleteRestrictWorkspaceAdminsSettingResponse: + """The etag is returned.""" + + etag: str + """etag used for versioning. 
The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + an etag from a GET request, and pass it with the DELETE request to identify the rule set version + you are deleting.""" + + def as_dict(self) -> dict: + """Serializes the DeleteRestrictWorkspaceAdminsSettingResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.etag is not None: body['etag'] = self.etag + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> DeleteRestrictWorkspaceAdminsSettingResponse: + """Deserializes the DeleteRestrictWorkspaceAdminsSettingResponse from a dictionary.""" + return cls(etag=d.get('etag', None)) + + @dataclass class ExchangeToken: + """The exchange token is the result of the token exchange with the IdP""" + credential: Optional[str] = None """The requested token.""" @@ -324,7 +354,7 @@ class ExchangeToken: """The scopes of access granted in the token.""" token_type: Optional[TokenType] = None - """The type of token request. As of now, only `AZURE_ACTIVE_DIRECTORY_TOKEN` is supported.""" + """The type of this exchange token""" def as_dict(self) -> dict: """Serializes the ExchangeToken into a dictionary suitable for use as a JSON request body.""" @@ -348,9 +378,13 @@ def from_dict(cls, d: Dict[str, any]) -> ExchangeToken: @dataclass class ExchangeTokenRequest: + """Exchange a token with the IdP""" + partition_id: PartitionId + """The partition of Credentials store""" token_type: List[TokenType] + """A list of token types being requested""" scopes: List[str] """Array of scopes for the token request.""" @@ -373,6 +407,8 @@ def from_dict(cls, d: Dict[str, any]) -> ExchangeTokenRequest: @dataclass class ExchangeTokenResponse: + """Exhanged tokens were successfully returned.""" + values: Optional[List[ExchangeToken]] = None def as_dict(self) -> dict: @@ -911,6 +947,8 @@ def from_dict(cls, d: Dict[str, any]) -> NetworkConnectivityConfiguration: @dataclass class PartitionId: + """Partition by workspace or account""" + workspace_id: Optional[int] = None """The ID of the workspace.""" @@ -973,7 +1011,8 @@ class PersonalComputeSetting: setting_name: Optional[str] = None """Name of the corresponding setting. This field is populated in the response, but it will not be respected even if it's set in the request body. The setting name in the path parameter will be - respected instead.""" + respected instead. 
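A sketch of the token exchange described by these types; the workspace ID and scope are placeholders, and the API is assumed to be exposed on the client as credentials_manager:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.settings import PartitionId, TokenType

w = WorkspaceClient()

# Exchange the caller's credential with the IdP for a token scoped to one
# workspace partition.
resp = w.credentials_manager.exchange_token(
    partition_id=PartitionId(workspace_id=1234567890),
    token_type=[TokenType.AZURE_ACTIVE_DIRECTORY_TOKEN],
    scopes=['all-apis'])
for token in resp.values or []:
    print(token.token_type, bool(token.credential))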
Setting name is required to be 'default' if the setting only has one instance + per workspace.""" def as_dict(self) -> dict: """Serializes the PersonalComputeSetting into a dictionary suitable for use as a JSON request body.""" @@ -1064,6 +1103,65 @@ def from_dict(cls, d: Dict[str, any]) -> ReplaceIpAccessList: list_type=_enum(d, 'list_type', ListType)) +@dataclass +class RestrictWorkspaceAdminsMessage: + status: RestrictWorkspaceAdminsMessageStatus + + def as_dict(self) -> dict: + """Serializes the RestrictWorkspaceAdminsMessage into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.status is not None: body['status'] = self.status.value + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> RestrictWorkspaceAdminsMessage: + """Deserializes the RestrictWorkspaceAdminsMessage from a dictionary.""" + return cls(status=_enum(d, 'status', RestrictWorkspaceAdminsMessageStatus)) + + +class RestrictWorkspaceAdminsMessageStatus(Enum): + + ALLOW_ALL = 'ALLOW_ALL' + RESTRICT_TOKENS_AND_JOB_RUN_AS = 'RESTRICT_TOKENS_AND_JOB_RUN_AS' + STATUS_UNSPECIFIED = 'STATUS_UNSPECIFIED' + + +@dataclass +class RestrictWorkspaceAdminsSetting: + restrict_workspace_admins: RestrictWorkspaceAdminsMessage + + etag: Optional[str] = None + """etag used for versioning. The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + etag from a GET request, and pass it with the PATCH request to identify the setting version you + are updating.""" + + setting_name: Optional[str] = None + """Name of the corresponding setting. This field is populated in the response, but it will not be + respected even if it's set in the request body. The setting name in the path parameter will be + respected instead. Setting name is required to be 'default' if the setting only has one instance + per workspace.""" + + def as_dict(self) -> dict: + """Serializes the RestrictWorkspaceAdminsSetting into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.etag is not None: body['etag'] = self.etag + if self.restrict_workspace_admins: + body['restrict_workspace_admins'] = self.restrict_workspace_admins.as_dict() + if self.setting_name is not None: body['setting_name'] = self.setting_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> RestrictWorkspaceAdminsSetting: + """Deserializes the RestrictWorkspaceAdminsSetting from a dictionary.""" + return cls(etag=d.get('etag', None), + restrict_workspace_admins=_from_dict(d, 'restrict_workspace_admins', + RestrictWorkspaceAdminsMessage), + setting_name=d.get('setting_name', None)) + + @dataclass class RevokeTokenRequest: token_id: str @@ -1316,6 +1414,43 @@ class TokenType(Enum): AZURE_ACTIVE_DIRECTORY_TOKEN = 'AZURE_ACTIVE_DIRECTORY_TOKEN' +@dataclass +class UpdateDefaultNamespaceSettingRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: DefaultNamespaceSetting + """This represents the setting configuration for the default namespace in the Databricks workspace. 
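A sketch of driving the new restrict-workspace-admins setting end to end with the workspace-level methods added further down in this patch; the field mask path is an assumption, not taken from the diff:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.settings import (RestrictWorkspaceAdminsMessage,
                                              RestrictWorkspaceAdminsMessageStatus,
                                              RestrictWorkspaceAdminsSetting)

w = WorkspaceClient()

# Read first so the PATCH carries a fresh etag.
current = w.settings.get_restrict_workspace_admins_setting()
setting = RestrictWorkspaceAdminsSetting(
    restrict_workspace_admins=RestrictWorkspaceAdminsMessage(
        status=RestrictWorkspaceAdminsMessageStatus.RESTRICT_TOKENS_AND_JOB_RUN_AS),
    etag=current.etag)
updated = w.settings.update_restrict_workspace_admins_setting(
    allow_missing=True,
    setting=setting,
    field_mask='restrict_workspace_admins.status')  # mask path assumed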
+ Setting the default catalog for the workspace determines the catalog that is used when queries + do not reference a fully qualified 3 level name. For example, if the default catalog is set to + 'retail_prod' then a query 'SELECT * FROM myTable' would reference the object + 'retail_prod.default.myTable' (the schema 'default' is always assumed). This setting requires a + restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only + applies when using Unity Catalog-enabled compute.""" + + field_mask: str + """Field mask is required to be passed into the PATCH request. Field mask specifies which fields of + the setting payload will be updated. The field mask needs to be supplied as single string. To + specify multiple fields in the field mask, use comma as the separator (no space).""" + + def as_dict(self) -> dict: + """Serializes the UpdateDefaultNamespaceSettingRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: body['allow_missing'] = self.allow_missing + if self.field_mask is not None: body['field_mask'] = self.field_mask + if self.setting: body['setting'] = self.setting.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> UpdateDefaultNamespaceSettingRequest: + """Deserializes the UpdateDefaultNamespaceSettingRequest from a dictionary.""" + return cls(allow_missing=d.get('allow_missing', None), + field_mask=d.get('field_mask', None), + setting=_from_dict(d, 'setting', DefaultNamespaceSetting)) + + @dataclass class UpdateIpAccessList: """Details required to update an IP access list.""" @@ -1357,6 +1492,66 @@ def from_dict(cls, d: Dict[str, any]) -> UpdateIpAccessList: list_type=_enum(d, 'list_type', ListType)) +@dataclass +class UpdatePersonalComputeSettingRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: PersonalComputeSetting + + field_mask: str + """Field mask is required to be passed into the PATCH request. Field mask specifies which fields of + the setting payload will be updated. The field mask needs to be supplied as single string. To + specify multiple fields in the field mask, use comma as the separator (no space).""" + + def as_dict(self) -> dict: + """Serializes the UpdatePersonalComputeSettingRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: body['allow_missing'] = self.allow_missing + if self.field_mask is not None: body['field_mask'] = self.field_mask + if self.setting: body['setting'] = self.setting.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> UpdatePersonalComputeSettingRequest: + """Deserializes the UpdatePersonalComputeSettingRequest from a dictionary.""" + return cls(allow_missing=d.get('allow_missing', None), + field_mask=d.get('field_mask', None), + setting=_from_dict(d, 'setting', PersonalComputeSetting)) + + +@dataclass +class UpdateRestrictWorkspaceAdminsSettingRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: RestrictWorkspaceAdminsSetting + + field_mask: str + """Field mask is required to be passed into the PATCH request. Field mask specifies which fields of + the setting payload will be updated. The field mask needs to be supplied as single string. 
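Since the same field-mask rule repeats across these update request types, a small illustration of the expected string format (the field paths are placeholders):

# A single field of the setting payload:
field_mask = 'namespace.value'
# Several fields: still one string, comma-separated, with no space after the comma.
field_mask = 'namespace.value,setting_name'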
To + specify multiple fields in the field mask, use comma as the separator (no space).""" + + def as_dict(self) -> dict: + """Serializes the UpdateRestrictWorkspaceAdminsSettingRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: body['allow_missing'] = self.allow_missing + if self.field_mask is not None: body['field_mask'] = self.field_mask + if self.setting: body['setting'] = self.setting.as_dict() + return body + + @classmethod + def from_dict(cls, d: Dict[str, any]) -> UpdateRestrictWorkspaceAdminsSettingRequest: + """Deserializes the UpdateRestrictWorkspaceAdminsSettingRequest from a dictionary.""" + return cls(allow_missing=d.get('allow_missing', None), + field_mask=d.get('field_mask', None), + setting=_from_dict(d, 'setting', RestrictWorkspaceAdminsSetting)) + + WorkspaceConf = Dict[str, str] @@ -1581,12 +1776,14 @@ class AccountSettingsAPI: def __init__(self, api_client): self._api = api_client - def delete_personal_compute_setting(self, etag: str) -> DeletePersonalComputeSettingResponse: + def delete_personal_compute_setting(self, + *, + etag: Optional[str] = None) -> DeletePersonalComputeSettingResponse: """Delete Personal Compute setting. Reverts back the Personal Compute setting value to default (ON) - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern @@ -1606,12 +1803,12 @@ def delete_personal_compute_setting(self, etag: str) -> DeletePersonalComputeSet headers=headers) return DeletePersonalComputeSettingResponse.from_dict(res) - def read_personal_compute_setting(self, etag: str) -> PersonalComputeSetting: + def get_personal_compute_setting(self, *, etag: Optional[str] = None) -> PersonalComputeSetting: """Get Personal Compute setting. Gets the value of the Personal Compute setting. - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern @@ -1631,23 +1828,25 @@ def read_personal_compute_setting(self, etag: str) -> PersonalComputeSetting: headers=headers) return PersonalComputeSetting.from_dict(res) - def update_personal_compute_setting( - self, - *, - allow_missing: Optional[bool] = None, - setting: Optional[PersonalComputeSetting] = None) -> PersonalComputeSetting: + def update_personal_compute_setting(self, allow_missing: bool, setting: PersonalComputeSetting, + field_mask: str) -> PersonalComputeSetting: """Update Personal Compute setting. Updates the value of the Personal Compute setting. - :param allow_missing: bool (optional) - This should always be set to true for Settings RPCs. Added for AIP compliance. - :param setting: :class:`PersonalComputeSetting` (optional) + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`PersonalComputeSetting` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. 
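A sketch of the reworked account-level call with its now-required field_mask; the account client attribute and the inner field path are assumptions, and the setting is reused from a GET rather than built by hand:

from databricks.sdk import AccountClient

a = AccountClient()

# Fetch the current value, then PATCH it back with an explicit field mask.
setting = a.settings.get_personal_compute_setting()
updated = a.settings.update_personal_compute_setting(
    allow_missing=True,
    setting=setting,
    field_mask='personal_compute.value')  # mask path assumed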
The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). :returns: :class:`PersonalComputeSetting` """ body = {} if allow_missing is not None: body['allow_missing'] = allow_missing + if field_mask is not None: body['field_mask'] = field_mask if setting is not None: body['setting'] = setting.as_dict() headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } res = self._api.do( @@ -1669,11 +1868,13 @@ def exchange_token(self, partition_id: PartitionId, token_type: List[TokenType], scopes: List[str]) -> ExchangeTokenResponse: """Exchange token. - Exchange tokens with an Identity Provider to get a new access token. It allowes specifying scopes to + Exchange tokens with an Identity Provider to get a new access token. It allows specifying scopes to determine token permissions. :param partition_id: :class:`PartitionId` + The partition of Credentials store :param token_type: List[:class:`TokenType`] + A list of token types being requested :param scopes: List[str] Array of scopes for the token request. @@ -2132,7 +2333,9 @@ class SettingsAPI: def __init__(self, api_client): self._api = api_client - def delete_default_workspace_namespace(self, etag: str) -> DeleteDefaultWorkspaceNamespaceResponse: + def delete_default_namespace_setting(self, + *, + etag: Optional[str] = None) -> DeleteDefaultNamespaceSettingResponse: """Delete the default namespace setting. Deletes the default namespace setting for the workspace. A fresh etag needs to be provided in DELETE @@ -2140,14 +2343,14 @@ def delete_default_workspace_namespace(self, etag: str) -> DeleteDefaultWorkspac request. If the setting is updated/deleted concurrently, DELETE will fail with 409 and the request will need to be retried by using the fresh etag in the 409 response. - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - :returns: :class:`DeleteDefaultWorkspaceNamespaceResponse` + :returns: :class:`DeleteDefaultNamespaceSettingResponse` """ query = {} @@ -2157,14 +2360,44 @@ def delete_default_workspace_namespace(self, etag: str) -> DeleteDefaultWorkspac '/api/2.0/settings/types/default_namespace_ws/names/default', query=query, headers=headers) - return DeleteDefaultWorkspaceNamespaceResponse.from_dict(res) + return DeleteDefaultNamespaceSettingResponse.from_dict(res) + + def delete_restrict_workspace_admins_setting(self, + *, + etag: Optional[str] = None + ) -> DeleteRestrictWorkspaceAdminsSettingResponse: + """Delete the restrict workspace admins setting. + + Reverts the restrict workspace admins setting status for the workspace. A fresh etag needs to be + provided in DELETE requests (as a query parameter). The etag can be retrieved by making a GET request + before the DELETE request. If the setting is updated/deleted concurrently, DELETE will fail with 409 + and the request will need to be retried by using the fresh etag in the 409 response. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. 
This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteRestrictWorkspaceAdminsSettingResponse` + """ + + query = {} + if etag is not None: query['etag'] = etag + headers = {'Accept': 'application/json', } + res = self._api.do('DELETE', + '/api/2.0/settings/types/restrict_workspace_admins/names/default', + query=query, + headers=headers) + return DeleteRestrictWorkspaceAdminsSettingResponse.from_dict(res) - def read_default_workspace_namespace(self, etag: str) -> DefaultNamespaceSetting: + def get_default_namespace_setting(self, *, etag: Optional[str] = None) -> DefaultNamespaceSetting: """Get the default namespace setting. Gets the default namespace setting. - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern @@ -2183,12 +2416,34 @@ def read_default_workspace_namespace(self, etag: str) -> DefaultNamespaceSetting headers=headers) return DefaultNamespaceSetting.from_dict(res) - def update_default_workspace_namespace( - self, - *, - allow_missing: Optional[bool] = None, - field_mask: Optional[str] = None, - setting: Optional[DefaultNamespaceSetting] = None) -> DefaultNamespaceSetting: + def get_restrict_workspace_admins_setting(self, + *, + etag: Optional[str] = None) -> RestrictWorkspaceAdminsSetting: + """Get the restrict workspace admins setting. + + Gets the restrict workspace admins setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`RestrictWorkspaceAdminsSetting` + """ + + query = {} + if etag is not None: query['etag'] = etag + headers = {'Accept': 'application/json', } + res = self._api.do('GET', + '/api/2.0/settings/types/restrict_workspace_admins/names/default', + query=query, + headers=headers) + return RestrictWorkspaceAdminsSetting.from_dict(res) + + def update_default_namespace_setting(self, allow_missing: bool, setting: DefaultNamespaceSetting, + field_mask: str) -> DefaultNamespaceSetting: """Update the default namespace setting. Updates the default namespace setting for the workspace. A fresh etag needs to be provided in PATCH @@ -2198,16 +2453,9 @@ def update_default_workspace_namespace( updated concurrently, PATCH will fail with 409 and the request will need to be retried by using the fresh etag in the 409 response. - :param allow_missing: bool (optional) + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. 
- :param field_mask: str (optional) - Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the - setting payload will be updated. For example, for Default Namespace setting, the field mask is - supposed to contain fields from the DefaultNamespaceSetting.namespace schema. - - The field mask needs to be supplied as single string. To specify multiple fields in the field mask, - use comma as the seperator (no space). - :param setting: :class:`DefaultNamespaceSetting` (optional) + :param setting: :class:`DefaultNamespaceSetting` This represents the setting configuration for the default namespace in the Databricks workspace. Setting the default catalog for the workspace determines the catalog that is used when queries do not reference a fully qualified 3 level name. For example, if the default catalog is set to @@ -2215,6 +2463,10 @@ def update_default_workspace_namespace( 'retail_prod.default.myTable' (the schema 'default' is always assumed). This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). :returns: :class:`DefaultNamespaceSetting` """ @@ -2229,6 +2481,37 @@ def update_default_workspace_namespace( headers=headers) return DefaultNamespaceSetting.from_dict(res) + def update_restrict_workspace_admins_setting(self, allow_missing: bool, + setting: RestrictWorkspaceAdminsSetting, + field_mask: str) -> RestrictWorkspaceAdminsSetting: + """Update the restrict workspace admins setting. + + Updates the restrict workspace admins setting for the workspace. A fresh etag needs to be provided in + PATCH requests (as part of the setting field). The etag can be retrieved by making a GET request + before the PATCH request. If the setting is updated concurrently, PATCH will fail with 409 and the + request will need to be retried by using the fresh etag in the 409 response. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`RestrictWorkspaceAdminsSetting` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). + + :returns: :class:`RestrictWorkspaceAdminsSetting` + """ + body = {} + if allow_missing is not None: body['allow_missing'] = allow_missing + if field_mask is not None: body['field_mask'] = field_mask + if setting is not None: body['setting'] = setting.as_dict() + headers = {'Accept': 'application/json', 'Content-Type': 'application/json', } + res = self._api.do('PATCH', + '/api/2.0/settings/types/restrict_workspace_admins/names/default', + body=body, + headers=headers) + return RestrictWorkspaceAdminsSetting.from_dict(res) + class TokenManagementAPI: """Enables administrators to get all tokens and delete tokens for other users. 
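Returning to the workspace-level namespace setting above, a sketch of the PATCH flow with the now-required arguments; the namespace attribute path and mask value are assumptions rather than part of this hunk:

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# GET for a fresh etag, adjust the namespace, then PATCH only that field.
setting = w.settings.get_default_namespace_setting()
# setting.namespace.value = 'retail_prod'   # attribute path assumed
updated = w.settings.update_default_namespace_setting(
    allow_missing=True,
    setting=setting,
    field_mask='namespace.value')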
Admins can either get every diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index b7dc9a297..c1bf9ea41 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -346,6 +346,7 @@ def from_dict(cls, d: Dict[str, any]) -> ChannelInfo: class ChannelName(Enum): + """Name of the channel""" CHANNEL_NAME_CURRENT = 'CHANNEL_NAME_CURRENT' CHANNEL_NAME_CUSTOM = 'CHANNEL_NAME_CUSTOM' @@ -4049,6 +4050,9 @@ def list(self, Fetch a paginated list of dashboard objects. + ### **Warning: Calling this API concurrently 10 or more times could result in throttling, service + degradation, or a temporary ban.** + :param order: :class:`ListOrder` (optional) Name of dashboard attribute to order by. :param page: int (optional) @@ -4487,7 +4491,9 @@ def list(self, :param max_results: int (optional) Limit the number of results returned in one page. The default is 100. :param page_token: str (optional) - A token that can be used to get the next page of results. + A token that can be used to get the next page of results. The token can contains characters that + need to be encoded before using it in a URL. For example, the character '+' needs to be replaced by + %2B. :returns: Iterator over :class:`QueryInfo` """ diff --git a/databricks/sdk/service/vectorsearch.py b/databricks/sdk/service/vectorsearch.py index 52aaf172a..9815eb0bf 100755 --- a/databricks/sdk/service/vectorsearch.py +++ b/databricks/sdk/service/vectorsearch.py @@ -72,7 +72,7 @@ class CreateVectorIndexRequest: `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates.""" - delta_sync_vector_index_spec: Optional[DeltaSyncVectorIndexSpecRequest] = None + delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest] = None """Specification for Delta Sync Index. 
Required if `index_type` is `DELTA_SYNC`.""" direct_access_index_spec: Optional[DirectAccessVectorIndexSpec] = None @@ -84,8 +84,7 @@ class CreateVectorIndexRequest: def as_dict(self) -> dict: """Serializes the CreateVectorIndexRequest into a dictionary suitable for use as a JSON request body.""" body = {} - if self.delta_sync_vector_index_spec: - body['delta_sync_vector_index_spec'] = self.delta_sync_vector_index_spec.as_dict() + if self.delta_sync_index_spec: body['delta_sync_index_spec'] = self.delta_sync_index_spec.as_dict() if self.direct_access_index_spec: body['direct_access_index_spec'] = self.direct_access_index_spec.as_dict() if self.endpoint_name is not None: body['endpoint_name'] = self.endpoint_name @@ -97,8 +96,8 @@ def as_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, any]) -> CreateVectorIndexRequest: """Deserializes the CreateVectorIndexRequest from a dictionary.""" - return cls(delta_sync_vector_index_spec=_from_dict(d, 'delta_sync_vector_index_spec', - DeltaSyncVectorIndexSpecRequest), + return cls(delta_sync_index_spec=_from_dict(d, 'delta_sync_index_spec', + DeltaSyncVectorIndexSpecRequest), direct_access_index_spec=_from_dict(d, 'direct_access_index_spec', DirectAccessVectorIndexSpec), endpoint_name=d.get('endpoint_name', None), @@ -1003,7 +1002,7 @@ def create_index(self, primary_key: str, index_type: VectorIndexType, *, - delta_sync_vector_index_spec: Optional[DeltaSyncVectorIndexSpecRequest] = None, + delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest] = None, direct_access_index_spec: Optional[DirectAccessVectorIndexSpec] = None, endpoint_name: Optional[str] = None) -> CreateVectorIndexResponse: """Create an index. @@ -1021,7 +1020,7 @@ def create_index(self, incrementally updating the index as the underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates. - :param delta_sync_vector_index_spec: :class:`DeltaSyncVectorIndexSpecRequest` (optional) + :param delta_sync_index_spec: :class:`DeltaSyncVectorIndexSpecRequest` (optional) Specification for Delta Sync Index. Required if `index_type` is `DELTA_SYNC`. :param direct_access_index_spec: :class:`DirectAccessVectorIndexSpec` (optional) Specification for Direct Vector Access Index. Required if `index_type` is `DIRECT_ACCESS`. @@ -1031,8 +1030,7 @@ def create_index(self, :returns: :class:`CreateVectorIndexResponse` """ body = {} - if delta_sync_vector_index_spec is not None: - body['delta_sync_vector_index_spec'] = delta_sync_vector_index_spec.as_dict() + if delta_sync_index_spec is not None: body['delta_sync_index_spec'] = delta_sync_index_spec.as_dict() if direct_access_index_spec is not None: body['direct_access_index_spec'] = direct_access_index_spec.as_dict() if endpoint_name is not None: body['endpoint_name'] = endpoint_name diff --git a/databricks/sdk/service/workspace.py b/databricks/sdk/service/workspace.py index 142fff57d..d5d5e6a2a 100755 --- a/databricks/sdk/service/workspace.py +++ b/databricks/sdk/service/workspace.py @@ -317,16 +317,20 @@ class ExportResponse: """The base64-encoded content. 
If the limit (10MB) is exceeded, exception with error code **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown.""" + file_type: Optional[str] = None + """The file type of the exported file.""" + def as_dict(self) -> dict: """Serializes the ExportResponse into a dictionary suitable for use as a JSON request body.""" body = {} if self.content is not None: body['content'] = self.content + if self.file_type is not None: body['file_type'] = self.file_type return body @classmethod def from_dict(cls, d: Dict[str, any]) -> ExportResponse: """Deserializes the ExportResponse from a dictionary.""" - return cls(content=d.get('content', None)) + return cls(content=d.get('content', None), file_type=d.get('file_type', None)) @dataclass @@ -608,11 +612,15 @@ class ObjectInfo: """The type of the object in workspace. - `NOTEBOOK`: document that contains runnable code, visualizations, and explanatory text. - - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`: file - `REPO`: repository""" + `DIRECTORY`: directory - `LIBRARY`: library - `FILE`: file - `REPO`: repository - `DASHBOARD`: + Lakeview dashboard""" path: Optional[str] = None """The absolute path of the object.""" + resource_id: Optional[str] = None + """A unique identifier for the object that is consistent across all Databricks APIs.""" + size: Optional[int] = None """Only applicable to files. The file size in bytes can be returned.""" @@ -625,6 +633,7 @@ def as_dict(self) -> dict: if self.object_id is not None: body['object_id'] = self.object_id if self.object_type is not None: body['object_type'] = self.object_type.value if self.path is not None: body['path'] = self.path + if self.resource_id is not None: body['resource_id'] = self.resource_id if self.size is not None: body['size'] = self.size return body @@ -637,6 +646,7 @@ def from_dict(cls, d: Dict[str, any]) -> ObjectInfo: object_id=d.get('object_id', None), object_type=_enum(d, 'object_type', ObjectType), path=d.get('path', None), + resource_id=d.get('resource_id', None), size=d.get('size', None)) @@ -644,8 +654,10 @@ class ObjectType(Enum): """The type of the object in workspace. - `NOTEBOOK`: document that contains runnable code, visualizations, and explanatory text. - - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`: file - `REPO`: repository""" + `DIRECTORY`: directory - `LIBRARY`: library - `FILE`: file - `REPO`: repository - `DASHBOARD`: + Lakeview dashboard""" + DASHBOARD = 'DASHBOARD' DIRECTORY = 'DIRECTORY' FILE = 'FILE' LIBRARY = 'LIBRARY' diff --git a/docs/account/catalog/metastores.rst b/docs/account/catalog/metastores.rst index d0c2752ed..11aae2abb 100644 --- a/docs/account/catalog/metastores.rst +++ b/docs/account/catalog/metastores.rst @@ -122,7 +122,7 @@ storage_root="s3://%s/%s" % (os.environ["TEST_BUCKET"], f'sdk-{time.time_ns()}')) - _ = w.metastores.update(id=created.metastore_id, name=f'sdk-{time.time_ns()}') + _ = w.metastores.update(id=created.metastore_id, new_name=f'sdk-{time.time_ns()}') # cleanup w.metastores.delete(id=created.metastore_id, force=True) diff --git a/docs/account/settings/settings.rst b/docs/account/settings/settings.rst index 7f9d44534..fe277cbc4 100644 --- a/docs/account/settings/settings.rst +++ b/docs/account/settings/settings.rst @@ -12,13 +12,13 @@ setting is present on all accounts even though it's never set on a given account. Deletion reverts the value of the setting back to the default value. - .. py:method:: delete_personal_compute_setting(etag: str) -> DeletePersonalComputeSettingResponse + .. 
py:method:: delete_personal_compute_setting( [, etag: Optional[str]]) -> DeletePersonalComputeSettingResponse Delete Personal Compute setting. Reverts back the Personal Compute setting value to default (ON) - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern @@ -28,13 +28,13 @@ :returns: :class:`DeletePersonalComputeSettingResponse` - .. py:method:: read_personal_compute_setting(etag: str) -> PersonalComputeSetting + .. py:method:: get_personal_compute_setting( [, etag: Optional[str]]) -> PersonalComputeSetting Get Personal Compute setting. Gets the value of the Personal Compute setting. - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern @@ -44,15 +44,19 @@ :returns: :class:`PersonalComputeSetting` - .. py:method:: update_personal_compute_setting( [, allow_missing: Optional[bool], setting: Optional[PersonalComputeSetting]]) -> PersonalComputeSetting + .. py:method:: update_personal_compute_setting(allow_missing: bool, setting: PersonalComputeSetting, field_mask: str) -> PersonalComputeSetting Update Personal Compute setting. Updates the value of the Personal Compute setting. - :param allow_missing: bool (optional) - This should always be set to true for Settings RPCs. Added for AIP compliance. - :param setting: :class:`PersonalComputeSetting` (optional) + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`PersonalComputeSetting` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). :returns: :class:`PersonalComputeSetting` \ No newline at end of file diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 155a197a0..6b5dba6af 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -296,6 +296,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: MonitorRefreshInfo + :members: + :undoc-members: + .. autoclass:: MonitorTimeSeriesProfileType :members: :undoc-members: diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index 2ba82a8dd..f7ff6c853 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -8,6 +8,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: Adlsgen2Info + :members: + :undoc-members: + .. autoclass:: AutoScale :members: :undoc-members: @@ -224,6 +228,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GcsStorageInfo + :members: + :undoc-members: + .. 
autoclass:: GetClusterPermissionLevelsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/files.rst b/docs/dbdataclasses/files.rst index a4a97a5f9..ee2410600 100644 --- a/docs/dbdataclasses/files.rst +++ b/docs/dbdataclasses/files.rst @@ -24,6 +24,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DirectoryEntry + :members: + :undoc-members: + .. autoclass:: DownloadResponse :members: :undoc-members: @@ -32,6 +36,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListDirectoryResponse + :members: + :undoc-members: + .. autoclass:: ListStatusResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 6f17761c4..7c9576cf5 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -72,6 +72,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ForEachStats + :members: + :undoc-members: + +.. autoclass:: ForEachTask + :members: + :undoc-members: + +.. autoclass:: ForEachTaskErrorMessageStats + :members: + :undoc-members: + +.. autoclass:: ForEachTaskTaskRunStats + :members: + :undoc-members: + .. autoclass:: GetJobPermissionLevelsResponse :members: :undoc-members: @@ -248,6 +264,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RunForEachTask + :members: + :undoc-members: + .. autoclass:: RunJobOutput :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index 9c3c1f4c1..39499235e 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -84,6 +84,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PipelineClusterAutoscale + :members: + :undoc-members: + .. autoclass:: PipelineEvent :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index a7da42d06..d4843893c 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -40,7 +40,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DeleteDefaultWorkspaceNamespaceResponse +.. autoclass:: DeleteDefaultNamespaceSettingResponse :members: :undoc-members: @@ -48,6 +48,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteRestrictWorkspaceAdminsSettingResponse + :members: + :undoc-members: + .. autoclass:: ExchangeToken :members: :undoc-members: @@ -148,6 +152,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: RestrictWorkspaceAdminsMessage + :members: + :undoc-members: + +.. autoclass:: RestrictWorkspaceAdminsSetting + :members: + :undoc-members: + .. autoclass:: RevokeTokenRequest :members: :undoc-members: @@ -184,6 +196,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateDefaultNamespaceSettingRequest + :members: + :undoc-members: + .. autoclass:: UpdateIpAccessList :members: :undoc-members: + +.. autoclass:: UpdatePersonalComputeSettingRequest + :members: + :undoc-members: + +.. 
autoclass:: UpdateRestrictWorkspaceAdminsSettingRequest + :members: + :undoc-members: diff --git a/docs/workspace/catalog/connections.rst b/docs/workspace/catalog/connections.rst index 6125db714..5ed7ad429 100644 --- a/docs/workspace/catalog/connections.rst +++ b/docs/workspace/catalog/connections.rst @@ -103,8 +103,7 @@ f'sdk-{time.time_ns()}', }) - conn_update = w.connections.update(name=conn_create.name, - name_arg=conn_create.name, + conn_update = w.connections.update(name_arg=conn_create.name, options={ "host": "%s-fake-workspace.cloud.databricks.com" % (f'sdk-{time.time_ns()}'), @@ -149,7 +148,7 @@ :returns: Iterator over :class:`ConnectionInfo` - .. py:method:: update(name_arg: str, options: Dict[str, str] [, name: Optional[str], new_name: Optional[str], owner: Optional[str]]) -> ConnectionInfo + .. py:method:: update(name_arg: str, options: Dict[str, str] [, new_name: Optional[str], owner: Optional[str]]) -> ConnectionInfo Usage: @@ -175,8 +174,7 @@ f'sdk-{time.time_ns()}', }) - conn_update = w.connections.update(name=conn_create.name, - name_arg=conn_create.name, + conn_update = w.connections.update(name_arg=conn_create.name, options={ "host": "%s-fake-workspace.cloud.databricks.com" % (f'sdk-{time.time_ns()}'), @@ -197,8 +195,6 @@ Name of the connection. :param options: Dict[str,str] A map of key-value properties attached to the securable. - :param name: str (optional) - Name of the connection. :param new_name: str (optional) New name for the connection. :param owner: str (optional) diff --git a/docs/workspace/catalog/lakehouse_monitors.rst b/docs/workspace/catalog/lakehouse_monitors.rst index 9e224207e..e4162b09a 100644 --- a/docs/workspace/catalog/lakehouse_monitors.rst +++ b/docs/workspace/catalog/lakehouse_monitors.rst @@ -11,6 +11,27 @@ catalog). Viewing the dashboard, computed metrics, or monitor configuration only requires the user to have **SELECT** privileges on the table (along with **USE_SCHEMA** and **USE_CATALOG**). + .. py:method:: cancel_refresh(full_name: str, refresh_id: str) + + Cancel refresh. + + Cancel an active monitor refresh for the given refresh ID. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + :param refresh_id: str + ID of the refresh. + + + + .. py:method:: create(full_name: str, assets_dir: str, output_schema_name: str [, baseline_table_name: Optional[str], custom_metrics: Optional[List[MonitorCustomMetric]], data_classification_config: Optional[MonitorDataClassificationConfig], inference_log: Optional[MonitorInferenceLogProfileType], notifications: Optional[List[MonitorNotificationsConfig]], schedule: Optional[MonitorCronSchedule], skip_builtin_dashboard: Optional[bool], slicing_exprs: Optional[List[str]], snapshot: Optional[Any], time_series: Optional[MonitorTimeSeriesProfileType], warehouse_id: Optional[str]]) -> MonitorInfo Create a table monitor. @@ -105,6 +126,66 @@ :returns: :class:`MonitorInfo` + .. py:method:: get_refresh(full_name: str, refresh_id: str) -> MonitorRefreshInfo + + Get refresh. + + Gets info about a specific monitor refresh using the given refresh ID. + + The caller must either: 1. 
be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + :param refresh_id: str + ID of the refresh. + + :returns: :class:`MonitorRefreshInfo` + + + .. py:method:: list_refreshes(full_name: str) -> Iterator[MonitorRefreshInfo] + + List refreshes. + + Gets an array containing the history of the most recent refreshes (up to 25) for this table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + + :returns: Iterator over :class:`MonitorRefreshInfo` + + + .. py:method:: run_refresh(full_name: str) -> MonitorRefreshInfo + + Queue a metric refresh for a monitor. + + Queues a metric refresh on the monitor for the specified table. The refresh will execute in the + background. + + The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG** on the + table's parent catalog and be an owner of the table's parent schema 3. have the following permissions: + - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was created. + + :param full_name: str + Full name of the table. + + :returns: :class:`MonitorRefreshInfo` + + .. py:method:: update(full_name: str, assets_dir: str, output_schema_name: str [, baseline_table_name: Optional[str], custom_metrics: Optional[List[MonitorCustomMetric]], data_classification_config: Optional[MonitorDataClassificationConfig], inference_log: Optional[MonitorInferenceLogProfileType], notifications: Optional[List[MonitorNotificationsConfig]], schedule: Optional[MonitorCronSchedule], slicing_exprs: Optional[List[str]], snapshot: Optional[Any], time_series: Optional[MonitorTimeSeriesProfileType]]) -> MonitorInfo Update a table monitor. diff --git a/docs/workspace/catalog/metastores.rst b/docs/workspace/catalog/metastores.rst index a5beb397a..6fb939894 100644 --- a/docs/workspace/catalog/metastores.rst +++ b/docs/workspace/catalog/metastores.rst @@ -243,7 +243,7 @@ - .. py:method:: update(id: str [, delta_sharing_organization_name: Optional[str], delta_sharing_recipient_token_lifetime_in_seconds: Optional[int], delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope], name: Optional[str], new_name: Optional[str], owner: Optional[str], privilege_model_version: Optional[str], storage_root_credential_id: Optional[str]]) -> MetastoreInfo + .. 
py:method:: update(id: str [, delta_sharing_organization_name: Optional[str], delta_sharing_recipient_token_lifetime_in_seconds: Optional[int], delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope], new_name: Optional[str], owner: Optional[str], privilege_model_version: Optional[str], storage_root_credential_id: Optional[str]]) -> MetastoreInfo Usage: @@ -261,7 +261,7 @@ storage_root="s3://%s/%s" % (os.environ["TEST_BUCKET"], f'sdk-{time.time_ns()}')) - _ = w.metastores.update(id=created.metastore_id, name=f'sdk-{time.time_ns()}') + _ = w.metastores.update(id=created.metastore_id, new_name=f'sdk-{time.time_ns()}') # cleanup w.metastores.delete(id=created.metastore_id, force=True) @@ -280,8 +280,6 @@ The lifetime of delta sharing recipient token in seconds. :param delta_sharing_scope: :class:`UpdateMetastoreDeltaSharingScope` (optional) The scope of Delta Sharing enabled for the metastore. - :param name: str (optional) - The user-specified name of the metastore. :param new_name: str (optional) New name for the metastore. :param owner: str (optional) diff --git a/docs/workspace/catalog/registered_models.rst b/docs/workspace/catalog/registered_models.rst index 784e0d272..09d29e55b 100644 --- a/docs/workspace/catalog/registered_models.rst +++ b/docs/workspace/catalog/registered_models.rst @@ -158,7 +158,7 @@ :returns: :class:`RegisteredModelAlias` - .. py:method:: update(full_name: str [, comment: Optional[str], name: Optional[str], new_name: Optional[str], owner: Optional[str]]) -> RegisteredModelInfo + .. py:method:: update(full_name: str [, comment: Optional[str], new_name: Optional[str], owner: Optional[str]]) -> RegisteredModelInfo Update a Registered Model. @@ -174,8 +174,6 @@ The three-level (fully qualified) name of the registered model :param comment: str (optional) The comment attached to the registered model - :param name: str (optional) - The name of the registered model :param new_name: str (optional) New name for the registered model. :param owner: str (optional) diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index 46ea49ff4..7453521b7 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -137,7 +137,7 @@ :returns: Iterator over :class:`SchemaInfo` - .. py:method:: update(full_name: str [, comment: Optional[str], enable_predictive_optimization: Optional[EnablePredictiveOptimization], name: Optional[str], new_name: Optional[str], owner: Optional[str], properties: Optional[Dict[str, str]]]) -> SchemaInfo + .. py:method:: update(full_name: str [, comment: Optional[str], enable_predictive_optimization: Optional[EnablePredictiveOptimization], new_name: Optional[str], owner: Optional[str], properties: Optional[Dict[str, str]]]) -> SchemaInfo Usage: @@ -173,8 +173,6 @@ User-provided free-form text description. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) Whether predictive optimization should be enabled for this object and objects under it. - :param name: str (optional) - Name of schema, relative to parent catalog. :param new_name: str (optional) New name for the schema. :param owner: str (optional) diff --git a/docs/workspace/catalog/volumes.rst b/docs/workspace/catalog/volumes.rst index 0e0426e5e..e8da5a906 100644 --- a/docs/workspace/catalog/volumes.rst +++ b/docs/workspace/catalog/volumes.rst @@ -205,7 +205,7 @@ :returns: :class:`VolumeInfo` - .. 
py:method:: update(full_name_arg: str [, comment: Optional[str], name: Optional[str], new_name: Optional[str], owner: Optional[str]]) -> VolumeInfo + .. py:method:: update(full_name_arg: str [, comment: Optional[str], new_name: Optional[str], owner: Optional[str]]) -> VolumeInfo Usage: @@ -266,8 +266,6 @@ The three-level (fully qualified) name of the volume :param comment: str (optional) The comment attached to the volume - :param name: str (optional) - The name of the volume :param new_name: str (optional) New name for the volume. :param owner: str (optional) diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index a01b23566..a9e3d9864 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -40,7 +40,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -86,7 +86,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -244,7 +244,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -292,7 +292,7 @@ cluster_name = f'sdk-{time.time_ns()}' - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) clstr = w.clusters.create(cluster_name=cluster_name, spark_version=latest, @@ -487,7 +487,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -542,7 +542,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -685,7 +685,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -725,7 +725,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -782,7 +782,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -856,7 +856,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) Selects the latest Databricks Runtime Version. 
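The examples in this file now pin the LTS runtime; a small sketch of the difference between the two calls (printed values are illustrative):

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Newest matching runtime, whether or not it is a long-term-support release.
newest = w.clusters.select_spark_version(latest=True)
# Newest LTS runtime, which the regenerated examples above now prefer.
newest_lts = w.clusters.select_spark_version(latest=True, long_term_support=True)
print(newest, newest_lts)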
@@ -909,7 +909,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' @@ -958,7 +958,7 @@ w = WorkspaceClient() - latest = w.clusters.select_spark_version(latest=True) + latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index b2ba795db..47ef1eff3 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -1,5 +1,5 @@ -``w.current_user``: CurrentUser -=============================== +``w.current_user``: Current User +================================ .. currentmodule:: databricks.sdk.service.iam .. py:class:: CurrentUserAPI diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index 29872c900..6d2edc1ac 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -308,22 +308,6 @@ :returns: :class:`ListUpdatesResponse` - .. py:method:: reset(pipeline_id: str) -> Wait[GetPipelineResponse] - - Reset a pipeline. - - Resets a pipeline. - - :param pipeline_id: str - - :returns: - Long-running operation waiter for :class:`GetPipelineResponse`. - See :method:wait_get_pipeline_running for more details. - - - .. py:method:: reset_and_wait(pipeline_id: str, timeout: datetime.timedelta = 0:20:00) -> GetPipelineResponse - - .. py:method:: set_permissions(pipeline_id: str [, access_control_list: Optional[List[PipelineAccessControlRequest]]]) -> PipelinePermissions Set pipeline permissions. diff --git a/docs/workspace/settings/credentials_manager.rst b/docs/workspace/settings/credentials_manager.rst index 1767ba34e..c8bfa4f30 100644 --- a/docs/workspace/settings/credentials_manager.rst +++ b/docs/workspace/settings/credentials_manager.rst @@ -11,11 +11,13 @@ Exchange token. - Exchange tokens with an Identity Provider to get a new access token. It allowes specifying scopes to + Exchange tokens with an Identity Provider to get a new access token. It allows specifying scopes to determine token permissions. :param partition_id: :class:`PartitionId` + The partition of Credentials store :param token_type: List[:class:`TokenType`] + A list of token types being requested :param scopes: List[str] Array of scopes for the token request. diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst index 0395213d3..6a4bbf221 100644 --- a/docs/workspace/settings/settings.rst +++ b/docs/workspace/settings/settings.rst @@ -15,7 +15,7 @@ This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. - .. py:method:: delete_default_workspace_namespace(etag: str) -> DeleteDefaultWorkspaceNamespaceResponse + .. py:method:: delete_default_namespace_setting( [, etag: Optional[str]]) -> DeleteDefaultNamespaceSettingResponse Delete the default namespace setting. @@ -24,23 +24,42 @@ request. If the setting is updated/deleted concurrently, DELETE will fail with 409 and the request will need to be retried by using the fresh etag in the 409 response. - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. 
This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET request, and pass it with the DELETE request to identify the rule set version you are deleting. - :returns: :class:`DeleteDefaultWorkspaceNamespaceResponse` + :returns: :class:`DeleteDefaultNamespaceSettingResponse` - .. py:method:: read_default_workspace_namespace(etag: str) -> DefaultNamespaceSetting + .. py:method:: delete_restrict_workspace_admins_setting( [, etag: Optional[str]]) -> DeleteRestrictWorkspaceAdminsSettingResponse + + Delete the restrict workspace admins setting. + + Reverts the restrict workspace admins setting status for the workspace. A fresh etag needs to be + provided in DELETE requests (as a query parameter). The etag can be retrieved by making a GET request + before the DELETE request. If the setting is updated/deleted concurrently, DELETE will fail with 409 + and the request will need to be retried by using the fresh etag in the 409 response. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteRestrictWorkspaceAdminsSettingResponse` + + + .. py:method:: get_default_namespace_setting( [, etag: Optional[str]]) -> DefaultNamespaceSetting Get the default namespace setting. Gets the default namespace setting. - :param etag: str + :param etag: str (optional) etag used for versioning. The response is at least as fresh as the eTag provided. This is used for optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern @@ -50,7 +69,23 @@ :returns: :class:`DefaultNamespaceSetting` - .. py:method:: update_default_workspace_namespace( [, allow_missing: Optional[bool], field_mask: Optional[str], setting: Optional[DefaultNamespaceSetting]]) -> DefaultNamespaceSetting + .. py:method:: get_restrict_workspace_admins_setting( [, etag: Optional[str]]) -> RestrictWorkspaceAdminsSetting + + Get the restrict workspace admins setting. + + Gets the restrict workspace admins setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`RestrictWorkspaceAdminsSetting` + + + .. 
py:method:: update_default_namespace_setting(allow_missing: bool, setting: DefaultNamespaceSetting, field_mask: str) -> DefaultNamespaceSetting Update the default namespace setting. @@ -61,16 +96,9 @@ updated concurrently, PATCH will fail with 409 and the request will need to be retried by using the fresh etag in the 409 response. - :param allow_missing: bool (optional) + :param allow_missing: bool This should always be set to true for Settings API. Added for AIP compliance. - :param field_mask: str (optional) - Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the - setting payload will be updated. For example, for Default Namespace setting, the field mask is - supposed to contain fields from the DefaultNamespaceSetting.namespace schema. - - The field mask needs to be supplied as single string. To specify multiple fields in the field mask, - use comma as the seperator (no space). - :param setting: :class:`DefaultNamespaceSetting` (optional) + :param setting: :class:`DefaultNamespaceSetting` This represents the setting configuration for the default namespace in the Databricks workspace. Setting the default catalog for the workspace determines the catalog that is used when queries do not reference a fully qualified 3 level name. For example, if the default catalog is set to @@ -78,6 +106,30 @@ 'retail_prod.default.myTable' (the schema 'default' is always assumed). This setting requires a restart of clusters and SQL warehouses to take effect. Additionally, the default namespace only applies when using Unity Catalog-enabled compute. + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). :returns: :class:`DefaultNamespaceSetting` + + + .. py:method:: update_restrict_workspace_admins_setting(allow_missing: bool, setting: RestrictWorkspaceAdminsSetting, field_mask: str) -> RestrictWorkspaceAdminsSetting + + Update the restrict workspace admins setting. + + Updates the restrict workspace admins setting for the workspace. A fresh etag needs to be provided in + PATCH requests (as part of the setting field). The etag can be retrieved by making a GET request + before the PATCH request. If the setting is updated concurrently, PATCH will fail with 409 and the + request will need to be retried by using the fresh etag in the 409 response. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`RestrictWorkspaceAdminsSetting` + :param field_mask: str + Field mask is required to be passed into the PATCH request. Field mask specifies which fields of the + setting payload will be updated. The field mask needs to be supplied as single string. To specify + multiple fields in the field mask, use comma as the separator (no space). + + :returns: :class:`RestrictWorkspaceAdminsSetting` \ No newline at end of file diff --git a/docs/workspace/sql/dashboards.rst b/docs/workspace/sql/dashboards.rst index 29cafb7cc..8d13bd686 100644 --- a/docs/workspace/sql/dashboards.rst +++ b/docs/workspace/sql/dashboards.rst @@ -123,6 +123,9 @@ Fetch a paginated list of dashboard objects. 
+ ### **Warning: Calling this API concurrently 10 or more times could result in throttling, service
+ degradation, or a temporary ban.**
+
:param order: :class:`ListOrder` (optional)
Name of dashboard attribute to order by.
:param page: int (optional)
diff --git a/docs/workspace/sql/query_history.rst b/docs/workspace/sql/query_history.rst
index c4c6ee9ae..6aacd3c78 100644
--- a/docs/workspace/sql/query_history.rst
+++ b/docs/workspace/sql/query_history.rst
@@ -34,7 +34,9 @@
:param max_results: int (optional)
Limit the number of results returned in one page. The default is 100.
:param page_token: str (optional)
- A token that can be used to get the next page of results.
+ A token that can be used to get the next page of results. The token can contain characters that
need to be encoded before using it in a URL. For example, the character '+' needs to be replaced by
%2B.

:returns: Iterator over :class:`QueryInfo`
\ No newline at end of file
diff --git a/docs/workspace/vectorsearch/vector_search_indexes.rst b/docs/workspace/vectorsearch/vector_search_indexes.rst
index f79335e4f..051a81df6 100644
--- a/docs/workspace/vectorsearch/vector_search_indexes.rst
+++ b/docs/workspace/vectorsearch/vector_search_indexes.rst
@@ -12,7 +12,7 @@
Delta Table changes. * **Direct Vector Access Index**: An index that supports direct read and write of
vectors and metadata through our REST and SDK APIs. With this model, the user manages index updates.

- .. py:method:: create_index(name: str, primary_key: str, index_type: VectorIndexType [, delta_sync_vector_index_spec: Optional[DeltaSyncVectorIndexSpecRequest], direct_access_index_spec: Optional[DirectAccessVectorIndexSpec], endpoint_name: Optional[str]]) -> CreateVectorIndexResponse
+ .. py:method:: create_index(name: str, primary_key: str, index_type: VectorIndexType [, delta_sync_index_spec: Optional[DeltaSyncVectorIndexSpecRequest], direct_access_index_spec: Optional[DirectAccessVectorIndexSpec], endpoint_name: Optional[str]]) -> CreateVectorIndexResponse

Create an index.

@@ -29,7 +29,7 @@
incrementally updating the index as the underlying data in the Delta Table changes. -
`DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our
REST and SDK APIs. With this model, the user manages index updates.

- :param delta_sync_vector_index_spec: :class:`DeltaSyncVectorIndexSpecRequest` (optional)
+ :param delta_sync_index_spec: :class:`DeltaSyncVectorIndexSpecRequest` (optional)
Specification for Delta Sync Index. Required if `index_type` is `DELTA_SYNC`.
:param direct_access_index_spec: :class:`DirectAccessVectorIndexSpec` (optional)
Specification for Direct Vector Access Index. Required if `index_type` is `DIRECT_ACCESS`.
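The renamed settings methods documented above all follow the same etag-based optimistic concurrency scheme: read the setting to obtain a fresh etag, send that etag back with the PATCH or DELETE, and on a 409 retry with the etag from the conflict response. A minimal sketch of that flow for the default namespace setting, built from the signatures in this patch; the `namespace.value` field mask, the `StringMessage` payload, and the `ResourceConflict` retry handling are illustrative assumptions rather than part of the generated docs.

from databricks.sdk import WorkspaceClient
from databricks.sdk.errors import ResourceConflict
from databricks.sdk.service.settings import DefaultNamespaceSetting, StringMessage

w = WorkspaceClient()

# Read first: the response carries the etag used for optimistic concurrency control.
current = w.settings.get_default_namespace_setting()

desired = DefaultNamespaceSetting(etag=current.etag, namespace=StringMessage(value='main'))
try:
    updated = w.settings.update_default_namespace_setting(allow_missing=True,
                                                          setting=desired,
                                                          field_mask='namespace.value')
except ResourceConflict:
    # A concurrent write happened: re-read to pick up the fresh etag and retry once.
    desired.etag = w.settings.get_default_namespace_setting().etag
    updated = w.settings.update_default_namespace_setting(allow_missing=True,
                                                          setting=desired,
                                                          field_mask='namespace.value')

# DELETE takes the etag as a query parameter rather than in a setting payload.
w.settings.delete_default_namespace_setting(etag=updated.etag)

The restrict workspace admins setting exposes the same get/update/delete shape, so the identical read-then-write pattern applies there.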
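The new `page_token` note for query history matters mainly when a request URL is assembled by hand; the SDK's paginating iterator passes the token through as a query parameter for you. If you do need to place a raw token in a URL yourself, percent-encoding it is enough. A small illustrative snippet; the token value is hypothetical.

from urllib.parse import quote

raw_token = 'Ck0KJDYw+abc='          # hypothetical token containing '+'
encoded = quote(raw_token, safe='')  # '+' becomes %2B, '=' becomes %3D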
diff --git a/examples/clusters/change_owner_clusters_api_integration.py b/examples/clusters/change_owner_clusters_api_integration.py index bea93c1c3..6fd9a14c7 100755 --- a/examples/clusters/change_owner_clusters_api_integration.py +++ b/examples/clusters/change_owner_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/create_clusters_api_integration.py b/examples/clusters/create_clusters_api_integration.py index 9b6111a5f..48b31308c 100755 --- a/examples/clusters/create_clusters_api_integration.py +++ b/examples/clusters/create_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/delete_clusters_api_integration.py b/examples/clusters/delete_clusters_api_integration.py index 3f61f0fe2..5f096893b 100755 --- a/examples/clusters/delete_clusters_api_integration.py +++ b/examples/clusters/delete_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/edit_clusters_api_integration.py b/examples/clusters/edit_clusters_api_integration.py index d58ad65ea..625eae461 100755 --- a/examples/clusters/edit_clusters_api_integration.py +++ b/examples/clusters/edit_clusters_api_integration.py @@ -7,7 +7,7 @@ cluster_name = f'sdk-{time.time_ns()}' -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) clstr = w.clusters.create(cluster_name=cluster_name, spark_version=latest, diff --git a/examples/clusters/events_clusters_api_integration.py b/examples/clusters/events_clusters_api_integration.py index 971f520ea..3701a1737 100755 --- a/examples/clusters/events_clusters_api_integration.py +++ b/examples/clusters/events_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/get_clusters_api_integration.py b/examples/clusters/get_clusters_api_integration.py index 23e833cfa..718977dfb 100755 --- a/examples/clusters/get_clusters_api_integration.py +++ b/examples/clusters/get_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/pin_clusters_api_integration.py b/examples/clusters/pin_clusters_api_integration.py index b3b57097a..31f5610b6 100755 --- a/examples/clusters/pin_clusters_api_integration.py +++ b/examples/clusters/pin_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/resize_clusters_api_integration.py b/examples/clusters/resize_clusters_api_integration.py 
index 96ca09ad1..9711185ee 100755 --- a/examples/clusters/resize_clusters_api_integration.py +++ b/examples/clusters/resize_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/restart_clusters_api_integration.py b/examples/clusters/restart_clusters_api_integration.py index 9ddd8cd2b..b78b57b52 100755 --- a/examples/clusters/restart_clusters_api_integration.py +++ b/examples/clusters/restart_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/select_spark_version_clusters_api_integration.py b/examples/clusters/select_spark_version_clusters_api_integration.py index 28a16baa4..2ed55c9be 100755 --- a/examples/clusters/select_spark_version_clusters_api_integration.py +++ b/examples/clusters/select_spark_version_clusters_api_integration.py @@ -2,4 +2,4 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) diff --git a/examples/clusters/start_clusters_api_integration.py b/examples/clusters/start_clusters_api_integration.py index 5870b68a0..68594ba16 100755 --- a/examples/clusters/start_clusters_api_integration.py +++ b/examples/clusters/start_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/clusters/unpin_clusters_api_integration.py b/examples/clusters/unpin_clusters_api_integration.py index d671a0e53..c4c5df5d7 100755 --- a/examples/clusters/unpin_clusters_api_integration.py +++ b/examples/clusters/unpin_clusters_api_integration.py @@ -5,7 +5,7 @@ w = WorkspaceClient() -latest = w.clusters.select_spark_version(latest=True) +latest = w.clusters.select_spark_version(latest=True, long_term_support=True) cluster_name = f'sdk-{time.time_ns()}' diff --git a/examples/connections/get_connections.py b/examples/connections/get_connections.py index 4ab7b0108..ebfba8b95 100755 --- a/examples/connections/get_connections.py +++ b/examples/connections/get_connections.py @@ -17,8 +17,7 @@ f'sdk-{time.time_ns()}', }) -conn_update = w.connections.update(name=conn_create.name, - name_arg=conn_create.name, +conn_update = w.connections.update(name_arg=conn_create.name, options={ "host": "%s-fake-workspace.cloud.databricks.com" % (f'sdk-{time.time_ns()}'), diff --git a/examples/connections/update_connections.py b/examples/connections/update_connections.py index 32d96ee4d..f404feff7 100755 --- a/examples/connections/update_connections.py +++ b/examples/connections/update_connections.py @@ -17,8 +17,7 @@ f'sdk-{time.time_ns()}', }) -conn_update = w.connections.update(name=conn_create.name, - name_arg=conn_create.name, +conn_update = w.connections.update(name_arg=conn_create.name, options={ "host": "%s-fake-workspace.cloud.databricks.com" % (f'sdk-{time.time_ns()}'), diff --git a/examples/metastores/update_metastores.py b/examples/metastores/update_metastores.py index 8ac70371c..f7152b552 100755 --- a/examples/metastores/update_metastores.py +++ 
b/examples/metastores/update_metastores.py @@ -9,7 +9,7 @@ storage_root="s3://%s/%s" % (os.environ["TEST_BUCKET"], f'sdk-{time.time_ns()}')) -_ = w.metastores.update(id=created.metastore_id, name=f'sdk-{time.time_ns()}') +_ = w.metastores.update(id=created.metastore_id, new_name=f'sdk-{time.time_ns()}') # cleanup w.metastores.delete(id=created.metastore_id, force=True) diff --git a/examples/r/wait_catalog_workspace_bindings.py b/examples/r/wait_catalog_workspace_bindings.py new file mode 100755 index 000000000..1352ed169 --- /dev/null +++ b/examples/r/wait_catalog_workspace_bindings.py @@ -0,0 +1,5 @@ +from databricks.sdk import WorkspaceClient + +w = WorkspaceClient() + +w.r.wait(update_function)
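Every cluster example touched in this patch now pins the runtime with select_spark_version(latest=True, long_term_support=True) before creating a cluster and blocking on the returned waiter; the generated wait_catalog_workspace_bindings.py stub above only hints at that waiter pattern (update_function is left undefined). A condensed sketch of the pattern the cluster examples rely on; node_type_id, autotermination_minutes, and the timeout are illustrative choices, not values from this patch.

import time
from datetime import timedelta

from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# Latest long-term-support Spark runtime, as the updated examples now request.
latest = w.clusters.select_spark_version(latest=True, long_term_support=True)

# create() returns a long-running-operation waiter; .result() blocks until the cluster is running.
clstr = w.clusters.create(cluster_name=f'sdk-{time.time_ns()}',
                          spark_version=latest,
                          node_type_id='i3.xlarge',        # illustrative node type; pick per cloud
                          autotermination_minutes=15,
                          num_workers=1).result(timeout=timedelta(minutes=20))

# cleanup
w.clusters.permanent_delete(cluster_id=clstr.cluster_id)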