 * With cluster policies, you can: - Auto-install cluster libraries on the next restart by
- * listing them in the policy's "libraries" field. - Limit users to creating clusters with the
- * prescribed settings. - Simplify the user interface, enabling more users to create clusters, by
- * fixing and hiding some fields. - Manage costs by setting limits on attributes that impact the
- * hourly rate.
+ * listing them in the policy's "libraries" field (Public Preview). - Limit users to creating
+ * clusters with the prescribed settings. - Simplify the user interface, enabling more users to
+ * create clusters, by fixing and hiding some fields. - Manage costs by setting limits on
+ * attributes that impact the hourly rate.
 *
 * Cluster policy permissions limit which policies a user can select in the Policy drop-down
 * when the user creates a cluster: - A user who has unrestricted cluster create permission can
@@ -693,6 +697,20 @@ public JobsAPI jobs() {
 return jobsAPI;
 }
+
+ /**
+ * A monitor computes and monitors data or model quality metrics for a table over time. It
+ * generates metrics tables and a dashboard that you can use to monitor table health and set
+ * alerts.
+ *
+ * Most write operations require the user to be the owner of the table (or its parent schema or
+ * parent catalog). Viewing the dashboard, computed metrics, or monitor configuration only
+ * requires the user to have **SELECT** privileges on the table (along with **USE_SCHEMA** and
+ * **USE_CATALOG**).
+ */
+ public LakehouseMonitorsAPI lakehouseMonitors() {
+ return lakehouseMonitorsAPI;
+ }
+
/**
* These APIs provide specific management operations for Lakeview dashboards. Generic resource
* management can be done with Workspace API (import, export, get-status, list, delete).
@@ -1480,6 +1498,12 @@ public WorkspaceClient withJobsImpl(JobsService jobs) {
return this;
}
+ /** Replace LakehouseMonitorsAPI implementation with mock */
+ public WorkspaceClient withLakehouseMonitorsImpl(LakehouseMonitorsService lakehouseMonitors) {
+ lakehouseMonitorsAPI = new LakehouseMonitorsAPI(lakehouseMonitors);
+ return this;
+ }
+
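
The new `withLakehouseMonitorsImpl` hook mirrors the existing `with*Impl` methods, so unit tests can swap the generated implementation for a test double. A minimal sketch, assuming Mockito is available on the test classpath and ambient Databricks auth configuration:

```java
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.catalog.CreateMonitor;
import com.databricks.sdk.service.catalog.LakehouseMonitorsService;
import com.databricks.sdk.service.catalog.MonitorInfo;

// Stub the service so no HTTP call is made.
LakehouseMonitorsService lakehouseMonitors = mock(LakehouseMonitorsService.class);
when(lakehouseMonitors.create(any(CreateMonitor.class))).thenReturn(new MonitorInfo());

WorkspaceClient w = new WorkspaceClient().withLakehouseMonitorsImpl(lakehouseMonitors);
```

Any hand-rolled `LakehouseMonitorsService` implementation works the same way.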
/** Replace LakeviewAPI implementation with mock */
public WorkspaceClient withLakeviewImpl(LakeviewService lakeview) {
lakeviewAPI = new LakeviewAPI(lakeview);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/UserAgent.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/UserAgent.java
index 514b5ac82..875dd24a8 100644
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/UserAgent.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/UserAgent.java
@@ -13,7 +13,7 @@ public class UserAgent {
// TODO: check if reading from
// /META-INF/maven/com.databricks/databrics-sdk-java/pom.properties
// or getClass().getPackage().getImplementationVersion() is enough.
- private static final String version = "0.16.0";
+ private static final String version = "0.17.0";
public static void withProduct(String product, String productVersion) {
UserAgent.product = product;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateMonitor.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateMonitor.java
new file mode 100755
index 000000000..b690ab23b
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateMonitor.java
@@ -0,0 +1,269 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+@Generated
+public class CreateMonitor {
+ /** The directory to store monitoring assets (e.g. dashboard, metric tables). */
+ @JsonProperty("assets_dir")
+ private String assetsDir;
+
+ /**
+ * Name of the baseline table from which drift metrics are computed. Columns in the monitored
+ * table should also be present in the baseline table.
+ */
+ @JsonProperty("baseline_table_name")
+ private String baselineTableName;
+
+ /**
+ * Custom metrics to compute on the monitored table. These can be aggregate metrics, derived
+ * metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across
+ * time windows).
+ */
+ @JsonProperty("custom_metrics")
+ private Collection<MonitorCustomMetric> customMetrics;

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsAPI.java
new file mode 100755
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsAPI.java
+/**
+ * A monitor computes and monitors data or model quality metrics for a table over time. It
+ * generates metrics tables and a dashboard that you can use to monitor table health and set
+ * alerts.
+ *
+ * Most write operations require the user to be the owner of the table (or its parent schema or
+ * parent catalog). Viewing the dashboard, computed metrics, or monitor configuration only requires
+ * the user to have **SELECT** privileges on the table (along with **USE_SCHEMA** and
+ * **USE_CATALOG**).
+ */
+@Generated
+public class LakehouseMonitorsAPI {
+ private static final Logger LOG = LoggerFactory.getLogger(LakehouseMonitorsAPI.class);
+
+ private final LakehouseMonitorsService impl;
+
+ /** Regular-use constructor */
+ public LakehouseMonitorsAPI(ApiClient apiClient) {
+ impl = new LakehouseMonitorsImpl(apiClient);
+ }
+
+ /** Constructor for mocks */
+ public LakehouseMonitorsAPI(LakehouseMonitorsService mock) {
+ impl = mock;
+ }
+
+ public MonitorInfo create(String fullName, String assetsDir, String outputSchemaName) {
+ return create(
+ new CreateMonitor()
+ .setFullName(fullName)
+ .setAssetsDir(assetsDir)
+ .setOutputSchemaName(outputSchemaName));
+ }
+
+ /**
+ * Create a table monitor.
+ *
+ * Creates a new monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog, have **USE_SCHEMA** on
+ * the table's parent schema, and have **SELECT** access on the table 2. have **USE_CATALOG** on
+ * the table's parent catalog, be an owner of the table's parent schema, and have **SELECT**
+ * access on the table. 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ *
+ * Workspace assets, such as the dashboard, will be created in the workspace where this call
+ * was made.
+ */
+ public MonitorInfo create(CreateMonitor request) {
+ return impl.create(request);
+ }
+
+ public void delete(String fullName) {
+ delete(new DeleteLakehouseMonitorRequest().setFullName(fullName));
+ }
+
+ /**
+ * Delete a table monitor.
+ *
+ * Deletes a monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table.
+ *
+ * Additionally, the call must be made from the workspace where the monitor was created.
+ *
+ * Note that the metric tables and dashboard will not be deleted as part of this call; those
+ * assets must be manually cleaned up (if desired).
+ */
+ public void delete(DeleteLakehouseMonitorRequest request) {
+ impl.delete(request);
+ }
+
+ public MonitorInfo get(String fullName) {
+ return get(new GetLakehouseMonitorRequest().setFullName(fullName));
+ }
+
+ /**
+ * Get a table monitor.
+ *
+ * Gets a monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema. 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - **SELECT** privilege on the table.
+ *
+ * The returned information includes configuration values, as well as information on assets
+ * created by the monitor. Some information (e.g., dashboard) may be filtered out if the caller is
+ * in a different workspace than where the monitor was created.
+ */
+ public MonitorInfo get(GetLakehouseMonitorRequest request) {
+ return impl.get(request);
+ }
+
+ public MonitorInfo update(String fullName, String assetsDir, String outputSchemaName) {
+ return update(
+ new UpdateMonitor()
+ .setFullName(fullName)
+ .setAssetsDir(assetsDir)
+ .setOutputSchemaName(outputSchemaName));
+ }
+
+ /**
+ * Update a table monitor.
+ *
+ * Updates a monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table.
+ *
+ * Additionally, the call must be made from the workspace where the monitor was created, and
+ * the caller must be the original creator of the monitor.
+ *
+ * Certain configuration fields, such as output asset identifiers, cannot be updated.
+ */
+ public MonitorInfo update(UpdateMonitor request) {
+ return impl.update(request);
+ }
+
+ public LakehouseMonitorsService impl() {
+ return impl;
+ }
+}
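
Putting the convenience overloads together, a typical round trip against this API might look like the following sketch; the table name, assets directory, and output schema are placeholders:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.catalog.MonitorInfo;

WorkspaceClient w = new WorkspaceClient();

// Create a monitor on a Unity Catalog table; metric tables go to the output
// schema and the dashboard lands under the assets directory.
MonitorInfo created =
    w.lakehouseMonitors()
        .create("main.sales.orders", "/Users/someone@example.com/monitoring", "main.sales");

// Read back the configuration; asset details may be filtered out when called
// from a workspace other than the one that created the monitor.
MonitorInfo current = w.lakehouseMonitors().get("main.sales.orders");

// Deleting the monitor leaves metric tables and the dashboard behind for manual cleanup.
w.lakehouseMonitors().delete("main.sales.orders");
```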
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsImpl.java
new file mode 100755
index 000000000..5f885fdf3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsImpl.java
@@ -0,0 +1,50 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.core.ApiClient;
+import com.databricks.sdk.support.Generated;
+import java.util.HashMap;
+import java.util.Map;
+
+/** Package-local implementation of LakehouseMonitors */
+@Generated
+class LakehouseMonitorsImpl implements LakehouseMonitorsService {
+ private final ApiClient apiClient;
+
+ public LakehouseMonitorsImpl(ApiClient apiClient) {
+ this.apiClient = apiClient;
+ }
+
+ @Override
+ public MonitorInfo create(CreateMonitor request) {
+ String path = String.format("/api/2.1/unity-catalog/tables/%s/monitor", request.getFullName());
+ Map<String, String> headers = new HashMap<>();
+ headers.put("Accept", "application/json");
+ headers.put("Content-Type", "application/json");
+ return apiClient.POST(path, request, MonitorInfo.class, headers);
+ }

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsService.java
new file mode 100755
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/LakehouseMonitorsService.java
+/**
+ * A monitor computes and monitors data or model quality metrics for a table over time. It
+ * generates metrics tables and a dashboard that you can use to monitor table health and set
+ * alerts.
+ *
+ * Most write operations require the user to be the owner of the table (or its parent schema or
+ * parent catalog). Viewing the dashboard, computed metrics, or monitor configuration only requires
+ * the user to have **SELECT** privileges on the table (along with **USE_SCHEMA** and
+ * **USE_CATALOG**).
+ *
+ * This is the high-level interface that contains generated methods.
+ *
+ * Evolving: this interface is under development. Method signatures may change.
+ */
+@Generated
+public interface LakehouseMonitorsService {
+ /**
+ * Create a table monitor.
+ *
+ * Creates a new monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog, have **USE_SCHEMA** on
+ * the table's parent schema, and have **SELECT** access on the table 2. have **USE_CATALOG** on
+ * the table's parent catalog, be an owner of the table's parent schema, and have **SELECT**
+ * access on the table. 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ *
+ * Workspace assets, such as the dashboard, will be created in the workspace where this call
+ * was made.
+ */
+ MonitorInfo create(CreateMonitor createMonitor);
+
+ /**
+ * Delete a table monitor.
+ *
+ * Deletes a monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table.
+ *
+ * Additionally, the call must be made from the workspace where the monitor was created.
+ *
+ * Note that the metric tables and dashboard will not be deleted as part of this call; those
+ * assets must be manually cleaned up (if desired).
+ */
+ void delete(DeleteLakehouseMonitorRequest deleteLakehouseMonitorRequest);
+
+ /**
+ * Get a table monitor.
+ *
+ * Gets a monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema. 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - **SELECT** privilege on the table.
+ *
+ * The returned information includes configuration values, as well as information on assets
+ * created by the monitor. Some information (e.g., dashboard) may be filtered out if the caller is
+ * in a different workspace than where the monitor was created.
+ */
+ MonitorInfo get(GetLakehouseMonitorRequest getLakehouseMonitorRequest);
+
+ /**
+ * Update a table monitor.
+ *
+ * Updates a monitor for the specified table.
+ *
+ * The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table.
+ *
+ * Additionally, the call must be made from the workspace where the monitor was created, and
+ * the caller must be the original creator of the monitor.
+ *
+ * Certain configuration fields, such as output asset identifiers, cannot be updated.
+ */
+ MonitorInfo update(UpdateMonitor updateMonitor);
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCronSchedule.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCronSchedule.java
new file mode 100755
index 000000000..1c355bd46
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCronSchedule.java
@@ -0,0 +1,74 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class MonitorCronSchedule {
+ /** Whether the schedule is paused or not */
+ @JsonProperty("pause_status")
+ private MonitorCronSchedulePauseStatus pauseStatus;
+
+ /** A cron expression using quartz syntax that describes the schedule for a job. */
+ @JsonProperty("quartz_cron_expression")
+ private String quartzCronExpression;
+
+ /** A Java timezone id. The schedule for a job will be resolved with respect to this timezone. */
+ @JsonProperty("timezone_id")
+ private String timezoneId;
+
+ public MonitorCronSchedule setPauseStatus(MonitorCronSchedulePauseStatus pauseStatus) {
+ this.pauseStatus = pauseStatus;
+ return this;
+ }
+
+ public MonitorCronSchedulePauseStatus getPauseStatus() {
+ return pauseStatus;
+ }
+
+ public MonitorCronSchedule setQuartzCronExpression(String quartzCronExpression) {
+ this.quartzCronExpression = quartzCronExpression;
+ return this;
+ }
+
+ public String getQuartzCronExpression() {
+ return quartzCronExpression;
+ }
+
+ public MonitorCronSchedule setTimezoneId(String timezoneId) {
+ this.timezoneId = timezoneId;
+ return this;
+ }
+
+ public String getTimezoneId() {
+ return timezoneId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ MonitorCronSchedule that = (MonitorCronSchedule) o;
+ return Objects.equals(pauseStatus, that.pauseStatus)
+ && Objects.equals(quartzCronExpression, that.quartzCronExpression)
+ && Objects.equals(timezoneId, that.timezoneId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(pauseStatus, quartzCronExpression, timezoneId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(MonitorCronSchedule.class)
+ .add("pauseStatus", pauseStatus)
+ .add("quartzCronExpression", quartzCronExpression)
+ .add("timezoneId", timezoneId)
+ .toString();
+ }
+}
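
As an illustration, a schedule that refreshes a monitor daily at 06:00 UTC could be built as below; the cron expression and timezone are placeholders, and attaching the schedule to a monitor would presumably go through a corresponding field on `CreateMonitor`/`UpdateMonitor` that is elided from this excerpt:

```java
import com.databricks.sdk.service.catalog.MonitorCronSchedule;
import com.databricks.sdk.service.catalog.MonitorCronSchedulePauseStatus;

// Quartz cron syntax: seconds minutes hours day-of-month month day-of-week
MonitorCronSchedule schedule =
    new MonitorCronSchedule()
        .setQuartzCronExpression("0 0 6 * * ?")
        .setTimezoneId("UTC")
        .setPauseStatus(MonitorCronSchedulePauseStatus.UNPAUSED);
```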
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCronSchedulePauseStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCronSchedulePauseStatus.java
new file mode 100755
index 000000000..5b59f5385
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCronSchedulePauseStatus.java
@@ -0,0 +1,12 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+
+/** Whether the schedule is paused or not */
+@Generated
+public enum MonitorCronSchedulePauseStatus {
+ PAUSED,
+ UNPAUSED,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCustomMetric.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCustomMetric.java
new file mode 100755
index 000000000..2167187ba
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MonitorCustomMetric.java
@@ -0,0 +1,111 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+@Generated
+public class MonitorCustomMetric {
+ /**
+ * Jinja template for a SQL expression that specifies how to compute the metric. See [create
+ * metric definition].
+ *
+ * [create metric definition]:
+ * https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition
+ */
+ @JsonProperty("definition")
+ private String definition;
+
+ /** Columns on the monitored table to apply the custom metrics to. */
+ @JsonProperty("input_columns")
+ private Collection<String> inputColumns;

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesAPI.java
+ /**
+ * Gets if a table exists in the metastore for a specific catalog and schema. The caller must
+ * satisfy one of the following requirements: * Be a metastore admin * Be the owner of the parent
+ * catalog * Be the owner of the parent schema and have the USE_CATALOG privilege on the parent
+ * catalog * Have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+ * privilege on the parent schema, and either be the table owner or have the SELECT privilege on
+ * the table. * Have BROWSE privilege on the parent catalog * Have BROWSE privilege on the parent
+ * schema.
+ */
+ public TableExistsResponse exists(ExistsRequest request) {
+ return impl.exists(request);
+ }
+
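
A sketch of calling the new existence check; `getTableExists()` is an assumed accessor name, since the body of `TableExistsResponse` is not part of this diff excerpt:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.catalog.ExistsRequest;
import com.databricks.sdk.service.catalog.TableExistsResponse;

WorkspaceClient w = new WorkspaceClient();

TableExistsResponse response =
    w.tables().exists(new ExistsRequest().setFullName("main.sales.orders"));

// getTableExists() is assumed here; note that BROWSE-level callers can check
// existence without holding SELECT on the table itself.
if (Boolean.TRUE.equals(response.getTableExists())) {
  System.out.println("table exists and is visible to the caller");
}
```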
public TableInfo get(String fullName) {
return get(new GetTableRequest().setFullName(fullName));
}
@@ -56,10 +75,11 @@ public TableInfo get(String fullName) {
/**
* Get a table.
*
- * Gets a table from the metastore for a specific catalog and schema. The caller must be a
- * metastore admin, be the owner of the table and have the **USE_CATALOG** privilege on the parent
- * catalog and the **USE_SCHEMA** privilege on the parent schema, or be the owner of the table and
- * have the **SELECT** privilege on it as well.
+ * Gets a table from the metastore for a specific catalog and schema. The caller must satisfy
+ * one of the following requirements: * Be a metastore admin * Be the owner of the parent catalog
+ * * Be the owner of the parent schema and have the USE_CATALOG privilege on the parent catalog *
+ * Have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on
+ * the parent schema, and either be the table owner or have the SELECT privilege on the table.
*/
public TableInfo get(GetTableRequest request) {
return impl.get(request);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesImpl.java
index 53b043863..3da035105 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesImpl.java
@@ -23,6 +23,14 @@ public void delete(DeleteTableRequest request) {
apiClient.DELETE(path, request, Void.class, headers);
}
+ @Override
+ public TableExistsResponse exists(ExistsRequest request) {
+ String path = String.format("/api/2.1/unity-catalog/tables/%s/exists", request.getFullName());
+ Map<String, String> headers = new HashMap<>();
+ headers.put("Accept", "application/json");
+ return apiClient.GET(path, request, TableExistsResponse.class, headers);
+ }

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesService.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TablesService.java
+ /**
+ * Gets if a table exists in the metastore for a specific catalog and schema. The caller must
+ * satisfy one of the following requirements: * Be a metastore admin * Be the owner of the parent
+ * catalog * Be the owner of the parent schema and have the USE_CATALOG privilege on the parent
+ * catalog * Have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA**
+ * privilege on the parent schema, and either be the table owner or have the SELECT privilege on
+ * the table. * Have BROWSE privilege on the parent catalog * Have BROWSE privilege on the parent
+ * schema.
+ */
+ TableExistsResponse exists(ExistsRequest existsRequest);
+
/**
* Get a table.
*
- * Gets a table from the metastore for a specific catalog and schema. The caller must be a
- * metastore admin, be the owner of the table and have the **USE_CATALOG** privilege on the parent
- * catalog and the **USE_SCHEMA** privilege on the parent schema, or be the owner of the table and
- * have the **SELECT** privilege on it as well.
+ * Gets a table from the metastore for a specific catalog and schema. The caller must satisfy
+ * one of the following requirements: * Be a metastore admin * Be the owner of the parent catalog
+ * * Be the owner of the parent schema and have the USE_CATALOG privilege on the parent catalog *
+ * Have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on
+ * the parent schema, and either be the table owner or have the SELECT privilege on the table.
*/
TableInfo get(GetTableRequest getTableRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateMonitor.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateMonitor.java
new file mode 100755
index 000000000..e4475bb2e
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateMonitor.java
@@ -0,0 +1,234 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+@Generated
+public class UpdateMonitor {
+ /** The directory to store monitoring assets (e.g. dashboard, metric tables). */
+ @JsonProperty("assets_dir")
+ private String assetsDir;
+
+ /**
+ * Name of the baseline table from which drift metrics are computed. Columns in the monitored
+ * table should also be present in the baseline table.
+ */
+ @JsonProperty("baseline_table_name")
+ private String baselineTableName;
+
+ /**
+ * Custom metrics to compute on the monitored table. These can be aggregate metrics, derived
+ * metrics (from already computed aggregate metrics), or drift metrics (comparing metrics across
+ * time windows).
+ */
+ @JsonProperty("custom_metrics")
+ private Collection<MonitorCustomMetric> customMetrics;

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesAPI.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesAPI.java
 * With cluster policies, you can: - Auto-install cluster libraries on the next restart by
- * listing them in the policy's "libraries" field. - Limit users to creating clusters with the
- * prescribed settings. - Simplify the user interface, enabling more users to create clusters, by
- * fixing and hiding some fields. - Manage costs by setting limits on attributes that impact the
- * hourly rate.
+ * listing them in the policy's "libraries" field (Public Preview). - Limit users to creating
+ * clusters with the prescribed settings. - Simplify the user interface, enabling more users to
+ * create clusters, by fixing and hiding some fields. - Manage costs by setting limits on attributes
+ * that impact the hourly rate.
*
* Cluster policy permissions limit which policies a user can select in the Policy drop-down when
* the user creates a cluster: - A user who has unrestricted cluster create permission can select
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesService.java
index 5cb83f298..10654b89f 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPoliciesService.java
@@ -9,10 +9,10 @@
* creation. Cluster policies have ACLs that limit their use to specific users and groups.
*
* With cluster policies, you can: - Auto-install cluster libraries on the next restart by
- * listing them in the policy's "libraries" field. - Limit users to creating clusters with the
- * prescribed settings. - Simplify the user interface, enabling more users to create clusters, by
- * fixing and hiding some fields. - Manage costs by setting limits on attributes that impact the
- * hourly rate.
+ * listing them in the policy's "libraries" field (Public Preview). - Limit users to creating
+ * clusters with the prescribed settings. - Simplify the user interface, enabling more users to
+ * create clusters, by fixing and hiding some fields. - Manage costs by setting limits on attributes
+ * that impact the hourly rate.
*
* Cluster policy permissions limit which policies a user can select in the Policy drop-down when
* the user creates a cluster: - A user who has unrestricted cluster create permission can select
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPolicyAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPolicyAccessControlRequest.java
index aef055912..651b6e1bb 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPolicyAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterPolicyAccessControlRequest.java
@@ -17,10 +17,7 @@ public class ClusterPolicyAccessControlRequest {
@JsonProperty("permission_level")
private ClusterPolicyPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersAPI.java
index 6670e979d..6bf0d8e66 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersAPI.java
@@ -147,7 +147,9 @@ public void changeOwner(String clusterId, String ownerUsername) {
/**
* Change cluster owner.
*
- * Change the owner of the cluster. You must be an admin to perform this operation.
+ * Change the owner of the cluster. You must be an admin and the cluster must be terminated to
+ * perform this operation. The service principal application ID can be supplied as an argument to
+ * `owner_username`.
*/
public void changeOwner(ChangeClusterOwner request) {
impl.changeOwner(request);
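
For example, handing a terminated cluster over to a service principal might look like the sketch below; both IDs are placeholders:

```java
import com.databricks.sdk.WorkspaceClient;

WorkspaceClient w = new WorkspaceClient();

// Caller must be an admin, and the cluster must already be terminated.
// A service principal's application ID is accepted where a username is expected.
w.clusters().changeOwner("0123-456789-abcdef", "5a2b1c3d-1111-2222-3333-444455556666");
```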
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersService.java
index c47c7d30e..33158349d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClustersService.java
@@ -36,7 +36,9 @@ public interface ClustersService {
/**
* Change cluster owner.
*
- * Change the owner of the cluster. You must be an admin to perform this operation.
+ * Change the owner of the cluster. You must be an admin and the cluster must be terminated to
+ * perform this operation. The service principal application ID can be supplied as an argument to
+ * `owner_username`.
*/
void changeOwner(ChangeClusterOwner changeClusterOwner);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreatePolicy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreatePolicy.java
index 8de3541cf..8b44ccd98 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreatePolicy.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreatePolicy.java
@@ -23,7 +23,10 @@ public class CreatePolicy {
@JsonProperty("description")
private String description;
- /** A list of libraries to be installed on the next cluster restart that uses this policy. */
+ /**
+ * A list of libraries to be installed on the next cluster restart that uses this policy. The
+ * maximum number of libraries is 500.
+ */
@JsonProperty("libraries")
 private Collection<Library> libraries;

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsAPI.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsAPI.java
 * Get a list of all global init scripts for this workspace. This returns all properties for
* each script but **not** the script contents. To retrieve the contents of a script, use the [get
- * a global init script](#operation/get-script) operation.
+ * a global init script](:method:globalinitscripts/get) operation.
*/
 public Iterable<GlobalInitScriptDetails> list() {

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsService.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GlobalInitScriptsService.java
 * Get a list of all global init scripts for this workspace. This returns all properties for
* each script but **not** the script contents. To retrieve the contents of a script, use the [get
- * a global init script](#operation/get-script) operation.
+ * a global init script](:method:globalinitscripts/get) operation.
*/
ListGlobalInitScriptsResponse list();
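
Because `list()` returns metadata without script bodies, fetching contents takes one `get()` call per script. A sketch, with the detail accessor names (`getScriptId()`, `getName()`, `getScript()`) assumed from the generated-getter convention:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.compute.GlobalInitScriptDetails;

WorkspaceClient w = new WorkspaceClient();

for (GlobalInitScriptDetails details : w.globalInitScripts().list()) {
  // get() returns the contents that list() deliberately omits; the script
  // body comes back base64-encoded.
  String base64Contents = w.globalInitScripts().get(details.getScriptId()).getScript();
  System.out.println(details.getName() + ": " + base64Contents.length() + " base64 chars");
}
```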
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAccessControlRequest.java
index 79c7449ad..94ea72be1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAccessControlRequest.java
@@ -17,10 +17,7 @@ public class InstancePoolAccessControlRequest {
@JsonProperty("permission_level")
private InstancePoolPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Policy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Policy.java
index fff3fb236..de6167066 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Policy.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Policy.java
@@ -41,7 +41,10 @@ public class Policy {
@JsonProperty("is_default")
private Boolean isDefault;
- /** A list of libraries to be installed on the next cluster restart that uses this policy. */
+ /**
+ * A list of libraries to be installed on the next cluster restart that uses this policy. The
+ * maximum number of libraries is 500.
+ */
@JsonProperty("libraries")
 private Collection<Library> libraries;

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsAPI.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsAPI.java
 * Appends a block of data to the stream specified by the input handle. If the handle does not
- * exist, this call will throw an exception with `RESOURCE_DOES_NOT_EXIST`.
+ * exist, this call will throw an exception with ``RESOURCE_DOES_NOT_EXIST``.
*
* If the block of data exceeds 1 MB, this call will throw an exception with
- * `MAX_BLOCK_SIZE_EXCEEDED`.
+ * ``MAX_BLOCK_SIZE_EXCEEDED``.
*/
public void addBlock(AddBlock request) {
impl.addBlock(request);
@@ -51,7 +51,7 @@ public void close(long handle) {
* Close the stream.
*
* Closes the stream specified by the input handle. If the handle does not exist, this call
- * throws an exception with `RESOURCE_DOES_NOT_EXIST`.
+ * throws an exception with ``RESOURCE_DOES_NOT_EXIST``.
*/
public void close(Close request) {
impl.close(request);
@@ -66,12 +66,13 @@ public CreateResponse create(String path) {
*
* Opens a stream to write to a file and returns a handle to this stream. There is a 10 minute
* idle timeout on this handle. If a file or directory already exists on the given path and
- * __overwrite__ is set to `false`, this call throws an exception with `RESOURCE_ALREADY_EXISTS`.
+ * __overwrite__ is set to false, this call will throw an exception with
+ * ``RESOURCE_ALREADY_EXISTS``.
*
* A typical workflow for file upload would be:
*
- * 1. Issue a `create` call and get a handle. 2. Issue one or more `add-block` calls with the
- * handle you have. 3. Issue a `close` call with the handle you have.
+ * 1. Issue a ``create`` call and get a handle. 2. Issue one or more ``add-block`` calls with
+ * the handle you have. 3. Issue a ``close`` call with the handle you have.
*/
public CreateResponse create(Create request) {
return impl.create(request);
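
The three-step upload workflow described above, as a compact sketch; the path and payload are placeholders, and the request setters follow the generated builder convention:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.files.AddBlock;
import com.databricks.sdk.service.files.Create;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

WorkspaceClient w = new WorkspaceClient();

// 1. create: open a write stream and keep the handle (10 minute idle timeout).
long handle =
    w.dbfs().create(new Create().setPath("/tmp/upload-example.txt").setOverwrite(true)).getHandle();

// 2. add-block: data is base64-encoded, and each block must stay under 1 MB.
String block = Base64.getEncoder().encodeToString("hello, dbfs".getBytes(StandardCharsets.UTF_8));
w.dbfs().addBlock(new AddBlock().setHandle(handle).setData(block));

// 3. close: release the handle.
w.dbfs().close(handle);
```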
@@ -165,7 +166,7 @@ public void move(String sourcePath, String destinationPath) {
* Moves a file from one location to another location within DBFS. If the source file does not
* exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. If a file already exists
* in the destination path, this call throws an exception with `RESOURCE_ALREADY_EXISTS`. If the
- * given source path is a directory, this call always recursively moves all files.",
+ * given source path is a directory, this call always recursively moves all files.
*/
public void move(Move request) {
impl.move(request);
@@ -206,7 +207,7 @@ public ReadResponse read(String path) {
* read length exceeds 1 MB, this call throws an exception with `MAX_READ_SIZE_EXCEEDED`.
*
* If `offset + length` exceeds the number of bytes in a file, it reads the contents until the
- * end of file.",
+ * end of file.
*/
public ReadResponse read(ReadDbfsRequest request) {
return impl.read(request);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsService.java
index 36aace200..007d6eba3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/DbfsService.java
@@ -17,10 +17,10 @@ public interface DbfsService {
* Append data block.
*
* Appends a block of data to the stream specified by the input handle. If the handle does not
- * exist, this call will throw an exception with `RESOURCE_DOES_NOT_EXIST`.
+ * exist, this call will throw an exception with ``RESOURCE_DOES_NOT_EXIST``.
*
* If the block of data exceeds 1 MB, this call will throw an exception with
- * `MAX_BLOCK_SIZE_EXCEEDED`.
+ * ``MAX_BLOCK_SIZE_EXCEEDED``.
*/
void addBlock(AddBlock addBlock);
@@ -28,7 +28,7 @@ public interface DbfsService {
* Close the stream.
*
* Closes the stream specified by the input handle. If the handle does not exist, this call
- * throws an exception with `RESOURCE_DOES_NOT_EXIST`.
+ * throws an exception with ``RESOURCE_DOES_NOT_EXIST``.
*/
void close(Close close);
@@ -37,12 +37,13 @@ public interface DbfsService {
*
* Opens a stream to write to a file and returns a handle to this stream. There is a 10 minute
* idle timeout on this handle. If a file or directory already exists on the given path and
- * __overwrite__ is set to `false`, this call throws an exception with `RESOURCE_ALREADY_EXISTS`.
+ * __overwrite__ is set to false, this call will throw an exception with
+ * ``RESOURCE_ALREADY_EXISTS``.
*
* A typical workflow for file upload would be:
*
- * 1. Issue a `create` call and get a handle. 2. Issue one or more `add-block` calls with the
- * handle you have. 3. Issue a `close` call with the handle you have.
+ * 1. Issue a ``create`` call and get a handle. 2. Issue one or more ``add-block`` calls with
+ * the handle you have. 3. Issue a ``close`` call with the handle you have.
*/
CreateResponse create(Create create);
@@ -106,7 +107,7 @@ public interface DbfsService {
* Moves a file from one location to another location within DBFS. If the source file does not
* exist, this call throws an exception with `RESOURCE_DOES_NOT_EXIST`. If a file already exists
* in the destination path, this call throws an exception with `RESOURCE_ALREADY_EXISTS`. If the
- * given source path is a directory, this call always recursively moves all files.",
+ * given source path is a directory, this call always recursively moves all files.
*/
void move(Move move);
@@ -135,7 +136,7 @@ public interface DbfsService {
* read length exceeds 1 MB, this call throws an exception with `MAX_READ_SIZE_EXCEEDED`.
*
* If `offset + length` exceeds the number of bytes in a file, it reads the contents until the
- * end of file.",
+ * end of file.
*/
ReadResponse read(ReadDbfsRequest readDbfsRequest);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/ReadResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/ReadResponse.java
index 3eec214ab..97a5b3dd0 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/ReadResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/ReadResponse.java
@@ -10,7 +10,7 @@
@Generated
public class ReadResponse {
/**
- * The number of bytes read (could be less than `length` if we hit end of file). This refers to
+ * The number of bytes read (could be less than ``length`` if we hit end of file). This refers to
* number of bytes read in unencoded version (response data is base64-encoded).
*/
@JsonProperty("bytes_read")
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccessControlRequest.java
index 7db3e07d0..2e96227dc 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccessControlRequest.java
@@ -17,10 +17,7 @@ public class AccessControlRequest {
@JsonProperty("permission_level")
private PermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/PasswordAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/PasswordAccessControlRequest.java
index c0508f67c..b9558a0c7 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/PasswordAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/PasswordAccessControlRequest.java
@@ -17,10 +17,7 @@ public class PasswordAccessControlRequest {
@JsonProperty("permission_level")
private PasswordPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobAccessControlRequest.java
index 3bce29c82..d2e6553d1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/JobAccessControlRequest.java
@@ -17,10 +17,7 @@ public class JobAccessControlRequest {
@JsonProperty("permission_level")
private JobPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java
index 486a5bfee..5c471fbe8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/SubmitTask.java
@@ -35,8 +35,9 @@ public class SubmitTask {
/**
* If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task.
- * When running tasks on an existing cluster, you may need to manually restart the cluster if it
- * stops responding. We suggest running jobs on new clusters for greater reliability.
+ * Only all-purpose clusters are supported. When running tasks on an existing cluster, you may
+ * need to manually restart the cluster if it stops responding. We suggest running jobs on new
+ * clusters for greater reliability.
*/
@JsonProperty("existing_cluster_id")
private String existingClusterId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java
index b87453fd9..600c0e7a7 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/Task.java
@@ -54,8 +54,9 @@ public class Task {
/**
* If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task.
- * When running tasks on an existing cluster, you may need to manually restart the cluster if it
- * stops responding. We suggest running jobs on new clusters for greater reliability.
+ * Only all-purpose clusters are supported. When running tasks on an existing cluster, you may
+ * need to manually restart the cluster if it stops responding. We suggest running jobs on new
+ * clusters for greater reliability.
*/
@JsonProperty("existing_cluster_id")
private String existingClusterId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentAccessControlRequest.java
index 906deda04..250fa534d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentAccessControlRequest.java
@@ -17,10 +17,7 @@ public class ExperimentAccessControlRequest {
@JsonProperty("permission_level")
private ExperimentPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsAPI.java
index 51e9342fb..112ab4bd8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsAPI.java
@@ -96,7 +96,9 @@ public DeleteRunsResponse deleteRuns(String experimentId, long maxTimestampMilli
* Delete runs by creation time.
*
* Bulk delete runs in an experiment that were created prior to or at the specified timestamp.
- * Deletes at most max_runs per request.
+ * Deletes at most max_runs per request. To call this API from a Databricks Notebook in Python,
+ * you can use the client code snippet on
+ * https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete.
*/
public DeleteRunsResponse deleteRuns(DeleteRuns request) {
return impl.deleteRuns(request);
@@ -388,7 +390,9 @@ public RestoreRunsResponse restoreRuns(String experimentId, long minTimestampMil
* Restore runs by deletion time.
*
* Bulk restore runs in an experiment that were deleted no earlier than the specified
- * timestamp. Restores at most max_runs per request.
+ * timestamp. Restores at most max_runs per request. To call this API from a Databricks Notebook
+ * in Python, you can use the client code snippet on
+ * https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore.
*/
public RestoreRunsResponse restoreRuns(RestoreRuns request) {
return impl.restoreRuns(request);
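
For instance, pruning runs created more than 30 days ago could look like this sketch; the experiment ID is a placeholder:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.ml.DeleteRunsResponse;
import java.time.Duration;
import java.time.Instant;

WorkspaceClient w = new WorkspaceClient();

// Runs created at or before the cutoff are deleted, at most max_runs per call.
long cutoffMillis = Instant.now().minus(Duration.ofDays(30)).toEpochMilli();
DeleteRunsResponse response = w.experiments().deleteRuns("123456789", cutoffMillis);
```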
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsService.java
index 52d87067a..c09fe0503 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/ExperimentsService.java
@@ -57,7 +57,9 @@ public interface ExperimentsService {
* Delete runs by creation time.
*
* Bulk delete runs in an experiment that were created prior to or at the specified timestamp.
- * Deletes at most max_runs per request.
+ * Deletes at most max_runs per request. To call this API from a Databricks Notebook in Python,
+ * you can use the client code snippet on
+ * https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete.
*/
DeleteRunsResponse deleteRuns(DeleteRuns deleteRuns);
@@ -236,7 +238,9 @@ ExperimentPermissions getPermissions(
* Restore runs by deletion time.
*
* Bulk restore runs in an experiment that were deleted no earlier than the specified
- * timestamp. Restores at most max_runs per request.
+ * timestamp. Restores at most max_runs per request. To call this API from a Databricks Notebook
+ * in Python, you can use the client code snippet on
+ * https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore.
*/
RestoreRunsResponse restoreRuns(RestoreRuns restoreRuns);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/RegisteredModelAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/RegisteredModelAccessControlRequest.java
index bc6ba6c45..ec6ad3f55 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/RegisteredModelAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/ml/RegisteredModelAccessControlRequest.java
@@ -17,10 +17,7 @@ public class RegisteredModelAccessControlRequest {
@JsonProperty("permission_level")
private RegisteredModelPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineAccessControlRequest.java
index 1f421fb13..27b567277 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineAccessControlRequest.java
@@ -17,10 +17,7 @@ public class PipelineAccessControlRequest {
@JsonProperty("permission_level")
private PipelinePermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointAccessControlRequest.java
index 63893df68..a1c66a4f3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointAccessControlRequest.java
@@ -17,10 +17,7 @@ public class ServingEndpointAccessControlRequest {
@JsonProperty("permission_level")
private ServingEndpointPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenRequest.java
index 6f17a5acc..4ae7809b5 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenRequest.java
@@ -7,6 +7,7 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** Configuration details for creating on-behalf tokens. */
@Generated
public class CreateOboTokenRequest {
/** Application ID of the service principal. */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenResponse.java
index 8fd64ad96..2b9157a11 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CreateOboTokenResponse.java
@@ -7,6 +7,7 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** An on-behalf token was successfully created for the service principal. */
@Generated
public class CreateOboTokenResponse {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenResponse.java
new file mode 100755
index 000000000..2d4a5d80e
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenResponse.java
@@ -0,0 +1,43 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.settings;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** Token with specified Token ID was successfully returned. */
+@Generated
+public class GetTokenResponse {
+ /** */
+ @JsonProperty("token_info")
+ private TokenInfo tokenInfo;
+
+ public GetTokenResponse setTokenInfo(TokenInfo tokenInfo) {
+ this.tokenInfo = tokenInfo;
+ return this;
+ }
+
+ public TokenInfo getTokenInfo() {
+ return tokenInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GetTokenResponse that = (GetTokenResponse) o;
+ return Objects.equals(tokenInfo, that.tokenInfo);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(tokenInfo);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GetTokenResponse.class).add("tokenInfo", tokenInfo).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokenManagementRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokenManagementRequest.java
index 5d62ed32e..06734ded6 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokenManagementRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokenManagementRequest.java
@@ -12,18 +12,18 @@
public class ListTokenManagementRequest {
/** User ID of the user that created the token. */
@QueryParam("created_by_id")
- private String createdById;
+ private Long createdById;
/** Username of the user that created the token. */
@QueryParam("created_by_username")
private String createdByUsername;
- public ListTokenManagementRequest setCreatedById(String createdById) {
+ public ListTokenManagementRequest setCreatedById(Long createdById) {
this.createdById = createdById;
return this;
}
- public String getCreatedById() {
+ public Long getCreatedById() {
return createdById;
}
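
The `String` to `Long` switch is breaking for callers that filtered by creator ID; the filter now takes the numeric user ID, as in this sketch:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.settings.ListTokenManagementRequest;
import com.databricks.sdk.service.settings.TokenInfo;

WorkspaceClient w = new WorkspaceClient();

// created_by_id is now a Long (numeric user ID) rather than a String.
ListTokenManagementRequest filter = new ListTokenManagementRequest().setCreatedById(1234567890L);
for (TokenInfo token : w.tokenManagement().list(filter)) {
  System.out.println(token.getTokenId());
}
```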
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokensResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokensResponse.java
index a17db3bbf..09fa4602d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokensResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ListTokensResponse.java
@@ -8,9 +8,10 @@
import java.util.Collection;
import java.util.Objects;
+/** Tokens were successfully returned. */
@Generated
public class ListTokensResponse {
- /** */
+ /** Token metadata of each user-created token in the workspace */
@JsonProperty("token_infos")
 private Collection<TokenInfo> tokenInfos;

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementAPI.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementAPI.java
 * Gets information about a token, specified by its ID.
*/
- public TokenInfo get(GetTokenManagementRequest request) {
+ public GetTokenResponse get(GetTokenManagementRequest request) {
return impl.get(request);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementImpl.java
index eca3190ef..8008f7cd4 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementImpl.java
@@ -28,15 +28,16 @@ public CreateOboTokenResponse createOboToken(CreateOboTokenRequest request) {
public void delete(DeleteTokenManagementRequest request) {
String path = String.format("/api/2.0/token-management/tokens/%s", request.getTokenId());
 Map<String, String> headers = new HashMap<>();
 apiClient.DELETE(path, request, Void.class, headers);
 }

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementService.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenManagementService.java
 * Gets information about a token, specified by its ID.
*/
- TokenInfo get(GetTokenManagementRequest getTokenManagementRequest);
+ GetTokenResponse get(GetTokenManagementRequest getTokenManagementRequest);
/**
* Get token permission levels.
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesAPI.java
index 9a33683ec..20f0e15e2 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesAPI.java
@@ -76,6 +76,9 @@ public Query get(GetQueryRequest request) {
* Get a list of queries.
*
* Gets a list of queries. Optionally, this list can be filtered by a search term.
+ *
+ * ### **Warning: Calling this API concurrently 10 or more times could result in throttling,
+ * service degradation, or a temporary ban.**
*/
 public Iterable<Query> list(ListQueriesRequest request) {

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesService.java
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/QueriesService.java
 * Gets a list of queries. Optionally, this list can be filtered by a search term.
+ *
+ * ### **Warning: Calling this API concurrently 10 or more times could result in throttling,
+ * service degradation, or a temporary ban.**
*/
QueryList list(ListQueriesRequest listQueriesRequest);
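
Given the warning above, callers should iterate one paginated listing sequentially rather than fanning out concurrent calls. A sketch; `setQ` is assumed as the search-term filter setter:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.ListQueriesRequest;
import com.databricks.sdk.service.sql.Query;

WorkspaceClient w = new WorkspaceClient();

// One sequential iteration; avoid issuing 10+ concurrent list calls.
for (Query query : w.queries().list(new ListQueriesRequest().setQ("revenue"))) {
  System.out.println(query.getName());
}
```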
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseAccessControlRequest.java
index 594238f8c..1dc83f0f6 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseAccessControlRequest.java
@@ -17,10 +17,7 @@ public class WarehouseAccessControlRequest {
@JsonProperty("permission_level")
private WarehousePermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/RepoAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/RepoAccessControlRequest.java
index 9a34d3307..1e507c3d6 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/RepoAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/RepoAccessControlRequest.java
@@ -17,10 +17,7 @@ public class RepoAccessControlRequest {
@JsonProperty("permission_level")
private RepoPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/WorkspaceObjectAccessControlRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/WorkspaceObjectAccessControlRequest.java
index 3d6832dd3..edb8f3b3e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/WorkspaceObjectAccessControlRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/workspace/WorkspaceObjectAccessControlRequest.java
@@ -17,10 +17,7 @@ public class WorkspaceObjectAccessControlRequest {
@JsonProperty("permission_level")
private WorkspaceObjectPermissionLevel permissionLevel;
- /**
- * Application ID of an active service principal. Setting this field requires the
- * `servicePrincipal/user` role.
- */
+ /** application ID of a service principal */
@JsonProperty("service_principal_name")
private String servicePrincipalName;
diff --git a/examples/docs/pom.xml b/examples/docs/pom.xml
index 90661996d..7ed213e89 100644
--- a/examples/docs/pom.xml
+++ b/examples/docs/pom.xml
@@ -24,7 +24,7 @@