Skip to content

Commit

Permalink
🙈 Make it work with Python SDK v0.5.0 (#79)
Browse files Browse the repository at this point in the history
  • Loading branch information
nfx authored Aug 11, 2023
1 parent 4aa3ee3 commit 8d6b487
Show file tree
Hide file tree
Showing 4 changed files with 29 additions and 31 deletions.
3 changes: 2 additions & 1 deletion src/uc_migration_toolkit/managers/group.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ def _get_clean_group_info(group: Group, cleanup_keys: list[str] | None = None) -
return group_info

def _get_group(self, group_name, level: GroupLevel) -> Group | None:
# TODO: calling this can cause issues for SCIM backend, cache groups instead
method = self._ws.groups.list if level == GroupLevel.WORKSPACE else self._ws.list_account_level_groups
query_filter = f"displayName eq '{group_name}'"
attributes = ",".join(["id", "displayName", "meta", "entitlements", "roles", "members"])
Expand All @@ -77,7 +78,7 @@ def _get_or_create_backup_group(self, source_group_name: str, source_group: Grou
logger.info(f"Creating backup group {backup_group_name}")
new_group_payload = self._get_clean_group_info(source_group)
new_group_payload["displayName"] = backup_group_name
backup_group = self._ws.groups.create(request=Group.from_dict(new_group_payload))
backup_group = self._ws.groups.create(**new_group_payload)
logger.info(f"Backup group {backup_group_name} successfully created")

return backup_group
Expand Down
4 changes: 2 additions & 2 deletions src/uc_migration_toolkit/managers/inventory/inventorizer.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,15 +125,15 @@ def __init__(self, ws: ImprovedWorkspaceClient):

def _preload_tokens(self):
    """Fetch the workspace token-authorization ACL.

    Returns a list of access-control entries; on failure returns an empty
    list (best-effort — the error is logged, not raised).
    """
    try:
        # NOTE(review): the SDK's `access_control_list` attribute may be None
        # when no ACL is set; normalize to [] so callers can always iterate,
        # matching the pre-SDK behavior of `.get("access_control_list", [])`
        # and the empty-list fallback of the except path below.
        return self._ws.token_management.get_token_permissions().access_control_list or []
    except DatabricksError as e:
        # Best-effort: continue inventorization with an empty ACL.
        logger.warning("Cannot load token permissions due to error:")
        logger.warning(e)
        return []

def _preload_passwords(self):
try:
return self._ws.get_passwords().get("access_control_list", [])
return self._ws.users.get_password_permissions().access_control_list
except DatabricksError as e:
logger.error("Cannot load password permissions due to error:")
logger.error(e)
Expand Down
20 changes: 10 additions & 10 deletions src/uc_migration_toolkit/providers/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,19 @@ class ImprovedWorkspaceClient(WorkspaceClient):
@sleep_and_retry
@limits(calls=5, period=1)  # assumption
def assign_permissions(self, principal_id: str, permissions: list[str]):
    """Grant workspace permission assignments to a principal via the preview REST API."""
    # TODO: add OpenAPI spec for it
    payload = json.dumps({"permissions": permissions})
    path = f"/api/2.0/preview/permissionassignments/principals/{principal_id}"
    self.api_client.do("put", path, data=payload)

@sleep_and_retry
@limits(calls=10, period=1)  # assumption
def patch_workspace_group(self, group_id: str, payload: dict):
    """Apply a raw SCIM PATCH document to a workspace-level group."""
    # TODO: replace usages with the typed SDK call once it fits, e.g.:
    #   self.groups.patch(group_id,
    #                     schemas=[PatchSchema.URN_IETF_PARAMS_SCIM_API_MESSAGES_2_0_PATCH_OP],
    #                     operations=[Patch(op=PatchOp.ADD, path='..', value='...')])
    url = f"/api/2.0/preview/scim/v2/Groups/{group_id}"
    body = json.dumps(payload)
    self.api_client.do("PATCH", url, data=body)

Expand All @@ -34,6 +41,7 @@ def patch_workspace_group(self, group_id: str, payload: dict):
def list_account_level_groups(
    self, filter: str, attributes: str | None = None, excluded_attributes: str | None = None  # noqa: A002
) -> list[Group]:
    """Return account-level SCIM groups matching *filter*, as `Group` objects."""
    # TODO: move to other places, this won't be in SDK
    params = {
        "filter": filter,
        "attributes": attributes,
        "excludedAttributes": excluded_attributes,
    }
    raw = self.api_client.do("get", "/api/2.0/account/scim/v2/Groups", query=params)
    resources = raw.get("Resources", [])
    return [Group.from_dict(item) for item in resources]
Expand All @@ -43,19 +51,10 @@ def reflect_account_group_to_workspace(self, acc_group: Group) -> None:
self.assign_permissions(principal_id=acc_group.id, permissions=["USER"])
logger.info(f"Group {acc_group.display_name} successfully reflected to workspace")

@sleep_and_retry
@limits(calls=100, period=1)  # assumption
def get_tokens(self):
    # Raw REST call for the workspace token-authorization ACL.
    # NOTE(review): removed in this commit in favor of the SDK's
    # token_management.get_token_permissions() (see inventorizer.py hunk).
    return self.api_client.do("GET", "/api/2.0/preview/permissions/authorization/tokens")

@sleep_and_retry
@limits(calls=100, period=1)  # assumption
def get_passwords(self):
    # Raw REST call for the workspace password-authorization ACL.
    # NOTE(review): removed in this commit in favor of the SDK's
    # users.get_password_permissions() (see inventorizer.py hunk).
    return self.api_client.do("GET", "/api/2.0/preview/permissions/authorization/passwords")

@sleep_and_retry
@limits(calls=45, period=1)  # safety value, can be 50 actually
def list_workspace(self, path: str) -> Iterator[ObjectType]:
    """Non-recursively list workspace objects under *path*, rate-limited."""
    # TODO: remove, use SDK
    listing = self.workspace.list(path=path, recursive=False)
    return listing

@sleep_and_retry
Expand All @@ -78,6 +77,7 @@ def update_permissions(
)

def apply_roles_and_entitlements(self, group_id: str, roles: list, entitlements: list):
# TODO: move to other places, this won't be in SDK
op_schema = "urn:ietf:params:scim:api:messages:2.0:PatchOp"
schemas = []
operations = []
Expand Down
33 changes: 15 additions & 18 deletions tests/integration/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
ClusterDetails,
CreateInstancePoolResponse,
CreatePolicyResponse,
DataSecurityMode,
RuntimeEngine,
)
from databricks.sdk.service.iam import AccessControlRequest, PermissionLevel
from databricks.sdk.service.jobs import CreateResponse
Expand All @@ -42,7 +44,7 @@
from uc_migration_toolkit.managers.inventory.types import RequestObjectType
from uc_migration_toolkit.providers.client import ImprovedWorkspaceClient
from uc_migration_toolkit.providers.logger import logger
from uc_migration_toolkit.utils import Request, ThreadedExecution
from uc_migration_toolkit.utils import ThreadedExecution

from .utils import (
EnvironmentInfo,
Expand Down Expand Up @@ -139,23 +141,18 @@ def dbconnect_cluster_id(ws: ImprovedWorkspaceClient) -> str:
return dbc_cluster.cluster_id

logger.debug("Creating a cluster for integration testing")
spark_version = ws.clusters.select_spark_version(latest=True)
request = {
"cluster_name": DB_CONNECT_CLUSTER_NAME,
"spark_version": spark_version,
"instance_pool_id": os.environ["TEST_INSTANCE_POOL_ID"],
"driver_instance_pool_id": os.environ["TEST_INSTANCE_POOL_ID"],
"num_workers": 0,
"spark_conf": {"spark.master": "local[*, 4]", "spark.databricks.cluster.profile": "singleNode"},
"custom_tags": {
"ResourceClass": "SingleNode",
},
"data_security_mode": "SINGLE_USER",
"autotermination_minutes": 180,
"runtime_engine": "PHOTON",
}

dbc_cluster = ws.clusters.create(spark_version=spark_version, request=Request(request))
dbc_cluster = ws.clusters.create(
spark_version=ws.clusters.select_spark_version(latest=True),
cluster_name=DB_CONNECT_CLUSTER_NAME,
instance_pool_id=os.environ["TEST_INSTANCE_POOL_ID"],
driver_node_type_id=os.environ["TEST_INSTANCE_POOL_ID"],
num_workers=0,
spark_conf={"spark.master": "local[*, 4]", "spark.databricks.cluster.profile": "singleNode"},
custom_tags={"ResourceClass": "SingleNode"},
data_security_mode=DataSecurityMode.SINGLE_USER,
autotermination_minutes=60,
runtime_engine=RuntimeEngine.PHOTON,
)
logger.debug(f"Cluster {dbc_cluster.cluster_id} created")

# TODO: pre-create the cluster in the test infra
Expand Down

0 comments on commit 8d6b487

Please sign in to comment.