From ed9775a61261c6d8b160a5fe5a1a8902dc03fad2 Mon Sep 17 00:00:00 2001
From: Serge Smertin <259697+nfx@users.noreply.github.com>
Date: Thu, 16 Nov 2023 21:19:48 +0100
Subject: [PATCH] Added more views to assessment dashboard (#474)

This PR has the following improvements for views and tables:

- `assessment/queries` are renamed and refactored to more efficiently
fit the grid
- introduced `assessment/views` to hold common views that could be
reused by multiple commands and widgets.
- introduced `crawlers.SchemaDeployer`, that is (re-)creating views and
tables **upon installation** (and not on the assessment step).
- added `search_by` field for table widgets
- improved parallelism for fetching grants
- removed (now redundant) `setup_schema` and `setup_view` tasks from
`assessment` workflow
<img width="1981" alt="_UCX__serge_smertin__UCX_Assessment"
src="https://github.com/databrickslabs/ucx/assets/259697/7addb0bb-b301-47ea-b351-9b75cd0a5d9d">

<img width="1985" alt="_UCX__serge_smertin__UCX_Assessment"
src="https://github.com/databrickslabs/ucx/assets/259697/88effab7-59bb-46aa-af9b-bd9185d4f817">
---
 src/databricks/labs/ucx/framework/crawlers.py |  28 +++
 .../labs/ucx/framework/dashboards.py          |   5 +-
 .../labs/ucx/hive_metastore/grants.py         |  27 +--
 .../labs/ucx/hive_metastore/tables.py         |   8 +
 src/databricks/labs/ucx/install.py            |  59 ++++++-
 ....sql => 05_0_azure_service_principals.sql} |   2 +-
 .../assessment/main/00_0_compatibility.sql    |   6 +
 ...ses.sql => 00_1_count_total_databases.sql} |   2 +-
 ...ures.sql => 00_2_count_table_failures.sql} |   0
 .../main/00_3_count_external_locations.sql    |   4 +
 ...tables.sql => 00_4_count_total_tables.sql} |   2 +-
 .../assessment/main/01_0_object_readiness.sql |   9 +
 .../assessment/main/01_1_failure_summary.sql  |   7 +
 .../{all_tables.sql => 01_3_all_tables.sql}   |   8 +-
 .../assessment/main/02_0_database_summary.sql |  53 ++++++
 ...ations.sql => 03_0_external_locations.sql} |   2 +-
 ...mount_points.sql => 03_3_mount_points.sql} |   2 +-
 .../queries/assessment/main/04_0_clusters.sql |   9 +
 .../ucx/queries/assessment/main/04_3_jobs.sql |  10 ++
 .../main/05_3_global_init_scripts.sql         |   9 +
 .../assessment/main/06_0_pipelines.sql        |   8 +
 .../ucx/queries/assessment/main/README.md     |   6 +
 .../ucx/queries/assessment/main/clusters.sql  |   9 -
 .../assessment/main/database_summary.sql      |  37 ----
 .../assessment/main/failure_summary.sql       |  23 ---
 .../assessment/main/global_init_scripts.sql   |  11 --
 .../labs/ucx/queries/assessment/main/jobs.sql |   9 -
 .../ucx/queries/assessment/main/pipelines.sql |  10 --
 .../labs/ucx/queries/views/grant_detail.sql   |  32 ++++
 .../labs/ucx/queries/views/objects.sql        |  31 ++++
 src/databricks/labs/ucx/runtime.py            | 159 ++----------------
 tests/integration/framework/test_crawlers.py  |  11 ++
 tests/integration/test_installation.py        |   3 +
 tests/unit/test_install.py                    |   9 +-
 34 files changed, 336 insertions(+), 274 deletions(-)
 rename src/databricks/labs/ucx/queries/assessment/azure/{azure_service_principals.sql => 05_0_azure_service_principals.sql} (84%)
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/00_0_compatibility.sql
 rename src/databricks/labs/ucx/queries/assessment/main/{count_total_databases.sql => 00_1_count_total_databases.sql} (82%)
 rename src/databricks/labs/ucx/queries/assessment/main/{count_table_failures.sql => 00_2_count_table_failures.sql} (100%)
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/00_3_count_external_locations.sql
 rename src/databricks/labs/ucx/queries/assessment/main/{count_total_tables.sql => 00_4_count_total_tables.sql} (79%)
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/01_0_object_readiness.sql
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/01_1_failure_summary.sql
 rename src/databricks/labs/ucx/queries/assessment/main/{all_tables.sql => 01_3_all_tables.sql} (68%)
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/02_0_database_summary.sql
 rename src/databricks/labs/ucx/queries/assessment/main/{external_locations.sql => 03_0_external_locations.sql} (61%)
 rename src/databricks/labs/ucx/queries/assessment/main/{mount_points.sql => 03_3_mount_points.sql} (62%)
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/04_0_clusters.sql
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/04_3_jobs.sql
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/05_3_global_init_scripts.sql
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/06_0_pipelines.sql
 create mode 100644 src/databricks/labs/ucx/queries/assessment/main/README.md
 delete mode 100644 src/databricks/labs/ucx/queries/assessment/main/clusters.sql
 delete mode 100644 src/databricks/labs/ucx/queries/assessment/main/database_summary.sql
 delete mode 100644 src/databricks/labs/ucx/queries/assessment/main/failure_summary.sql
 delete mode 100644 src/databricks/labs/ucx/queries/assessment/main/global_init_scripts.sql
 delete mode 100644 src/databricks/labs/ucx/queries/assessment/main/jobs.sql
 delete mode 100644 src/databricks/labs/ucx/queries/assessment/main/pipelines.sql
 create mode 100644 src/databricks/labs/ucx/queries/views/grant_detail.sql
 create mode 100644 src/databricks/labs/ucx/queries/views/objects.sql
 create mode 100644 tests/integration/framework/test_crawlers.py

diff --git a/src/databricks/labs/ucx/framework/crawlers.py b/src/databricks/labs/ucx/framework/crawlers.py
index 54f7ff7a4c..faba664f54 100644
--- a/src/databricks/labs/ucx/framework/crawlers.py
+++ b/src/databricks/labs/ucx/framework/crawlers.py
@@ -1,6 +1,7 @@
 import dataclasses
 import logging
 import os
+import pkgutil
 from abc import ABC, abstractmethod
 from collections.abc import Iterator
 from typing import ClassVar
@@ -244,3 +245,30 @@ def _snapshot(self, fetcher, loader) -> list[any]:
     def _append_records(self, items):
         logger.debug(f"[{self._full_name}] found {len(items)} new records for {self._table}")
         self._backend.save_table(self._full_name, items, self._klass, mode="append")
+
+
+class SchemaDeployer:
+    def __init__(self, sql_backend: SqlBackend, inventory_schema: str, mod: any):
+        self._sql_backend = sql_backend
+        self._inventory_schema = inventory_schema
+        self._module = mod
+
+    def deploy_schema(self):
+        logger.info(f"Ensuring {self._inventory_schema} database exists")
+        self._sql_backend.execute(f"CREATE SCHEMA IF NOT EXISTS hive_metastore.{self._inventory_schema}")
+
+    def deploy_table(self, name: str, klass: type):
+        logger.info(f"Ensuring {self._inventory_schema}.{name} table exists")
+        self._sql_backend.create_table(f"hive_metastore.{self._inventory_schema}.{name}", klass)
+
+    def deploy_view(self, name: str, relative_filename: str):
+        query = self._load(relative_filename)
+        logger.info(f"Ensuring {self._inventory_schema}.{name} view matches {relative_filename} contents")
+        ddl = f"CREATE OR REPLACE VIEW hive_metastore.{self._inventory_schema}.{name} AS {query}"
+        self._sql_backend.execute(ddl)
+
+    def _load(self, relative_filename: str) -> str:
+        data = pkgutil.get_data(self._module.__name__, relative_filename)
+        sql = data.decode("utf-8")
+        sql = sql.replace("$inventory", f"hive_metastore.{self._inventory_schema}")
+        return sql
diff --git a/src/databricks/labs/ucx/framework/dashboards.py b/src/databricks/labs/ucx/framework/dashboards.py
index 4879b522e5..030c50a4b2 100644
--- a/src/databricks/labs/ucx/framework/dashboards.py
+++ b/src/databricks/labs/ucx/framework/dashboards.py
@@ -316,6 +316,7 @@ def _table_viz_args(
         condensed=True,
         with_row_number=False,
         description: str | None = None,
+        search_by: str | None = None,
     ) -> dict:
         return {
             "type": "TABLE",
@@ -326,7 +327,9 @@ def _table_viz_args(
                 "condensed": condensed,
                 "withRowNumber": with_row_number,
                 "version": 2,
-                "columns": [VizColumn(name=x, title=x).as_dict() for x in columns.split(",")],
+                "columns": [
+                    VizColumn(name=x, title=x, allowSearch=x == search_by).as_dict() for x in columns.split(",")
+                ],
             },
         }
 
diff --git a/src/databricks/labs/ucx/hive_metastore/grants.py b/src/databricks/labs/ucx/hive_metastore/grants.py
index 222d96843f..09d141987d 100644
--- a/src/databricks/labs/ucx/hive_metastore/grants.py
+++ b/src/databricks/labs/ucx/hive_metastore/grants.py
@@ -1,6 +1,5 @@
 import logging
 from collections import defaultdict
-from collections.abc import Iterator
 from dataclasses import dataclass
 from functools import partial
 
@@ -198,7 +197,7 @@ def _grants(
         view: str | None = None,
         any_file: bool = False,
         anonymous_function: bool = False,
-    ) -> Iterator[Grant]:
+    ) -> list[Grant]:
         """
         Fetches and yields grant information for the specified database objects.
 
@@ -239,6 +238,7 @@ def _grants(
             anonymous_function=anonymous_function,
         )
         try:
+            grants = []
             object_type_normalization = {"SCHEMA": "DATABASE", "CATALOG$": "CATALOG"}
             for row in self._fetch(f"SHOW GRANTS ON {on_type} {key}"):
                 (principal, action_type, object_type, _) = row
@@ -246,16 +246,21 @@ def _grants(
                     object_type = object_type_normalization[object_type]
                 if on_type != object_type:
                     continue
-                yield Grant(
-                    principal=principal,
-                    action_type=action_type,
-                    table=table,
-                    view=view,
-                    database=database,
-                    catalog=catalog,
-                    any_file=any_file,
-                    anonymous_function=anonymous_function,
+                # We have to return a concrete list here: with `yield` the
+                # generator would be consumed lazily on the main thread.
+                grants.append(
+                    Grant(
+                        principal=principal,
+                        action_type=action_type,
+                        table=table,
+                        view=view,
+                        database=database,
+                        catalog=catalog,
+                        any_file=any_file,
+                        anonymous_function=anonymous_function,
+                    )
                 )
+            return grants
         except Exception as e:
             # TODO: https://github.com/databrickslabs/ucx/issues/406
             logger.error(f"Couldn't fetch grants for object {on_type} {key}: {e}")
diff --git a/src/databricks/labs/ucx/hive_metastore/tables.py b/src/databricks/labs/ucx/hive_metastore/tables.py
index 89269c32c8..d2512739f0 100644
--- a/src/databricks/labs/ucx/hive_metastore/tables.py
+++ b/src/databricks/labs/ucx/hive_metastore/tables.py
@@ -75,6 +75,14 @@ def sql_alter_from(self, catalog):
         )
 
 
+@dataclass
+class TableError:
+    catalog: str
+    database: str
+    name: str = None
+    error: str = None
+
+
 class TablesCrawler(CrawlerBase):
     def __init__(self, backend: SqlBackend, schema):
         """
diff --git a/src/databricks/labs/ucx/install.py b/src/databricks/labs/ucx/install.py
index 4c14f26927..43bbed3e47 100644
--- a/src/databricks/labs/ucx/install.py
+++ b/src/databricks/labs/ucx/install.py
@@ -23,12 +23,30 @@
 from databricks.sdk.service.workspace import ImportFormat
 
 from databricks.labs.ucx.__about__ import __version__
+from databricks.labs.ucx.assessment.crawlers import (
+    AzureServicePrincipalInfo,
+    ClusterInfo,
+    GlobalInitScriptInfo,
+    JobInfo,
+    PipelineInfo,
+)
 from databricks.labs.ucx.config import GroupsConfig, WorkspaceConfig
+from databricks.labs.ucx.framework.crawlers import (
+    SchemaDeployer,
+    SqlBackend,
+    StatementExecutionBackend,
+)
 from databricks.labs.ucx.framework.dashboards import DashboardFromFiles
 from databricks.labs.ucx.framework.install_state import InstallState
 from databricks.labs.ucx.framework.tasks import _TASKS, Task
+from databricks.labs.ucx.hive_metastore.data_objects import ExternalLocation
+from databricks.labs.ucx.hive_metastore.grants import Grant
 from databricks.labs.ucx.hive_metastore.hms_lineage import HiveMetastoreLineageEnabler
+from databricks.labs.ucx.hive_metastore.mounts import Mount
+from databricks.labs.ucx.hive_metastore.tables import Table, TableError
 from databricks.labs.ucx.runtime import main
+from databricks.labs.ucx.workspace_access.base import Permissions
+from databricks.labs.ucx.workspace_access.generic import WorkspaceObjectInfo
 
 TAG_STEP = "step"
 TAG_APP = "App"
@@ -89,12 +107,36 @@
 logger = logging.getLogger(__name__)
 
 
+def deploy_schema(sql_backend: SqlBackend, inventory_schema: str):
+    from databricks.labs import ucx
+
+    deployer = SchemaDeployer(sql_backend, inventory_schema, ucx)
+    deployer.deploy_schema()
+    deployer.deploy_table("azure_service_principals", AzureServicePrincipalInfo)
+    deployer.deploy_table("clusters", ClusterInfo)
+    deployer.deploy_table("global_init_scripts", GlobalInitScriptInfo)
+    deployer.deploy_table("jobs", JobInfo)
+    deployer.deploy_table("pipelines", PipelineInfo)
+    deployer.deploy_table("external_locations", ExternalLocation)
+    deployer.deploy_table("mounts", Mount)
+    deployer.deploy_table("grants", Grant)
+    deployer.deploy_table("tables", Table)
+    deployer.deploy_table("table_failures", TableError)
+    deployer.deploy_table("workspace_objects", WorkspaceObjectInfo)
+    deployer.deploy_table("permissions", Permissions)
+    deployer.deploy_view("objects", "queries/views/objects.sql")
+    deployer.deploy_view("grant_detail", "queries/views/grant_detail.sql")
+
+
 class WorkspaceInstaller:
-    def __init__(self, ws: WorkspaceClient, *, prefix: str = "ucx", promtps: bool = True):
+    def __init__(
+        self, ws: WorkspaceClient, *, prefix: str = "ucx", promtps: bool = True, sql_backend: SqlBackend = None
+    ):
         if "DATABRICKS_RUNTIME_VERSION" in os.environ:
             msg = "WorkspaceInstaller is not supposed to be executed in Databricks Runtime"
             raise SystemExit(msg)
         self._ws = ws
+        self._sql_backend = sql_backend
         self._prefix = prefix
         self._prompts = promtps
         self._this_file = Path(__file__)
@@ -111,10 +153,16 @@ def _run_configured(self):
         self._install_spark_config_for_hms_lineage()
         self._create_dashboards()
         self._create_jobs()
+        self._create_database()
         readme = f'{self.notebook_link(f"{self._install_folder}/README.py")}'
         msg = f"Installation completed successfully! Please refer to the {readme} notebook for next steps."
         logger.info(msg)
 
+    def _create_database(self):
+        if self._sql_backend is None:
+            self._sql_backend = StatementExecutionBackend(self._ws, self._current_config.warehouse_id)
+        deploy_schema(self._sql_backend, self._current_config.inventory_database)
+
     def _install_spark_config_for_hms_lineage(self):
         hms_lineage = HiveMetastoreLineageEnabler(ws=self._ws)
         logger.info(
@@ -157,10 +205,15 @@ def _install_spark_config_for_hms_lineage(self):
 
     @staticmethod
     def run_for_config(
-        ws: WorkspaceClient, config: WorkspaceConfig, *, prefix="ucx", override_clusters: dict[str, str] | None = None
+        ws: WorkspaceClient,
+        config: WorkspaceConfig,
+        *,
+        prefix="ucx",
+        override_clusters: dict[str, str] | None = None,
+        sql_backend: SqlBackend = None,
     ) -> "WorkspaceInstaller":
         logger.info(f"Installing UCX v{__version__} on {ws.config.host}")
-        workspace_installer = WorkspaceInstaller(ws, prefix=prefix, promtps=False)
+        workspace_installer = WorkspaceInstaller(ws, prefix=prefix, promtps=False, sql_backend=sql_backend)
         logger.info(f"Installing UCX v{workspace_installer._version} on {ws.config.host}")
         workspace_installer._config = config
         workspace_installer._write_config()
diff --git a/src/databricks/labs/ucx/queries/assessment/azure/azure_service_principals.sql b/src/databricks/labs/ucx/queries/assessment/azure/05_0_azure_service_principals.sql
similarity index 84%
rename from src/databricks/labs/ucx/queries/assessment/azure/azure_service_principals.sql
rename to src/databricks/labs/ucx/queries/assessment/azure/05_0_azure_service_principals.sql
index 9729205148..5f6970ea13 100644
--- a/src/databricks/labs/ucx/queries/assessment/azure/azure_service_principals.sql
+++ b/src/databricks/labs/ucx/queries/assessment/azure/05_0_azure_service_principals.sql
@@ -1,5 +1,5 @@
 -- viz type=table, name=Azure Service Principals, columns=application_id,secret_scope,secret_key,tenant_id,storage_account
--- widget title=Azure Service Principals, col=0, row=49, size_x=6, size_y=8
+-- widget title=Azure Service Principals, row=0, col=0, size_x=3, size_y=8
 select
   application_id,
   if(secret_scope = '', "NA", secret_scope) secret_scope,
diff --git a/src/databricks/labs/ucx/queries/assessment/main/00_0_compatibility.sql b/src/databricks/labs/ucx/queries/assessment/main/00_0_compatibility.sql
new file mode 100644
index 0000000000..c855d6e8a0
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/00_0_compatibility.sql
@@ -0,0 +1,6 @@
+-- viz type=counter, name=Workspace UC readiness, counter_label=UC readiness, value_column=readiness
+-- widget row=0, col=0, size_x=1, size_y=3
+WITH raw AS (
+  SELECT object_type, object_id, IF(failures == '[]', 1, 0) AS ready FROM $inventory.objects
+)
+SELECT CONCAT(ROUND(SUM(ready) / COUNT(*) * 100, 1), '%') AS readiness FROM raw
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/count_total_databases.sql b/src/databricks/labs/ucx/queries/assessment/main/00_1_count_total_databases.sql
similarity index 82%
rename from src/databricks/labs/ucx/queries/assessment/main/count_total_databases.sql
rename to src/databricks/labs/ucx/queries/assessment/main/00_1_count_total_databases.sql
index fa431850b1..b518444242 100644
--- a/src/databricks/labs/ucx/queries/assessment/main/count_total_databases.sql
+++ b/src/databricks/labs/ucx/queries/assessment/main/00_1_count_total_databases.sql
@@ -1,4 +1,4 @@
 -- viz type=counter, name=Total Database Count, counter_label=Total Databases, value_column=count_total_databases
--- widget col=0, row=0, size_x=1, size_y=3
+-- widget row=0, col=1, size_x=1, size_y=3
 SELECT COUNT(DISTINCT `database`) AS count_total_databases
 FROM $inventory.tables
diff --git a/src/databricks/labs/ucx/queries/assessment/main/count_table_failures.sql b/src/databricks/labs/ucx/queries/assessment/main/00_2_count_table_failures.sql
similarity index 100%
rename from src/databricks/labs/ucx/queries/assessment/main/count_table_failures.sql
rename to src/databricks/labs/ucx/queries/assessment/main/00_2_count_table_failures.sql
diff --git a/src/databricks/labs/ucx/queries/assessment/main/00_3_count_external_locations.sql b/src/databricks/labs/ucx/queries/assessment/main/00_3_count_external_locations.sql
new file mode 100644
index 0000000000..cc97b60ee6
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/00_3_count_external_locations.sql
@@ -0,0 +1,4 @@
+-- viz type=counter, name=Storage Locations, counter_label=Storage Locations, value_column=count_total
+-- widget row=0, col=4, size_x=1, size_y=3
+SELECT count(*) AS count_total
+FROM $inventory.external_locations
diff --git a/src/databricks/labs/ucx/queries/assessment/main/count_total_tables.sql b/src/databricks/labs/ucx/queries/assessment/main/00_4_count_total_tables.sql
similarity index 79%
rename from src/databricks/labs/ucx/queries/assessment/main/count_total_tables.sql
rename to src/databricks/labs/ucx/queries/assessment/main/00_4_count_total_tables.sql
index a346849396..863725a0b1 100644
--- a/src/databricks/labs/ucx/queries/assessment/main/count_total_tables.sql
+++ b/src/databricks/labs/ucx/queries/assessment/main/00_4_count_total_tables.sql
@@ -1,4 +1,4 @@
 -- viz type=counter, name=Total Table Count, counter_label=Total Tables, value_column=count_total_tables
--- widget col=1, row=0, size_x=1, size_y=3
+-- widget row=0, col=3, size_x=1, size_y=3
 SELECT count(*) AS count_total_tables
 FROM $inventory.tables
diff --git a/src/databricks/labs/ucx/queries/assessment/main/01_0_object_readiness.sql b/src/databricks/labs/ucx/queries/assessment/main/01_0_object_readiness.sql
new file mode 100644
index 0000000000..271dd95556
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/01_0_object_readiness.sql
@@ -0,0 +1,9 @@
+-- viz type=table, name=Object Type Readiness, columns=object_type,readiness
+-- widget title=Readiness, row=1, col=0, size_x=1, size_y=8
+WITH raw AS (
+  SELECT object_type, object_id, IF(failures == '[]', 1, 0) AS ready FROM $inventory.objects
+)
+SELECT object_type, CONCAT(ROUND(SUM(ready) / COUNT(*) * 100, 1), '%') AS readiness
+FROM raw
+GROUP BY object_type
+ORDER BY readiness DESC
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/01_1_failure_summary.sql b/src/databricks/labs/ucx/queries/assessment/main/01_1_failure_summary.sql
new file mode 100644
index 0000000000..0658cec60b
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/01_1_failure_summary.sql
@@ -0,0 +1,7 @@
+-- viz type=table, name=Failure Summary, search_by=failure, columns=failure,count
+-- widget title=Failure Summary, row=1, col=1, size_x=2, size_y=8
+WITH raw AS (
+  SELECT EXPLODE(FROM_JSON(failures, 'array<string>')) AS failure FROM $inventory.objects WHERE failures <> '[]'
+)
+SELECT failure, COUNT(*) AS count FROM raw GROUP BY failure
+ORDER BY count DESC, failure DESC
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/all_tables.sql b/src/databricks/labs/ucx/queries/assessment/main/01_3_all_tables.sql
similarity index 68%
rename from src/databricks/labs/ucx/queries/assessment/main/all_tables.sql
rename to src/databricks/labs/ucx/queries/assessment/main/01_3_all_tables.sql
index b06bf3bd88..e83974e3c4 100644
--- a/src/databricks/labs/ucx/queries/assessment/main/all_tables.sql
+++ b/src/databricks/labs/ucx/queries/assessment/main/01_3_all_tables.sql
@@ -1,10 +1,8 @@
--- viz type=table, name=Table Types, columns=database,name,type,format,table_view,storage,is_delta,location
--- widget title=Table Types, col=0, row=3, size_x=6, size_y=6
-SELECT `database`,
-       name,
+-- viz type=table, name=Table Types, search_by=name, columns=name,type,format,storage,is_delta,location
+-- widget title=Table Types, row=1, col=3, size_x=3, size_y=8
+SELECT CONCAT(`database`, '.', name) AS name,
        object_type AS type,
        UPPER(table_format) AS format,
-       IF(object_type IN ("MANAGED", "EXTERNAL"), "TABLE", "VIEW") AS table_view,
        CASE
            WHEN STARTSWITH(location, "dbfs:/mnt") THEN "DBFS MOUNT"
            WHEN STARTSWITH(location, "/dbfs/mnt") THEN "DBFS MOUNT"
diff --git a/src/databricks/labs/ucx/queries/assessment/main/02_0_database_summary.sql b/src/databricks/labs/ucx/queries/assessment/main/02_0_database_summary.sql
new file mode 100644
index 0000000000..0ac93639ef
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/02_0_database_summary.sql
@@ -0,0 +1,53 @@
+-- viz type=table, name=Database Summary, search_by=database, columns=database,upgrade,tables,views,dbfs_root,delta_tables,total_grants,granted_principals,database_grants,table_grants,service_principal_grants,user_grants,group_grants
+-- widget title=Database Summary, col=0, row=2, size_x=6, size_y=8
+WITH table_stats AS (
+  SELECT
+    `database`,
+    object_type,
+    UPPER(table_format) AS `format`,
+    `location`,
+    IF(object_type IN ("MANAGED", "EXTERNAL"), 1, 0) AS is_table,
+    IF(object_type = "VIEW", 1, 0) AS is_view,
+    CASE
+        WHEN STARTSWITH(location, "dbfs:/") AND NOT STARTSWITH(location, "dbfs:/mnt") THEN 1
+        WHEN STARTSWITH(location, "/dbfs/") AND NOT STARTSWITH(location, "/dbfs/mnt") THEN 1
+        ELSE 0
+    END AS is_dbfs_root,
+    CASE
+        WHEN STARTSWITH(location, "wasb") THEN 1
+        WHEN STARTSWITH(location, "adl") THEN 1
+        ELSE 0
+    END AS is_unsupported,
+    IF(format = "DELTA", 1, 0) AS is_delta
+   FROM $inventory.tables
+), database_stats AS (
+  SELECT `database`,
+    CASE
+        WHEN SUM(is_table) = 0 AND SUM(is_view) > 0 THEN "View Migration Required"
+        WHEN SUM(is_dbfs_root)/SUM(is_table) > .3 THEN "Asset Replication Required"
+        WHEN SUM(is_delta)/SUM(is_table) < .7 THEN "Some Non Delta Assets"
+        WHEN SUM(is_unsupported)/SUM(is_table) > .7 THEN "Storage Migration Required"
+        ELSE "In Place Sync"
+    END AS upgrade,
+    SUM(is_table) AS tables,
+    SUM(is_view) AS views,
+    SUM(is_unsupported) AS unsupported,
+    SUM(is_dbfs_root) AS dbfs_root,
+    SUM(is_delta) AS delta_tables
+  FROM table_stats
+  GROUP BY `database`
+), grant_stats AS (
+  SELECT
+    `database`,
+    COUNT(*) AS total_grants,
+    COUNT(DISTINCT principal) AS granted_principals,
+    SUM(IF(object_type == 'DATABASE', 1, 0)) AS database_grants,
+    SUM(IF(object_type == 'TABLE', 1, 0)) AS table_grants,
+    SUM(IF(principal_type == 'service-principal', 1, 0)) AS service_principal_grants,
+    SUM(IF(principal_type == 'user', 1, 0)) AS user_grants,
+    SUM(IF(principal_type == 'group', 1, 0)) AS group_grants
+  FROM $inventory.grant_detail
+  GROUP BY `database`
+)
+SELECT * FROM database_stats FULL JOIN grant_stats USING (`database`)
+ORDER BY tables DESC
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/external_locations.sql b/src/databricks/labs/ucx/queries/assessment/main/03_0_external_locations.sql
similarity index 61%
rename from src/databricks/labs/ucx/queries/assessment/main/external_locations.sql
rename to src/databricks/labs/ucx/queries/assessment/main/03_0_external_locations.sql
index 139cf67c19..b3c6dc5c64 100644
--- a/src/databricks/labs/ucx/queries/assessment/main/external_locations.sql
+++ b/src/databricks/labs/ucx/queries/assessment/main/03_0_external_locations.sql
@@ -1,4 +1,4 @@
 -- viz type=table, name=External Locations, columns=location
--- widget title=External Locations, col=0, row=17, size_x=3, size_y=8
+-- widget title=External Locations, row=3, col=0, size_x=3, size_y=8
 SELECT location
 FROM $inventory.external_locations
diff --git a/src/databricks/labs/ucx/queries/assessment/main/mount_points.sql b/src/databricks/labs/ucx/queries/assessment/main/03_3_mount_points.sql
similarity index 62%
rename from src/databricks/labs/ucx/queries/assessment/main/mount_points.sql
rename to src/databricks/labs/ucx/queries/assessment/main/03_3_mount_points.sql
index e8df6a459d..236e675f88 100644
--- a/src/databricks/labs/ucx/queries/assessment/main/mount_points.sql
+++ b/src/databricks/labs/ucx/queries/assessment/main/03_3_mount_points.sql
@@ -1,5 +1,5 @@
 -- viz type=table, name=Mount Points, columns=name,source
--- widget title=Mount Points, col=3, row=17, size_x=3, size_y=8
+-- widget title=Mount Points, row=3, col=3, size_x=3, size_y=8
 SELECT name,
        source
 FROM $inventory.mounts
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/04_0_clusters.sql b/src/databricks/labs/ucx/queries/assessment/main/04_0_clusters.sql
new file mode 100644
index 0000000000..62f0a1ff46
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/04_0_clusters.sql
@@ -0,0 +1,9 @@
+-- viz type=table, name=Clusters, columns=failure,cluster_name,cluster_id,creator
+-- widget title=Incompatible clusters, row=4, col=0, size_x=3, size_y=8
+SELECT EXPLODE(FROM_JSON(failures, 'array<string>')) AS failure,
+    cluster_id,
+    cluster_name,
+    creator
+FROM $inventory.clusters
+WHERE NOT STARTSWITH(cluster_name, "job-")
+ORDER BY cluster_id DESC
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/04_3_jobs.sql b/src/databricks/labs/ucx/queries/assessment/main/04_3_jobs.sql
new file mode 100644
index 0000000000..09d81bdeea
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/04_3_jobs.sql
@@ -0,0 +1,10 @@
+-- viz type=table, name=Jobs, columns=failure,job_id,job_name,creator
+-- widget title=Incompatible jobs, row=4, col=3, size_x=3, size_y=8
+SELECT
+    EXPLODE(FROM_JSON(failures, 'array<string>')) AS failure,
+    job_id,
+    job_name,
+    creator
+FROM $inventory.jobs
+WHERE job_name not like '[UCX]%'
+ORDER BY job_id DESC
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/05_3_global_init_scripts.sql b/src/databricks/labs/ucx/queries/assessment/main/05_3_global_init_scripts.sql
new file mode 100644
index 0000000000..0fd91cc1eb
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/05_3_global_init_scripts.sql
@@ -0,0 +1,9 @@
+-- viz type=table, name=Global Init Scripts, columns=failure,script_name,created_by
+-- widget title=Incompatible Global Init Scripts, row=5, col=3, size_x=3, size_y=8
+SELECT
+    EXPLODE(FROM_JSON(failures, 'array<string>')) AS failure,
+    script_name,
+    created_by
+FROM
+  $inventory.global_init_scripts
+ORDER BY script_name DESC
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/06_0_pipelines.sql b/src/databricks/labs/ucx/queries/assessment/main/06_0_pipelines.sql
new file mode 100644
index 0000000000..3d80a63e88
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/06_0_pipelines.sql
@@ -0,0 +1,8 @@
+-- viz type=table, name=Pipelines, columns=failure,pipeline_name,creator_name
+-- widget title=Incompatible Delta Live Tables, row=6, col=0, size_x=3, size_y=8
+SELECT
+    EXPLODE(FROM_JSON(failures, 'array<string>')) AS failure,
+    pipeline_name,
+    creator_name
+FROM $inventory.pipelines
+ORDER BY pipeline_name DESC
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/README.md b/src/databricks/labs/ucx/queries/assessment/main/README.md
new file mode 100644
index 0000000000..f83e1e03c4
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/assessment/main/README.md
@@ -0,0 +1,6 @@
+# Naming convention
+
+All files in this directory follow the virtual grid of a dashboard:
+
+* total width is 6 columns
+* all files are named as `<row>_<column>_something.sql`
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/clusters.sql b/src/databricks/labs/ucx/queries/assessment/main/clusters.sql
deleted file mode 100644
index 98aed74b7c..0000000000
--- a/src/databricks/labs/ucx/queries/assessment/main/clusters.sql
+++ /dev/null
@@ -1,9 +0,0 @@
--- viz type=table, name=Clusters, columns=cluster_id,cluster_name,creator,compatible,failures
--- widget title=Clusters, col=0, row=25, size_x=6, size_y=8
-SELECT cluster_id,
-       cluster_name,
-       creator,
-       IF(success = 1, "Compatible", "Incompatible") AS compatible,
-       failures
-FROM $inventory.clusters
-WHERE NOT STARTSWITH(cluster_name, "job-")
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/database_summary.sql b/src/databricks/labs/ucx/queries/assessment/main/database_summary.sql
deleted file mode 100644
index 4166e35415..0000000000
--- a/src/databricks/labs/ucx/queries/assessment/main/database_summary.sql
+++ /dev/null
@@ -1,37 +0,0 @@
--- viz type=table, name=Database Summary, columns=database,tables,views,dbfs_root,delta_tables,upgrade
--- widget title=Database Summary, col=0, row=9, size_x=6, size_y=8
-SELECT `database`,
-       SUM(is_table) AS tables,
-       SUM(is_view) AS views,
-       SUM(is_unsupported) AS unsupported,
-       SUM(is_dbfs_root) AS dbfs_root,
-       SUM(is_delta) AS delta_tables,
-       CASE
-           WHEN SUM(is_table) = 0 AND SUM(is_view) > 0 THEN "View Migration Required"
-           WHEN SUM(is_dbfs_root)/SUM(is_table) > .3 THEN "Asset Replication Required"
-           WHEN SUM(is_delta)/SUM(is_table) < .7 THEN "Some Non Delta Assets"
-           WHEN SUM(is_unsupported)/SUM(is_table) > .7 THEN "Storage Migration Required"
-           ELSE "In Place Sync"
-       END AS upgrade
-FROM
-  (SELECT `database`,
-          name,
-          object_type,
-          UPPER(table_format) AS `format`,
-          `location`,
-          IF(object_type IN ("MANAGED", "EXTERNAL"), 1, 0) AS is_table,
-          IF(object_type = "VIEW", 1, 0) AS is_view,
-          CASE
-              WHEN STARTSWITH(location, "dbfs:/") AND NOT STARTSWITH(location, "dbfs:/mnt") THEN 1
-              WHEN STARTSWITH(location, "/dbfs/") AND NOT STARTSWITH(location, "/dbfs/mnt") THEN 1
-              ELSE 0
-          END AS is_dbfs_root,
-          CASE
-              WHEN STARTSWITH(location, "wasb") THEN 1
-              WHEN STARTSWITH(location, "adl") THEN 1
-              ELSE 0
-          END AS is_unsupported,
-          IF(format = "DELTA", 1, 0) AS is_delta
-   FROM $inventory.tables)
-GROUP BY `database`
-ORDER BY `database`
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/failure_summary.sql b/src/databricks/labs/ucx/queries/assessment/main/failure_summary.sql
deleted file mode 100644
index 8f032b73e3..0000000000
--- a/src/databricks/labs/ucx/queries/assessment/main/failure_summary.sql
+++ /dev/null
@@ -1,23 +0,0 @@
--- viz type=table, name=Failure Summary, columns=issue,object_type,issue_count
--- widget title=Failure Summary, col=0, row=65, size_x=6, size_y=8
-SELECT
-  issue,
-  object_type,
-  COUNT(*) AS issue_count
-FROM
-  (
-    SELECT
-      EXPLODE(FROM_JSON(failures, 'array<string>')) AS indv_failure,
-      SUBSTRING_INDEX(indv_failure, ":", 1) issue,
-      object_type
-    from
-      $inventory.failure_details
-  )
-WHERE
-  indv_failure IS NOT NULL
-  AND indv_failure != ""
-GROUP BY
-  issue,
-  object_type
-ORDER BY
-  3 DESC;
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/global_init_scripts.sql b/src/databricks/labs/ucx/queries/assessment/main/global_init_scripts.sql
deleted file mode 100644
index 5195e8869d..0000000000
--- a/src/databricks/labs/ucx/queries/assessment/main/global_init_scripts.sql
+++ /dev/null
@@ -1,11 +0,0 @@
--- viz type=table, name=Global Init Scripts, columns=script_id,script_name,created_by,enabled,Azure_SPN_Present,failures
--- widget title=Global Init Scripts, col=0, row=57, size_x=6, size_y=8
-select
-  script_id,
-  script_name,
-  created_by,
-  if(enabled = True, "Yes", "No") enabled,
-  if(success = 1, "No", "Yes") Azure_SPN_Present,
-  failures
-from
-  $inventory.global_init_scripts
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/assessment/main/jobs.sql b/src/databricks/labs/ucx/queries/assessment/main/jobs.sql
deleted file mode 100644
index 5842052855..0000000000
--- a/src/databricks/labs/ucx/queries/assessment/main/jobs.sql
+++ /dev/null
@@ -1,9 +0,0 @@
--- viz type=table, name=Jobs, columns=job_id,job_name,creator,compatible,failures
--- widget title=Jobs, col=0, row=33, size_x=6, size_y=8
-SELECT job_id,
-       job_name,
-       creator,
-       IF(success=1, "Compatible", "Incompatible") AS compatible,
-       failures
-FROM $inventory.jobs
-WHERE job_name not like '[UCX]%'
diff --git a/src/databricks/labs/ucx/queries/assessment/main/pipelines.sql b/src/databricks/labs/ucx/queries/assessment/main/pipelines.sql
deleted file mode 100644
index 8262e939e8..0000000000
--- a/src/databricks/labs/ucx/queries/assessment/main/pipelines.sql
+++ /dev/null
@@ -1,10 +0,0 @@
--- viz type=table, name=Pipelines, columns=pipeline_id,pipeline_name,creator_name,Azure_SPN_Present,failures
--- widget title=Pipelines, col=0, row=41, size_x=6, size_y=8
-select
-       pipeline_id,
-       pipeline_name,
-       creator_name,
-       IF(success = 1, "No", "Yes") AS Azure_SPN_Present,
-       failures
-from
-  $inventory.pipelines
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/views/grant_detail.sql b/src/databricks/labs/ucx/queries/views/grant_detail.sql
new file mode 100644
index 0000000000..0cd2f170e6
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/views/grant_detail.sql
@@ -0,0 +1,32 @@
+SELECT
+    CASE
+        WHEN anonymous_function THEN 'ANONYMOUS FUNCTION'
+        WHEN any_file THEN 'ANY FILE'
+        WHEN view IS NOT NULL THEN 'VIEW'
+        WHEN table IS NOT NULL THEN 'TABLE'
+        WHEN database IS NOT NULL THEN 'DATABASE'
+        WHEN catalog IS NOT NULL THEN 'CATALOG'
+        ELSE 'UNKNOWN'
+    END AS object_type,
+    CASE
+        WHEN anonymous_function THEN NULL
+        WHEN any_file THEN NULL
+        WHEN view IS NOT NULL THEN CONCAT(catalog, '.', database, '.', view)
+        WHEN table IS NOT NULL THEN  CONCAT(catalog, '.', database, '.', table)
+        WHEN database IS NOT NULL THEN  CONCAT(catalog, '.', database)
+        WHEN catalog IS NOT NULL THEN catalog
+        ELSE 'UNKNOWN'
+    END AS object_id,
+    action_type,
+    CASE
+        WHEN principal
+            RLIKE '^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$'
+        THEN 'service-principal'
+        WHEN principal RLIKE '@' THEN 'user'
+        ELSE 'group'
+    END AS principal_type,
+    principal,
+    catalog,
+    database,
+    table
+FROM $inventory.grants
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/queries/views/objects.sql b/src/databricks/labs/ucx/queries/views/objects.sql
new file mode 100644
index 0000000000..60e15a447c
--- /dev/null
+++ b/src/databricks/labs/ucx/queries/views/objects.sql
@@ -0,0 +1,31 @@
+SELECT "jobs" AS object_type, job_id AS object_id, failures FROM $inventory.jobs
+UNION ALL
+SELECT "clusters" AS object_type, cluster_id AS object_id, failures FROM $inventory.clusters
+UNION ALL
+SELECT "global init scripts" AS object_type, script_id AS object_id, failures FROM $inventory.global_init_scripts
+UNION ALL
+SELECT "pipelines" AS object_type, pipeline_id AS object_id, failures FROM $inventory.pipelines
+UNION ALL
+SELECT object_type, object_id, failures FROM (
+  SELECT "tables" as object_type, CONCAT(t.catalog, '.', t.database, '.', t.name) AS object_id, 
+  TO_JSON(
+    FILTER(ARRAY(
+      IF(t.table_format != "delta", CONCAT("Non-DELTA format: ", t.table_format), NULL),
+      IF(STARTSWITH(t.location, "wasb"), "Unsupported Storage Type: wasb://", NULL),
+      IF(STARTSWITH(t.location, "adl"), "Unsupported Storage Type: adl://", NULL),
+      CASE
+        WHEN STARTSWITH(t.location, "dbfs:/mnt") THEN "Data is in DBFS Mount"
+        WHEN STARTSWITH(t.location, "/dbfs/mnt") THEN "Data is in DBFS Mount"
+        WHEN STARTSWITH(t.location, "dbfs:/") THEN "Data is in DBFS Root"
+        WHEN STARTSWITH(t.location, "/dbfs/") THEN "Data is in DBFS Root"
+        ELSE NULL
+      END,
+      tf.error
+    ), f -> f IS NOT NULL)
+  ) AS failures
+  FROM $inventory.tables AS t
+  FULL JOIN $inventory.table_failures AS tf USING (catalog, database, name)
+)
+UNION ALL
+SELECT "databases" AS object_type, CONCAT(catalog, '.', database) AS object_id, TO_JSON(ARRAY(error)) AS failures
+FROM $inventory.table_failures WHERE name IS NULL
\ No newline at end of file
diff --git a/src/databricks/labs/ucx/runtime.py b/src/databricks/labs/ucx/runtime.py
index a5c51d4fbe..5ea03dd918 100644
--- a/src/databricks/labs/ucx/runtime.py
+++ b/src/databricks/labs/ucx/runtime.py
@@ -27,127 +27,7 @@
 logger = logging.getLogger(__name__)
 
 
-def _get_view_definition() -> str:
-    return """CREATE
-OR REPLACE VIEW $inventory.failure_details AS WITH failuretab (object_type, object_id, failures) AS (
-  SELECT
-    object_type,
-    object_id,
-    failures
-  FROM
-    (
-      SELECT
-        "jobs" AS object_type,
-        job_id AS object_id,
-        failures
-      FROM
-        $inventory.jobs
-      WHERE
-        failures IS NOT NULL
-        AND failures != '[]'
-      UNION ALL
-      SELECT
-        "clusters" AS object_type,
-        cluster_id AS object_id,
-        failures
-      FROM
-        $inventory.clusters
-      WHERE
-        failures IS NOT NULL
-        AND failures != '[]'
-      UNION ALL
-      SELECT
-        "global init scripts" AS object_type,
-        script_id AS object_id,
-        failures
-      FROM
-        $inventory.global_init_scripts
-      WHERE
-        failures IS NOT NULL
-        AND failures != '[]'
-      UNION ALL
-      SELECT
-        "pipelines" AS object_type,
-        pipeline_id AS object_id,
-        failures
-      FROM
-        $inventory.pipelines
-      WHERE
-        failures IS NOT NULL
-        AND failures != '[]'
-      UNION ALL
-      SELECT
-        object_type,
-        object_id,
-        failures
-      FROM
-        (
-          SELECT
-            "Table" as object_type,
-            CONCAT(catalog, '.', database, '.', name) AS object_id,
-            TO_JSON(
-              ARRAY(
-                CASE
-                  WHEN STARTSWITH(location, "wasb") THEN "Unsupported Storage Type"
-                  WHEN STARTSWITH(location, "adl") THEN "Unsupported Storage Type"
-                  WHEN STARTSWITH(location, "dbfs:/mnt") THEN "DBFS Mount"
-                  WHEN STARTSWITH(location, "/dbfs/mnt") THEN "DBFS Mount"
-                  WHEN STARTSWITH(location, "dbfs:/") THEN "DBFS Root"
-                  WHEN STARTSWITH(location, "/dbfs/") THEN "DBFS Root"
-                  ELSE NULL
-                END,
-                IF(table_format != "delta", "Non Delta", NULL)
-              )
-            ) AS failures
-          FROM
-            $inventory.tables
-          WHERE
-            object_type IN ("MANAGED", "EXTERNAL")
-        )
-      WHERE
-        failures != '[null,null]'
-      UNION ALL
-      SELECT
-        CASE
-          WHEN instr(error, "ignoring database") > 0 THEN "Database"
-          WHEN instr(error, "ignoring table") > 0 THEN "Table"
-        END AS object_type,
-        CASE
-          WHEN instr(error, "ignoring database") > 0 THEN concat(catalog, '.', database)
-          WHEN instr(error, "ignoring table") > 0 THEN concat(catalog, '.', database, '.', name)
-        END AS object_id,
-        TO_JSON(ARRAY(error)) AS failures
-      FROM
-        $inventory.table_failures
-      WHERE
-        error IS NOT NULL
-        AND error != ""
-    )
-)
-SELECT
-  object_type,
-  object_id,
-  failures
-FROM
-  failuretab
-WHERE
-  failures IS NOT NULL
-  AND failures != ""
-ORDER BY
-  object_id,
-  object_type,
-  failures;"""
-
-
-@task("assessment")
-def setup_schema(cfg: WorkspaceConfig):
-    """Creates a database for the UCX migration intermediate state. The name comes from the configuration file
-    and is set with the `inventory_database` key."""
-    backend = RuntimeBackend()
-    backend.execute(f"CREATE SCHEMA IF NOT EXISTS hive_metastore.{cfg.inventory_database}")
-
-
-@task("assessment", depends_on=[setup_schema], notebook="hive_metastore/tables.scala")
+@task("assessment", notebook="hive_metastore/tables.scala")
 def crawl_tables(_: WorkspaceConfig):
     """Iterates over all tables in the Hive Metastore of the current workspace and persists their metadata, such
     as _database name_, _table name_, _table type_, _table location_, etc., in the Delta table named
@@ -177,7 +57,7 @@ def crawl_grants(cfg: WorkspaceConfig):
     grants.snapshot()
 
 
-@task("assessment", depends_on=[setup_schema])
+@task("assessment")
 def crawl_mounts(cfg: WorkspaceConfig):
     """Defines the scope of the _mount points_ intended for migration into Unity Catalog. As these objects are not
     compatible with the Unity Catalog paradigm, a key component of the migration process involves transferring them
@@ -205,7 +85,7 @@ def guess_external_locations(cfg: WorkspaceConfig):
     crawler.snapshot()
 
 
-@task("assessment", depends_on=[setup_schema])
+@task("assessment")
 def assess_jobs(cfg: WorkspaceConfig):
     """Scans through all the jobs and identifies those that are not compatible with UC. The list of all the jobs is
     stored in the `$inventory.jobs` table.
@@ -221,7 +101,7 @@ def assess_jobs(cfg: WorkspaceConfig):
     crawler.snapshot()
 
 
-@task("assessment", depends_on=[setup_schema])
+@task("assessment")
 def assess_clusters(cfg: WorkspaceConfig):
     """Scans through all the clusters and identifies those that are not compatible with UC. The list of all the clusters
     is stored in the `$inventory.clusters` table.
@@ -237,7 +117,7 @@ def assess_clusters(cfg: WorkspaceConfig):
     crawler.snapshot()
 
 
-@task("assessment", depends_on=[setup_schema])
+@task("assessment")
 def assess_pipelines(cfg: WorkspaceConfig):
     """This module scans through all the Pipelines and identifies those pipelines which have Azure Service Principals
     embedded (which have been given access to the Azure storage accounts via spark configurations) in the pipeline
@@ -253,7 +133,7 @@ def assess_pipelines(cfg: WorkspaceConfig):
     crawler.snapshot()
 
 
-@task("assessment", depends_on=[setup_schema])
+@task("assessment")
 def assess_azure_service_principals(cfg: WorkspaceConfig):
     """This module scans through all the cluster configurations, cluster policies, job cluster configurations,
     Pipeline configurations, Warehouse configuration and identifies all the Azure Service Principals who have been
@@ -269,7 +149,7 @@ def assess_azure_service_principals(cfg: WorkspaceConfig):
     crawler.snapshot()
 
 
-@task("assessment", depends_on=[setup_schema])
+@task("assessment")
 def assess_global_init_scripts(cfg: WorkspaceConfig):
     """This module scans through all the global init scripts and identifies if there is an Azure Service Principal
     who has been given access to the Azure storage accounts via spark configurations referred to in those scripts.
@@ -281,7 +161,7 @@ def assess_global_init_scripts(cfg: WorkspaceConfig):
     crawler.snapshot()
 
 
-@task("assessment", depends_on=[setup_schema])
+@task("assessment")
 def workspace_listing(cfg: WorkspaceConfig):
     """Scans the workspace for workspace objects. It recursively lists all subdirectories
     and compiles a list of directories, notebooks, files, repos and libraries in the workspace.
@@ -317,6 +197,9 @@ def crawl_permissions(cfg: WorkspaceConfig):
 @task(
     "assessment",
     depends_on=[
+        crawl_grants,
+        crawl_permissions,
+        guess_external_locations,
         assess_jobs,
         assess_clusters,
         assess_pipelines,
@@ -324,26 +207,6 @@ def crawl_permissions(cfg: WorkspaceConfig):
         assess_global_init_scripts,
         crawl_tables,
     ],
-)
-def setup_view(cfg: WorkspaceConfig):
-    """Creates a database view for capturing following details as part of the assessment process:
-    - Unsupported DBR version
-    - Unsupported config
-    - DBFS mount used in configuration
-    - Azure service principal credentials used in config
-    - Unsupported storage type (WASBS, ADL) used in table location
-    - DBFS root and DBFS Mount location used in table location
-    - Non Delta tables
-    - Table scan failures
-    - Database scan failures
-    """
-    backend = RuntimeBackend()
-    backend.execute(cfg.replace_inventory_variable(_get_view_definition()))
-
-
-@task(
-    "assessment",
-    depends_on=[crawl_grants, crawl_permissions, guess_external_locations, setup_view],
     dashboard="assessment_main",
 )
 def assessment_report(_: WorkspaceConfig):
diff --git a/tests/integration/framework/test_crawlers.py b/tests/integration/framework/test_crawlers.py
new file mode 100644
index 0000000000..118deeb6d4
--- /dev/null
+++ b/tests/integration/framework/test_crawlers.py
@@ -0,0 +1,11 @@
+from databricks.labs.ucx.framework.crawlers import SchemaDeployer
+from databricks.labs.ucx.hive_metastore.grants import Grant
+
+
+def test_deploys_database(sql_backend, inventory_schema):
+    from databricks.labs import ucx
+
+    deployer = SchemaDeployer(sql_backend, inventory_schema, ucx)
+    deployer.deploy_schema()
+    deployer.deploy_table("grants", Grant)
+    deployer.deploy_view("grant_detail", "assessment/views/grant_detail.sql")
diff --git a/tests/integration/test_installation.py b/tests/integration/test_installation.py
index 0cdccebe7e..80d2494e04 100644
--- a/tests/integration/test_installation.py
+++ b/tests/integration/test_installation.py
@@ -36,6 +36,7 @@ def test_destroying_non_existing_schema_fails_with_correct_message(ws, sql_backe
             ),
             log_level="DEBUG",
         ),
+        sql_backend=sql_backend,
         prefix=make_random(4),
         override_clusters={
             "main": default_cluster_id,
@@ -61,6 +62,7 @@ def test_logs_are_available(ws, sql_backend, env_or_skip, make_random):
             groups=GroupsConfig(auto=True),
             log_level="INFO",
         ),
+        sql_backend=sql_backend,
         prefix=make_random(4),
         override_clusters={
             "main": default_cluster_id,
@@ -172,6 +174,7 @@ def test_jobs_with_no_inventory_database(
             ),
             log_level="DEBUG",
         ),
+        sql_backend=sql_backend,
         prefix=make_random(4),
         override_clusters={
             "main": default_cluster_id,
diff --git a/tests/unit/test_install.py b/tests/unit/test_install.py
index faa81dbeab..080b699b09 100644
--- a/tests/unit/test_install.py
+++ b/tests/unit/test_install.py
@@ -31,6 +31,8 @@
 from databricks.labs.ucx.framework.install_state import InstallState
 from databricks.labs.ucx.install import WorkspaceInstaller
 
+from ..unit.framework.mocks import MockBackend
+
 
 @pytest.fixture
 def ws(mocker):
@@ -56,7 +58,10 @@ def ws(mocker):
 
 def test_replace_clusters_for_integration_tests(ws):
     return_value = WorkspaceInstaller.run_for_config(
-        ws, WorkspaceConfig(inventory_database="a", groups=GroupsConfig(auto=True)), override_clusters={"main": "abc"}
+        ws,
+        WorkspaceConfig(inventory_database="a", groups=GroupsConfig(auto=True)),
+        override_clusters={"main": "abc"},
+        sql_backend=MockBackend(),
     )
     assert return_value
 
@@ -330,7 +335,7 @@ def test_main_with_existing_conf_does_not_recreate_config(ws, mocker):
     ws.queries.create.return_value = Query(id="abc")
     ws.query_visualizations.create.return_value = Visualization(id="abc")
     ws.dashboard_widgets.create.return_value = Widget(id="abc")
-    install = WorkspaceInstaller(ws)
+    install = WorkspaceInstaller(ws, sql_backend=MockBackend())
     install._build_wheel = lambda _: Path(__file__)
     install.run()