Added export CLI functionality for assessment results #2553
@@ -275,3 +275,5 @@ commands:
  - name: target-workspace-id
    description: (Optional) id of a workspace in the target collection. If not specified, ucx will prompt to select from a list
- name: export
  description: export widget data from the assessment

Review comment: Can we update the command to accept a second argument?
@nfx: please provide your input on this API
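For context, labs.yml entries map to command functions in ucx's cli.py. A hedged sketch of how the command might be wired with the second argument the reviewer asks for — the AssessmentExporter import path, the flag name, and the exact blueprint command signature are all assumptions here, not the PR's final API:

from pathlib import Path

from databricks.sdk import WorkspaceClient
from databricks.labs.blueprint.tui import Prompts

from databricks.labs.ucx.cli import ucx  # the existing App instance
from databricks.labs.ucx.contexts.workspace_cli import WorkspaceContext
from databricks.labs.ucx.assessment.export import AssessmentExporter  # assumed module path


@ucx.command
def export(w: WorkspaceClient, path: str | None = None):
    """Export widget data from the assessment."""
    ctx = WorkspaceContext(w)
    exporter = AssessmentExporter(ctx)
    # 'path' is the hypothetical second argument; None falls back to a prompt
    exporter.export_results(Prompts(), Path(path) if path else None)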
@@ -0,0 +1,116 @@
import os
import re
import csv
import logging
from pathlib import Path
from zipfile import ZipFile
from concurrent.futures import ThreadPoolExecutor
from databricks.labs.blueprint.tui import Prompts
from databricks.labs.ucx.contexts.workspace_cli import WorkspaceContext

Review comment: isort

logger = logging.getLogger(__name__)

class AssessmentExporter:
    # File and Path Constants
    _ZIP_FILE_NAME = "ucx_assessment_results.zip"

    def __init__(self, ctx: WorkspaceContext):
        self._ctx = ctx

Review comment: it's an anti-pattern to depend on the entire WorkspaceContext …
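What the truncated comment above is likely driving at, sketched: depend on the narrow pieces the class actually uses rather than the whole context. The constructor shape below is an assumption, not the reviewer's exact suggestion:

from databricks.labs.lsql.backends import SqlBackend


class AssessmentExporter:
    def __init__(self, sql_backend: SqlBackend, inventory_database: str):
        # only what the exporter needs: a query backend and a schema name,
        # instead of the entire WorkspaceContext
        self._sql_backend = sql_backend
        self._inventory_database = inventory_database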
    def _get_ucx_main_queries(self) -> list[dict[str, str]]:
        """Retrieve and construct the main UCX queries."""
        pattern = r"\b.inventory\b"
        schema = self._ctx.inventory_database
        project_root = Path(__file__).parent.parent.parent.parent
        ucx_main_queries_path = project_root / "labs/ucx/queries/assessment/main"

        # List all SQL files in the queries directory
        sql_files = [file for file in ucx_main_queries_path.iterdir() if file.suffix == ".sql"]

        ucx_main_queries = []
        for sql_file in sql_files:
            content = sql_file.read_text()
            modified_content = re.sub(pattern, f" {schema}", content, flags=re.IGNORECASE)
            query_name = sql_file.stem
            ucx_main_queries.append({"name": query_name, "query": modified_content})

        return ucx_main_queries

Review comment: use existing …
Review comment: use databricks.labs.ucx.config.WorkspaceConfig.replace_inventory_variable, which already exists for this purpose.
Review comment: this is a far more concise version of this class that does the same thing, but with portability to become part of https://github.com/databrickslabs/lsql. You can PR it over there first and call it from UCX after, if you'd like. This way we can export any dashboards-as-code into CSV for any project:

from databricks.labs.lsql.dashboards import DashboardMetadata

dashboard = DashboardMetadata.from_path(ucx_main_queries_path)
dashboard = dashboard.replace_database(catalog='hive_metastore', database=self._config.inventory_database)
for tile in dashboard.tiles:
    if not tile.is_query():
        continue
    file_name = f"{tile.id}.csv"
    for row in self._sql_backend.fetch(tile.content):
        _ = row.as_dict()

Reply: WIP
Review comment: see the other comment; this method can be written in a more maintainable way.
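For context, the re.sub above rewrites the schema reference inside each bundled query. A tiny illustration, assuming a query that references the placeholder as inventory.<table> (the exact placeholder form in the bundled queries may differ):

import re

pattern = r"\b.inventory\b"
schema = "ucx_dev"  # assumed inventory database name
sql = "SELECT * FROM inventory.tables"
print(re.sub(pattern, f" {schema}", sql, flags=re.IGNORECASE))
# SELECT * FROM ucx_dev.tables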
    @staticmethod
    def _extract_target_name(name: str, pattern: str) -> str:
        """Extract target name from the file name using the provided pattern."""
        match = re.search(pattern, name)
        return match.group(1) if match else ""

Review comment: this method is not used in production code.

    @staticmethod
    def _cleanup(path: Path, target_name: str) -> None:
        """Remove a specific CSV file in the given path that matches the target name."""
        target_file = path.joinpath(target_name)
        if target_file.exists():
            target_file.unlink()
    def _execute_query(self, path: Path, result: dict[str, str]) -> None:
        """Execute a SQL query and write the result to a CSV file."""
        pattern = r"^\d+_\d+_(.*)"
        match = re.search(pattern, result["name"])
        if match:
            file_name = f"{match.group(1)}.csv"
            csv_path = os.path.join(path, file_name)

            query_results = list(self._ctx.sql_backend.fetch(result["query"]))
            if query_results:
                headers = query_results[0].asDict().keys()
                with open(csv_path, mode='w', newline='', encoding='utf-8') as file:
                    writer = csv.DictWriter(file, fieldnames=headers)
                    writer.writeheader()
                    for row in query_results:
                        writer.writerow(row.asDict())
                # Add the CSV file to the ZIP archive
                self._add_to_zip(path, file_name)

Review comment: consistently use …
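The name pattern above strips the dashboard ordering prefix from a query name. A quick check, using a query name taken from the notebook version later in this diff:

import re

match = re.search(r"^\d+_\d+_(.*)", "01_1_permissions")
print(f"{match.group(1)}.csv" if match else "no match")
# permissions.csv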
    def _add_to_zip(self, path: Path, file_name) -> None:
        """Create a ZIP file containing all the CSV files."""
        zip_path = path / self._ZIP_FILE_NAME
        file_path = path / file_name

        try:
            with ZipFile(zip_path, 'a') as zipf:
                zipf.write(file_path, arcname=file_name)
        except FileNotFoundError:
            print(f"File {file_path} not found.")
        except PermissionError:
            print(f"Permission denied for {file_path} or {zip_path}.")

        # Clean up the file if it was successfully added
        if file_path.exists():
            self._cleanup(path, file_name)

Review comment: use types everywhere
Review comment: don't use print …
Review comment: why do you need to create temporary CSV files if you can write them directly to the open zip? See https://docs.python.org/3/library/zipfile.html#zipfile.ZipFile.open

with ZipFile(target_folder / 'ucx-export.zip', mode='w') as z:
    ...
    with z.open(f'{tile.id}.csv', mode='w') as f:
        writer = csv.DictWriter(f, fieldnames=headers)
        writer.writeheader()
        for row in query_results:
            ...

This way you don't have to clean up a file.
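One wrinkle the reviewer's sketch elides: ZipFile.open yields a binary stream, while the csv module wants text. A minimal self-contained illustration of the suggested approach (the entry name and rows below are made up):

import csv
import io
from zipfile import ZipFile

rows = [{"object": "jobs", "count": 3}]  # stand-in for fetched query results

with ZipFile("ucx-export.zip", mode="w") as z:
    # ZipFile.open(..., mode="w") streams straight into the archive entry,
    # so no temporary CSV file (and no cleanup) is needed
    with z.open("jobs.csv", mode="w") as raw:
        with io.TextIOWrapper(raw, encoding="utf-8", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=["object", "count"])
            writer.writeheader()
            writer.writerows(rows)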
    def export_results(self, prompts: Prompts, path: Path | None) -> None:
        """Main method to export results to CSV files inside a ZIP archive."""
        results = self._get_ucx_main_queries()
        if path is None:
            response = prompts.question(
                "Choose a path to save the UCX Assessment results",
                default=Path.cwd().as_posix(),
                validate=lambda p_: Path(p_).exists(),
            )
            path = Path(response)
        else:
            logger.info(f"Using the provided path: {path}")

        try:
            logger.info(f"Exporting UCX Assessment (Main) results to {path}")
            with ThreadPoolExecutor(max_workers=4) as executor:
                futures = [executor.submit(self._execute_query, path, result) for result in results]
                for future in futures:
                    future.result()
        except TimeoutError as e:
            print("A thread execution timed out. Check the query execution logic.")
            print(f"Error exporting results: {e}")
        finally:
            logger.info(f"UCX Assessment (Main) results exported to {path}")

Review comment (on the else branch): it's redundant, as you have …
Review comment: we don't use ThreadPoolExecutor directly, as it "swallows" errors by default and needs more work for robust error handling. We use Threads; you can add:

with ZipFile(path / 'ucx-export.zip', mode='w') as zip:
    tasks = [partial(self._append_to_zip, zip, tile) for tile in dashboard.tiles]
    Threads.strict("exporting", tasks)

Review comment (on the final log line): are they? What if the path you're trying to write to is not writable, like …
@@ -51,6 +51,7 @@
from databricks.labs.ucx.installer.logs import PartialLogRecord, parse_logs
from databricks.labs.ucx.installer.mixins import InstallationMixin

logger = logging.getLogger(__name__)

TEST_RESOURCE_PURGE_TIMEOUT = timedelta(hours=1)

@@ -112,6 +113,126 @@
    f'--parent_run_id=' + dbutils.widgets.get('parent_run_id'))
    """
EXPORT_UCX_NOTEBOOK = """
||||||||||
# Databricks notebook source | ||||||||||
# MAGIC %md | ||||||||||
# MAGIC ##### Exporter of UCX assessment results | ||||||||||
# MAGIC ##### Instructions: | ||||||||||
# MAGIC 1. Execute using an all-purpose cluster with Databricks Runtime 14 or higher. | ||||||||||
# MAGIC 1. Hit **Run all** button and wait for completion. | ||||||||||
# MAGIC 1. Go to the bottom of the notebook and click the Download UCX Results button. | ||||||||||
# MAGIC | ||||||||||
# MAGIC ##### Important: | ||||||||||
# MAGIC Please note that this is only meant to serve as example code. | ||||||||||
# MAGIC This is not official **Databricks** or **Databricks Labs UCX** code. | ||||||||||
# MAGIC | ||||||||||
# MAGIC Example code developed by **Databricks Shared Technical Services team**. | ||||||||||
# COMMAND ---------- | ||||||||||
# DBTITLE 1,Installing Packages | ||||||||||
# MAGIC %pip install {remote_wheel} -q -q -q | ||||||||||
# MAGIC %pip install xlsxwriter -q -q -q | ||||||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why do we suppress the pip install?
Suggested change
Note that this will fail for workspaces that have restrictive internet access. To do this similar to how we install ucx, use the |
||||||||||
# MAGIC dbutils.library.restartPython() | ||||||||||
# COMMAND ---------- | ||||||||||
# DBTITLE 1,Import Libraries | ||||||||||
# Standard library imports | ||||||||||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Remove redundant comments, this is convention, comments do not help |
||||||||||
import os | ||||||||||
nfx marked this conversation as resolved.
Show resolved
Hide resolved
|
||||||||||
import re | ||||||||||
import shutil | ||||||||||
import json | ||||||||||
from typing import List, Dict | ||||||||||
from ast import literal_eval | ||||||||||
from concurrent.futures import ThreadPoolExecutor | ||||||||||
# Third-party library imports | ||||||||||
import pandas as pd | ||||||||||
import xlsxwriter | ||||||||||
# Databricks imports | ||||||||||
from databricks.labs.ucx.contexts.workflow_task import RuntimeContext | ||||||||||
import databricks.labs.ucx.queries.assessment.main as queries | ||||||||||
# Resource management | ||||||||||
import importlib.resources as resources | ||||||||||
# COMMAND ---------- | ||||||||||
# DBTITLE 1,UCX Assessment Export | ||||||||||
rportilla-databricks marked this conversation as resolved.
Show resolved
Hide resolved
|
||||||||||
class Exporter:
    # File and Path Constants
    _FILE_NAME = "ucx_assessment_results.xlsx"
    _TMP_PATH = "/Workspace/Applications/ucx/ucx_results/"

Review comment: it has to be replaced with …

    _DOWNLOAD_PATH = "/dbfs/FileStore/ucx_results"

    # Named Parameters
    _NAMED_PARAMS = {{"config": "/Workspace{config_file}"}}

    def __init__(self) -> None:
        self._ctx = RuntimeContext(self._NAMED_PARAMS)

    def _get_ucx_main_queries(self) -> List[Dict[str, str]]:
        '''Retrieve and construct the main UCX queries.'''
        pattern = r"\\b.inventory\\b"
        schema = self._ctx.inventory_database
        sql_files = [
            file.name
            for file in resources.files(queries).iterdir()
            if file.suffix == ".sql" and "count" not in file.name
        ]
        ucx_main_queries = [
            {{"name": "01_1_permissions", "query": f"SELECT * FROM {schema}.permissions"}},
            {{"name": "02_2_ucx_grants", "query": f"SELECT * FROM {schema}.grants;"}},
            {{"name": "03_3_groups", "query": f"SELECT * FROM {schema}.groups;"}},
        ]
        for sql_file in sql_files:
            with resources.as_file(resources.files(queries) / sql_file) as file_path:
                content = file_path.read_text()
                modified_content = re.sub(pattern, f" {schema}", content, flags=re.IGNORECASE)
                query_name = sql_file[:-4]
                ucx_main_queries.append({{"name": query_name, "query": modified_content}})
        return ucx_main_queries
    def _cleanup(self) -> None:
        '''Move the temporary results file to the download path and clean up the temp directory.'''
        shutil.move(
            os.path.join(self._TMP_PATH, self._FILE_NAME),
            os.path.join(self._DOWNLOAD_PATH, self._FILE_NAME),
        )
        shutil.rmtree(self._TMP_PATH)

    def _prepare_directories(self) -> None:
        '''Ensure that the necessary directories exist.'''
        os.makedirs(self._TMP_PATH, exist_ok=True)
        os.makedirs(self._DOWNLOAD_PATH, exist_ok=True)

    def _execute_query(self, result: Dict[str, str], writer: pd.ExcelWriter) -> None:
        '''Execute a SQL query and write the result to an Excel sheet.'''
        pattern = r'^\\d+_\\d+_(.*)'
        match = re.search(pattern, result["name"])
        if match:
            sheet_name = match.group(1)
            sdf = spark.sql(result["query"])
            if sdf.count() > 0:
                df = sdf.toPandas()
                df.to_excel(writer, sheet_name=sheet_name, index=False)

    def _render_export(self) -> None:
        '''Render an HTML link for downloading the results.'''
        html_content = f'''
        <style>@font-face{{font-family:'DM Sans';src:url(https://cdn.bfldr.com/9AYANS2F/at/p9qfs3vgsvnp5c7txz583vgs/dm-sans-regular.ttf?auto=webp&format=ttf) format('truetype');font-weight:400;font-style:normal}}body{{font-family:'DM Sans',Arial,sans-serif}}.export-container{{text-align:center;margin-top:20px}}.export-container h2{{color:#1B3139;font-size:24px;margin-bottom:20px}}.export-container a{{display:inline-block;padding:12px 25px;background-color:#1B3139;color:#fff;text-decoration:none;border-radius:4px;font-size:18px;font-weight:500;transition:background-color 0.3s ease,transform 0.3s ease}}.export-container a:hover{{background-color:#FF3621;transform:translateY(-2px)}}</style><div class="export-container"><h2>Export Results</h2><a href='{workspace_host}files/ucx_results/ucx_assessment_results.xlsx?o={workspace_id}' target='_blank' download>Download UCX Results </a></div>
        '''
        displayHTML(html_content)

    def export_results(self) -> None:
        '''Main method to export results to an Excel file.'''
        self._prepare_directories()
        results = self._get_ucx_main_queries()
        try:
            with pd.ExcelWriter(
                os.path.join(self._TMP_PATH, self._FILE_NAME), engine="xlsxwriter"
            ) as writer:
                with ThreadPoolExecutor(max_workers=4) as executor:
                    futures = [
                        executor.submit(self._execute_query, result, writer)
                        for result in results
                    ]
                    for future in futures:
                        future.result()
            self._cleanup()
            self._render_export()
        except Exception as e:
            print(f"Error exporting results: {{e}}")

# COMMAND ----------

# DBTITLE 1,Automate UCX Data Export
Exporter().export_results()
"""
class DeployedWorkflows:
    def __init__(self, ws: WorkspaceClient, install_state: InstallState, verify_timeout: timedelta):
@@ -486,6 +607,7 @@ def create_jobs(self) -> None:

        self._install_state.save()
        self._create_debug(remote_wheels)
        self._create_export(remote_wheels)
        self._create_readme()

    @property
@@ -788,6 +910,16 @@ def _create_debug(self, remote_wheels: list[str]):
        ).encode("utf8")
        self._installation.upload('DEBUG.py', content)

    def _create_export(self, remote_wheels: list[str]):
        content = EXPORT_UCX_NOTEBOOK.format(
            remote_wheel=remote_wheels,
            config_file=self._config_file,
            workspace_host=self._ws.config.host,
            workspace_id=self._ws.get_workspace_id(),
            schema=self._config.inventory_database,
        ).encode("utf8")
        self._installation.upload('EXPORT_UCX_RESULTS.py', content)
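Because EXPORT_UCX_NOTEBOOK is rendered with str.format here, the single-braced names above are placeholders, and any literal braces in the notebook body must be doubled to survive formatting. A small self-contained illustration (the wheel and config paths are invented):

template = 'params = {{"config": "/Workspace{config_file}"}}  # installs {remote_wheel}'
print(template.format(
    remote_wheel="/Workspace/wheels/ucx.whl",  # invented path
    config_file="/Applications/ucx/config.yml",  # invented path
))
# params = {"config": "/Workspace/Applications/ucx/config.yml"}  # installs /Workspace/wheels/ucx.whl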
class MaxedStreamHandler(logging.StreamHandler):

Review comment: Remove redundant newlines.