Bump ruff, apply formatting updates (#1272)
Signed-off-by: MasterSkepticista <karan.shah@intel.com>
MasterSkepticista authored Jan 16, 2025
1 parent a110bae commit 9658c03
Showing 34 changed files with 84 additions and 109 deletions.
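Most of this diff is mechanical: ruff 0.9 shipped the 2025 stable formatting style, and three patterns account for nearly every hunk below. Here is a minimal sketch of those patterns on simplified stand-ins (`tags`, `tensor_key`, `name`, and `message` are illustrative placeholders, not code taken verbatim from OpenFL):

```python
# A minimal sketch of the three ruff 0.9 style changes applied in this commit.
# All names here are illustrative stand-ins, not actual OpenFL code.

tags = ("compressed",)
tensor_key = ("tensor", "origin", 0, False, tags)

# 1. Assert statements: ruff 0.8 wrapped the *condition* in parentheses when
#    the line was too long; 0.9 keeps the condition on the assert line and
#    parenthesizes the *message* instead.
assert "compressed" in tags or "lossy_compressed" in tags, (
    f"Named tensor {tensor_key} is not compressed"
)

# 2. Implicitly concatenated string literals that fit within the line limit
#    are joined into a single literal.
#    Old: f"All tasks completed on {name} " f"for round {round_number}..."
name, round_number = "collab1", 3
message = f"All tasks completed on {name} for round {round_number}..."

# 3. f-string formatting is now stable: outer quotes are normalized to double
#    quotes, and string literals inside the replacement fields flip to single
#    quotes, e.g. f'CN={style(cn, fg="red")}' becomes f"CN={style(cn, fg='red')}".
print(message)
```

One side effect worth noting: joining implicitly concatenated literals preserves their content exactly, so a pair that was already missing a trailing space becomes a run-on message (visible in the "Could not find previous model layer.Fetching latest layer from aggregator" line in collaborator.py below).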
2 changes: 1 addition & 1 deletion linters-requirements.txt
@@ -1,2 +1,2 @@
 pre-commit
-ruff==0.8.1
+ruff==0.9.1
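With the pin bumped, the reformatting is reproducible with the pinned tool: `pip install -r linters-requirements.txt` followed by `ruff format .` re-applies the 0.9.x style shown in the hunks below, and `pre-commit run --all-files` runs the same check through the repository's pre-commit hooks (assuming the usual setup where pre-commit drives ruff; the hook configuration itself is not part of this diff).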
15 changes: 7 additions & 8 deletions openfl/component/aggregator/aggregator.py
@@ -855,9 +855,9 @@ def _process_named_tensor(self, named_tensor, collaborator_name):
             tuple(named_tensor.tags),
         )
         tensor_name, origin, round_number, report, tags = tensor_key
-        assert (
-            "compressed" in tags or "lossy_compressed" in tags
-        ), f"Named tensor {tensor_key} is not compressed"
+        assert "compressed" in tags or "lossy_compressed" in tags, (
+            f"Named tensor {tensor_key} is not compressed"
+        )
         if "compressed" in tags:
             dec_tk, decompressed_nparray = self.tensor_codec.decompress(
                 tensor_key,
@@ -1039,9 +1039,9 @@ def _compute_validation_related_task_metrics(self, task_name) -> dict:
         metrics = {}
         for tensor_key in self.collaborator_tasks_results[task_key]:
             tensor_name, origin, round_number, report, tags = tensor_key
-            assert (
-                collaborators_for_task[0] in tags
-            ), f"Tensor {tensor_key} in task {task_name} has not been processed correctly"
+            assert collaborators_for_task[0] in tags, (
+                f"Tensor {tensor_key} in task {task_name} has not been processed correctly"
+            )
             # Strip the collaborator label, and lookup aggregated tensor
             new_tags = change_tags(tags, remove_field=collaborators_for_task[0])
             agg_tensor_key = TensorKey(tensor_name, origin, round_number, report, new_tags)
@@ -1073,8 +1073,7 @@ def _compute_validation_related_task_metrics(self, task_name) -> dict:
                     # Compare the accuracy of the model, potentially save it
                     if self.best_model_score is None or self.best_model_score < agg_results:
                         logger.info(
-                            f"Round {round_number}: saved the best "
-                            f"model with score {agg_results:f}"
+                            f"Round {round_number}: saved the best model with score {agg_results:f}"
                         )
                         self.best_model_score = agg_results
                         self._save_model(round_number, self.best_state_path)
6 changes: 3 additions & 3 deletions openfl/component/assigner/random_grouped_assigner.py
@@ -56,9 +56,9 @@ def define_task_assignments(self):
         Returns:
             None
         """
-        assert (
-            np.abs(1.0 - np.sum([group["percentage"] for group in self.task_groups])) < 0.01
-        ), "Task group percentages must sum to 100%"
+        assert np.abs(1.0 - np.sum([group["percentage"] for group in self.task_groups])) < 0.01, (
+            "Task group percentages must sum to 100%"
+        )

         # Start by finding all of the tasks in all specified groups
         self.all_tasks_in_groups = list(
3 changes: 1 addition & 2 deletions openfl/component/assigner/static_grouped_assigner.py
@@ -62,8 +62,7 @@ def define_task_assignments(self):
         unique_authorized_cols = set(self.authorized_cols)

         assert cols_amount == authorized_cols_amount and unique_cols == unique_authorized_cols, (
-            f"Collaborators in each group must be distinct: "
-            f"{unique_cols}, {unique_authorized_cols}"
+            f"Collaborators in each group must be distinct: {unique_cols}, {unique_authorized_cols}"
         )

         # Start by finding all of the tasks in all specified groups
11 changes: 4 additions & 7 deletions openfl/component/collaborator/collaborator.py
@@ -141,7 +141,7 @@ def __init__(
         if hasattr(DevicePolicy, device_assignment_policy):
             self.device_assignment_policy = DevicePolicy[device_assignment_policy]
         else:
-            logger.error("Unknown device_assignment_policy: " f"{device_assignment_policy.name}.")
+            logger.error(f"Unknown device_assignment_policy: {device_assignment_policy.name}.")
             raise NotImplementedError(
                 f"Unknown device_assignment_policy: {device_assignment_policy}."
             )
@@ -216,8 +216,7 @@ def run_simulation(self):
             for task in tasks:
                 self.do_task(task, round_number)
             logger.info(
-                f"All tasks completed on {self.collaborator_name} "
-                f"for round {round_number}..."
+                f"All tasks completed on {self.collaborator_name} for round {round_number}..."
             )
             break

@@ -376,8 +375,7 @@ def get_data_for_tensorkey(self, tensor_key):
                 )
                 if nparray is not None:
                     logger.debug(
-                        f"Found tensor {tensor_name} in local TensorDB "
-                        f"for round {prior_round}"
+                        f"Found tensor {tensor_name} in local TensorDB for round {prior_round}"
                     )
                     return nparray
                 prior_round -= 1
@@ -413,8 +411,7 @@ def get_data_for_tensorkey(self, tensor_key):
                 self.tensor_db.cache_tensor({new_model_tk: nparray})
             else:
                 logger.info(
-                    "Could not find previous model layer."
-                    "Fetching latest layer from aggregator"
+                    "Could not find previous model layer.Fetching latest layer from aggregator"
                 )
                 # The original model tensor should be fetched from aggregator
                 nparray = self.get_aggregated_tensor_from_aggregator(
2 changes: 1 addition & 1 deletion openfl/component/director/experiment.py
@@ -106,7 +106,7 @@ async def start(
         """
         self.status = Status.IN_PROGRESS
         try:
-            logger.info(f"New experiment {self.name} for " f"collaborators {self.collaborators}")
+            logger.info(f"New experiment {self.name} for collaborators {self.collaborators}")

             with ExperimentWorkspace(
                 experiment_name=self.name,
2 changes: 1 addition & 1 deletion openfl/component/envoy/envoy.py
@@ -237,7 +237,7 @@ def _get_cuda_device_info(self):
             )
         except Exception as exc:
             logger.exception(
-                f"Failed to get cuda device info: {exc}. " f"Check your cuda device monitor plugin."
+                f"Failed to get cuda device info: {exc}. Check your cuda device monitor plugin."
             )
         return cuda_devices_info

6 changes: 3 additions & 3 deletions openfl/databases/tensor_db.py
@@ -213,9 +213,9 @@ def get_aggregated_tensor(
             None: if not all values are present.
         """
         if len(collaborator_weight_dict) != 0:
-            assert (
-                np.abs(1.0 - sum(collaborator_weight_dict.values())) < 0.01
-            ), f"Collaborator weights do not sum to 1.0: {collaborator_weight_dict}"
+            assert np.abs(1.0 - sum(collaborator_weight_dict.values())) < 0.01, (
+                f"Collaborator weights do not sum to 1.0: {collaborator_weight_dict}"
+            )

         collaborator_names = collaborator_weight_dict.keys()
         agg_tensor_dict = {}
@@ -461,9 +461,7 @@ def send_task_results(
                 f" for the wrong round: {round_number}. Ignoring..."
             )
         else:
-            logger.info(
-                f"Collaborator {collab_name} sent task results" f" for round {round_number}."
-            )
+            logger.info(f"Collaborator {collab_name} sent task results for round {round_number}.")
         # Unpickle the clone (FLSpec object)
         clone = dill.loads(clone_bytes)
         # Update the clone in clones_dict dictionary
@@ -161,7 +161,7 @@ def send_task_results(self, next_step: str, clone: Any) -> None:
             None
         """
         self.logger.info(
-            f"Round {self.round_number}," f" collaborator {self.name} is sending results..."
+            f"Round {self.round_number}, collaborator {self.name} is sending results..."
         )
         self.client.send_task_results(self.name, self.round_number, next_step, dill.dumps(clone))

6 changes: 2 additions & 4 deletions openfl/experimental/workflow/federated/plan/plan.py
@@ -169,8 +169,7 @@ def parse(

         except Exception:
             Plan.logger.exception(
-                f"Parsing Federated Learning Plan : "
-                f"[red]FAILURE[/] : [blue]{plan_config_path}[/].",
+                f"Parsing Federated Learning Plan : [red]FAILURE[/] : [blue]{plan_config_path}[/].",
                 extra={"markup": True},
             )
             raise
@@ -235,8 +234,7 @@ def import_(template) -> object:
         class_name = splitext(template)[1].strip(".")
         module_path = splitext(template)[0]
         Plan.logger.info(
-            f"Importing [red]🡆[/] Object [red]{class_name}[/] "
-            f"from [red]{module_path}[/] Module.",
+            f"Importing [red]🡆[/] Object [red]{class_name}[/] from [red]{module_path}[/] Module.",
             extra={"markup": True},
         )
         module = import_module(module_path)
6 changes: 3 additions & 3 deletions openfl/experimental/workflow/interface/cli/aggregator.py
@@ -99,7 +99,7 @@ def start_(plan, authorized_cols, secure):
     "--fqdn",
     required=False,
     type=click_types.FQDN,
-    help=f"The fully qualified domain name of" f" aggregator node [{getfqdn_env()}]",
+    help=f"The fully qualified domain name of aggregator node [{getfqdn_env()}]",
     default=getfqdn_env(),
 )
 def _generate_cert_request(fqdn):
@@ -118,8 +118,8 @@ def generate_cert_request(fqdn):

     echo(
         f"Creating AGGREGATOR certificate key pair with following settings: "
-        f'CN={style(common_name, fg="red")},'
-        f' SAN={style(subject_alternative_name, fg="red")}'
+        f"CN={style(common_name, fg='red')},"
+        f" SAN={style(subject_alternative_name, fg='red')}"
     )

     server_private_key, server_csr = generate_csr(common_name, server=True)
10 changes: 5 additions & 5 deletions openfl/experimental/workflow/interface/cli/collaborator.py
@@ -126,8 +126,8 @@ def generate_cert_request(collaborator_name, silent, skip_package):

     echo(
         f"Creating COLLABORATOR certificate key pair with following settings: "
-        f'CN={style(common_name, fg="red")},'
-        f' SAN={style(subject_alternative_name, fg="red")}'
+        f"CN={style(common_name, fg='red')},"
+        f" SAN={style(subject_alternative_name, fg='red')}"
     )

     client_private_key, client_csr = generate_csr(common_name, server=False)
@@ -164,7 +164,7 @@ def generate_cert_request(collaborator_name, silent, skip_package):
     make_archive(archive_name, archive_type, tmp_dir)
     rmtree(tmp_dir)

-    echo(f"Archive {archive_file_name} with certificate signing" f" request created")
+    echo(f"Archive {archive_file_name} with certificate signing request created")
     echo(
         "This file should be sent to the certificate authority"
         " (typically hosted by the aggregator) for signing"
@@ -233,14 +233,14 @@ def register_collaborator(file_name):
     "-r",
     "--request-pkg",
     type=ClickPath(exists=True),
-    help="The archive containing the certificate signing" " request (*.zip) for a collaborator",
+    help="The archive containing the certificate signing request (*.zip) for a collaborator",
 )
 @option(
     "-i",
     "--import",
     "import_",
     type=ClickPath(exists=True),
-    help="Import the archive containing the collaborator's" " certificate (signed by the CA)",
+    help="Import the archive containing the collaborator's certificate (signed by the CA)",
 )
 def certify_(collaborator_name, silent, request_pkg, import_):
     """Certify the collaborator."""
3 changes: 1 addition & 2 deletions openfl/experimental/workflow/interface/cli/plan.py
@@ -85,8 +85,7 @@ def initialize(context, plan_config, cols_config, data_config, aggregator_addres
     plan_origin["network"]["settings"]["agg_addr"] = aggregator_address or getfqdn_env()

     logger.warn(
-        f"Patching Aggregator Addr in Plan"
-        f" 🠆 {plan_origin['network']['settings']['agg_addr']}"
+        f"Patching Aggregator Addr in Plan 🠆 {plan_origin['network']['settings']['agg_addr']}"
     )

     Plan.dump(plan_config, plan_origin)
2 changes: 1 addition & 1 deletion openfl/experimental/workflow/interface/cli/workspace.py
@@ -258,7 +258,7 @@ def export_(pip_install_options: Tuple[str]):
         if confirm("Create a default '.workspace' file?"):
             copy2(WORKSPACE / "workspace" / ".workspace", tmp_dir)
         else:
-            echo("To proceed, you must have a '.workspace' " "file in the current directory.")
+            echo("To proceed, you must have a '.workspace' file in the current directory.")
             raise

     # Create Zip archive of directory
6 changes: 3 additions & 3 deletions openfl/experimental/workflow/runtime/federated_runtime.py
@@ -232,9 +232,9 @@ def stream_experiment_stdout(self, experiment_name) -> None:
         print(f"Getting standard output for experiment: {experiment_name}...")
         for stdout_message_dict in self._dir_client.stream_experiment_stdout(experiment_name):
             print(
-                f'Origin: {stdout_message_dict["stdout_origin"]}, '
-                f'Task: {stdout_message_dict["task_name"]}'
-                f'\n{stdout_message_dict["stdout_value"]}'
+                f"Origin: {stdout_message_dict['stdout_origin']}, "
+                f"Task: {stdout_message_dict['task_name']}"
+                f"\n{stdout_message_dict['stdout_value']}"
             )

     def __repr__(self) -> str:
6 changes: 2 additions & 4 deletions openfl/experimental/workflow/workspace_export/export.py
@@ -519,17 +519,15 @@ def generate_data_yaml(self) -> None: # noqa: C901
                     runtime_created = True
                 if not runtime_collab_created:
                     f.write(
-                        f"\nruntime_collaborators = "
-                        f"{runtime_name}._LocalRuntime__collaborators"
+                        f"\nruntime_collaborators = {runtime_name}._LocalRuntime__collaborators"
                     )
                     runtime_collab_created = True
                 f.write(
                     f"\n{collab_name}_private_attributes = "
                     f"runtime_collaborators['{collab_name}'].private_attributes"
                 )
                 data[collab_name] = {
-                    "private_attributes": f"src."
-                    f"{self.script_name}.{collab_name}_private_attributes"
+                    "private_attributes": f"src.{self.script_name}.{collab_name}_private_attributes"
                 }

         self.__write_yaml(data_yaml, data)
9 changes: 3 additions & 6 deletions openfl/federated/plan/plan.py
@@ -163,8 +163,7 @@ def parse( # noqa: C901

         if gandlf_config_path is not None:
             Plan.logger.info(
-                f"Importing GaNDLF Config into plan "
-                f"from file [red]{gandlf_config_path}[/].",
+                f"Importing GaNDLF Config into plan from file [red]{gandlf_config_path}[/].",
                 extra={"markup": True},
             )

@@ -201,8 +200,7 @@

         except Exception:
             Plan.logger.exception(
-                f"Parsing Federated Learning Plan : "
-                f"[red]FAILURE[/] : [blue]{plan_config_path}[/].",
+                f"Parsing Federated Learning Plan : [red]FAILURE[/] : [blue]{plan_config_path}[/].",
                 extra={"markup": True},
             )
             raise
@@ -248,8 +246,7 @@ def import_(template):
         class_name = splitext(template)[1].strip(".")
         module_path = splitext(template)[0]
         Plan.logger.info(
-            f"Importing [red]🡆[/] Object [red]{class_name}[/] "
-            f"from [red]{module_path}[/] Module.",
+            f"Importing [red]🡆[/] Object [red]{class_name}[/] from [red]{module_path}[/] Module.",
             extra={"markup": True},
         )
         module = import_module(module_path)
2 changes: 1 addition & 1 deletion openfl/federated/task/runner_gandlf.py
@@ -738,7 +738,7 @@ def to_cpu_numpy(state):
         # When restoring, we currently assume all values are tensors.
         if not pt.is_tensor(v):
             raise ValueError(
-                "We do not currently support non-tensors " "coming from model.state_dict()"
+                "We do not currently support non-tensors coming from model.state_dict()"
             )
         # get as a numpy array, making sure is on cpu
         state[k] = v.cpu().numpy()
2 changes: 1 addition & 1 deletion openfl/federated/task/runner_pt.py
@@ -714,7 +714,7 @@ def to_cpu_numpy(state):
         # When restoring, we currently assume all values are tensors.
         if not torch.is_tensor(v):
             raise ValueError(
-                "We do not currently support non-tensors " "coming from model.state_dict()"
+                "We do not currently support non-tensors coming from model.state_dict()"
             )
         # get as a numpy array, making sure is on cpu
         state[k] = v.cpu().numpy()
6 changes: 3 additions & 3 deletions openfl/federated/task/runner_xgb.py
@@ -29,9 +29,9 @@ def check_precision_loss(logger, converted_data, original_data):
     reconstructed_json = reconstructed_bytes.decode("utf-8")
     reconstructed_data = json.loads(reconstructed_json)

-    assert type(original_data) is type(
-        reconstructed_data
-    ), "Reconstructed datatype does not match original."
+    assert type(original_data) is type(reconstructed_data), (
+        "Reconstructed datatype does not match original."
+    )

     # Compare the original and reconstructed data
     if original_data != reconstructed_data:
6 changes: 3 additions & 3 deletions openfl/interface/aggregator.py
@@ -77,7 +77,7 @@ def start_(plan, authorized_cols):
     "--fqdn",
     required=False,
     type=click_types.FQDN,
-    help=f"The fully qualified domain name of" f" aggregator node [{getfqdn_env()}]",
+    help=f"The fully qualified domain name of aggregator node [{getfqdn_env()}]",
     default=getfqdn_env(),
 )
 def _generate_cert_request(fqdn):
@@ -101,8 +101,8 @@ def generate_cert_request(fqdn):

     echo(
         f"Creating AGGREGATOR certificate key pair with following settings: "
-        f'CN={style(common_name, fg="red")},'
-        f' SAN={style(subject_alternative_name, fg="red")}'
+        f"CN={style(common_name, fg='red')},"
+        f" SAN={style(subject_alternative_name, fg='red')}"
     )

     server_private_key, server_csr = generate_csr(common_name, server=True)
5 changes: 2 additions & 3 deletions openfl/interface/cli.py
@@ -148,13 +148,12 @@ def format_help(self, ctx, formatter):
                 help_str = cmd.get_short_help_str()
                 if level == 0:
                     formatter.write(
-                        f'\n{style(name, fg="blue", bold=True):<30}'
-                        f" {style(help_str, bold=True)}" + "\n"
+                        f"\n{style(name, fg='blue', bold=True):<30} {style(help_str, bold=True)}" + "\n"
                     )
                     formatter.write("─" * 80 + "\n")
                 if level == 1:
                     formatter.write(
-                        f' {style("*", fg="green")}' f' {style(name, fg="cyan"):<21} {help_str}' + "\n"
+                        f" {style('*', fg='green')} {style(name, fg='cyan'):<21} {help_str}" + "\n"
                     )

[Diff truncated: the remaining changed files are not shown.]
