Finished exporting data.yaml, plan.yaml.
Workspace creator validated for 101 MNIST and 301 Watermarking with MNIST

Signed-off-by: Parth Mandaliya <parthx.mandaliya@intel.com>
ParthM-GitHub committed Sep 28, 2023
1 parent dba24af commit 3c4fd3a
Showing 8 changed files with 396 additions and 2,779 deletions.

This file was deleted.

This file was deleted.

This file was deleted.

64 changes: 56 additions & 8 deletions openfl-tutorials/experimental/Workflow_Interface_101_MNIST.ipynb
@@ -42,6 +42,25 @@
"The workflow interface is a new way of composing federated learning expermients with OpenFL. It was borne through conversations with researchers and existing users who had novel use cases that didn't quite fit the standard horizontal federated learning paradigm. "
]
},
{
"cell_type": "markdown",
"id": "9819a58c",
"metadata": {},
"source": [
"Before we start the experiment, let's write some `nbdev` tags which enable us to convert this notebook to aggregator-based-workspace workflow.\n",
"Tag below specifies default python script name which will be created."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8e4d1637",
"metadata": {},
"outputs": [],
"source": [
"#| default_exp experiment"
]
},
{
"attachments": {},
"cell_type": "markdown",
@@ -67,6 +86,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"!pip install git+https://github.com/intel/openfl.git\n",
"!pip install -r requirements_workflow_interface.txt\n",
"!pip install torch\n",
@@ -94,6 +115,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torch.optim as optim\n",
@@ -153,7 +176,7 @@
" x = F.dropout(x, training=self.training)\n",
" x = self.fc2(x)\n",
" return F.log_softmax(x)\n",
" \n",
"\n",
"def inference(network,test_loader):\n",
" network.eval()\n",
" test_loss = 0\n",
@@ -192,13 +215,14 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"from copy import deepcopy\n",
"\n",
"from openfl.experimental.interface import FLSpec, Aggregator, Collaborator\n",
"from openfl.experimental.runtime import LocalRuntime\n",
"from openfl.experimental.placement import aggregator, collaborator\n",
"\n",
"\n",
"def FedAvg(models):\n",
" new_model = models[0]\n",
" state_dicts = [model.state_dict() for model in models]\n",
@@ -234,6 +258,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"class FederatedFlow(FLSpec):\n",
"\n",
" def __init__(self, model=None, optimizer=None, rounds=3, **kwargs):\n",
@@ -333,10 +359,12 @@
"metadata": {},
"outputs": [],
"source": [
"# Aggregator\n",
"#| export\n",
"\n",
"aggregator_ = Aggregator()\n",
"\n",
"collaborator_names = [\"Portland\", \"Seattle\", \"Chandler\", \"Bangalore\"]\n",
"n_collaborators = len(collaborator_names)\n",
"\n",
"def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset):\n",
" train = deepcopy(train_dataset)\n",
@@ -351,20 +379,18 @@
" \"test_loader\": torch.utils.data.DataLoader(test, batch_size=batch_size, shuffle=True),\n",
" }\n",
"\n",
"# Setup collaborators private attributes via callable function\n",
"collaborators = []\n",
"for idx, collaborator_name in enumerate(collaborator_names):\n",
" collaborators.append(\n",
" Collaborator(\n",
" name=collaborator_name, num_cpus=0, num_gpus=0,\n",
" private_attributes_callable=callable_to_initialize_collaborator_private_attributes,\n",
" index=idx, n_collaborators=len(collaborator_names),\n",
" index=idx, n_collaborators=n_collaborators,\n",
" train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64\n",
" )\n",
" )\n",
"\n",
"local_runtime = LocalRuntime(aggregator=aggregator_, collaborators=collaborators,\n",
" backend=\"ray\")\n",
"local_runtime = LocalRuntime(aggregator=aggregator_, collaborators=collaborators, backend=\"ray\")\n",
"print(f'Local runtime collaborators = {local_runtime.collaborators}')"
]
},
@@ -384,6 +410,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"model = None\n",
"best_model = None\n",
"optimizer = None\n",
@@ -392,6 +420,26 @@
"flflow.run()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "46d1259b",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from openfl.experimental.workspace_builder import WorkspaceBuilder\n",
"\n",
"notebook_path = \"./Workflow_Interface_101_MNIST-Test-Approach2 copy.ipynb\"\n",
"template_workspace_path = f\"/home/{os.environ['USER']}/env-workspace-creator/openfl/openfl-workspace/experimental/template_workspace/\"\n",
"\n",
"workspace_builder = WorkspaceBuilder(notebook_path, \"experiment\", f\"/home/{os.environ['USER']}\", template_workspace_path)\n",
"\n",
"workspace_builder.generate_requirements()\n",
"workspace_builder.generate_plan_yaml()\n",
"workspace_builder.generate_data_yaml()"
]
},
{
"attachments": {},
"cell_type": "markdown",
@@ -702,7 +750,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.17"
"version": "3.8.18"
}
},
"nbformat": 4,
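The `#| default_exp` and `#| export` directives added above follow the `nbdev` convention: one cell names the Python module to generate, and every cell tagged `#| export` is copied into it. As a rough illustration of that mechanism only (not OpenFL's actual WorkspaceBuilder code, whose internals are not shown in this diff), a minimal export step might look like the sketch below; the function name, output directory, and fallback module name are assumptions made for the example.

import json
from pathlib import Path


def export_tagged_cells(notebook_path, output_dir="."):
    """Collect '#| export' code cells into the module named by '#| default_exp'."""
    nb = json.loads(Path(notebook_path).read_text())
    module_name = "experiment"  # fallback if no '#| default_exp' directive is found
    exported_sources = []

    for cell in nb.get("cells", []):
        if cell.get("cell_type") != "code":
            continue
        source = "".join(cell.get("source", []))
        first_line = source.lstrip().splitlines()[0] if source.strip() else ""
        if first_line.startswith("#| default_exp"):
            module_name = first_line.split()[-1]
        elif first_line.replace(" ", "").startswith("#|export"):
            exported_sources.append(source)

    output_path = Path(output_dir) / f"{module_name}.py"
    output_path.write_text("\n\n".join(exported_sources))
    return output_path


# Hypothetical usage:
# export_tagged_cells("Workflow_Interface_101_MNIST.ipynb")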
openfl-tutorials/experimental/Workflow_Interface_301_MNIST_Watermarking.ipynb
@@ -46,17 +46,30 @@
"First we start by installing the necessary dependencies for the workflow interface"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d79eacbd",
"metadata": {},
"outputs": [],
"source": [
"#| default_exp experiment"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f7475cba",
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"!pip install git+https://github.com/intel/openfl.git\n",
"!pip install -r requirements_workflow_interface.txt\n",
"!pip install matplotlib\n",
"!pip install torch torchvision\n",
"!pip install torch\n",
"!pip install torchvision\n",
"!pip install git+https://github.com/pyviz-topics/imagen.git@master\n",
"\n",
"\n",
@@ -82,6 +95,8 @@
"metadata": {},
"outputs": [],
"source": [
"# | export\n",
"\n",
"import torch.nn as nn\n",
"import torch.nn.functional as F\n",
"import torch.optim as optim\n",
@@ -173,6 +188,7 @@
"def train_model(model, optimizer, data_loader, entity, round_number, log=False):\n",
" # Helper function to train the model\n",
" train_loss = 0\n",
" log_interval = 20\n",
" model.train()\n",
" for batch_idx, (X, y) in enumerate(data_loader):\n",
" optimizer.zero_grad()\n",
@@ -185,8 +201,7 @@
"\n",
" train_loss += loss.item() * len(X)\n",
" if batch_idx % log_interval == 0 and log:\n",
" print(\n",
" \"{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}\".format(\n",
" print(\"{:<20} Train Epoch: {:<3} [{:<3}/{:<4} ({:<.0f}%)] Loss: {:<.6f}\".format(\n",
" entity,\n",
" round_number,\n",
" batch_idx * len(X),\n",
@@ -217,6 +232,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"watermark_dir = \"./files/watermark-dataset/MWAFFLE/\"\n",
"\n",
"\n",
@@ -401,6 +418,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"from copy import deepcopy\n",
"\n",
"from openfl.experimental.interface import FLSpec, Aggregator, Collaborator\n",
@@ -448,6 +467,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"class FederatedFlow_MNIST_Watermarking(FLSpec):\n",
" \"\"\"\n",
" This Flow demonstrates Watermarking on a Deep Learning Model in Federated Learning\n",
@@ -576,7 +597,7 @@
" self.optimizer,\n",
" self.train_loader,\n",
" \"<Collab: {:<20}\".format(self.input + \">\"),\n",
" self.round_number,\n",
" self.round_number if self.round_number is not None else 0,\n",
" log=True,\n",
" )\n",
"\n",
@@ -696,6 +717,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"# Set random seed\n",
"random_seed = 42\n",
"torch.manual_seed(random_seed)\n",
@@ -740,6 +763,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"def callable_to_initialize_aggregator_private_attributes(watermark_data, batch_size):\n",
" return {\n",
" \"watermark_data_loader\": torch.utils.data.DataLoader(\n",
@@ -765,6 +790,7 @@
" \"Bangalore\",\n",
" \"New Delhi\",\n",
"]\n",
"n_collaborators = len(collaborator_names)\n",
"\n",
"def callable_to_initialize_collaborator_private_attributes(index, n_collaborators, batch_size, train_dataset, test_dataset):\n",
" train = deepcopy(train_dataset)\n",
@@ -786,7 +812,7 @@
" Collaborator(\n",
" name=collaborator_name, num_cpus=0, num_gpus=0,\n",
" private_attributes_callable=callable_to_initialize_collaborator_private_attributes,\n",
" index=idx, n_collaborators=len(collaborator_names),\n",
" index=idx, n_collaborators=n_collaborators,\n",
" train_dataset=mnist_train, test_dataset=mnist_test, batch_size=64\n",
" )\n",
" )\n",
@@ -811,6 +837,8 @@
"metadata": {},
"outputs": [],
"source": [
"#| export\n",
"\n",
"model = None\n",
"best_model = None\n",
"optimizer = None\n",
@@ -833,15 +861,36 @@
" print(f\"Starting round {i}...\")\n",
" flflow.run()\n",
" flflow.round_number += 1\n",
" aggregated_model_accuracy = flflow.aggregated_model_accuracy\n",
" if aggregated_model_accuracy > top_model_accuracy:\n",
" print(\n",
" f\"\\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\\n\"\n",
" )\n",
" top_model_accuracy = aggregated_model_accuracy\n",
" best_model = flflow.model\n",
" if hasattr(flflow, \"aggregated_model_accuracy\"):\n",
" aggregated_model_accuracy = flflow.aggregated_model_accuracy\n",
" if aggregated_model_accuracy > top_model_accuracy:\n",
" print(\n",
" f\"\\nAccuracy improved to {aggregated_model_accuracy} for round {i}, Watermark Acc: {flflow.watermark_retrain_validation_score}\\n\"\n",
" )\n",
" top_model_accuracy = aggregated_model_accuracy\n",
" best_model = flflow.model\n",
"\n",
" torch.save(best_model.state_dict(), \"watermarked_mnist_model.pth\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "21c98aae",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"from openfl.experimental.workspace_builder import WorkspaceBuilder\n",
"\n",
"notebook_path = \"./Workflow_Interface_301_MNIST_Watermarking.ipynb\"\n",
"template_workspace_path = f\"/home/{os.environ['USER']}/env-workspace-creator/openfl/openfl-workspace/experimental/template_workspace/\"\n",
"\n",
"workspace_builder = WorkspaceBuilder(notebook_path, \"experiment\", f\"/home/{os.environ['USER']}\", template_workspace_path)\n",
"\n",
"torch.save(best_model.state_dict(), \"watermarked_mnist_model.pth\")"
"workspace_builder.generate_requirements()\n",
"workspace_builder.generate_plan_yaml()\n",
"workspace_builder.generate_data_yaml()"
]
},
{
@@ -882,7 +931,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.16"
"version": "3.8.18"
},
"vscode": {
"interpreter": {
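Both notebooks shard MNIST across collaborators through a private-attribute callable whose body is collapsed in this diff. Purely to illustrate that pattern, here is a sketch that reuses the callable's signature from the notebooks but assumes torchvision-style datasets exposing `.data` and `.targets` tensors; it is not a reproduction of the notebooks' actual code.

from copy import deepcopy

from torch.utils.data import DataLoader


def callable_to_initialize_collaborator_private_attributes(
    index, n_collaborators, batch_size, train_dataset, test_dataset
):
    # Give collaborator `index` every n-th sample, then wrap the shards in DataLoaders.
    train = deepcopy(train_dataset)
    test = deepcopy(test_dataset)
    train.data = train.data[index::n_collaborators]
    train.targets = train.targets[index::n_collaborators]
    test.data = test.data[index::n_collaborators]
    test.targets = test.targets[index::n_collaborators]
    return {
        "train_loader": DataLoader(train, batch_size=batch_size, shuffle=True),
        "test_loader": DataLoader(test, batch_size=batch_size, shuffle=True),
    }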
24 changes: 11 additions & 13 deletions openfl-workspace/experimental/template_workspace/plan/plan.yaml
@@ -1,22 +1,20 @@
# Copyright (C) 2020-2021 Intel Corporation
# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you.

aggregator :
defaults : plan/defaults/aggregator.yaml
template : openfl.experimental.component.Aggregator
settings : {}


collaborator :
defaults : plan/defaults/collaborator.yaml
template : openfl.experimental.component.Collaborator
settings : {}
aggregator:
defaults: plan/defaults/aggregator.yaml
template: openfl.experimental.component.Aggregator
settings:
rounds_to_train: 1

collaborator:
defaults: plan/defaults/collaborator.yaml
template: openfl.experimental.component.Collaborator
settings: {}

federated_flow:
template: src.flow.TinyImageNetFlow
settings: {}


network :
defaults : plan/defaults/network.yaml
network:
defaults: plan/defaults/network.yaml
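Once `WorkspaceBuilder.generate_plan_yaml()` and `generate_data_yaml()` have run, the exported plan can be compared against this template. The snippet below is only a sketch of such a check: the workspace location and the assumption that both generated files live under `plan/` (the usual OpenFL workspace layout) are illustrative, not taken from this commit.

import yaml  # requires PyYAML
from pathlib import Path

workspace_dir = Path.home() / "generated_workspace"  # hypothetical output location

plan = yaml.safe_load((workspace_dir / "plan" / "plan.yaml").read_text())
print("Aggregator template:", plan["aggregator"]["template"])
print("Rounds to train    :", plan["aggregator"]["settings"].get("rounds_to_train"))
print("Federated flow     :", plan["federated_flow"]["template"])

data = yaml.safe_load((workspace_dir / "plan" / "data.yaml").read_text())
print("Configured entries :", list(data) if data else "none")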
