Noninteractive run of notebooks and flake8 checks (#107)
* start of automatic and update occ. sens.

* current progress

* current progress

* current progress

* current progress

* vae

* models ensemble

* layer_wise_learning_rate

* gan

* 3d_classification

* add 3d_segmentation as well as flake

* transform speed

* autopep8

* update runner

* flake8 support added

* max_num_epochs->max_epochs

* check that max_epochs exists unless not expected

* uncomment executing notebook

* check pip install

* ignore temp files

* no noqa for indented import monai

* don't check for indented import monai

* flake8 changes

* magic pip installs

* autofixes

* use ! instead of % for pip install

* so far

* current progress. add black, isort, autoflake

* current progress

* update class lung lesion notebook

* class lung lesion

* current progress

* current progress

* finished

* finished

* remove pip install of pinned pytorch version

* = list() to = []

* [DLMED] fix dyunet notebook issue

Signed-off-by: Nic Ma <nma@nvidia.com>

* correct faulty import

* pep8 for dynunet

* all working

* add missing quotations

* remove personal file path

* remove NiftiDataset

* don't remove EOL whitespace from comments

* 2d_classification

* re-add spaces

* spaces

* last spaces

* final changes

* notification at end

* notification on exit

* add github action

* data subfolder

* make folder if necessary

* add flake8 to requirements

* change test name

Co-authored-by: Nic Ma <nma@nvidia.com>
rijobro and Nic-Ma authored Jan 27, 2021
1 parent 49e78ff commit 59b57a4
Showing 36 changed files with 4,673 additions and 2,373 deletions.
39 changes: 39 additions & 0 deletions .github/workflows/pep8.yml
@@ -0,0 +1,39 @@
name: build

on:
# quick tests for every pull request
push:
branches:
- master
pull_request:

jobs:
# caching of these jobs:
# - docker-20-03-py3-pip- (shared)
# - ubuntu py37 pip-
# - os-latest-pip- (shared)
pep8:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.8
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: cache weekly timestamp
id: pip-cache
run: |
echo "::set-output name=datew::$(date '+%Y-%V')"
- name: cache for pip
uses: actions/cache@v2
id: cache
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ steps.pip-cache.outputs.datew }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip wheel
python -m pip install -r requirements.txt
- name: PEP8 check
run: |
$(pwd)/runner.sh --no-run
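
The PEP8 job delegates to the repository's runner.sh with --no-run, which lints the notebooks without executing them. The script's actual contents are not shown in this diff; as a rough sketch only (assuming jupyter and flake8 are installed, as requirements.txt now provides), a noninteractive flake8 check over one converted notebook could look like this in Python:

import subprocess
import sys

def flake8_check(notebook):
    # Convert the notebook to a plain Python script on stdout
    # (this mirrors, but is not, what runner.sh actually does).
    script = subprocess.run(
        ["jupyter", "nbconvert", "--to", "script", "--stdout", notebook],
        check=True, capture_output=True, text=True,
    ).stdout
    # flake8 reads the converted source from stdin when passed "-".
    return subprocess.run(["flake8", "-"], input=script, text=True).returncode == 0

if __name__ == "__main__":
    sys.exit(0 if flake8_check(sys.argv[1]) else 1)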
5 changes: 5 additions & 0 deletions .gitignore
@@ -110,6 +110,7 @@ temp/
.idea/

*~
._*

# Remove .pyre temporary config files
.pyre
@@ -133,3 +134,7 @@ tests/testing_data/*Hippocampus*

# saved networks
*.pth

# Ignore torch saves
*/torch/runs
logs
102 changes: 50 additions & 52 deletions 2d_classification/mednist_tutorial.ipynb
@@ -31,21 +31,8 @@
"metadata": {},
"outputs": [],
"source": [
"try:\n",
" import monai\n",
"except ImportError:\n",
" %pip install -q \"monai[pillow, tqdm]\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"scrolled": true,
"tags": []
},
"outputs": [],
"source": [
"!python -c \"import monai\" || pip install -q \"monai[pillow, tqdm]\"\n",
"!python -c \"import matplotlib\" || pip install -q matplotlib\n",
"%matplotlib inline"
]
},
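
This hunk replaces the try/except install cells with shell one-liners: !python -c "import monai" || pip install -q "monai[pillow, tqdm]" attempts the import and runs pip only when it fails, which keeps working when the notebook is executed noninteractively. For reference, a plain-Python sketch of the same guard (the notebooks themselves use the shell form above) would be:

import importlib.util
import subprocess
import sys

# Install monai only if it cannot already be imported; a sketch of the
# shell guard used in the notebook cell above.
if importlib.util.find_spec("monai") is None:
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "-q", "monai[pillow, tqdm]"])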
@@ -107,12 +94,12 @@
"import os\n",
"import shutil\n",
"import tempfile\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import PIL\n",
"import torch\n",
"import numpy as np\n",
"from sklearn.metrics import classification_report\n",
"\n",
"from monai.apps import download_and_extract\n",
"from monai.config import print_config\n",
"from monai.metrics import compute_roc_auc\n",
@@ -154,7 +141,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"/home/rbrown/data/MONAI\n"
"/workspace/data/medical\n"
]
}
],
@@ -243,7 +230,8 @@
}
],
"source": [
"class_names = sorted(x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x)))\n",
"class_names = sorted(x for x in os.listdir(data_dir)\n",
" if os.path.isdir(os.path.join(data_dir, x)))\n",
"num_class = len(class_names)\n",
"image_files = [\n",
" [\n",
@@ -331,12 +319,12 @@
"source": [
"val_frac = 0.1\n",
"test_frac = 0.1\n",
"train_x = list()\n",
"train_y = list()\n",
"val_x = list()\n",
"val_y = list()\n",
"test_x = list()\n",
"test_y = list()\n",
"train_x = []\n",
"train_y = []\n",
"val_x = []\n",
"val_y = []\n",
"test_x = []\n",
"test_y = []\n",
"\n",
"for i in range(num_total):\n",
" rann = np.random.random()\n",
@@ -350,7 +338,9 @@
" train_x.append(image_files_list[i])\n",
" train_y.append(image_class[i])\n",
"\n",
"print(f\"Training count: {len(train_x)}, Validation count: {len(val_x)}, Test count: {len(test_x)}\")"
"print(\n",
" f\"Training count: {len(train_x)}, Validation count: \"\n",
" f\"{len(val_x)}, Test count: {len(test_x)}\")"
]
},
{
@@ -378,7 +368,8 @@
" ]\n",
")\n",
"\n",
"val_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()])"
"val_transforms = Compose(\n",
" [LoadImage(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()])"
]
},
{
@@ -401,13 +392,16 @@
"\n",
"\n",
"train_ds = MedNISTDataset(train_x, train_y, train_transforms)\n",
"train_loader = torch.utils.data.DataLoader(train_ds, batch_size=300, shuffle=True, num_workers=10)\n",
"train_loader = torch.utils.data.DataLoader(\n",
" train_ds, batch_size=300, shuffle=True, num_workers=10)\n",
"\n",
"val_ds = MedNISTDataset(val_x, val_y, val_transforms)\n",
"val_loader = torch.utils.data.DataLoader(val_ds, batch_size=300, num_workers=10)\n",
"val_loader = torch.utils.data.DataLoader(\n",
" val_ds, batch_size=300, num_workers=10)\n",
"\n",
"test_ds = MedNISTDataset(test_x, test_y, val_transforms)\n",
"test_loader = torch.utils.data.DataLoader(test_ds, batch_size=300, num_workers=10)"
"test_loader = torch.utils.data.DataLoader(\n",
" test_ds, batch_size=300, num_workers=10)"
]
},
{
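
The MedNISTDataset class instantiated above is defined in a cell collapsed out of this diff. A minimal sketch of such an index-based dataset, reconstructed here rather than copied from the notebook, pairs each image file with its label and applies the transforms on access:

import torch

class MedNISTDataset(torch.utils.data.Dataset):
    # Sketch only; the notebook's actual definition is in a collapsed cell.
    def __init__(self, image_files, labels, transforms):
        self.image_files = image_files
        self.labels = labels
        self.transforms = transforms

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, index):
        return self.transforms(self.image_files[index]), self.labels[index]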
@@ -419,7 +413,7 @@
"1. Set learning rate for how much the model is updated per batch.\n",
"1. Set total epoch number, as we have shuffle and random transforms, so the training data of every epoch is different. \n",
"And as this is just a get start tutorial, let's just train 4 epochs. \n",
"If train 10 epochs, the model can achieve 100% accuracy on test dataset.\n",
"If train 10 epochs, the model can achieve 100% accuracy on test dataset. \n",
"1. Use DenseNet from MONAI and move to GPU devide, this DenseNet can support both 2D and 3D classification tasks.\n",
"1. Use Adam optimizer."
]
@@ -431,10 +425,11 @@
"outputs": [],
"source": [
"device = torch.device(\"cuda:0\")\n",
"model = densenet121(spatial_dims=2, in_channels=1, out_channels=num_class).to(device)\n",
"model = densenet121(spatial_dims=2, in_channels=1,\n",
" out_channels=num_class).to(device)\n",
"loss_function = torch.nn.CrossEntropyLoss()\n",
"optimizer = torch.optim.Adam(model.parameters(), 1e-5)\n",
"max_num_epochs = 4\n",
"max_epochs = 4\n",
"val_interval = 1"
]
},
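
Several commit messages above mention renaming max_num_epochs to max_epochs and checking that max_epochs exists unless a notebook is not expected to have one; a uniform name lets the runner find and shrink the epoch count for quick test executions. As an illustration only (not the actual runner.sh logic), such a check could be written as:

import json

def defines_max_epochs(notebook_path):
    # Hypothetical helper: return True if any code cell mentions
    # max_epochs, sketching the kind of check the runner performs.
    with open(notebook_path) as f:
        nb = json.load(f)
    return any(
        "max_epochs" in line
        for cell in nb["cells"]
        if cell["cell_type"] == "code"
        for line in cell["source"]
    )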
@@ -1130,12 +1125,12 @@
"source": [
"best_metric = -1\n",
"best_metric_epoch = -1\n",
"epoch_loss_values = list()\n",
"metric_values = list()\n",
"epoch_loss_values = []\n",
"metric_values = []\n",
"\n",
"for epoch in range(max_num_epochs):\n",
"for epoch in range(max_epochs):\n",
" print(\"-\" * 10)\n",
" print(f\"epoch {epoch + 1}/{max_num_epochs}\")\n",
" print(f\"epoch {epoch + 1}/{max_epochs}\")\n",
" model.train()\n",
" epoch_loss = 0\n",
" step = 0\n",
@@ -1148,7 +1143,9 @@
" loss.backward()\n",
" optimizer.step()\n",
" epoch_loss += loss.item()\n",
" print(f\"{step}/{len(train_ds) // train_loader.batch_size}, train_loss: {loss.item():.4f}\")\n",
" print(\n",
" f\"{step}/{len(train_ds) // train_loader.batch_size}, \"\n",
" f\"train_loss: {loss.item():.4f}\")\n",
" epoch_len = len(train_ds) // train_loader.batch_size\n",
" epoch_loss /= step\n",
" epoch_loss_values.append(epoch_loss)\n",
@@ -1166,22 +1163,27 @@
" )\n",
" y_pred = torch.cat([y_pred, model(val_images)], dim=0)\n",
" y = torch.cat([y, val_labels], dim=0)\n",
" auc_metric = compute_roc_auc(y_pred, y, to_onehot_y=True, softmax=True)\n",
" auc_metric = compute_roc_auc(\n",
" y_pred, y, to_onehot_y=True, softmax=True)\n",
" metric_values.append(auc_metric)\n",
" acc_value = torch.eq(y_pred.argmax(dim=1), y)\n",
" acc_metric = acc_value.sum().item() / len(acc_value)\n",
" if auc_metric > best_metric:\n",
" best_metric = auc_metric\n",
" best_metric_epoch = epoch + 1\n",
" torch.save(model.state_dict(), os.path.join(root_dir, \"best_metric_model.pth\"))\n",
" torch.save(model.state_dict(), os.path.join(\n",
" root_dir, \"best_metric_model.pth\"))\n",
" print(\"saved new best metric model\")\n",
" print(\n",
" f\"current epoch: {epoch + 1} current AUC: {auc_metric:.4f}\"\n",
" f\" current accuracy: {acc_metric:.4f} best AUC: {best_metric:.4f}\"\n",
" f\" current accuracy: {acc_metric:.4f}\"\n",
" f\" best AUC: {best_metric:.4f}\"\n",
" f\" at epoch: {best_metric_epoch}\"\n",
" )\n",
"\n",
"print(f\"train completed, best_metric: {best_metric:.4f} at epoch: {best_metric_epoch}\")"
"print(\n",
" f\"train completed, best_metric: {best_metric:.4f} \"\n",
" f\"at epoch: {best_metric_epoch}\")"
]
},
{
@@ -1243,10 +1245,11 @@
"metadata": {},
"outputs": [],
"source": [
"model.load_state_dict(torch.load(os.path.join(root_dir, \"best_metric_model.pth\")))\n",
"model.load_state_dict(torch.load(\n",
" os.path.join(root_dir, \"best_metric_model.pth\")))\n",
"model.eval()\n",
"y_true = list()\n",
"y_pred = list()\n",
"y_true = []\n",
"y_pred = []\n",
"with torch.no_grad():\n",
" for test_data in test_loader:\n",
" test_images, test_labels = (\n",
@@ -1288,13 +1291,8 @@
}
],
"source": [
"try:\n",
" import sklean\n",
"except ImportError:\n",
" %pip install -qU sklearn\n",
"from sklearn.metrics import classification_report\n",
"\n",
"print(classification_report(y_true, y_pred, target_names=class_names, digits=4))"
"print(classification_report(\n",
" y_true, y_pred, target_names=class_names, digits=4))"
]
},
{