diff --git a/.github/.stale.yml b/.github/.stale.yml
new file mode 100644
index 0000000..4beb2b3
--- /dev/null
+++ b/.github/.stale.yml
@@ -0,0 +1,13 @@
+# Number of days of inactivity before an issue becomes stale
+daysUntilStale: 60
+# Number of days of inactivity before a stale issue is closed
+daysUntilClose: 7
+# Label to use when marking an issue as stale
+staleLabel: wontfix
+# Comment to post when marking an issue as stale. Set to `false` to disable
+markComment: >
+ This issue has been automatically marked as stale because it has not had
+ recent activity. It will be closed if no further activity occurs. Thank you
+ for your contributions.
+# Comment to post when closing a stale issue. Set to `false` to disable
+closeComment: false
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
new file mode 100644
index 0000000..92f8184
--- /dev/null
+++ b/.github/workflows/build.yml
@@ -0,0 +1,55 @@
+name: build
+
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - master
+ - develop
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ timeout-minutes: 15
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v2
+ with:
+ python-version: 3.8
+
+ - name: Install poetry
+ run: |
+ pip install -U pip
+ curl -sSL "https://install.python-poetry.org" | python -
+ echo "${HOME}/.poetry/bin" >> $GITHUB_PATH
+
+ - name: Set up cache
+ uses: actions/cache@v2
+ with:
+ path: .venv
+ key: venv-3.8-${{ hashFiles('poetry.lock') }}
+ - name: Install dependencies
+ run: |
+ poetry config virtualenvs.in-project true
+ poetry run pip install -U pip
+ poetry install
+
+ - name: Check codestyle
+ run: |
+ poetry run flake8 --show-source --statistics --count -- .
+ poetry run isort --check-only -- .
+ # poetry run mypy -- .
+ poetry run doc8 -q -- docs
+
+ # - name: Unit tests
+ # run: |
+ # poetry run pytest .
+
+ - name: Check dependencies compatibility
+ run: |
+ poetry run poetry check
+ poetry run pip check
diff --git a/.github/workflows/pipeline.yml b/.github/workflows/pipeline.yml
index afc53ed..f0b58a7 100644
--- a/.github/workflows/pipeline.yml
+++ b/.github/workflows/pipeline.yml
@@ -7,41 +7,46 @@ on:
pull_request:
branches:
- master
+ - develop
jobs:
- build:
+ build:
runs-on: ubuntu-latest
timeout-minutes: 60
+ strategy:
+ matrix:
+ python-version: [3.8]
steps:
- uses: actions/checkout@v2
- - name: Set up Python 3.8
+ - name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
- python-version: 3.8
+ python-version: ${{ matrix.python-version }}
- - name: Get pip cache dir
- id: pip-cache
+ - name: Install poetry
run: |
- echo "::set-output name=dir::$(pip cache dir)"
+ pip install -U pip
+ curl -sSL "https://install.python-poetry.org" | python -
+ echo "${HOME}/.poetry/bin" >> $GITHUB_PATH
+
- name: Set up cache
uses: actions/cache@v2
with:
- path: ${{ steps.pip-cache.outputs.dir }}
- key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
- restore-keys: |
- ${{ runner.os }}-pip-
+ path: .venv
+ key: venv-${{ matrix.python-version }}-${{ hashFiles('poetry.lock') }}
- name: Install dependencies
run: |
- pip install -r requirements.txt
- python setup.py install
+ poetry config virtualenvs.in-project true
+ poetry run pip install -U pip
+ poetry install
- name: Cache data
id: cache-div2k
uses: actions/cache@v2
with:
path: data/*.zip
- key: ${{ runner.os }}-data
+ key: data-div2k
- name: Download DIV2K dataset
if: steps.cache-div2k.outputs.cache-hit != 'true'
run: |
@@ -50,10 +55,6 @@ jobs:
wget http://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_valid_HR.zip -P ./data/
wget https://data.vision.ee.ethz.ch/cvl/DIV2K/DIV2K_valid_LR_bicubic_X4.zip -P ./data/
- - name: Check pipeline - phase 1 (supervised)
- run: |
- catalyst-dl run -C experiment/config_supervised.yml --check --stages/data_params/batch_size=2:int
-
- - name: Check pipeline - phase 2(GAN)
+ - name: Check pipeline
run: |
- catalyst-dl run -C experiment/config_gan.yml --check --stages/data_params/batch_size=2:int
+ poetry run catalyst-dl run -C tests/pipeline/config.yml --check
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 6ec71db..b80f037 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -6,5 +6,6 @@ sphinx:
python:
version: 3.8
install:
+ - method: pip
+ path: .
- requirements: docs/requirements.txt
- - requirements: requirements.txt
diff --git a/README.md b/README.md
index 005b07a..369f220 100644
--- a/README.md
+++ b/README.md
@@ -19,29 +19,22 @@ Key points of ESRGAN:
* `Catalyst` as pipeline runner for deep learning tasks. This new and rapidly developing [library](https://github.com/catalyst-team/catalyst)
can significantly reduce the amount of boilerplate code. If you are familiar with the TensorFlow ecosystem, you can think of Catalyst
- as Keras for PyTorch. This framework is integrated with logging systems such as the well-known [TensorBoard](https://www.tensorflow.org/tensorboard).
-* `Pytorch`, `torchvision`, and `PIQ` as main frameworks for deep learning.
-* `Albumentations` for data preprocessing.
+ as Keras for PyTorch. This framework is integrated with logging systems such as the well-known [TensorBoard](https://www.tensorflow.org/tensorboard);
+* `PyTorch` and `torchvision` as main frameworks for deep learning;
+* `Albumentations` and `PIQ` for data processing.
## Quick Start
### Setup environment
-`esrgan` requires python >= 3.8. The [requirements.txt](requirements.txt) file can be used to install the necessary packages.
-
-```
-git clone https://github.com/leverxgroup/esrgan.git
-pip install ./esrgan
+```bash
+pip install git+https://github.com/leverxgroup/esrgan.git
```
### Run an experiment
+```bash
+catalyst-dl run -C esrgan/config.yml --benchmark
```
-# step 1 - supervised training of the model
-catalyst-dl run -C esrgan/experiment/config_supervised.yml --benchmark
-
-# step 2 - use weights from step 1 to train model using GAN approach
-catalyst-dl run -C esrgan/experiment/config_gan.yml --benchmark
-```
-where `esrgan/experiment/config.yml` is a path to the [config](experiment/config.yml) file.
+where `esrgan/config.yml` is the path to the [config](config.yml) file.
## Results
Some example results of the ESRGAN model trained on the [DIV2K](https://data.vision.ee.ethz.ch/cvl/DIV2K) dataset:
@@ -52,5 +45,8 @@ Some examples of work of ESRGAN model trained on [DIV2K](https://data.vision.ee.
*(image comparison table: LR (low resolution) | ESRGAN (ours) | HR (high resolution); the image crops are in `docs/_static/`)*
+## Documentation
+Full documentation for the project is available at https://esrgan.readthedocs.io/
+
## License
`esrgan` is released under a CC BY-NC-ND 4.0 license. See [LICENSE](LICENSE) for additional details.
diff --git a/config.yml b/config.yml
new file mode 100644
index 0000000..3f52e97
--- /dev/null
+++ b/config.yml
@@ -0,0 +1,309 @@
+shared:
+ upscale: &upscale 4 # 2, 4, 8
+ patch_size: &patch_size 128 # 40, 64, 96, 128, 192
+
+model:
+ _key_value: true
+
+ &generator_model generator:
+ _target_: esrgan.models.EncoderDecoderNet
+ encoder:
+ _target_: esrgan.models.ESREncoder
+ in_channels: &num_channels 3
+ out_channels: &latent_channels 64
+ num_basic_blocks: 16
+ growth_channels: 32
+ activation: &activation
+ _mode_: partial
+ _target_: torch.nn.LeakyReLU
+ negative_slope: 0.2
+ inplace: true
+ residual_scaling: 0.2
+ decoder:
+ _target_: esrgan.models.ESRNetDecoder
+ in_channels: *latent_channels
+ out_channels: *num_channels
+ scale_factor: *upscale
+ activation: *activation
+
+ &discriminator_model discriminator:
+ _target_: esrgan.models.VGGConv
+ encoder:
+ _target_: esrgan.models.StridedConvEncoder
+ pool:
+ _target_: catalyst.contrib.layers.AdaptiveAvgPool2d
+ output_size: [7,7]
+ head:
+ _target_: esrgan.models.LinearHead
+ in_channels: 25088 # 512 * (7x7)
+ out_channels: 1
+ latent_channels: [1024]
+
+args:
+ logdir: logs
+
+runner:
+ _target_: esrgan.runner.GANConfigRunner
+ generator_key: *generator_model
+ discriminator_key: *discriminator_model
+
+stages:
+ stage1_supervised:
+ num_epochs: 10000
+
+ loaders: &loaders
+ train: &train_loader
+ _target_: torch.utils.data.DataLoader
+ dataset:
+ _target_: torch.utils.data.ConcatDataset
+ datasets:
+ - &div2k_dataset
+ _target_: esrgan.datasets.DIV2KDataset
+ root: data
+ train: true
+ target_type: bicubic_X4
+ patch_size: [*patch_size,*patch_size]
+ transform:
+ _target_: albumentations.Compose
+ transforms:
+ - &spatial_transforms
+ _target_: albumentations.Compose
+ transforms:
+ - _target_: albumentations.OneOf
+ transforms:
+ - _target_: albumentations.Flip
+ p: 0.75 # p = 1/4 (vflip) + 1/4 (hflip) + 1/4 (flip)
+ - _target_: albumentations.Transpose
+ p: 0.25 # p = 1/4
+ p: 0.5
+ additional_targets:
+ real_image: image
+ - &hard_transforms
+ _target_: albumentations.Compose
+ transforms:
+ - _target_: albumentations.CoarseDropout
+ max_holes: 8
+ max_height: 2
+ max_width: 2
+ - _target_: albumentations.ImageCompression
+ quality_lower: 65
+ p: 0.25
+ - &post_transforms
+ _target_: albumentations.Compose
+ transforms:
+ - _target_: albumentations.Normalize
+ mean: 0
+ std: 1
+ - _target_: albumentations.ToTensorV2
+ additional_targets:
+ real_image: image
+ low_resolution_image_key: image
+ high_resolution_image_key: real_image
+ download: true
+
+ - &flickr2k_dataset
+ << : [*div2k_dataset] # Flickr2K with the same params as in `DIV2KDataset`
+ _target_: esrgan.datasets.Flickr2KDataset
+ batch_size: 16
+ shuffle: true
+ num_workers: 8
+ pin_memory: true
+ drop_last: true
+
+ valid:
+ << : [*train_loader]
+ dataset: # redefine dataset to use only DIV2K
+ << : [*div2k_dataset]
+ train: false
+ transform: *post_transforms
+ batch_size: 1
+ drop_last: false
+
+ criterion: &criterions
+ content_loss:
+ # `torch.nn.L1Loss` | `torch.nn.MSELoss`
+ _target_: torch.nn.L1Loss
+
+ optimizer:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.Adam
+ lr: 0.0002
+ weight_decay: 0.0
+ _model: *generator_model
+
+ scheduler:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.lr_scheduler.StepLR
+ step_size: 500
+ gamma: 0.5
+ _optimizer: generator
+
+ callbacks: &callbacks
+ psnr_metric:
+ _target_: catalyst.callbacks.FunctionalMetricCallback
+ metric_fn:
+ _target_: piq.psnr
+ data_range: 1.0
+ reduction: mean
+ convert_to_greyscale: false
+ input_key: real_image
+ target_key: fake_image
+ metric_key: psnr
+ ssim_metric:
+ _target_: catalyst.callbacks.FunctionalMetricCallback
+ metric_fn:
+ _target_: piq.ssim
+ kernel_size: 11
+ kernel_sigma: 1.5
+ data_range: 1.0
+ reduction: mean
+ k1: 0.01
+ k2: 0.03
+ input_key: real_image
+ target_key: fake_image
+ metric_key: ssim
+
+ loss_content:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: real_image
+ target_key: fake_image
+ metric_key: loss_content
+ criterion_key: content_loss
+
+ optimizer_generator:
+ _target_: catalyst.callbacks.OptimizerCallback
+ metric_key: loss_content
+ model_key: *generator_model
+ optimizer_key: generator
+ grad_clip_fn: &grad_clip_fn
+ _mode_: partial
+ _target_: torch.nn.utils.clip_grad_value_
+ clip_value: 5.0
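+        # `_mode_: partial` makes the config loader (hydra-slayer) build a
+        # partial function instead of calling it, so the callback can apply
+        # it to the model parameters at optimization time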
+
+ scheduler_generator:
+ _target_: catalyst.callbacks.SchedulerCallback
+ scheduler_key: generator
+ loader_key: valid
+ metric_key: loss_content
+
+ stage2_gan:
+ num_epochs: 8000
+
+ loaders:
+ << : [*loaders]
+ train:
+ << : [*train_loader]
+ dataset:
+ << : [*div2k_dataset]
+ transform:
+ _target_: albumentations.Compose
+ transforms:
+ - *spatial_transforms
+ - *post_transforms
+ batch_size: 16
+
+ criterion:
+ << : [*criterions]
+
+ perceptual_loss:
+ _target_: esrgan.nn.PerceptualLoss
+ layers:
+ conv5_4: 1.0
+
+ adversarial_generator_loss:
+ # `esrgan.nn.RelativisticAdversarialLoss` | `esrgan.nn.AdversarialLoss`
+ _target_: &adversarial_criterion esrgan.nn.RelativisticAdversarialLoss
+ mode: generator
+ adversarial_discriminator_loss:
+ _target_: *adversarial_criterion
+ mode: discriminator
+
+ optimizer:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.AdamW
+ lr: 0.0001
+ weight_decay: 0.0
+ _model: *generator_model
+
+ discriminator:
+ _target_: torch.optim.AdamW
+ lr: 0.0001
+ weight_decay: 0.0
+ _model: *discriminator_model
+
+ scheduler:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.lr_scheduler.MultiStepLR
+ milestones: &scheduler_milestones [1000,2000,4000,6000]
+ gamma: 0.5
+ _optimizer: generator
+
+ discriminator:
+ _target_: torch.optim.lr_scheduler.MultiStepLR
+ milestones: *scheduler_milestones
+ gamma: 0.5
+ _optimizer: discriminator
+
+ callbacks:
+ # re-use `psnr_metric`, `ssim_metric`, and `loss_content` callbacks
+ << : [*callbacks]
+
+ loss_perceptual:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: real_image
+ target_key: fake_image
+ metric_key: loss_perceptual
+ criterion_key: perceptual_loss
+ loss_adversarial:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: g_fake_logits # first argument of criterion is fake_logits
+ target_key: g_real_logits # second argument of criterion is real_logits
+ metric_key: loss_adversarial
+ criterion_key: adversarial_generator_loss
+ loss_generator:
+ _target_: catalyst.callbacks.MetricAggregationCallback
+ metric_key: &generator_loss loss_generator
+ metrics:
+ loss_content: 0.01
+ loss_perceptual: 1.0
+ loss_adversarial: 0.005
+ mode: weighted_sum
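+        # i.e., loss_generator = 0.01 * loss_content + 1.0 * loss_perceptual
+        # + 0.005 * loss_adversarial, the weighting used in the ESRGAN paper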
+
+ loss_discriminator:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: d_fake_logits
+ target_key: d_real_logits
+ metric_key: &discriminator_loss loss_discriminator
+ criterion_key: adversarial_discriminator_loss
+
+ optimizer_generator:
+ _target_: catalyst.callbacks.OptimizerCallback
+ metric_key: *generator_loss
+ model_key: *generator_model
+ optimizer_key: generator
+ grad_clip_fn: *grad_clip_fn
+ optimizer_discriminator:
+ _target_: catalyst.callbacks.OptimizerCallback
+ metric_key: *discriminator_loss
+ model_key: *discriminator_model
+ optimizer_key: discriminator
+ grad_clip_fn: *grad_clip_fn
+
+ scheduler_generator:
+ _target_: catalyst.callbacks.SchedulerCallback
+ scheduler_key: generator
+ loader_key: valid
+ metric_key: *generator_loss
+ scheduler_discriminator:
+ _target_: catalyst.callbacks.SchedulerCallback
+ scheduler_key: discriminator
+ loader_key: valid
+ metric_key: *discriminator_loss
diff --git a/docs/_static/abalation_study.png b/docs/_static/abalation_study.png
new file mode 100644
index 0000000..9941535
Binary files /dev/null and b/docs/_static/abalation_study.png differ
diff --git a/docs/_static/architecture.png b/docs/_static/architecture.png
index 5226a30..cca2ae7 100644
Binary files a/docs/_static/architecture.png and b/docs/_static/architecture.png differ
diff --git a/docs/_static/bn_artifacts.jpg b/docs/_static/bn_artifacts.jpg
new file mode 100644
index 0000000..d30ec0e
Binary files /dev/null and b/docs/_static/bn_artifacts.jpg differ
diff --git a/docs/_static/patch_a.png b/docs/_static/patch_a.png
new file mode 100644
index 0000000..a2ddf52
Binary files /dev/null and b/docs/_static/patch_a.png differ
diff --git a/docs/_static/patch_b.png b/docs/_static/patch_b.png
new file mode 100644
index 0000000..f885b7a
Binary files /dev/null and b/docs/_static/patch_b.png differ
diff --git a/docs/_static/qualitative_cmp_01.jpg b/docs/_static/qualitative_cmp_01.jpg
new file mode 100644
index 0000000..c523281
Binary files /dev/null and b/docs/_static/qualitative_cmp_01.jpg differ
diff --git a/docs/_static/qualitative_cmp_02.jpg b/docs/_static/qualitative_cmp_02.jpg
new file mode 100644
index 0000000..54d6bb5
Binary files /dev/null and b/docs/_static/qualitative_cmp_02.jpg differ
diff --git a/docs/_static/qualitative_cmp_03.jpg b/docs/_static/qualitative_cmp_03.jpg
new file mode 100644
index 0000000..1e19c38
Binary files /dev/null and b/docs/_static/qualitative_cmp_03.jpg differ
diff --git a/docs/_static/qualitative_cmp_04.jpg b/docs/_static/qualitative_cmp_04.jpg
new file mode 100644
index 0000000..8b76744
Binary files /dev/null and b/docs/_static/qualitative_cmp_04.jpg differ
diff --git a/docs/_static/train_deeper_neta.png b/docs/_static/train_deeper_neta.png
new file mode 100644
index 0000000..0934de9
Binary files /dev/null and b/docs/_static/train_deeper_neta.png differ
diff --git a/docs/_static/train_deeper_netb.png b/docs/_static/train_deeper_netb.png
new file mode 100644
index 0000000..b6c4f89
Binary files /dev/null and b/docs/_static/train_deeper_netb.png differ
diff --git a/docs/conf.py b/docs/conf.py
index ca830d8..583629b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -9,24 +9,40 @@
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-import datetime
+
import os
import sys
+from typing import Iterable
+
+import tomlkit
+
sys.path.insert(0, os.path.abspath("../"))
# -- Project information -----------------------------------------------------
-project = 'esrgan'
-copyright = '2020, Emerline, Inc. and its affiliates.'
-author = 'Emerline'
+def _get_project_meta():
+ with open("../pyproject.toml") as pyproject:
+ file_contents = pyproject.read()
+
+ return tomlkit.parse(file_contents)["tool"]["poetry"]
+
+
+pkg_meta = _get_project_meta()
+project = str(pkg_meta["name"])
+copyright = "2020, Emerline, Inc. and its affiliates." # noqa: WPS125
+author = "Emerline"
+
+# The short X.Y version
+version = str(pkg_meta["version"])
+# The full version, including alpha/beta/rc tags
+release = version
-## -- General configuration ---------------------------------------------------
+# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
@@ -34,7 +50,7 @@
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
- 'sphinx.ext.intersphinx',
+ "sphinx.ext.intersphinx",
]
# Add any paths that contain templates here, relative to this directory.
@@ -57,7 +73,7 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = []
+exclude_patterns: Iterable[str] = []
autodoc_inherit_docstrings = False
napoleon_google_docstring = True
@@ -76,7 +92,7 @@
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
-html_theme = "sphinx_rtd_theme"
+html_theme = "press"
html_favicon = "_static/index.ico"
# Add any paths that contain custom static files (such as style sheets) here,
@@ -84,4 +100,4 @@
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
-html_short_title = "esrgan"
+html_short_title = project
diff --git a/docs/index.rst b/docs/index.rst
index 27db55d..4a7f169 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -22,48 +22,38 @@ capable of recovering HR images from LR ones. And ESRGAN (Enhanced SRGAN) is one
Key points of ESRGAN:
- SRResNet-based architecture with residual-in-residual blocks;
-- Mixture of context, perceptual, and adversarial losses. Context and perceptual losses are used for proper image upscaling,
- while adversarial loss pushes neural network to the natural image manifold using a discriminator network
- that is trained to differentiate between the super-resolved images and original photo-realistic images.
+- Mixture of context, perceptual, and adversarial losses. Context and perceptual losses are used for proper
+ image upscaling, while adversarial loss pushes the neural network to the natural image manifold using a discriminator
+ network that is trained to differentiate between the super-resolved images and original photo-realistic images.
.. image:: ./_static/architecture.png
+ :width: 100%
Technologies
============
* `Catalyst` as pipeline runner for deep learning tasks. This new and rapidly developing `library <https://github.com/catalyst-team/catalyst>`__
- can significantly reduce the amount of boilerplate code. If you are familiar with the TensorFlow ecosystem, you can think of Catalyst
- as Keras for PyTorch. This framework is integrated with logging systems such as the well-known `TensorBoard `__.
-* `Pytorch`, `torchvision`, and `PIQ` as main frameworks for deep learning.
-* `Albumentations` for data preprocessing.
+ can significantly reduce the amount of boilerplate code. If you are familiar with the TensorFlow ecosystem,
+ you can think of Catalyst as Keras for PyTorch. This framework is integrated with logging systems
+ such as the well-known `TensorBoard <https://www.tensorflow.org/tensorboard>`__;
+* `PyTorch` and `torchvision` as main frameworks for deep learning;
+* `Albumentations` and `PIQ` for data processing.
Quick Start
===========
-Setup environment
------------------
-
-`esrgan` requires python >= 3.8. The `requirements.txt <../requirements.txt>`__ file can be used to install the necessary packages.
-
::
- git clone https://github.com/leverxgroup/esrgan.git
- pip install ./esrgan
-
-Run an experiment
------------------
-
-::
+ # step 1 - Setup environment, please check `Installation` for more info
+ pip install git+https://github.com/leverxgroup/esrgan.git
- # step 1 - supervised training of the model
- catalyst-dl run -C esrgan/experiment/config_supervised.yml --benchmark
+ # step 2 - Load / prepare config with training details
+ wget https://raw.githubusercontent.com/leverxgroup/esrgan/master/config.yml
- # step 2 - use weights from step 1 to train model using GAN approach
- catalyst-dl run -C esrgan/experiment/config_gan.yml --benchmark
-
-where `esrgan/experiment/config.yml` is a path to the `config file <../experiment/config.yml>`__.
+ # step 3 - train ESRGAN
+ catalyst-dl run -C config.yml --benchmark
Results
@@ -71,43 +61,27 @@ Results
Some example results of the ESRGAN model trained on the `DIV2K <https://data.vision.ee.ethz.ch/cvl/DIV2K>`__ dataset:
-.. |squirrel_lr| image:: ./_static/sq_crop_lr.png
- :width: 128px
- :height: 128px
-.. |squirrel_sr| image:: ./_static/sq_crop_sr_x4.png
- :width: 128px
- :height: 128px
-.. |squirrel_hr| image:: ./_static/sq_crop_hr.png
- :width: 128px
- :height: 128px
-
-.. |wolf_lr| image:: ./_static/wf_crop_lr.png
- :width: 128px
- :height: 128px
-.. |wolf_sr| image:: ./_static/wf_crop_sr_x4.png
- :width: 128px
- :height: 128px
-.. |wolf__hr| image:: ./_static/wf_crop_hr.png
- :width: 128px
- :height: 128px
-
-.. |fish_lr| image:: ./_static/fish_crop_lr.png
- :width: 128px
- :height: 128px
-.. |fish_sr| image:: ./_static/fish_crop_sr_x4.png
- :width: 128px
- :height: 128px
-.. |fish_hr| image:: ./_static/fish_crop_hr.png
- :width: 128px
- :height: 128px
-
-===================== =============== ======================
- LR (low resolution) ESRGAN (ours) HR (high resolution)
-===================== =============== ======================
- |squirrel_lr| |squirrel_sr| |squirrel_hr|
- |wolf_lr| |wolf_sr| |wolf__hr|
- |fish_lr| |fish_sr| |fish_hr|
-===================== =============== ======================
+.. table::
+ :widths: 33 33 33
+
+ +---------------------------------------+------------------------------------------+---------------------------------------+
+ | .. centered:: LR (low resolution) | .. centered:: ESRGAN (ours) | .. centered:: HR (high resolution) |
+ +=======================================+==========================================+=======================================+
+ | .. image:: ./_static/sq_crop_lr.png | .. image:: ./_static/sq_crop_sr_x4.png | .. image:: ./_static/sq_crop_hr.png |
+ | :width: 128px | :width: 128px | :width: 128px |
+ | :height: 128px | :height: 128px | :height: 128px |
+ | :align: center | :align: center | :align: center |
+ +---------------------------------------+------------------------------------------+---------------------------------------+
+ | .. image:: ./_static/wf_crop_lr.png | .. image:: ./_static/wf_crop_sr_x4.png | .. image:: ./_static/wf_crop_hr.png |
+ | :width: 128px | :width: 128px | :width: 128px |
+ | :height: 128px | :height: 128px | :height: 128px |
+ | :align: center | :align: center | :align: center |
+ +---------------------------------------+------------------------------------------+---------------------------------------+
+ | .. image:: ./_static/fish_crop_lr.png | .. image:: ./_static/fish_crop_sr_x4.png | .. image:: ./_static/fish_crop_hr.png |
+ | :width: 128px | :width: 128px | :width: 128px |
+ | :height: 128px | :height: 128px | :height: 128px |
+ | :align: center | :align: center | :align: center |
+ +---------------------------------------+------------------------------------------+---------------------------------------+
GitHub
@@ -120,18 +94,25 @@ Bugfixes and contributions are very much appreciated!
License
=======
-`esrgan` is released under a CC BY-NC-ND 4.0 license. See `LICENSE <../LICENSE>`__ for additional details about it.
+`esrgan` is released under a CC-BY-NC-ND-4.0 license. See `LICENSE `__ for additional details about it.
+
+
+.. toctree::
+ :maxdepth: 3
+ :caption: General
+ pages/install
+ pages/esrgan
.. toctree::
:maxdepth: 2
:caption: API
- pages/api/core
+ pages/api/nn
pages/api/models
- pages/api/criterions
pages/api/datasets
pages/api/utils
+ pages/api/catalyst
Indices and tables
==================
diff --git a/docs/pages/api/catalyst.rst b/docs/pages/api/catalyst.rst
new file mode 100644
index 0000000..248dcfb
--- /dev/null
+++ b/docs/pages/api/catalyst.rst
@@ -0,0 +1,26 @@
+Catalyst
+========
+
+Various features for customization/modification of `Catalyst <https://github.com/catalyst-team/catalyst>`__ pipelines, e.g. runners, metrics, callbacks:
+
+.. contents::
+ :depth: 2
+ :local:
+
+
+Runners
+-------
+
+GANRunner
+^^^^^^^^^
+
+.. autoclass:: esrgan.runner.GANRunner
+ :members:
+ :undoc-members:
+
+GANConfigRunner
+^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.runner.GANConfigRunner
+ :members:
+ :undoc-members:
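+
+
+Within the Config API, a runner is selected via the ``runner`` section of the
+config file; a sketch mirroring the ``runner`` block of this repository's
+``config.yml``:
+
+.. code-block:: yaml
+
+   runner:
+     _target_: esrgan.runner.GANConfigRunner
+     generator_key: generator
+     discriminator_key: discriminator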
diff --git a/docs/pages/api/core.rst b/docs/pages/api/core.rst
deleted file mode 100755
index 1c83027..0000000
--- a/docs/pages/api/core.rst
+++ /dev/null
@@ -1,67 +0,0 @@
-Core (Catalyst abstractions)
-============================
-
-There are 3 main abstractions in `Catalyst `__: *Experiment*, *Runner*, and *Callback*.
-
-In general, the *Experiment* knows **what** you would like to run, *Runner* contains all the logic of **how** to run the experiment,
-while *Callbacks* allow you to customize experiment run logic by using specific custom callback functions without changing neither *Experiment* nor *Runner*.
-
-.. note::
-
- To learn more about Catalyst Core concepts, please check out
-
- - :py:obj:`catalyst.core.experiment.IExperiment` (`docs `__)
- - :py:obj:`catalyst.core.runner.IRunner` (`docs `__)
- - :py:obj:`catalyst.core.callback.Callback` (`docs `__)
-
-
-Experiment
-^^^^^^^^^^
-
-*Experiment* in an abstraction that contains information about the experiment – a model, a criterion, an optimizer, a scheduler, and their hyperparameters.
-It also contains information about the data and transformations used. In other words, the Experiment knows **what** you would like to run.
-
-.. automodule:: esrgan.core.experiment
- :members:
- :undoc-members:
-
-
-GAN Runners
-^^^^^^^^^^^
-
-*Runner* is an abstraction that knows how to run an experiment. It contains all the logic of **how** to run the experiment, stages, epoch and batches.
-
-.. automodule:: esrgan.core.runner
- :members:
- :undoc-members:
-
-
-Callbacks
-^^^^^^^^^
-
-*Callback* is an abstraction that lets you customize your experiment run logic.
-To give users maximum flexibility and extensibility Catalyst supports callback execution anywhere in the training loop:
-
-.. code:: bash
-
- -- stage start
- ---- epoch start
- ------ loader start
- -------- batch start
- ---------- batch handler (Runner logic)
- -------- batch end
- ------ loader end
- ---- epoch end
- -- stage end
-
- exception – if an Exception was raised
-
-For example, to calculate ROC-AUC of the model you may use :py:func:`on_batch_end` method to gather per-batch predictions
-and :py:func:`on_loader_end` method to average those statistics.
-
-Metrics
--------
-
-.. automodule:: esrgan.callbacks.metrics
- :members:
- :undoc-members:
diff --git a/docs/pages/api/criterions.rst b/docs/pages/api/criterions.rst
deleted file mode 100755
index 2ffc2bf..0000000
--- a/docs/pages/api/criterions.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-Criterions
-==========
-
-Adversarial Loss
-^^^^^^^^^^^^^^^^
-.. automodule:: esrgan.criterions.adversarial
- :members:
- :undoc-members:
-
-Perceptual Loss
-^^^^^^^^^^^^^^^
-.. automodule:: esrgan.criterions.perceptual
- :members:
- :undoc-members:
diff --git a/docs/pages/api/datasets.rst b/docs/pages/api/datasets.rst
old mode 100755
new mode 100644
index 677e07b..daba665
--- a/docs/pages/api/datasets.rst
+++ b/docs/pages/api/datasets.rst
@@ -1,20 +1,39 @@
Datasets
========
-All datasets are subclasses of :class:`torch.utils.data.Dataset` i.e, they have ``__getitem__`` and ``__len__`` methods implemented.
-Hence, they can all be passed to a :class:`torch.utils.data.DataLoader` which can load multiple samples parallelly using ``torch.multiprocessing`` workers.
+This subpackage contains definitions of the following datasets for image super-resolution:
+
+.. contents::
+ :depth: 2
+ :local:
+
+
+All datasets are subclasses of :class:`torch.utils.data.Dataset`, i.e., they have ``__getitem__`` and ``__len__`` methods
+implemented. Hence, they can all be passed to a :class:`torch.utils.data.DataLoader` which can load multiple samples
+in parallel using ``torch.multiprocessing`` workers.
For example: ::
- div2k_data = esrgan.dataset.DIV2KDataset('path/to/div2k_root/')
+ div2k_data = esrgan.datasets.DIV2KDataset('path/to/div2k_root/')
data_loader = torch.utils.data.DataLoader(div2k_data, batch_size=4, shuffle=True)
+
DIV2K
^^^^^
-.. autoclass:: esrgan.dataset.div2k.DIV2KDataset
+
+.. autoclass:: esrgan.datasets.DIV2KDataset
:members:
+
+Flickr2K
+^^^^^^^^
+
+.. autoclass:: esrgan.datasets.Flickr2KDataset
+ :members:
+
+
Folder of Images
^^^^^^^^^^^^^^^^
-.. autoclass:: esrgan.dataset.image_folder.ImageFolderDataset
+
+.. autoclass:: esrgan.datasets.ImageFolderDataset
:members:
:undoc-members:
diff --git a/docs/pages/api/models.rst b/docs/pages/api/models.rst
old mode 100755
new mode 100644
index 2169c1b..d563d8e
--- a/docs/pages/api/models.rst
+++ b/docs/pages/api/models.rst
@@ -1,71 +1,68 @@
Models
======
-Generator
----------
+The models subpackage contains definitions of models for addressing image super-resolution tasks:
-.. automodule:: esrgan.model.generator
+.. contents::
+ :depth: 2
+ :local:
+
+
+Generators
+----------
+
+EncoderDecoderNet
+^^^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.models.EncoderDecoderNet
:members:
:undoc-members:
+
SRGAN
^^^^^
-.. automodule:: esrgan.model.module.srresnet
+.. autoclass:: esrgan.models.SRResNetEncoder
:members:
:undoc-members:
-ESRGAN
-^^^^^^
-
-.. automodule:: esrgan.model.module.esrnet
+.. autoclass:: esrgan.models.SRResNetDecoder
:members:
:undoc-members:
-Discriminator
--------------
+ESRGAN
+^^^^^^
-.. automodule:: esrgan.model.discriminator
+.. autoclass:: esrgan.models.ESREncoder
:members:
:undoc-members:
-.. automodule:: esrgan.model.module.conv
- :members:
- :undoc-members:
-.. automodule:: esrgan.model.module.linear
+
+.. autoclass:: esrgan.models.ESRNetDecoder
:members:
:undoc-members:
-Layers
-------
-
-These are the basic building block for graphs
-
-Containers
-^^^^^^^^^^
+Discriminators
+--------------
-.. automodule:: esrgan.model.module.blocks.container
- :members:
- :undoc-members:
+VGGConv
+^^^^^^^
-Residual-in-Residual Block
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-.. automodule:: esrgan.model.module.blocks.rrdb
+.. autoclass:: esrgan.models.VGGConv
:members:
:undoc-members:
-Upsample
-^^^^^^^^
+StridedConvEncoder
+^^^^^^^^^^^^^^^^^^
-.. automodule:: esrgan.model.module.blocks.upsampling
+.. autoclass:: esrgan.models.StridedConvEncoder
:members:
:undoc-members:
-Misc
-^^^^
+LinearHead
+^^^^^^^^^^
-.. automodule:: esrgan.model.module.blocks.misc
+.. autoclass:: esrgan.models.LinearHead
:members:
:undoc-members:
diff --git a/docs/pages/api/nn.rst b/docs/pages/api/nn.rst
new file mode 100644
index 0000000..88af429
--- /dev/null
+++ b/docs/pages/api/nn.rst
@@ -0,0 +1,99 @@
+NN
+==
+
+These are the basic building blocks for graphs:
+
+.. contents::
+ :depth: 2
+ :local:
+
+
+Containers
+----------
+
+ConcatInputModule
+^^^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.ConcatInputModule
+ :members:
+ :undoc-members:
+
+ResidualModule
+^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.ResidualModule
+ :members:
+ :undoc-members:
+
+
+Residual-in-Residual layers
+---------------------------
+
+ResidualDenseBlock
+^^^^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.ResidualDenseBlock
+ :members:
+ :undoc-members:
+
+ResidualInResidualDenseBlock
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.ResidualInResidualDenseBlock
+ :members:
+ :undoc-members:
+
+
+UpSampling layers
+-----------------
+
+InterpolateConv
+^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.InterpolateConv
+ :members:
+ :undoc-members:
+
+SubPixelConv
+^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.SubPixelConv
+ :members:
+ :undoc-members:
+
+
+Loss functions
+--------------
+
+AdversarialLoss
+^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.AdversarialLoss
+ :members:
+ :undoc-members:
+
+RelativisticAdversarialLoss
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.RelativisticAdversarialLoss
+ :members:
+ :undoc-members:
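+
+A minimal usage sketch (``fake_logits`` and ``real_logits`` are assumed to be
+raw discriminator outputs; the argument order mirrors the pipeline config,
+which passes the fake logits first):
+
+.. code-block:: python
+
+   from esrgan import nn
+
+   criterion = nn.RelativisticAdversarialLoss(mode="generator")
+   # fake_logits / real_logits: discriminator outputs for generated
+   # and real images, respectively
+   loss = criterion(fake_logits, real_logits)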
+
+PerceptualLoss
+^^^^^^^^^^^^^^
+
+.. autoclass:: esrgan.nn.PerceptualLoss
+ :members:
+ :undoc-members:
+
+
+Misc
+----
+
+.. autoclass:: esrgan.nn.Conv2dSN
+ :members:
+ :undoc-members:
+
+.. autoclass:: esrgan.nn.LinearSN
+ :members:
+ :undoc-members:
diff --git a/docs/pages/api/utils.rst b/docs/pages/api/utils.rst
old mode 100755
new mode 100644
index 4908462..705d1ba
--- a/docs/pages/api/utils.rst
+++ b/docs/pages/api/utils.rst
@@ -1,7 +1,19 @@
Utilities
=========
-Set of utilities that can make life a little bit easier.
+Set of utilities that can make life a little bit easier:
+
+.. contents::
+ :depth: 2
+ :local:
+
+
+Augmentation
+^^^^^^^^^^^^
+
+.. automodule:: esrgan.utils.aug
+ :members:
+ :undoc-members:
Model init
@@ -26,6 +38,3 @@ Misc
.. automodule:: esrgan.utils.misc
:members:
:undoc-members:
-.. automodule:: esrgan.utils.types
- :members:
- :undoc-members:
diff --git a/docs/pages/esrgan.rst b/docs/pages/esrgan.rst
new file mode 100644
index 0000000..b435435
--- /dev/null
+++ b/docs/pages/esrgan.rst
@@ -0,0 +1,95 @@
+======
+ESRGAN
+======
+
+
+Network Architecture
+====================
+
+.. image:: ../_static/architecture.png
+ :width: 100%
+
+Main modifications to the structure of the generator G, the discriminator D, and the training process in comparison to SRGAN:
+
+1. all BN layers were removed from the generator;
+2. original basic blocks were replaced with the proposed Residual-in-Residual Dense Blocks (RRDB),
+ which combine a multi-level residual network and dense connections;
+3. relativistic discriminator, which tries to predict the probability that a real image :math:`x_r`
+ is relatively more realistic than a fake one :math:`x_f` (formalized below);
+4. perceptual loss on features before activation.
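+
+For reference, the relativistic average discriminator from the ESRGAN paper
+(formalizing point 3 above):
+
+.. math::
+
+   D_{Ra}(x_r, x_f) = \sigma\big(C(x_r) - \mathbb{E}_{x_f}[C(x_f)]\big),
+
+where :math:`C(\cdot)` is the non-transformed discriminator output,
+:math:`\sigma` is the sigmoid function, and :math:`\mathbb{E}_{x_f}[\cdot]`
+denotes averaging over the fake images in a mini-batch.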
+
+
+Qualitative Results
+===================
+
+ PSNR (evaluated on the Y channel) and the perceptual index used in the PIRM-SR challenge
+ are also provided for reference. `\[1\]`_
+
+.. image:: ../_static/qualitative_cmp_01.jpg
+ :width: 100%
+.. image:: ../_static/qualitative_cmp_02.jpg
+ :width: 100%
+.. image:: ../_static/qualitative_cmp_03.jpg
+ :width: 100%
+.. image:: ../_static/qualitative_cmp_04.jpg
+ :width: 100%
+
+
+Ablation Study
+==============
+
+ Overall visual comparisons for showing the effects of each component in ESRGAN.
+ Each column represents a model with its configurations in the top.
+ The red sign indicates the main improvement compared with the previous model. `\[1\]`_
+
+.. image:: ../_static/abalation_study.png
+ :width: 100%
+
+
+BatchNorm artifacts
+===================
+
+ We empirically observe that BN layers tend to bring artifacts. These artifacts, namely BN artifacts,
+ occasionally appear among iterations and different settings, violating the needs for a stable performance
+ over training. We find that the network depth, BN position, training dataset and training loss
+ have impact on the occurrence of BN artifacts. `\[1\]`_
+
+.. image:: ../_static/bn_artifacts.jpg
+ :width: 100%
+
+
+Useful techniques to train a very deep network
+==============================================
+
+ We find that residual scaling and smaller initialization can help to train a very deep network.
+
+ - A smaller initialization than MSRA initialization (multiplying 0.1 for all initialization parameters
+ that calculated by MSRA initialization) works well in our experiments;
+ - In our settings, for each residual block, the residual features after the last convolution layer
+ are multiplied by 0.2. `\[1\]`_
+
+.. |init a| image:: ../_static/train_deeper_neta.png
+ :width: 49%
+.. |init b| image:: ../_static/train_deeper_netb.png
+ :width: 49%
+
+|init a| |init b|
+
+
+The influence of training patch size
+====================================
+
+ We observe that training a deeper network benefits from a larger patch size.
+ Moreover, the deeper model achieves more improvement (∼0.12dB) than the shallower one (∼0.04dB)
+ since larger model capacity is capable of taking full advantage of larger training patch size.
+ (Evaluated on Set5 dataset with RGB channels.) `\[1\]`_
+
+.. |16 blocks| image:: ../_static/patch_a.png
+ :width: 49%
+.. |23 blocks| image:: ../_static/patch_b.png
+ :width: 49%
+
+|16 blocks| |23 blocks|
+
+
+.. _\[1\]: https://github.com/xinntao/ESRGAN
diff --git a/docs/pages/install.rst b/docs/pages/install.rst
new file mode 100644
index 0000000..0f8a06e
--- /dev/null
+++ b/docs/pages/install.rst
@@ -0,0 +1,30 @@
+============
+Installation
+============
+
+You can install ``esrgan`` via `pip`_ or directly from source.
+
+Install from GitHub
+===================
+
+You can install the latest development version using `pip`_ directly from the GitHub repository:
+
+.. code-block:: bash
+
+ pip install git+https://github.com/leverxgroup/esrgan.git
+
+Install from source
+===================
+
+It's also possible to clone the Git repository and install it from source with `Poetry`_:
+
+.. code-block:: bash
+
+ git clone https://github.com/leverxgroup/esrgan.git
+ cd esrgan
+ poetry install
+
+
+.. _pip: https://pip.pypa.io/en/stable/
+.. _pypi: https://pypi.org/
+.. _poetry: https://python-poetry.org/
diff --git a/docs/requirements.txt b/docs/requirements.txt
old mode 100755
new mode 100644
index 094a5da..1dbee2e
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -1,2 +1,3 @@
-sphinx==2.2.1
-sphinx_rtd_theme
+sphinx==4.0.1
+sphinx-press-theme==0.8.0
+tomlkit==0.7.2
diff --git a/esrgan/__version__.py b/esrgan/__version__.py
deleted file mode 100644
index f102a9c..0000000
--- a/esrgan/__version__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.0.1"
diff --git a/esrgan/callbacks/__init__.py b/esrgan/callbacks/__init__.py
deleted file mode 100644
index 94cce5a..0000000
--- a/esrgan/callbacks/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-# flake8: noqa
-from esrgan.callbacks.metrics import PSNRCallback, SSIMCallback
diff --git a/esrgan/callbacks/metrics.py b/esrgan/callbacks/metrics.py
deleted file mode 100644
index 2768687..0000000
--- a/esrgan/callbacks/metrics.py
+++ /dev/null
@@ -1,107 +0,0 @@
-from typing import Union
-
-from catalyst.core import BatchMetricCallback
-import piq
-
-
-class PSNRCallback(BatchMetricCallback):
- """Peak signal-to-noise ratio (PSNR) metric callback.
-
- Compute Peak Signal-to-Noise Ratio for a batch of images.
-
- Args:
- input_key: Input key to use for PSNR calculation;
- specifies our `y_true`.
- output_key: Output key to use for PSNR calculation;
- specifies our `y_pred`.
- prefix: Name of the metric / key to store in logs.
- multiplier: Scale factor for the metric.
- data_range: Value range of input images (usually 1.0 or 255).
- reduction: Reduction over samples in batch, should be one of:
- ``'mean'``, ``'sum'``, or ``'none'``.
- convert_to_greyscale: If ``True``, convert RGB image to YCbCr format
- and computes PSNR only on luminance channel,
- compute on all 3 channels otherwise.
-
- """
-
- def __init__(
- self,
- input_key: str = "targets",
- output_key: str = "outputs",
- prefix: str = "psnr",
- multiplier: float = 1.0,
- data_range: Union[int, float] = 1.0,
- reduction: str = "mean",
- convert_to_greyscale: bool = False,
- ) -> None:
- super().__init__(
- prefix=prefix,
- metric_fn=piq.psnr,
- input_key=input_key,
- output_key=output_key,
- multiplier=multiplier,
- data_range=data_range,
- reduction=reduction,
- convert_to_greyscale=convert_to_greyscale,
- )
-
-
-class SSIMCallback(BatchMetricCallback):
- """Structural similarity (SSIM) metric callback.
-
- Computes Structural Similarity (SSIM) index between two images.
- It has been proposed in `Image Quality Assessment: From Error Visibility
- to Structural Similarity`__.
-
- Args:
- input_key: Input key to use for SSIM calculation;
- specifies our `y_true`.
- output_key: Output key to use for SSIM calculation;
- specifies our `y_pred`.
- prefix: Name of the metric / key to store in logs.
- multiplier: Scale factor for the metric.
- kernel_size: The side-length of the Gaussian sliding window
- used in comparison. Must be an odd value.
- kernel_sigma: Standard deviation of normal distribution.
- data_range: Value range of input images (usually 1.0 or 255).
- reduction: Specifies the reduction to apply to the output, should be
- one of: ``'mean'``, ``'sum'``, or ``'none'``.
- k1: Algorithm parameter, small constant used to stabilize the division
- with small denominator (see original paper for more info).
- k2: Algorithm parameter, small constant used to stabilize the division
- with small denominator (see original paper for more info).
-
- __ https://ece.uwaterloo.ca/~z70wang/publications/ssim.pdf
-
- """
-
- def __init__(
- self,
- input_key: str = "targets",
- output_key: str = "outputs",
- prefix: str = "ssim",
- multiplier: float = 1.0,
- kernel_size: int = 11,
- kernel_sigma: float = 1.5,
- data_range: Union[int, float] = 1.0,
- reduction: str = "mean",
- k1: float = 0.01,
- k2: float = 0.03,
- ) -> None:
- super().__init__(
- prefix=prefix,
- metric_fn=piq.ssim,
- input_key=input_key,
- output_key=output_key,
- multiplier=multiplier,
- kernel_size=kernel_size,
- kernel_sigma=kernel_sigma,
- data_range=data_range,
- reduction=reduction,
- k1=k1,
- k2=k2,
- )
-
-
-__all__ = ["PSNRCallback", "SSIMCallback"]
diff --git a/esrgan/core/__init__.py b/esrgan/core/__init__.py
deleted file mode 100644
index 7f4b655..0000000
--- a/esrgan/core/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# flake8: noqa
-from esrgan.core.experiment import SRExperiment
-from esrgan.core.runner import GANRunner
diff --git a/esrgan/core/experiment.py b/esrgan/core/experiment.py
deleted file mode 100644
index 53b5dc9..0000000
--- a/esrgan/core/experiment.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import copy
-from typing import Dict, Optional
-
-from catalyst import dl
-
-from esrgan import dataset
-
-
-class SRExperiment(dl.ConfigExperiment):
- """Experiment for ESRGAN, please check `catalyst docs`__ for more info.
-
- __ https://catalyst-team.github.io/catalyst/api/core.html#experiment
-
- """
-
- def get_datasets(
- self,
- stage: str,
- train_dataset_params: Optional[Dict] = None,
- valid_dataset_params: Optional[Dict] = None,
- infer_dataset_params: Optional[Dict] = None,
- ) -> dict:
- """Returns the datasets for a given stage and epoch.
-
- Args:
- stage: stage name of interest, e.g. "train", "finetune", "gan" ...
- train_dataset_params: Parameters of train dataset, must contain
- ``'dataset'`` key with the name of dataset to use
- e.g. :py:class:`esrgan.dataset.DIV2KDataset`.
- valid_dataset_params: Parameters of validation dataset.
- infer_dataset_params: Parameters of inference dataset.
-
- Returns:
- Dictionary with datasets for current stage.
-
- Example for Config API:
-
- .. code-block:: yaml
-
- train_dataset_params:
- dataset: DIV2KDataset
- root: data
- train: true
- target_type: bicubic_X4
- download: true
-
- """
- train_dataset_params = train_dataset_params or {}
- valid_dataset_params = valid_dataset_params or {}
- infer_dataset_params = infer_dataset_params or {}
-
- datasets = {}
- for params, mode in zip(
- (train_dataset_params, valid_dataset_params, infer_dataset_params),
- ("train", "valid", "infer"),
- ):
- if params:
- params_ = copy.deepcopy(params)
-
- dataset_name = params_.pop("dataset")
- dataset_ = dataset.__dict__[dataset_name]
-
- transform = self.get_transforms(stage=stage, dataset=mode)
- if transform is not None:
- params_["transform"] = transform
-
- datasets[mode] = dataset_(**params_)
-
- return datasets
-
-
-__all__ = ["SRExperiment"]
diff --git a/esrgan/core/runner.py b/esrgan/core/runner.py
deleted file mode 100644
index 3aa6daa..0000000
--- a/esrgan/core/runner.py
+++ /dev/null
@@ -1,145 +0,0 @@
-from typing import Dict
-
-from catalyst import dl
-import torch
-
-
-class GANRunner(dl.Runner):
- """Runner for experiments with supervised / GAN model."""
-
- def _init(
- self,
- input_key: str = "image",
- target_key: str = "real_image",
- generator_output_key: str = "fake_image",
- discriminator_real_output_gkey: str = "g_real_logits",
- discriminator_fake_output_gkey: str = "g_fake_logits",
- discriminator_real_output_dkey: str = "d_real_logits",
- discriminator_fake_output_dkey: str = "d_fake_logits",
- generator_key: str = "generator",
- discriminator_key: str = "discriminator",
- ) -> None:
- """Catalyst-specific helper method for `__init__`.
-
- Args:
- input_key: Key in batch dict mapping for model input.
- target_key: Key in batch dict mapping for target.
- generator_output_key: Key in output dict model output
- of the generator will be stored under.
- discriminator_real_output_gkey: Key to store predictions of
- discriminator for real inputs, contain gradients for generator.
- discriminator_fake_output_gkey: Key to store predictions of
- discriminator for predictions of generator,
- contain gradients for generator.
- discriminator_real_output_dkey: Key to store predictions of
- discriminator for real inputs,
- contain gradients for discriminator only.
- discriminator_fake_output_dkey: Key to store predictions of
- discriminator for items produced by generator,
- contain gradients for discriminator only.
- generator_key: Key in model dict mapping for generator model.
- discriminator_key: Key in model dict mapping for discriminator
- model (will be used in gan stages only).
-
- """
- super()._init()
-
- self.generator_key = generator_key
- self.discriminator_key = discriminator_key
-
- self.input_key = input_key
- self.target_key = target_key
- self.generator_output_key = generator_output_key
- self.discriminator_real_output_gkey = discriminator_real_output_gkey
- self.discriminator_fake_output_gkey = discriminator_fake_output_gkey
- self.discriminator_real_output_dkey = discriminator_real_output_dkey
- self.discriminator_fake_output_dkey = discriminator_fake_output_dkey
-
- def _prepare_for_stage(self, stage: str) -> None:
- """Prepare `_handle_batch` method for current stage.
-
- Args:
- stage: Name of current stage.
-
- Raises:
- NotImplementedError: Name of the `stage` should ends with
- ``'_supervised'``, ``'_gan'`` or should be ``'infer'``,
- raise error otherwise.
-
- """
- super()._prepare_for_stage(stage=stage)
-
- if stage.endswith("_supervised") or stage == "infer":
- self._handle_batch = self._handle_batch_supervised
- elif stage.endswith("_gan"):
- self._handle_batch = self._handle_batch_gan
- else:
- raise NotImplementedError()
-
- def _handle_batch(self, batch: Dict[str, torch.Tensor]) -> None:
- # `_handle_batch` method is @abstractmethod so it must be defined
- # even if it overwrites in `_prepare_for_stage`
- raise NotImplementedError()
-
- def _handle_batch_supervised(self, batch: Dict[str, torch.Tensor]) -> None:
- """Process train/valid batch, supervised mode.
-
- Args:
- batch: Input batch (batch of samples).
-
- """
- model = self.model[self.generator_key]
- output = model(batch[self.input_key])
-
- self.output = {self.generator_output_key: output}
-
- def _handle_batch_gan(self, batch: Dict[str, torch.Tensor]) -> None:
- """Process train/valid batch, GAN mode.
-
- Args:
- batch: Input batch, should raw samples for generator
- and ground truth samples for discriminator.
-
- """
- generator = self.model[self.generator_key]
- discriminator = self.model[self.discriminator_key]
-
- real_image = batch[self.target_key]
- fake_image = generator(batch[self.input_key])
-
- noise = torch.randn(real_image.shape, device=self.device)
- real_image = torch.clamp((real_image + 0.05 * noise), min=0.0, max=1.0)
-
- # predictions used in calculation of adversarial loss of generator
- real_logits_g = discriminator(real_image)
- fake_logits_g = discriminator(fake_image)
-
- # predictions used in calculation of adversarial loss of discriminator
- real_logits_d = discriminator(real_image)
- fake_logits_d = discriminator(fake_image.detach())
-
- self.output = {
- self.generator_output_key: fake_image,
- self.discriminator_real_output_gkey: real_logits_g,
- self.discriminator_fake_output_gkey: fake_logits_g,
- self.discriminator_real_output_dkey: real_logits_d,
- self.discriminator_fake_output_dkey: fake_logits_d,
- }
-
- def predict_batch(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
- """Generate predictions based on input batch (generator inference).
-
- Args:
- batch: Input batch (batch of samples to adjust e.g. zoom).
-
- Returns:
- Batch of predictions of the generator.
-
- """
- model = self.model[self.generator_key]
- output = model(batch[self.input_key].to(self.device))
-
- return output
-
-
-__all__ = ["GANRunner"]
diff --git a/esrgan/criterions/__init__.py b/esrgan/criterions/__init__.py
deleted file mode 100644
index 81f5bb2..0000000
--- a/esrgan/criterions/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# flake8: noqa
-from esrgan.criterions.adversarial import (
- AdversarialLoss, RelativisticAdversarialLoss,
-)
-from esrgan.criterions.perceptual import PerceptualLoss
diff --git a/esrgan/dataset/__init__.py b/esrgan/dataset/__init__.py
deleted file mode 100644
index 89e6706..0000000
--- a/esrgan/dataset/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# flake8: noqa
-from esrgan.dataset.div2k import DIV2KDataset
-from esrgan.dataset.image_folder import ImageFolderDataset
diff --git a/esrgan/dataset/div2k.py b/esrgan/dataset/div2k.py
deleted file mode 100644
index 2af4fe6..0000000
--- a/esrgan/dataset/div2k.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import glob
-from pathlib import Path
-import random
-from typing import Callable, Dict, Iterable, List, Optional, Tuple
-
-from albumentations.augmentations import functional as F
-from catalyst import data, utils
-from catalyst.contrib.datasets.functional import download_and_extract_archive
-import numpy as np
-from torch.utils.data import Dataset
-
-
-def paired_random_crop(
- images: Iterable[np.ndarray], crops_sizes: Iterable[Tuple[int, int]],
-) -> Iterable[np.ndarray]:
- """Crop a random part of the input images.
-
- Args:
- images: Sequence of images.
- crops_sizes: Sequence of crop sizes ``(height, width)``.
-
- Returns:
- List of crops.
-
- """
- h_start, w_start = random.random(), random.random()
-
- crops = [
- F.random_crop(image, height, width, h_start, w_start)
- for image, (height, width) in zip(images, crops_sizes)
- ]
-
- return crops
-
-
-class DIV2KDataset(Dataset):
- """`DIV2K `_ Dataset.
-
- Args:
- root: Root directory where images are downloaded to.
- train: If True, creates dataset from training set,
- otherwise creates from validation set.
- target_type: Type of target to use, ``'bicubic_X2'``, ``'unknown_X4'``,
- ``'X8'``, ``'mild'``, ...
- patch_size: If ``train == True``, define sizes of patches to produce,
- return full image otherwise. Tuple of height and width.
- transform: A function / transform that takes in dictionary (with low
- and high resolution images) and returns a transformed version.
- low_resolution_image_key: Key to use to store images of low resolution.
- high_resolution_image_key: Key to use to store high resolution images.
- download: If true, downloads the dataset from the internet
- and puts it in root directory. If dataset is already downloaded,
- it is not downloaded again.
-
- """
-
- url = "http://data.vision.ee.ethz.ch/cvl/DIV2K/"
- resources = {
- "DIV2K_train_LR_bicubic_X2.zip": "9a637d2ef4db0d0a81182be37fb00692",
- "DIV2K_train_LR_unknown_X2.zip": "1396d023072c9aaeb999c28b81315233",
- "DIV2K_valid_LR_bicubic_X2.zip": "1512c9a3f7bde2a1a21a73044e46b9cb",
- "DIV2K_valid_LR_unknown_X2.zip": "d319bd9033573d21de5395e6454f34f8",
- "DIV2K_train_LR_bicubic_X3.zip": "ad80b9fe40c049a07a8a6c51bfab3b6d",
- "DIV2K_train_LR_unknown_X3.zip": "4e651308aaa54d917fb1264395b7f6fa",
- "DIV2K_valid_LR_bicubic_X3.zip": "18b1d310f9f88c13618c287927b29898",
- "DIV2K_valid_LR_unknown_X3.zip": "05184168e3608b5c539fbfb46bcade4f",
- "DIV2K_train_LR_bicubic_X4.zip": "76c43ec4155851901ebbe8339846d93d",
- "DIV2K_train_LR_unknown_X4.zip": "e3c7febb1b3f78bd30f9ba15fe8e3956",
- "DIV2K_valid_LR_bicubic_X4.zip": "21962de700c8d368c6ff83314480eff0",
- "DIV2K_valid_LR_unknown_X4.zip": "8ac3413102bb3d0adc67012efb8a6c94",
- "DIV2K_train_LR_x8.zip": "613db1b855721b3d2b26f4194a1d22a6",
- "DIV2K_train_LR_mild.zip": "807b3e3a5156f35bd3a86c5bbfb674bc",
- "DIV2K_train_LR_difficult.zip": "5a8f2b9e0c5f5ed0dac271c1293662f4",
- "DIV2K_train_LR_wild.zip": "d00982366bffee7c4739ba7ff1316b3b",
- "DIV2K_valid_LR_x8.zip": "c5aeea2004e297e9ff3abfbe143576a5",
- "DIV2K_valid_LR_mild.zip": "8c433f812ca532eed62c11ec0de08370",
- "DIV2K_valid_LR_difficult.zip": "1620af11bf82996bc94df655cb6490fe",
- "DIV2K_valid_LR_wild.zip": "aacae8db6bec39151ca5bb9c80bf2f6c",
- "DIV2K_train_HR.zip": "bdc2d9338d4e574fe81bf7d158758658",
- "DIV2K_valid_HR.zip": "9fcdda83005c5e5997799b69f955ff88",
- }
-
- def __init__(
- self,
- root: str,
- train: bool = True,
- target_type: str = "bicubic_X4",
- patch_size: Tuple[int, int] = (96, 96),
- transform: Optional[Callable[[Dict], Dict]] = None,
- low_resolution_image_key: str = "lr_image",
- high_resolution_image_key: str = "hr_image",
- download: bool = False,
- ) -> None:
- mode = "train" if train else "valid"
- filename_hr = f"DIV2K_{mode}_HR.zip"
- filename_lr = f"DIV2K_{mode}_LR_{target_type}.zip"
- if download:
- # download HR (target) images
- download_and_extract_archive(
- f"{self.url}{filename_hr}",
- download_root=root,
- filename=filename_hr,
- md5=self.resources[filename_hr],
- )
-
- # download lr (input) images
- download_and_extract_archive(
- f"{self.url}{filename_lr}",
- download_root=root,
- filename=filename_lr,
- md5=self.resources[filename_lr],
- )
-
- self.train = train
-
- self.lr_key = low_resolution_image_key
- self.hr_key = high_resolution_image_key
-
- # 'index' files
- lr_images = self._images_in_dir(Path(root) / Path(filename_lr).stem)
- hr_images = self._images_in_dir(Path(root) / Path(filename_hr).stem)
- assert len(lr_images) == len(hr_images)
-
- self.data = [
- {"lr_image": lr_image, "hr_image": hr_image}
- for lr_image, hr_image in zip(lr_images, hr_images)
- ]
-
- self.open_fn = data.ReaderCompose([
- data.ImageReader(input_key="lr_image", output_key=self.lr_key),
- data.ImageReader(input_key="hr_image", output_key=self.hr_key),
- ])
-
- self.scale = int(target_type[-1]) if target_type[-1].isdigit() else 4
- height, width = patch_size
- self.target_patch_size = patch_size
- self.input_patch_size = (height // self.scale, width // self.scale)
-
- self.transform = transform if transform is not None else lambda x: x
-
- def __getitem__(self, index: int) -> Dict:
- """Gets element of the dataset.
-
- Args:
- index: Index of the element in the dataset.
-
- Returns:
- Dict of low and high resolution images.
-
- """
- record = self.data[index]
-
- sample_dict = self.open_fn(record)
-
- if self.train:
- # use random crops during training
- lr_crop, hr_crop = paired_random_crop(
- (sample_dict[self.lr_key], sample_dict[self.hr_key]),
- (self.input_patch_size, self.target_patch_size),
- )
- sample_dict.update({self.lr_key: lr_crop, self.hr_key: hr_crop})
-
- sample_dict = self.transform(sample_dict)
-
- return sample_dict
-
- def __len__(self) -> int:
- """Get length of the dataset.
-
- Returns:
- int: Length of the dataset.
-
- """
- return len(self.data)
-
- def _images_in_dir(self, path: Path) -> List[str]:
- # fix path to dir for `NTIRE 2017` datasets
- if not path.exists():
- idx = path.name.rfind("_")
- path = path.parent / path.name[:idx] / path.name[idx + 1:]
-
- files = glob.iglob(f"{path}/**/*", recursive=True)
- images = sorted(filter(utils.has_image_extension, files))
-
- return images
-
-
-__all__ = ["DIV2KDataset"]
diff --git a/esrgan/dataset/image_folder.py b/esrgan/dataset/image_folder.py
deleted file mode 100644
index f1854b4..0000000
--- a/esrgan/dataset/image_folder.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import glob
-from typing import Callable, Dict, Optional
-
-from catalyst import data, utils
-
-
-class ImageFolderDataset(data.ListDataset):
- """A generic data loader where the samples are arranged in this way: ::
-
-        <pathname>/xxx.ext
-        <pathname>/xxy.ext
-        <pathname>/xxz.ext
-        ...
-        <pathname>/123.ext
-        <pathname>/nsdf3.ext
-        <pathname>/asd932_.ext
-
- Args:
- pathname: Root directory of dataset.
- image_key: Key to use to store image.
- image_name_key: Key to use to store name of the image.
- transform: A function / transform that takes in dictionary
- and returns its transformed version.
-
- """
-
- def __init__(
- self,
- pathname: str,
- image_key: str = "image",
- image_name_key: str = "filename",
- transform: Optional[Callable[[Dict], Dict]] = None,
- ) -> None:
- files = glob.iglob(pathname, recursive=True)
- images = sorted(filter(utils.has_image_extension, files))
-
- list_data = [{"image": filename} for filename in images]
- open_fn = data.ReaderCompose([
- data.ImageReader(input_key="image", output_key=image_key),
- data.LambdaReader(input_key="image", output_key=image_name_key),
- ])
-
- super().__init__(
- list_data=list_data, open_fn=open_fn, dict_transform=transform
- )
-
-
-__all__ = ["ImageFolderDataset"]
diff --git a/esrgan/datasets.py b/esrgan/datasets.py
new file mode 100644
index 0000000..0653dec
--- /dev/null
+++ b/esrgan/datasets.py
@@ -0,0 +1,369 @@
+import glob
+from pathlib import Path
+import random
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
+
+from albumentations.augmentations.crops import functional as F
+from catalyst import data
+from catalyst.contrib.datasets import misc
+import numpy as np
+from torch.utils.data import Dataset
+
+from esrgan import utils
+
+__all__ = ["DIV2KDataset", "Flickr2KDataset", "ImageFolderDataset"]
+
+
+def has_image_extension(uri: Union[str, Path]) -> bool:
+ """Checks that file has image extension.
+
+ Args:
+ uri: The resource to load the file from.
+
+ Returns:
+ ``True`` if file has image extension, ``False`` otherwise.
+
+ """
+ ext = Path(uri).suffix
+ return ext.lower() in {".bmp", ".png", ".jpeg", ".jpg", ".tif", ".tiff"}
+
+
+def images_in_dir(*args: Union[str, Path]) -> List[str]:
+ """Searches for all images in the directory.
+
+ Args:
+ *args: Path to the folder with images.
+ Each element of path segments can be either a string
+ representing a path segment, an object implementing
+ the :py:class:`os.PathLike` interface which returns a string,
+ or another path object.
+
+ Returns:
+ List of images in the folder or its subfolders.
+
+ """
+ # fix path to dir for the `NTIRE 2017` datasets
+ path = Path(*args)
+ if not path.exists():
+ idx = path.name.rfind("_")
+ path = path.parent / path.name[:idx] / path.name[idx + 1:]
+
+ files = glob.iglob(f"{path}/**/*", recursive=True)
+ images = sorted(filter(has_image_extension, files))
+
+ return images
+
+
+def paired_random_crop(
+ images: Iterable[np.ndarray], crops_sizes: Iterable[Tuple[int, int]],
+) -> Iterable[np.ndarray]:
+ """Crop a random part of the input images.
+
+ Args:
+ images: Sequence of images.
+ crops_sizes: Sequence of crop sizes ``(height, width)``.
+
+ Returns:
+ List of crops.
+
+ """
+ h_start, w_start = random.random(), random.random()
+
+ crops = [
+ F.random_crop(image, height, width, h_start, w_start)
+ for image, (height, width) in zip(images, crops_sizes)
+ ]
+
+ return crops
+
+
+class _PairedImagesDataset(Dataset):
+ """Base Dataset for the Image Super-Resolution task.
+
+ Args:
+ train: If True, creates dataset from training set,
+ otherwise creates from validation set.
+ target_type: Type of target to use, ``'bicubic_X2'``, ``'unknown_X4'``,
+ ``'X8'``, ``'mild'``, ...
+        patch_size: Size of patches to produce (tuple of height and width)
+            if ``train == True``; the full image is returned otherwise.
+ transform: A function / transform that takes in dictionary (with low
+ and high resolution images) and returns a transformed version.
+ low_resolution_image_key: Key to use to store images of low resolution.
+ high_resolution_image_key: Key to use to store high resolution images.
+
+ """
+
+ def __init__(
+ self,
+ train: bool = True,
+ target_type: str = "bicubic_X4",
+ patch_size: Tuple[int, int] = (96, 96),
+ transform: Optional[Callable[[Any], Dict]] = None,
+ low_resolution_image_key: str = "lr_image",
+ high_resolution_image_key: str = "hr_image",
+ ) -> None:
+ self.train = train
+
+ self.lr_key = low_resolution_image_key
+ self.hr_key = high_resolution_image_key
+
+ self.data: List[Dict[str, str]] = []
+ self.open_fn = data.ReaderCompose([
+ data.ImageReader(input_key="lr_image", output_key=self.lr_key),
+ data.ImageReader(input_key="hr_image", output_key=self.hr_key),
+ ])
+
+        # `target_type` is e.g. 'bicubic_X4', 'unknown_X2', 'X8', or 'mild';
+        # strip the degradation prefix and the 'X' to recover the scale
+        downscaling = target_type.rsplit("_", 1)[-1].lstrip("Xx")
+        self.scale = int(downscaling) if downscaling.isdigit() else 4
+ height, width = patch_size
+ self.target_patch_size = patch_size
+ self.input_patch_size = (height // self.scale, width // self.scale)
+
+ self.transform = utils.Augmentor(transform)
+
+ def __getitem__(self, index: int) -> Dict:
+ """Gets element of the dataset.
+
+ Args:
+ index: Index of the element in the dataset.
+
+ Returns:
+ Dict of low and high resolution images.
+
+ """
+ record = self.data[index]
+
+ sample_dict = self.open_fn(record)
+
+ if self.train:
+ # use random crops during training
+ lr_crop, hr_crop = paired_random_crop(
+ (sample_dict[self.lr_key], sample_dict[self.hr_key]),
+ (self.input_patch_size, self.target_patch_size),
+ )
+ sample_dict.update({self.lr_key: lr_crop, self.hr_key: hr_crop})
+
+ sample_dict = self.transform(sample_dict)
+
+ return sample_dict
+
+ def __len__(self) -> int:
+ """Get length of the dataset.
+
+ Returns:
+ Length of the dataset.
+
+ """
+ return len(self.data)
+
+
+class DIV2KDataset(_PairedImagesDataset):
+    """`DIV2K <https://data.vision.ee.ethz.ch/cvl/DIV2K/>`_ Dataset.
+
+ Args:
+ root: Root directory where images are downloaded to.
+ train: If True, creates dataset from training set,
+ otherwise creates from validation set.
+ target_type: Type of target to use, ``'bicubic_X2'``, ``'unknown_X4'``,
+ ``'X8'``, ``'mild'``, ...
+        patch_size: Size of patches to produce (tuple of height and width)
+            if ``train == True``; the full image is returned otherwise.
+ transform: A function / transform that takes in dictionary (with low
+ and high resolution images) and returns a transformed version.
+ low_resolution_image_key: Key to use to store images of low resolution.
+ high_resolution_image_key: Key to use to store high resolution images.
+        download: If True, downloads the dataset from the internet
+            and puts it in the root directory. If the dataset is already
+            downloaded, it is not downloaded again.
+
+ """
+
+ url = "http://data.vision.ee.ethz.ch/cvl/DIV2K/"
+ resources = {
+ "DIV2K_train_LR_bicubic_X2.zip": "9a637d2ef4db0d0a81182be37fb00692",
+ "DIV2K_train_LR_unknown_X2.zip": "1396d023072c9aaeb999c28b81315233",
+ "DIV2K_valid_LR_bicubic_X2.zip": "1512c9a3f7bde2a1a21a73044e46b9cb",
+ "DIV2K_valid_LR_unknown_X2.zip": "d319bd9033573d21de5395e6454f34f8",
+ "DIV2K_train_LR_bicubic_X3.zip": "ad80b9fe40c049a07a8a6c51bfab3b6d",
+ "DIV2K_train_LR_unknown_X3.zip": "4e651308aaa54d917fb1264395b7f6fa",
+ "DIV2K_valid_LR_bicubic_X3.zip": "18b1d310f9f88c13618c287927b29898",
+ "DIV2K_valid_LR_unknown_X3.zip": "05184168e3608b5c539fbfb46bcade4f",
+ "DIV2K_train_LR_bicubic_X4.zip": "76c43ec4155851901ebbe8339846d93d",
+ "DIV2K_train_LR_unknown_X4.zip": "e3c7febb1b3f78bd30f9ba15fe8e3956",
+ "DIV2K_valid_LR_bicubic_X4.zip": "21962de700c8d368c6ff83314480eff0",
+ "DIV2K_valid_LR_unknown_X4.zip": "8ac3413102bb3d0adc67012efb8a6c94",
+ "DIV2K_train_LR_x8.zip": "613db1b855721b3d2b26f4194a1d22a6",
+ "DIV2K_train_LR_mild.zip": "807b3e3a5156f35bd3a86c5bbfb674bc",
+ "DIV2K_train_LR_difficult.zip": "5a8f2b9e0c5f5ed0dac271c1293662f4",
+ "DIV2K_train_LR_wild.zip": "d00982366bffee7c4739ba7ff1316b3b",
+ "DIV2K_valid_LR_x8.zip": "c5aeea2004e297e9ff3abfbe143576a5",
+ "DIV2K_valid_LR_mild.zip": "8c433f812ca532eed62c11ec0de08370",
+ "DIV2K_valid_LR_difficult.zip": "1620af11bf82996bc94df655cb6490fe",
+ "DIV2K_valid_LR_wild.zip": "aacae8db6bec39151ca5bb9c80bf2f6c",
+ "DIV2K_train_HR.zip": "bdc2d9338d4e574fe81bf7d158758658",
+ "DIV2K_valid_HR.zip": "9fcdda83005c5e5997799b69f955ff88",
+ }
+
+ def __init__(
+ self,
+ root: str,
+ train: bool = True,
+ target_type: str = "bicubic_X4",
+ patch_size: Tuple[int, int] = (96, 96),
+ transform: Optional[Callable[[Any], Dict]] = None,
+ low_resolution_image_key: str = "lr_image",
+ high_resolution_image_key: str = "hr_image",
+ download: bool = False,
+ ) -> None:
+ super().__init__(
+ train=train,
+ target_type=target_type,
+ patch_size=patch_size,
+ transform=transform,
+ low_resolution_image_key=low_resolution_image_key,
+ high_resolution_image_key=high_resolution_image_key,
+ )
+
+ mode = "train" if train else "valid"
+ filename_hr = f"DIV2K_{mode}_HR.zip"
+ filename_lr = f"DIV2K_{mode}_LR_{target_type}.zip"
+ if download:
+ # download HR (target) images
+ misc.download_and_extract_archive(
+ f"{self.url}{filename_hr}",
+ download_root=root,
+ filename=filename_hr,
+ md5=self.resources[filename_hr],
+ )
+
+ # download lr (input) images
+ misc.download_and_extract_archive(
+ f"{self.url}{filename_lr}",
+ download_root=root,
+ filename=filename_lr,
+ md5=self.resources[filename_lr],
+ )
+
+ # 'index' files
+ lr_images = images_in_dir(root, Path(filename_lr).stem)
+ hr_images = images_in_dir(root, Path(filename_hr).stem)
+ assert len(lr_images) == len(hr_images)
+
+ self.data = [
+ {"lr_image": lr_image, "hr_image": hr_image}
+ for lr_image, hr_image in zip(lr_images, hr_images)
+ ]
+
+
+class Flickr2KDataset(_PairedImagesDataset):
+    """`Flickr2K <https://cv.snu.ac.kr/research/EDSR/>`_ Dataset.
+
+ Args:
+ root: Root directory where images are downloaded to.
+ train: If True, creates dataset from training set,
+ otherwise creates from validation set.
+ target_type: Type of target to use, ``'bicubic_X2'``, ``'unknown_X4'``,
+ ...
+        patch_size: Size of patches to produce (tuple of height and width)
+            if ``train == True``; the full image is returned otherwise.
+ transform: A function / transform that takes in dictionary (with low
+ and high resolution images) and returns a transformed version.
+ low_resolution_image_key: Key to use to store images of low resolution.
+ high_resolution_image_key: Key to use to store high resolution images.
+        download: If True, downloads the dataset from the internet
+            and puts it in the root directory. If the dataset is already
+            downloaded, it is not downloaded again.
+
+ """
+
+ url = "https://cv.snu.ac.kr/research/EDSR/"
+ resources = {
+ "Flickr2K.tar": "5d3f39443d5e9489bff8963f8f26cb03",
+ }
+
+ def __init__(
+ self,
+ root: str,
+ train: bool = True,
+ target_type: str = "bicubic_X4",
+ patch_size: Tuple[int, int] = (96, 96),
+ transform: Optional[Callable[[Any], Dict]] = None,
+ low_resolution_image_key: str = "lr_image",
+ high_resolution_image_key: str = "hr_image",
+ download: bool = False,
+ ) -> None:
+ super().__init__(
+ train=train,
+ target_type=target_type,
+ patch_size=patch_size,
+ transform=transform,
+ low_resolution_image_key=low_resolution_image_key,
+ high_resolution_image_key=high_resolution_image_key,
+ )
+
+ filename = "Flickr2K.tar"
+ if download:
+ # download images
+ misc.download_and_extract_archive(
+ f"{self.url}{filename}",
+ download_root=root,
+ filename=filename,
+ md5=self.resources[filename],
+ )
+
+ degradation, downscaling = target_type.split("_")
+
+ # 'index' files
+ subdir_lr = Path(f"Flickr2K_LR_{degradation}", downscaling)
+ subdir_hr = "Flickr2K_HR"
+ lr_images = images_in_dir(root, Path(filename).stem, subdir_lr)
+ hr_images = images_in_dir(root, Path(filename).stem, subdir_hr)
+ assert len(lr_images) == len(hr_images)
+
+ self.data = [
+ {"lr_image": lr_image, "hr_image": hr_image}
+ for lr_image, hr_image in zip(lr_images, hr_images)
+ ]
+
+
+class ImageFolderDataset(data.ListDataset):
+ """A generic data loader where the samples are arranged in this way: ::
+
+        <pathname>/xxx.ext
+        <pathname>/xxy.ext
+        <pathname>/xxz.ext
+        ...
+        <pathname>/123.ext
+        <pathname>/nsdf3.ext
+        <pathname>/asd932_.ext
+
+ Args:
+ pathname: Root directory of dataset.
+ image_key: Key to use to store image.
+ image_name_key: Key to use to store name of the image.
+ transform: A function / transform that takes in dictionary
+ and returns its transformed version.
+
+ """
+
+ def __init__(
+ self,
+ pathname: str,
+ image_key: str = "image",
+ image_name_key: str = "filename",
+ transform: Optional[Callable[[Dict], Dict]] = None,
+ ) -> None:
+ files = glob.iglob(pathname, recursive=True)
+ images = sorted(filter(has_image_extension, files))
+
+ list_data = [{"image": filename} for filename in images]
+ open_fn = data.ReaderCompose([
+ data.ImageReader(input_key="image", output_key=image_key),
+ data.LambdaReader(input_key="image", output_key=image_name_key),
+ ])
+ transform = utils.Augmentor(transform)
+
+ super().__init__(
+ list_data=list_data, open_fn=open_fn, dict_transform=transform
+ )
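
A quick usage sketch of the consolidated `esrgan.datasets` module (the `root`
path, the batch size, and the assumption that no extra transform is needed are
all illustrative, not prescribed by this PR):

```python
from torch.utils.data import DataLoader

from esrgan.datasets import DIV2KDataset

# x4 bicubic track; 96x96 HR patches imply 24x24 LR patches
dataset = DIV2KDataset(
    root="data",              # hypothetical download/extraction root
    train=True,
    target_type="bicubic_X4",
    patch_size=(96, 96),
    download=True,
)
loader = DataLoader(dataset, batch_size=16, shuffle=True)

batch = next(iter(loader))  # dict with "lr_image" / "hr_image" entries
```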
diff --git a/esrgan/model/__init__.py b/esrgan/model/__init__.py
deleted file mode 100644
index 51236e0..0000000
--- a/esrgan/model/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# flake8: noqa
-from esrgan.model import module
-from esrgan.model.discriminator import VGGConv
-from esrgan.model.generator import EncoderDecoderNet
diff --git a/esrgan/model/discriminator.py b/esrgan/model/discriminator.py
deleted file mode 100644
index a18f071..0000000
--- a/esrgan/model/discriminator.py
+++ /dev/null
@@ -1,86 +0,0 @@
-import copy
-from typing import Optional
-
-from catalyst.registry import MODULE
-import torch
-from torch import nn
-
-from esrgan import utils
-
-
-class VGGConv(nn.Module):
- """VGG-like neural network for image classification.
-
- Args:
- encoder: Image encoder module, usually used for the extraction
- of embeddings from input signals.
- pool: Pooling layer, used to reduce embeddings from the encoder.
- head: Classification head, usually consists of Fully Connected layers.
-
- """
-
- def __init__(
- self, encoder: nn.Module, pool: nn.Module, head: nn.Module,
- ) -> None:
- super().__init__()
-
- self.encoder = encoder
- self.pool = pool
- self.head = head
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Forward call.
-
- Args:
- x: Batch of images.
-
- Returns:
- Batch of logits.
-
- """
- x = self.pool(self.encoder(x))
- x = x.view(x.shape[0], -1)
- x = self.head(x)
-
- return x
-
- @classmethod
- def get_from_params(
- cls,
- encoder_params: Optional[dict] = None,
- pooling_params: Optional[dict] = None,
- head_params: Optional[dict] = None,
- ) -> "VGGConv":
- """Create model based on it config.
-
- Args:
- encoder_params: Params of encoder module.
- pooling_params: Params of the pooling layer.
- head_params: 'Head' module params.
-
- Returns:
- Model.
-
- """
- encoder: nn.Module = nn.Identity()
- if (encoder_params_ := copy.deepcopy(encoder_params)) is not None:
- encoder_fn = MODULE.get(encoder_params_.pop("module"))
- encoder = encoder_fn(**encoder_params_)
-
- pool: nn.Module = nn.Identity()
- if (pooling_params_ := copy.deepcopy(pooling_params)) is not None:
- pool_fn = MODULE.get(pooling_params_.pop("module"))
- pool = pool_fn(**pooling_params_)
-
- head: nn.Module = nn.Identity()
- if (head_params_ := copy.deepcopy(head_params)) is not None:
- head_fn = MODULE.get(head_params_.pop("module"))
- head = head_fn(**head_params_)
-
- net = cls(encoder=encoder, pool=pool, head=head)
- utils.net_init_(net)
-
- return net
-
-
-__all__ = ["VGGConv"]
diff --git a/esrgan/model/generator.py b/esrgan/model/generator.py
deleted file mode 100644
index 3a4fa90..0000000
--- a/esrgan/model/generator.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import copy
-from typing import Optional
-
-from catalyst.registry import MODULE
-import torch
-from torch import nn
-
-from esrgan import utils
-
-
-class EncoderDecoderNet(nn.Module):
- """Generalized Encoder-Decoder network.
-
- Args:
- encoder: Encoder module, usually used for the extraction
- of embeddings from input signals.
- decoder: Decoder module, usually used for embeddings processing
- e.g. generation of signal similar to the input one (in GANs).
-
- """
-
- def __init__(self, encoder: nn.Module, decoder: nn.Module) -> None:
- super().__init__()
-
- self.encoder = encoder
- self.decoder = decoder
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Forward pass method.
-
- Args:
- x: Batch of input signals e.g. images.
-
- Returns:
- Batch of generated signals e.g. images.
-
- """
- x = self.encoder(x)
- x = self.decoder(x)
- x = torch.clamp(x, min=0.0, max=1.0)
-
- return x
-
- @classmethod
- def get_from_params(
- cls,
- encoder_params: Optional[dict] = None,
- decoder_params: Optional[dict] = None,
- ) -> "EncoderDecoderNet":
- """Create model based on it config.
-
- Args:
- encoder_params: Encoder module params.
- decoder_params: Decoder module parameters.
-
- Returns:
- Model.
-
- """
- encoder: nn.Module = nn.Identity()
- if (encoder_params_ := copy.deepcopy(encoder_params)) is not None:
- encoder_fn = MODULE.get(encoder_params_.pop("module"))
- encoder = encoder_fn(**encoder_params_)
-
- decoder: nn.Module = nn.Identity()
- if (decoder_params_ := copy.deepcopy(decoder_params)) is not None:
- decoder_fn = MODULE.get(decoder_params_.pop("module"))
- decoder = decoder_fn(**decoder_params_)
-
- net = cls(encoder=encoder, decoder=decoder)
- utils.net_init_(net)
-
- return net
-
-
-__all__ = ["EncoderDecoderNet"]
diff --git a/esrgan/model/module/__init__.py b/esrgan/model/module/__init__.py
deleted file mode 100644
index caa3f57..0000000
--- a/esrgan/model/module/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# flake8: noqa
-from esrgan.model.module.blocks import (
- ConcatInputModule, Conv2d, Conv2dSN, InterpolateConv, LeakyReLU, LinearSN,
- ResidualDenseBlock, ResidualInResidualDenseBlock, ResidualModule,
- SubPixelConv,
-)
-from esrgan.model.module.conv import StridedConvEncoder
-from esrgan.model.module.esrnet import ESREncoder, ESRNetDecoder
-from esrgan.model.module.linear import LinearHead
-from esrgan.model.module.srresnet import SRResNetDecoder, SRResNetEncoder
diff --git a/esrgan/model/module/blocks/__init__.py b/esrgan/model/module/blocks/__init__.py
deleted file mode 100644
index f046cd3..0000000
--- a/esrgan/model/module/blocks/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# flake8: noqa
-from esrgan.model.module.blocks.container import (
- ConcatInputModule, ResidualModule,
-)
-from esrgan.model.module.blocks.misc import (
- Conv2d, Conv2dSN, LeakyReLU, LinearSN,
-)
-from esrgan.model.module.blocks.rrdb import (
- ResidualDenseBlock, ResidualInResidualDenseBlock,
-)
-from esrgan.model.module.blocks.upsampling import InterpolateConv, SubPixelConv
diff --git a/esrgan/model/module/conv.py b/esrgan/model/module/conv.py
deleted file mode 100644
index 7c01a50..0000000
--- a/esrgan/model/module/conv.py
+++ /dev/null
@@ -1,99 +0,0 @@
-import collections
-from typing import Callable, Dict, Iterable, List, Optional, Tuple
-
-import torch
-from torch import nn
-
-from esrgan import utils
-from esrgan.model.module import blocks
-from esrgan.utils.types import ModuleParams
-
-
-class StridedConvEncoder(nn.Module):
- """Generalized Fully Convolutional encoder.
-
- Args:
- layers: List of feature maps sizes of each block.
- layer_order: Ordered list of layers applied within each block.
- For instance, if you don't want to use normalization layer
- just exclude it from this list.
- conv_fn: Convolutional layer params.
- activation_fn: Activation function to use.
- norm_fn: Normalization layer params, e.g. :py:class:`.nn.BatchNorm2d`.
- residual_fn: Block wrapper function, e.g.
- :py:class:`~.blocks.container.ResidualModule` can be used
- to add residual connections between blocks.
-
- """
-
- @utils.process_fn_params
- def __init__(
- self,
- layers: Iterable[int] = (3, 64, 128, 128, 256, 256, 512, 512),
- layer_order: Iterable[str] = ("conv", "norm", "activation"),
- conv_fn: ModuleParams = blocks.Conv2d,
- activation_fn: ModuleParams = blocks.LeakyReLU,
- norm_fn: Optional[ModuleParams] = nn.BatchNorm2d,
- residual_fn: Optional[ModuleParams] = None,
- ):
- super().__init__()
-
- name2fn: Dict[str, Callable[..., nn.Module]] = {
- "activation": activation_fn,
- "conv": conv_fn,
- "norm": norm_fn,
- }
-
- self._layers = list(layers)
-
- net: List[Tuple[str, nn.Module]] = []
-
- first_conv = collections.OrderedDict([
- ("conv_0", name2fn["conv"](self._layers[0], self._layers[1])),
- ("act", name2fn["activation"]()),
- ])
- net.append(("block_0", nn.Sequential(first_conv)))
-
- channels = utils.pairwise(self._layers[1:])
- for i, (in_ch, out_ch) in enumerate(channels, start=1):
- block_list: List[Tuple[str, nn.Module]] = []
- for name in layer_order:
- # `conv + 2x2 pooling` is equal to `conv with stride=2`
- kwargs = {"stride": out_ch // in_ch} if name == "conv" else {}
-
- module = utils.create_layer(
- layer_name=name,
- layer=name2fn[name],
- in_channels=in_ch,
- out_channels=out_ch,
- **kwargs
- )
- block_list.append((name, module))
- block = nn.Sequential(collections.OrderedDict(block_list))
-
- # add residual connection, like in resnet blocks
- if residual_fn is not None and in_ch == out_ch:
- block = residual_fn(block)
-
- net.append((f"block_{i}", block))
-
- self.net = nn.Sequential(collections.OrderedDict(net))
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Forward pass."""
- output = self.net(x)
-
- return output
-
- @property
- def in_channels(self) -> int:
- """The number of channels in the feature map of the input."""
- return self._layers[0]
-
- @property
- def out_channels(self) -> int:
- """Number of channels produced by the block."""
- return self._layers[-1]
-
-
-__all__ = ["StridedConvEncoder"]
diff --git a/esrgan/model/module/linear.py b/esrgan/model/module/linear.py
deleted file mode 100644
index 72b6cdf..0000000
--- a/esrgan/model/module/linear.py
+++ /dev/null
@@ -1,82 +0,0 @@
-from typing import Callable, Dict, Iterable, List, Optional, Tuple
-
-import torch
-from torch import nn
-
-from esrgan import utils
-from esrgan.model.module import blocks
-from esrgan.utils.types import ModuleParams
-
-
-class LinearHead(nn.Module):
- """Stack of linear layers used for embeddings classification.
-
- Args:
- in_channels: Size of each input sample.
- out_channels: Size of each output sample.
- latent_channels: Size of the latent space.
- layer_order: Ordered list of layers applied within each block.
- For instance, if you don't want to use normalization layer
- just exclude it from this list.
- linear_fn: Linear layer params.
- activation_fn: Activation function to use.
- norm_fn: Normalization layer params, e.g. :py:class:`nn.BatchNorm1d`.
- dropout_fn: Dropout layer params, e.g. :py:class:`nn.Dropout`.
-
- """
-
- @utils.process_fn_params
- def __init__(
- self,
- in_channels: int,
- out_channels: int,
- latent_channels: Optional[Iterable[int]] = None,
- layer_order: Iterable[str] = ("linear", "activation"),
- linear_fn: ModuleParams = nn.Linear,
- activation_fn: ModuleParams = blocks.LeakyReLU,
- norm_fn: Optional[ModuleParams] = None,
- dropout_fn: Optional[ModuleParams] = None,
- ) -> None:
- super().__init__()
-
- name2fn: Dict[str, Callable[..., nn.Module]] = {
- "activation": activation_fn,
- "dropout": dropout_fn,
- "linear": linear_fn,
- "norm": norm_fn,
- }
-
- latent_channels = latent_channels if latent_channels else []
- channels = [in_channels, *latent_channels, out_channels]
- channels_pairs: List[Tuple[int, int]] = list(utils.pairwise(channels))
-
- net: List[nn.Module] = []
- for in_ch, out_ch in channels_pairs[:-1]:
- for name in layer_order:
- module = utils.create_layer(
- layer_name=name,
- layer=name2fn[name],
- in_channels=in_ch,
- out_channels=out_ch,
- )
- net.append(module)
- net.append(name2fn["linear"](*channels_pairs[-1]))
-
- self.net = nn.Sequential(*net)
-
- def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Forward pass.
-
- Args:
- x: Batch of inputs e.g. images.
-
- Returns:
- Batch of logits.
-
- """
- output = self.net(x)
-
- return output
-
-
-__all__ = ["LinearHead"]
diff --git a/esrgan/models/__init__.py b/esrgan/models/__init__.py
new file mode 100644
index 0000000..27e828b
--- /dev/null
+++ b/esrgan/models/__init__.py
@@ -0,0 +1,5 @@
+# flake8: noqa
+from esrgan.models.discriminator import LinearHead, StridedConvEncoder, VGGConv
+from esrgan.models.esrnet import ESREncoder, ESRNetDecoder
+from esrgan.models.generator import EncoderDecoderNet
+from esrgan.models.srresnet import SRResNetDecoder, SRResNetEncoder
diff --git a/esrgan/models/discriminator.py b/esrgan/models/discriminator.py
new file mode 100644
index 0000000..2d2a90e
--- /dev/null
+++ b/esrgan/models/discriminator.py
@@ -0,0 +1,231 @@
+import collections
+from typing import Callable, Dict, Iterable, List, Optional, Tuple
+
+import torch
+from torch import nn
+
+from esrgan import utils
+from esrgan.nn import modules
+
+__all__ = ["StridedConvEncoder", "LinearHead", "VGGConv"]
+
+
+class StridedConvEncoder(nn.Module):
+ """Generalized Fully Convolutional encoder.
+
+ Args:
+ layers: List of feature maps sizes of each block.
+ layer_order: Ordered list of layers applied within each block.
+ For instance, if you don't want to use normalization layer
+ just exclude it from this list.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ norm: Class constructor or partial object which when called should
+ return normalization layer e.g., :py:class:`.nn.BatchNorm2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use e.g., :py:class:`nn.ReLU`.
+ residual: Class constructor or partial object which when called
+ should return block wrapper module e.g.,
+ :py:class:`esrgan.nn.ResidualModule` can be used
+ to add residual connections between blocks.
+
+ """
+
+ def __init__(
+ self,
+ layers: Iterable[int] = (3, 64, 128, 128, 256, 256, 512, 512),
+ layer_order: Iterable[str] = ("conv", "norm", "activation"),
+ conv: Callable[..., nn.Module] = modules.Conv2d,
+ norm: Optional[Callable[..., nn.Module]] = nn.BatchNorm2d,
+ activation: Callable[..., nn.Module] = modules.LeakyReLU,
+ residual: Optional[Callable[..., nn.Module]] = None,
+ ):
+ super().__init__()
+
+ name2fn: Dict[str, Callable[..., nn.Module]] = {
+ "activation": activation,
+ "conv": conv,
+ "norm": norm,
+ }
+
+ self._layers = list(layers)
+
+ net: List[Tuple[str, nn.Module]] = []
+
+ first_conv = collections.OrderedDict([
+ ("conv_0", name2fn["conv"](self._layers[0], self._layers[1])),
+ ("act", name2fn["activation"]()),
+ ])
+ net.append(("block_0", nn.Sequential(first_conv)))
+
+ channels = utils.pairwise(self._layers[1:])
+ for i, (in_ch, out_ch) in enumerate(channels, start=1):
+ block_list: List[Tuple[str, nn.Module]] = []
+ for name in layer_order:
+ # `conv + 2x2 pooling` is equal to `conv with stride=2`
+ kwargs = {"stride": out_ch // in_ch} if name == "conv" else {}
+
+ module = utils.create_layer(
+ layer_name=name,
+ layer=name2fn[name],
+ in_channels=in_ch,
+ out_channels=out_ch,
+ **kwargs
+ )
+ block_list.append((name, module))
+ block = nn.Sequential(collections.OrderedDict(block_list))
+
+ # add residual connection, like in resnet blocks
+ if residual is not None and in_ch == out_ch:
+ block = residual(block)
+
+ net.append((f"block_{i}", block))
+
+ self.net = nn.Sequential(collections.OrderedDict(net))
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward pass.
+
+ Args:
+ x: Batch of inputs.
+
+ Returns:
+ Batch of embeddings.
+
+ """
+ output = self.net(x)
+
+ return output
+
+ @property
+ def in_channels(self) -> int:
+ """The number of channels in the feature map of the input.
+
+ Returns:
+ Size of the input feature map.
+
+ """
+ return self._layers[0]
+
+ @property
+ def out_channels(self) -> int:
+ """Number of channels produced by the block.
+
+ Returns:
+ Size of the output feature map.
+
+ """
+ return self._layers[-1]
+
+
+class LinearHead(nn.Module):
+ """Stack of linear layers used for embeddings classification.
+
+ Args:
+ in_channels: Size of each input sample.
+ out_channels: Size of each output sample.
+ latent_channels: Size of the latent space.
+ layer_order: Ordered list of layers applied within each block.
+ For instance, if you don't want to use activation function
+ just exclude it from this list.
+ linear: Class constructor or partial object which when called
+ should return linear layer e.g., :py:class:`nn.Linear`.
+ activation: Class constructor or partial object which when called
+ should return activation function layer e.g., :py:class:`nn.ReLU`.
+ norm: Class constructor or partial object which when called
+ should return normalization layer e.g., :py:class:`nn.BatchNorm1d`.
+ dropout: Class constructor or partial object which when called
+ should return dropout layer e.g., :py:class:`nn.Dropout`.
+
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ latent_channels: Optional[Iterable[int]] = None,
+ layer_order: Iterable[str] = ("linear", "activation"),
+ linear: Callable[..., nn.Module] = nn.Linear,
+ activation: Callable[..., nn.Module] = modules.LeakyReLU,
+ norm: Optional[Callable[..., nn.Module]] = None,
+ dropout: Optional[Callable[..., nn.Module]] = None,
+ ) -> None:
+ super().__init__()
+
+ name2fn: Dict[str, Callable[..., nn.Module]] = {
+ "activation": activation,
+ "dropout": dropout,
+ "linear": linear,
+ "norm": norm,
+ }
+
+ latent_channels = latent_channels or []
+ channels = [in_channels, *latent_channels, out_channels]
+ channels_pairs: List[Tuple[int, int]] = list(utils.pairwise(channels))
+
+ net: List[nn.Module] = []
+ for in_ch, out_ch in channels_pairs[:-1]:
+ for name in layer_order:
+ module = utils.create_layer(
+ layer_name=name,
+ layer=name2fn[name],
+ in_channels=in_ch,
+ out_channels=out_ch,
+ )
+ net.append(module)
+ net.append(name2fn["linear"](*channels_pairs[-1]))
+
+ self.net = nn.Sequential(*net)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward pass.
+
+ Args:
+ x: Batch of inputs e.g. images.
+
+ Returns:
+ Batch of logits.
+
+ """
+ output = self.net(x)
+
+ return output
+
+
+class VGGConv(nn.Module):
+ """VGG-like neural network for image classification.
+
+ Args:
+ encoder: Image encoder module, usually used for the extraction
+ of embeddings from input signals.
+ pool: Pooling layer, used to reduce embeddings from the encoder.
+ head: Classification head, usually consists of Fully Connected layers.
+
+ """
+
+ def __init__(
+ self, encoder: nn.Module, pool: nn.Module, head: nn.Module,
+ ) -> None:
+ super().__init__()
+
+ self.encoder = encoder
+ self.pool = pool
+ self.head = head
+
+ utils.net_init_(self)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward call.
+
+ Args:
+ x: Batch of images.
+
+ Returns:
+ Batch of logits.
+
+ """
+ x = self.pool(self.encoder(x))
+ x = x.view(x.shape[0], -1)
+ x = self.head(x)
+
+ return x
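
To see how these three pieces compose, here is a hedged sketch of building the
full discriminator (the pooling layer and the latent width are illustrative
choices, not mandated by the code above):

```python
import torch
from torch import nn

from esrgan.models import LinearHead, StridedConvEncoder, VGGConv

encoder = StridedConvEncoder()    # defaults: 3 -> ... -> 512 feature maps
pool = nn.AdaptiveAvgPool2d(1)    # collapse spatial dims to 1x1
head = LinearHead(in_channels=512, out_channels=1, latent_channels=[1024])

discriminator = VGGConv(encoder=encoder, pool=pool, head=head)
logits = discriminator(torch.rand(4, 3, 96, 96))  # -> shape (4, 1)
```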
diff --git a/esrgan/model/module/esrnet.py b/esrgan/models/esrnet.py
similarity index 74%
rename from esrgan/model/module/esrnet.py
rename to esrgan/models/esrnet.py
index d3b7f81..51af6f9 100644
--- a/esrgan/model/module/esrnet.py
+++ b/esrgan/models/esrnet.py
@@ -1,12 +1,13 @@
import collections
-from typing import List, Tuple
+from typing import Callable, List, Tuple
import torch
from torch import nn
from esrgan import utils
-from esrgan.model.module import blocks
-from esrgan.utils.types import ModuleParams
+from esrgan.nn import modules
+
+__all__ = ["ESREncoder", "ESRNetDecoder"]
class ESREncoder(nn.Module):
@@ -23,8 +24,10 @@ class ESREncoder(nn.Module):
Dense block (RRDB) to use.
num_dense_blocks: Number of dense blocks to use to form `RRDB` block.
num_residual_blocks: Number of convolutions to use to form dense block.
- conv_fn: Convolutional layers parameters.
- activation_fn: Activation function to use after BN layers.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use e.g., :py:class:`nn.ReLU`.
residual_scaling: Residual connections scaling factor.
.. _`ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks`:
@@ -32,7 +35,6 @@ class ESREncoder(nn.Module):
"""
- @utils.process_fn_params
def __init__(
self,
in_channels: int = 3,
@@ -41,8 +43,8 @@ def __init__(
num_basic_blocks: int = 23,
num_dense_blocks: int = 3,
num_residual_blocks: int = 5,
- conv_fn: ModuleParams = blocks.Conv2d,
- activation_fn: ModuleParams = blocks.LeakyReLU,
+ conv: Callable[..., nn.Module] = modules.Conv2d,
+ activation: Callable[..., nn.Module] = modules.LeakyReLU,
residual_scaling: float = 0.2,
) -> None:
super().__init__()
@@ -50,16 +52,16 @@ def __init__(
blocks_list: List[nn.Module] = []
# first conv
- first_conv = conv_fn(in_channels, out_channels)
+ first_conv = conv(in_channels, out_channels)
blocks_list.append(first_conv)
# basic blocks - sequence of rrdb layers
for _ in range(num_basic_blocks):
- basic_block = blocks.ResidualInResidualDenseBlock(
+ basic_block = modules.ResidualInResidualDenseBlock(
num_features=out_channels,
growth_channels=growth_channels,
- conv_fn=conv_fn,
- activation_fn=activation_fn,
+ conv=conv,
+ activation=activation,
num_dense_blocks=num_dense_blocks,
num_blocks=num_residual_blocks,
residual_scaling=residual_scaling,
@@ -67,7 +69,7 @@ def __init__(
blocks_list.append(basic_block)
# last conv of the encoder
- last_conv = conv_fn(out_channels, out_channels)
+ last_conv = conv(out_channels, out_channels)
blocks_list.append(last_conv)
self.blocks = nn.ModuleList(blocks_list)
@@ -101,22 +103,23 @@ class ESRNetDecoder(nn.Module):
scale_factor: Ratio between the size of the high-resolution image
(output) and its low-resolution counterpart (input).
In other words multiplier for spatial size.
- conv_fn: Convolutional layers parameters.
- activation_fn: Activation function to use.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use e.g., :py:class:`nn.ReLU`.
.. _`ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks`:
https://arxiv.org/pdf/1809.00219.pdf
"""
- @utils.process_fn_params
def __init__(
self,
in_channels: int = 64,
out_channels: int = 3,
scale_factor: int = 2,
- conv_fn: ModuleParams = blocks.Conv2d,
- activation_fn: ModuleParams = blocks.LeakyReLU,
+ conv: Callable[..., nn.Module] = modules.Conv2d,
+ activation: Callable[..., nn.Module] = modules.LeakyReLU,
) -> None:
super().__init__()
@@ -130,18 +133,18 @@ def __init__(
# upsampling
for i in range(scale_factor // 2):
- upsampling_block = blocks.InterpolateConv(
+ upsampling_block = modules.InterpolateConv(
num_features=in_channels,
- conv_fn=conv_fn,
- activation_fn=activation_fn,
+ conv=conv,
+ activation=activation,
)
blocks_list.append((f"upsampling_{i}", upsampling_block))
# highres conv + last conv
last_conv = nn.Sequential(
- conv_fn(in_channels, in_channels),
- activation_fn(),
- conv_fn(in_channels, out_channels),
+ conv(in_channels, in_channels),
+ activation(),
+ conv(in_channels, out_channels),
)
blocks_list.append(("conv", last_conv))
@@ -160,6 +163,3 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
output = self.blocks(x)
return output
-
-
-__all__ = ["ESREncoder", "ESRNetDecoder"]
diff --git a/esrgan/models/generator.py b/esrgan/models/generator.py
new file mode 100644
index 0000000..75e3552
--- /dev/null
+++ b/esrgan/models/generator.py
@@ -0,0 +1,42 @@
+import torch
+from torch import nn
+
+from esrgan import utils
+
+__all__ = ["EncoderDecoderNet"]
+
+
+class EncoderDecoderNet(nn.Module):
+ """Generalized Encoder-Decoder network.
+
+ Args:
+ encoder: Encoder module, usually used for the extraction
+ of embeddings from input signals.
+ decoder: Decoder module, usually used for embeddings processing
+ e.g. generation of signal similar to the input one (in GANs).
+
+ """
+
+ def __init__(self, encoder: nn.Module, decoder: nn.Module) -> None:
+ super().__init__()
+
+ self.encoder = encoder
+ self.decoder = decoder
+
+ utils.net_init_(self)
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ """Forward pass method.
+
+ Args:
+ x: Batch of input signals e.g. images.
+
+ Returns:
+ Batch of generated signals e.g. images.
+
+ """
+ x = self.encoder(x)
+ x = self.decoder(x)
+ x = torch.clamp(x, min=0.0, max=1.0)
+
+ return x
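
The ESRGAN generator is this wrapper around the encoder/decoder pair from
`esrgan/models/esrnet.py`; a minimal sketch using the defaults (the x4
configuration mirrors the paper and is an assumption here, not fixed by the
class):

```python
import torch

from esrgan.models import EncoderDecoderNet, ESREncoder, ESRNetDecoder

generator = EncoderDecoderNet(
    encoder=ESREncoder(in_channels=3, out_channels=64, num_basic_blocks=23),
    decoder=ESRNetDecoder(in_channels=64, out_channels=3, scale_factor=4),
)
sr = generator(torch.rand(1, 3, 24, 24))  # -> (1, 3, 96, 96), clamped to [0, 1]
```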
diff --git a/esrgan/model/module/srresnet.py b/esrgan/models/srresnet.py
similarity index 67%
rename from esrgan/model/module/srresnet.py
rename to esrgan/models/srresnet.py
index 2da11a0..a6ded02 100644
--- a/esrgan/model/module/srresnet.py
+++ b/esrgan/models/srresnet.py
@@ -1,12 +1,13 @@
import collections
-from typing import List, Tuple
+from typing import Callable, List, Tuple
import torch
from torch import nn
from esrgan import utils
-from esrgan.model.module import blocks
-from esrgan.utils.types import ModuleParams
+from esrgan.nn import modules
+
+__all__ = ["SRResNetEncoder", "SRResNetDecoder"]
class SRResNetEncoder(nn.Module):
@@ -19,24 +20,27 @@ class SRResNetEncoder(nn.Module):
in_channels: Number of channels in the input image.
out_channels: Number of channels produced by the encoder.
num_basic_blocks: Depth of the encoder, number of basic blocks to use.
- conv_fn: Convolutional layers parameters.
- norm_fn: Batch norm layer to use.
- activation_fn: Activation function to use after BN layers.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ norm: Class constructor or partial object which when called should
+ return normalization layer e.g., :py:class:`.nn.BatchNorm2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use after BN layers
+ e.g., :py:class:`nn.PReLU`.
.. _`Photo-Realistic Single Image Super-Resolution Using a Generative
Adversarial Network`: https://arxiv.org/pdf/1609.04802.pdf
"""
- @utils.process_fn_params
def __init__(
self,
in_channels: int = 3,
out_channels: int = 64,
num_basic_blocks: int = 16,
- conv_fn: ModuleParams = blocks.Conv2d,
- norm_fn: ModuleParams = nn.BatchNorm2d,
- activation_fn: ModuleParams = nn.PReLU,
+ conv: Callable[..., nn.Module] = modules.Conv2d,
+ norm: Callable[..., nn.Module] = nn.BatchNorm2d,
+ activation: Callable[..., nn.Module] = nn.PReLU,
) -> None:
super().__init__()
@@ -45,24 +49,24 @@ def __init__(
# first conv
first_conv = nn.Sequential(
- conv_fn(in_channels, num_features), activation_fn()
+ conv(in_channels, num_features), activation()
)
blocks_list.append(first_conv)
# basic blocks - sequence of B residual blocks
for _ in range(num_basic_blocks):
basic_block = nn.Sequential(
- conv_fn(num_features, num_features),
- norm_fn(num_features,),
- activation_fn(),
- conv_fn(num_features, num_features),
- norm_fn(num_features),
+ conv(num_features, num_features),
+ norm(num_features,),
+ activation(),
+ conv(num_features, num_features),
+ norm(num_features),
)
- blocks_list.append(blocks.ResidualModule(basic_block))
+ blocks_list.append(modules.ResidualModule(basic_block))
# last conv of the encoder
last_conv = nn.Sequential(
- conv_fn(num_features, out_channels), norm_fn(out_channels),
+ conv(num_features, out_channels), norm(out_channels),
)
blocks_list.append(last_conv)
@@ -97,22 +101,23 @@ class SRResNetDecoder(nn.Module):
scale_factor: Ratio between the size of the high-resolution image
(output) and its low-resolution counterpart (input).
In other words multiplier for spatial size.
- conv_fn: Convolutional layers parameters.
- activation_fn: Activation function to use.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use e.g., :py:class:`nn.ReLU`.
.. _`Photo-Realistic Single Image Super-Resolution Using a Generative
Adversarial Network`: https://arxiv.org/pdf/1609.04802.pdf
"""
- @utils.process_fn_params
def __init__(
self,
in_channels: int = 64,
out_channels: int = 3,
scale_factor: int = 2,
- conv_fn: ModuleParams = blocks.Conv2d,
- activation_fn: ModuleParams = nn.PReLU,
+ conv: Callable[..., nn.Module] = modules.Conv2d,
+ activation: Callable[..., nn.Module] = nn.PReLU,
) -> None:
super().__init__()
@@ -126,15 +131,15 @@ def __init__(
# upsampling
for i in range(scale_factor // 2):
- upsampling_block = blocks.SubPixelConv(
+ upsampling_block = modules.SubPixelConv(
num_features=in_channels,
- conv_fn=conv_fn,
- activation_fn=activation_fn,
+ conv=conv,
+ activation=activation,
)
blocks_list.append((f"upsampling_{i}", upsampling_block))
# highres conv
- last_conv = conv_fn(in_channels, out_channels)
+ last_conv = conv(in_channels, out_channels)
blocks_list.append(("conv", last_conv))
self.blocks = nn.Sequential(collections.OrderedDict(blocks_list))
@@ -152,6 +157,3 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
output = self.blocks(x)
return output
-
-
-__all__ = ["SRResNetEncoder", "SRResNetDecoder"]
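
The SRResNet modules plug into the same `EncoderDecoderNet` wrapper; a hedged
sketch of the SRGAN-style generator (sizes follow the defaults above, the x4
scale is illustrative):

```python
import torch

from esrgan.models import EncoderDecoderNet, SRResNetDecoder, SRResNetEncoder

generator = EncoderDecoderNet(
    encoder=SRResNetEncoder(in_channels=3, out_channels=64, num_basic_blocks=16),
    decoder=SRResNetDecoder(in_channels=64, out_channels=3, scale_factor=4),
)
sr = generator(torch.rand(1, 3, 32, 32))  # -> (1, 3, 128, 128)
```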
diff --git a/esrgan/nn/__init__.py b/esrgan/nn/__init__.py
new file mode 100644
index 0000000..649ed62
--- /dev/null
+++ b/esrgan/nn/__init__.py
@@ -0,0 +1,9 @@
+# flake8: noqa
+from esrgan.nn.criterions import (
+ AdversarialLoss, PerceptualLoss, RelativisticAdversarialLoss,
+)
+from esrgan.nn.modules import (
+ ConcatInputModule, Conv2d, Conv2dSN, InterpolateConv, LeakyReLU, LinearSN,
+ ResidualDenseBlock, ResidualInResidualDenseBlock, ResidualModule,
+ SubPixelConv,
+)
diff --git a/esrgan/nn/criterions/__init__.py b/esrgan/nn/criterions/__init__.py
new file mode 100644
index 0000000..5784e86
--- /dev/null
+++ b/esrgan/nn/criterions/__init__.py
@@ -0,0 +1,5 @@
+# flake8: noqa
+from esrgan.nn.criterions.adversarial import (
+ AdversarialLoss, RelativisticAdversarialLoss,
+)
+from esrgan.nn.criterions.perceptual import PerceptualLoss
diff --git a/esrgan/criterions/adversarial.py b/esrgan/nn/criterions/adversarial.py
similarity index 98%
rename from esrgan/criterions/adversarial.py
rename to esrgan/nn/criterions/adversarial.py
index 0ca2f63..f037802 100644
--- a/esrgan/criterions/adversarial.py
+++ b/esrgan/nn/criterions/adversarial.py
@@ -2,6 +2,8 @@
from torch.nn import functional as F
from torch.nn.modules.loss import _Loss
+__all__ = ["AdversarialLoss", "RelativisticAdversarialLoss"]
+
class AdversarialLoss(_Loss):
"""GAN Loss function.
@@ -85,7 +87,7 @@ class RelativisticAdversarialLoss(_Loss):
``'generator'``: maximize probability that fake data more realistic
than real (it is useful when training generator),
``'discriminator'``: maximize probability that real data more
- realistic than fake (useful when training discriminator.
+ realistic than fake (useful when training discriminator).
Raises:
NotImplementedError: If `mode` not ``'generator'``
@@ -107,6 +109,7 @@ def __init__(self, mode: str = "discriminator") -> None:
raise NotImplementedError()
def forward(
+ # self, outputs: torch.Tensor, targets: torch.Tensor
self, fake_logits: torch.Tensor, real_logits: torch.Tensor
) -> torch.Tensor:
"""Forward propagation method for the relativistic adversarial loss.
@@ -130,6 +133,3 @@ def forward(
loss = (loss_fr + loss_rf) / 2
return loss
-
-
-__all__ = ["AdversarialLoss", "RelativisticAdversarialLoss"]
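
For reference, the relativistic average losses computed in `forward` above
follow the RaGAN formulation used by ESRGAN, summarized here (`C(x)` are raw
discriminator logits, `\sigma` the sigmoid; the code returns the mean of the
two terms):

```latex
L_D = - \mathbb{E}_{x_r}\big[\log \sigma\big(C(x_r) - \mathbb{E}_{x_f}[C(x_f)]\big)\big]
      - \mathbb{E}_{x_f}\big[\log\big(1 - \sigma\big(C(x_f) - \mathbb{E}_{x_r}[C(x_r)]\big)\big)\big]

L_G = - \mathbb{E}_{x_f}\big[\log \sigma\big(C(x_f) - \mathbb{E}_{x_r}[C(x_r)]\big)\big]
      - \mathbb{E}_{x_r}\big[\log\big(1 - \sigma\big(C(x_r) - \mathbb{E}_{x_f}[C(x_f)]\big)\big)\big]
```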
diff --git a/esrgan/criterions/perceptual.py b/esrgan/nn/criterions/perceptual.py
similarity index 99%
rename from esrgan/criterions/perceptual.py
rename to esrgan/nn/criterions/perceptual.py
index 10e9865..0981757 100644
--- a/esrgan/criterions/perceptual.py
+++ b/esrgan/nn/criterions/perceptual.py
@@ -6,6 +6,8 @@
from torch.nn.modules.loss import _Loss
import torchvision
+__all__ = ["PerceptualLoss"]
+
def _layer2index_vgg16(layer: str) -> int:
"""Map name of VGG layer to corresponding number in torchvision layer.
@@ -152,6 +154,3 @@ def _get_features(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
features[name] = x
return features
-
-
-__all__ = ["PerceptualLoss"]
diff --git a/esrgan/nn/modules/__init__.py b/esrgan/nn/modules/__init__.py
new file mode 100644
index 0000000..2f9f3aa
--- /dev/null
+++ b/esrgan/nn/modules/__init__.py
@@ -0,0 +1,7 @@
+# flake8: noqa
+from esrgan.nn.modules.container import ConcatInputModule, ResidualModule
+from esrgan.nn.modules.misc import Conv2d, Conv2dSN, LeakyReLU, LinearSN
+from esrgan.nn.modules.rrdb import (
+ ResidualDenseBlock, ResidualInResidualDenseBlock,
+)
+from esrgan.nn.modules.upsampling import InterpolateConv, SubPixelConv
diff --git a/esrgan/model/module/blocks/container.py b/esrgan/nn/modules/container.py
similarity index 85%
rename from esrgan/model/module/blocks/container.py
rename to esrgan/nn/modules/container.py
index bb7d637..881f543 100644
--- a/esrgan/model/module/blocks/container.py
+++ b/esrgan/nn/modules/container.py
@@ -3,6 +3,8 @@
import torch
from torch import nn
+__all__ = ["ConcatInputModule", "ResidualModule"]
+
class ConcatInputModule(nn.Module):
"""Module wrapper, passing outputs of all previous layers
@@ -19,7 +21,15 @@ def __init__(self, module: Iterable[nn.Module]) -> None:
self.module = module
def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Forward pass."""
+ """Forward pass.
+
+ Args:
+ x: Batch of inputs.
+
+ Returns:
+ Processed batch.
+
+ """
output = [x]
for module in self.module:
z = torch.cat(output, dim=1)
@@ -58,8 +68,13 @@ def __init__(
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
- """Forward pass."""
- return x + self.scale * self.module(x)
+        """Forward pass.
+
+        Args:
+            x: Batch of inputs.
+
+        Returns:
+            Processed batch.
+
+        """
+        return x + self.scale * self.module(x)
-__all__ = ["ConcatInputModule", "ResidualModule"]
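
Both wrappers are tiny but load-bearing; a minimal check of `ResidualModule`
(the conv block is a stand-in, and the `module`/`scale` keywords are the ones
used by `rrdb.py` below):

```python
import torch
from torch import nn

from esrgan.nn import ResidualModule

block = nn.Sequential(nn.Conv2d(8, 8, 3, padding=1), nn.ReLU())
residual = ResidualModule(module=block, scale=0.2)  # y = x + 0.2 * block(x)
y = residual(torch.rand(1, 8, 16, 16))              # shape is preserved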
diff --git a/esrgan/model/module/blocks/misc.py b/esrgan/nn/modules/misc.py
similarity index 100%
rename from esrgan/model/module/blocks/misc.py
rename to esrgan/nn/modules/misc.py
index e5a0d34..5ffc38a 100644
--- a/esrgan/model/module/blocks/misc.py
+++ b/esrgan/nn/modules/misc.py
@@ -4,6 +4,9 @@
from torch import nn
from torch.nn.utils.spectral_norm import SpectralNorm
+__all__ = ["Conv2d", "Conv2dSN", "LeakyReLU", "LinearSN"]
+
+
Conv2d: Callable[..., nn.Module] = functools.partial(
nn.Conv2d, kernel_size=(3, 3), padding=1
)
@@ -137,6 +140,3 @@ def __init__(
dim=0,
eps=1e-12
)
-
-
-__all__ = ["Conv2d", "Conv2dSN", "LeakyReLU", "LinearSN"]
diff --git a/esrgan/model/module/blocks/rrdb.py b/esrgan/nn/modules/rrdb.py
similarity index 79%
rename from esrgan/model/module/blocks/rrdb.py
rename to esrgan/nn/modules/rrdb.py
index 95fa73d..cce787c 100644
--- a/esrgan/model/module/blocks/rrdb.py
+++ b/esrgan/nn/modules/rrdb.py
@@ -1,11 +1,12 @@
import collections
-from typing import Any, List, Tuple
+from typing import Any, Callable, List, Tuple
from torch import nn
-from esrgan import utils
-from esrgan.model.module.blocks import container, Conv2d, LeakyReLU
-from esrgan.utils.types import ModuleParams
+from esrgan.nn.modules import container
+from esrgan.nn.modules.misc import Conv2d, LeakyReLU
+
+__all__ = ["ResidualDenseBlock", "ResidualInResidualDenseBlock"]
class ResidualDenseBlock(container.ResidualModule):
@@ -16,20 +17,22 @@ class ResidualDenseBlock(container.ResidualModule):
:math:`(N, C, H, W)`.
growth_channels: Number of channels in the latent space.
num_blocks: Number of convolutional blocks to use to form dense block.
- conv_fn: Convolutional layers parameters.
- activation_fn: Activation function to use after each conv layer.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use after convolution
+ e.g., :py:class:`nn.LeakyReLU`.
residual_scaling: Residual connections scaling factor.
"""
- @utils.process_fn_params
def __init__(
self,
num_features: int,
growth_channels: int,
num_blocks: int = 5,
- conv_fn: ModuleParams = Conv2d,
- activation_fn: ModuleParams = LeakyReLU,
+ conv: Callable[..., nn.Module] = Conv2d,
+ activation: Callable[..., nn.Module] = LeakyReLU,
residual_scaling: float = 0.2,
) -> None:
in_channels = [
@@ -40,8 +43,8 @@ def __init__(
blocks: List[nn.Module] = []
for in_channels_, out_channels_ in zip(in_channels, out_channels):
block = collections.OrderedDict([
- ("conv", conv_fn(in_channels_, out_channels_)),
- ("act", activation_fn()),
+ ("conv", conv(in_channels_, out_channels_)),
+ ("act", activation()),
])
blocks.append(nn.Sequential(block))
@@ -92,6 +95,3 @@ def __init__(
module=nn.Sequential(collections.OrderedDict(blocks)),
scale=residual_scaling
)
-
-
-__all__ = ["ResidualDenseBlock", "ResidualInResidualDenseBlock"]
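
A small smoke test of the renamed block (64 features / 32 growth channels are
the usual ESRGAN values; the remaining arguments are assumed to fall back to
the defaults implied by the `ESREncoder` call pattern):

```python
import torch

from esrgan.nn import ResidualInResidualDenseBlock

rrdb = ResidualInResidualDenseBlock(num_features=64, growth_channels=32)
out = rrdb(torch.rand(1, 64, 24, 24))  # shape preserved: (1, 64, 24, 24)
```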
diff --git a/esrgan/model/module/blocks/upsampling.py b/esrgan/nn/modules/upsampling.py
similarity index 65%
rename from esrgan/model/module/blocks/upsampling.py
rename to esrgan/nn/modules/upsampling.py
index 6cf8b26..f3513e7 100644
--- a/esrgan/model/module/blocks/upsampling.py
+++ b/esrgan/nn/modules/upsampling.py
@@ -1,10 +1,12 @@
+from typing import Callable
+
import torch
from torch import nn
from torch.nn import functional as F
-from esrgan import utils
-from esrgan.model.module.blocks.misc import Conv2d, LeakyReLU
-from esrgan.utils.types import ModuleParams
+from esrgan.nn.modules.misc import Conv2d, LeakyReLU
+
+__all__ = ["SubPixelConv", "InterpolateConv"]
class SubPixelConv(nn.Module):
@@ -19,8 +21,11 @@ class SubPixelConv(nn.Module):
Args:
num_features: Number of channels in the input tensor.
scale_factor: Factor to increase spatial resolution by.
- conv_fn: Convolution layer params.
- activation_fn: Activation function to use after sub-pixel convolution.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use after
+ sub-pixel convolution e.g., :py:class:`nn.PReLU`.
.. _`Real-Time Single Image and Video Super-Resolution Using an Efficient
Sub-Pixel Convolutional Neural Network`:
@@ -28,20 +33,19 @@ class SubPixelConv(nn.Module):
"""
- @utils.process_fn_params
def __init__(
self,
num_features: int,
scale_factor: int = 2,
- conv_fn: ModuleParams = Conv2d,
- activation_fn: ModuleParams = nn.PReLU,
+ conv: Callable[..., nn.Module] = Conv2d,
+ activation: Callable[..., nn.Module] = nn.PReLU,
):
super().__init__()
self.block = nn.Sequential(
- conv_fn(num_features, num_features * 4),
+ conv(num_features, num_features * 4),
nn.PixelShuffle(upscale_factor=scale_factor),
- activation_fn(),
+ activation(),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -65,25 +69,27 @@ class InterpolateConv(nn.Module):
Args:
num_features: Number of channels in the input tensor.
scale_factor: Factor to increase spatial resolution by.
- conv_fn: Convolutional layer params.
- activation_fn: Activation function to use after convolution.
+ conv: Class constructor or partial object which when called
+ should return convolutional layer e.g., :py:class:`nn.Conv2d`.
+ activation: Class constructor or partial object which when called
+ should return activation function to use after convolution
+ e.g., :py:class:`nn.PReLU`.
"""
- @utils.process_fn_params
def __init__(
self,
num_features: int,
scale_factor: int = 2,
- conv_fn: ModuleParams = Conv2d,
- activation_fn: ModuleParams = LeakyReLU,
+ conv: Callable[..., nn.Module] = Conv2d,
+ activation: Callable[..., nn.Module] = LeakyReLU,
) -> None:
super().__init__()
self.scale_factor = scale_factor
self.block = nn.Sequential(
- conv_fn(num_features, num_features),
- activation_fn(),
+ conv(num_features, num_features),
+ activation(),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
@@ -100,6 +106,3 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
output = self.block(x)
return output
-
-
-__all__ = ["SubPixelConv", "InterpolateConv"]
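
Both upsampling blocks double the spatial size by default while keeping the
channel count; a quick shape check (the input shape is illustrative):

```python
import torch

from esrgan.nn import InterpolateConv, SubPixelConv

x = torch.rand(1, 64, 24, 24)
print(SubPixelConv(num_features=64)(x).shape)     # (1, 64, 48, 48)
print(InterpolateConv(num_features=64)(x).shape)  # (1, 64, 48, 48)
```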
diff --git a/esrgan/runner.py b/esrgan/runner.py
new file mode 100644
index 0000000..f0b0170
--- /dev/null
+++ b/esrgan/runner.py
@@ -0,0 +1,210 @@
+from typing import Dict
+
+from catalyst import runners
+from catalyst.core.runner import IRunner
+import torch
+
+__all__ = ["GANRunner", "GANConfigRunner"]
+
+
+class GANRunner(IRunner):
+ """Runner for ESRGAN, please check `catalyst docs`__ for more info.
+
+ Args:
+ input_key: Key in batch dict mapping for model input.
+ target_key: Key in batch dict mapping for target.
+        generator_output_key: Key in the output dict under which
+            the output of the generator will be stored.
+        discriminator_real_output_gkey: Key to store predictions of
+            the discriminator for real inputs; these contain
+            gradients for the generator.
+        discriminator_fake_output_gkey: Key to store predictions of
+            the discriminator for outputs of the generator; these
+            contain gradients for the generator.
+        discriminator_real_output_dkey: Key to store predictions of
+            the discriminator for real inputs; these contain
+            gradients for the discriminator only.
+        discriminator_fake_output_dkey: Key to store predictions of
+            the discriminator for items produced by the generator;
+            these contain gradients for the discriminator only.
+ generator_key: Key in model dict mapping for generator model.
+ discriminator_key: Key in model dict mapping for discriminator
+ model (will be used in gan stages only).
+
+ __ https://catalyst-team.github.io/catalyst/api/core.html#experiment
+
+ """
+
+ def __init__(
+ self,
+ input_key: str = "image",
+ target_key: str = "real_image",
+ generator_output_key: str = "fake_image",
+ discriminator_real_output_gkey: str = "g_real_logits",
+ discriminator_fake_output_gkey: str = "g_fake_logits",
+ discriminator_real_output_dkey: str = "d_real_logits",
+ discriminator_fake_output_dkey: str = "d_fake_logits",
+ generator_key: str = "generator",
+ discriminator_key: str = "discriminator",
+ ) -> None:
+ super().__init__()
+
+ self.generator_key = generator_key
+ self.discriminator_key = discriminator_key
+
+ self.input_key = input_key
+ self.target_key = target_key
+ self.generator_output_key = generator_output_key
+ self.discriminator_real_output_gkey = discriminator_real_output_gkey
+ self.discriminator_fake_output_gkey = discriminator_fake_output_gkey
+ self.discriminator_real_output_dkey = discriminator_real_output_dkey
+ self.discriminator_fake_output_dkey = discriminator_fake_output_dkey
+
+ def predict_batch(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:
+ """Generate predictions based on input batch (generator inference).
+
+ Args:
+ batch: Input batch (batch of samples to adjust e.g. zoom).
+
+ Returns:
+ Batch of predictions of the generator.
+
+ """
+ model = self.model[self.generator_key]
+ output = model(batch[self.input_key].to(self.device))
+
+ return output
+
+ def handle_batch(self, batch: Dict[str, torch.Tensor]) -> None:
+ """Inner method to handle specified data batch.
+
+ Args:
+ batch: Dictionary with data batches from DataLoader.
+
+ """
+        # `handle_batch` is an `@abstractmethod`, so it must be defined here
+        # even though it is overwritten in `on_stage_start`
+ self._handle_batch_supervised(batch=batch)
+
+ def _handle_batch_supervised(self, batch: Dict[str, torch.Tensor]) -> None:
+ """Process train/valid batch, supervised mode.
+
+ Args:
+ batch: Input batch (batch of samples).
+
+ """
+ model = self.model[self.generator_key]
+ output = model(batch[self.input_key])
+
+ self.batch = {**batch, self.generator_output_key: output}
+
+ def _handle_batch_gan(self, batch: Dict[str, torch.Tensor]) -> None:
+ """Process train/valid batch, GAN mode.
+
+ Args:
+            batch: Input batch; should contain raw samples for the generator
+                and ground truth samples for the discriminator.
+
+ """
+ generator = self.model[self.generator_key]
+ discriminator = self.model[self.discriminator_key]
+
+ real_image = batch[self.target_key]
+ fake_image = generator(batch[self.input_key])
+
+        # lightly perturb real images with gaussian noise (a common GAN
+        # stabilization trick, so real images are not exactly noise-free)
+        noise = torch.randn(real_image.shape, device=real_image.device)
+        real_image = torch.clamp(real_image + 0.05 * noise, min=0.0, max=1.0)
+
+ # predictions used in calculation of adversarial loss of generator
+ real_logits_g = discriminator(real_image)
+ fake_logits_g = discriminator(fake_image)
+
+ # predictions used in calculation of adversarial loss of discriminator
+ real_logits_d = discriminator(real_image)
+ fake_logits_d = discriminator(fake_image.detach())
+
+ self.batch = {
+ **batch,
+ self.generator_output_key: fake_image,
+ self.discriminator_real_output_gkey: real_logits_g,
+ self.discriminator_fake_output_gkey: fake_logits_g,
+ self.discriminator_real_output_dkey: real_logits_d,
+ self.discriminator_fake_output_dkey: fake_logits_d,
+ }
+
+ def on_stage_start(self, runner: IRunner) -> None:
+ """Prepare `_handle_batch` method for current stage.
+
+ Args:
+ runner: Current runner.
+
+ Raises:
+ NotImplementedError: If the name of the stage neither ends
+ with ``'_supervised'`` or ``'_gan'`` nor equals ``'infer'``.
+
+ """
+ super().on_stage_start(runner=runner)
+
+ if self.stage_key.endswith("_supervised") or self.stage_key == "infer":
+ self.handle_batch = self._handle_batch_supervised
+ elif self.stage_key.endswith("_gan"):
+ self.handle_batch = self._handle_batch_gan
+ else:
+ raise NotImplementedError(f"`{self.stage_key}` is not supported")
+
+
+class GANConfigRunner(runners.ConfigRunner, GANRunner):
+ """ConfigRunner for ESRGAN, please check `catalyst docs`__ for more info.
+
+ Args:
+ config: Dictionary with parameters e.g., model or engine to use.
+ input_key: Key in batch dict mapping for model input.
+ target_key: Key in batch dict mapping for target.
+ generator_output_key: Key in output dict under which the output
+ of the generator will be stored.
+ discriminator_real_output_gkey: Key to store predictions of
+ discriminator for real inputs, which contain gradients
+ for the generator.
+ discriminator_fake_output_gkey: Key to store predictions of
+ discriminator for outputs of the generator, which contain
+ gradients for the generator.
+ discriminator_real_output_dkey: Key to store predictions of
+ discriminator for real inputs, which contain gradients
+ for the discriminator only.
+ discriminator_fake_output_dkey: Key to store predictions of
+ discriminator for items produced by the generator, which contain
+ gradients for the discriminator only.
+ generator_key: Key in model dict mapping for generator model.
+ discriminator_key: Key in model dict mapping for discriminator
+ model (used in GAN stages only).
+
+ __ https://catalyst-team.github.io/catalyst/api/core.html#experiment
+
+ """
+
+ def __init__(
+ self,
+ config: Dict,
+ input_key: str = "image",
+ target_key: str = "real_image",
+ generator_output_key: str = "fake_image",
+ discriminator_real_output_gkey: str = "g_real_logits",
+ discriminator_fake_output_gkey: str = "g_fake_logits",
+ discriminator_real_output_dkey: str = "d_real_logits",
+ discriminator_fake_output_dkey: str = "d_fake_logits",
+ generator_key: str = "generator",
+ discriminator_key: str = "discriminator",
+ ):
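+ # initialize GAN-specific attributes first; `ConfigRunner.__init__`
+ # then processes the config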
+ GANRunner.__init__(
+ self,
+ input_key=input_key,
+ target_key=target_key,
+ generator_output_key=generator_output_key,
+ discriminator_real_output_gkey=discriminator_real_output_gkey,
+ discriminator_fake_output_gkey=discriminator_fake_output_gkey,
+ discriminator_real_output_dkey=discriminator_real_output_dkey,
+ discriminator_fake_output_dkey=discriminator_fake_output_dkey,
+ generator_key=generator_key,
+ discriminator_key=discriminator_key,
+ )
+
+ runners.ConfigRunner.__init__(self, config=config)
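# A minimal usage sketch of the stage routing above (assumptions: the
# runner is importable from the package, and stage names follow the
# convention checked in `on_stage_start`; names below are illustrative):
#
#     runner = GANRunner(input_key="image", target_key="real_image")
#     # "stage1_supervised" -> _handle_batch_supervised (pixel-wise pretraining)
#     # "stage2_gan"        -> _handle_batch_gan (adversarial fine-tuning)
#     # "infer"             -> _handle_batch_supervised (generator inference)
#     # any other stage name raises NotImplementedError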
diff --git a/esrgan/utils/__init__.py b/esrgan/utils/__init__.py
index 4353009..dbec8a1 100644
--- a/esrgan/utils/__init__.py
+++ b/esrgan/utils/__init__.py
@@ -1,4 +1,5 @@
# flake8: noqa
+from esrgan.utils.aug import Augmentor
from esrgan.utils.init import module_init_, net_init_
from esrgan.utils.misc import is_power_of_two, pairwise
-from esrgan.utils.module_params import create_layer, process_fn_params
+from esrgan.utils.module_params import create_layer
diff --git a/esrgan/utils/aug.py b/esrgan/utils/aug.py
new file mode 100644
index 0000000..565ecfd
--- /dev/null
+++ b/esrgan/utils/aug.py
@@ -0,0 +1,44 @@
+from typing import Any, Callable, Dict, Optional
+
+__all__ = ["Augmentor"]
+
+
+def identity(**kwargs: Any) -> Dict:
+ """A placeholder identity operator that is argument-insensitive.
+
+ Args:
+ kwargs: Keyword arguments that describe a sample.
+
+ Returns:
+ Dictionary with the same (unchanged) keyword arguments.
+
+ """
+ return kwargs
+
+
+class Augmentor:
+ """Applies provided transformation on dictionaries.
+
+ Args:
+ transform: A function / transform that takes sample fields
+ as keyword arguments and returns a transformed dictionary.
+ If ``None``, the identity function is used.
+
+ """
+
+ def __init__(
+ self, transform: Optional[Callable[..., Dict]] = None
+ ) -> None:
+ self.transform = transform if transform is not None else identity
+
+ def __call__(self, d: Dict) -> Dict:
+ """Applies ``transform`` to the dictionary ``d``.
+
+ Args:
+ d: Dictionary to transform.
+
+ Returns:
+ Output of the ``transform`` function.
+
+ """
+ return self.transform(**d)
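# A minimal usage sketch (the albumentations pipeline below is an
# illustrative assumption; any callable that accepts sample fields as
# keyword arguments and returns a dict will work):
import albumentations as A
import numpy as np

transform = A.Compose(
    [A.HorizontalFlip(p=0.5)],
    additional_targets={"real_image": "image"},
)
augmentor = Augmentor(transform=transform)

sample = {
    "image": np.zeros((32, 32, 3), dtype=np.uint8),        # low-resolution input
    "real_image": np.zeros((128, 128, 3), dtype=np.uint8),  # high-resolution target
}
augmented = augmentor(sample)  # dict with both images flipped consistently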
diff --git a/esrgan/utils/init.py b/esrgan/utils/init.py
index f8b7cf4..cc1642d 100644
--- a/esrgan/utils/init.py
+++ b/esrgan/utils/init.py
@@ -6,6 +6,8 @@
import torch
from torch import nn
+__all__ = ["kaiming_normal_", "module_init_", "net_init_"]
+
def kaiming_normal_(
tensor: torch.Tensor,
@@ -112,6 +114,3 @@ def net_init_(net: nn.Module) -> None:
activation = m
module_init_(m, nonlinearity=activation)
-
-
-__all__ = ["kaiming_normal_", "module_init_", "net_init_"]
diff --git a/esrgan/utils/misc.py b/esrgan/utils/misc.py
index f12d99e..0995c81 100644
--- a/esrgan/utils/misc.py
+++ b/esrgan/utils/misc.py
@@ -1,6 +1,8 @@
import itertools
from typing import Any, Iterable
+__all__ = ["pairwise", "is_power_of_two"]
+
def pairwise(iterable: Iterable[Any]) -> Iterable[Any]:
"""Iterate sequences by pairs.
@@ -13,7 +15,7 @@ def pairwise(iterable: Iterable[Any]) -> Iterable[Any]:
Examples:
>>> for i in pairwise([1, 2, 5, -3]):
- >>> print(i)
+ ... print(i)
(1, 2)
(2, 5)
(5, -3)
@@ -42,6 +44,3 @@ def is_power_of_two(number: int) -> bool:
"""
    result = number != 0 and ((number & (number - 1)) == 0)
return result
-
-
-__all__ = ["pairwise", "is_power_of_two"]
diff --git a/esrgan/utils/module_params.py b/esrgan/utils/module_params.py
index 7a80a67..cc2ad4f 100644
--- a/esrgan/utils/module_params.py
+++ b/esrgan/utils/module_params.py
@@ -1,67 +1,8 @@
-import copy
-import functools
-import re
-from typing import Any, Callable, Dict, Optional
+from typing import Any, Callable, Optional
-from catalyst.registry import MODULE
from torch import nn
-from esrgan.utils.types import ModuleParams
-
-
-def process_fn_params(function: Callable) -> Callable:
- """Decorator for `fn_params` processing.
-
- Decorator that process all `*_fn` parameters and replaces ``str`` and
- ``dict`` values with corresponding constructors of `nn` modules.
- For example for ``act_fn='ReLU'`` and ``act_fn=nn.ReLU`` parameters
- the result will be ``nn.ReLU`` constructor of ReLU activation function,
- and for ``act_fn={'act': 'ReLU', 'inplace': True}`` the result
- will be 'partial' constructor ``nn.ReLU`` in which
- ``inplace`` argument is set to ``True``.
-
- Args:
- function: Function to wrap.
-
- Returns:
- Wrapped function.
-
- """
- @functools.wraps(function)
- def wrapper(*args: Any, **kwargs: Any) -> Any:
- kwargs_: Dict[str, Any] = {}
- for key, value in kwargs.items():
- if (match := re.match(r"(\w+)_fn", key)) and value:
- value = _process_fn_params(
- params=value, key=match.group(1)
- )
- kwargs_[key] = value
-
- output = function(*args, **kwargs_)
-
- return output
- return wrapper
-
-
-def _process_fn_params(
- params: ModuleParams, key: Optional[str] = None
-) -> Callable[..., nn.Module]:
- module_fn: Callable[..., nn.Module]
- if callable(params):
- module_fn = params
- elif isinstance(params, str):
- name = params
- module_fn = MODULE.get(name)
- elif isinstance(params, dict) and key is not None:
- params = copy.deepcopy(params)
-
- name_or_fn = params.pop(key)
- module_fn = _process_fn_params(name_or_fn)
- module_fn = functools.partial(module_fn, **params)
- else:
- NotImplementedError()
-
- return module_fn
+__all__ = ["create_layer"]
def create_layer(
@@ -101,6 +42,3 @@ def create_layer(
module = layer(in_channels, out_channels, **kwargs)
return module
-
-
-__all__ = ["process_fn_params", "create_layer"]
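# A minimal sketch of `create_layer` usage (keyword names are assumptions
# based on the visible tail of the function; consult the full signature):
#
#     from torch import nn
#     conv = create_layer(
#         layer=nn.Conv2d, in_channels=3, out_channels=64, kernel_size=3
#     )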
diff --git a/esrgan/utils/scripts/process_images.py b/esrgan/utils/scripts/process_images.py
index 3e128f2..cbc9b6c 100644
--- a/esrgan/utils/scripts/process_images.py
+++ b/esrgan/utils/scripts/process_images.py
@@ -14,8 +14,13 @@
logger = logging.getLogger(__name__)
-def parse_args():
- """Parses the command line arguments for the main method."""
+def parse_args() -> argparse.Namespace:
+ """Parses the command line arguments for the main method.
+
+ Returns:
+ Command line arguments.
+
+ """
parser = argparse.ArgumentParser()
parser.add_argument("--in-dir", required=True, type=Path)
parser.add_argument("--out-dir", required=True, type=Path)
@@ -83,6 +88,22 @@ def cut_with_overlap(
class Preprocessor:
+ """Cuts raw input images with sliding window.
+
+ Args:
+ in_dir: Directory with raw images.
+ out_dir: Directory to save processed images.
+ patch_height: Height of slice to cut.
+ patch_width: Width of slice to cut.
+ height_overlap: Height of overlap between two slices.
+ width_overlap: Width of overlap between two slices.
+ min_height: Minimal height that an image should have;
+ smaller images won't be cut.
+ min_width: Minimal width that an image should have;
+ smaller images won't be cut.
+
+ """
+
def __init__(
self,
in_dir: Path,
@@ -105,7 +126,12 @@ def __init__(
self.min_width = min_width or min_height
def preprocess(self, filepath: Path) -> None:
- """Process one file."""
+ """Process one file.
+
+ Args:
+ filepath: Path to file to process.
+
+ """
try:
image = np.array(utils.imread(filepath))
except Exception as e:
@@ -131,7 +157,12 @@ def preprocess(self, filepath: Path) -> None:
utils.imwrite(uri=out_path, im=patch)
def process_all(self, pool: Pool) -> None:
- """Process all images in the folder."""
+ """Process all images in the folder.
+
+ Args:
+ pool: Pool of processes which will carry out image processing.
+
+ """
files = glob.iglob(f"{self.in_dir}/**/*", recursive=True)
images = sorted(filter(utils.has_image_extension, files))
 images = [Path(filepath) for filepath in images]
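# A minimal sketch of the sliding-window cut that `cut_with_overlap`
# performs (names and edge handling are assumptions; the real helper is
# defined earlier in this module):
import numpy as np

def iter_patches(image: np.ndarray, ph: int, pw: int, oh: int, ow: int):
    """Yield (top, left, patch); consecutive patches overlap by (oh, ow)."""
    h, w = image.shape[:2]
    for top in range(0, max(h - oh, 1), ph - oh):
        for left in range(0, max(w - ow, 1), pw - ow):
            yield top, left, image[top:top + ph, left:left + pw]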
diff --git a/esrgan/utils/types.py b/esrgan/utils/types.py
deleted file mode 100644
index d153592..0000000
--- a/esrgan/utils/types.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from typing import Any, Callable, Dict, Union
-
-from torch import nn
-
-ModuleParams = Union[Callable[..., nn.Module], str, Dict[str, Any]]
diff --git a/experiment/__init__.py b/experiment/__init__.py
deleted file mode 100644
index 3cf2942..0000000
--- a/experiment/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from catalyst.dl import registry
-from esrgan.core import SRExperiment as Experiment, GANRunner as Runner
-from esrgan import callbacks, criterions, model
-
-
-for registry_key, module in zip(
- ("CALLBACK", "CRITERION", "MODULE", "MODEL"),
- (callbacks, criterions, model.module, model),
-):
- registry.__dict__[registry_key].add_from_module(module, prefix=["esrgan."])
diff --git a/experiment/config_gan.yml b/experiment/config_gan.yml
deleted file mode 100644
index 3e769a2..0000000
--- a/experiment/config_gan.yml
+++ /dev/null
@@ -1,230 +0,0 @@
-shared:
- - &upscale 4 # 2, 4, 8
- - &patch_size 128 # 40, 64, 96, 128, 192
-
-model_params:
- _key_value: true
-
- &generator_model generator:
- model: esrgan.EncoderDecoderNet
- encoder_params:
- module: esrgan.ESREncoder
- in_channels: &num_channels 3
- out_channels: &latent_channels 64
- num_basic_blocks: 16
- growth_channels: 32
- activation_fn: &activation_fn
- activation: LeakyReLU
- negative_slope: 0.2
- inplace: true
- residual_scaling: 0.2
- decoder_params:
- module: esrgan.SRResNetDecoder
- in_channels: *latent_channels
- out_channels: *num_channels
- scale_factor: *upscale
- activation_fn: *activation_fn
- &discriminator_model discriminator:
- model: esrgan.VGGConv
- encoder_params:
- module: esrgan.StridedConvEncoder
- layer_order: [conv, norm, activation]
- norm_fn: BatchNorm2d
- pooling_params:
- module: AdaptiveAvgPool2d
- output_size: [7, 7]
- head_params:
- module: esrgan.LinearHead
- in_channels: 25088 # 512 * (7x7)
- out_channels: 1
- latent_channels: [1024]
- layer_order: [linear, activation]
-
-args:
- expdir: experiment
- logdir: ./logs/esrgan_x4_128ps/gan
-
-runner_params:
- generator_key: *generator_model
- discriminator_key: *discriminator_model
-
-stages:
- state_params:
- main_metric: ssim
- minimize_metric: false
-
- data_params:
- num_workers: 8
- batch_size: 48
- train_dataset_params: &train_dataset_params
- dataset: DIV2KDataset
- root: data
- train: true
- target_type: bicubic_X4
- patch_size: [*patch_size, *patch_size]
- low_resolution_image_key: image
- high_resolution_image_key: real_image
- download: true
- valid_dataset_params:
- << : [*train_dataset_params]
- train: false
- loaders_params:
- valid:
- batch_size: 1
-
- criterion_params:
- _key_value: true
-
- content_loss:
- criterion: L1Loss # L1Loss, MSELoss
- perceptual_loss:
- criterion: esrgan.PerceptualLoss
- layers:
- conv5_4: 1.0
- adversarial_generator_loss:
- criterion: &adversarial_criterion esrgan.RelativisticAdversarialLoss
- mode: generator
- adversarial_discriminator_loss:
- criterion: *adversarial_criterion
- mode: discriminator
-
- callbacks_params:
- metric_psnr:
- callback: esrgan.PSNRCallback
- input_key: real_image
- output_key: fake_image
- metric_ssim:
- callback: esrgan.SSIMCallback
- input_key: real_image
- output_key: fake_image
-
- stage2_gan:
- state_params:
- num_epochs: 16
-
- transform_params:
- _key_value: true
-
- train:
- transform: albumentations.Compose
- transforms:
- - &spatial_transforms
- transform: albumentations.Compose
- transforms:
- - transform: albumentations.HorizontalFlip
- p: 0.5
- additional_targets:
- real_image: image
- - &post_transforms
- transform: albumentations.Compose
- transforms:
- - transform: albumentations.Normalize
- mean: 0
- std: 1
- - transform: albumentations.ToTensorV2
- additional_targets:
- real_image: image
- valid:
- transform: albumentations.Compose
- transforms:
- - *post_transforms
-
- optimizer_params:
- _key_value: true
-
- generator_optimizer:
- _model: *generator_model
- optimizer: AdamW
- lr_linear_scaling:
- lr: 0.00003
- base_batch_size: &base_batch_size 16
- weight_decay: 0.0
- discriminator_optimizer:
- _model: *discriminator_model
- optimizer: AdamW
- lr_linear_scaling:
- lr: 0.0001
- base_batch_size: *base_batch_size
- weight_decay: 0.0
-
- scheduler_params:
- _key_value: true
-
- generator_scheduler:
- _optimizer: generator_optimizer
- scheduler: MultiStepLR
- milestones: [16, 24, 32]
- gamma: 0.5
- discriminator_scheduler:
- _optimizer: discriminator_optimizer
- scheduler: MultiStepLR
- milestones: [8, 16, 24, 32]
- gamma: 0.5
-
- callbacks_params:
- loader:
- callback: CheckpointCallback
- load_on_stage_start:
- model: ./logs/esrgan_x4_128ps/supervised/checkpoints/last.pth
-
- loss_content:
- callback: CriterionCallback
- criterion_key: content_loss
- input_key: real_image
- output_key: fake_image
- prefix: loss_content
- multiplier: 0.01
- loss_perceptual:
- callback: CriterionCallback
- criterion_key: perceptual_loss
- input_key: real_image
- output_key: fake_image
- prefix: loss_perceptual
- multiplier: 1.0
- loss_adversarial_generator:
- callback: CriterionCallback
- criterion_key: adversarial_generator_loss
- input_key: null
- output_key:
- g_real_logits: real_logits
- g_fake_logits: fake_logits
- prefix: loss_adversarial_generator
- multiplier: 0.005
- loss_generator:
- callback: MetricAggregationCallback
- prefix: &generator_loss loss_generator
- metrics: ["loss_content", "loss_perceptual", "loss_adversarial_generator"]
- mode: "sum"
- multiplier: 1.0
-
- loss_discriminator:
- callback: CriterionCallback
- criterion_key: adversarial_discriminator_loss
- input_key: null
- output_key:
- d_real_logits: real_logits
- d_fake_logits: fake_logits
- prefix: &discriminator_loss loss_discriminator
- multiplier: 1.0
-
- optimizer_generator:
- callback: AMPOptimizerCallback
- metric_key: *generator_loss
- optimizer_key: generator_optimizer
- grad_clip_params: &grad_clip_params
- func: clip_grad_value_
- clip_value: 5.0
- optimizer_discriminator:
- callback: AMPOptimizerCallback
- metric_key: *discriminator_loss
- optimizer_key: discriminator_optimizer
- grad_clip_params: *grad_clip_params
-
- scheduler_generator:
- callback: SchedulerCallback
- scheduler_key: generator_scheduler
- reduced_metric: *generator_loss
- scheduler_discriminator:
- callback: SchedulerCallback
- scheduler_key: discriminator_scheduler
- reduced_metric: *discriminator_loss
diff --git a/experiment/config_supervised.yml b/experiment/config_supervised.yml
deleted file mode 100644
index 65611ff..0000000
--- a/experiment/config_supervised.yml
+++ /dev/null
@@ -1,172 +0,0 @@
-shared:
- - &upscale 4 # 2, 4, 8
- - &patch_size 128 # 40, 64, 96, 128, 192
-
-model_params:
- _key_value: true
-
- &generator_model generator:
- model: esrgan.EncoderDecoderNet
- encoder_params:
- module: esrgan.ESREncoder
- in_channels: &num_channels 3
- out_channels: &latent_channels 64
- num_basic_blocks: 16
- growth_channels: 32
- activation_fn: &activation_fn
- activation: LeakyReLU
- negative_slope: 0.2
- inplace: true
- residual_scaling: 0.2
- decoder_params:
- module: esrgan.SRResNetDecoder
- in_channels: *latent_channels
- out_channels: *num_channels
- scale_factor: *upscale
- activation_fn: *activation_fn
- &discriminator_model discriminator:
- model: esrgan.VGGConv
- encoder_params:
- module: esrgan.StridedConvEncoder
- layer_order: [conv, norm, activation]
- norm_fn: BatchNorm2d
- pooling_params:
- module: AdaptiveAvgPool2d
- output_size: [7, 7]
- head_params:
- module: esrgan.LinearHead
- in_channels: 25088 # 512 * (7x7)
- out_channels: 1
- latent_channels: [1024]
- layer_order: [linear, activation]
-
-args:
- expdir: experiment
- logdir: ./logs/esrgan_x4_128ps/supervised
-
-runner_params:
- generator_key: *generator_model
- discriminator_key: *discriminator_model
-
-stages:
- state_params:
- main_metric: ssim
- minimize_metric: false
-
- data_params:
- num_workers: 8
- batch_size: 48
- train_dataset_params: &train_dataset_params
- dataset: DIV2KDataset
- root: data
- train: true
- target_type: bicubic_X4
- patch_size: [*patch_size, *patch_size]
- low_resolution_image_key: image
- high_resolution_image_key: real_image
- download: true
- valid_dataset_params:
- << : [*train_dataset_params]
- train: false
- loaders_params:
- valid:
- batch_size: 1
-
- criterion_params:
- _key_value: true
-
- content_loss:
- criterion: L1Loss # L1Loss, MSELoss
-
- callbacks_params:
- metric_psnr:
- callback: esrgan.PSNRCallback
- input_key: real_image
- output_key: fake_image
- metric_ssim:
- callback: esrgan.SSIMCallback
- input_key: real_image
- output_key: fake_image
-
- stage1_supervised:
- state_params:
- num_epochs: 40
-
- transform_params:
- _key_value: true
-
- train:
- transform: albumentations.Compose
- transforms:
- - &spatial_transforms
- transform: albumentations.Compose
- transforms:
- - transform: albumentations.HorizontalFlip
- p: 0.5
- additional_targets:
- real_image: image
- - &hard_transforms
- transform: albumentations.Compose
- transforms:
- - transform: albumentations.Cutout
- num_holes: 2
- max_h_size: 2
- max_w_size: 2
- - transform: albumentations.ImageCompression
- quality_lower: 65
- p: 0.25
- - &post_transforms
- transform: albumentations.Compose
- transforms:
- - transform: albumentations.Normalize
- mean: 0
- std: 1
- - transform: albumentations.ToTensorV2
- additional_targets:
- real_image: image
- valid:
- transform: albumentations.Compose
- transforms:
- - *post_transforms
-
- optimizer_params:
- _key_value: true
-
- generator_optimizer:
- _model: *generator_model
- optimizer: Ralamb # AdamW
- lr_linear_scaling:
- lr: 0.003 # 0.0001
- base_batch_size: &base_batch_size 16
- weight_decay: 0.0
-
- scheduler_params:
- _key_value: true
-
- generator_scheduler:
- _optimizer: generator_optimizer
- scheduler: MultiStepLR
- milestones: [8, 20, 28]
- gamma: 0.5
-
- callbacks_params:
- loss_content:
- callback: CriterionCallback
- input_key: real_image
- output_key: fake_image
- prefix: loss_content
- criterion_key: content_loss
- multiplier: 1.0
-
- optimizer_generator:
- callback: AMPOptimizerCallback
- metric_key: loss_content
- optimizer_key: generator_optimizer
- grad_clip_params: &grad_clip_params
- func: clip_grad_value_
- clip_value: 5.0
-
- scheduler_generator:
- callback: SchedulerCallback
- scheduler_key: generator_scheduler
- reduced_metric: loss_content
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..d9ab344
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,1553 @@
+[[package]]
+name = "albumentations"
+version = "1.1.0"
+description = "Fast image augmentation library and easy to use wrapper around other libraries"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+numpy = ">=1.11.1"
+opencv-python-headless = ">=4.1.1"
+PyYAML = "*"
+qudida = ">=0.0.4"
+scikit-image = ">=0.16.1"
+scipy = "*"
+
+[package.extras]
+develop = ["pytest", "imgaug (>=0.4.0)"]
+imgaug = ["imgaug (>=0.4.0)"]
+tests = ["pytest"]
+
+[[package]]
+name = "attrs"
+version = "21.4.0"
+description = "Classes Without Boilerplate"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[package.extras]
+dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
+docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
+tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
+tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"]
+
+[[package]]
+name = "catalyst"
+version = "21.12"
+description = "Catalyst. Accelerated deep learning R&D with PyTorch."
+category = "main"
+optional = false
+python-versions = ">=3.6.0"
+
+[package.dependencies]
+albumentations = {version = ">=0.5.1", optional = true, markers = "extra == \"albu\""}
+hydra-slayer = ">=0.1.1"
+imageio = {version = ">=2.5.0", optional = true, markers = "extra == \"cv\""}
+numpy = ">=1.18"
+opencv-python-headless = {version = ">=4.1.1.26", optional = true, markers = "extra == \"cv\""}
+Pillow = {version = ">=6.1", optional = true, markers = "extra == \"cv\""}
+PyYAML = ">=5.1"
+requests = {version = "*", optional = true, markers = "extra == \"cv\""}
+scikit-image = {version = "<0.19.0", optional = true, markers = "extra == \"cv\""}
+tensorboardX = "<2.3.0"
+torch = ">=1.3.0"
+torchvision = {version = ">=0.4.1", optional = true, markers = "extra == \"cv\""}
+tqdm = ">=4.33.0"
+
+[package.extras]
+albu = ["albumentations (>=0.5.1)"]
+all = ["imageio (>=2.5.0)", "opencv-python-headless (>=4.1.1.26)", "scikit-image (<0.19.0)", "torchvision (>=0.4.1)", "Pillow (>=6.1)", "requests", "scipy (>=1.4.1)", "matplotlib (>=3.1.0)", "pandas (>=0.25.0)", "scikit-learn (>=0.22)", "optuna (>=2.0.0)"]
+comet = ["comet-ml"]
+cv = ["imageio (>=2.5.0)", "opencv-python-headless (>=4.1.1.26)", "scikit-image (<0.19.0)", "torchvision (>=0.4.1)", "Pillow (>=6.1)", "requests"]
+deepspeed = ["deepspeed (>=0.4.0)"]
+dev = ["pytest (==5.3.1)", "sphinx (==2.2.1)", "docutils (==0.17.1)", "mock (==3.0.5)", "catalyst-codestyle (==21.09.2)", "black (==21.8b0)", "catalyst-sphinx-theme (==1.2.0)", "tomlkit (==0.7.2)", "pre-commit (==2.13.0)"]
+fairscale = ["fairscale (>=0.3.4)"]
+hydra = ["hydra-core (<1.1.0)", "omegaconf (>=2.0.2)"]
+ml = ["scipy (>=1.4.1)", "matplotlib (>=3.1.0)", "pandas (>=0.25.0)", "scikit-learn (>=0.22)"]
+mlflow = ["mlflow"]
+neptune = ["neptune-client (>=0.9.8)"]
+nifti = ["nibabel (>=3.1.1)"]
+onnx = ["onnx", "onnxruntime"]
+onnx-gpu = ["onnx", "onnxruntime-gpu"]
+optuna = ["optuna (>=2.0.0)"]
+profiler = ["torch-tb-profiler"]
+wandb = ["wandb"]
+
+[[package]]
+name = "certifi"
+version = "2021.10.8"
+description = "Python package for providing Mozilla's CA Bundle."
+category = "main"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "cffi"
+version = "1.15.0"
+description = "Foreign Function Interface for Python calling C code."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "chardet"
+version = "4.0.0"
+description = "Universal encoding detector for Python 2 and 3"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "charset-normalizer"
+version = "2.0.10"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+category = "main"
+optional = false
+python-versions = ">=3.5.0"
+
+[package.extras]
+unicode_backport = ["unicodedata2"]
+
+[[package]]
+name = "colorama"
+version = "0.4.4"
+description = "Cross-platform colored terminal text."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "cycler"
+version = "0.11.0"
+description = "Composable style cycles"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "darglint"
+version = "1.8.1"
+description = "A utility for ensuring Google-style docstrings stay up to date with the source code."
+category = "dev"
+optional = false
+python-versions = ">=3.6,<4.0"
+
+[[package]]
+name = "doc8"
+version = "0.8.1"
+description = "Style checker for Sphinx (or other) RST documentation"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+chardet = "*"
+docutils = "*"
+Pygments = "*"
+restructuredtext-lint = ">=0.7"
+six = "*"
+stevedore = "*"
+
+[[package]]
+name = "docutils"
+version = "0.18.1"
+description = "Docutils -- Python Documentation Utilities"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+
+[[package]]
+name = "eradicate"
+version = "2.0.0"
+description = "Removes commented-out code."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "flake8"
+version = "3.9.2"
+description = "the modular source code checker: pep8 pyflakes and co"
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+
+[package.dependencies]
+mccabe = ">=0.6.0,<0.7.0"
+pycodestyle = ">=2.7.0,<2.8.0"
+pyflakes = ">=2.3.0,<2.4.0"
+
+[[package]]
+name = "flake8-broken-line"
+version = "0.3.0"
+description = "Flake8 plugin to forbid backslashes for line breaks"
+category = "dev"
+optional = false
+python-versions = ">=3.6,<4.0"
+
+[package.dependencies]
+flake8 = ">=3.5,<4.0"
+
+[[package]]
+name = "flake8-comprehensions"
+version = "3.8.0"
+description = "A flake8 plugin to help you write better list/set/dict comprehensions."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+flake8 = ">=3.0,<3.2.0 || >3.2.0"
+
+[[package]]
+name = "flake8-debugger"
+version = "4.0.0"
+description = "ipdb/pdb statement checker plugin for flake8"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+flake8 = ">=3.0"
+pycodestyle = "*"
+six = "*"
+
+[[package]]
+name = "flake8-docstrings"
+version = "1.6.0"
+description = "Extension for flake8 which uses pydocstyle to check docstrings"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+flake8 = ">=3"
+pydocstyle = ">=2.1"
+
+[[package]]
+name = "flake8-eradicate"
+version = "1.2.0"
+description = "Flake8 plugin to find commented out code"
+category = "dev"
+optional = false
+python-versions = ">=3.6,<4.0"
+
+[package.dependencies]
+attrs = "*"
+eradicate = ">=2.0,<3.0"
+flake8 = ">=3.5,<5"
+
+[[package]]
+name = "flake8-isort"
+version = "4.1.1"
+description = "flake8 plugin that integrates isort ."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+flake8 = ">=3.2.1,<5"
+isort = ">=4.3.5,<6"
+testfixtures = ">=6.8.0,<7"
+
+[package.extras]
+test = ["pytest-cov"]
+
+[[package]]
+name = "flake8-quotes"
+version = "3.3.1"
+description = "Flake8 lint for quotes."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+flake8 = "*"
+
+[[package]]
+name = "flake8-rst-docstrings"
+version = "0.2.5"
+description = "Python docstring reStructuredText (RST) validator"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+flake8 = ">=3.0.0"
+pygments = "*"
+restructuredtext-lint = "*"
+
+[[package]]
+name = "fonttools"
+version = "4.28.5"
+description = "Tools to manipulate font files"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.extras]
+all = ["fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "zopfli (>=0.1.4)", "lz4 (>=1.7.4.2)", "matplotlib", "sympy", "skia-pathops (>=0.5.0)", "brotlicffi (>=0.8.0)", "scipy", "brotli (>=1.0.1)", "munkres", "unicodedata2 (>=13.0.0)", "xattr"]
+graphite = ["lz4 (>=1.7.4.2)"]
+interpolatable = ["scipy", "munkres"]
+lxml = ["lxml (>=4.0,<5)"]
+pathops = ["skia-pathops (>=0.5.0)"]
+plot = ["matplotlib"]
+symfont = ["sympy"]
+type1 = ["xattr"]
+ufo = ["fs (>=2.2.0,<3)"]
+unicode = ["unicodedata2 (>=13.0.0)"]
+woff = ["zopfli (>=0.1.4)", "brotlicffi (>=0.8.0)", "brotli (>=1.0.1)"]
+
+[[package]]
+name = "hydra-slayer"
+version = "0.4.0"
+description = "A framework for elegantly configuring complex applications"
+category = "main"
+optional = false
+python-versions = ">=3.6,<4.0"
+
+[[package]]
+name = "idna"
+version = "3.3"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "imageio"
+version = "2.14.0"
+description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats."
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[package.dependencies]
+numpy = "*"
+pillow = ">=8.3.2"
+
+[package.extras]
+build = ["wheel"]
+dev = ["invoke", "pytest", "pytest-cov", "black", "flake8"]
+docs = ["sphinx", "numpydoc", "pydata-sphinx-theme"]
+ffmpeg = ["imageio-ffmpeg", "psutil"]
+fits = ["astropy"]
+full = ["astropy", "black", "flake8", "gdal", "imageio-ffmpeg", "invoke", "itk", "numpydoc", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "sphinx", "tifffile", "wheel"]
+gdal = ["gdal"]
+itk = ["itk"]
+linting = ["black", "flake8"]
+test = ["invoke", "pytest", "pytest-cov"]
+tifffile = ["tifffile"]
+
+[[package]]
+name = "isort"
+version = "5.10.1"
+description = "A Python utility / library to sort Python imports."
+category = "dev"
+optional = false
+python-versions = ">=3.6.1,<4.0"
+
+[package.extras]
+pipfile_deprecated_finder = ["pipreqs", "requirementslib"]
+requirements_deprecated_finder = ["pipreqs", "pip-api"]
+colors = ["colorama (>=0.4.3,<0.5.0)"]
+plugins = ["setuptools"]
+
+[[package]]
+name = "joblib"
+version = "1.1.0"
+description = "Lightweight pipelining with Python functions"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "jpeg4py"
+version = "0.1.4"
+description = "libjpeg-turbo cffi bindings and helper classes"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+cffi = "*"
+numpy = "*"
+
+[[package]]
+name = "kiwisolver"
+version = "1.3.2"
+description = "A fast implementation of the Cassowary constraint solver"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[[package]]
+name = "matplotlib"
+version = "3.5.1"
+description = "Python plotting package"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+cycler = ">=0.10"
+fonttools = ">=4.22.0"
+kiwisolver = ">=1.0.1"
+numpy = ">=1.17"
+packaging = ">=20.0"
+pillow = ">=6.2.0"
+pyparsing = ">=2.2.1"
+python-dateutil = ">=2.7"
+setuptools_scm = ">=4"
+
+[[package]]
+name = "mccabe"
+version = "0.6.1"
+description = "McCabe checker, plugin for flake8"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "mypy"
+version = "0.910"
+description = "Optional static typing for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[package.dependencies]
+mypy-extensions = ">=0.4.3,<0.5.0"
+toml = "*"
+typing-extensions = ">=3.7.4"
+
+[package.extras]
+dmypy = ["psutil (>=4.0)"]
+python2 = ["typed-ast (>=1.4.0,<1.5.0)"]
+
+[[package]]
+name = "mypy-extensions"
+version = "0.4.3"
+description = "Experimental type system extensions for programs checked with the mypy typechecker."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "networkx"
+version = "2.6.3"
+description = "Python package for creating and manipulating graphs and networks"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.extras]
+default = ["numpy (>=1.19)", "scipy (>=1.5,!=1.6.1)", "matplotlib (>=3.3)", "pandas (>=1.1)"]
+developer = ["black (==21.5b1)", "pre-commit (>=2.12)"]
+doc = ["sphinx (>=4.0,<5.0)", "pydata-sphinx-theme (>=0.6,<1.0)", "sphinx-gallery (>=0.9,<1.0)", "numpydoc (>=1.1)", "pillow (>=8.2)", "nb2plots (>=0.6)", "texext (>=0.6.6)"]
+extra = ["lxml (>=4.5)", "pygraphviz (>=1.7)", "pydot (>=1.4.1)"]
+test = ["pytest (>=6.2)", "pytest-cov (>=2.12)", "codecov (>=2.1)"]
+
+[[package]]
+name = "numpy"
+version = "1.22.1"
+description = "NumPy is the fundamental package for array computing with Python."
+category = "main"
+optional = false
+python-versions = ">=3.8"
+
+[[package]]
+name = "opencv-python-headless"
+version = "4.5.5.62"
+description = "Wrapper package for OpenCV python bindings."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.21.2", markers = "python_version >= \"3.10\" or python_version >= \"3.6\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
+ {version = ">=1.19.3", markers = "python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\" or python_version >= \"3.9\""},
+ {version = ">=1.14.5", markers = "python_version >= \"3.7\""},
+ {version = ">=1.17.3", markers = "python_version >= \"3.8\""},
+]
+
+[[package]]
+name = "packaging"
+version = "21.3"
+description = "Core utilities for Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
+
+[[package]]
+name = "pbr"
+version = "5.8.0"
+description = "Python Build Reasonableness"
+category = "dev"
+optional = false
+python-versions = ">=2.6"
+
+[[package]]
+name = "pillow"
+version = "9.0.0"
+description = "Python Imaging Library (Fork)"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[[package]]
+name = "piq"
+version = "0.6.0"
+description = "Measures and metrics for image2image tasks. PyTorch."
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+torchvision = ">=0.6.1"
+
+[[package]]
+name = "protobuf"
+version = "3.19.3"
+description = "Protocol Buffers"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "pycodestyle"
+version = "2.7.0"
+description = "Python style guide checker"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "pydocstyle"
+version = "6.1.1"
+description = "Python docstring style checker"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+snowballstemmer = "*"
+
+[package.extras]
+toml = ["toml"]
+
+[[package]]
+name = "pyflakes"
+version = "2.3.1"
+description = "passive checker of Python programs"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+
+[[package]]
+name = "pygments"
+version = "2.11.2"
+description = "Pygments is a syntax highlighting package written in Python."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+
+[[package]]
+name = "pyparsing"
+version = "3.0.7"
+description = "Python parsing module"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "pywavelets"
+version = "1.2.0"
+description = "PyWavelets, wavelet transform module"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+numpy = ">=1.17.3"
+
+[[package]]
+name = "pyyaml"
+version = "6.0"
+description = "YAML parser and emitter for Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "qudida"
+version = "0.0.4"
+description = "QUick and DIrty Domain Adaptation"
+category = "main"
+optional = false
+python-versions = ">=3.5.0"
+
+[package.dependencies]
+numpy = ">=0.18.0"
+opencv-python-headless = ">=4.0.1"
+scikit-learn = ">=0.19.1"
+typing-extensions = "*"
+
+[[package]]
+name = "requests"
+version = "2.27.1"
+description = "Python HTTP for Humans."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""}
+idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""}
+urllib3 = ">=1.21.1,<1.27"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
+use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"]
+
+[[package]]
+name = "restructuredtext-lint"
+version = "1.3.2"
+description = "reStructuredText linter"
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+docutils = ">=0.11,<1.0"
+
+[[package]]
+name = "scikit-image"
+version = "0.18.3"
+description = "Image processing in Python"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+imageio = ">=2.3.0"
+matplotlib = ">=2.0.0,<3.0.0 || >3.0.0"
+networkx = ">=2.0"
+numpy = ">=1.16.5"
+pillow = ">=4.3.0,<7.1.0 || >7.1.0,<7.1.1 || >7.1.1"
+PyWavelets = ">=1.1.1"
+scipy = ">=1.0.1"
+tifffile = ">=2019.7.26"
+
+[package.extras]
+data = ["pooch (>=1.3.0)"]
+docs = ["sphinx (>=1.8,<=2.4.4)", "sphinx-gallery (>=0.7.0,!=0.8.0)", "numpydoc (>=1.0)", "sphinx-copybutton", "pytest-runner", "scikit-learn", "matplotlib (>=3.0.1)", "dask[array] (>=0.15.0,!=2.17.0)", "cloudpickle (>=0.2.1)", "pandas (>=0.23.0)", "seaborn (>=0.7.1)", "pooch (>=1.3.0)", "tifffile (>=2020.5.30)", "myst-parser", "ipywidgets", "plotly (>=4.10.0)"]
+optional = ["simpleitk", "astropy (>=3.1.2)", "qtpy", "pyamg", "dask[array] (>=1.0.0,!=2.17.0)", "cloudpickle (>=0.2.1)", "pooch (>=1.3.0)"]
+test = ["pytest (>=5.2.0)", "pytest-cov (>=2.7.0)", "pytest-localserver", "pytest-faulthandler", "flake8", "codecov", "pooch (>=1.3.0)"]
+
+[[package]]
+name = "scikit-learn"
+version = "1.0.2"
+description = "A set of python modules for machine learning and data mining"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+joblib = ">=0.11"
+numpy = ">=1.14.6"
+scipy = ">=1.1.0"
+threadpoolctl = ">=2.0.0"
+
+[package.extras]
+benchmark = ["matplotlib (>=2.2.3)", "pandas (>=0.25.0)", "memory-profiler (>=0.57.0)"]
+docs = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)", "memory-profiler (>=0.57.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "numpydoc (>=1.0.0)", "Pillow (>=7.1.2)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
+examples = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "seaborn (>=0.9.0)"]
+tests = ["matplotlib (>=2.2.3)", "scikit-image (>=0.14.5)", "pandas (>=0.25.0)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "flake8 (>=3.8.2)", "black (>=21.6b0)", "mypy (>=0.770)", "pyamg (>=4.0.0)"]
+
+[[package]]
+name = "scipy"
+version = "1.6.1"
+description = "SciPy: Scientific Library for Python"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+numpy = ">=1.16.5"
+
+[[package]]
+name = "setuptools-scm"
+version = "6.4.2"
+description = "the blessed package to manage your versions by scm tags"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+packaging = ">=20.0"
+tomli = ">=1.0.0"
+
+[package.extras]
+test = ["pytest (>=6.2)", "virtualenv (>20)"]
+toml = ["setuptools (>=42)"]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[[package]]
+name = "snowballstemmer"
+version = "2.2.0"
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[[package]]
+name = "stevedore"
+version = "3.5.0"
+description = "Manage dynamic plugins for Python applications"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+
+[package.dependencies]
+pbr = ">=2.0.0,<2.1.0 || >2.1.0"
+
+[[package]]
+name = "tensorboardx"
+version = "2.2"
+description = "TensorBoardX lets you watch Tensors Flow without Tensorflow"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+numpy = "*"
+protobuf = ">=3.8.0"
+
+[[package]]
+name = "testfixtures"
+version = "6.18.3"
+description = "A collection of helpers and mock objects for unit tests and doc tests."
+category = "dev"
+optional = false
+python-versions = "*"
+
+[package.extras]
+build = ["setuptools-git", "wheel", "twine"]
+docs = ["sphinx", "zope.component", "sybil", "twisted", "mock", "django (<2)", "django"]
+test = ["pytest (>=3.6)", "pytest-cov", "pytest-django", "zope.component", "sybil", "twisted", "mock", "django (<2)", "django"]
+
+[[package]]
+name = "threadpoolctl"
+version = "3.0.0"
+description = "threadpoolctl"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "tifffile"
+version = "2021.11.2"
+description = "Read and write TIFF files"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[package.dependencies]
+numpy = ">=1.15.1"
+
+[package.extras]
+all = ["imagecodecs (>=2021.7.30)", "matplotlib (>=3.2)", "lxml"]
+
+[[package]]
+name = "toml"
+version = "0.10.2"
+description = "Python Library for Tom's Obvious, Minimal Language"
+category = "dev"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+
+[[package]]
+name = "tomli"
+version = "2.0.0"
+description = "A lil' TOML parser"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+
+[[package]]
+name = "torch"
+version = "1.8.1"
+description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+category = "main"
+optional = false
+python-versions = ">=3.6.2"
+
+[package.dependencies]
+numpy = "*"
+typing-extensions = "*"
+
+[[package]]
+name = "torchvision"
+version = "0.9.1"
+description = "image and video datasets and models for torch deep learning"
+category = "main"
+optional = false
+python-versions = "*"
+
+[package.dependencies]
+numpy = "*"
+pillow = ">=4.1.1"
+torch = "1.8.1"
+
+[package.extras]
+scipy = ["scipy"]
+
+[[package]]
+name = "tqdm"
+version = "4.62.3"
+description = "Fast, Extensible Progress Meter"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["py-make (>=0.1.0)", "twine", "wheel"]
+notebook = ["ipywidgets (>=6)"]
+telegram = ["requests"]
+
+[[package]]
+name = "typing-extensions"
+version = "4.0.1"
+description = "Backported and Experimental Type Hints for Python 3.6+"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+
+[[package]]
+name = "urllib3"
+version = "1.26.8"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+
+[package.extras]
+brotli = ["brotlipy (>=0.6.0)"]
+secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[metadata]
+lock-version = "1.1"
+python-versions = "^3.8"
+content-hash = "5b8d7ba042208195aeeb503f1e11fc830d234f9d6ff3cb240b94f6c9188e9697"
+
+[metadata.files]
+albumentations = [
+ {file = "albumentations-1.1.0-py3-none-any.whl", hash = "sha256:6acf78a5f9504bd36c4a8e18eed29f7103c2fa6a7ba5be399c6088820cc88a8a"},
+ {file = "albumentations-1.1.0.tar.gz", hash = "sha256:60b067b3093908bcc52adb2aa5d44f57ebdbb8ab57a47b0b42f3dc1d3b1ce824"},
+]
+attrs = [
+ {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"},
+ {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"},
+]
+catalyst = [
+ {file = "catalyst-21.12-py2.py3-none-any.whl", hash = "sha256:e2925bcd081577fdef8cf64d476408b2e8990d65fd3b7d1d22dde40be8cd8dbd"},
+ {file = "catalyst-21.12.tar.gz", hash = "sha256:3abf03d5bd2c2279f6612b59fa7c1c2bff46a4b89fcd6df7c0125a78cb64e44e"},
+]
+certifi = [
+ {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
+ {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"},
+]
+cffi = [
+ {file = "cffi-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:c2502a1a03b6312837279c8c1bd3ebedf6c12c4228ddbad40912d671ccc8a962"},
+ {file = "cffi-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:23cfe892bd5dd8941608f93348c0737e369e51c100d03718f108bf1add7bd6d0"},
+ {file = "cffi-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:41d45de54cd277a7878919867c0f08b0cf817605e4eb94093e7516505d3c8d14"},
+ {file = "cffi-1.15.0-cp27-cp27m-win32.whl", hash = "sha256:4a306fa632e8f0928956a41fa8e1d6243c71e7eb59ffbd165fc0b41e316b2474"},
+ {file = "cffi-1.15.0-cp27-cp27m-win_amd64.whl", hash = "sha256:e7022a66d9b55e93e1a845d8c9eba2a1bebd4966cd8bfc25d9cd07d515b33fa6"},
+ {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:14cd121ea63ecdae71efa69c15c5543a4b5fbcd0bbe2aad864baca0063cecf27"},
+ {file = "cffi-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:d4d692a89c5cf08a8557fdeb329b82e7bf609aadfaed6c0d79f5a449a3c7c023"},
+ {file = "cffi-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0104fb5ae2391d46a4cb082abdd5c69ea4eab79d8d44eaaf79f1b1fd806ee4c2"},
+ {file = "cffi-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:91ec59c33514b7c7559a6acda53bbfe1b283949c34fe7440bcf917f96ac0723e"},
+ {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f5c7150ad32ba43a07c4479f40241756145a1f03b43480e058cfd862bf5041c7"},
+ {file = "cffi-1.15.0-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:00c878c90cb53ccfaae6b8bc18ad05d2036553e6d9d1d9dbcf323bbe83854ca3"},
+ {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abb9a20a72ac4e0fdb50dae135ba5e77880518e742077ced47eb1499e29a443c"},
+ {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5263e363c27b653a90078143adb3d076c1a748ec9ecc78ea2fb916f9b861962"},
+ {file = "cffi-1.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f54a64f8b0c8ff0b64d18aa76675262e1700f3995182267998c31ae974fbc382"},
+ {file = "cffi-1.15.0-cp310-cp310-win32.whl", hash = "sha256:c21c9e3896c23007803a875460fb786118f0cdd4434359577ea25eb556e34c55"},
+ {file = "cffi-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:5e069f72d497312b24fcc02073d70cb989045d1c91cbd53979366077959933e0"},
+ {file = "cffi-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:64d4ec9f448dfe041705426000cc13e34e6e5bb13736e9fd62e34a0b0c41566e"},
+ {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2756c88cbb94231c7a147402476be2c4df2f6078099a6f4a480d239a8817ae39"},
+ {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b96a311ac60a3f6be21d2572e46ce67f09abcf4d09344c49274eb9e0bf345fc"},
+ {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75e4024375654472cc27e91cbe9eaa08567f7fbdf822638be2814ce059f58032"},
+ {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:59888172256cac5629e60e72e86598027aca6bf01fa2465bdb676d37636573e8"},
+ {file = "cffi-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:27c219baf94952ae9d50ec19651a687b826792055353d07648a5695413e0c605"},
+ {file = "cffi-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:4958391dbd6249d7ad855b9ca88fae690783a6be9e86df65865058ed81fc860e"},
+ {file = "cffi-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:f6f824dc3bce0edab5f427efcfb1d63ee75b6fcb7282900ccaf925be84efb0fc"},
+ {file = "cffi-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:06c48159c1abed75c2e721b1715c379fa3200c7784271b3c46df01383b593636"},
+ {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c2051981a968d7de9dd2d7b87bcb9c939c74a34626a6e2f8181455dd49ed69e4"},
+ {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fd8a250edc26254fe5b33be00402e6d287f562b6a5b2152dec302fa15bb3e997"},
+ {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91d77d2a782be4274da750752bb1650a97bfd8f291022b379bb8e01c66b4e96b"},
+ {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:45db3a33139e9c8f7c09234b5784a5e33d31fd6907800b316decad50af323ff2"},
+ {file = "cffi-1.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:263cc3d821c4ab2213cbe8cd8b355a7f72a8324577dc865ef98487c1aeee2bc7"},
+ {file = "cffi-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:17771976e82e9f94976180f76468546834d22a7cc404b17c22df2a2c81db0c66"},
+ {file = "cffi-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:3415c89f9204ee60cd09b235810be700e993e343a408693e80ce7f6a40108029"},
+ {file = "cffi-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4238e6dab5d6a8ba812de994bbb0a79bddbdf80994e4ce802b6f6f3142fcc880"},
+ {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0808014eb713677ec1292301ea4c81ad277b6cdf2fdd90fd540af98c0b101d20"},
+ {file = "cffi-1.15.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:57e9ac9ccc3101fac9d6014fba037473e4358ef4e89f8e181f8951a2c0162024"},
+ {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b6c2ea03845c9f501ed1313e78de148cd3f6cad741a75d43a29b43da27f2e1e"},
+ {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10dffb601ccfb65262a27233ac273d552ddc4d8ae1bf93b21c94b8511bffe728"},
+ {file = "cffi-1.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:786902fb9ba7433aae840e0ed609f45c7bcd4e225ebb9c753aa39725bb3e6ad6"},
+ {file = "cffi-1.15.0-cp38-cp38-win32.whl", hash = "sha256:da5db4e883f1ce37f55c667e5c0de439df76ac4cb55964655906306918e7363c"},
+ {file = "cffi-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:181dee03b1170ff1969489acf1c26533710231c58f95534e3edac87fff06c443"},
+ {file = "cffi-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:45e8636704eacc432a206ac7345a5d3d2c62d95a507ec70d62f23cd91770482a"},
+ {file = "cffi-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31fb708d9d7c3f49a60f04cf5b119aeefe5644daba1cd2a0fe389b674fd1de37"},
+ {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6dc2737a3674b3e344847c8686cf29e500584ccad76204efea14f451d4cc669a"},
+ {file = "cffi-1.15.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74fdfdbfdc48d3f47148976f49fab3251e550a8720bebc99bf1483f5bfb5db3e"},
+ {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffaa5c925128e29efbde7301d8ecaf35c8c60ffbcd6a1ffd3a552177c8e5e796"},
+ {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f7d084648d77af029acb79a0ff49a0ad7e9d09057a9bf46596dac9514dc07df"},
+ {file = "cffi-1.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ef1f279350da2c586a69d32fc8733092fd32cc8ac95139a00377841f59a3f8d8"},
+ {file = "cffi-1.15.0-cp39-cp39-win32.whl", hash = "sha256:2a23af14f408d53d5e6cd4e3d9a24ff9e05906ad574822a10563efcef137979a"},
+ {file = "cffi-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:3773c4d81e6e818df2efbc7dd77325ca0dcb688116050fb2b3011218eda36139"},
+ {file = "cffi-1.15.0.tar.gz", hash = "sha256:920f0d66a896c2d99f0adbb391f990a84091179542c205fa53ce5787aff87954"},
+]
+chardet = [
+ {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"},
+ {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"},
+]
+charset-normalizer = [
+ {file = "charset-normalizer-2.0.10.tar.gz", hash = "sha256:876d180e9d7432c5d1dfd4c5d26b72f099d503e8fcc0feb7532c9289be60fcbd"},
+ {file = "charset_normalizer-2.0.10-py3-none-any.whl", hash = "sha256:cb957888737fc0bbcd78e3df769addb41fd1ff8cf950dc9e7ad7793f1bf44455"},
+]
+colorama = [
+ {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
+ {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
+]
+cycler = [
+ {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"},
+ {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"},
+]
+darglint = [
+ {file = "darglint-1.8.1-py3-none-any.whl", hash = "sha256:5ae11c259c17b0701618a20c3da343a3eb98b3bc4b5a83d31cdd94f5ebdced8d"},
+ {file = "darglint-1.8.1.tar.gz", hash = "sha256:080d5106df149b199822e7ee7deb9c012b49891538f14a11be681044f0bb20da"},
+]
+doc8 = [
+ {file = "doc8-0.8.1-py2.py3-none-any.whl", hash = "sha256:4d58a5c8c56cedd2b2c9d6e3153be5d956cf72f6051128f0f2255c66227df721"},
+ {file = "doc8-0.8.1.tar.gz", hash = "sha256:4d1df12598807cf08ffa9a1d5ef42d229ee0de42519da01b768ff27211082c12"},
+]
+docutils = [
+ {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"},
+ {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"},
+]
+eradicate = [
+ {file = "eradicate-2.0.0.tar.gz", hash = "sha256:27434596f2c5314cc9b31410c93d8f7e8885747399773cd088d3adea647a60c8"},
+]
+flake8 = [
+ {file = "flake8-3.9.2-py2.py3-none-any.whl", hash = "sha256:bf8fd333346d844f616e8d47905ef3a3384edae6b4e9beb0c5101e25e3110907"},
+ {file = "flake8-3.9.2.tar.gz", hash = "sha256:07528381786f2a6237b061f6e96610a4167b226cb926e2aa2b6b1d78057c576b"},
+]
+flake8-broken-line = [
+ {file = "flake8-broken-line-0.3.0.tar.gz", hash = "sha256:f74e052833324a9e5f0055032f7ccc54b23faabafe5a26241c2f977e70b10b50"},
+ {file = "flake8_broken_line-0.3.0-py3-none-any.whl", hash = "sha256:611f79c7f27118e7e5d3dc098ef7681c40aeadf23783700c5dbee840d2baf3af"},
+]
+flake8-comprehensions = [
+ {file = "flake8-comprehensions-3.8.0.tar.gz", hash = "sha256:8e108707637b1d13734f38e03435984f6b7854fa6b5a4e34f93e69534be8e521"},
+ {file = "flake8_comprehensions-3.8.0-py3-none-any.whl", hash = "sha256:9406314803abe1193c064544ab14fdc43c58424c0882f6ff8a581eb73fc9bb58"},
+]
+flake8-debugger = [
+ {file = "flake8-debugger-4.0.0.tar.gz", hash = "sha256:e43dc777f7db1481db473210101ec2df2bd39a45b149d7218a618e954177eda6"},
+ {file = "flake8_debugger-4.0.0-py3-none-any.whl", hash = "sha256:82e64faa72e18d1bdd0000407502ebb8ecffa7bc027c62b9d4110ce27c091032"},
+]
+flake8-docstrings = [
+ {file = "flake8-docstrings-1.6.0.tar.gz", hash = "sha256:9fe7c6a306064af8e62a055c2f61e9eb1da55f84bb39caef2b84ce53708ac34b"},
+ {file = "flake8_docstrings-1.6.0-py2.py3-none-any.whl", hash = "sha256:99cac583d6c7e32dd28bbfbef120a7c0d1b6dde4adb5a9fd441c4227a6534bde"},
+]
+flake8-eradicate = [
+ {file = "flake8-eradicate-1.2.0.tar.gz", hash = "sha256:acaa1b6839ff00d284b805c432fdfa6047262bd15a5504ec945797e87b4de1fa"},
+ {file = "flake8_eradicate-1.2.0-py3-none-any.whl", hash = "sha256:51dc660d0c1c1ed93af0f813540bbbf72ab2d3466c14e3f3bac371c618b6042f"},
+]
+flake8-isort = [
+ {file = "flake8-isort-4.1.1.tar.gz", hash = "sha256:d814304ab70e6e58859bc5c3e221e2e6e71c958e7005239202fee19c24f82717"},
+ {file = "flake8_isort-4.1.1-py3-none-any.whl", hash = "sha256:c4e8b6dcb7be9b71a02e6e5d4196cefcef0f3447be51e82730fb336fff164949"},
+]
+flake8-quotes = [
+ {file = "flake8-quotes-3.3.1.tar.gz", hash = "sha256:633adca6fb8a08131536af0d750b44d6985b9aba46f498871e21588c3e6f525a"},
+]
+flake8-rst-docstrings = [
+ {file = "flake8-rst-docstrings-0.2.5.tar.gz", hash = "sha256:4fe93f997dea45d9d3c8bd220f12f0b6c359948fb943b5b48021a3f927edd816"},
+ {file = "flake8_rst_docstrings-0.2.5-py3-none-any.whl", hash = "sha256:b99d9041b769b857efe45a448dc8c71b1bb311f9cacbdac5de82f96498105082"},
+]
+fonttools = [
+ {file = "fonttools-4.28.5-py3-none-any.whl", hash = "sha256:edf251d5d2cc0580d5f72de4621c338d8c66c5f61abb50cf486640f73c8194d5"},
+ {file = "fonttools-4.28.5.zip", hash = "sha256:545c05d0f7903a863c2020e07b8f0a57517f2c40d940bded77076397872d14ca"},
+]
+hydra-slayer = [
+ {file = "hydra-slayer-0.4.0.tar.gz", hash = "sha256:2e7ef0b99e5e11504d80996f689ed5ed7ae8766edb8e76b6933b306966159604"},
+ {file = "hydra_slayer-0.4.0-py3-none-any.whl", hash = "sha256:6c8510811607b9a01d7386d7c09c28d3ea9bf20c945816f17d18fa88d57e8ffd"},
+]
+idna = [
+ {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"},
+ {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"},
+]
+imageio = [
+ {file = "imageio-2.14.0-py3-none-any.whl", hash = "sha256:af6c7c89947ee9d81eab2caee3726f61e1a0d861e87e856904868d0fe7c0aa15"},
+ {file = "imageio-2.14.0.tar.gz", hash = "sha256:1a612b46c24805115701ed7c4e1f2d7feb53bb615d52bfef9713a6836e997bb1"},
+]
+isort = [
+ {file = "isort-5.10.1-py3-none-any.whl", hash = "sha256:6f62d78e2f89b4500b080fe3a81690850cd254227f27f75c3a0c491a1f351ba7"},
+ {file = "isort-5.10.1.tar.gz", hash = "sha256:e8443a5e7a020e9d7f97f1d7d9cd17c88bcb3bc7e218bf9cf5095fe550be2951"},
+]
+joblib = [
+ {file = "joblib-1.1.0-py2.py3-none-any.whl", hash = "sha256:f21f109b3c7ff9d95f8387f752d0d9c34a02aa2f7060c2135f465da0e5160ff6"},
+ {file = "joblib-1.1.0.tar.gz", hash = "sha256:4158fcecd13733f8be669be0683b96ebdbbd38d23559f54dca7205aea1bf1e35"},
+]
+jpeg4py = [
+ {file = "jpeg4py-0.1.4.tar.gz", hash = "sha256:cec3adcb110bd1391659670f2fe64d2a70a0218e84fdb7b08506107946d3bdb0"},
+]
+kiwisolver = [
+ {file = "kiwisolver-1.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1d819553730d3c2724582124aee8a03c846ec4362ded1034c16fb3ef309264e6"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8d93a1095f83e908fc253f2fb569c2711414c0bfd451cab580466465b235b470"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c4550a359c5157aaf8507e6820d98682872b9100ce7607f8aa070b4b8af6c298"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2210f28778c7d2ee13f3c2a20a3a22db889e75f4ec13a21072eabb5693801e84"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:82f49c5a79d3839bc8f38cb5f4bfc87e15f04cbafa5fbd12fb32c941cb529cfb"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9661a04ca3c950a8ac8c47f53cbc0b530bce1b52f516a1e87b7736fec24bfff0"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ddb500a2808c100e72c075cbb00bf32e62763c82b6a882d403f01a119e3f402"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72be6ebb4e92520b9726d7146bc9c9b277513a57a38efcf66db0620aec0097e0"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-win32.whl", hash = "sha256:83d2c9db5dfc537d0171e32de160461230eb14663299b7e6d18ca6dca21e4977"},
+ {file = "kiwisolver-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:cba430db673c29376135e695c6e2501c44c256a81495da849e85d1793ee975ad"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4116ba9a58109ed5e4cb315bdcbff9838f3159d099ba5259c7c7fb77f8537492"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19554bd8d54cf41139f376753af1a644b63c9ca93f8f72009d50a2080f870f77"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a4cf5bbdc861987a7745aed7a536c6405256853c94abc9f3287c3fa401b174"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0007840186bacfaa0aba4466d5890334ea5938e0bb7e28078a0eb0e63b5b59d5"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ec2eba188c1906b05b9b49ae55aae4efd8150c61ba450e6721f64620c50b59eb"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:3dbb3cea20b4af4f49f84cffaf45dd5f88e8594d18568e0225e6ad9dec0e7967"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-win32.whl", hash = "sha256:5326ddfacbe51abf9469fe668944bc2e399181a2158cb5d45e1d40856b2a0589"},
+ {file = "kiwisolver-1.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c6572c2dab23c86a14e82c245473d45b4c515314f1f859e92608dcafbd2f19b8"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b5074fb09429f2b7bc82b6fb4be8645dcbac14e592128beeff5461dcde0af09f"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:22521219ca739654a296eea6d4367703558fba16f98688bd8ce65abff36eaa84"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c358721aebd40c243894298f685a19eb0491a5c3e0b923b9f887ef1193ddf829"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ba5a1041480c6e0a8b11a9544d53562abc2d19220bfa14133e0cdd9967e97af"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44e6adf67577dbdfa2d9f06db9fbc5639afefdb5bf2b4dfec25c3a7fbc619536"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d45d1c74f88b9f41062716c727f78f2a59a5476ecbe74956fafb423c5c87a76"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:70adc3658138bc77a36ce769f5f183169bc0a2906a4f61f09673f7181255ac9b"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6a5431940f28b6de123de42f0eb47b84a073ee3c3345dc109ad550a3307dd28"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-win32.whl", hash = "sha256:ee040a7de8d295dbd261ef2d6d3192f13e2b08ec4a954de34a6fb8ff6422e24c"},
+ {file = "kiwisolver-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:8dc3d842fa41a33fe83d9f5c66c0cc1f28756530cd89944b63b072281e852031"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a498bcd005e8a3fedd0022bb30ee0ad92728154a8798b703f394484452550507"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:80efd202108c3a4150e042b269f7c78643420cc232a0a771743bb96b742f838f"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f8eb7b6716f5b50e9c06207a14172cf2de201e41912ebe732846c02c830455b9"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f441422bb313ab25de7b3dbfd388e790eceb76ce01a18199ec4944b369017009"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:30fa008c172355c7768159983a7270cb23838c4d7db73d6c0f6b60dde0d432c6"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f8f6c8f4f1cff93ca5058d6ec5f0efda922ecb3f4c5fb76181f327decff98b8"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba677bcaff9429fd1bf01648ad0901cea56c0d068df383d5f5856d88221fe75b"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7843b1624d6ccca403a610d1277f7c28ad184c5aa88a1750c1a999754e65b439"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-win32.whl", hash = "sha256:e6f5eb2f53fac7d408a45fbcdeda7224b1cfff64919d0f95473420a931347ae9"},
+ {file = "kiwisolver-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:eedd3b59190885d1ebdf6c5e0ca56828beb1949b4dfe6e5d0256a461429ac386"},
+ {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:dedc71c8eb9c5096037766390172c34fb86ef048b8e8958b4e484b9e505d66bc"},
+ {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bf7eb45d14fc036514c09554bf983f2a72323254912ed0c3c8e697b62c4c158f"},
+ {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2b65bd35f3e06a47b5c30ea99e0c2b88f72c6476eedaf8cfbc8e66adb5479dcf"},
+ {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25405f88a37c5f5bcba01c6e350086d65e7465fd1caaf986333d2a045045a223"},
+ {file = "kiwisolver-1.3.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:bcadb05c3d4794eb9eee1dddf1c24215c92fb7b55a80beae7a60530a91060560"},
+ {file = "kiwisolver-1.3.2.tar.gz", hash = "sha256:fc4453705b81d03568d5b808ad8f09c77c47534f6ac2e72e733f9ca4714aa75c"},
+]
+matplotlib = [
+ {file = "matplotlib-3.5.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:456cc8334f6d1124e8ff856b42d2cc1c84335375a16448189999496549f7182b"},
+ {file = "matplotlib-3.5.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8a77906dc2ef9b67407cec0bdbf08e3971141e535db888974a915be5e1e3efc6"},
+ {file = "matplotlib-3.5.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e70ae6475cfd0fad3816dcbf6cac536dc6f100f7474be58d59fa306e6e768a4"},
+ {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53273c5487d1c19c3bc03b9eb82adaf8456f243b97ed79d09dded747abaf1235"},
+ {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e3b6f3fd0d8ca37861c31e9a7cab71a0ef14c639b4c95654ea1dd153158bf0df"},
+ {file = "matplotlib-3.5.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c87cdaf06fd7b2477f68909838ff4176f105064a72ca9d24d3f2a29f73d393"},
+ {file = "matplotlib-3.5.1-cp310-cp310-win32.whl", hash = "sha256:e2f28a07b4f82abb40267864ad7b3a4ed76f1b1663e81c7efc84a9b9248f672f"},
+ {file = "matplotlib-3.5.1-cp310-cp310-win_amd64.whl", hash = "sha256:d70a32ee1f8b55eed3fd4e892f0286df8cccc7e0475c11d33b5d0a148f5c7599"},
+ {file = "matplotlib-3.5.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:68fa30cec89b6139dc559ed6ef226c53fd80396da1919a1b5ef672c911aaa767"},
+ {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e3484d8455af3fdb0424eae1789af61f6a79da0c80079125112fd5c1b604218"},
+ {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e293b16cf303fe82995e41700d172a58a15efc5331125d08246b520843ef21ee"},
+ {file = "matplotlib-3.5.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e3520a274a0e054e919f5b3279ee5dbccf5311833819ccf3399dab7c83e90a25"},
+ {file = "matplotlib-3.5.1-cp37-cp37m-win32.whl", hash = "sha256:2252bfac85cec7af4a67e494bfccf9080bcba8a0299701eab075f48847cca907"},
+ {file = "matplotlib-3.5.1-cp37-cp37m-win_amd64.whl", hash = "sha256:abf67e05a1b7f86583f6ebd01f69b693b9c535276f4e943292e444855870a1b8"},
+ {file = "matplotlib-3.5.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6c094e4bfecd2fa7f9adffd03d8abceed7157c928c2976899de282f3600f0a3d"},
+ {file = "matplotlib-3.5.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:506b210cc6e66a0d1c2bb765d055f4f6bc2745070fb1129203b67e85bbfa5c18"},
+ {file = "matplotlib-3.5.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b04fc29bcef04d4e2d626af28d9d892be6aba94856cb46ed52bcb219ceac8943"},
+ {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:577ed20ec9a18d6bdedb4616f5e9e957b4c08563a9f985563a31fd5b10564d2a"},
+ {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e486f60db0cd1c8d68464d9484fd2a94011c1ac8593d765d0211f9daba2bd535"},
+ {file = "matplotlib-3.5.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b71f3a7ca935fc759f2aed7cec06cfe10bc3100fadb5dbd9c435b04e557971e1"},
+ {file = "matplotlib-3.5.1-cp38-cp38-win32.whl", hash = "sha256:d24e5bb8028541ce25e59390122f5e48c8506b7e35587e5135efcb6471b4ac6c"},
+ {file = "matplotlib-3.5.1-cp38-cp38-win_amd64.whl", hash = "sha256:778d398c4866d8e36ee3bf833779c940b5f57192fa0a549b3ad67bc4c822771b"},
+ {file = "matplotlib-3.5.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bb1c613908f11bac270bc7494d68b1ef6e7c224b7a4204d5dacf3522a41e2bc3"},
+ {file = "matplotlib-3.5.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:edf5e4e1d5fb22c18820e8586fb867455de3b109c309cb4fce3aaed85d9468d1"},
+ {file = "matplotlib-3.5.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:40e0d7df05e8efe60397c69b467fc8f87a2affeb4d562fe92b72ff8937a2b511"},
+ {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a350ca685d9f594123f652ba796ee37219bf72c8e0fc4b471473d87121d6d34"},
+ {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e66497cd990b1a130e21919b004da2f1dc112132c01ac78011a90a0f9229778"},
+ {file = "matplotlib-3.5.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:87900c67c0f1728e6db17c6809ec05c025c6624dcf96a8020326ea15378fe8e7"},
+ {file = "matplotlib-3.5.1-cp39-cp39-win32.whl", hash = "sha256:b8a4fb2a0c5afbe9604f8a91d7d0f27b1832c3e0b5e365f95a13015822b4cd65"},
+ {file = "matplotlib-3.5.1-cp39-cp39-win_amd64.whl", hash = "sha256:fe8d40c434a8e2c68d64c6d6a04e77f21791a93ff6afe0dce169597c110d3079"},
+ {file = "matplotlib-3.5.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:34a1fc29f8f96e78ec57a5eff5e8d8b53d3298c3be6df61e7aa9efba26929522"},
+ {file = "matplotlib-3.5.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:b19a761b948e939a9e20173aaae76070025f0024fc8f7ba08bef22a5c8573afc"},
+ {file = "matplotlib-3.5.1-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6803299cbf4665eca14428d9e886de62e24f4223ac31ab9c5d6d5339a39782c7"},
+ {file = "matplotlib-3.5.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:14334b9902ec776461c4b8c6516e26b450f7ebe0b3ef8703bf5cdfbbaecf774a"},
+ {file = "matplotlib-3.5.1.tar.gz", hash = "sha256:b2e9810e09c3a47b73ce9cab5a72243a1258f61e7900969097a817232246ce1c"},
+]
+mccabe = [
+ {file = "mccabe-0.6.1-py2.py3-none-any.whl", hash = "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42"},
+ {file = "mccabe-0.6.1.tar.gz", hash = "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"},
+]
+mypy = [
+ {file = "mypy-0.910-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:a155d80ea6cee511a3694b108c4494a39f42de11ee4e61e72bc424c490e46457"},
+ {file = "mypy-0.910-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:b94e4b785e304a04ea0828759172a15add27088520dc7e49ceade7834275bedb"},
+ {file = "mypy-0.910-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:088cd9c7904b4ad80bec811053272986611b84221835e079be5bcad029e79dd9"},
+ {file = "mypy-0.910-cp35-cp35m-win_amd64.whl", hash = "sha256:adaeee09bfde366d2c13fe6093a7df5df83c9a2ba98638c7d76b010694db760e"},
+ {file = "mypy-0.910-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:ecd2c3fe726758037234c93df7e98deb257fd15c24c9180dacf1ef829da5f921"},
+ {file = "mypy-0.910-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:d9dd839eb0dc1bbe866a288ba3c1afc33a202015d2ad83b31e875b5905a079b6"},
+ {file = "mypy-0.910-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3e382b29f8e0ccf19a2df2b29a167591245df90c0b5a2542249873b5c1d78212"},
+ {file = "mypy-0.910-cp36-cp36m-win_amd64.whl", hash = "sha256:53fd2eb27a8ee2892614370896956af2ff61254c275aaee4c230ae771cadd885"},
+ {file = "mypy-0.910-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b6fb13123aeef4a3abbcfd7e71773ff3ff1526a7d3dc538f3929a49b42be03f0"},
+ {file = "mypy-0.910-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e4dab234478e3bd3ce83bac4193b2ecd9cf94e720ddd95ce69840273bf44f6de"},
+ {file = "mypy-0.910-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:7df1ead20c81371ccd6091fa3e2878559b5c4d4caadaf1a484cf88d93ca06703"},
+ {file = "mypy-0.910-cp37-cp37m-win_amd64.whl", hash = "sha256:0aadfb2d3935988ec3815952e44058a3100499f5be5b28c34ac9d79f002a4a9a"},
+ {file = "mypy-0.910-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec4e0cd079db280b6bdabdc807047ff3e199f334050db5cbb91ba3e959a67504"},
+ {file = "mypy-0.910-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:119bed3832d961f3a880787bf621634ba042cb8dc850a7429f643508eeac97b9"},
+ {file = "mypy-0.910-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:866c41f28cee548475f146aa4d39a51cf3b6a84246969f3759cb3e9c742fc072"},
+ {file = "mypy-0.910-cp38-cp38-win_amd64.whl", hash = "sha256:ceb6e0a6e27fb364fb3853389607cf7eb3a126ad335790fa1e14ed02fba50811"},
+ {file = "mypy-0.910-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a85e280d4d217150ce8cb1a6dddffd14e753a4e0c3cf90baabb32cefa41b59e"},
+ {file = "mypy-0.910-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:42c266ced41b65ed40a282c575705325fa7991af370036d3f134518336636f5b"},
+ {file = "mypy-0.910-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:3c4b8ca36877fc75339253721f69603a9c7fdb5d4d5a95a1a1b899d8b86a4de2"},
+ {file = "mypy-0.910-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:c0df2d30ed496a08de5daed2a9ea807d07c21ae0ab23acf541ab88c24b26ab97"},
+ {file = "mypy-0.910-cp39-cp39-win_amd64.whl", hash = "sha256:c6c2602dffb74867498f86e6129fd52a2770c48b7cd3ece77ada4fa38f94eba8"},
+ {file = "mypy-0.910-py3-none-any.whl", hash = "sha256:ef565033fa5a958e62796867b1df10c40263ea9ded87164d67572834e57a174d"},
+ {file = "mypy-0.910.tar.gz", hash = "sha256:704098302473cb31a218f1775a873b376b30b4c18229421e9e9dc8916fd16150"},
+]
+mypy-extensions = [
+ {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"},
+ {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"},
+]
+networkx = [
+ {file = "networkx-2.6.3-py3-none-any.whl", hash = "sha256:80b6b89c77d1dfb64a4c7854981b60aeea6360ac02c6d4e4913319e0a313abef"},
+ {file = "networkx-2.6.3.tar.gz", hash = "sha256:c0946ed31d71f1b732b5aaa6da5a0388a345019af232ce2f49c766e2d6795c51"},
+]
+numpy = [
+ {file = "numpy-1.22.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d62d6b0870b53799204515145935608cdeb4cebb95a26800b6750e48884cc5b"},
+ {file = "numpy-1.22.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:831f2df87bd3afdfc77829bc94bd997a7c212663889d56518359c827d7113b1f"},
+ {file = "numpy-1.22.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8d1563060e77096367952fb44fca595f2b2f477156de389ce7c0ade3aef29e21"},
+ {file = "numpy-1.22.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69958735d5e01f7b38226a6c6e7187d72b7e4d42b6b496aca5860b611ca0c193"},
+ {file = "numpy-1.22.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45a7dfbf9ed8d68fd39763940591db7637cf8817c5bce1a44f7b56c97cbe211e"},
+ {file = "numpy-1.22.1-cp310-cp310-win_amd64.whl", hash = "sha256:7e957ca8112c689b728037cea9c9567c27cf912741fabda9efc2c7d33d29dfa1"},
+ {file = "numpy-1.22.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:800dfeaffb2219d49377da1371d710d7952c9533b57f3d51b15e61c4269a1b5b"},
+ {file = "numpy-1.22.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:65f5e257987601fdfc63f1d02fca4d1c44a2b85b802f03bd6abc2b0b14648dd2"},
+ {file = "numpy-1.22.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:632e062569b0fe05654b15ef0e91a53c0a95d08ffe698b66f6ba0f927ad267c2"},
+ {file = "numpy-1.22.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d245a2bf79188d3f361137608c3cd12ed79076badd743dc660750a9f3074f7c"},
+ {file = "numpy-1.22.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26b4018a19d2ad9606ce9089f3d52206a41b23de5dfe8dc947d2ec49ce45d015"},
+ {file = "numpy-1.22.1-cp38-cp38-win32.whl", hash = "sha256:f8ad59e6e341f38266f1549c7c2ec70ea0e3d1effb62a44e5c3dba41c55f0187"},
+ {file = "numpy-1.22.1-cp38-cp38-win_amd64.whl", hash = "sha256:60f19c61b589d44fbbab8ff126640ae712e163299c2dd422bfe4edc7ec51aa9b"},
+ {file = "numpy-1.22.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2db01d9838a497ba2aa9a87515aeaf458f42351d72d4e7f3b8ddbd1eba9479f2"},
+ {file = "numpy-1.22.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bcd19dab43b852b03868796f533b5f5561e6c0e3048415e675bec8d2e9d286c1"},
+ {file = "numpy-1.22.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:78bfbdf809fc236490e7e65715bbd98377b122f329457fffde206299e163e7f3"},
+ {file = "numpy-1.22.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c51124df17f012c3b757380782ae46eee85213a3215e51477e559739f57d9bf6"},
+ {file = "numpy-1.22.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88d54b7b516f0ca38a69590557814de2dd638d7d4ed04864826acaac5ebb8f01"},
+ {file = "numpy-1.22.1-cp39-cp39-win32.whl", hash = "sha256:b5ec9a5eaf391761c61fd873363ef3560a3614e9b4ead17347e4deda4358bca4"},
+ {file = "numpy-1.22.1-cp39-cp39-win_amd64.whl", hash = "sha256:4ac4d7c9f8ea2a79d721ebfcce81705fc3cd61a10b731354f1049eb8c99521e8"},
+ {file = "numpy-1.22.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e60ef82c358ded965fdd3132b5738eade055f48067ac8a5a8ac75acc00cad31f"},
+ {file = "numpy-1.22.1.zip", hash = "sha256:e348ccf5bc5235fc405ab19d53bec215bb373300e5523c7b476cc0da8a5e9973"},
+]
+opencv-python-headless = [
+ {file = "opencv-python-headless-4.5.5.62.tar.gz", hash = "sha256:12aa335156adf62efdaa6dc5966d6c3415a7e2834d336e4f10ee5fccc65202c8"},
+ {file = "opencv_python_headless-4.5.5.62-cp36-abi3-macosx_10_15_x86_64.whl", hash = "sha256:a4b21d055036460e2e1f5d97809c299c21790c59fb382fa2b9f6ef6113a97a68"},
+ {file = "opencv_python_headless-4.5.5.62-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:359989d3aeb7b5d01f7f2f0445d448260b955274b7b1803f38e983eb85431e1f"},
+ {file = "opencv_python_headless-4.5.5.62-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf84d8e98f6bf38f07fa0ef3a287e087e42fc0d5174ce05292ef322a96c1b4dc"},
+ {file = "opencv_python_headless-4.5.5.62-cp36-abi3-win32.whl", hash = "sha256:d53f70229d23cd0e54de5b8730a79ae92d3d874eedda8c1effb376aa5a4e6ea6"},
+ {file = "opencv_python_headless-4.5.5.62-cp36-abi3-win_amd64.whl", hash = "sha256:e8a8c02ee060ae30a31be27fb65527c9698a6aa0fec967b49d6e56ea4473d6be"},
+ {file = "opencv_python_headless-4.5.5.62-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:5c716001e76ca356d775875f82576c310e9d7cd38d3979a2616eea5e44c8eccd"},
+]
+packaging = [
+ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
+ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
+]
+pbr = [
+ {file = "pbr-5.8.0-py2.py3-none-any.whl", hash = "sha256:176e8560eaf61e127817ef93d8a844803abb27a4d4637f0ff3bb783129be2e0a"},
+ {file = "pbr-5.8.0.tar.gz", hash = "sha256:672d8ebee84921862110f23fcec2acea191ef58543d34dfe9ef3d9f13c31cddf"},
+]
+pillow = [
+ {file = "Pillow-9.0.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:113723312215b25c22df1fdf0e2da7a3b9c357a7d24a93ebbe80bfda4f37a8d4"},
+ {file = "Pillow-9.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bb47a548cea95b86494a26c89d153fd31122ed65255db5dcbc421a2d28eb3379"},
+ {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:31b265496e603985fad54d52d11970383e317d11e18e856971bdbb86af7242a4"},
+ {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d154ed971a4cc04b93a6d5b47f37948d1f621f25de3e8fa0c26b2d44f24e3e8f"},
+ {file = "Pillow-9.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fe92813d208ce8aa7d76da878bdc84b90809f79ccbad2a288e9bcbeac1d9bd"},
+ {file = "Pillow-9.0.0-cp310-cp310-win32.whl", hash = "sha256:d5dcea1387331c905405b09cdbfb34611050cc52c865d71f2362f354faee1e9f"},
+ {file = "Pillow-9.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:52abae4c96b5da630a8b4247de5428f593465291e5b239f3f843a911a3cf0105"},
+ {file = "Pillow-9.0.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:72c3110228944019e5f27232296c5923398496b28be42535e3b2dc7297b6e8b6"},
+ {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97b6d21771da41497b81652d44191489296555b761684f82b7b544c49989110f"},
+ {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72f649d93d4cc4d8cf79c91ebc25137c358718ad75f99e99e043325ea7d56100"},
+ {file = "Pillow-9.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7aaf07085c756f6cb1c692ee0d5a86c531703b6e8c9cae581b31b562c16b98ce"},
+ {file = "Pillow-9.0.0-cp37-cp37m-win32.whl", hash = "sha256:03b27b197deb4ee400ed57d8d4e572d2d8d80f825b6634daf6e2c18c3c6ccfa6"},
+ {file = "Pillow-9.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a09a9d4ec2b7887f7a088bbaacfd5c07160e746e3d47ec5e8050ae3b2a229e9f"},
+ {file = "Pillow-9.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:490e52e99224858f154975db61c060686df8a6b3f0212a678e5d2e2ce24675c9"},
+ {file = "Pillow-9.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:500d397ddf4bbf2ca42e198399ac13e7841956c72645513e8ddf243b31ad2128"},
+ {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ebd8b9137630a7bbbff8c4b31e774ff05bbb90f7911d93ea2c9371e41039b52"},
+ {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd0e5062f11cb3e730450a7d9f323f4051b532781026395c4323b8ad055523c4"},
+ {file = "Pillow-9.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f3b4522148586d35e78313db4db0df4b759ddd7649ef70002b6c3767d0fdeb7"},
+ {file = "Pillow-9.0.0-cp38-cp38-win32.whl", hash = "sha256:0b281fcadbb688607ea6ece7649c5d59d4bbd574e90db6cd030e9e85bde9fecc"},
+ {file = "Pillow-9.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:b5050d681bcf5c9f2570b93bee5d3ec8ae4cf23158812f91ed57f7126df91762"},
+ {file = "Pillow-9.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:c2067b3bb0781f14059b112c9da5a91c80a600a97915b4f48b37f197895dd925"},
+ {file = "Pillow-9.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2d16b6196fb7a54aff6b5e3ecd00f7c0bab1b56eee39214b2b223a9d938c50af"},
+ {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:98cb63ca63cb61f594511c06218ab4394bf80388b3d66cd61d0b1f63ee0ea69f"},
+ {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc462d24500ba707e9cbdef436c16e5c8cbf29908278af053008d9f689f56dee"},
+ {file = "Pillow-9.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3586e12d874ce2f1bc875a3ffba98732ebb12e18fb6d97be482bd62b56803281"},
+ {file = "Pillow-9.0.0-cp39-cp39-win32.whl", hash = "sha256:68e06f8b2248f6dc8b899c3e7ecf02c9f413aab622f4d6190df53a78b93d97a5"},
+ {file = "Pillow-9.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:6579f9ba84a3d4f1807c4aab4be06f373017fc65fff43498885ac50a9b47a553"},
+ {file = "Pillow-9.0.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:47f5cf60bcb9fbc46011f75c9b45a8b5ad077ca352a78185bd3e7f1d294b98bb"},
+ {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fd8053e1f8ff1844419842fd474fc359676b2e2a2b66b11cc59f4fa0a301315"},
+ {file = "Pillow-9.0.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c5439bfb35a89cac50e81c751317faea647b9a3ec11c039900cd6915831064d"},
+ {file = "Pillow-9.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95545137fc56ce8c10de646074d242001a112a92de169986abd8c88c27566a05"},
+ {file = "Pillow-9.0.0.tar.gz", hash = "sha256:ee6e2963e92762923956fe5d3479b1fdc3b76c83f290aad131a2f98c3df0593e"},
+]
+piq = [
+ {file = "piq-0.6.0-py3-none-any.whl", hash = "sha256:f2888a2a073169eaebf86a7c2a4061507df0165e50d0cf4285da2ef12bb92fa7"},
+ {file = "piq-0.6.0.tar.gz", hash = "sha256:94ec3d8d1af0a9277bceade69fd7003fbedcfe3ef153fe24d454786b60137f68"},
+]
+protobuf = [
+ {file = "protobuf-3.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1cb2ed66aac593adbf6dca4f07cd7ee7e2958b17bbc85b2cc8bc564ebeb258ec"},
+ {file = "protobuf-3.19.3-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:898bda9cd37ec0c781b598891e86435de80c3bfa53eb483a9dac5a11ec93e942"},
+ {file = "protobuf-3.19.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad761ef3be34c8bdc7285bec4b40372a8dad9e70cfbdc1793cd3cf4c1a4ce74"},
+ {file = "protobuf-3.19.3-cp310-cp310-win32.whl", hash = "sha256:2cddcbcc222f3144765ccccdb35d3621dc1544da57a9aca7e1944c1a4fe3db11"},
+ {file = "protobuf-3.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:6202df8ee8457cb00810c6e76ced480f22a1e4e02c899a14e7b6e6e1de09f938"},
+ {file = "protobuf-3.19.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:397d82f1c58b76445469c8c06b8dee1ff67b3053639d054f52599a458fac9bc6"},
+ {file = "protobuf-3.19.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e54b8650e849ee8e95e481024bff92cf98f5ec61c7650cb838d928a140adcb63"},
+ {file = "protobuf-3.19.3-cp36-cp36m-win32.whl", hash = "sha256:3bf3a07d17ba3511fe5fa916afb7351f482ab5dbab5afe71a7a384274a2cd550"},
+ {file = "protobuf-3.19.3-cp36-cp36m-win_amd64.whl", hash = "sha256:afa8122de8064fd577f49ae9eef433561c8ace97a0a7b969d56e8b1d39b5d177"},
+ {file = "protobuf-3.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:18c40a1b8721026a85187640f1786d52407dc9c1ba8ec38accb57a46e84015f6"},
+ {file = "protobuf-3.19.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:af7238849fa79285d448a24db686517570099739527a03c9c2971cce99cc5ae2"},
+ {file = "protobuf-3.19.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e765e6dfbbb02c55e4d6d1145743401a84fc0b508f5a81b2c5a738cf86353139"},
+ {file = "protobuf-3.19.3-cp37-cp37m-win32.whl", hash = "sha256:c781402ed5396ab56358d7b866d78c03a77cbc26ba0598d8bb0ac32084b1a257"},
+ {file = "protobuf-3.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:544fe9705189b249380fae07952d220c97f5c6c9372a6f936cc83a79601dcb70"},
+ {file = "protobuf-3.19.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:84bf3aa3efb00dbe1c7ed55da0f20800b0662541e582d7e62b3e1464d61ed365"},
+ {file = "protobuf-3.19.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3f80a3491eaca767cdd86cb8660dc778f634b44abdb0dffc9b2a8e8d0cd617d0"},
+ {file = "protobuf-3.19.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9401d96552befcc7311f5ef8f0fa7dba0ef5fd805466b158b141606cd0ab6a8"},
+ {file = "protobuf-3.19.3-cp38-cp38-win32.whl", hash = "sha256:ef02d112c025e83db5d1188a847e358beab3e4bbfbbaf10eaf69e67359af51b2"},
+ {file = "protobuf-3.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:1291a0a7db7d792745c99d4657b4c5c4942695c8b1ac1bfb993a34035ec123f7"},
+ {file = "protobuf-3.19.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:49677e5e9c7ea1245a90c2e8a00d304598f22ea3aa0628f0e0a530a9e70665fa"},
+ {file = "protobuf-3.19.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:df2ba379ee42427e8fcc6a0a76843bff6efb34ef5266b17f95043939b5e25b69"},
+ {file = "protobuf-3.19.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2acd7ca329be544d1a603d5f13a4e34a3791c90d651ebaf130ba2e43ae5397c6"},
+ {file = "protobuf-3.19.3-cp39-cp39-win32.whl", hash = "sha256:b53519b2ebec70cfe24b4ddda21e9843f0918d7c3627a785393fb35d402ab8ad"},
+ {file = "protobuf-3.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:8ceaf5fdb72c8e1fcb7be9f2b3b07482ce058a3548180c0bdd5c7e4ac5e14165"},
+ {file = "protobuf-3.19.3-py2.py3-none-any.whl", hash = "sha256:f6d4b5b7595a57e69eb7314c67bef4a3c745b4caf91accaf72913d8e0635111b"},
+ {file = "protobuf-3.19.3.tar.gz", hash = "sha256:d975a6314fbf5c524d4981e24294739216b5fb81ef3c14b86fb4b045d6690907"},
+]
+pycodestyle = [
+ {file = "pycodestyle-2.7.0-py2.py3-none-any.whl", hash = "sha256:514f76d918fcc0b55c6680472f0a37970994e07bbb80725808c17089be302068"},
+ {file = "pycodestyle-2.7.0.tar.gz", hash = "sha256:c389c1d06bf7904078ca03399a4816f974a1d590090fecea0c63ec26ebaf1cef"},
+]
+pycparser = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+pydocstyle = [
+ {file = "pydocstyle-6.1.1-py3-none-any.whl", hash = "sha256:6987826d6775056839940041beef5c08cc7e3d71d63149b48e36727f70144dc4"},
+ {file = "pydocstyle-6.1.1.tar.gz", hash = "sha256:1d41b7c459ba0ee6c345f2eb9ae827cab14a7533a88c5c6f7e94923f72df92dc"},
+]
+pyflakes = [
+ {file = "pyflakes-2.3.1-py2.py3-none-any.whl", hash = "sha256:7893783d01b8a89811dd72d7dfd4d84ff098e5eed95cfa8905b22bbffe52efc3"},
+ {file = "pyflakes-2.3.1.tar.gz", hash = "sha256:f5bc8ecabc05bb9d291eb5203d6810b49040f6ff446a756326104746cc00c1db"},
+]
+pygments = [
+ {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
+ {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"},
+]
+pyparsing = [
+ {file = "pyparsing-3.0.7-py3-none-any.whl", hash = "sha256:a6c06a88f252e6c322f65faf8f418b16213b51bdfaece0524c1c1bc30c63c484"},
+ {file = "pyparsing-3.0.7.tar.gz", hash = "sha256:18ee9022775d270c55187733956460083db60b37d0d0fb357445f3094eed3eea"},
+]
+python-dateutil = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+pywavelets = [
+ {file = "PyWavelets-1.2.0-cp310-cp310-macosx_10_13_universal2.whl", hash = "sha256:4c29efb581245e4ba3e76b23b1bf254a7c79821d7e63f432e68044cf2d233e9e"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:3089aa6b4962e1f5dbd0434a10f174f7a50f80bf64cb7d33cc725af07bd30ecc"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16c30e98f52e1a5d0a06b4b8f294114aaa94a0e95445b4056b6ca0a7a5535a42"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79386f4d8518e344487acf22b1c130e5907b3c45852aa50c18df5e19895aa92e"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b9bf543d552d20cf6ddfd690c5c18afacc8440cfb09b7515b2242bb9abfcc5eb"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28bb3d7d411ffbcfaa5a81a5a32044805893752c1641b39f6544b7e0a24661c3"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:333e1370167b0a2b963df82e42968000734bfa23b2ce88191e8ce9d24fc4cc57"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2ec1fc92573f56c1b129006d109e7518a098f3c8c6a2183b495619faca931461"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-win32.whl", hash = "sha256:d30c09fa805533bf7c8a5d06aa8babda5ae6c1541cd652cb2ebe6ab0b9536c0e"},
+ {file = "PyWavelets-1.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:73805016353a47c5b5f9cea547ea6ae07cd3520abfd7888916ff56b01e71307a"},
+ {file = "PyWavelets-1.2.0-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:ec670e78be2c3193e26c4bfa31dff1edd89ee8d7e2f4219782f3ef3f6daf37f0"},
+ {file = "PyWavelets-1.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:151a7f3d3db36baffe640d691403b7cd3938a1886c8a387b719e7e8b580dd4b1"},
+ {file = "PyWavelets-1.2.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:616fd2967dd153c3f539b1e0979168969f3702125caae4caa769efc5621cc2b7"},
+ {file = "PyWavelets-1.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:abedc0b49273a734d4592e325a5fb32f0741e115d6722e0c59964ecf21344640"},
+ {file = "PyWavelets-1.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9b05d2f21da666f918692f0313484002307794b5380f7291a395b9271abdda5c"},
+ {file = "PyWavelets-1.2.0-cp37-cp37m-win32.whl", hash = "sha256:537b5a8a8a3e9e5b931d34b517aa2312a3d8385937f98c4f8ffa668483329cfb"},
+ {file = "PyWavelets-1.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7a725030682bf891ced9819b4b21d6ef356fa11b70399d2d3adb319aead1efb0"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-macosx_10_13_universal2.whl", hash = "sha256:d9831b251f63460302811607f80a20285292ed0a0a046f95b4648edc0ed90f9c"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:7e4749ff324e8e01a2fdc859ac9714c4be1cbc6e8a34d5ddedb28fc9513b31a1"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:333684cc0d0e89cf6cb3a8b3ea68528790e1a3edc565a100cda47e29860f892d"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71415f2c376ae3e1331249043dddb63370c92fed162ebcb108fd87e12a956d89"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:44315d197bad9564210ef42f2d5a01c07ee6fd456c679d4a6d0f4e23ec9930cf"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:c8b388652b6afb4bf7be313be057240386de93817c6744c2aaf43a22890733c8"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a1131c27b9f79ca56dd6347d5585a609f51ed3cad0cfb6c17419b1733d3b6acd"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:df4d950be507a68d107c8f618ef7a9e0d9071789cfc1a840f89d0c985448880f"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-win32.whl", hash = "sha256:8a72f11c4d23f8ed8544def0003f500e98598e7a1efce892e6f964c430469c05"},
+ {file = "PyWavelets-1.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:be231e4bc569f2b1177711390d406d08ce388c1e01ab864f89be8928db234856"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-macosx_10_13_universal2.whl", hash = "sha256:90d53119f4b518236ad9a8a6be96d86efb1b4eeb73c28e3ed33824ae601ce7b1"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:351937e4fc6f3df3555cd2813e73bfc344885c5d994fd621d13dd004d05a4cb7"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:33375e3e6e361659f2519d412de2b50e2527a97c3946ffd66ca20a8ea1346fea"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:449d2d5f9c1f28a1bce01f714f9f742d9fdbce90f66de0a92cad39d98d24477b"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:18d84982417790a645f74cb0f968e89fb8af575dbf17a52c64af5075aa5528b8"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b3d9dab8223d0ce30e7480751f526ce1e97a1dcf5242875f8206e4449953116c"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a82e8c307b98d65737b286e0b458343ecee8505dfb519cd314a5f211f4fb92b9"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:663d265cb433653ef6335973edd13c66cd86c85fbe9c09e4bd138119bac15974"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-win32.whl", hash = "sha256:fd5ca221ac7bedb2a9aebcf3b05020827564db5a979b25005b3a2c7ba84069a2"},
+ {file = "PyWavelets-1.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:69cfc7f2ceb0a1097e7e8d1a026cbb2ff1afecc2d79820856f1abccb6cb59cc4"},
+ {file = "PyWavelets-1.2.0.tar.gz", hash = "sha256:6cbd69b047bb4e00873097472133425f5f08a4e6bc8b3f0ae709274d4d5e9a8d"},
+]
+pyyaml = [
+ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
+ {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
+ {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
+ {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
+ {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
+ {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
+ {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
+ {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
+ {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
+ {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
+ {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
+ {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
+ {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
+ {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
+ {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
+ {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
+]
+qudida = [
+ {file = "qudida-0.0.4-py3-none-any.whl", hash = "sha256:4519714c40cd0f2e6c51e1735edae8f8b19f4efe1f33be13e9d644ca5f736dd6"},
+ {file = "qudida-0.0.4.tar.gz", hash = "sha256:db198e2887ab0c9aa0023e565afbff41dfb76b361f85fd5e13f780d75ba18cc8"},
+]
+requests = [
+ {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"},
+ {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"},
+]
+restructuredtext-lint = [
+ {file = "restructuredtext_lint-1.3.2.tar.gz", hash = "sha256:d3b10a1fe2ecac537e51ae6d151b223b78de9fafdd50e5eb6b08c243df173c80"},
+]
+scikit-image = [
+ {file = "scikit-image-0.18.3.tar.gz", hash = "sha256:ecae99f93f4c5e9b1bf34959f4dc596c41f2f6b2fc407d9d9ddf85aebd3137ca"},
+ {file = "scikit_image-0.18.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7f27357adae9225df10fd152224d4c43978ae222f44bad7fedbfc2b81b985f9d"},
+ {file = "scikit_image-0.18.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bf3cdadc15db90f875bf59bdd0db080337e6353bb3d165c281f9af456d9d3f2"},
+ {file = "scikit_image-0.18.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2f24eb3df859ba5b3fb66947fe2d7240653b38f307d574e25f1ae29cc2a212ee"},
+ {file = "scikit_image-0.18.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:e2148846fae22e12b7a20d11d951adae57213dd097af5960407eb5c4421c0ab3"},
+ {file = "scikit_image-0.18.3-cp37-cp37m-win32.whl", hash = "sha256:142d070a41f9dfed0c3661e0dd9ce3cdb59a20a5b5ab071f529577d6d3e1fb81"},
+ {file = "scikit_image-0.18.3-cp37-cp37m-win_amd64.whl", hash = "sha256:05b430b1f8e25f7ba4a55afc6bf592af00f0ec809ab1d80bdede8893e7c6af57"},
+ {file = "scikit_image-0.18.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ef92f42d8a0794c47df1eeb1937119b6686b523dc663ecc5ffdf3c91645719ac"},
+ {file = "scikit_image-0.18.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b60fe0bc6e770c126c625f8c2d8af3b20fea53dac845abdf474bef1bd526490"},
+ {file = "scikit_image-0.18.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:088bf793696a3d5f56cce27c75d415fa795d1db9336b7e8257a1764dc03c7c52"},
+ {file = "scikit_image-0.18.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7994866857a1bb388cf3ede4ca7a8fba0b89ef980d5d802ec25e30124a2a34db"},
+ {file = "scikit_image-0.18.3-cp38-cp38-win32.whl", hash = "sha256:b29982f07231f60d6170f4c2c6f2fe88051a7b4194d775aefd81bfee107452b9"},
+ {file = "scikit_image-0.18.3-cp38-cp38-win_amd64.whl", hash = "sha256:3f3aa984638a6868171d176d26d6bd17b7b16a9fd505eaa97482f00a4310e3ff"},
+ {file = "scikit_image-0.18.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f698fc715202eeccabb371190c19c2d6713696de4d07609a0fa0cae3acb0b3dd"},
+ {file = "scikit_image-0.18.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0bf23d3d182ba8fe4ef8a0935e843be1f6c99e7eebeb492ac07c305e8cbb1dcd"},
+ {file = "scikit_image-0.18.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:bfa6eb04dc0b8773043f9994eccd8c517d713cd0f9e960dcb6754e19c1abceb1"},
+ {file = "scikit_image-0.18.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8394ad148685ed6ea8d84eb9c41e70cef1adda6c6d9a0ff8476c3126818a9340"},
+ {file = "scikit_image-0.18.3-cp39-cp39-win32.whl", hash = "sha256:ec242ff35bd4bc531aaf00c6edb9f0f64ff36ff353bd6ecd8f1c77886ddc0a7a"},
+ {file = "scikit_image-0.18.3-cp39-cp39-win_amd64.whl", hash = "sha256:3068af85682e90fda021070969dd2fce667f89a868c6aacb2fffbc5aa002e39e"},
+]
+scikit-learn = [
+ {file = "scikit-learn-1.0.2.tar.gz", hash = "sha256:b5870959a5484b614f26d31ca4c17524b1b0317522199dc985c3b4256e030767"},
+ {file = "scikit_learn-1.0.2-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:da3c84694ff693b5b3194d8752ccf935a665b8b5edc33a283122f4273ca3e687"},
+ {file = "scikit_learn-1.0.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:75307d9ea39236cad7eea87143155eea24d48f93f3a2f9389c817f7019f00705"},
+ {file = "scikit_learn-1.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f14517e174bd7332f1cca2c959e704696a5e0ba246eb8763e6c24876d8710049"},
+ {file = "scikit_learn-1.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d9aac97e57c196206179f674f09bc6bffcd0284e2ba95b7fe0b402ac3f986023"},
+ {file = "scikit_learn-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:d93d4c28370aea8a7cbf6015e8a669cd5d69f856cc2aa44e7a590fb805bb5583"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-macosx_10_13_x86_64.whl", hash = "sha256:85260fb430b795d806251dd3bb05e6f48cdc777ac31f2bcf2bc8bbed3270a8f5"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a053a6a527c87c5c4fa7bf1ab2556fa16d8345cf99b6c5a19030a4a7cd8fd2c0"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:245c9b5a67445f6f044411e16a93a554edc1efdcce94d3fc0bc6a4b9ac30b752"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158faf30684c92a78e12da19c73feff9641a928a8024b4fa5ec11d583f3d8a87"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:08ef968f6b72033c16c479c966bf37ccd49b06ea91b765e1cc27afefe723920b"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16455ace947d8d9e5391435c2977178d0ff03a261571e67f627c8fee0f9d431a"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:2f3b453e0b149898577e301d27e098dfe1a36943f7bb0ad704d1e548efc3b448"},
+ {file = "scikit_learn-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:46f431ec59dead665e1370314dbebc99ead05e1c0a9df42f22d6a0e00044820f"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ff3fa8ea0e09e38677762afc6e14cad77b5e125b0ea70c9bba1992f02c93b028"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9369b030e155f8188743eb4893ac17a27f81d28a884af460870c7c072f114243"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7d6b2475f1c23a698b48515217eb26b45a6598c7b1840ba23b3c5acece658dbb"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:285db0352e635b9e3392b0b426bc48c3b485512d3b4ac3c7a44ec2a2ba061e66"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb33fe1dc6f73dc19e67b264dbb5dde2a0539b986435fdd78ed978c14654830"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b1391d1a6e2268485a63c3073111fe3ba6ec5145fc957481cfd0652be571226d"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc3744dabc56b50bec73624aeca02e0def06b03cb287de26836e730659c5d29c"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-win32.whl", hash = "sha256:a999c9f02ff9570c783069f1074f06fe7386ec65b84c983db5aeb8144356a355"},
+ {file = "scikit_learn-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:7626a34eabbf370a638f32d1a3ad50526844ba58d63e3ab81ba91e2a7c6d037e"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:a90b60048f9ffdd962d2ad2fb16367a87ac34d76e02550968719eb7b5716fd10"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:7a93c1292799620df90348800d5ac06f3794c1316ca247525fa31169f6d25855"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:eabceab574f471de0b0eb3f2ecf2eee9f10b3106570481d007ed1c84ebf6d6a1"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:55f2f3a8414e14fbee03782f9fe16cca0f141d639d2b1c1a36779fa069e1db57"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80095a1e4b93bd33261ef03b9bc86d6db649f988ea4dbcf7110d0cded8d7213d"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa38a1b9b38ae1fad2863eff5e0d69608567453fdfc850c992e6e47eb764e846"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff746a69ff2ef25f62b36338c615dd15954ddc3ab8e73530237dd73235e76d62"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-win32.whl", hash = "sha256:e174242caecb11e4abf169342641778f68e1bfaba80cd18acd6bc84286b9a534"},
+ {file = "scikit_learn-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b54a62c6e318ddbfa7d22c383466d38d2ee770ebdb5ddb668d56a099f6eaf75f"},
+]
+scipy = [
+ {file = "scipy-1.6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a15a1f3fc0abff33e792d6049161b7795909b40b97c6cc2934ed54384017ab76"},
+ {file = "scipy-1.6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:e79570979ccdc3d165456dd62041d9556fb9733b86b4b6d818af7a0afc15f092"},
+ {file = "scipy-1.6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a423533c55fec61456dedee7b6ee7dce0bb6bfa395424ea374d25afa262be261"},
+ {file = "scipy-1.6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:33d6b7df40d197bdd3049d64e8e680227151673465e5d85723b3b8f6b15a6ced"},
+ {file = "scipy-1.6.1-cp37-cp37m-win32.whl", hash = "sha256:6725e3fbb47da428794f243864f2297462e9ee448297c93ed1dcbc44335feb78"},
+ {file = "scipy-1.6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:5fa9c6530b1661f1370bcd332a1e62ca7881785cc0f80c0d559b636567fab63c"},
+ {file = "scipy-1.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bd50daf727f7c195e26f27467c85ce653d41df4358a25b32434a50d8870fc519"},
+ {file = "scipy-1.6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:f46dd15335e8a320b0fb4685f58b7471702234cba8bb3442b69a3e1dc329c345"},
+ {file = "scipy-1.6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0e5b0ccf63155d90da576edd2768b66fb276446c371b73841e3503be1d63fb5d"},
+ {file = "scipy-1.6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:2481efbb3740977e3c831edfd0bd9867be26387cacf24eb5e366a6a374d3d00d"},
+ {file = "scipy-1.6.1-cp38-cp38-win32.whl", hash = "sha256:68cb4c424112cd4be886b4d979c5497fba190714085f46b8ae67a5e4416c32b4"},
+ {file = "scipy-1.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:5f331eeed0297232d2e6eea51b54e8278ed8bb10b099f69c44e2558c090d06bf"},
+ {file = "scipy-1.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c8a51d33556bf70367452d4d601d1742c0e806cd0194785914daf19775f0e67"},
+ {file = "scipy-1.6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:83bf7c16245c15bc58ee76c5418e46ea1811edcc2e2b03041b804e46084ab627"},
+ {file = "scipy-1.6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:794e768cc5f779736593046c9714e0f3a5940bc6dcc1dba885ad64cbfb28e9f0"},
+ {file = "scipy-1.6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:5da5471aed911fe7e52b86bf9ea32fb55ae93e2f0fac66c32e58897cfb02fa07"},
+ {file = "scipy-1.6.1-cp39-cp39-win32.whl", hash = "sha256:8e403a337749ed40af60e537cc4d4c03febddcc56cd26e774c9b1b600a70d3e4"},
+ {file = "scipy-1.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:a5193a098ae9f29af283dcf0041f762601faf2e595c0db1da929875b7570353f"},
+ {file = "scipy-1.6.1.tar.gz", hash = "sha256:c4fceb864890b6168e79b0e714c585dbe2fd4222768ee90bc1aa0f8218691b11"},
+]
+setuptools-scm = [
+ {file = "setuptools_scm-6.4.2-py3-none-any.whl", hash = "sha256:acea13255093849de7ccb11af9e1fb8bde7067783450cee9ef7a93139bddf6d4"},
+ {file = "setuptools_scm-6.4.2.tar.gz", hash = "sha256:6833ac65c6ed9711a4d5d2266f8024cfa07c533a0e55f4c12f6eff280a5a9e30"},
+]
+six = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+snowballstemmer = [
+ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
+ {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
+]
+stevedore = [
+ {file = "stevedore-3.5.0-py3-none-any.whl", hash = "sha256:a547de73308fd7e90075bb4d301405bebf705292fa90a90fc3bcf9133f58616c"},
+ {file = "stevedore-3.5.0.tar.gz", hash = "sha256:f40253887d8712eaa2bb0ea3830374416736dc8ec0e22f5a65092c1174c44335"},
+]
+tensorboardx = [
+ {file = "tensorboardX-2.2-py2.py3-none-any.whl", hash = "sha256:236409a0144094b7ed1f7fc003d27c73a529b7fe326194a26b0f98d40c763779"},
+ {file = "tensorboardX-2.2.tar.gz", hash = "sha256:b68e08adae2d0c6d09721a9b7a3e22000b7d2b57faec069b5491aeace689f76f"},
+]
+testfixtures = [
+ {file = "testfixtures-6.18.3-py2.py3-none-any.whl", hash = "sha256:6ddb7f56a123e1a9339f130a200359092bd0a6455e31838d6c477e8729bb7763"},
+ {file = "testfixtures-6.18.3.tar.gz", hash = "sha256:2600100ae96ffd082334b378e355550fef8b4a529a6fa4c34f47130905c7426d"},
+]
+threadpoolctl = [
+ {file = "threadpoolctl-3.0.0-py3-none-any.whl", hash = "sha256:4fade5b3b48ae4b1c30f200b28f39180371104fccc642e039e0f2435ec8cc211"},
+ {file = "threadpoolctl-3.0.0.tar.gz", hash = "sha256:d03115321233d0be715f0d3a5ad1d6c065fe425ddc2d671ca8e45e9fd5d7a52a"},
+]
+tifffile = [
+ {file = "tifffile-2021.11.2-py3-none-any.whl", hash = "sha256:2e0066f90e2dbeb3e6a287cfd78bafbd2f142fabbca4a76a8ff809573baf5ad5"},
+ {file = "tifffile-2021.11.2.tar.gz", hash = "sha256:153e31fa1d892f482fabb2ae9f2561fa429ee42d01a6f67e58cee13637d9285b"},
+]
+toml = [
+ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
+ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
+]
+tomli = [
+ {file = "tomli-2.0.0-py3-none-any.whl", hash = "sha256:b5bde28da1fed24b9bd1d4d2b8cba62300bfb4ec9a6187a957e8ddb9434c5224"},
+ {file = "tomli-2.0.0.tar.gz", hash = "sha256:c292c34f58502a1eb2bbb9f5bbc9a5ebc37bee10ffb8c2d6bbdfa8eb13cc14e1"},
+]
+torch = [
+ {file = "torch-1.8.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:f23eeb1a48cc39209d986c418ad7e02227eee973da45c0c42d36b1aec72f4940"},
+ {file = "torch-1.8.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:4ace9c5bb94d5a7b9582cd089993201658466e9c59ff88bd4e9e08f6f072d1cf"},
+ {file = "torch-1.8.1-cp36-cp36m-win_amd64.whl", hash = "sha256:6ffa1e7ae079c7cb828712cb0cdaae5cc4fb87c16a607e6d14526b62c20bcc17"},
+ {file = "torch-1.8.1-cp36-none-macosx_10_9_x86_64.whl", hash = "sha256:16f2630d9604c4ee28ea7d6e388e2264cd7bc6031c6ecd796bae3f56b5efa9a3"},
+ {file = "torch-1.8.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:95b7bbbacc3f28fe438f418392ceeae146a01adc03b29d44917d55214ac234c9"},
+ {file = "torch-1.8.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:55137feb2f5a0dc7aced5bba690dcdb7652054ad3452b09a2bbb59f02a11e9ff"},
+ {file = "torch-1.8.1-cp37-cp37m-win_amd64.whl", hash = "sha256:8ad2252bf09833dcf46a536a78544e349b8256a370e03a98627ebfb118d9555b"},
+ {file = "torch-1.8.1-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:1388b30fbd262c1a053d6c9ace73bb0bd8f5871b4892b6f3e02d1d7bc9768563"},
+ {file = "torch-1.8.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:e7ad1649adb7dc2a450e70a3e51240b84fa4746c69c8f98989ce0c254f9fba3a"},
+ {file = "torch-1.8.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:3e4190c04dfd89c59bad06d5fe451446643a65e6d2607cc989eb1001ee76e12f"},
+ {file = "torch-1.8.1-cp38-cp38-win_amd64.whl", hash = "sha256:5c2e9a33d44cdb93ebd739b127ffd7da786bf5f740539539195195b186a05f6c"},
+ {file = "torch-1.8.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:c6ede2ae4dcd8214b63e047efabafa92493605205a947574cf358216ca4e440a"},
+ {file = "torch-1.8.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:ce7d435426f3dd14f95710d779aa46e9cd5e077d512488e813f7589fdc024f78"},
+ {file = "torch-1.8.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a50ea8ed900927fb30cadb63aa7a32fdd59c7d7abe5012348dfbe35a8355c083"},
+ {file = "torch-1.8.1-cp39-cp39-win_amd64.whl", hash = "sha256:dac4d10494e74f7e553c92d7263e19ea501742c4825ddd26c4decfa27be95981"},
+ {file = "torch-1.8.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:225ee4238c019b28369c71977327deeeb2bd1c6b8557e6fcf631b8866bdc5447"},
+]
+torchvision = [
+ {file = "torchvision-0.9.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:da4c4f7363b60b0637354974ea0a29dbc301f66c9f25d92ed5f10637909f3500"},
+ {file = "torchvision-0.9.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:8a937cd3b53656e15de03671f8a638b5e8e4c100725b854d73bdb51e41455e9e"},
+ {file = "torchvision-0.9.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f16ceec2862faaffc8fc19bca20e0e79ffdab18a53e6cb75e42e33d090e80d04"},
+ {file = "torchvision-0.9.1-cp36-cp36m-win_amd64.whl", hash = "sha256:99cd75163938b4b3728815696d75c0df8b66390c489abed2365a530a040059a1"},
+ {file = "torchvision-0.9.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8aa438869e3033cbd8749d041d1ca7beb6171ca9f7f47b42e742fabd6900f8fc"},
+ {file = "torchvision-0.9.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:a1421a26b21b8c098935c3375182470c4c4d99d5e14d81ec3ac14a35e7a85285"},
+ {file = "torchvision-0.9.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b14b5b7fed0b7dc6245c2608b9fd2262d5b375ba998e097b980a1046683ca7f6"},
+ {file = "torchvision-0.9.1-cp37-cp37m-win_amd64.whl", hash = "sha256:86e4facb1cf4670ab3d67b7a947f0c43cd0805ec269a5e22ad0b82be727bcb3b"},
+ {file = "torchvision-0.9.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d38d0d23c6ce6ba15eba094a9319393e429796ab2bab228fa3b996abc9e33c3f"},
+ {file = "torchvision-0.9.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:0bfcc3ab99128081bfc9a5c3ab31f5227c4df3b802e6d4217dac104bf5ba8636"},
+ {file = "torchvision-0.9.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:85f21862e504590eb4a77b1d9a1742156a296af55827fb8c82296601922b7ac1"},
+ {file = "torchvision-0.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:dda0dcb914bcab1a43f823348736b8b1c926bf1fbe9cbb3be892fdbe2ab6d097"},
+ {file = "torchvision-0.9.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:091812c9fa405bef12aca9b9c3e671fcae7c0a4945b68705534ba8a401396ad1"},
+ {file = "torchvision-0.9.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:46b82b6cdccd2cb982819165b6ddaa097629315377ba6bbf77bdcb02c2e83692"},
+ {file = "torchvision-0.9.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:92c936584e03dfca39ff31bbc4a4fb54edb08fe8362e75dc08a2fa4b43266068"},
+ {file = "torchvision-0.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:42bec9b8e8a1dcd478751457191f317f843fa463555c141994c809c4b11ad60d"},
+]
+tqdm = [
+ {file = "tqdm-4.62.3-py2.py3-none-any.whl", hash = "sha256:8dd278a422499cd6b727e6ae4061c40b48fce8b76d1ccbf5d34fca9b7f925b0c"},
+ {file = "tqdm-4.62.3.tar.gz", hash = "sha256:d359de7217506c9851b7869f3708d8ee53ed70a1b8edbba4dbcb47442592920d"},
+]
+typing-extensions = [
+ {file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"},
+ {file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"},
+]
+urllib3 = [
+ {file = "urllib3-1.26.8-py2.py3-none-any.whl", hash = "sha256:000ca7f471a233c2251c6c7023ee85305721bfdf18621ebff4fd17a8653427ed"},
+ {file = "urllib3-1.26.8.tar.gz", hash = "sha256:0e7c33d9a63e7ddfcb86780aac87befc2fbddf46c58dbb487e0855f7ceec283c"},
+]
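
For context on the hash lists above: `poetry install` re-verifies every downloaded artifact against these sha256 pins before installing it. Below is a minimal sketch of the same check, assuming the legacy `[metadata.files]` lock layout shown here and `tomllib` (stdlib on Python 3.11+; the `tomli` backport covers the 3.7+ range this project targets); the artifact path is hypothetical:

```python
# Sketch only: compare a downloaded artifact's sha256 with poetry.lock pins.
import hashlib
import tomllib  # Python 3.11+; on older interpreters: `import tomli as tomllib`
from pathlib import Path

def verify_artifact(artifact: Path, package: str,
                    lock_path: Path = Path("poetry.lock")) -> bool:
    with lock_path.open("rb") as f:
        lock = tomllib.load(f)
    # Legacy lock format: [metadata.files] maps package -> [{file, hash}, ...]
    pins = {entry["file"]: entry["hash"]
            for entry in lock["metadata"]["files"][package]}
    digest = "sha256:" + hashlib.sha256(artifact.read_bytes()).hexdigest()
    return pins.get(artifact.name) == digest

# e.g. verify_artifact(Path("scipy-1.6.1.tar.gz"), "scipy") -> True if untampered
```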
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..9881a0a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,65 @@
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.poetry]
+name = "esrgan"
+version = "0.1.2"
+description = "Implementation of the paper `ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks`"
+license = "CC-BY-NC-ND-4.0"
+authors = [
+ "Yauheni Kachan "
+]
+readme = "README.md"
+repository = "https://github.com/leverxgroup/esrgan"
+documentation = "https://esrgan.readthedocs.io/"
+keywords = [
+ "computer vision",
+ "super-resolution",
+ "generative adversarial networks",
+ "esrgan",
+ "pytorch",
+]
+classifiers = [
+ "Development Status :: 4 - Beta",
+ "Operating System :: OS Independent",
+ "License :: Free for non-commercial use",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Science/Research",
+ "Topic :: Scientific/Engineering :: Artificial Intelligence",
+ "Topic :: Scientific/Engineering :: Image Recognition",
+ "Topic :: Software Development :: Libraries",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+]
+
+[tool.poetry.scripts]
+esrgan-process-images = 'esrgan.utils.scripts.process_images:main'
+
+[tool.poetry.dependencies]
+python = "^3.7"
+
+catalyst = { version = "~21.12", extras = ["cv", "albu"] }
+jpeg4py = "^0.1.4"
+numpy = "^1.19.1"
+piq = "~0.6.0"
+torch = "~1.8.1"
+torchvision = "~0.9.1"
+
+[tool.poetry.dev-dependencies]
+darglint = "^1.8"
+doc8 = "^0.8"
+flake8 = "^3.8"
+flake8-broken-line = "^0.3"
+flake8-comprehensions = "^3.5"
+flake8-debugger = "^4.0"
+flake8-docstrings = "^1.6"
+flake8-eradicate = "^1.0"
+flake8-isort = "^4.0"
+flake8-quotes = "^3.2"
+flake8-rst-docstrings = "^0.2.3"
+isort = "^5.8"
+mypy = "^0.910"
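
One note on the version markers in this file: Poetry's caret and tilde shorthands allow different amounts of drift. `^1.19.1` means `>=1.19.1,<2.0.0`, while `~1.8.1` means `>=1.8.1,<1.9.0`, which is why torch/torchvision are held to a single minor series. A quick sketch with the `packaging` library (an assumption for illustration; Poetry resolves these constraints internally):

```python
# Sketch: the specifier sets Poetry's shorthands expand to.
from packaging.specifiers import SpecifierSet

caret_numpy = SpecifierSet(">=1.19.1,<2.0.0")  # numpy = "^1.19.1"
tilde_torch = SpecifierSet(">=1.8.1,<1.9.0")   # torch = "~1.8.1"

assert "1.22.0" in caret_numpy and "2.0.0" not in caret_numpy
assert "1.8.2" in tilde_torch and "1.9.0" not in tilde_torch
```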
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index c203ba2..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-albumentations==0.4.6
-jpeg4py==0.1.4
-numpy==1.19.1
-piq==0.5.1
-torch==1.6.0
-torchvision==0.7.0
-catalyst @ git+https://github.com/catalyst-team/catalyst.git@4f9fcd9c046b387378de6d295e7722b40361a020#egg=catalyst[cv]
diff --git a/setup.cfg b/setup.cfg
index 1379d49..cb08d22 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,56 +1,126 @@
-[catalyst]
-albumentations_required = True
-use_libjpeg_turbo = False
+# [catalyst]
+# use_libjpeg_turbo = True
+
+# All configuration for plugins and other utils is defined here.
+# Read more about `setup.cfg`:
+# https://docs.python.org/3/distutils/configfile.html
+
+
+# flake8 configuration:
+# https://flake8.pycqa.org/en/latest/user/configuration.html
+[flake8]
+convention = google
+doctests = True
+
+# plugins:
+max-complexity = 12
+max-line-length = 79
+max-doc-length = 79
+docstring-quotes = double
+inline-quotes = double
+multiline-quotes = double
+
+# darglint configuration:
+# https://github.com/terrencepreilly/darglint
+strictness = short
+
+# excluding some directories:
+exclude =
+ .git,
+ __pycache__,
+ .venv,
+ .eggs,
+ *.egg,
+ dist
+
+# exclude some pydoctest checks globally:
+ignore =
+ # D100: Missing docstring in public module
+ D100,
+ # D104: Missing docstring in public package
+ D104,
+ # D107: Missing docstring in __init__
+ D107,
+ # D205: 1 blank line required between summary line and description
+ D205,
+ # D301: Use r""" if any backslashes in a docstring
+ D301,
+ # D400: First line should end with a period
+ D400,
+ # D401: First line should be in imperative mood; try rephrasing
+ D401,
+ # RST201: Block quote ends without a blank line; unexpected unindent
+ RST201,
+ # RST203: Definition list ends without a blank line; unexpected unindent
+ RST203,
+ # RST210: Inline strong start-string without end-string
+ RST210,
+ # RST213: Inline emphasis start-string without end-string
+ RST213,
+ # RST301: Unexpected indentation
+ RST301,
+ # RST304: Unknown interpreted text role
+ RST304,
+ # N812: lowercase imported as non lowercase
+ N812
+
+
+# isort configuration:
+# https://github.com/timothycrosley/isort/wiki/isort-Settings
[isort]
multi_line_output = 5
-known_third_party = albumentations,catalyst,cv2,imageio,numpy,pycocotools,torch
-known_first_party = ttfnet
case_sensitive = False
combine_as_imports = True
+default_section = THIRDPARTY
force_grid_wrap = 0
force_sort_within_sections = True
include_trailing_comma = True
+known_first_party = esrgan
line_length = 79
lines_between_types = 0
order_by_type = False
use_parentheses = True
-[flake8]
-exclude = .git,__pycache__,build,dist
-ignore =
- # C812-C815: missing trailing comma
- C812,C815,
- # D100: Missing docstring in public module
- D100,
- # D104: Missing docstring in public package
- D104,
- # D107 Missing docstring in __init__
- D107,
- # D205: 1 blank line required between summary line and description
- D205,
- # D400: First line should end with a period
- D400,
- # D301: Use r""" if any backslashes in a docstring
- D301,
- # D401: First line should be in imperative mood; try rephrasing
- D401,
- # RST201: Block quote ends without a blank line; unexpected unindent
- RST201,
- # RST203: Definition list ends without a blank line; unexpected unindent
- RST203,
- # RST301: Unexpected indentation
- RST301,
- # RST304: Unknown interpreted text role
- RST304,
- # N812: lowercase imported as non lowercase
- N812,
- # WPS: ignore wemake-python-style errors
- WPS
-# extend-ignore = D101, D102, D107, DAR002, DAR101, D105, DAR102, W505, DAR201, D103
-max-line-length = 79
-max-doc-length = 79
-docstring-quotes = double
-inline-quotes = double
-multiline-quotes = double
-convention = google
+
+# mypy configurations:
+# https://mypy.readthedocs.io/en/latest/config_file.html
+[mypy]
+allow_redefinition = True
+check_untyped_defs = True
+disallow_incomplete_defs = True
+disallow_any_generics = True
+ignore_missing_imports = True
+implicit_reexport = False
+strict_optional = True
+strict_equality = True
+show_error_codes = True
+show_error_context = True
+no_implicit_optional = True
+warn_unused_ignores = True
+warn_redundant_casts = True
+warn_return_any = True
+warn_unused_configs = True
+warn_unreachable = True
+warn_no_return = True
+
+
+# doc8 configuration:
+# https://pypi.org/project/doc8/
+[doc8]
+ignore-path = docs/build
+max-line-length = 119
+sphinx = True
+
+
+# py.test configuration:
+# http://doc.pytest.org/en/latest/customize.html
+[tool:pytest]
+# directories that are not visited by pytest collector:
+norecursedirs = *.egg .eggs dist build docs .tox .git __pycache__
+doctest_optionflags = NUMBER NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
+filterwarnings =
+ ignore::DeprecationWarning
+addopts =
+ --strict-markers
+ --doctest-modules
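
Since `addopts` now turns every docstring into a test via `--doctest-modules`, the `NUMBER` flag configured above keeps float comparisons readable: expected values only need the precision actually written. A hypothetical docstring (not from this repo) that passes under this configuration:

```python
# Hypothetical example of the configured doctest flags in action.
def normalize(value: float, total: float) -> float:
    """Scale ``value`` into [0, 1].

    With NUMBER enabled, 0.33 matches the actual 0.3333... because
    floats are compared only at the precision written here.

    >>> normalize(1, 3)
    0.33
    >>> normalize(2, 2)
    1.0
    """
    return value / total
```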
diff --git a/setup.py b/setup.py
deleted file mode 100644
index dd3aec0..0000000
--- a/setup.py
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-from pathlib import Path
-from typing import Union
-
-from setuptools import find_packages, setup
-
-PROJECT_ROOT = Path(__file__).parent.resolve()
-
-
-def load_requirements(filename: Union[Path, str] = "requirements.txt"):
- with open(PROJECT_ROOT / filename) as f:
- return f.read().splitlines()
-
-
-def load_readme(filename: Union[Path, str] = "README.md") -> str:
- with open(PROJECT_ROOT / filename, encoding="utf-8") as f:
- return f"\n{f.read()}"
-
-
-def load_version(filename: Union[Path, str]) -> str:
- context = {}
- with open(PROJECT_ROOT / filename) as f:
- exec(f.read(), context)
- return context["__version__"]
-
-
-setup(
- name="esrgan",
- version=load_version(Path("esrgan", "__version__.py")),
- description=(
- "Implementation of paper"
- " `ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks`"
- ),
- long_description=load_readme("README.md"),
- long_description_content_type="text/markdown",
- license="Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)", # noqa: E501
- author="Yauheni Kachan",
- author_email="yauheni.kachan@leverx.com",
- python_requires=">=3.8.0",
- url="esrgan/experiment/config.yml",
- install_requires=load_requirements("requirements.txt"),
- packages=find_packages(exclude=("tests",)),
- include_package_data=True,
- entry_points={
- "console_scripts": [
- "esrgan-process-images=esrgan.utils.scripts.process_images:main",
- ],
- },
- classifiers=[
- "Development Status :: 4 - Beta",
- "Operating System :: OS Independent",
- "License :: Free for non-commercial use",
- "Intended Audience :: Developers",
- "Intended Audience :: Science/Research",
- "Topic :: Scientific/Engineering :: Artificial Intelligence",
- "Topic :: Scientific/Engineering :: Image Recognition",
- "Topic :: Software Development :: Libraries",
- "Topic :: Software Development :: Libraries :: Python Modules",
- "Programming Language :: Python",
- "Programming Language :: Python :: 3.8",
- ],
-)
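
With setup.py removed, the `exec`-based `load_version` helper goes away as well; the version is now declared in `[tool.poetry]` and can be read back from installed package metadata. A minimal sketch of the idiomatic replacement (`importlib.metadata` is stdlib on Python 3.8+; on 3.7 the `importlib-metadata` backport is needed):

```python
# Sketch: runtime version lookup, replacing the removed load_version() helper.
from importlib.metadata import version

print(version("esrgan"))  # -> "0.1.2" once the package is installed
```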
diff --git a/tests/pipeline/config.yml b/tests/pipeline/config.yml
new file mode 100644
index 0000000..96bd0f0
--- /dev/null
+++ b/tests/pipeline/config.yml
@@ -0,0 +1,262 @@
+shared:
+ upscale: &upscale 4
+ patch_size: &patch_size 40
+
+model:
+ _key_value: true
+
+ &generator_model generator:
+ _target_: esrgan.models.EncoderDecoderNet
+ encoder:
+ _target_: esrgan.models.ESREncoder
+ in_channels: &num_channels 3
+ out_channels: &latent_channels 64
+ num_basic_blocks: 16
+ growth_channels: 32
+ activation: &activation
+ _mode_: partial
+ _target_: torch.nn.LeakyReLU
+ negative_slope: 0.2
+ inplace: true
+ residual_scaling: 0.2
+ decoder:
+ _target_: esrgan.models.ESRNetDecoder
+ in_channels: *latent_channels
+ out_channels: *num_channels
+ scale_factor: *upscale
+ activation: *activation
+
+ &discriminator_model discriminator:
+ _target_: esrgan.models.VGGConv
+ encoder:
+ _target_: esrgan.models.StridedConvEncoder
+ pool:
+ _target_: catalyst.contrib.layers.AdaptiveAvgPool2d
+ output_size: [7,7]
+ head:
+ _target_: esrgan.models.LinearHead
+ in_channels: 25088
+ out_channels: 1
+ latent_channels: [1024]
+
+args:
+ logdir: logs/tests
+
+runner:
+ _target_: esrgan.runner.GANConfigRunner
+ generator_key: *generator_model
+ discriminator_key: *discriminator_model
+
+stages:
+ stage1_supervised:
+ num_epochs: 5
+
+ loaders: &loaders
+ train: &train_loader
+ _target_: torch.utils.data.DataLoader
+ dataset: &div2k_dataset
+ _target_: esrgan.datasets.DIV2KDataset
+ root: data
+ train: true
+ target_type: bicubic_X4
+ patch_size: [*patch_size,*patch_size]
+ transform:
+ _target_: albumentations.Compose
+ transforms:
+ - _target_: albumentations.Normalize
+ mean: 0
+ std: 1
+ - _target_: albumentations.ToTensorV2
+ additional_targets:
+ real_image: image
+ low_resolution_image_key: image
+ high_resolution_image_key: real_image
+ download: true
+ batch_size: 2
+ shuffle: true
+ num_workers: 8
+ pin_memory: true
+ drop_last: true
+
+ valid:
+ << : [*train_loader]
+ dataset:
+ << : [*div2k_dataset]
+ train: false
+ batch_size: 1
+ drop_last: false
+
+ criterion: &criterions
+ content_loss:
+ _target_: torch.nn.L1Loss
+
+ optimizer:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.Adam
+ lr: 0.0002
+ weight_decay: 0.0
+ _model: *generator_model
+
+ scheduler:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.lr_scheduler.StepLR
+ step_size: 3
+ gamma: 0.5
+ _optimizer: generator
+
+ callbacks: &callbacks
+ psnr_metric:
+ _target_: catalyst.callbacks.FunctionalMetricCallback
+ metric_fn:
+ _target_: piq.psnr
+ data_range: 1.0
+ reduction: mean
+ convert_to_greyscale: false
+ input_key: real_image
+ target_key: fake_image
+ metric_key: psnr
+ ssim_metric:
+ _target_: catalyst.callbacks.FunctionalMetricCallback
+ metric_fn:
+ _target_: piq.ssim
+ kernel_size: 11
+ kernel_sigma: 1.5
+ data_range: 1.0
+ reduction: mean
+ k1: 0.01
+ k2: 0.03
+ input_key: real_image
+ target_key: fake_image
+ metric_key: ssim
+
+ loss_content:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: real_image
+ target_key: fake_image
+ metric_key: loss_content
+ criterion_key: content_loss
+
+ optimizer_generator:
+ _target_: catalyst.callbacks.OptimizerCallback
+ metric_key: loss_content
+ model_key: *generator_model
+ optimizer_key: generator
+ grad_clip_fn: &grad_clip_fn
+ _mode_: partial
+ _target_: torch.nn.utils.clip_grad_value_
+ clip_value: 5.0
+
+ scheduler_generator:
+ _target_: catalyst.callbacks.SchedulerCallback
+ scheduler_key: generator
+ loader_key: valid
+ metric_key: loss_content
+
+ stage2_gan:
+ num_epochs: 5
+
+ loaders: *loaders
+
+ criterion:
+ << : [*criterions]
+
+ perceptual_loss:
+ _target_: esrgan.nn.PerceptualLoss
+ layers:
+ conv5_4: 1.0
+
+ adversarial_generator_loss:
+ _target_: &adversarial_criterion esrgan.nn.RelativisticAdversarialLoss
+ mode: generator
+ adversarial_discriminator_loss:
+ _target_: *adversarial_criterion
+ mode: discriminator
+
+ optimizer:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.AdamW
+ lr: 0.0001
+ weight_decay: 0.0
+ _model: *generator_model
+
+ discriminator:
+ _target_: torch.optim.AdamW
+ lr: 0.0001
+ weight_decay: 0.0
+ _model: *discriminator_model
+
+ scheduler:
+ _key_value: true
+
+ generator:
+ _target_: torch.optim.lr_scheduler.MultiStepLR
+ milestones: &scheduler_milestones [2, 4]
+ gamma: 0.5
+ _optimizer: generator
+
+ discriminator:
+ _target_: torch.optim.lr_scheduler.MultiStepLR
+ milestones: *scheduler_milestones
+ gamma: 0.5
+ _optimizer: discriminator
+
+ callbacks:
+ << : [*callbacks]
+
+ loss_perceptual:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: real_image
+ target_key: fake_image
+ metric_key: loss_perceptual
+ criterion_key: perceptual_loss
+ loss_adversarial:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: g_fake_logits
+ target_key: g_real_logits
+ metric_key: loss_adversarial
+ criterion_key: adversarial_generator_loss
+ loss_generator:
+ _target_: catalyst.callbacks.MetricAggregationCallback
+ metric_key: &generator_loss loss_generator
+ metrics:
+ loss_content: 0.01
+ loss_perceptual: 1.0
+ loss_adversarial: 0.005
+ mode: weighted_sum
+
+ loss_discriminator:
+ _target_: catalyst.callbacks.CriterionCallback
+ input_key: d_fake_logits
+ target_key: d_real_logits
+ metric_key: &discriminator_loss loss_discriminator
+ criterion_key: adversarial_discriminator_loss
+
+ optimizer_generator:
+ _target_: catalyst.callbacks.OptimizerCallback
+ metric_key: *generator_loss
+ model_key: *generator_model
+ optimizer_key: generator
+ grad_clip_fn: *grad_clip_fn
+ optimizer_discriminator:
+ _target_: catalyst.callbacks.OptimizerCallback
+ metric_key: *discriminator_loss
+ model_key: *discriminator_model
+ optimizer_key: discriminator
+ grad_clip_fn: *grad_clip_fn
+
+ scheduler_generator:
+ _target_: catalyst.callbacks.SchedulerCallback
+ scheduler_key: generator
+ loader_key: valid
+ metric_key: *generator_loss
+ scheduler_discriminator:
+ _target_: catalyst.callbacks.SchedulerCallback
+ scheduler_key: discriminator
+ loader_key: valid
+ metric_key: *discriminator_loss
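
The config above leans heavily on YAML anchors (`&`), aliases (`*`), and merge keys (`<<`) so that the valid loader and the stage-2 criterion/callbacks reuse the stage-1 definitions instead of repeating them. A self-contained sketch of how the merge resolves, using PyYAML (assumed available; shown on a stripped-down loader mapping, not the full config):

```python
# Sketch: `valid` inherits every key from `train` via `<<` and overrides
# only what it restates -- the same pattern as the loaders section above.
import yaml

doc = """
train: &train_loader
  batch_size: 2
  shuffle: true
  drop_last: true
valid:
  <<: [*train_loader]
  batch_size: 1
  drop_last: false
"""

loaders = yaml.safe_load(doc)
assert loaders["valid"] == {"batch_size": 1, "shuffle": True, "drop_last": False}
```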