simplify mypy #776

Merged · 7 commits · Nov 26, 2021
26 changes: 13 additions & 13 deletions .github/workflows/code-format.yml
@@ -22,16 +22,16 @@ jobs:
       - name: PEP8
         run: flake8 .

-  #typing-check-mypy:
-  #  runs-on: ubuntu-20.04
-  #  steps:
-  #  - uses: actions/checkout@master
-  #  - uses: actions/setup-python@v2
-  #    with:
-  #      python-version: 3.8
-  #  - name: Install mypy
-  #    run: |
-  #      pip install mypy
-  #      pip list
-  #  - name: mypy
-  #    run: mypy
+  typing-check-mypy:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@master
+      - uses: actions/setup-python@v2
+        with:
+          python-version: 3.8
+      - name: Install mypy
+        run: |
+          pip install mypy types-setuptools
+          pip list
+      - name: mypy
+        run: mypy
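Note: `types-setuptools` is presumably added because mypy 0.900+ ships without third-party stubs, so `pkg_resources` typing now comes from a separate stub package. A minimal sketch of the kind of code this affects (the `get_distribution` call is illustrative, not taken from this repo):

```python
# Hypothetical example: with types-setuptools installed, mypy type-checks
# pkg_resources usage instead of treating the module as untyped.
import pkg_resources


def torch_version() -> str:
    return pkg_resources.get_distribution("torch").version
```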
6 changes: 3 additions & 3 deletions pl_bolts/callbacks/byol_updates.py
@@ -58,14 +58,14 @@ def on_train_batch_end(
         self.current_tau = self.update_tau(pl_module, trainer)

     def update_tau(self, pl_module: LightningModule, trainer: Trainer) -> float:
-        max_steps = len(trainer.train_dataloader) * trainer.max_epochs  # type: ignore[attr-defined]
+        max_steps = len(trainer.train_dataloader) * trainer.max_epochs
         tau = 1 - (1 - self.initial_tau) * (math.cos(math.pi * pl_module.global_step / max_steps) + 1) / 2
         return tau

     def update_weights(self, online_net: Union[Module, Tensor], target_net: Union[Module, Tensor]) -> None:
         # apply MA weight update
         for (name, online_p), (_, target_p) in zip(
-            online_net.named_parameters(),  # type: ignore[union-attr]
-            target_net.named_parameters(),  # type: ignore[union-attr]
+            online_net.named_parameters(),
+            target_net.named_parameters(),
         ):
             target_p.data = self.current_tau * target_p.data + (1 - self.current_tau) * online_p.data
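For context, a standalone sketch of the schedule and moving-average update touched in this hunk (names are illustrative; 0.996 is the usual BYOL initial tau):

```python
import math

import torch
from torch import nn


def cosine_tau(global_step: int, max_steps: int, initial_tau: float = 0.996) -> float:
    # Cosine schedule as in update_tau(): tau anneals from initial_tau toward 1.0.
    return 1 - (1 - initial_tau) * (math.cos(math.pi * global_step / max_steps) + 1) / 2


@torch.no_grad()
def ema_update(online: nn.Module, target: nn.Module, tau: float) -> None:
    # Moving-average update as in update_weights():
    # target <- tau * target + (1 - tau) * online.
    for online_p, target_p in zip(online.parameters(), target.parameters()):
        target_p.data = tau * target_p.data + (1 - tau) * online_p.data
```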
3 changes: 1 addition & 2 deletions pl_bolts/callbacks/data_monitor.py
@@ -17,7 +17,6 @@
     import wandb
 else:  # pragma: no cover
     warn_missing_pkg("wandb")
-    wandb = None  # type: ignore


 class DataMonitorBase(Callback):
@@ -44,7 +43,7 @@ def __init__(self, log_every_n_steps: int = None):

     def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
         self._log = self._is_logger_available(trainer.logger)
-        self._log_every_n_steps = self._log_every_n_steps or trainer.log_every_n_steps  # type: ignore[attr-defined]
+        self._log_every_n_steps = self._log_every_n_steps or trainer.log_every_n_steps
         self._trainer = trainer

     def on_train_batch_start(
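Dropping the `wandb = None` fallback means the name is simply undefined when the package is missing, so callers are expected to gate on availability rather than compare against `None`. A generic sketch of that guard pattern (`_WANDB_AVAILABLE` is illustrative; pl_bolts uses its own availability helpers):

```python
import importlib.util
from warnings import warn

# True when the wandb package can be imported in this environment.
_WANDB_AVAILABLE = importlib.util.find_spec("wandb") is not None

if _WANDB_AVAILABLE:
    import wandb
else:  # pragma: no cover
    warn("`wandb` is not installed; W&B logging is disabled.")
```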
6 changes: 3 additions & 3 deletions pl_bolts/callbacks/knn_online.py
@@ -27,7 +27,7 @@ class KNNOnlineEvaluator(Callback):
     )
     """

-    def __init__(self, k=200, temperature=0.07) -> None:
+    def __init__(self, k: int = 200, temperature: float = 0.07) -> None:
         """
         Args:
             k: k for k nearest neighbor
@@ -42,7 +42,7 @@ def setup(self, trainer: Trainer, pl_module: LightningModule, stage: Optional[st
         self.num_classes = trainer.datamodule.num_classes
         self.dataset = trainer.datamodule.name

-    def predict(self, query_feature: Tensor, feature_bank: Tensor, target_bank: Tensor):
+    def predict(self, query_feature: Tensor, feature_bank: Tensor, target_bank: Tensor) -> Tensor:
         """
         Args:
             query_feature: (B, D) a batch of B query vectors with dim=D
@@ -132,5 +132,5 @@ def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule)
         pl_module.log("online_knn_val_acc", total_top1 / total_num, on_step=False, on_epoch=True, sync_dist=True)


-def concat_all_gather(tensor: Tensor, accelerator: Accelerator):
+def concat_all_gather(tensor: Tensor, accelerator: Accelerator) -> Tensor:
     return accelerator.all_gather(tensor).view(-1, *tensor.shape[1:])
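A quick shape walk-through of `concat_all_gather`, with a stacked tensor standing in for what `Accelerator.all_gather` returns across processes (assumed `world_size = 4`):

```python
import torch

world_size, batch, dim = 4, 8, 128
local = torch.randn(batch, dim)

# all_gather stacks one (batch, dim) tensor per process into
# (world_size, batch, dim); the view flattens the first two dims
# into a single batch of world_size * batch rows.
gathered = torch.stack([local] * world_size)  # stand-in for accelerator.all_gather(local)
flat = gathered.view(-1, *local.shape[1:])
assert flat.shape == (world_size * batch, dim)
```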
6 changes: 3 additions & 3 deletions pl_bolts/callbacks/sparseml.py
@@ -11,7 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from typing import Optional
+from typing import Any, Optional

 import torch
 from pytorch_lightning import Callback, LightningModule, Trainer
@@ -32,7 +32,7 @@ class SparseMLCallback(Callback):
     More information at https://docs.neuralmagic.com/sparseml/source/recipes.html
     """

-    def __init__(self, recipe_path):
+    def __init__(self, recipe_path: str):
         if not _SPARSEML_AVAILABLE:
             if not _PL_GREATER_EQUAL_1_4_5:
                 raise MisconfigurationException("SparseML requires PyTorch Lightning 1.4.5 or greater.")
@@ -79,7 +79,7 @@ def _num_training_steps_per_epoch(self, trainer: Trainer) -> int:

     @staticmethod
     def export_to_sparse_onnx(
-        model: LightningModule, output_dir: str, sample_batch: Optional[torch.Tensor] = None, **export_kwargs
+        model: LightningModule, output_dir: str, sample_batch: Optional[torch.Tensor] = None, **export_kwargs: Any
     ) -> None:
         """Exports the model to ONNX format."""
         with model._prevent_trainer_and_dataloaders_deepcopy():
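`**export_kwargs: Any` annotates the keyword *values* as `Any` (inside the body, `export_kwargs` behaves like a `dict[str, Any]`), which is what `disallow_untyped_defs` requires. A minimal sketch (the `opset_version` key is hypothetical):

```python
from typing import Any


def export(output_dir: str, **export_kwargs: Any) -> None:
    # The annotation types each keyword value; the parameter itself is a dict.
    opset = export_kwargs.get("opset_version", 11)
    print(f"exporting to {output_dir} with opset {opset}")
```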
4 changes: 2 additions & 2 deletions pl_bolts/callbacks/ssl_online.py
@@ -100,7 +100,7 @@ def on_train_batch_end(
         representations = representations.detach()

         # forward pass
-        mlp_logits = pl_module.non_linear_evaluator(representations)  # type: ignore[operator]
+        mlp_logits = pl_module.non_linear_evaluator(representations)
         mlp_loss = F.cross_entropy(mlp_logits, y)

         # update finetune weights
@@ -130,7 +130,7 @@ def on_validation_batch_end(
         representations = representations.detach()

         # forward pass
-        mlp_logits = pl_module.non_linear_evaluator(representations)  # type: ignore[operator]
+        mlp_logits = pl_module.non_linear_evaluator(representations)
         mlp_loss = F.cross_entropy(mlp_logits, y)

         # log metrics
2 changes: 1 addition & 1 deletion pl_bolts/callbacks/torch_ort.py
@@ -35,7 +35,7 @@ class ORTCallback(Callback):
         trainer = flash.Trainer(callbacks=ORTCallback())
     """

-    def __init__(self):
+    def __init__(self) -> None:
         if not _TORCH_ORT_AVAILABLE:
             raise MisconfigurationException(
                 "Torch ORT is required to use ORT. See here for installation: https://github.com/pytorch/ort"
6 changes: 2 additions & 4 deletions pl_bolts/callbacks/variational.py
@@ -59,10 +59,8 @@ def __init__(

     def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
         if (trainer.current_epoch + 1) % self.interpolate_epoch_interval == 0:
-            images = self.interpolate_latent_space(
-                pl_module, latent_dim=pl_module.hparams.latent_dim  # type: ignore[union-attr]
-            )
-            images = torch.cat(images, dim=0)  # type: ignore[assignment]
+            images = self.interpolate_latent_space(pl_module, latent_dim=pl_module.hparams.latent_dim)
+            images = torch.cat(images, dim=0)

             num_rows = self.steps
             grid = torchvision.utils.make_grid(images, nrow=num_rows, normalize=self.normalize)
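The `# type: ignore[assignment]` on the `torch.cat` line can likely go because the new config (see pyproject.toml below) sets `allow_redefinition = True`, which lets a variable be rebound with a different type at the same block and nesting level. A sketch of the pattern:

```python
import torch


def collect_images() -> torch.Tensor:
    # Under allow_redefinition = True, rebinding `images` from a list of
    # tensors to a single Tensor is accepted; otherwise mypy reports an
    # [assignment] error on the second line.
    images = [torch.zeros(1, 3, 32, 32) for _ in range(4)]
    images = torch.cat(images, dim=0)
    return images
```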
8 changes: 4 additions & 4 deletions pl_bolts/callbacks/vision/confused_logit.py
@@ -77,7 +77,7 @@ def on_train_batch_end(
         dataloader_idx: int,
     ) -> None:
         # show images only every 20 batches
-        if (batch_idx + 1) % self.logging_batch_interval != 0:  # type: ignore[attr-defined]
+        if (batch_idx + 1) % self.logging_batch_interval != 0:
             return

         # pick the last batch and logits
@@ -92,9 +92,9 @@ def training_step(...):
             raise AttributeError(m) from err

         # only check when it has opinions (ie: the logit > 5)
-        if logits.max() > self.min_logit_value:  # type: ignore[operator]
+        if logits.max() > self.min_logit_value:
             # pick the top two confused probs
-            (values, idxs) = torch.topk(logits, k=2, dim=1)  # type: ignore[arg-type]
+            (values, idxs) = torch.topk(logits, k=2, dim=1)

             # care about only the ones that are at most eps close to each other
             eps = self.max_logit_difference
@@ -132,7 +132,7 @@ def _plot(

         batch_size, c, w, h = confusing_x.size()
         for logit_i, x_param in enumerate((x_param_a, x_param_b)):
-            x_param = x_param.to(model.device)  # type: ignore[assignment]
+            x_param = x_param.to(model.device)
             logits = model(x_param)
             logits[:, mask_idxs[:, logit_i]].sum().backward()
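For reference, the gating logic this hunk touches, in isolation: only act when the model is confident (max logit above `min_logit_value`) yet its top two logits sit within `eps` of each other (threshold values below are illustrative):

```python
import torch

logits = torch.tensor([[7.0, 6.9, 0.1]])
min_logit_value, eps = 5.0, 0.2  # stand-ins for the callback's attributes

if logits.max() > min_logit_value:
    # top two logits per sample
    values, idxs = torch.topk(logits, k=2, dim=1)
    # "confused" when the top two are nearly tied
    confused = (values[:, 0] - values[:, 1]).abs() < eps
    print(confused)  # tensor([True])
```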
2 changes: 1 addition & 1 deletion pl_bolts/callbacks/vision/image_generation.py
@@ -69,7 +69,7 @@ def __init__(
         self.pad_value = pad_value

     def on_epoch_end(self, trainer: Trainer, pl_module: LightningModule) -> None:
-        dim = (self.num_samples, pl_module.hparams.latent_dim)  # type: ignore[union-attr]
+        dim = (self.num_samples, pl_module.hparams.latent_dim)
         z = torch.normal(mean=0.0, std=1.0, size=dim, device=pl_module.device)

         # generate images
2 changes: 1 addition & 1 deletion pl_bolts/datamodules/vision_datamodule.py
@@ -82,7 +82,7 @@ def setup(self, stage: Optional[str] = None) -> None:

     def _split_dataset(self, dataset: Dataset, train: bool = True) -> Dataset:
         """Splits the dataset into train and validation set."""
-        len_dataset = len(dataset)  # type: ignore[arg-type]
+        len_dataset = len(dataset)
         splits = self._get_splits(len_dataset)
         dataset_train, dataset_val = random_split(dataset, splits, generator=torch.Generator().manual_seed(self.seed))
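The seeded generator in `_split_dataset` makes the train/val split reproducible across runs; a quick demonstration:

```python
import torch
from torch.utils.data import TensorDataset, random_split

dataset = TensorDataset(torch.arange(10).float())
# Two splits with the same seed select identical validation indices.
split_a = random_split(dataset, [8, 2], generator=torch.Generator().manual_seed(42))
split_b = random_split(dataset, [8, 2], generator=torch.Generator().manual_seed(42))
assert split_a[1].indices == split_b[1].indices
```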
3 changes: 2 additions & 1 deletion pl_bolts/utils/__init__.py
@@ -1,5 +1,6 @@
 import importlib
 import operator
+from typing import Callable

 import torch
 from packaging.version import Version
@@ -10,7 +11,7 @@


 # Ported from https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/utilities/imports.py
-def _compare_version(package: str, op, version) -> bool:
+def _compare_version(package: str, op: Callable, version: str) -> bool:
     """Compare package version with some requirements.

     >>> _compare_version("torch", operator.ge, "0.1")
36 changes: 36 additions & 0 deletions pyproject.toml
@@ -18,3 +18,39 @@ known_first_party = [
 skip_glob = []
 profile = "black"
 line_length = 120
+
+[tool.mypy]
+files = ["pl_bolts"]
+disallow_untyped_defs = "True"
+ignore_missing_imports = "True"
+show_error_codes = "True"
+warn_redundant_casts = "True"
+warn_unused_configs = "True"
+warn_unused_ignores = "True"
+allow_redefinition = "True"
+# disable this rule as the Trainer attributes are defined in the connectors, not in its __init__
+disable_error_code = "attr-defined"
+# style choices
+warn_no_return = "False"
+
+# TODO: Fix typing for these modules
+[[tool.mypy.overrides]]
+module = [
+    "pl_bolts.datasets.*",
+    "pl_bolts.datamodules",
+    "pl_bolts.datamodules.experience_source",
+    "pl_bolts.datamodules.sklearn_datamodule",
+    "pl_bolts.datamodules.vocdetection_datamodule",
+    "pl_bolts.losses.*",
+    "pl_bolts.metrics.*",
+    "pl_bolts.models.mnist_module",
+    "pl_bolts.models.autoencoders.*",
+    "pl_bolts.models.detection.*",
+    "pl_bolts.models.gans.*",
+    "pl_bolts.models.rl.*",
+    "pl_bolts.models.self_supervised.*",
+    "pl_bolts.models.vision.*",
+    "pl_bolts.optimizers.*",
+    "pl_bolts.transforms.*",
+]
+ignore_errors = "True"
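The global `disable_error_code = "attr-defined"` matches several ignores removed above (e.g. `trainer.log_every_n_steps` in data_monitor.py): those attributes are attached via the Trainer's connectors at runtime, so mypy cannot find them on the class. A sketch of an access that type-checks under this config (assuming the attribute exists at runtime):

```python
from pytorch_lightning import Trainer


def effective_log_interval(trainer: Trainer) -> int:
    # With attr-defined disabled, mypy does not flag attributes it cannot
    # statically resolve, such as ones set by the Trainer's connectors.
    return trainer.log_every_n_steps
```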
65 changes: 0 additions & 65 deletions setup.cfg
@@ -57,68 +57,3 @@ license_file = LICENSE
 description-file = README.md
 # long_description = file:README.md
 # long_description_content_type = text/markdown
-
-
-[mypy]
-# Typing tests is low priority, but enabling type checking on the
-# untyped test functions (using `--check-untyped-defs`) is still
-# high-value because it helps test the typing.
-files = pl_bolts, tests
-pretty = True
-disallow_untyped_defs = True
-ignore_missing_imports = True
-show_error_codes = True
-warn_redundant_casts = True
-warn_unused_configs = True
-
-[mypy-pl_bolts.datasets.*]
-ignore_errors = True
-
-[mypy-pl_bolts.datamodules]
-# pl_bolts/datamodules/__init__.py
-ignore_errors = True
-
-[mypy-pl_bolts.datamodules.experience_source]
-ignore_errors = True
-
-[mypy-pl_bolts.datamodules.sklearn_datamodule]
-ignore_errors = True
-
-[mypy-pl_bolts.datamodules.vocdetection_datamodule]
-ignore_errors = True
-
-[mypy-pl_bolts.losses.*]
-ignore_errors = True
-
-[mypy-pl_bolts.metrics.*]
-ignore_errors = True
-
-[mypy-pl_bolts.models.mnist_module]
-ignore_errors = True
-
-[mypy-pl_bolts.models.autoencoders.*]
-ignore_errors = True
-
-[mypy-pl_bolts.models.detection.*]
-ignore_errors = True
-
-[mypy-pl_bolts.models.gans.*]
-ignore_errors = True
-
-[mypy-pl_bolts.models.rl.*]
-ignore_errors = True
-
-[mypy-pl_bolts.models.self_supervised.*]
-ignore_errors = True
-
-[mypy-pl_bolts.models.vision.*]
-ignore_errors = True
-
-[mypy-pl_bolts.optimizers.*]
-ignore_errors = True
-
-[mypy-pl_bolts.transforms.*]
-ignore_errors = True
-
-[mypy-tests.*]
-ignore_errors = True