From 1fd1e42aa60f8e7e08553389051527229ae137c9 Mon Sep 17 00:00:00 2001
From: Jirka Borovec
Date: Sat, 9 Nov 2019 14:59:14 +0900
Subject: [PATCH] Fix setup-doc for pypi (#472)

* add Twine to CI
* freeze Twine
* freeze Twine
* minor refactoring
* try another
* fix req.
* update README
* fix __doc__
* fix multiple req. test-tube
---
 .travis.yml                   |  14 ++--
 README.md                     | 153 +++++++++++++++++-----------------
 pytorch_lightning/__init__.py |   7 +-
 requirements.txt              |   1 -
 setup.cfg                     |  15 +++-
 setup.py                      |   9 +-
 tests/requirements.txt        |   3 +-
 tox.ini                       |   4 +-
 8 files changed, 111 insertions(+), 95 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 1ad6eba5c086a..30e9cccc30b76 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,16 +16,13 @@ language: python

 matrix:
   include:
-    - os: linux
-      dist: xenial # Ubuntu 16.04
+#    - dist: xenial # Ubuntu 16.04
+#      python: 3.5
+#      env: TOXENV=py35
+    - dist: bionic # Ubuntu 18.04
       python: 3.6
       env: TOXENV=py36
-    - os: linux
-      dist: bionic # Ubuntu 18.04
-      python: 3.6
-      env: TOXENV=py36
-    - os: linux
-      dist: bionic # Ubuntu 18.04
+    - dist: bionic # Ubuntu 18.04
       python: 3.7
       env: TOXENV=py37
     - os: osx
@@ -58,6 +55,7 @@ script:
   # integration
   - tox --sitepackages
   - pip install --editable .
+  #- python setup.py install --dry-run --user

 after_success:
   - coverage report

diff --git a/README.md b/README.md
index 6262c03b320f1..df48531de4d22 100644
--- a/README.md
+++ b/README.md
@@ -63,85 +63,85 @@ Lightning sets up all the boilerplate state-of-the-art training for you so you c
 ---
 ## How do I use it?
-Think about Lightning as refactoring your research code instead of using a new framework. The research code goes into a [LightningModule]((https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/)) which you fit using a Trainer.
+Think about Lightning as refactoring your research code instead of using a new framework. The research code goes into a [LightningModule](https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/) which you fit using a Trainer.

 The LightningModule defines a *system* such as seq-2-seq, GAN, etc... It can ALSO define a simple classifier such as the example below.

 To use lightning do 2 things:
-1. [Define a LightningModule](https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/)
-
-**WARNING:** This syntax is for version 0.5.0+ where abbreviations were removed.
-```python
-import os
-import torch
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-
-import pytorch_lightning as pl
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self):
-        super(CoolSystem, self).__init__()
-        # not the best model...
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_nb):
-        # REQUIRED
-        x, y = batch
-        y_hat = self.forward(x)
-        loss = F.cross_entropy(y_hat, y)
-        tensorboard_logs = {'train_loss': loss}
-        return {'loss': loss, 'log': tensorboard_logs}
-
-    def validation_step(self, batch, batch_nb):
-        # OPTIONAL
-        x, y = batch
-        y_hat = self.forward(x)
-        return {'val_loss': F.cross_entropy(y_hat, y)}
-
-    def validation_end(self, outputs):
-        # OPTIONAL
-        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-        tensorboard_logs = {'val_loss': avg_loss}
-        return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
-
-    def configure_optimizers(self):
-        # REQUIRED
-        # can return multiple optimizers and learning_rate schedulers
-        # (LBFGS is automatically supported, no need for a closure function)
-        return torch.optim.Adam(self.parameters(), lr=0.02)
-
-    @pl.data_loader
-    def train_dataloader(self):
-        # REQUIRED
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
-
-    @pl.data_loader
-    def val_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
-
-    @pl.data_loader
-    def test_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), batch_size=32)
-```
+1. [Define a LightningModule](https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/)
+**WARNING:** This syntax is for version 0.5.0+ where abbreviations were removed.
+   ```python
+   import os
+
+   import torch
+   from torch.nn import functional as F
+   from torch.utils.data import DataLoader
+   from torchvision.datasets import MNIST
+   from torchvision import transforms
+
+   import pytorch_lightning as pl
+
+   class CoolSystem(pl.LightningModule):
+
+       def __init__(self):
+           super(CoolSystem, self).__init__()
+           # not the best model...
+           self.l1 = torch.nn.Linear(28 * 28, 10)
+
+       def forward(self, x):
+           return torch.relu(self.l1(x.view(x.size(0), -1)))
+
+       def training_step(self, batch, batch_nb):
+           # REQUIRED
+           x, y = batch
+           y_hat = self.forward(x)
+           loss = F.cross_entropy(y_hat, y)
+           tensorboard_logs = {'train_loss': loss}
+           return {'loss': loss, 'log': tensorboard_logs}
+
+       def validation_step(self, batch, batch_nb):
+           # OPTIONAL
+           x, y = batch
+           y_hat = self.forward(x)
+           return {'val_loss': F.cross_entropy(y_hat, y)}
+
+       def validation_end(self, outputs):
+           # OPTIONAL
+           avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
+           tensorboard_logs = {'val_loss': avg_loss}
+           return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
+
+       def configure_optimizers(self):
+           # REQUIRED
+           # can return multiple optimizers and learning_rate schedulers
+           # (LBFGS is automatically supported, no need for a closure function)
+           return torch.optim.Adam(self.parameters(), lr=0.02)
+
+       @pl.data_loader
+       def train_dataloader(self):
+           # REQUIRED
+           return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
+
+       @pl.data_loader
+       def val_dataloader(self):
+           # OPTIONAL
+           return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
+
+       @pl.data_loader
+       def test_dataloader(self):
+           # OPTIONAL
+           return DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), batch_size=32)
+   ```

 2. Fit with a [trainer](https://williamfalcon.github.io/pytorch-lightning/Trainer/)
-```python
-from pytorch_lightning import Trainer
-
-model = CoolSystem()
-
-# most basic trainer, uses good defaults
-trainer = Trainer()
-trainer.fit(model)
-```
+   ```python
+   from pytorch_lightning import Trainer
+
+   model = CoolSystem()
+
+   # most basic trainer, uses good defaults
+   trainer = Trainer()
+   trainer.fit(model)
+   ```

 Trainer sets up a tensorboard logger, early stopping and checkpointing by default (you can modify all of them or use something other than tensorboard).

@@ -166,7 +166,7 @@ trainer.fit(model)
 # view tensorboard logs
 logging.info(f'View tensorboard logs by running\ntensorboard --logdir {os.getcwd()}')
 logging.info('and going to http://localhost:6006 on your browser')
-``` 
+```

 When you're all done you can even run the test set separately.
 ```python
@@ -348,7 +348,8 @@ Lightning also adds a text column with all the hyperparameters for this experime
 - [9 key speed features in Pytorch-Lightning](https://towardsdatascience.com/9-tips-for-training-lightning-fast-neural-networks-in-pytorch-8e63a502f565)
 - [SLURM, multi-node training with Lightning](https://towardsdatascience.com/trivial-multi-node-training-with-pytorch-lightning-ff75dfb809bd)

----
+---
+
 ## Asking for help

 Welcome to the Lightning community!

diff --git a/pytorch_lightning/__init__.py b/pytorch_lightning/__init__.py
index f19210951d059..44e28145f9cb1 100644
--- a/pytorch_lightning/__init__.py
+++ b/pytorch_lightning/__init__.py
@@ -5,10 +5,9 @@
 __author_email__ = 'waf2107@columbia.edu'
 __license__ = 'Apache-2.0'
 __homepage__ = 'https://github.com/williamFalcon/pytorch-lightning'
-__docs__ = """# PyTorch Lightning
-
-The lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate.
-"""
+# this has to be a simple string, see: https://github.com/pypa/twine/issues/522
+__docs__ = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers." \
+    " Scale your models. Write less boilerplate."
 try:

diff --git a/requirements.txt b/requirements.txt
index 67b49415b999f..46586a41facee 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,5 @@
 scikit-learn==0.20.2
 tqdm==4.35.0
-twine==1.13.0
 numpy==1.16.4
 torch>=1.2.0
 torchvision>=0.3.0

diff --git a/setup.cfg b/setup.cfg
index f56376af3a00c..40283b196dab2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -41,12 +41,13 @@ exclude_lines =
     break
    pass
     os.makedirs
+# TODO: to be reviewed, this should not be skipped
 omit =
     pytorch_lightning/callbacks/pt_callbacks.py
     tests/test_models.py
     pytorch_lightning/testing_models/lm_test_module.py
     pytorch_lightning/utilities/arg_parse.py
-    examples/templates
+    pl_examples/templates

 [flake8]
 # TODO: this should be 88 or 100 according PEP8
@@ -58,3 +59,15 @@ verbose = 2
 # https://pep8.readthedocs.io/en/latest/intro.html#error-codes
 format = pylint
 ignore = E731,W504,F401,F841
+
+[check-manifest]
+ignore =
+    .travis.yml
+    tox.ini
+    .github
+    .github/*
+
+[metadata]
+license_file = LICENSE
+# long_description = file:README.md
+# long_description_content_type = text/markdown

diff --git a/setup.py b/setup.py
index 6b3b4c542290d..23e1149319e16 100755
--- a/setup.py
+++ b/setup.py
@@ -46,15 +46,18 @@ def load_requirements(path_dir=PATH_ROOT, comment_char='#'):
     url=pytorch_lightning.__homepage__,
     download_url='https://github.com/williamFalcon/pytorch-lightning',
     license=pytorch_lightning.__license__,
-    packages=find_packages(exclude=['examples']),
-    # long_description=open('README.md', encoding='utf-8').read(),
-    # long_description_content_type='text/markdown',
+    packages=find_packages(exclude=['tests']),
+
+    long_description=open('README.md', encoding='utf-8').read(),
+    long_description_content_type='text/markdown',
     include_package_data=True,
     zip_safe=False,

+    keywords=['deep learning', 'pytorch', 'AI'],
     python_requires='>=3.6',
     setup_requires=[],
     install_requires=load_requirements(PATH_ROOT),
+
     classifiers=[
         'Environment :: Console',
         'Natural Language :: English',

diff --git a/tests/requirements.txt b/tests/requirements.txt
index 3e44dab3d8f04..1ff186d835b9d 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -5,6 +5,7 @@ pytest>=3.0.5
 pytest-cov
 flake8
 check-manifest
-test_tube
+# test_tube # already installed in main req.
 mlflow
 comet_ml
+twine==1.13.0
\ No newline at end of file

diff --git a/tox.ini b/tox.ini
index 6d0c317174da2..1db46c9b036af 100644
--- a/tox.ini
+++ b/tox.ini
@@ -35,10 +35,12 @@ deps =
     -r ./tests/requirements.txt
 commands =
     pip list
-    check-manifest --ignore tox.ini
+    check-manifest
     python setup.py check --metadata --strict
     coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --doctest-modules
     flake8 .
+    python setup.py sdist
+    twine check dist/*

 # DROP, it is duplication of setup.cfg
 # [flake8]
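Note on the `__docs__` / long-description change: at the time, twine rejected package metadata whose short description spanned multiple lines (pypa/twine#522), which is why the multi-line `__docs__` docstring is collapsed to a plain one-line string while the full README is wired in as `long_description`. The sketch below shows that pattern in miniature; it is illustrative only, not the project's full setup.py, and the `name`/`version` values are placeholders.

```python
# Minimal sketch of the packaging pattern this patch adopts
# (illustrative; name/version are placeholders, not from the patch).
from setuptools import find_packages, setup

# Must stay a simple single-line string: twine of this era rejected
# metadata whose short description contained newlines (pypa/twine#522).
__docs__ = ("PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers."
            " Scale your models. Write less boilerplate.")

setup(
    name='my-package',    # placeholder
    version='0.0.1',      # placeholder
    description=__docs__,                                         # short, one line
    long_description=open('README.md', encoding='utf-8').read(),  # full README
    long_description_content_type='text/markdown',
    packages=find_packages(exclude=['tests']),
)
```

The two steps added to tox.ini then catch regressions before upload: `python setup.py sdist` builds the distribution, and `twine check dist/*` verifies that its metadata and README will render on PyPI.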
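Note on `install_requires=load_requirements(PATH_ROOT)`: only the helper's signature is visible (in the setup.py hunk header), so its body lies outside this diff. Below is a plausible implementation consistent with the `comment_char='#'` parameter, written as an assumed sketch rather than the project's exact code.

```python
import os

PATH_ROOT = os.path.dirname(os.path.realpath(__file__))

def load_requirements(path_dir=PATH_ROOT, comment_char='#'):
    """Parse requirements.txt into a list usable as install_requires.

    Assumed implementation: the real body is not part of this diff.
    """
    with open(os.path.join(path_dir, 'requirements.txt')) as fp:
        lines = fp.readlines()
    reqs = []
    for line in lines:
        # drop full-line and inline comments
        if comment_char in line:
            line = line[:line.index(comment_char)]
        line = line.strip()
        if line:  # skip blank lines
            reqs.append(line)
    return reqs
```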