
Commit

Fix setup-doc for pypi (#472)
* add Twine to CI

* freeze Twine

* freeze Twine

* minor refactoring

* try another

* fix req.

* update README

* fix __doc__

* fix multiple req. test-tube
Borda authored and williamFalcon committed Nov 9, 2019
1 parent a3f785d commit 1fd1e42
Showing 8 changed files with 111 additions and 95 deletions.
14 changes: 6 additions & 8 deletions .travis.yml
@@ -16,16 +16,13 @@ language: python

matrix:
  include:
-  - os: linux
-    dist: xenial  # Ubuntu 16.04
+  #- dist: xenial  # Ubuntu 16.04
+  #  python: 3.5
+  #  env: TOXENV=py35
+  - dist: bionic  # Ubuntu 18.04
    python: 3.6
    env: TOXENV=py36
-  - os: linux
-    dist: bionic  # Ubuntu 18.04
-    python: 3.6
-    env: TOXENV=py36
-  - os: linux
-    dist: bionic  # Ubuntu 18.04
+  - dist: bionic  # Ubuntu 18.04
    python: 3.7
    env: TOXENV=py37
  - os: osx
@@ -58,6 +55,7 @@ script:
  # integration
  - tox --sitepackages
  - pip install --editable .
+  #- python setup.py install --dry-run --user

after_success:
  - coverage report
153 changes: 77 additions & 76 deletions README.md
@@ -63,85 +63,85 @@ Lightning sets up all the boilerplate state-of-the-art training for you so you c
---

## How do I use it?
-Think about Lightning as refactoring your research code instead of using a new framework. The research code goes into a [LightningModule]((https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/)) which you fit using a Trainer.
+Think about Lightning as refactoring your research code instead of using a new framework. The research code goes into a [LightningModule](https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/) which you fit using a Trainer.

The LightningModule defines a *system* such as seq-2-seq, GAN, etc... It can ALSO define a simple classifier such as the example below.

To use lightning do 2 things:
-1. [Define a LightningModule](https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/)
-
-**WARNING:** This syntax is for version 0.5.0+ where abbreviations were removed.
-```python
-import os
-import torch
-from torch.nn import functional as F
-from torch.utils.data import DataLoader
-from torchvision.datasets import MNIST
-import torchvision.transforms as transforms
-
-import pytorch_lightning as pl
-
-class CoolSystem(pl.LightningModule):
-
-    def __init__(self):
-        super(CoolSystem, self).__init__()
-        # not the best model...
-        self.l1 = torch.nn.Linear(28 * 28, 10)
-
-    def forward(self, x):
-        return torch.relu(self.l1(x.view(x.size(0), -1)))
-
-    def training_step(self, batch, batch_nb):
-        # REQUIRED
-        x, y = batch
-        y_hat = self.forward(x)
-        loss = F.cross_entropy(y_hat, y)
-        tensorboard_logs = {'train_loss': loss}
-        return {'loss': loss, 'log': tensorboard_logs}
-
-    def validation_step(self, batch, batch_nb):
-        # OPTIONAL
-        x, y = batch
-        y_hat = self.forward(x)
-        return {'val_loss': F.cross_entropy(y_hat, y)}
-
-    def validation_end(self, outputs):
-        # OPTIONAL
-        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
-        tensorboard_logs = {'val_loss': avg_loss}
-        return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
-
-    def configure_optimizers(self):
-        # REQUIRED
-        # can return multiple optimizers and learning_rate schedulers
-        # (LBFGS is automatically supported, no need for a closure function)
-        return torch.optim.Adam(self.parameters(), lr=0.02)
-
-    @pl.data_loader
-    def train_dataloader(self):
-        # REQUIRED
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
-
-    @pl.data_loader
-    def val_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
-
-    @pl.data_loader
-    def test_dataloader(self):
-        # OPTIONAL
-        return DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), batch_size=32)
-```
+1. [Define a LightningModule](https://williamfalcon.github.io/pytorch-lightning/LightningModule/RequiredTrainerInterface/)
+**WARNING:** This syntax is for version 0.5.0+ where abbreviations were removed.
+```python
+import os
+import torch
+from torch.nn import functional as F
+from torch.utils.data import DataLoader
+from torchvision.datasets import MNIST
+from torchvision import transforms
+import pytorch_lightning as pl
+class CoolSystem(pl.LightningModule):
+    def __init__(self):
+        super(CoolSystem, self).__init__()
+        # not the best model...
+        self.l1 = torch.nn.Linear(28 * 28, 10)
+    def forward(self, x):
+        return torch.relu(self.l1(x.view(x.size(0), -1)))
+    def training_step(self, batch, batch_nb):
+        # REQUIRED
+        x, y = batch
+        y_hat = self.forward(x)
+        loss = F.cross_entropy(y_hat, y)
+        tensorboard_logs = {'train_loss': loss}
+        return {'loss': loss, 'log': tensorboard_logs}
+    def validation_step(self, batch, batch_nb):
+        # OPTIONAL
+        x, y = batch
+        y_hat = self.forward(x)
+        return {'val_loss': F.cross_entropy(y_hat, y)}
+    def validation_end(self, outputs):
+        # OPTIONAL
+        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
+        tensorboard_logs = {'val_loss': avg_loss}
+        return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
+    def configure_optimizers(self):
+        # REQUIRED
+        # can return multiple optimizers and learning_rate schedulers
+        # (LBFGS is automatically supported, no need for a closure function)
+        return torch.optim.Adam(self.parameters(), lr=0.02)
+    @pl.data_loader
+    def train_dataloader(self):
+        # REQUIRED
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
+    @pl.data_loader
+    def val_dataloader(self):
+        # OPTIONAL
+        return DataLoader(MNIST(os.getcwd(), train=True, download=True, transform=transforms.ToTensor()), batch_size=32)
+    @pl.data_loader
+    def test_dataloader(self):
+        # OPTIONAL
+        return DataLoader(MNIST(os.getcwd(), train=False, download=True, transform=transforms.ToTensor()), batch_size=32)
+```
2. Fit with a [trainer](https://williamfalcon.github.io/pytorch-lightning/Trainer/)
-```python
-from pytorch_lightning import Trainer
-
-model = CoolSystem()
-
-# most basic trainer, uses good defaults
-trainer = Trainer()
-trainer.fit(model)
-```
+```python
+from pytorch_lightning import Trainer
+model = CoolSystem()
+# most basic trainer, uses good defaults
+trainer = Trainer()
+trainer.fit(model)
+```

Trainer sets up a tensorboard logger, early stopping and checkpointing by default (you can modify all of them or
use something other than tensorboard).
@@ -166,7 +166,7 @@ trainer.fit(model)
# view tensorboard logs
logging.info(f'View tensorboard logs by running\ntensorboard --logdir {os.getcwd()}')
logging.info('and going to http://localhost:6006 on your browser')
-```
+```

When you're all done you can even run the test set separately.
```python
@@ -348,7 +348,8 @@ Lightning also adds a text column with all the hyperparameters for this experime
- [9 key speed features in Pytorch-Lightning](https://towardsdatascience.com/9-tips-for-training-lightning-fast-neural-networks-in-pytorch-8e63a502f565)
- [SLURM, multi-node training with Lightning](https://towardsdatascience.com/trivial-multi-node-training-with-pytorch-lightning-ff75dfb809bd)

----
+---
+

## Asking for help
Welcome to the Lightning community!

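The "run the test set separately" snippet above is collapsed in this view of the diff. A minimal sketch of the typical flow under the 0.5.x-era API shown in this README; `trainer.test(model)` is an assumed call here, not visible in the diff:

```python
# Sketch assuming the 0.5.x-era API used throughout this README;
# trainer.test(model) is an assumption, since the original snippet is collapsed.
from pytorch_lightning import Trainer

model = CoolSystem()  # the LightningModule defined above
trainer = Trainer()
trainer.fit(model)

# evaluate once on the held-out split returned by test_dataloader()
trainer.test(model)
```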
7 changes: 3 additions & 4 deletions pytorch_lightning/__init__.py
@@ -5,10 +5,9 @@
__author_email__ = 'waf2107@columbia.edu'
__license__ = 'Apache-2.0'
__homepage__ = 'https://github.com/williamFalcon/pytorch-lightning'
__docs__ = """# PyTorch Lightning
The lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate.
"""
# this has to be simple string, see: https://github.com/pypa/twine/issues/522
__docs__ = "PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers." \
" Scale your models. Write less boilerplate."


try:
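For context on the `__docs__` change: setuptools writes `description` into the single-line `Summary:` field of the package metadata, which is why the commit flattens the multi-line docstring to a plain string (see the linked twine issue). A minimal sketch of how a one-line `__docs__` feeds that field; the `setup()` wiring below is illustrative, not a copy of this repo's setup.py:

```python
# Illustrative only: assumes __docs__ is passed to setup() as the short summary.
from setuptools import setup

# a plain single-line string keeps the generated 'Summary:' metadata field valid
__docs__ = ("PyTorch Lightning is the lightweight PyTorch wrapper for ML researchers."
            " Scale your models. Write less boilerplate.")

setup(
    name='pytorch-lightning',
    version='0.5.3',  # placeholder version for this sketch
    description=__docs__,
    long_description=open('README.md', encoding='utf-8').read(),
    long_description_content_type='text/markdown',
)
```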
1 change: 0 additions & 1 deletion requirements.txt
@@ -1,6 +1,5 @@
scikit-learn==0.20.2
tqdm==4.35.0
-twine==1.13.0
numpy==1.16.4
torch>=1.2.0
torchvision>=0.3.0
15 changes: 14 additions & 1 deletion setup.cfg
@@ -41,12 +41,13 @@ exclude_lines =
    break
    pass
    os.makedirs
+# TODO: to be reviewed, this should not be skipped
omit =
    pytorch_lightning/callbacks/pt_callbacks.py
    tests/test_models.py
    pytorch_lightning/testing_models/lm_test_module.py
    pytorch_lightning/utilities/arg_parse.py
-    examples/templates
+    pl_examples/templates

[flake8]
# TODO: this should be 88 or 100 according to PEP8
@@ -58,3 +59,15 @@ verbose = 2
# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
format = pylint
ignore = E731,W504,F401,F841

+[check-manifest]
+ignore =
+    .travis.yml
+    tox.ini
+    .github
+    .github/*
+
+[metadata]
+license_file = LICENSE
+# long_description = file:README.md
+# long_description_content_type = text/markdown
9 changes: 6 additions & 3 deletions setup.py
@@ -46,15 +46,18 @@ def load_requirements(path_dir=PATH_ROOT, comment_char='#'):
    url=pytorch_lightning.__homepage__,
    download_url='https://github.com/williamFalcon/pytorch-lightning',
    license=pytorch_lightning.__license__,
-    packages=find_packages(exclude=['examples']),
-    # long_description=open('README.md', encoding='utf-8').read(),
-    # long_description_content_type='text/markdown',
+    packages=find_packages(exclude=['tests']),
+
+    long_description=open('README.md', encoding='utf-8').read(),
+    long_description_content_type='text/markdown',
    include_package_data=True,
    zip_safe=False,
+
    keywords=['deep learning', 'pytorch', 'AI'],
    python_requires='>=3.6',
    setup_requires=[],
    install_requires=load_requirements(PATH_ROOT),
+
    classifiers=[
        'Environment :: Console',
        'Natural Language :: English',
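Only the signature of `load_requirements` survives in the hunk header above; its body is collapsed. A rough sketch of what a helper with that signature typically does; the implementation below is an assumption, not the repo's actual code:

```python
# Assumed implementation of a requirements loader matching the visible signature.
import os

PATH_ROOT = os.path.dirname(os.path.realpath(__file__))

def load_requirements(path_dir=PATH_ROOT, comment_char='#'):
    """Parse requirements.txt into a list of pip requirement strings."""
    with open(os.path.join(path_dir, 'requirements.txt')) as fp:
        lines = [ln.strip() for ln in fp.readlines()]
    reqs = []
    for ln in lines:
        # drop full-line and trailing comments
        if comment_char in ln:
            ln = ln[:ln.index(comment_char)].strip()
        if ln:  # skip blank lines
            reqs.append(ln)
    return reqs
```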
3 changes: 2 additions & 1 deletion tests/requirements.txt
@@ -5,6 +5,7 @@ pytest>=3.0.5
pytest-cov
flake8
check-manifest
-test_tube
+# test_tube # already installed in main req.
mlflow
comet_ml
+twine==1.13.0
4 changes: 3 additions & 1 deletion tox.ini
@@ -35,10 +35,12 @@ deps =
    -r ./tests/requirements.txt
commands =
    pip list
-    check-manifest --ignore tox.ini
+    check-manifest
    python setup.py check --metadata --strict
    coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --doctest-modules
    flake8 .
+    python setup.py sdist
+    twine check dist/*

# DROP, it is duplication of setup.cfg
# [flake8]
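The two added tox commands build a source distribution and run `twine check` over it, which is what catches a `long_description` that PyPI cannot render. A rough Python equivalent of that render check, using readme_renderer (the library twine delegates to); treat the exact call as an assumption for illustration:

```python
# Approximates what `twine check dist/*` validates for a markdown README;
# readme_renderer.markdown.render returning None signals a render failure (assumed API).
import readme_renderer.markdown

text = open('README.md', encoding='utf-8').read()
if readme_renderer.markdown.render(text) is None:
    raise SystemExit('README.md would not render on PyPI')
print('long_description renders cleanly')
```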
