add package info (Lightning-AI#395)
* add package info Lightning-AI#358

* Update __init__.py

* wrap package info

* update CI

* fix package info

* fix for Lightning-AI#388

* prune duplicated configs

* fix install

* use req from file

* move info to sep. module
drop comments from req

* add setup req.

* add setup req.

* update get info

* refactor init

* update pip

* fix failing on builtins

* fix failing open

* fix test imports

* fix tests

* fix pep8
Borda authored and williamFalcon committed Oct 28, 2019
1 parent b86d223 commit 37647d8
Showing 10 changed files with 125 additions and 54 deletions.
7 changes: 3 additions & 4 deletions .travis.yml
@@ -51,14 +51,13 @@ matrix:
cache: pip

install:
- pip install -r requirements.txt
- pip install -r ./tests/requirements.txt
- pip --version ; pip list
- pip install future # needed for `builtins`
- sudo pip install tox

script:
# integration
- tox --sitepackages
- python setup.py install --dry-run
- pip install --editable .

after_success:
- coverage report
45 changes: 36 additions & 9 deletions pytorch_lightning/__init__.py
@@ -1,9 +1,36 @@
from .root_module.decorators import data_loader
from .root_module.root_module import LightningModule
from .trainer.trainer import Trainer

__all__ = [
'Trainer',
'LightningModule',
'data_loader',
]
"""Package info"""

__version__ = '0.5.2.1'
__author__ = 'William Falcon et al.'
__author_email__ = 'waf2107@columbia.edu'
__license__ = 'Apache-2.0'
__homepage__ = 'https://github.com/williamFalcon/pytorch-lightning'
__docs__ = """# PyTorch Lightning
The lightweight PyTorch wrapper for ML researchers. Scale your models. Write less boilerplate.
"""


try:
    # This variable is injected into the builtins by the build
    # process. It is used to enable importing subpackages of
    # pytorch_lightning when the binaries are not yet built.
    __LIGHTNING_SETUP__
except NameError:
    __LIGHTNING_SETUP__ = False

if __LIGHTNING_SETUP__:
    import sys
    sys.stderr.write('Partial import of pytorch_lightning during the build process.\n')
    # We are not importing the rest of the package during the build
    # process, as it may not be compiled yet
else:
    from .trainer.trainer import Trainer
    from .root_module.root_module import LightningModule
    from .root_module.decorators import data_loader

    __all__ = [
        'Trainer',
        'LightningModule',
        'data_loader',
    ]
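
The guard above follows the single-sourcing pattern referenced in setup.py further down in this diff: the build script injects __LIGHTNING_SETUP__ into builtins before importing the package, so only the metadata block at the top of __init__.py is evaluated and the heavy Trainer/torch imports are skipped. A minimal sketch of the intended interplay (simplified illustration, not part of this commit):

    import builtins

    # tell pytorch_lightning/__init__.py that we are in the build phase
    builtins.__LIGHTNING_SETUP__ = True

    import pytorch_lightning  # only __version__, __author__, etc. are defined now

    print(pytorch_lightning.__version__)  # -> '0.5.2.1'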
7 changes: 4 additions & 3 deletions pytorch_lightning/logging/comet_logger.py
@@ -1,6 +1,7 @@
from os import environ

from comet_ml import Experiment as CometExperiment
try:
    from comet_ml import Experiment as CometExperiment
except ImportError:
    raise ImportError('Missing comet_ml package.')

from .base import LightningLoggerBase, rank_zero_only

5 changes: 4 additions & 1 deletion pytorch_lightning/logging/mlflow_logger.py
@@ -1,7 +1,10 @@
from logging import getLogger
from time import time

import mlflow
try:
    import mlflow
except ImportError:
    raise ImportError('Missing mlflow package.')

from .base import LightningLoggerBase, rank_zero_only

5 changes: 4 additions & 1 deletion pytorch_lightning/logging/test_tube_logger.py
@@ -1,4 +1,7 @@
from test_tube import Experiment
try:
    from test_tube import Experiment
except ImportError:
    raise ImportError('Missing test-tube package.')

from .base import LightningLoggerBase, rank_zero_only

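
The three logging modules above wrap their third-party imports in the same guard, so importing them without the optional dependency fails with an explicit hint instead of an unqualified import error. A generic sketch of the pattern (the package name some_optional_pkg is purely illustrative):

    try:
        import some_optional_pkg  # hypothetical optional dependency
    except ImportError:
        # re-raise with a hint about which extra package is missing
        raise ImportError('Missing some_optional_pkg package.')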
6 changes: 5 additions & 1 deletion pytorch_lightning/testing/lm_test_module_base.py
@@ -4,12 +4,16 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
from test_tube import HyperOptArgumentParser
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from torchvision import transforms
from torchvision.datasets import MNIST
try:
    from test_tube import HyperOptArgumentParser
except ImportError:
    # TODO: this should be discussed and moved out of this package
    raise ImportError('Missing test-tube package.')

from pytorch_lightning import data_loader
from pytorch_lightning.root_module.root_module import LightningModule
3 changes: 2 additions & 1 deletion requirements.txt
@@ -4,4 +4,5 @@ twine==1.13.0
numpy==1.16.4
torch>=1.2.0
torchvision>=0.3.0
pandas
pandas>=0.20.3
# future>=0.17.1 # required for builtins in setup.py
12 changes: 10 additions & 2 deletions setup.cfg
@@ -11,12 +11,14 @@ markers =
    slow
    remote_data
    filterwarnings
    gpus_param_tests

[pycodestyle]
ignore = E731,W504
max-line-length = 120

[coverage:report]
# TODO: this looks suspicious, it should be reviewed
exclude_lines =
    pragma: no cover
    def __repr__
@@ -39,7 +41,6 @@ exclude_lines =
    break
    pass
    os.makedirs

omit =
    pytorch_lightning/callbacks/pt_callbacks.py
    tests/test_models.py
@@ -48,5 +49,12 @@ omit =
    examples/templates

[flake8]
ignore = E731,W504,F401,F841
# TODO: this should be 88 or 100 according to PEP 8
max-line-length = 120
exclude = .tox,*.egg,build,temp,examples/*
select = E,W,F
doctests = True
verbose = 2
# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
format = pylint
ignore = E731,W504,F401,F841
49 changes: 35 additions & 14 deletions setup.py
@@ -1,39 +1,60 @@
#!/usr/bin/env python

import os
from io import open
# Always prefer setuptools over distutils
from setuptools import setup, find_packages

# https://packaging.python.org/guides/single-sourcing-package-version/
try:
    import builtins
except ImportError:
    import __builtin__ as builtins

# https://packaging.python.org/guides/single-sourcing-package-version/
# http://blog.ionelmc.ro/2014/05/25/python-packaging/

PATH_ROOT = os.path.dirname(__file__)
builtins.__LIGHTNING_SETUP__ = True

import pytorch_lightning # noqa: E402


def load_requirements(path_dir=PATH_ROOT, comment_char='#'):
    with open(os.path.join(path_dir, 'requirements.txt'), 'r') as file:
        lines = [ln.strip() for ln in file.readlines()]
    reqs = []
    for ln in lines:
        # filter out comments
        if comment_char in ln:
            ln = ln[:ln.index(comment_char)]
        if ln:  # if requirement is not empty
            reqs.append(ln)
    return reqs


# https://packaging.python.org/discussions/install-requires-vs-requirements/
# Keep the metadata here for simplicity when reading this file... it is not obvious
# to non-engineers that it would otherwise live in __init__.py.
# The goal of the project is simplicity for researchers, so we don't want to add
# too many engineering-specific practices.
setup(
    name='pytorch-lightning',
    version='0.5.2.1',
    description='The Keras for ML researchers using PyTorch',
    author='William Falcon',
    author_email='waf2107@columbia.edu',
    url='https://github.com/williamFalcon/pytorch-lightning',
    version=pytorch_lightning.__version__,
    description=pytorch_lightning.__docs__,
    author=pytorch_lightning.__author__,
    author_email=pytorch_lightning.__author_email__,
    url=pytorch_lightning.__homepage__,
    download_url='https://github.com/williamFalcon/pytorch-lightning',
    license='Apache-2',
    packages=find_packages(),
    license=pytorch_lightning.__license__,
    packages=find_packages(exclude=['examples']),
    long_description=open('README.md', encoding='utf-8').read(),
    long_description_content_type='text/markdown',
    include_package_data=True,
    zip_safe=False,
    keywords=['deep learning', 'pytorch', 'AI'],
    python_requires='>=3.6',
    install_requires=[
        'torch>=1.2.0',
        'tqdm>=4.35.0',
        'test-tube>=0.6.9',
        'pandas>=0.20.3',
    ],
    setup_requires=[],
    install_requires=load_requirements(PATH_ROOT),
    classifiers=[
        'Environment :: Console',
        'Natural Language :: English',
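
For reference, a rough illustration of what load_requirements above is expected to return for the requirements.txt shown earlier in this diff: comment-only lines are dropped and inline comments are stripped (illustrative check only, not part of the commit):

    lines = [
        'numpy==1.16.4',
        'torch>=1.2.0',
        'pandas>=0.20.3',
        '# future>=0.17.1 # required for builtins in setup.py',
    ]
    reqs = []
    for ln in (ln.strip() for ln in lines):
        if '#' in ln:
            ln = ln[:ln.index('#')]  # drop the inline comment
        if ln:  # skip lines that were only a comment
            reqs.append(ln)
    assert reqs == ['numpy==1.16.4', 'torch>=1.2.0', 'pandas>=0.20.3']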
40 changes: 22 additions & 18 deletions tox.ini
@@ -12,36 +12,40 @@
# and also to help confirm pull requests to this project.

[tox]
envlist = py{35,36,37}
envlist = py{35,36,37,38}

[pytest]
log_cli = 0
log_cli_level = CRITICAL
log_cli_format = %(message)s
log_file = pytest.log
log_file_level = DEBUG
log_file_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
log_file_date_format = %Y-%m-%d %H:%M:%S
# DROP, it is duplication of setup.cfg
# [pytest]
# log_cli = 0
# log_cli_level = CRITICAL
# log_cli_format = %(message)s
# log_file = pytest.log
# log_file_level = DEBUG
# log_file_format = %(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)
# log_file_date_format=%Y-%m-%d %H:%M:%S

[testenv]
basepython =
    py35: python3.5
    py36: python3.6
    py37: python3.7
    py38: python3.8
deps =
    -r requirements.txt
    -r ./tests/requirements.txt
commands =
    pip list
    check-manifest --ignore tox.ini
    python setup.py check -m -s
    flake8 .
    python setup.py check --metadata --strict
    coverage run --source pytorch_lightning -m py.test pytorch_lightning tests pl_examples -v --doctest-modules
    flake8 .

[flake8]
exclude = .tox,*.egg,build,temp,examples/*
select = E,W,F
doctests = True
verbose = 2
# DROP, it is duplication of setup.cfg
# [flake8]
# exclude = .tox,*.egg,build,temp,examples/*
# select = E,W,F
# doctests = True
# verbose = 2
# https://pep8.readthedocs.io/en/latest/intro.html#error-codes
format = pylint
max-line-length = 100
# format = pylint
# max-line-length = 100
