default logger is now tensorboard #609

Merged · 5 commits · Jan 14, 2020
4 changes: 2 additions & 2 deletions pytorch_lightning/trainer/callback_config.py
@@ -2,7 +2,7 @@
 from abc import ABC
 
 from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
-from pytorch_lightning.logging import TestTubeLogger
+from pytorch_lightning.logging import TensorBoardLogger
 
 
 class TrainerCallbackConfigMixin(ABC):
@@ -69,7 +69,7 @@ def configure_early_stopping(self, early_stop_callback, logger):
         # configure logger
         if logger is True:
             # default logger
-            self.logger = TestTubeLogger(
+            self.logger = TensorBoardLogger(
                 save_dir=self.default_save_path,
                 version=self.slurm_job_id,
                 name='lightning_logs'
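In effect, a `Trainer` constructed without an explicit `logger` argument now writes TensorBoard event files instead of test-tube experiments. A minimal sketch of the new default, assuming the `logger` argument keeps its True/False/instance semantics (the `save_dir` value here is illustrative):

```python
from pytorch_lightning import Trainer

# No `logger` passed, so it defaults to True and the Trainer builds a
# TensorBoardLogger under `default_save_path` with name 'lightning_logs'.
trainer = Trainer(default_save_path='my_logs')

# Logging can still be disabled outright.
trainer_silent = Trainer(logger=False)
```

The resulting event files can then be inspected with `tensorboard --logdir my_logs`.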
25 changes: 19 additions & 6 deletions pytorch_lightning/trainer/trainer.py
@@ -139,41 +139,53 @@ def __init__(

"""
# Transfer params
if nb_gpu_nodes is not None: # Backward compatibility
# Backward compatibility
if nb_gpu_nodes is not None:
warnings.warn("`nb_gpu_nodes` has renamed to `num_nodes` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not num_nodes: # in case you did not set the proper value
num_nodes = nb_gpu_nodes
self.num_gpu_nodes = num_nodes

self.log_gpu_memory = log_gpu_memory
if gradient_clip is not None: # Backward compatibility

# Backward compatibility
if gradient_clip is not None:
warnings.warn("`gradient_clip` has renamed to `gradient_clip_val` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not gradient_clip_val: # in case you did not set the proper value
gradient_clip_val = gradient_clip
self.gradient_clip_val = gradient_clip_val

self.check_val_every_n_epoch = check_val_every_n_epoch
self.track_grad_norm = track_grad_norm
self.on_gpu = True if (gpus and torch.cuda.is_available()) else False
self.process_position = process_position
self.weights_summary = weights_summary
if max_nb_epochs is not None: # Backward compatibility

# Backward compatibility
if max_nb_epochs is not None:
warnings.warn("`max_nb_epochs` has renamed to `max_epochs` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not max_epochs: # in case you did not set the proper value
max_epochs = max_nb_epochs
self.max_epochs = max_epochs
if min_nb_epochs is not None: # Backward compatibility

# Backward compatibility
if min_nb_epochs is not None:
warnings.warn("`min_nb_epochs` has renamed to `min_epochs` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not min_epochs: # in case you did not set the proper value
min_epochs = min_nb_epochs
self.min_epochs = min_epochs
if nb_sanity_val_steps is not None: # Backward compatibility

# Backward compatibility
if nb_sanity_val_steps is not None:
warnings.warn("`nb_sanity_val_steps` has renamed to `num_sanity_val_steps` since v0.5.0"
" and will be removed in v0.8.0", DeprecationWarning)
if not num_sanity_val_steps: # in case you did not set the proper value
num_sanity_val_steps = nb_sanity_val_steps

self.num_sanity_val_steps = num_sanity_val_steps
self.print_nan_grads = print_nan_grads
self.truncated_bptt_steps = truncated_bptt_steps
@@ -261,8 +273,9 @@ def __init__(
         # logging
         self.log_save_interval = log_save_interval
         self.val_check_interval = val_check_interval
+
+        # backward compatibility
         if add_row_log_interval is not None:
-            # backward compatibility
             warnings.warn("`add_row_log_interval` has been renamed to `row_log_interval` since v0.5.0"
                           " and will be removed in v0.8.0", DeprecationWarning)
             if not row_log_interval:  # in case you did not set the proper value
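The same rename-and-warn pattern is repeated for every deprecated argument above. For reference, a hypothetical distillation of that pattern into a helper (the function name and parameters are illustrative, not part of this PR):

```python
import warnings

def resolve_renamed_arg(old_value, new_value, old_name, new_name):
    # Warn when the deprecated name is used, and fall back to its value
    # only if the new argument was left at its default.
    if old_value is not None:
        warnings.warn(f"`{old_name}` has been renamed to `{new_name}` since v0.5.0"
                      " and will be removed in v0.8.0", DeprecationWarning)
        if not new_value:
            new_value = old_value
    return new_value

# Example: max_epochs = resolve_renamed_arg(max_nb_epochs, max_epochs,
#                                           'max_nb_epochs', 'max_epochs')
```

One caveat of the `if not new_value` guard, kept from the original code: it treats falsy values such as 0 the same as unset, so an explicit `max_epochs=0` would be overridden by the deprecated argument.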
2 changes: 1 addition & 1 deletion requirements.txt
@@ -5,4 +5,4 @@ torch>=1.1
 torchvision>=0.4.0
 pandas>=0.24  # lower versions do not support py3.7
 test-tube>=0.7.5
Member comment (on `test-tube>=0.7.5`): add test-tube to test/requirements.txt

-future>=0.17.1  # required for builtins in setup.py
+future>=0.17.1  # required for builtins in setup.py
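Since test-tube remains a dependency, the previous default stays available by passing the logger explicitly. A minimal sketch using the import path shown in the diff (the `save_dir` value is illustrative):

```python
from pytorch_lightning import Trainer
from pytorch_lightning.logging import TestTubeLogger

# Opt back into the pre-#609 behavior by constructing the logger yourself.
logger = TestTubeLogger(save_dir='my_logs', name='lightning_logs')
trainer = Trainer(logger=logger)
```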