project Refactor, CNN Classifier Basics
main.py (28 lines changed)
@@ -10,13 +10,11 @@ import warnings
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from torch.utils.data import DataLoader

from lib.modules.utils import LightningBaseModule
from lib.utils.config import Config
from lib.utils.logging import Logger

from lib.evaluation.classification import ROCEvaluation
from lib.utils.model_io import SavedLightningModels

warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning)
@@ -36,8 +34,8 @@ main_arg_parser.add_argument("--main_seed", type=int, default=69, help="")
# Data Parameters
main_arg_parser.add_argument("--data_worker", type=int, default=10, help="")
main_arg_parser.add_argument("--data_batchsize", type=int, default=100, help="")
-main_arg_parser.add_argument("--data_root", type=str, default='/data/', help="")
-main_arg_parser.add_argument("--data_map_root", type=str, default='/res/maps', help="")
+main_arg_parser.add_argument("--data_root", type=str, default='data', help="")
+main_arg_parser.add_argument("--data_map_root", type=str, default='res/shapes', help="")

# Transformations
main_arg_parser.add_argument("--transformations_to_tensor", type=strtobool, default=False, help="")
@@ -50,10 +48,11 @@ main_arg_parser.add_argument("--train_batch_size", type=int, default=256, help="")
main_arg_parser.add_argument("--train_lr", type=float, default=0.002, help="")

# Model
-main_arg_parser.add_argument("--model_type", type=str, default="classifier_cnn", help="")
+main_arg_parser.add_argument("--model_type", type=str, default="generator_cnn", help="")
main_arg_parser.add_argument("--model_activation", type=str, default="relu", help="")
main_arg_parser.add_argument("--model_filters", type=str, default="[32, 16, 4]", help="")
main_arg_parser.add_argument("--model_classes", type=int, default=2, help="")
+main_arg_parser.add_argument("--model_lat_dim", type=int, default=2, help="")
main_arg_parser.add_argument("--model_use_bias", type=strtobool, default=True, help="")
main_arg_parser.add_argument("--model_use_norm", type=strtobool, default=True, help="")
main_arg_parser.add_argument("--model_dropout", type=float, default=0.00, help="")
@@ -77,9 +76,10 @@ def run_lightning_loop(config_obj):
# =============================================================================
# Checkpoint Saving
checkpoint_callback = ModelCheckpoint(
-    filepath=str(logger.log_dir / 'ckpt_weights'),
-    verbose=True, save_top_k=5,
+    filepath=str(logger.log_dir / 'ckpt_weights'),
+    verbose=True, save_top_k=0,
)

# =============================================================================
# Early Stopping
# TODO: For This to work, one must set a validation step and End Eval and Score
@@ -94,6 +94,11 @@ def run_lightning_loop(config_obj):
# Init
model: LightningBaseModule = config_obj.model_class(config_obj.model_paramters)
model.init_weights()
+if model.name == 'CNNRouteGenerator':
+    # ToDo: Make this dependent on the used seed
+    path = Path(Path(config_obj.train.outpath) / 'classifier_cnn' / 'version_0')
+    disc_model = SavedLightningModels.load_checkpoint(path).restore()
+    model.set_discriminator(disc_model)

# Trainer
# =============================================================================
@@ -101,8 +106,8 @@ def run_lightning_loop(config_obj):
    show_progress_bar=True,
    weights_save_path=logger.log_dir,
    gpus=[0] if torch.cuda.is_available() else None,
-    row_log_interval=(model.data_len * 0.01),  # TODO: Better Value / Setting
-    log_save_interval=(model.data_len * 0.04),  # TODO: Better Value / Setting
+    # row_log_interval=(model.n_train_batches * 0.1),  # TODO: Better Value / Setting
+    # log_save_interval=(model.n_train_batches * 0.2),  # TODO: Better Value / Setting
    checkpoint_callback=checkpoint_callback,
    logger=logger,
    fast_dev_run=config_obj.main.debug,
@@ -110,7 +115,7 @@ def run_lightning_loop(config_obj):
)

# Train It
-trainer.fit(model,)
+trainer.fit(model)

# Save the last state & all parameters
trainer.save_checkpoint(logger.log_dir / 'weights.ckpt')
@@ -118,6 +123,7 @@ def run_lightning_loop(config_obj):

# Evaluate It
trainer.test()

return model
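The Early Stopping section of this file is still a TODO (per the comment in the diff, it needs a validation step before the callback can monitor anything). For reference, below is a minimal sketch, not part of this commit, of how the already-imported EarlyStopping callback could sit next to the ModelCheckpoint; the monitor key 'val_loss', the patience value, the log directory, and the early_stop_callback keyword are assumptions based on the pytorch-lightning 0.7-era Trainer API that this file appears to use.

# Hedged sketch, not part of this commit: wiring EarlyStopping next to the
# existing ModelCheckpoint. Assumes the pytorch-lightning 0.7-era API used
# elsewhere in this file and a LightningModule whose validation loop reports
# a 'val_loss' metric (both are assumptions, not shown in the diff).
from pathlib import Path

from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping

log_dir = Path('output/version_0')  # hypothetical log directory

checkpoint_callback = ModelCheckpoint(
    filepath=str(log_dir / 'ckpt_weights'),  # same pattern as in the diff
    verbose=True, save_top_k=0,
)
early_stop_callback = EarlyStopping(
    monitor='val_loss',  # assumed metric name; must come from a validation step
    patience=5,
    mode='min',
    verbose=True,
)

trainer = Trainer(
    checkpoint_callback=checkpoint_callback,
    early_stop_callback=early_stop_callback,  # 0.7-era keyword, removed in later releases
    max_epochs=100,
)
# trainer.fit(model)  # requires a LightningModule with a validation step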