All models running.

commit 18305a9e7e
parent 7b0b96eaa3
Si11ium committed 2019-08-24 19:05:46 +02:00
6 changed files with 45 additions and 45 deletions


@@ -1,37 +1,33 @@
 from torch.distributions import Normal
 from networks.auto_encoder import *
 import os
 import time
 from networks.variational_auto_encoder import *
 from networks.adverserial_auto_encoder import *
 from networks.seperating_adversarial_auto_encoder import *
 from networks.modules import LightningModule
 from torch.optim import Adam
 from torch.utils.data import DataLoader
 from pytorch_lightning import data_loader
 from dataset import DataContainer
 from torch.nn import BatchNorm1d
 from pytorch_lightning import Trainer
 from test_tube import Experiment
 from argparse import Namespace
 from argparse import ArgumentParser
 from distutils.util import strtobool
 args = ArgumentParser()
-args.add_argument('--step', default=0)
-args.add_argument('--features', default=0)
-args.add_argument('--size', default=0)
-args.add_argument('--latent_dim', default=0)
+args.add_argument('--step', default=6)
+args.add_argument('--features', default=6)
+args.add_argument('--size', default=9)
+args.add_argument('--latent_dim', default=4)
 args.add_argument('--model', default='Model')
 args.add_argument('--refresh', type=strtobool, default=False)
 # ToDo: How to implement this better?
 # other_classes = [AutoEncoder, AutoEncoderLightningOverrides]
 class Model(AutoEncoderLightningOverrides, LightningModule):
-    def __init__(self, parameters, **kwargs):
+    def __init__(self, parameters):
         assert all([x in parameters for x in ['step', 'size', 'latent_dim', 'features']])
         self.size = parameters.size
         self.latent_dim = parameters.latent_dim
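
Aside, not part of the commit: the hunk above combines strtobool as an argparse type with membership checks on the parsed Namespace, which is what the constructors' asserts depend on. A minimal, self-contained sketch of both patterns:

from argparse import ArgumentParser
from distutils.util import strtobool

parser = ArgumentParser()
parser.add_argument('--latent_dim', default=4)
parser.add_argument('--refresh', type=strtobool, default=False)

ns = parser.parse_args(['--refresh', 'yes'])
print(ns.refresh)          # 1: strtobool returns an int, not a bool
print('latent_dim' in ns)  # True: Namespace supports the `in` operator
assert all(x in ns for x in ['latent_dim', 'refresh'])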
@@ -43,7 +39,7 @@ class Model(AutoEncoderLightningOverrides, LightningModule):
 class AdversarialModel(AdversarialAELightningOverrides, LightningModule):
-    def __init__(self, parameters: Namespace, **kwargs):
+    def __init__(self, parameters: Namespace):
         assert all([x in parameters for x in ['step', 'size', 'latent_dim', 'features']])
         self.size = parameters.size
         self.latent_dim = parameters.latent_dim
@@ -57,7 +53,7 @@ class AdversarialModel(AdversarialAELightningOverrides, LightningModule):
 class SeparatingAdversarialModel(SeparatingAdversarialAELightningOverrides, LightningModule):
-    def __init__(self, parameters: Namespace, **kwargs):
+    def __init__(self, parameters: Namespace):
         assert all([x in parameters for x in ['step', 'size', 'latent_dim', 'features']])
         self.size = parameters.size
         self.latent_dim = parameters.latent_dim
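
All three model classes follow the same mixin layout: the Lightning hooks come from a *LightningOverrides class listed first among the bases, so its methods win in the method resolution order. A simplified illustration with hypothetical stub names, not the project's real classes:

class LightningModuleStub:
    def training_step(self, batch):
        raise NotImplementedError

class AELightningOverridesStub:
    # listed first in the bases below, so the MRO picks this implementation
    def training_step(self, batch):
        return {'loss': sum(batch)}

class DemoModel(AELightningOverridesStub, LightningModuleStub):
    pass

print(DemoModel().training_step([1, 2, 3]))  # {'loss': 6}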
@@ -65,16 +61,12 @@ class SeparatingAdversarialModel(SeparatingAdversarialAELightningOverrides, LightningModule):
         self.step = parameters.step
         super(SeparatingAdversarialModel, self).__init__()
         self.normal = Normal(0, 1)
-        self.network = SeperatingAdversarialAutoEncoder(self.latent_dim, self.features, **kwargs)
+        self.network = SeperatingAdversarialAutoEncoder(self.latent_dim, self.features)
         pass
 if __name__ == '__main__':
-    features = 6
-    tag_dict = dict(features=features, latent_dim=4, size=5, step=6, refresh=False,
-                    transforms=[BatchNorm1d(features)])
     arguments = args.parse_args()
-    arguments.__dict__.update(tag_dict)
     model = globals()[arguments.model](arguments)
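
The line model = globals()[arguments.model](arguments) resolves the --model string to a class defined at module scope and instantiates it. A standalone sketch of that dispatch, using simplified stand-in classes:

from argparse import Namespace

class Model:
    def __init__(self, parameters):
        self.size = parameters.size

class AdversarialModel(Model):
    pass

arguments = Namespace(model='AdversarialModel', size=9)
model = globals()[arguments.model](arguments)  # same as AdversarialModel(arguments)
print(type(model).__name__)                    # AdversarialModel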
@@ -82,19 +74,19 @@ if __name__ == '__main__':
     outpath = os.path.join(os.getcwd(), 'output', model.name, time.asctime().replace(' ', '_').replace(':', '-'))
     os.makedirs(outpath, exist_ok=True)
     exp = Experiment(save_dir=outpath)
-    exp.tag(tag_dict=tag_dict)
+    exp.tag(tag_dict=arguments.__dict__)
     from pytorch_lightning.callbacks import ModelCheckpoint
     checkpoint_callback = ModelCheckpoint(
         filepath=os.path.join(outpath, 'weights.ckpt'),
-        save_best_only=True,
+        save_best_only=False,
         verbose=True,
         monitor='val_loss',  # val_loss
         mode='min',
         period=4
     )
-    trainer = Trainer(experiment=exp, checkpoint_callback=checkpoint_callback, max_nb_epochs=15)  # gpus=[0...LoL]
+    trainer = Trainer(experiment=exp, max_nb_epochs=250, gpus=[0],
+                      add_log_row_interval=1000, checkpoint_callback=checkpoint_callback)
     trainer.fit(model)
     trainer.save_checkpoint(os.path.join(outpath, 'weights.ckpt'))
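
For reference, the run directory built in the last hunk comes from time.asctime(), with separators replaced so the timestamp is safe as a directory name; a small sketch of just that path logic (the model name is a hypothetical stand-in for model.name):

import os
import time

name = 'Model'  # stands in for model.name
outpath = os.path.join(os.getcwd(), 'output', name,
                       time.asctime().replace(' ', '_').replace(':', '-'))
print(outpath)  # e.g. .../output/Model/Sat_Aug_24_19-05-46_2019
os.makedirs(outpath, exist_ok=True)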