Done: Latent Space Viz

ToDo: Visualization for variational spaces
Trajectory Coloring
Post Processing
Metric
Slurm Script
This commit is contained in:
Si11ium
2019-08-23 09:54:00 +02:00
parent 744c0c50b7
commit 1a0400d736
9 changed files with 159 additions and 76 deletions

View File

@ -14,21 +14,35 @@ from torch.nn import BatchNorm1d
from pytorch_lightning import Trainer
from test_tube import Experiment
from argparse import Namespace
from argparse import ArgumentParser
args = ArgumentParser()
args.add_argument('step')
args.add_argument('features')
args.add_argument('size')
args.add_argument('latent_dim')
# ToDo: How to implement this better?
# other_classes = [AutoEncoder, AutoEncoderLightningOverrides]
class Model(VariationalAutoEncoderLightningOverrides, LightningModule):
class Model(AutoEncoderLightningOverrides, LightningModule):
def __init__(self, dataParams: dict):
def __init__(self, latent_dim=0, size=0, step=0, features=0, **kwargs):
assert all([x in args for x in ['step', 'size', 'latent_dim', 'features']])
self.size = args.size
self.latent_dim = args.latent_dim
self.features = args.features
self.step = args.step
super(Model, self).__init__()
self.dataParams = dataParams
self.network = VariationalAutoEncoder(self.dataParams)
self.network = AutoEncoder(self.latent_dim, self.features)
def configure_optimizers(self):
return [Adam(self.parameters(), lr=0.02)]
@data_loader
def tng_dataloader(self):
return DataLoader(DataContainer('data', **self.dataParams), shuffle=True, batch_size=100)
return DataLoader(DataContainer('data', self.size, self.step), shuffle=True, batch_size=100)
class AdversarialModel(AdversarialAELightningOverrides, LightningModule):
@ -37,11 +51,15 @@ class AdversarialModel(AdversarialAELightningOverrides, LightningModule):
def name(self):
return self.network.name
def __init__(self, dataParams: dict):
def __init__(self, args: Namespace, **kwargs):
assert all([x in args for x in ['step', 'size', 'latent_dim', 'features']])
self.size = args.size
self.latent_dim = args.latent_dim
self.features = args.features
self.step = args.step
super(AdversarialModel, self).__init__()
self.dataParams = dataParams
self.normal = Normal(0, 1)
self.network = AdversarialAutoEncoder(self.dataParams)
self.network = AdversarialAutoEncoder(self.latent_dim, self.features)
pass
# This is Fucked up, why do i need to put an additional empty list here?
@ -52,17 +70,20 @@ class AdversarialModel(AdversarialAELightningOverrides, LightningModule):
@data_loader
def tng_dataloader(self):
return DataLoader(DataContainer('data', **self.dataParams), shuffle=True, batch_size=100)
return DataLoader(DataContainer('data', self.size, self.step), shuffle=True, batch_size=100)
class SeparatingAdversarialModel(SeparatingAdversarialAELightningOverrides, LightningModule):
def __init__(self, latent_dim, dataParams: dict):
def __init__(self, args: Namespace, **kwargs):
assert all([x in args for x in ['step', 'size', 'latent_dim', 'features']])
self.size = args.size
self.latent_dim = args.latent_dim
self.features = args.features
self.step = args.step
super(SeparatingAdversarialModel, self).__init__()
self.latent_dim = latent_dim
self.dataParams = dataParams
self.normal = Normal(0, 1)
self.network = SeperatingAdversarialAutoEncoder(self.latent_dim, self.dataParams)
self.network = SeperatingAdversarialAutoEncoder(self.latent_dim, self.features, **kwargs)
pass
# This is Fucked up, why do i need to put an additional empty list here?
@ -78,22 +99,24 @@ class SeparatingAdversarialModel(SeparatingAdversarialAELightningOverrides, Ligh
@data_loader
def tng_dataloader(self):
return DataLoader(DataContainer('data', **self.dataParams), shuffle=True, batch_size=100)
num_workers = os.cpu_count() // 2
return DataLoader(DataContainer('data', self.size, self.step), shuffle=True, batch_size=100, num_workers=num_workers)
if __name__ == '__main__':
features = 6
latent_dim = 4
model = SeparatingAdversarialModel(latent_dim=latent_dim, dataParams=dict(refresh=False, size=5, step=5,
features=features, transforms=[BatchNorm1d(features)]
)
)
tag_dict = dict(features=features, latent_dim=4, size=5, step=6, refresh=False,
transforms=[BatchNorm1d(features)])
arguments = args.parse_args()
arguments.__dict__.update(tag_dict)
model = SeparatingAdversarialModel(arguments)
# PyTorch summarywriter with a few bells and whistles
outpath = os.path.join(os.getcwd(), 'output', model.name, time.asctime().replace(' ', '_').replace(':', '-'))
os.makedirs(outpath, exist_ok=True)
exp = Experiment(save_dir=outpath)
exp.tag(tag_dict=tag_dict)
from pytorch_lightning.callbacks import ModelCheckpoint
@ -101,9 +124,8 @@ if __name__ == '__main__':
filepath=os.path.join(outpath, 'weights.ckpt'),
save_best_only=True,
verbose=True,
monitor='val_loss',
monitor='tng_loss', # val_loss
mode='min',
)
trainer = Trainer(experiment=exp, checkpoint_callback=checkpoint_callback, max_nb_epochs=15) # gpus=[0...LoL]