ae_toolbox_torch/run_models.py
Si11ium f2cb9b7c42 Done: AE, VAE, AAE
ToDo: Double AAE, Visualization
All Modularized
2019-08-16 22:39:24 +02:00

61 lines
2.0 KiB
Python

from networks.auto_encoder import *
from networks.variational_auto_encoder import *
from networks.adverserial_auto_encoder import *
from networks.modules import LightningModule
from torch.optim import Adam
from torch.utils.data import DataLoader
from pytorch_lightning import data_loader
from dataset import DataContainer
from torch.nn import BatchNorm1d
from pytorch_lightning import Trainer
# ToDo: How to implement this better?
# other_classes = [AutoEncoder, AutoEncoderLightningOverrides]
class Model(VariationalAutoEncoderLightningOverrides, LightningModule):
    """Lightning wrapper around a variational auto-encoder.

    Training/validation step logic comes from the
    VariationalAutoEncoderLightningOverrides mixin; this class only wires
    up the network, its optimizer, and the training dataloader.
    """

    def __init__(self, dataParams: dict):
        super().__init__()
        self.dataParams = dataParams
        self.network = VariationalAutoEncoder(self.dataParams)

    def configure_optimizers(self):
        # One Adam optimizer over every trainable parameter of the model.
        optimizer = Adam(self.parameters(), lr=0.02)
        return [optimizer]

    @data_loader
    def tng_dataloader(self):
        dataset = DataContainer('data', **self.dataParams)
        return DataLoader(dataset, shuffle=True, batch_size=100)
class AdversarialModel(AdversarialAELightningOverrides, LightningModule):
    """Lightning wrapper around an adversarial auto-encoder (AAE).

    Uses two optimizers — one for the discriminator and one for the
    encoder/decoder (generator) pair — with the step logic supplied by the
    AdversarialAELightningOverrides mixin.
    """

    def __init__(self, dataParams: dict):
        super(AdversarialModel, self).__init__()
        self.dataParams = dataParams
        # Prior distribution the latent codes are matched against.
        # NOTE(review): presumably sampled in the adversarial step — confirm in mixin.
        self.normal = Normal(0, 1)
        self.network = AdversarialAutoEncoder(self.dataParams)

    def configure_optimizers(self):
        # PyTorch-Lightning accepts either a list of optimizers or a
        # (optimizers, lr_schedulers) tuple. The trailing empty list is the
        # LR-scheduler list (none are used here) — it is required by the
        # tuple form, not an arbitrary quirk.
        return ([Adam(self.network.discriminator.parameters(), lr=0.02),
                 Adam([*self.network.encoder.parameters(),
                       *self.network.decoder.parameters()], lr=0.02)],
                [])

    @data_loader
    def tng_dataloader(self):
        return DataLoader(DataContainer('data', **self.dataParams), shuffle=True, batch_size=100)
if __name__ == '__main__':
    # Train the adversarial model on windowed data with per-feature
    # batch normalisation as an input transform.
    features = 6
    data_params = dict(refresh=False, size=5, step=5,
                       features=features,
                       transforms=[BatchNorm1d(features)])
    ae = AdversarialModel(dataParams=data_params)
    trainer = Trainer()
    trainer.fit(ae)