Done: Latent Space Viz
ToDo: Visualization for variational spaces, Trajectory Coloring, Post-Processing Metric, Slurm Script
@@ -10,7 +10,7 @@ class AdversarialAutoEncoder(AutoEncoder):
 
     def __init__(self, *args, **kwargs):
         super(AdversarialAutoEncoder, self).__init__(*args, **kwargs)
-        self.discriminator = Discriminator(self.latent_dim, self.dataParams)
+        self.discriminator = Discriminator(self.latent_dim, self.features)
 
     def forward(self, batch):
         # Encoder
@@ -18,7 +18,7 @@ class AdversarialAutoEncoder(AutoEncoder):
         z = self.encoder(batch)
         # Decoder
         # First repeat the data accordingly to the batch size
-        z_repeatet = Repeater((batch.shape[0], self.dataParams['size'], -1))(z)
+        z_repeatet = Repeater((batch.shape[0], batch.shape[1], -1))(z)
         x_hat = self.decoder(z_repeatet)
         return z, x_hat
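The Repeater module itself is untouched by this commit; the change only swaps the hard-coded self.dataParams['size'] for the sequence length read directly from batch.shape[1]. A minimal sketch of what Repeater is assumed to do here (tile the per-sequence latent vector along the time axis so the decoder receives one copy per timestep), with the shape tuple matching the call sites in this diff:

import torch
from torch.nn import Module

class Repeater(Module):
    # Sketch only -- the real implementation lives elsewhere in the repository.
    def __init__(self, shape):
        super(Repeater, self).__init__()
        self.shape = shape  # e.g. (batch_size, sequence_length, -1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # (batch, latent_dim) -> (batch, 1, latent_dim) -> (batch, seq_len, latent_dim)
        return x.unsqueeze(-2).expand(*self.shape)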
@@ -7,12 +7,13 @@ from torch import Tensor
 # Basic AE-Implementation
 class AutoEncoder(AbstractNeuralNetwork, ABC):
 
-    def __init__(self, latent_dim: int, dataParams: dict, **kwargs):
+    def __init__(self, latent_dim: int = 0, features: int = 0, **kwargs):
+        assert latent_dim and features
         super(AutoEncoder, self).__init__()
-        self.dataParams = dataParams
         self.latent_dim = latent_dim
+        self.features = features
         self.encoder = Encoder(self.latent_dim)
-        self.decoder = Decoder(self.latent_dim, self.dataParams['features'])
+        self.decoder = Decoder(self.latent_dim, self.features)
 
     def forward(self, batch: Tensor):
         # Encoder
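With the dataParams dict gone, callers pass latent_dim and features explicitly, and the sequence length is taken from the batch at runtime. A hypothetical construction call under that assumption (the concrete numbers are placeholders, not values from the repository):

import torch

# Hypothetical sizes for illustration only.
model = AutoEncoder(latent_dim=2, features=6)
batch = torch.randn(32, 100, 6)  # (batch_size, sequence_length, features)
z, x_hat = model(batch)          # z: latent code per sequence, x_hat: reconstruction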
@@ -20,7 +21,7 @@ class AutoEncoder(AbstractNeuralNetwork, ABC):
         z = self.encoder(batch)
         # Decoder
         # First repeat the data accordingly to the batch size
-        z_repeatet = Repeater((batch.shape[0], self.dataParams['size'], -1))(z)
+        z_repeatet = Repeater((batch.shape[0], batch.shape[1], -1))(z)
         x_hat = self.decoder(z_repeatet)
         return z, x_hat
@@ -131,13 +131,13 @@ class AvgDimPool(Module):
 # Generators, Decoders, Encoders, Discriminators
 class Discriminator(Module):
 
-    def __init__(self, latent_dim, dataParams, dropout=.0, activation=ReLU):
+    def __init__(self, latent_dim, features, dropout=.0, activation=ReLU):
         super(Discriminator, self).__init__()
-        self.dataParams = dataParams
+        self.features = features
         self.latent_dim = latent_dim
-        self.l1 = Linear(self.latent_dim, self.dataParams['features'] * 10)
-        self.l2 = Linear(self.dataParams['features'] * 10, self.dataParams['features'] * 20)
-        self.lout = Linear(self.dataParams['features'] * 20, 1)
+        self.l1 = Linear(self.latent_dim, self.features * 10)
+        self.l2 = Linear(self.features * 10, self.features * 20)
+        self.lout = Linear(self.features * 20, 1)
         self.dropout = Dropout(dropout)
         self.activation = activation()
         self.sigmoid = Sigmoid()
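Only the Discriminator constructor is touched here; its forward pass is outside the hunk. A sketch of how the declared layers would typically compose (an assumption, not the repository's code):

def forward(self, z):
    # latent code -> two hidden layers -> single probability that z matches the prior
    tensor = self.dropout(self.activation(self.l1(z)))
    tensor = self.dropout(self.activation(self.l2(tensor)))
    return self.sigmoid(self.lout(tensor))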
@@ -6,17 +6,17 @@ import torch
 
 class SeperatingAdversarialAutoEncoder(Module):
 
-    def __init__(self, latent_dim, dataParams, **kwargs):
+    def __init__(self, latent_dim, features, **kwargs):
         assert latent_dim % 2 == 0, f'Your latent space needs to be even, not odd, but was: "{latent_dim}"'
         super(SeperatingAdversarialAutoEncoder, self).__init__()
 
         self.latent_dim = latent_dim
-        self.dataParams = dataParams
+        self.features = features
         self.spatial_encoder = PoolingEncoder(self.latent_dim // 2)
         self.temporal_encoder = Encoder(self.latent_dim // 2)
-        self.decoder = Decoder(self.latent_dim, self.dataParams['features'])
-        self.spatial_discriminator = Discriminator(self.latent_dim // 2, self.dataParams)
-        self.temporal_discriminator = Discriminator(self.latent_dim // 2, self.dataParams)
+        self.decoder = Decoder(self.latent_dim, self.features)
+        self.spatial_discriminator = Discriminator(self.latent_dim // 2, self.features)
+        self.temporal_discriminator = Discriminator(self.latent_dim // 2, self.features)
 
     def forward(self, batch):
         # Encoder
@@ -25,7 +25,7 @@ class SeperatingAdversarialAutoEncoder(Module):
         # Decoder
         # First repeat the data accordingly to the batch size
         z_concat = torch.cat((z_spatial, z_temporal), dim=-1)
-        z_repeatet = Repeater((batch.shape[0], self.dataParams['size'], -1))(z_concat)
+        z_repeatet = Repeater((batch.shape[0], batch.shape[1], -1))(z_concat)
         x_hat = self.decoder(z_repeatet)
         return z_spatial, z_temporal, x_hat
@@ -10,12 +10,13 @@ class VariationalAutoEncoder(AbstractNeuralNetwork, ABC):
     def name(self):
         return self.__class__.__name__
 
-    def __init__(self, dataParams, **kwargs):
+    def __init__(self, latent_dim=0, features=0, **kwargs):
+        assert latent_dim and features
         super(VariationalAutoEncoder, self).__init__()
-        self.dataParams = dataParams
-        self.latent_dim = kwargs.get('latent_dim', 2)
+        self.features = features
+        self.latent_dim = latent_dim
         self.encoder = Encoder(self.latent_dim, variational=True)
-        self.decoder = Decoder(self.latent_dim, self.dataParams['features'], variational=True)
+        self.decoder = Decoder(self.latent_dim, self.features, variational=True)
 
     @staticmethod
     def reparameterize(mu, logvar):
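reparameterize is referenced in the hunk below but its body is not part of this diff. The standard VAE reparameterization trick it presumably implements:

@staticmethod
def reparameterize(mu, logvar):
    # Sample z = mu + sigma * eps with eps ~ N(0, I), keeping gradients w.r.t. mu and logvar.
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std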
@@ -27,7 +28,7 @@ class VariationalAutoEncoder(AbstractNeuralNetwork, ABC):
     def forward(self, batch):
         mu, logvar = self.encoder(batch)
         z = self.reparameterize(mu, logvar)
-        repeat = Repeater((batch.shape[0], self.dataParams['size'], -1))
+        repeat = Repeater((batch.shape[0], batch.shape[1], -1))
         x_hat = self.decoder(repeat(z))
         return x_hat, mu, logvar
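Nothing in this commit shows how the returned (x_hat, mu, logvar) triple is consumed. A typical ELBO-style loss built on top of it would look roughly like this (an illustration, not code from the repository):

import torch
from torch.nn.functional import mse_loss

def vae_loss(x, x_hat, mu, logvar):
    # Reconstruction term plus KL divergence of N(mu, sigma^2) from the standard normal prior.
    recon = mse_loss(x_hat, x, reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return recon + kld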