Done: AE, VAE, AAE

ToDo: Double AAE, Visualization
All Modularized
Si11ium
2019-08-16 22:39:24 +02:00
parent 265c900f33
commit f2cb9b7c42
12 changed files with 409 additions and 266 deletions

View File

@@ -0,0 +1,66 @@
from networks.auto_encoder import AutoEncoder
from torch.nn.functional import mse_loss
from torch.nn import Sequential, Linear, ReLU, Dropout, Sigmoid
from torch.distributions import Normal
from networks.modules import *
import torch
class AdversarialAutoEncoder(AutoEncoder):
def __init__(self, *args, **kwargs):
super(AdversarialAutoEncoder, self).__init__(*args, **kwargs)
self.discriminator = Discriminator(self.latent_dim, self.dataParams)
def forward(self, batch):
        # Encoder: compress the whole sequence into a single latent code
        # z: (batch, latent_dim)
        z = self.encoder(batch)
        # Decoder: repeat the latent code across the timesteps ('size'),
        # so the recurrent decoder receives a sequence again
        z_repeated = Repeater((batch.shape[0], self.dataParams['size'], -1))(z)
        x_hat = self.decoder(z_repeated)
return z, x_hat
class AdversarialAELightningOverrides:
def forward(self, x):
return self.network.forward(x)
    def training_step(self, batch, _, optimizer_i):
        if optimizer_i == 0:
            # ---------------------
            # Train Discriminator
            # ---------------------
            latent_fake, _ = self.network.forward(batch)
            # detach, so the discriminator update does not backpropagate into the encoder
            latent_fake = latent_fake.detach()
            # 'real' samples come from the prior the encoder should be pushed towards
            latent_real = self.normal.sample(latent_fake.shape)
            # Evaluate both with the discriminator
            d_real_prediction = self.network.discriminator.forward(latent_real)
            d_fake_prediction = self.network.discriminator.forward(latent_fake)
            # Least-squares discriminator loss; note the labels are inverted
            # relative to the usual real=1 / fake=0 convention
            d_loss_real = mse_loss(d_real_prediction, torch.zeros_like(d_real_prediction))
            d_loss_fake = mse_loss(d_fake_prediction, torch.ones_like(d_fake_prediction))
            # Mean over the real and the fake loss
            d_loss = 0.5 * (d_loss_real + d_loss_fake)
            return {'loss': d_loss}
        elif optimizer_i == 1:
            # ---------------------
            # Train AutoEncoder
            # ---------------------
            # forward returns (z, x_hat); only the reconstruction is needed here
            _, batch_hat = self.forward(batch)
            loss = mse_loss(batch, batch_hat)
            return {'loss': loss}
        else:
            raise RuntimeError('This should not have happened, catch me if you can.')
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
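
The two-branch training_step above assumes a Lightning module with two optimizers and a `self.normal` prior already defined. A minimal sketch of how that wiring could look; everything except AdversarialAutoEncoder and AdversarialAELightningOverrides is an assumption, not part of this commit:

# Sketch only (not in this commit): wiring the two optimizers that
# training_step's optimizer_i switches between, plus the self.normal prior.
import torch
import pytorch_lightning as pl
from torch.distributions import Normal

class AdversarialAEModel(AdversarialAELightningOverrides, pl.LightningModule):
    def __init__(self, dataParams):
        super(AdversarialAEModel, self).__init__()
        self.network = AdversarialAutoEncoder(dataParams)
        self.normal = Normal(0, 1)  # the prior the discriminator treats as 'real'

    def configure_optimizers(self):
        # index 0 -> discriminator step, index 1 -> autoencoder step
        d_optim = torch.optim.Adam(self.network.discriminator.parameters(), lr=1e-3)
        ae_optim = torch.optim.Adam(
            list(self.network.encoder.parameters()) +
            list(self.network.decoder.parameters()), lr=1e-3)
        return [d_optim, ae_optim]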

networks/auto_encoder.py Normal file
View File

@@ -0,0 +1,45 @@
from .modules import *
from torch.nn.functional import mse_loss
from torch import Tensor
#######################
# Basic AE-Implementation
class AutoEncoder(Module, ABC):
@property
def name(self):
return self.__class__.__name__
def __init__(self, dataParams, **kwargs):
super(AutoEncoder, self).__init__()
self.dataParams = dataParams
self.latent_dim = kwargs.get('latent_dim', 2)
self.encoder = Encoder(self.latent_dim)
self.decoder = Decoder(self.latent_dim, self.dataParams['features'])
def forward(self, batch: Tensor):
        # Encoder: compress the whole sequence into a single latent code
        # z: (batch, latent_dim)
        z = self.encoder(batch)
        # Decoder: repeat the latent code across the timesteps ('size'),
        # so the recurrent decoder receives a sequence again
        z_repeated = Repeater((batch.shape[0], self.dataParams['size'], -1))(z)
        x_hat = self.decoder(z_repeated)
return z, x_hat
class AutoEncoderLightningOverrides:
def forward(self, x):
return self.network.forward(x)
def training_step(self, x, batch_nb):
# z, x_hat
_, x_hat = self.forward(x)
loss = mse_loss(x, x_hat)
return {'loss': loss}
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
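
For reference, a quick smoke test of the modularized AutoEncoder; the dataParams values below are assumptions chosen to match the hard-coded layer sizes in modules.py (the encoder stack expects 6 input features):

# Sketch only (not in this commit): shapes of a dummy forward pass.
import torch

dataParams = {'size': 9, 'features': 6}   # timesteps and feature count (assumed)
model = AutoEncoder(dataParams, latent_dim=2)
batch = torch.randn(32, dataParams['size'], dataParams['features'])
z, x_hat = model(batch)
print(z.shape)      # torch.Size([32, 2])   - one latent code per sequence
print(x_hat.shape)  # torch.Size([32, 9, 6]) - reconstructed sequence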

View File

@ -1,73 +0,0 @@
from torch.nn import Sequential, Linear, GRU, ReLU, Tanh
from .modules import *
from torch.nn.functional import mse_loss
#######################
# Basic AE-Implementation
class BasicAE(Module, ABC):
@property
def name(self):
return self.__class__.__name__
def __init__(self, dataParams, **kwargs):
super(BasicAE, self).__init__()
self.dataParams = dataParams
self.latent_dim = kwargs.get('latent_dim', 2)
self.encoder = self._build_encoder()
self.decoder = self._build_decoder(out_shape=self.dataParams['features'])
def _build_encoder(self):
encoder = Sequential(
Linear(6, 100, bias=True),
ReLU(),
Linear(100, 10, bias=True),
ReLU()
)
gru = Sequential(
TimeDistributed(encoder),
GRU(10, 10, batch_first=True),
RNNOutputFilter(only_last=True),
Linear(10, self.latent_dim)
)
return gru
def _build_decoder(self, out_shape):
decoder = Sequential(
Linear(10, 100, bias=True),
ReLU(),
Linear(100, out_shape, bias=True),
Tanh()
)
gru = Sequential(
GRU(self.latent_dim, 10, batch_first=True),
RNNOutputFilter(),
TimeDistributed(decoder)
)
return gru
def forward(self, batch: torch.Tensor):
# Encoder
# outputs, hidden (Batch, Timesteps aka. Size, Features / Latent Dim Size)
z = self.encoder(batch)
# Decoder
# Repeat the latent code across the timesteps so the decoder receives a sequence
z = Repeater((batch.shape[0], self.dataParams['size'], -1))(z)
x_hat = self.decoder(z)
return z, x_hat
class AELightningOverrides:
def training_step(self, x, batch_nb):
# z, x_hat
_, x_hat = self.forward(x)
loss = mse_loss(x, x_hat)
return {'loss': loss}
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')

View File

@@ -1,81 +0,0 @@
from torch.nn import Sequential, Linear, GRU, ReLU
from .modules import *
from torch.nn.functional import mse_loss
#######################
# Basic VAE-Implementation
class BasicVAE(Module, ABC):
@property
def name(self):
return self.__class__.__name__
def __init__(self, dataParams, **kwargs):
super(BasicVAE, self).__init__()
self.dataParams = dataParams
self.latent_dim = kwargs.get('latent_dim', 2)
self.encoder = self._build_encoder()
self.decoder = self._build_decoder(out_shape=self.dataParams['features'])
self.mu, self.logvar = Linear(10, self.latent_dim), Linear(10, self.latent_dim)
def _build_encoder(self):
linear_stack = Sequential(
Linear(6, 100, bias=True),
ReLU(),
Linear(100, 10, bias=True),
ReLU()
)
encoder = Sequential(
TimeDistributed(linear_stack),
GRU(10, 10, batch_first=True),
RNNOutputFilter(only_last=True),
)
return encoder
def reparameterize(self, mu, logvar):
# reparameterization trick: z = mu + eps * sigma, with eps ~ N(0, I)
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
def _build_decoder(self, out_shape):
decoder = Sequential(
Linear(10, 100, bias=True),
ReLU(),
Linear(100, out_shape, bias=True),
ReLU()
)
sequential_decoder = Sequential(
GRU(self.latent_dim, 10, batch_first=True),
RNNOutputFilter(),
TimeDistributed(decoder)
)
return sequential_decoder
def forward(self, batch):
encoding = self.encoder(batch)
mu_logvar = self.mu(encoding), self.logvar(encoding)
z = self.reparameterize(*mu_logvar)
repeat = Repeater((batch.shape[0], self.dataParams['size'], -1))
x_hat = self.decoder(repeat(z))
return (x_hat, *mu_logvar)
class VAELightningOverrides:
def training_step(self, x, batch_nb):
    x_hat, mu, logvar = self.forward(x)  # forward returns (x_hat, mu, logvar)
BCE = mse_loss(x_hat, x, reduction='mean')
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return {'loss': BCE + KLD}
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')

View File

@@ -1,6 +1,6 @@
import torch
import pytorch_lightning as pl
-from torch.nn import Module
+from torch.nn import Module, Linear, ReLU, Tanh, Sigmoid, Dropout, GRU
from abc import ABC, abstractmethod
@@ -85,9 +85,10 @@ class Repeater(Module):
self.shape = shape
def forward(self, x: torch.Tensor):
-        x.unsqueeze_(-2)
+        x = x.unsqueeze(-2)
return x.expand(self.shape)
class RNNOutputFilter(Module):
def __init__(self, return_output=True, only_last=False):
@@ -101,5 +102,108 @@ class RNNOutputFilter(Module):
return out if not self.only_last else out[:, -1, :]
#######################
# Network Modules
# Generators, Decoders, Encoders, Discriminators
class Discriminator(Module):
def __init__(self, latent_dim, dataParams, dropout=.0, activation=ReLU):
super(Discriminator, self).__init__()
self.dataParams = dataParams
self.latent_dim = latent_dim
self.l1 = Linear(self.latent_dim, self.dataParams['features'] * 10)
self.l2 = Linear(self.dataParams['features']*10, self.dataParams['features'] * 20)
self.lout = Linear(self.dataParams['features']*20, 1)
self.dropout = Dropout(dropout)
self.activation = activation()
self.sigmoid = Sigmoid()
def forward(self, x, **kwargs):
tensor = self.l1(x)
tensor = self.dropout(self.activation(tensor))
tensor = self.l2(tensor)
tensor = self.dropout(self.activation(tensor))
tensor = self.lout(tensor)
tensor = self.sigmoid(tensor)
return tensor
class DecoderLinearStack(Module):
def __init__(self, out_shape):
super(DecoderLinearStack, self).__init__()
self.l1 = Linear(10, 100, bias=True)
self.l2 = Linear(100, out_shape, bias=True)
self.activation = ReLU()
self.activation_out = Tanh()
def forward(self, x):
tensor = self.l1(x)
tensor = self.activation(tensor)
tensor = self.l2(tensor)
tensor = self.activation_out(tensor)
return tensor
class EncoderLinearStack(Module):
def __init__(self):
super(EncoderLinearStack, self).__init__()
self.l1 = Linear(6, 100, bias=True)
self.l2 = Linear(100, 10, bias=True)
self.activation = ReLU()
def forward(self, x):
tensor = self.l1(x)
tensor = self.activation(tensor)
tensor = self.l2(tensor)
tensor = self.activation(tensor)
return tensor
class Encoder(Module):
    def __init__(self, lat_dim, variational=False):
        super(Encoder, self).__init__()
        self.lat_dim = lat_dim
        self.variational = variational
self.l_stack = TimeDistributed(EncoderLinearStack())
self.gru = GRU(10, 10, batch_first=True)
self.filter = RNNOutputFilter(only_last=True)
if variational:
self.mu = Linear(10, self.lat_dim)
self.logvar = Linear(10, self.lat_dim)
else:
self.lat_dim_layer = Linear(10, self.lat_dim)
def forward(self, x):
tensor = self.l_stack(x)
tensor = self.gru(tensor)
tensor = self.filter(tensor)
if self.variational:
tensor = self.mu(tensor), self.logvar(tensor)
else:
tensor = self.lat_dim_layer(tensor)
return tensor
class Decoder(Module):
    def __init__(self, latent_dim, *args, variational=False):
        super(Decoder, self).__init__()
        self.variational = variational
        self.g = GRU(latent_dim, 10, batch_first=True)
        self.filter = RNNOutputFilter()
        self.l_stack = TimeDistributed(DecoderLinearStack(*args))
def forward(self, x):
tensor = self.g(x)
tensor = self.filter(tensor)
tensor = self.l_stack(tensor)
return tensor
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
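
To make the data flow through these helpers concrete, a short shape walkthrough mirroring what Encoder does internally; the batch and timestep sizes are arbitrary assumptions:

# Sketch only (not in this commit): shapes through the helper modules.
import torch

x = torch.randn(4, 7, 6)                         # (batch, timesteps, features)
h = TimeDistributed(EncoderLinearStack())(x)     # per-timestep MLP -> (4, 7, 10)
rnn_out = GRU(10, 10, batch_first=True)(h)       # (output, hidden) tuple
last = RNNOutputFilter(only_last=True)(rnn_out)  # keep last timestep -> (4, 10)
z = Linear(10, 2)(last)                          # latent code -> (4, 2)
z_seq = Repeater((4, 7, -1))(z)                  # unsqueeze + expand -> (4, 7, 2)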

View File

@@ -0,0 +1,53 @@
from .modules import *
from torch.nn.functional import mse_loss
#######################
# Basic VAE-Implementation
class VariationalAutoEncoder(Module, ABC):
@property
def name(self):
return self.__class__.__name__
def __init__(self, dataParams, **kwargs):
super(VariationalAutoEncoder, self).__init__()
self.dataParams = dataParams
self.latent_dim = kwargs.get('latent_dim', 2)
self.encoder = Encoder(self.latent_dim, variational=True)
self.decoder = Decoder(self.latent_dim, self.dataParams['features'], variational=True)
@staticmethod
def reparameterize(mu, logvar):
# reparameterization trick: z = mu + eps * sigma, with eps ~ N(0, I)
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
def forward(self, batch):
mu, logvar = self.encoder(batch)
z = self.reparameterize(mu, logvar)
repeat = Repeater((batch.shape[0], self.dataParams['size'], -1))
x_hat = self.decoder(repeat(z))
return x_hat, mu, logvar
class VariationalAutoEncoderLightningOverrides:
def forward(self, x):
return self.network.forward(x)
    def training_step(self, x, _):
        x_hat, mu, logvar = self.forward(x)  # forward returns (x_hat, mu, logvar)
        # reconstruction term (the reference implementation uses BCE, plain MSE here)
        recon_loss = mse_loss(x_hat, x, reduction='mean')
        # see Appendix B from VAE paper:
        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
        # https://arxiv.org/abs/1312.6114
        # KLD = -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
        # note the scale mismatch: the KLD is summed while the reconstruction is a mean
        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
        return {'loss': recon_loss + KLD}
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
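
As a sanity check on the loss terms used in training_step above, the KLD vanishes for a perfectly standard-normal posterior; a minimal sketch, not part of this commit:

# Sketch only: each KLD summand is 1 + log(sigma^2) - mu^2 - sigma^2,
# which is 1 + 0 - 0 - 1 = 0 when mu = 0 and logvar = 0.
import torch
from torch.nn.functional import mse_loss

mu, logvar = torch.zeros(4, 2), torch.zeros(4, 2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
assert KLD.item() == 0.0

x = torch.randn(4, 7, 6)
assert mse_loss(x, x).item() == 0.0   # perfect reconstruction -> zero loss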