Visualization approach n

Si11ium committed 2019-09-29 09:37:30 +02:00
parent 1386cdfd33
commit a70c9b7fef
15 changed files with 652 additions and 197 deletions


@@ -1,9 +1,13 @@
import os
from operator import mul
from functools import reduce
import torch
from torch import randn
import pytorch_lightning as pl
from pytorch_lightning import data_loader
from torch.nn import Module, Linear, ReLU, Tanh, Sigmoid, Dropout, GRU
from torch.nn import Module, Linear, ReLU, Sigmoid, Dropout, GRU
from torchvision.transforms import Normalize
from abc import ABC, abstractmethod
@@ -29,8 +33,16 @@ class LightningModuleOverrides:
@data_loader
def tng_dataloader(self):
num_workers = 0 # os.cpu_count() // 2
return DataLoader(DataContainer(os.path.join('data', 'training'), self.size, self.step),
return DataLoader(DataContainer(os.path.join('data', 'training'),
self.size, self.step, transforms=[Normalize]),
shuffle=True, batch_size=10000, num_workers=num_workers)
"""
@data_loader
def val_dataloader(self):
num_workers = 0 # os.cpu_count() // 2
return DataLoader(DataContainer(os.path.join('data', 'validation'), self.size, self.step),
shuffle=True, batch_size=100, num_workers=num_workers)
"""
class AbstractNeuralNetwork(Module):
@@ -82,6 +94,7 @@ class LightningModule(pl.LightningModule, ABC):
# return DataLoader(MNIST(os.getcwd(), train=True, download=True,
# transform=transforms.ToTensor()), batch_size=32)
"""
@pl.data_loader
def val_dataloader(self):
# OPTIONAL
@@ -91,7 +104,7 @@ class LightningModule(pl.LightningModule, ABC):
def test_dataloader(self):
# OPTIONAL
pass
"""
#######################
# Utility Modules
@@ -185,7 +198,7 @@ class DecoderLinearStack(Module):
self.l1 = Linear(10, 100, bias=True)
self.l2 = Linear(100, out_shape, bias=True)
self.activation = ReLU()
self.activation_out = Tanh()
self.activation_out = Sigmoid()
def forward(self, x):
tensor = self.l1(x)
@@ -197,30 +210,53 @@
class EncoderLinearStack(Module):
def __init__(self):
@property
def shape(self):
x = randn(self.features).unsqueeze(0)
output = self(x)
return output.shape[1:]
def __init__(self, features=6, separated=False, use_bias=True):
super(EncoderLinearStack, self).__init__()
# FixMe: Get Hardcoded shit out of here
self.l1 = Linear(6, 100, bias=True)
self.l2 = Linear(100, 10, bias=True)
self.separated = separated
self.features = features
if self.separated:
self.l1s = [Linear(1, 10, bias=use_bias) for _ in range(self.features)]
self.l2s = [Linear(10, 5, bias=use_bias) for _ in range(self.features)]
else:
self.l1 = Linear(self.features, self.features * 10, bias=use_bias)
self.l2 = Linear(self.features * 10, self.features * 5, bias=use_bias)
self.l3 = Linear(self.features * 5, 10, use_bias)
self.activation = ReLU()
def forward(self, x):
tensor = self.l1(x)
tensor = self.activation(tensor)
tensor = self.l2(tensor)
if self.separated:
x = x.unsqueeze(-1)
tensors = [self.l1s[idx](x[:, idx, :]) for idx in range(len(self.l1s))]
tensors = [self.activation(tensor) for tensor in tensors]
tensors = [self.l2s[idx](tensors[idx]) for idx in range(len(self.l2s))]
tensors = [self.activation(tensor) for tensor in tensors]
tensor = torch.cat(tensors, dim=-1)
else:
tensor = self.l1(x)
tensor = self.activation(tensor)
tensor = self.l2(tensor)
tensor = self.l3(tensor)
tensor = self.activation(tensor)
return tensor
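A minimal usage sketch for the reworked EncoderLinearStack above (not part of the committed diff): both the default shared stack and the new per-feature separated=True path take a (batch, features) input and return a width-10 encoding, and the new shape property reports that output width. The import path below is an assumption, not taken from the diff.

import torch
from model_modules import EncoderLinearStack  # assumed module path, not confirmed by the diff

shared = EncoderLinearStack(features=6)                  # one shared Linear stack across all 6 features
split = EncoderLinearStack(features=6, separated=True)   # one small Linear stack per feature, concatenated
x = torch.randn(4, 6)                                     # batch of 4 samples with 6 features each
print(shared(x).shape, split(x).shape)                    # expected: torch.Size([4, 10]) for both paths
print(shared.shape)                                        # new property: torch.Size([10])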
class Encoder(Module):
def __init__(self, lat_dim, variational=False):
def __init__(self, lat_dim, variational=False, separate_features=False, with_dense=True, features=6):
self.lat_dim = lat_dim
self.features = features
self.variational = variational
super(Encoder, self).__init__()
self.l_stack = TimeDistributed(EncoderLinearStack())
self.gru = GRU(10, 10, batch_first=True)
self.l_stack = TimeDistributed(EncoderLinearStack(separated=separate_features,
features=features)) if with_dense else False
self.gru = GRU(10 if with_dense else self.features, 10, batch_first=True)
self.filter = RNNOutputFilter(only_last=True)
if variational:
self.mu = Linear(10, self.lat_dim)
@@ -229,8 +265,9 @@ class Encoder(Module):
self.lat_dim_layer = Linear(10, self.lat_dim)
def forward(self, x):
tensor = self.l_stack(x)
tensor = self.gru(tensor)
if self.l_stack:
x = self.l_stack(x)
tensor = self.gru(x)
tensor = self.filter(tensor)
if self.variational:
tensor = self.mu(tensor), self.logvar(tensor)
@@ -262,10 +299,10 @@ class PoolingEncoder(Module):
self.p = AvgDimPool()
self.l = EncoderLinearStack()
if variational:
self.mu = Linear(10, self.lat_dim)
self.logvar = Linear(10, self.lat_dim)
self.mu = Linear(self.l.shape, self.lat_dim)
self.logvar = Linear(self.l.shape, self.lat_dim)
else:
self.lat_dim_layer = Linear(10, self.lat_dim)
self.lat_dim_layer = Linear(reduce(mul, self.l.shape), self.lat_dim)
def forward(self, x):
tensor = self.p(x)
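A small sketch (not part of the committed diff) of how PoolingEncoder now derives the Linear input size from the encoder stack instead of the hardcoded 10: the new shape property returns the stack's output shape, and reduce(mul, ...) flattens that torch.Size into a plain int for lat_dim_layer. The import path and the lat_dim value are assumptions for illustration.

from functools import reduce
from operator import mul
from torch.nn import Linear
from model_modules import EncoderLinearStack  # assumed module path, not confirmed by the diff

stack = EncoderLinearStack()            # default features=6 -> output width 10
out_shape = stack.shape                  # torch.Size([10]) via the new shape property
in_features = reduce(mul, out_shape)     # flattens the Size into a plain int (here: 10)
lat_dim_layer = Linear(in_features, 2)   # mirrors the non-variational lat_dim_layer, with lat_dim=2 as an example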