diff --git a/.idea/ae_toolbox_torch.iml b/.idea/ae_toolbox_torch.iml
deleted file mode 100644
index 8159b14..0000000
--- a/.idea/ae_toolbox_torch.iml
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/dictionaries/illium.xml b/.idea/dictionaries/illium.xml
deleted file mode 100644
index 32b081c..0000000
--- a/.idea/dictionaries/illium.xml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-
-
- dataloader
- datasets
- isovists
-
-
-
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
deleted file mode 100644
index 0eefe32..0000000
--- a/.idea/inspectionProfiles/profiles_settings.xml
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
deleted file mode 100644
index a663f10..0000000
--- a/.idea/misc.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
deleted file mode 100644
index fe9fbe4..0000000
--- a/.idea/modules.xml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/other.xml b/.idea/other.xml
deleted file mode 100644
index 640fd80..0000000
--- a/.idea/other.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
deleted file mode 100644
index 94a25f7..0000000
--- a/.idea/vcs.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-
-
-
-
-
-
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
deleted file mode 100644
index 4fa74cf..0000000
--- a/.idea/workspace.xml
+++ /dev/null
@@ -1,284 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- 1564587418949
-
-
- 1564587418949
-
-
-
-
-
-
-
- 1565793753423
-
-
-
- 1565793753423
-
-
- 1565958589041
-
-
-
- 1565958589041
-
-
- 1565987964760
-
-
-
- 1565987964760
-
-
- 1566064016196
-
-
-
- 1566064016196
-
-
- 1566366992088
-
-
-
- 1566366992088
-
-
- 1566546840536
-
-
-
- 1566546840536
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- file://$PROJECT_DIR$/networks/modules.py
- 206
-
-
-
- file://$PROJECT_DIR$/networks/seperating_adversarial_auto_encoder.py
- 23
-
-
-
- file://$PROJECT_DIR$/viz/viz_latent.py
- 67
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/dataset.py b/dataset.py
index a5c85a1..6d160dc 100644
--- a/dataset.py
+++ b/dataset.py
@@ -167,7 +167,7 @@ class Trajectories(Dataset):
dataDict = dict()
for key, val in kwargs.items():
if key in self.isovistMeasures:
- dataDict[key] = torch.tensor(val)
+ dataDict[key] = torch.tensor(val, requires_grad=False)
# Check if all keys are of same length
assert len(set(x.size()[0] for x in dataDict.values() if torch.is_tensor(x))) <= 1
data = torch.stack([dataDict[key] for key in self.isovistMeasures], dim=-1)
diff --git a/networks/adverserial_auto_encoder.py b/networks/adverserial_auto_encoder.py
index 53352fe..faaeee8 100644
--- a/networks/adverserial_auto_encoder.py
+++ b/networks/adverserial_auto_encoder.py
@@ -1,7 +1,7 @@
+from torch.optim import Adam
+
from networks.auto_encoder import AutoEncoder
from torch.nn.functional import mse_loss
-from torch.nn import Sequential, Linear, ReLU, Dropout, Sigmoid
-from torch.distributions import Normal
from networks.modules import *
import torch
@@ -23,14 +23,10 @@ class AdversarialAutoEncoder(AutoEncoder):
return z, x_hat
-class AdversarialAELightningOverrides:
-
- @property
- def name(self):
- return self.__class__.__name__
-
- def forward(self, x):
- return self.network.forward(x)
+class AdversarialAELightningOverrides(LightningModuleOverrides):
+
+ def __init__(self):
+ super(AdversarialAELightningOverrides, self).__init__()
def training_step(self, batch, _, optimizer_i):
if optimizer_i == 0:
@@ -67,5 +63,12 @@ class AdversarialAELightningOverrides:
raise RuntimeError('This should not have happened, catch me if u can.')
+ # NOTE: PyTorch Lightning's configure_optimizers returns (optimizers, schedulers);
+ def configure_optimizers(self):
+ return [Adam(self.network.discriminator.parameters(), lr=0.02),
+ Adam([*self.network.encoder.parameters(), *self.network.decoder.parameters()], lr=0.02)],\
+ []
+
+
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
diff --git a/networks/auto_encoder.py b/networks/auto_encoder.py
index b72bc59..a834d1d 100644
--- a/networks/auto_encoder.py
+++ b/networks/auto_encoder.py
@@ -1,3 +1,5 @@
+from torch.optim import Adam
+
from .modules import *
from torch.nn.functional import mse_loss
from torch import Tensor
@@ -26,14 +28,10 @@ class AutoEncoder(AbstractNeuralNetwork, ABC):
return z, x_hat
-class AutoEncoderLightningOverrides:
+class AutoEncoderLightningOverrides(LightningModuleOverrides):
- @property
- def name(self):
- return self.__class__.__name__
-
- def forward(self, x):
- return self.network.forward(x)
+ def __init__(self):
+ super(AutoEncoderLightningOverrides, self).__init__()
def training_step(self, x, batch_nb):
# z, x_hat
@@ -41,6 +39,9 @@ class AutoEncoderLightningOverrides:
loss = mse_loss(x, x_hat)
return {'loss': loss}
+ def configure_optimizers(self):
+ return [Adam(self.parameters(), lr=0.02)]
+
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
diff --git a/networks/modules.py b/networks/modules.py
index 0cc5ccf..81a02bb 100644
--- a/networks/modules.py
+++ b/networks/modules.py
@@ -1,11 +1,34 @@
+import os
+
import torch
import pytorch_lightning as pl
-from torch.nn import Module, Linear, ReLU, Tanh, Sigmoid, Dropout, GRU, AvgPool2d
+from pytorch_lightning import data_loader
+from torch.nn import Module, Linear, ReLU, Tanh, Sigmoid, Dropout, GRU
from abc import ABC, abstractmethod
#######################
-# Abstract NN Class
+# Abstract NN Class & Lightning Module
+from torch.utils.data import DataLoader
+
+from dataset import DataContainer
+
+
+class LightningModuleOverrides:
+
+ @property
+ def name(self):
+ return self.__class__.__name__
+
+ def forward(self, x):
+ return self.network.forward(x)
+
+ @data_loader
+ def tng_dataloader(self):
+ num_workers = os.cpu_count() // 2
+ return DataLoader(DataContainer('data', self.size, self.step),
+ shuffle=True, batch_size=100, num_workers=num_workers)
+
class AbstractNeuralNetwork(Module):
diff --git a/networks/seperating_adversarial_auto_encoder.py b/networks/seperating_adversarial_auto_encoder.py
index 5bf5fc5..b0872f4 100644
--- a/networks/seperating_adversarial_auto_encoder.py
+++ b/networks/seperating_adversarial_auto_encoder.py
@@ -1,3 +1,5 @@
+from torch.optim import Adam
+
from networks.auto_encoder import AutoEncoder
from torch.nn.functional import mse_loss
from networks.modules import *
@@ -7,16 +9,15 @@ import torch
class SeperatingAdversarialAutoEncoder(Module):
def __init__(self, latent_dim, features, **kwargs):
- assert latent_dim % 2 == 0, f'Your latent space needs to be even, not odd, but was: "{latent_dim}"'
super(SeperatingAdversarialAutoEncoder, self).__init__()
self.latent_dim = latent_dim
self.features = features
- self.spatial_encoder = PoolingEncoder(self.latent_dim // 2)
- self.temporal_encoder = Encoder(self.latent_dim // 2)
+ self.spatial_encoder = PoolingEncoder(self.latent_dim)
+ self.temporal_encoder = Encoder(self.latent_dim)
self.decoder = Decoder(self.latent_dim, self.features)
- self.spatial_discriminator = Discriminator(self.latent_dim // 2, self.features)
- self.temporal_discriminator = Discriminator(self.latent_dim // 2, self.features)
+ self.spatial_discriminator = Discriminator(self.latent_dim, self.features)
+ self.temporal_discriminator = Discriminator(self.latent_dim, self.features)
def forward(self, batch):
# Encoder
@@ -30,14 +31,10 @@ class SeperatingAdversarialAutoEncoder(Module):
return z_spatial, z_temporal, x_hat
-class SeparatingAdversarialAELightningOverrides:
+class SeparatingAdversarialAELightningOverrides(LightningModuleOverrides):
- @property
- def name(self):
- return self.__class__.__name__
-
- def forward(self, x):
- return self.network.forward(x)
+ def __init__(self):
+ super(SeparatingAdversarialAELightningOverrides, self).__init__()
def training_step(self, batch, _, optimizer_i):
spatial_latent_fake, temporal_latent_fake, batch_hat = self.network.forward(batch)
@@ -91,6 +88,17 @@ class SeparatingAdversarialAELightningOverrides:
else:
raise RuntimeError('This should not have happened, catch me if u can.')
+ # NOTE: PyTorch Lightning's configure_optimizers returns (optimizers, schedulers);
+ def configure_optimizers(self):
+ return [Adam([*self.network.spatial_discriminator.parameters(), *self.network.spatial_encoder.parameters()]
+ , lr=0.02),
+ Adam([*self.network.temporal_discriminator.parameters(), *self.network.temporal_encoder.parameters()]
+ , lr=0.02),
+ Adam([*self.network.temporal_encoder.parameters(),
+ *self.network.spatial_encoder.parameters(),
+ *self.network.decoder.parameters()]
+ , lr=0.02)], []
+
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
diff --git a/networks/variational_auto_encoder.py b/networks/variational_auto_encoder.py
index 64cb7a9..aad4a54 100644
--- a/networks/variational_auto_encoder.py
+++ b/networks/variational_auto_encoder.py
@@ -1,3 +1,5 @@
+from torch.optim import Adam
+
from .modules import *
from torch.nn.functional import mse_loss
@@ -33,14 +35,10 @@ class VariationalAutoEncoder(AbstractNeuralNetwork, ABC):
return x_hat, mu, logvar
-class VariationalAutoEncoderLightningOverrides:
+class VariationalAutoEncoderLightningOverrides(LightningModuleOverrides):
- @property
- def name(self):
- return self.network.name
-
- def forward(self, x):
- return self.network.forward(x)
+ def __init__(self):
+ super(VariationalAutoEncoderLightningOverrides, self).__init__()
def training_step(self, x, _):
x_hat, logvar, mu = self.forward(x)
@@ -53,6 +51,9 @@ class VariationalAutoEncoderLightningOverrides:
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return {'loss': BCE + KLD}
+ def configure_optimizers(self):
+ return [Adam(self.parameters(), lr=0.02)]
+
if __name__ == '__main__':
raise PermissionError('Get out of here - never run this module')
diff --git a/run_models.py b/run_models.py
index 6362036..228430d 100644
--- a/run_models.py
+++ b/run_models.py
@@ -1,3 +1,5 @@
+from torch.distributions import Normal
+
from networks.auto_encoder import *
import os
import time
@@ -18,90 +20,54 @@ from argparse import Namespace
from argparse import ArgumentParser
args = ArgumentParser()
-args.add_argument('step')
-args.add_argument('features')
-args.add_argument('size')
-args.add_argument('latent_dim')
+args.add_argument('--step', default=0)
+args.add_argument('--features', default=0)
+args.add_argument('--size', default=0)
+args.add_argument('--latent_dim', default=0)
+args.add_argument('--model', default='Model')
# ToDo: How to implement this better?
# other_classes = [AutoEncoder, AutoEncoderLightningOverrides]
class Model(AutoEncoderLightningOverrides, LightningModule):
- def __init__(self, latent_dim=0, size=0, step=0, features=0, **kwargs):
- assert all([x in args for x in ['step', 'size', 'latent_dim', 'features']])
- self.size = args.size
- self.latent_dim = args.latent_dim
- self.features = args.features
- self.step = args.step
+ def __init__(self, parameters, **kwargs):
+ assert all([x in parameters for x in ['step', 'size', 'latent_dim', 'features']])
+ self.size = parameters.size
+ self.latent_dim = parameters.latent_dim
+ self.features = parameters.features
+ self.step = parameters.step
super(Model, self).__init__()
self.network = AutoEncoder(self.latent_dim, self.features)
- def configure_optimizers(self):
- return [Adam(self.parameters(), lr=0.02)]
-
- @data_loader
- def tng_dataloader(self):
- return DataLoader(DataContainer('data', self.size, self.step), shuffle=True, batch_size=100)
-
class AdversarialModel(AdversarialAELightningOverrides, LightningModule):
- @property
- def name(self):
- return self.network.name
-
- def __init__(self, args: Namespace, **kwargs):
- assert all([x in args for x in ['step', 'size', 'latent_dim', 'features']])
- self.size = args.size
- self.latent_dim = args.latent_dim
- self.features = args.features
- self.step = args.step
+ def __init__(self, parameters: Namespace, **kwargs):
+ assert all([x in parameters for x in ['step', 'size', 'latent_dim', 'features']])
+ self.size = parameters.size
+ self.latent_dim = parameters.latent_dim
+ self.features = parameters.features
+ self.step = parameters.step
super(AdversarialModel, self).__init__()
self.normal = Normal(0, 1)
self.network = AdversarialAutoEncoder(self.latent_dim, self.features)
pass
- # This is Fucked up, why do i need to put an additional empty list here?
- def configure_optimizers(self):
- return [Adam(self.network.discriminator.parameters(), lr=0.02),
- Adam([*self.network.encoder.parameters(), *self.network.decoder.parameters()], lr=0.02)],\
- []
-
- @data_loader
- def tng_dataloader(self):
- return DataLoader(DataContainer('data', self.size, self.step), shuffle=True, batch_size=100)
-
class SeparatingAdversarialModel(SeparatingAdversarialAELightningOverrides, LightningModule):
- def __init__(self, args: Namespace, **kwargs):
- assert all([x in args for x in ['step', 'size', 'latent_dim', 'features']])
- self.size = args.size
- self.latent_dim = args.latent_dim
- self.features = args.features
- self.step = args.step
+ def __init__(self, parameters: Namespace, **kwargs):
+ assert all([x in parameters for x in ['step', 'size', 'latent_dim', 'features']])
+ self.size = parameters.size
+ self.latent_dim = parameters.latent_dim
+ self.features = parameters.features
+ self.step = parameters.step
super(SeparatingAdversarialModel, self).__init__()
self.normal = Normal(0, 1)
self.network = SeperatingAdversarialAutoEncoder(self.latent_dim, self.features, **kwargs)
pass
- # This is Fucked up, why do i need to put an additional empty list here?
- def configure_optimizers(self):
- return [Adam([*self.network.spatial_discriminator.parameters(), *self.network.spatial_encoder.parameters()]
- , lr=0.02),
- Adam([*self.network.temporal_discriminator.parameters(), *self.network.temporal_encoder.parameters()]
- , lr=0.02),
- Adam([*self.network.temporal_encoder.parameters(),
- *self.network.spatial_encoder.parameters(),
- *self.network.decoder.parameters()]
- , lr=0.02)], []
-
- @data_loader
- def tng_dataloader(self):
- num_workers = os.cpu_count() // 2
- return DataLoader(DataContainer('data', self.size, self.step), shuffle=True, batch_size=100, num_workers=num_workers)
-
if __name__ == '__main__':
features = 6
@@ -110,7 +76,7 @@ if __name__ == '__main__':
arguments = args.parse_args()
arguments.__dict__.update(tag_dict)
- model = SeparatingAdversarialModel(arguments)
+ model = globals()[arguments.model](arguments)
# PyTorch summarywriter with a few bells and whistles
outpath = os.path.join(os.getcwd(), 'output', model.name, time.asctime().replace(' ', '_').replace(':', '-'))
@@ -124,7 +90,7 @@ if __name__ == '__main__':
filepath=os.path.join(outpath, 'weights.ckpt'),
save_best_only=True,
verbose=True,
- monitor='tng_loss', # val_loss
+ monitor='val_loss', # val_loss
mode='min',
)
diff --git a/viz/viz_latent.py b/viz/viz_latent.py
index 140304c..15e61f0 100644
--- a/viz/viz_latent.py
+++ b/viz/viz_latent.py
@@ -1,21 +1,17 @@
-# TODO: THIS
-import seaborn as sb
-import torch
-from torch.utils.data import DataLoader
-from pytorch_lightning import data_loader
-from dataset import DataContainer
from collections import defaultdict
from tqdm import tqdm
-import os
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
-import seaborn as sns; sns.set()
+import seaborn as sns
import matplotlib.pyplot as plt
from run_models import *
+sns.set()
+
+
def search_for_weights(folder):
while not os.path.exists(folder):
if len(os.path.split(folder)) >= 50:
@@ -32,6 +28,8 @@ def search_for_weights(folder):
def load_and_predict(path_like_element):
+ if any([x.name.endswith('.png') for x in os.scandir(os.path.dirname(path_like_element))]):
+ return
# Define Loop to search for models and folder with visualizations
model = globals()[path_like_element.path.split(os.sep)[-3]]
@@ -46,36 +44,50 @@ def load_and_predict(path_like_element):
pretrained_model.eval()
pretrained_model.freeze()
- # Load the data for prediction
- dataset = DataContainer(os.path.join(os.pardir, 'data'), 5, 5)
+ with torch.no_grad():
- # Do the inference
- prediction_dict = defaultdict(list)
- for i in tqdm(range(len(dataset)), total=len(dataset)):
- p_X = pretrained_model(dataset[i].unsqueeze(0))
- for idx in range(len(p_X) - 1):
- prediction_dict[idx].append(p_X[idx])
+ # Load the data for prediction
+ dataset = DataContainer(os.path.join(os.pardir, 'data'), 5, 5)
+
+ # Do the inference
+ prediction_dict = defaultdict(list)
+ for i in tqdm(range(len(dataset)), total=len(dataset)):
+ p_X = pretrained_model(dataset[i].unsqueeze(0))
+ for idx in range(len(p_X) - 1):
+ prediction_dict[idx].append(p_X[idx])
predictions = [torch.cat(prediction).detach().numpy() for prediction in prediction_dict.values()]
- for prediction in predictions:
- viz_latent(prediction)
+ for idx, prediction in enumerate(predictions):
+ plot, _ = viz_latent(prediction)
+ plot.savefig(os.path.join(os.path.dirname(path_like_element), f'latent_space_{idx}.png'))
-def viz_latent(prediction):
+def viz_latent(prediction, title=f'Latent Space '):
if prediction.shape[-1] <= 1:
raise ValueError('How did this happen?')
elif prediction.shape[-1] == 2:
ax = sns.scatterplot(x=prediction[:, 0], y=prediction[:, 1])
- plt.show()
- return ax
+ try:
+ plt.show()
+ except:
+ pass
+ return ax.figure, (ax)
else:
fig, axs = plt.subplots(ncols=2)
- predictions_pca = PCA(n_components=2)
- predictions_tsne = TSNE(n_components=2)
- pca_plot = sns.scatterplot(x=predictions_pca[:, 0], y=predictions_pca[:, 1], ax=axs[0])
- tsne_plot = sns.scatterplot(x=predictions_tsne[:, 0], y=predictions_tsne[:, 1], ax=axs[1])
- plt.show()
- return fig, axs, pca_plot, tsne_plot
+ plots = []
+ for idx, dim_reducer in enumerate([PCA, TSNE]):
+ predictions_reduced = dim_reducer(n_components=2).fit_transform(prediction)
+ plot = sns.scatterplot(x=predictions_reduced[:, 0], y=predictions_reduced[:, 1],
+ ax=axs[idx])
+ plot.set_title(dim_reducer.__name__)
+ plots.append(plot)
+
+ try:
+ plt.show()
+ except:
+ pass
+ return fig, (*plots, )
+
if __name__ == '__main__':
path = 'output'