commit 578727d043 (parent 4c489237d7)

    transition
@@ -5,7 +5,6 @@ from distutils.util import strtobool
 import os
 import ast
 from abc import ABC, abstractmethod
-from torch.nn.modules import BatchNorm1d

 from tqdm import tqdm
 import numpy as np
@@ -108,11 +107,6 @@ class AbstractDataset(ConcatDataset, ABC):

 class DataContainer(AbstractDataset):

-    @staticmethod
-    def calculate_model_shapes(size, step, **kwargs):
-
-        return
-
     @property
     def raw_filenames(self):
         return [f'{x}_trajec.csv' for x in self.maps]
@@ -209,7 +203,7 @@ class Trajectories(Dataset):

     def get_both_by_key(self, item):
         data = self.data[item:item + self.size * self.step or None:self.step]
-        return data[0]
+        return data

     def __len__(self):
         total_len = self.data.size()[0]
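The slice in get_both_by_key is easy to misread: the stop index is `item + self.size * self.step or None`, and the `or None` converts a stop of 0 (falsy) into None, i.e. "slice to the end", which only matters when item is negative. A standalone sketch of the idiom with made-up size/step values (not the repo's Dataset class):

    # Windowed strided slice, as used in get_both_by_key
    data = list(range(100))
    size, step = 9, 5

    def window(item):
        # stop = item + size * step; `or None` turns a stop of 0 into None
        # ("to the end"), which matters when item is negative
        return data[item:item + size * step or None:step]

    print(window(0))    # [0, 5, 10, ..., 40] -> `size` samples, `step` apart
    print(window(-45))  # stop would be 0 -> None, so the slice reaches the end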
@@ -1,6 +0,0 @@
-#ToDo: We need a metric that analysis sequences of coordinates of arbitrary length and clusters them based
-# on their embedded type of mevement
-
-# ToDo: we ne a function, that compares the clustering outcome of our movement analysis with the AE output.
-
-# Do the variants of AE really adjust their latent space regarding the embedded moveement type?
@@ -11,9 +11,10 @@ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

 class AdversarialAE(AutoEncoder):

-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args, train_on_predictions=False, use_norm=False, **kwargs):
         super(AdversarialAE, self).__init__(*args, **kwargs)
-        self.discriminator = Discriminator(self.latent_dim, self.features)
+        self.discriminator = Discriminator(self.latent_dim, self.features, use_norm=use_norm)
+        self.train_on_predictions = train_on_predictions

     def forward(self, batch):
         # Encoder
@@ -25,13 +26,6 @@ class AdversarialAE(AutoEncoder):
         x_hat = self.decoder(z_repeatet)
         return z, x_hat

-
-class AdversarialAE_LO(LightningModuleOverrides):
-
-    def __init__(self, train_on_predictions=False):
-        super(AdversarialAE_LO, self).__init__()
-        self.train_on_predictions = train_on_predictions
-
     def training_step(self, batch, _, optimizer_i):
         x, y = batch
         z, x_hat = self.forward(x)
@@ -66,8 +60,7 @@ class AdversarialAE_LO(LightningModuleOverrides):
         else:
             raise RuntimeError('This should not have happened, catch me if u can.')

-    # This is Fucked up, why do i need to put an additional empty list here?
+    #FIXME: This is Fucked up, why do i need to put an additional empty list here?
     def configure_optimizers(self):
         return [Adam(self.network.discriminator.parameters(), lr=0.02),
                 Adam([*self.network.encoder.parameters(), *self.network.decoder.parameters()], lr=0.02), ],\
@@ -27,12 +27,6 @@ class AE_WithAttention(AbstractNeuralNetwork, ABC):
         x_hat = self.decoder(z_repeatet)
         return z, x_hat

-
-class AE_WithAttention_LO(LightningModuleOverrides):
-
-    def __init__(self):
-        super(AE_WithAttention_LO, self).__init__()
-
     def training_step(self, x, batch_nb):
         # ToDo: We need a new loss function, fullfilling all attention needs
         # z, x_hat
@@ -9,9 +9,11 @@ from torch import Tensor
 # Basic AE-Implementation
 class AutoEncoder(AbstractNeuralNetwork, ABC):

-    def __init__(self, latent_dim: int=0, features: int = 0, use_norm=True, **kwargs):
+    def __init__(self, latent_dim: int=0, features: int = 0, use_norm=True,
+                 train_on_predictions=False, **kwargs):
         assert latent_dim and features
         super(AutoEncoder, self).__init__()
+        self.train_on_predictions = train_on_predictions
         self.latent_dim = latent_dim
         self.features = features
         self.encoder = Encoder(self.latent_dim, use_norm=use_norm)
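This hunk folds train_on_predictions into AutoEncoder itself (the separate AutoEncoder_LO override class is deleted just below). The diff does not show the loss, so the following is only a hedged sketch of how such a switch is typically consumed in a training step, assuming y carries the future window and using MSE as a stand-in loss:

    import torch.nn.functional as F

    def training_step(self, batch, batch_nb):
        x, y = batch                    # x: current window, y: future window
        z, x_hat = self.forward(x)
        # predict the future window vs. reconstruct the input
        target = y if self.train_on_predictions else x
        loss = F.mse_loss(x_hat, target)
        return {'loss': loss}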
@@ -27,13 +29,6 @@ class AutoEncoder(AbstractNeuralNetwork, ABC):
         x_hat = self.decoder(z_repeatet)
         return z, x_hat

-
-class AutoEncoder_LO(LightningModuleOverrides):
-
-    def __init__(self, train_on_predictions=False):
-        super(AutoEncoder_LO, self).__init__()
-        self.train_on_predictions = train_on_predictions
-
     def training_step(self, batch, batch_nb):
         x, y = batch
         # z, x_hat
@@ -5,9 +5,7 @@ from functools import reduce
 import torch
 from torch import randn
 import pytorch_lightning as pl
-from pytorch_lightning import data_loader
 from torch.nn import Module, Linear, ReLU, Sigmoid, Dropout, GRU, Tanh
-from torchvision.transforms import Normalize

 from abc import ABC, abstractmethod

@@ -27,21 +25,12 @@ class LightningModuleOverrides:
     def name(self):
         return self.__class__.__name__

-    def forward(self, x):
-        return self.network.forward(x)
-
-    @data_loader
+    @pl.data_loader
     def train_dataloader(self):
         num_workers = 0  # os.cpu_count() // 2
         return DataLoader(DataContainer(os.path.join('data', 'training'), self.size, self.step),
                           shuffle=True, batch_size=10000, num_workers=num_workers)
-    """
-    @data_loader
-    def val_dataloader(self):
-        num_workers = 0  # os.cpu_count() // 2
-        return DataLoader(DataContainer(os.path.join('data', 'validation'), self.size, self.step),
-                          shuffle=True, batch_size=100, num_workers=num_workers)
-    """


 class AbstractNeuralNetwork(Module):
@@ -56,53 +45,6 @@ class AbstractNeuralNetwork(Module):
     def forward(self, batch):
         pass

-
-######################
-# Abstract Network class following the Lightning Syntax
-class LightningModule(pl.LightningModule, ABC):
-
-    def __init__(self):
-        super(LightningModule, self).__init__()
-
-    @abstractmethod
-    def forward(self, x):
-        raise NotImplementedError
-
-    @abstractmethod
-    def training_step(self, batch, batch_nb):
-        # REQUIRED
-        raise NotImplementedError
-
-    @abstractmethod
-    def configure_optimizers(self):
-        # REQUIRED
-        raise NotImplementedError
-
-    @pl.data_loader
-    def train_dataloader(self):
-        # REQUIRED
-        raise NotImplementedError
-
-    """
-    def validation_step(self, batch, batch_nb):
-        # OPTIONAL
-        pass
-
-    def validation_end(self, outputs):
-        # OPTIONAL
-        pass
-
-    @pl.data_loader
-    def val_dataloader(self):
-        # OPTIONAL
-        pass
-
-    @pl.data_loader
-    def test_dataloader(self):
-        # OPTIONAL
-        pass
-    """
-
 #######################
 # Utility Modules
 class TimeDistributed(Module):
@@ -167,12 +109,14 @@ class AvgDimPool(Module):
 # Generators, Decoders, Encoders, Discriminators
 class Discriminator(Module):

-    def __init__(self, latent_dim, features, dropout=.0, activation=ReLU):
+    def __init__(self, latent_dim, features, dropout=.0, activation=ReLU, use_norm=False):
         super(Discriminator, self).__init__()
         self.features = features
         self.latent_dim = latent_dim
         self.l1 = Linear(self.latent_dim, self.features * 10)
+        self.norm1 = torch.nn.BatchNorm1d(self.features * 10) if use_norm else False
         self.l2 = Linear(self.features * 10, self.features * 20)
+        self.norm2 = torch.nn.BatchNorm1d(self.features * 20) if use_norm else False
         self.lout = Linear(self.features * 20, 1)
         self.dropout = Dropout(dropout)
         self.activation = activation()
@@ -180,9 +124,15 @@ class Discriminator(Module):

     def forward(self, x, **kwargs):
         tensor = self.l1(x)
-        tensor = self.dropout(self.activation(tensor))
+        tensor = self.dropout(tensor)
+        if self.norm1:
+            tensor = self.norm1(tensor)
+        tensor = self.activation(tensor)
         tensor = self.l2(tensor)
-        tensor = self.dropout(self.activation(tensor))
+        tensor = self.dropout(tensor)
+        if self.norm2:
+            tensor = self.norm2(tensor)
+        tensor = self.activation(tensor)
         tensor = self.lout(tensor)
         tensor = self.sigmoid(tensor)
         return tensor
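The rewritten forward splits the former dropout(activation(...)) so an optional BatchNorm1d can sit between dropout and activation, guarded by a truthiness check on self.norm1/self.norm2 (which hold False when use_norm is off). A branch-free alternative, sketched here with torch.nn.Identity as a stand-in no-op (an assumption, not the repo's code), keeps the same Linear -> Dropout -> Norm -> Activation order:

    from torch import nn

    class Block(nn.Module):
        def __init__(self, dim, use_norm=False, dropout=0.0):
            super().__init__()
            self.lin = nn.Linear(dim, dim)
            self.drop = nn.Dropout(dropout)
            # Identity is a no-op, so forward() needs no if-branch
            self.norm = nn.BatchNorm1d(dim) if use_norm else nn.Identity()
            self.act = nn.ReLU()

        def forward(self, x):
            return self.act(self.norm(self.drop(self.lin(x))))

One caveat of the pattern in the diff: storing False instead of a module means the disabled norm is never registered as a submodule; harmless here, but worth knowing.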
@@ -296,13 +246,13 @@ class AttentionEncoder(Module):

 class PoolingEncoder(Module):

-    def __init__(self, lat_dim, variational=False):
+    def __init__(self, lat_dim, variational=False, use_norm=True):
         self.lat_dim = lat_dim
         self.variational = variational

         super(PoolingEncoder, self).__init__()
         self.p = AvgDimPool()
-        self.l = EncoderLinearStack()
+        self.l = EncoderLinearStack(use_norm=use_norm)
         if variational:
             self.mu = Linear(self.l.shape, self.lat_dim)
             self.logvar = Linear(self.l.shape, self.lat_dim)
@@ -6,12 +6,13 @@ import torch

 class SeperatingAAE(Module):

-    def __init__(self, latent_dim, features, use_norm=True):
+    def __init__(self, latent_dim, features, train_on_predictions=False, use_norm=True):
         super(SeperatingAAE, self).__init__()

         self.latent_dim = latent_dim
         self.features = features
-        self.spatial_encoder = PoolingEncoder(self.latent_dim)
+        self.train_on_predictions = train_on_predictions
+        self.spatial_encoder = PoolingEncoder(self.latent_dim, use_norm=use_norm)
         self.temporal_encoder = Encoder(self.latent_dim, use_dense=False, use_norm=use_norm)
         self.decoder = Decoder(self.latent_dim * 2, self.features, use_norm=use_norm)
         self.spatial_discriminator = Discriminator(self.latent_dim, self.features)
@@ -28,13 +29,6 @@ class SeperatingAAE(Module):
         x_hat = self.decoder(z_repeatet)
         return z_spatial, z_temporal, x_hat

-
-class SeparatingAAE_LO(LightningModuleOverrides):
-
-    def __init__(self, train_on_predictions=False):
-        super(SeparatingAAE_LO, self).__init__()
-        self.train_on_predictions = train_on_predictions
-
     def training_step(self, batch, _, optimizer_i):
         x, y = batch
         spatial_latent_fake, temporal_latent_fake, x_hat = self.network.forward(x)
@@ -92,7 +86,7 @@ class SeparatingAAE_LO(LightningModuleOverrides):
         else:
             raise RuntimeError('This should not have happened, catch me if u can.')

-    # This is Fucked up, why do i need to put an additional empty list here?
+    #FixMe: This is Fucked up, why do i need to put an additional empty list here?
     def configure_optimizers(self):
         return [Adam([*self.network.spatial_discriminator.parameters(), *self.network.spatial_encoder.parameters()]
                      , lr=0.02),
@@ -12,7 +12,7 @@ class VariationalAE(AbstractNeuralNetwork, ABC):
     def name(self):
         return self.__class__.__name__

-    def __init__(self, latent_dim=0, features=0, use_norm=True, **kwargs):
+    def __init__(self, latent_dim=0, features=0, use_norm=True, train_on_predictions=False, **kwargs):
         assert latent_dim and features
         super(VariationalAE, self).__init__()
         self.features = features
@@ -34,13 +34,6 @@ class VariationalAE(AbstractNeuralNetwork, ABC):
         x_hat = self.decoder(repeat(z))
         return mu, logvar, x_hat

-
-class VAE_LO(LightningModuleOverrides):
-
-    def __init__(self, train_on_predictions=False):
-        super(VAE_LO, self).__init__()
-        self.train_on_predictions=train_on_predictions
-
     def training_step(self, batch, _):
         x, y = batch
         mu, logvar, x_hat = self.forward(x)
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1 +0,0 @@
-{"name": "default", "version": 0, "tags_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\AE_Model\\Sun_Sep_29_12-35-27_2019\\default\\version_0/meta_tags.csv", "metrics_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\AE_Model\\Sun_Sep_29_12-35-27_2019\\default\\version_0/metrics.csv", "autosave": false, "description": null, "created_at": "2019-09-29 10:35:27.965484", "exp_hash": "default_v0"}
@@ -1,8 +0,0 @@
-key,value
-step,5
-features,6
-size,9
-latent_dim,2
-model,AE_Model
-refresh,False
-future_predictions,False
@@ -1,2 +0,0 @@
-loss,epoch,created_at
-1.454,0.0,2019-09-29 10:41:14.039965
Binary file not shown.
@@ -1 +0,0 @@
-{"name": "default", "version": 0, "tags_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\AE_Model\\Sun_Sep_29_12-44-13_2019\\default\\version_0/meta_tags.csv", "metrics_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\AE_Model\\Sun_Sep_29_12-44-13_2019\\default\\version_0/metrics.csv", "autosave": false, "description": null, "created_at": "2019-09-29 10:44:13.614075", "exp_hash": "default_v0"}
@@ -1,8 +0,0 @@
-key,value
-step,5
-features,6
-size,9
-latent_dim,2
-model,AE_Model
-refresh,False
-future_predictions,True
Binary file not shown.
@@ -1 +0,0 @@
-{"name": "default", "version": 0, "tags_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\AE_Model\\Sun_Sep_29_12-44-29_2019\\default\\version_0/meta_tags.csv", "metrics_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\AE_Model\\Sun_Sep_29_12-44-29_2019\\default\\version_0/metrics.csv", "autosave": false, "description": null, "created_at": "2019-09-29 10:44:29.534657", "exp_hash": "default_v0"}
@@ -1,8 +0,0 @@
-key,value
-step,5
-features,6
-size,9
-latent_dim,2
-model,AE_Model
-refresh,False
-future_predictions,True
@@ -1,3 +0,0 @@
-loss,epoch,created_at
-1.372,0.0,2019-09-29 10:44:34.492200
-0.267,1.0,2019-09-29 10:54:22.294891
Binary file not shown.
@@ -1 +0,0 @@
-{"name": "default", "version": 0, "tags_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\SAAE_Model\\Sun_Sep_29_12-54-18_2019\\default\\version_0/meta_tags.csv", "metrics_path": "C:\\Users\\steff\\Google Drive\\LMU\\Research\\ae_toolbox_torch\\output\\SAAE_Model\\Sun_Sep_29_12-54-18_2019\\default\\version_0/metrics.csv", "autosave": false, "description": null, "created_at": "2019-09-29 10:54:18.863108", "exp_hash": "default_v0"}
@@ -1,8 +0,0 @@
-key,value
-step,5
-features,6
-size,9
-latent_dim,2
-model,SAAE_Model
-refresh,False
-future_predictions,True
@@ -1,48 +0,0 @@
-loss,epoch,created_at
-0.471,0.0,2019-09-29 10:54:25.127533
-0.076,1.0,2019-09-29 11:04:46.930249
-0.069,2.0,2019-09-29 11:14:02.826272
-0.089,3.0,2019-09-29 11:23:11.776641
-0.068,4.0,2019-09-29 11:32:19.540023
-0.066,5.0,2019-09-29 11:41:27.129607
-0.067,6.0,2019-09-29 11:50:33.679401
-0.071,7.0,2019-09-29 11:59:38.747566
-0.068,8.0,2019-09-29 12:08:46.713434
-0.067,9.0,2019-09-29 12:17:55.462982
-0.07,10.0,2019-09-29 12:27:03.690029
-0.066,11.0,2019-09-29 12:36:10.274328
-0.066,12.0,2019-09-29 12:45:17.844777
-0.064,13.0,2019-09-29 12:54:25.440055
-0.064,14.0,2019-09-29 13:03:32.662178
-0.063,15.0,2019-09-29 13:12:39.334202
-0.063,16.0,2019-09-29 13:21:45.282941
-0.063,17.0,2019-09-29 13:30:50.702369
-0.062,18.0,2019-09-29 13:39:56.479320
-0.062,19.0,2019-09-29 13:49:03.009732
-0.062,20.0,2019-09-29 13:58:09.206604
-0.062,21.0,2019-09-29 14:07:16.674273
-0.062,22.0,2019-09-29 14:16:32.081830
-0.061,23.0,2019-09-29 14:25:47.816996
-0.061,24.0,2019-09-29 14:34:59.053729
-0.061,25.0,2019-09-29 14:44:12.326646
-0.061,26.0,2019-09-29 14:53:20.545392
-0.061,27.0,2019-09-29 15:02:29.076439
-0.061,28.0,2019-09-29 15:11:40.214715
-0.061,29.0,2019-09-29 15:20:47.708415
-0.061,30.0,2019-09-29 15:29:55.151460
-0.061,31.0,2019-09-29 15:39:02.450643
-0.061,32.0,2019-09-29 15:48:13.678387
-0.061,33.0,2019-09-29 15:57:22.619685
-0.061,34.0,2019-09-29 16:06:32.276767
-0.061,35.0,2019-09-29 16:15:39.175331
-0.061,36.0,2019-09-29 16:24:48.090009
-0.061,37.0,2019-09-29 16:33:53.686359
-0.061,38.0,2019-09-29 16:43:01.209447
-0.061,39.0,2019-09-29 16:52:09.086088
-0.061,40.0,2019-09-29 17:01:17.997290
-0.06,41.0,2019-09-29 17:10:24.687865
-0.061,42.0,2019-09-29 17:19:33.252531
-0.061,43.0,2019-09-29 17:28:40.294962
-0.06,44.0,2019-09-29 17:37:50.408505
-0.06,45.0,2019-09-29 17:46:57.046547
-0.06,46.0,2019-09-29 17:56:05.325744
Binary file not shown.
@@ -1,4 +1,5 @@
 from torch.distributions import Normal
+from torch.cuda import is_available

 import time
 import os
@@ -22,9 +23,10 @@ args.add_argument('--step', default=5)
 args.add_argument('--features', default=6)
 args.add_argument('--size', default=9)
 args.add_argument('--latent_dim', default=2)
-args.add_argument('--model', default='SAAE_Model')
+args.add_argument('--model', default='AE_Model')
 args.add_argument('--refresh', type=strtobool, default=False)
-args.add_argument('--future_predictions', type=strtobool, default=True)
+args.add_argument('--future_predictions', type=strtobool, default=False)
+args.add_argument('--use_norm', type=strtobool, default=True)


 class AE_Model(AutoEncoder_LO, LightningModule):
@@ -36,7 +38,7 @@ class AE_Model(AutoEncoder_LO, LightningModule):
         self.features = parameters.features
         self.step = parameters.step
         super(AE_Model, self).__init__(train_on_predictions=parameters.future_predictions)
-        self.network = AutoEncoder(self.latent_dim, self.features)
+        self.network = AutoEncoder(self.latent_dim, self.features, use_norm=parameters.use_norm)


 class VAE_Model(VAE_LO, LightningModule):
@@ -48,7 +50,7 @@ class VAE_Model(VAE_LO, LightningModule):
         self.features = parameters.features
         self.step = parameters.step
         super(VAE_Model, self).__init__(train_on_predictions=parameters.future_predictions)
-        self.network = VariationalAE(self.latent_dim, self.features)
+        self.network = VariationalAE(self.latent_dim, self.features, use_norm=parameters.use_norm)


 class AAE_Model(AdversarialAE_LO, LightningModule):
@@ -61,7 +63,7 @@ class AAE_Model(AdversarialAE_LO, LightningModule):
         self.step = parameters.step
         super(AAE_Model, self).__init__(train_on_predictions=parameters.future_predictions)
         self.normal = Normal(0, 1)
-        self.network = AdversarialAE(self.latent_dim, self.features)
+        self.network = AdversarialAE(self.latent_dim, self.features, use_norm=parameters.use_norm)
         pass

@@ -75,7 +77,7 @@ class SAAE_Model(SeparatingAAE_LO, LightningModule):
         self.step = parameters.step
         super(SAAE_Model, self).__init__(train_on_predictions=parameters.future_predictions)
         self.normal = Normal(0, 1)
-        self.network = SeperatingAAE(self.latent_dim, self.features)
+        self.network = SeperatingAAE(self.latent_dim, self.features, use_norm=parameters.use_norm)
         pass

@@ -93,17 +95,17 @@ if __name__ == '__main__':
     from pytorch_lightning.callbacks import ModelCheckpoint

     checkpoint_callback = ModelCheckpoint(
-        filepath=os.path.join(outpath, 'weights.ckpt'),
+        filepath=os.path.join(outpath, 'weights'),
         save_best_only=False,
         verbose=True,
         period=4
     )

     trainer = Trainer(experiment=exp,
-                      max_nb_epochs=250,
-                      gpus=[0],
+                      max_nb_epochs=60,
+                      gpus=[0] if is_available() else None,
                       row_log_interval=1000,
-                      # checkpoint_callback=checkpoint_callback
+                      checkpoint_callback=checkpoint_callback
                       )

     trainer.fit(model)
@@ -1,50 +1,26 @@
 from argparse import ArgumentParser
 import os

+from torch import device
+from torch.cuda import is_available
+
 from dataset import DataContainer
-from viz.utils import MotionAnalyser, Printer, MapContainer, search_for_weights
-import torch
-from run_models import SAAE_Model, AAE_Model, VAE_Model, AE_Model
+from viz.utils import Printer, MapContainer
+available_device = device('cuda' if is_available() else 'cpu')

 arguments = ArgumentParser()
 arguments.add_argument('--data', default='output')

-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-
-def load_and_viz(path_like_element):
-    # Define Loop to search for models and folder with visualizations
-    splitpath = path_like_element.split(os.sep)
-    base_dir = os.path.join(*splitpath[:4])
-    model = globals()[splitpath[2]]
-    print(f'... loading model named: "{model.name}" from timestamp: {splitpath[3]}')
-    pretrained_model = model.load_from_metrics(
-        weights_path=path_like_element,
-        tags_csv=os.path.join(base_dir, 'default', 'version_0', 'meta_tags.csv'),
-        on_gpu=True if torch.cuda.is_available() else False,
-        # map_location=None
-    )
-
-    # Init model and freeze its weights ( for faster inference)
-    pretrained_model = pretrained_model.to(device)
-    pretrained_model.eval()
-    pretrained_model.freeze()
-
-    dataIndex = 0
-
-    datasets = DataContainer(os.path.join(os.pardir, 'data', 'validation'), 9, 6).to(device)
-    dataset = datasets.datasets[dataIndex]
-    # ToDO: use dataloader for iteration instead! - dataloader = DataLoader(dataset, )
-
-    maps = MapContainer(os.path.join(os.pardir, 'data', 'validation'))
-    base_map = maps.datasets[dataIndex]
-
-    p = Printer(pretrained_model)
-    p.print_trajec_on_basemap(dataset, base_map, save=os.path.join(base_dir, f'{base_map.name}_movement.png'),
-                              color_by_movement=True)
-    return True
-

 if __name__ == '__main__':
     args = arguments.parse_args()
-    search_for_weights(load_and_viz, args.data, file_type='movement')
+    maps = MapContainer(os.path.join(os.pardir, 'data', 'validation'))
+    base_map = maps.datasets[0]
+
+    datasets = DataContainer(os.path.join(os.pardir, 'data', 'validation'), 9, 6).to(available_device)
+    dataset = datasets.datasets[0]
+
+    p = Printer(None)
+    p.print_trajec_on_basemap(dataset, base_map, save=os.path.join(f'{base_map.name}_movement.png'),
+                              color_by_movement=True, n=20, clustering='fastdtw', show=True)
BIN viz/tum_map_movement.png (new file, binary not shown; 97 KiB)
viz/utils.py (150 lines changed)
@@ -1,9 +1,8 @@
 from typing import Union
-from functools import reduce

 from statistics import stdev

-from sklearn.cluster import Birch, KMeans, DBSCAN
+from sklearn.cluster import Birch, KMeans
 from sklearn.manifold import TSNE
 from sklearn.decomposition import PCA

@@ -16,7 +15,7 @@ from matplotlib.collections import LineCollection, PatchCollection
 import matplotlib.colors as mcolors
 import matplotlib.cm as cmaps

-from math import pi
+from math import pi, cos, sin


 def search_for_weights(func, folder, file_type='latent_space'):
@@ -24,10 +23,13 @@ def search_for_weights(func, folder, file_type='latent_space'):
     if len(os.path.split(folder)) >= 50:
         raise FileNotFoundError(f'The folder "{folder}" could not be found')
     folder = os.path.join(os.pardir, folder)

     if any([file_type in x.name for x in os.scandir(folder)]):
         return
+    elif folder == 'weights' and os.path.isdir(folder):
+        return

-    if any(['.ckpt' in element.name and element.is_dir() for element in os.scandir(folder)]):
+    if any(['weights.ckpt' in element.name and element.is_dir() for element in os.scandir(folder)]) and False:
         _, _, filenames = next(os.walk(os.path.join(folder, 'weights.ckpt')))
         filenames.sort(key=lambda f: int(''.join(filter(str.isdigit, f))))
         func(os.path.join(folder, 'weights.ckpt', filenames[-1]))
@@ -37,7 +39,7 @@ def search_for_weights(func, folder, file_type='latent_space'):
        if os.path.exists(element):
            if element.is_dir():
                search_for_weights(func, element.path, file_type=file_type)
-           elif element.is_file() and element.name.endswith('.ckpt'):
+           elif element.is_file() and element.name.endswith('weights.ckpt'):
                func(element.path)
            else:
                continue
@@ -47,16 +49,15 @@ class Printer(object):

     def __init__(self, model: AbstractNeuralNetwork, ax=None):
         self.norm = mcolors.Normalize(vmin=0, vmax=1)
-        self.colormap = cmaps.gist_rainbow
+        self.colormap = cmaps.tab20
         self.network = model
         self.fig = plt.figure(dpi=300)
         self.ax = ax if ax else plt.subplot(1, 1, 1)
         pass

-    def colorize(self, x, min_val: Union[float, None] = None, max_val: Union[float, None] = None,
-                 colormap=cmaps.rainbow, **kwargs):
+    def colorize(self, x, min_val: Union[float, None] = None, max_val: Union[float, None] = None, **kwargs):
         norm = mcolors.Normalize(vmin=min_val, vmax=max_val)
-        colored = colormap(norm(x))
+        colored = self.colormap(norm(x))
         return colored

     @staticmethod
@@ -79,20 +80,26 @@ class Printer(object):
             clusterer.init = np.asarray(centers)
         else:
             # clusterer = Birch(n_clusters=None)
-            clusterer = Birch()
+            clusterer = KMeans(3)

         labels = clusterer.fit_predict(data)
         print('Birch Clustering Sucessfull')
         return labels

-    def print_possible_latent_spaces(self, data: Trajectories, n: Union[int, str] = 1000, **kwargs):
-        predictions, _ = self._gather_predictions(data, n)
+    def print_possible_latent_spaces(self, data: Trajectories, n: Union[int, str] = 1000,
+                                     cluster_by_motion=True, **kwargs):
+        predictions, motion_sequence = self._gather_predictions(data, n)
         if len(predictions) >= 2:
             predictions += (torch.cat(predictions, dim=-1), )

+        if cluster_by_motion:
+            motion_analyzer = MotionAnalyser()
+            labels = motion_analyzer.cluster_motion(motion_sequence)
+        else:
             labels = self.cluster_data(predictions[-1])

         for idx, prediction in enumerate(predictions):
-            self.print_latent_space(prediction, labels, running_index=idx, **kwargs)
+            self.print_latent_space(prediction, labels.squeeze(), running_index=idx, **kwargs)

     def print_latent_space(self, prediction, labels, running_index=0, save=None):

@@ -179,12 +186,13 @@ class Printer(object):
         print("Gathering Predictions")

         n = n if isinstance(n, int) and n else len(data) - (data.size * data.step)
-        idxs = np.random.choice(np.arange(len(data) - data.step * data.size), n, replace=False)
+        idxs = np.random.choice(np.arange(len(data)), n, replace=True)
         complete_data = torch.stack([data.get_both_by_key(idx) for idx in idxs], dim=0)
         segment_coords, trajectories = complete_data[:, :, :2], complete_data[:, :, 2:]
         if color_by_movement:
             motion_analyser = MotionAnalyser()
-            predictions = (motion_analyser.cluster_motion(segment_coords), )
+            predictions = (motion_analyser.cluster_motion(segment_coords,
+                                                          clustering=kwargs.get('clustering', 'kmeans')), )

         else:
             with torch.no_grad():
@@ -193,7 +201,7 @@ class Printer(object):
         return predictions, segment_coords

     @staticmethod
-    def colorize_as_hsv(self, x, min_val: Union[float, None] = None, max_val: Union[float, None] = None,
+    def colorize_as_hsv(x, min_val: Union[float, None] = None, max_val: Union[float, None] = None,
                         colormap=cmaps.rainbow, **kwargs):
         norm = mcolors.Normalize(vmin=min_val, vmax=max_val)
         colored = colormap(norm(x))
@@ -248,11 +256,12 @@ class Printer(object):
         patches = [Polygon(base_map[i], True, color='black') for i in range(len(base_map))]
         return PatchCollection(patches, color='black')

-    def print_trajec_on_basemap(self, data, base_map: Map, save=False, color_by_movement=False, **kwargs):
+    def print_trajec_on_basemap(self, data, base_map: Map, save=False, show=False, color_by_movement=False, **kwargs):
         """

         :rtype: object
         """

         prediction_segments = self._gather_predictions(data, color_by_movement=color_by_movement, **kwargs)
         trajectory_shapes = self._build_trajectory_shapes(*prediction_segments, **kwargs)
         map_shapes = self._build_map_shapes(base_map)
@@ -266,7 +275,8 @@ class Printer(object):
             self.save(save)
         else:
             self.save(base_map.name)
-        pass
+        if show:
+            self.show()

     @staticmethod
     def show():
@@ -284,15 +294,25 @@ class MotionAnalyser(object):
         pass

     def _sequential_pairwise_map(self, func, xy_sequence, on_deltas=False):
-        zipped_list = [x for x in zip(xy_sequence[:-1], xy_sequence[1:])]
-
         if on_deltas:
+            zipped_list = [x for x in zip(xy_sequence[:-1], xy_sequence[1:])]
             zipped_list = [self.delta(*movement) for movement in zipped_list]
         else:
-            pass
+            zipped_list = xy_sequence

         return [func(*xy) for xy in zipped_list]

+    @staticmethod
+    def _rotatePoint(point, center, angle, is_rad=True):
+
+        angle = (angle) * (pi / 180) if not is_rad else angle  # Convert to radians if
+        rotatedX = cos(angle) * (point[0] - center[0]) - sin(angle) * (point[1] - center[1]) + center[0]
+        rotatedY = sin(angle) * (point[0] - center[0]) + cos(angle) * (point[1] - center[1]) + center[1]
+
+        return rotatedX, rotatedY
+
     @staticmethod
     def delta(x1y1, x2y2):
         x1, y1 = x1y1
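The new _rotatePoint is the standard 2D rotation of a point about a center: x' = cos(a)(x - cx) - sin(a)(y - cy) + cx and y' = sin(a)(x - cx) + cos(a)(y - cy) + cy. A quick standalone check (not repo code): rotating (1, 0) about the origin by +90 degrees should land on (0, 1).

    from math import pi, cos, sin

    def rotate_point(point, center, angle_rad):
        # 2D rotation about `center` by `angle_rad` radians
        dx, dy = point[0] - center[0], point[1] - center[1]
        x = cos(angle_rad) * dx - sin(angle_rad) * dy + center[0]
        y = sin(angle_rad) * dx + cos(angle_rad) * dy + center[1]
        return x, y

    print(rotate_point((1, 0), (0, 0), pi / 2))  # ~(0.0, 1.0)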
@@ -306,10 +326,16 @@ class MotionAnalyser(object):
         return r

     @staticmethod
-    def get_theta(deltax, deltay, rad=False):
+    def get_theta(deltax, deltay, as_radians=True):
         # https://mathinsight.org/polar_coordinates
+        try:
+            deltax = torch.as_tensor(deltax)
+            deltay = torch.as_tensor(deltay)
+        except:
+            pass
+
         theta = torch.atan2(deltay, deltax)
-        return theta if rad else theta * 180 / pi
+        return theta if as_radians else theta * 180 / pi

     def get_theta_for_sequence(self, xy_sequence):
         ts = self._sequential_pairwise_map(self.get_theta, xy_sequence, on_deltas=True)
@@ -319,24 +345,57 @@ class MotionAnalyser(object):
         rs = self._sequential_pairwise_map(self.get_r, xy_sequence, on_deltas=True)
         return rs

+    def move_to_zero(self, xy_sequence):
+        old_origin = xy_sequence[0]
+        return xy_sequence - old_origin
+
     def get_unique_seq_identifier(self, xy_sequence):
+        xy_sequence = xy_sequence.cpu()
+
+        # Move all points so that the first point is always (0, 0)
+        # moved_sequence = self.move_to_zero(xy_sequence)
+        moved_sequence = xy_sequence
+
+        # Rotate, so that x is zero for last point
+        angle = self.get_theta(*self.delta(moved_sequence[0], moved_sequence[1]))
+        rotated_sequence = torch.as_tensor([self._rotatePoint(point, moved_sequence[0], -angle)
+                                            for point in moved_sequence[1:]])
+        rotated_sequence = torch.cat((moved_sequence[0].unsqueeze(0), rotated_sequence))
+        # rotated_sequence = moved_sequence
+        std, mean = torch.std_mean(rotated_sequence)
+        rotated_sequence = (rotated_sequence - mean) / std
+
+        def centroid_for(arr):
+            try:
+                arr = torch.as_tensor(arr)
+            except:
+                pass
+            size = arr.shape[0]
+            sum_x = torch.sum(arr[:, 0])
+            sum_y = torch.sum(arr[:, 1])
+            return sum_x/size, sum_y/size
+
         # Globals
-        global_delta = self.delta(xy_sequence[0], xy_sequence[-1])
-        global_theta = self.get_theta(*global_delta)
+        global_delta = self.delta(rotated_sequence[0], rotated_sequence[-1])
         global_r = self.get_r(*global_delta)

+        def f(*args):
+            return args
+        centroid = centroid_for(self._sequential_pairwise_map(f, rotated_sequence, on_deltas=True))
+
+        hull_length = sum(self.get_r_for_sequence(torch.cat((rotated_sequence, rotated_sequence[0].unsqueeze(0)))))
+
         # For Each
-        theta_seq = self.get_theta_for_sequence(xy_sequence)
+        theta_seq = self.get_theta_for_sequence(rotated_sequence)
         mean_theta = sum(theta_seq) / len(theta_seq)
         theta_sum = sum([abs(theta) for theta in theta_seq])
         std_theta = stdev(map(float, theta_seq))

-        return torch.stack((global_r, torch.as_tensor(std_theta), mean_theta, global_theta))
+        return torch.stack((centroid[0], centroid[1], torch.as_tensor(std_theta), mean_theta, theta_sum, hull_length))

-    def cluster_motion(self, trajectory_samples, cluster_class=KMeans):
-        cluster_class = cluster_class(3)
-
+    def cluster_motion(self, trajectory_samples, clustering='kmeans'):
+        if clustering.lower() == 'kmeans':
+            cluster_class = KMeans(3)
             std, mean = torch.std_mean(trajectory_samples, dim=0)
             trajectory_samples = (trajectory_samples - mean) / std

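get_unique_seq_identifier turns a variable-length xy window into a fixed-length, rotation-normalized descriptor (delta centroid, spread/mean/sum of turning angles, hull length), which is what makes per-trajectory KMeans applicable. A hedged usage sketch, assuming a (N, T, 2) float tensor of coordinates (shapes mirror the diff; the call site itself is illustrative only):

    import torch

    analyser = MotionAnalyser()
    trajectory_samples = torch.rand(32, 9, 2)   # 32 windows of 9 xy points
    labels = analyser.cluster_motion(trajectory_samples, clustering='kmeans')
    print(labels.shape)  # expected (32, 1), per the reshape(-1, 1) at the end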
@@ -344,13 +403,32 @@ class MotionAnalyser(object):
                                                        for trajectory in trajectory_samples])

             clustered_movement = cluster_class.fit_predict(unique_seq_identifiers)
-        if False:
-            from sklearn.decomposition import PCA
-            p = PCA(2)
-            t = p.fit_transform(unique_seq_identifiers)
-            f = plt.figure()
-            plt.scatter(t[:, 0], t[:,1])
-            plt.show()
+        elif clustering.lower() == 'fastdtw':
+            # Move all points so that the first point is always (0, 0)
+            moved_sequence = self.move_to_zero(trajectory_samples)
+            rotated_sequences = []
+            for sequence in moved_sequence:
+                # Rotate, so that x is zero for last point
+                angle = self.get_theta(*self.delta(sequence[0], sequence[1]))
+                rotated_sequence = torch.as_tensor([self._rotatePoint(point, sequence[0], -angle)
+                                                    for point in sequence[1:]])
+                rotated_sequence = torch.cat((sequence[0].unsqueeze(0), rotated_sequence)).unsqueeze(0)
+                rotated_sequences.append(rotated_sequence)
+            # deltas = [self._sequential_pairwise_map(self.delta, x, on_deltas=False) for x in rotated_sequence]
+            t = torch.cat(rotated_sequences)
+            # t = torch.as_tensor(deltas)
+            z = torch.zeros((t.shape[0], t.shape[0]))
+
+            import fastdtw
+            for idx, x in tqdm(enumerate(t), total=z.shape[0]):
+                for idy, y in enumerate(t):
+                    z[idx, idy] = fastdtw.fastdtw(x, y)[0]
+
+            from sklearn.cluster.hierarchical import AgglomerativeClustering
+            clusterer = KMeans(3)
+            clustered_movement = clusterer.fit_predict(z)
+        else:
+            raise NotImplementedError

         return clustered_movement.reshape(-1, 1)

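The fastdtw branch above builds a dense pairwise DTW distance matrix and then clusters its rows; note that it imports AgglomerativeClustering but ends up running KMeans(3) on the matrix. A minimal standalone sketch of the underlying fastdtw call on two short xy sequences, assuming the fastdtw and scipy packages are installed (the diff calls fastdtw.fastdtw(x, y)[0] with the default point metric):

    import numpy as np
    from fastdtw import fastdtw
    from scipy.spatial.distance import euclidean

    a = np.array([[0, 0], [1, 1], [2, 2]], dtype=float)
    b = np.array([[0, 0], [1, 2], [2, 4]], dtype=float)

    # Returns (alignment cost, list of matched index pairs)
    distance, path = fastdtw(a, b, dist=euclidean)
    print(distance)  # DTW cost between the two sequences
    print(path)      # e.g. [(0, 0), (1, 1), (2, 2)]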
@@ -18,7 +18,6 @@ def load_and_predict(path_like_element):
         weights_path=path_like_element,
         tags_csv=os.path.join(base_dir, 'default', 'version_0', 'meta_tags.csv'),
         on_gpu=True if torch.cuda.is_available() else False,
-        # map_location=None
     )
     print(f'... loading model named: "{model.name}" from timestamp: {splitpath[3]}')

@@ -44,7 +43,7 @@ def load_and_predict(path_like_element):
     # Important:
     # Use all given valdiation samples, even if they relate to differnt maps. This is important since we want to have a
     # view on the complete latent space, not just in relation to a single basemap, which would be a major bias.
-    p.print_possible_latent_spaces(dataset, save=os.path.join(base_dir, f'latent_space'))
+    p.print_possible_latent_spaces(dataset, save=os.path.join(base_dir, f'latent_space'), cluster_by_motion=False)


 if __name__ == '__main__':