Debugging
lib/models/generators/recurrent.py (new file, 0 lines changed)
@@ -1,3 +1,6 @@
+from functools import reduce
+from operator import mul
+
 import torch
 from torch import nn
 import torch.nn.functional as F
@@ -13,7 +16,7 @@ class ConvHomDetector(LightningBaseModule):
     name = 'CNNHomotopyClassifier'

     def configure_optimizers(self):
-        return Adam(self.parameters(), lr=self.lr)
+        return Adam(self.parameters(), lr=self.hparams.lr)

     def validation_step(self, *args, **kwargs):
         pass
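Review note: the lr fix above swaps a bare attribute for the hyperparameter namespace, so the optimizer always reads what the run was configured with. A minimal sketch of the pattern, assuming hparams is an argparse.Namespace handed to the module (the Lightning 0.x convention; names here are illustrative):

    from argparse import Namespace

    from torch import nn
    from torch.optim import Adam


    class Sketch(nn.Module):
        def __init__(self, hparams: Namespace):
            super().__init__()
            # Keep every hyperparameter on one namespace instead of
            # copying single values onto ad-hoc attributes like self.lr.
            self.hparams = hparams
            self.layer = nn.Linear(4, 2)

        def configure_optimizers(self):
            return Adam(self.parameters(), lr=self.hparams.lr)


    optimizer = Sketch(Namespace(lr=1e-3)).configure_optimizers()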
@@ -32,29 +35,36 @@ class ConvHomDetector(LightningBaseModule):

     def __init__(self, *params):
         super(ConvHomDetector, self).__init__(*params)

         # Dataset
-        self.dataset = TrajData(self.hparams.data_param.data_root)
+        self.dataset = TrajData(self.hparams.data_param.root)

         # Additional Attributes
         self.map_shape = self.dataset.map_shapes_max

         # Model Parameters
         self.in_shape = self.dataset.map_shapes_max
         assert len(self.in_shape) == 3, f'Image or map shape has to have 3 dims, but had: {len(self.in_shape)}'

         # NN Nodes
         # ============================
         # Convolutional Map Processing
         #
-        self.map_res_1 = ResidualModule(self.in_shape, ConvModule, 3,
+        self.map_conv_0 = ConvModule(self.in_shape, conv_kernel=3, conv_stride=1,
+                                     conv_padding=0, conv_filters=self.hparams.model_param.filters[0])
+        self.map_res_1 = ResidualModule(self.map_conv_0.shape, ConvModule, 3,
                                         **dict(conv_kernel=3, conv_stride=1,
-                                               conv_padding=0, conv_filters=self.hparams.model_param.filters[0]))
+                                               conv_padding=1, conv_filters=self.hparams.model_param.filters[0]))
         self.map_conv_1 = ConvModule(self.map_res_1.shape, conv_kernel=5, conv_stride=1,
                                      conv_padding=0, conv_filters=self.hparams.model_param.filters[0])
         self.map_res_2 = ResidualModule(self.map_conv_1.shape, ConvModule, 3,
                                         **dict(conv_kernel=3, conv_stride=1,
-                                               conv_padding=0, conv_filters=self.hparams.model_param.filters[0]))
+                                               conv_padding=1, conv_filters=self.hparams.model_param.filters[0]))
         self.map_conv_2 = ConvModule(self.map_res_2.shape, conv_kernel=5, conv_stride=1,
                                      conv_padding=0, conv_filters=self.hparams.model_param.filters[0])
         self.map_res_3 = ResidualModule(self.map_conv_2.shape, ConvModule, 3,
                                         **dict(conv_kernel=3, conv_stride=1,
-                                               conv_padding=0, conv_filters=self.hparams.model_param.filters[0]))
+                                               conv_padding=1, conv_filters=self.hparams.model_param.filters[0]))
         self.map_conv_3 = ConvModule(self.map_res_3.shape, conv_kernel=5, conv_stride=1,
                                      conv_padding=0, conv_filters=self.hparams.model_param.filters[0])

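Review note: the conv_padding 0 -> 1 changes inside the three ResidualModule blocks are what make the residual add legal. A residual block must return the spatial shape it received, and for a 3x3 kernel at stride 1 only padding 1 keeps H and W fixed, since out = (in + 2*pad - kernel)/stride + 1. A quick check of that formula:

    def conv_out(size: int, kernel: int, stride: int = 1, padding: int = 0) -> int:
        # Standard Conv2d output-size formula (dilation = 1).
        return (size + 2 * padding - kernel) // stride + 1

    assert conv_out(64, kernel=3, padding=1) == 64  # shape kept: residual add is valid
    assert conv_out(64, kernel=3, padding=0) == 62  # each conv shrinks the map by 2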
@@ -64,12 +74,13 @@ class ConvHomDetector(LightningBaseModule):
         # Classifier
         #

-        self.linear = nn.Linear(self.flatten.shape.item(), self.hparams.model_param.classes * 10)
-        self.classifier = nn.Linear(self.linear.shape, self.hparams.model_param.classes)
+        self.linear = nn.Linear(reduce(mul, self.flatten.shape), self.hparams.model_param.classes * 10)
+        self.classifier = nn.Linear(self.hparams.model_param.classes * 10, self.hparams.model_param.classes)
         self.softmax = nn.Softmax()

     def forward(self, x):
-        tensor = self.map_res_1(x)
+        tensor = self.map_conv_0(x)
+        tensor = self.map_res_1(tensor)
         tensor = self.map_conv_1(tensor)
         tensor = self.map_res_2(tensor)
         tensor = self.map_conv_2(tensor)
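Review note: nn.Linear wants a plain int for in_features; the old self.flatten.shape.item() only works when shape is a one-element tensor, while reduce(mul, ...) collapses any (C, H, W)-style tuple into an element count. The second line fixes the chaining: the classifier's in_features now matches the preceding layer's out_features instead of a shape object. For example, with a hypothetical flattened shape:

    from functools import reduce
    from operator import mul

    flat_shape = (32, 4, 4)                # hypothetical (C, H, W) after the conv stack
    assert reduce(mul, flat_shape) == 512  # the in_features nn.Linear expects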
@@ -36,9 +36,9 @@ class ConvModule(nn.Module):
         self.stride = conv_stride

         # Modules
-        self.dropout = nn.Dropout2d(dropout) if dropout else False
-        self.pooling = nn.MaxPool2d(pooling_size) if pooling_size else False
-        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04, affine=False) if use_norm else False
+        self.dropout = nn.Dropout2d(dropout) if dropout else lambda x: x
+        self.pooling = nn.MaxPool2d(pooling_size) if pooling_size else lambda x: x
+        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04, affine=False) if use_norm else lambda x: x
         self.conv = conv_class(in_channels, conv_filters, conv_kernel, bias=use_bias,
                                padding=self.padding, stride=self.stride
                                )
@@ -47,8 +47,8 @@ class ConvModule(nn.Module):
         x = self.norm(x) if self.norm else x

         tensor = self.conv(x)
-        tensor = self.dropout(tensor) if self.dropout else tensor
-        tensor = self.pooling(tensor) if self.pooling else tensor
+        tensor = self.dropout(tensor)
+        tensor = self.pooling(tensor)
         tensor = self.activation(tensor)
         return tensor
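Review note: the unconditional calls work because the missing modules are now identity functions. One caveat with lambda x: x as the placeholder: a lambda stored on an nn.Module is not a registered submodule and is not picklable, so torch.save on the whole module object would fail. nn.Identity() is PyTorch's built-in no-op without those drawbacks; a sketch of the same guard:

    import torch
    from torch import nn

    dropout = 0.0  # falsy -> caller asked for no dropout
    drop = nn.Dropout2d(dropout) if dropout else nn.Identity()

    x = torch.randn(1, 3, 8, 8)
    assert torch.equal(drop(x), x)  # the stand-in passes tensors through untouched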
@@ -72,23 +72,23 @@ class DeConvModule(nn.Module):
         self.in_shape = in_shape
         self.conv_filters = conv_filters

-        self.autopad = AutoPad() if autopad else False
-        self.interpolation = Interpolate(scale_factor=interpolation_scale) if interpolation_scale else False
-        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04, affine=False) if normalize else False
-        self.dropout = nn.Dropout2d(dropout) if dropout else False
+        self.autopad = AutoPad() if autopad else lambda x: x
+        self.interpolation = Interpolate(scale_factor=interpolation_scale) if interpolation_scale else lambda x: x
+        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04, affine=False) if normalize else lambda x: x
+        self.dropout = nn.Dropout2d(dropout) if dropout else lambda x: x
         self.de_conv = nn.ConvTranspose2d(in_channels, self.conv_filters, conv_kernel, bias=use_bias,
                                           padding=self.padding, stride=self.stride)

-        self.activation = activation() if activation else None
+        self.activation = activation() if activation else lambda x: x

     def forward(self, x):
-        x = self.norm(x) if self.norm else x
-        x = self.dropout(x) if self.dropout else x
-        x = self.autopad(x) if self.autopad else x
-        x = self.interpolation(x) if self.interpolation else x
+        x = self.norm(x)
+        x = self.dropout(x)
+        x = self.autopad(x)
+        x = self.interpolation(x)

         tensor = self.de_conv(x)
-        tensor = self.activation(tensor) if self.activation else tensor
+        tensor = self.activation(tensor)
         return tensor

@@ -100,12 +100,13 @@ class ResidualModule(nn.Module):
         output = self(x)
         return output.shape[1:]

-    def __init__(self, in_shape, module_class, n, **module_paramters):
+    def __init__(self, in_shape, module_class, n, activation=None, **module_paramters):
         assert n >= 1
         super(ResidualModule, self).__init__()
         self.in_shape = in_shape
         module_paramters.update(in_shape=in_shape)
-        self.residual_block = [module_class(**module_paramters) for x in range(n)]
+        self.activation = activation() if activation else lambda x: x
+        self.residual_block = [module_class(**module_paramters) for _ in range(n)]
         assert self.in_shape == self.shape, f'The in_shape: {self.in_shape} - must match the out_shape: {self.shape}.'

     def forward(self, x):
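Review note: one caveat in this hunk: self.residual_block is a plain Python list, so the wrapped modules are invisible to nn.Module — their parameters do not show up in .parameters(), do not move with .to(device), and are never trained. nn.ModuleList registers them; a minimal sketch:

    from torch import nn


    class ResidualSketch(nn.Module):
        def __init__(self, n: int):
            super().__init__()
            assert n >= 1
            # ModuleList registers each block; a bare [] hides them from
            # parameters(), state_dict(), .to(device), and the optimizer.
            self.residual_block = nn.ModuleList(
                nn.Conv2d(8, 8, 3, padding=1) for _ in range(n))

        def forward(self, x):
            tensor = x
            for block in self.residual_block:
                tensor = block(tensor)
            return tensor + x


    assert sum(1 for _ in ResidualSketch(3).parameters()) == 6  # 3 convs x (weight, bias)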
@@ -114,6 +115,7 @@ class ResidualModule(nn.Module):

         # noinspection PyUnboundLocalVariable
         tensor = tensor + x
+        tensor = self.activation(tensor)
         return tensor

@@ -123,6 +123,10 @@ class LightningBaseModule(pl.LightningModule, ABC):
                           batch_size=self.hparams.data_param.batchsize,
                           num_workers=self.hparams.data_param.worker)

+    @property
+    def data_len(self):
+        return len(self.dataset.train_dataset)
+
     def configure_optimizers(self):
         raise NotImplementedError

@@ -1,7 +1,5 @@
-import shelve
 from pathlib import Path
-from collections import UserDict


 import copy
 from math import sqrt
@@ -46,6 +44,10 @@ class Map(object):
         return self.map_array

     def __init__(self, name='', array_like_map_representation=None):
+        if array_like_map_representation is not None:
+            if array_like_map_representation.ndim == 2:
+                array_like_map_representation = np.expand_dims(array_like_map_representation, axis=0)
+            assert array_like_map_representation.ndim == 3
         self.map_array: np.ndarray = array_like_map_representation
         self.name = name
         pass
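Review note: the new guard normalizes 2-D greyscale maps to a leading channel axis, so everything downstream can assume (C, H, W):

    import numpy as np

    grey = np.zeros((64, 64))           # 2-D map without a channel axis
    chw = np.expand_dims(grey, axis=0)  # -> (1, 64, 64)
    assert chw.ndim == 3 and chw.shape == (1, 64, 64)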
@@ -63,22 +65,19 @@ class Map(object):
         # Check pixels for their color (determine if walkable)
         for idx, value in np.ndenumerate(self.map_array):
             if value == self.white:
-                try:
-                    y, x = idx
-                except ValueError:
-                    y, x, channels = idx
-                idx = (y, x)
                 # IF walkable, add node
-                graph.add_node((y, x), count=0)
+                graph.add_node(idx, count=0)
                 # Fully connect to all surrounding neighbors
                 for n, (xdif, ydif, weight) in enumerate(neighbors):
                     # Differentiate between 8 and 4 neighbors
                     if not full_neighbors and n >= 2:
                         break

-                    query_node = (y + ydif, x + xdif)
+                    # TODO: make this explicit and less ugly
+                    query_node = idx[:1] + (idx[1] + ydif,) + (idx[2] + xdif,)
                     if graph.has_node(query_node):
                         graph.add_edge(idx, query_node, weight=weight)

         return graph

     @classmethod
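Review note: with np.ndenumerate over the now 3-D map array, idx is a (channel, y, x) tuple, which is why the try/except unpacking could go. The tuple surgery the TODO complains about keeps the channel and offsets y and x; plain unpacking does the same thing more readably:

    idx = (0, 10, 7)   # (channel, y, x) as yielded by np.ndenumerate on (C, H, W)
    ydif, xdif = 1, -1

    ugly = idx[:1] + (idx[1] + ydif,) + (idx[2] + xdif,)

    c, y, x = idx      # the "explicit and less ugly" version the TODO asks for
    assert ugly == (c, y + ydif, x + xdif) == (0, 11, 6)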
@@ -87,7 +86,7 @@ class Map(object):
         # Turn the image to single Channel Greyscale
         if image.mode != 'L':
             image = image.convert('L')
-        map_array = np.array(image)
+        map_array = np.expand_dims(np.array(image), axis=0)
         return cls(name=imagepath.name, array_like_map_representation=map_array)

     def simple_trajectory_between(self, start, dest):
@@ -2,8 +2,9 @@ from math import atan2
 from typing import List, Tuple, Union

 from matplotlib import pyplot as plt
-from lib.objects import variables as V
+from lib import variables as V

 import numpy as np
+

 class Trajectory(object):
@@ -31,6 +32,10 @@ class Trajectory(object):
     def as_paired_list(self):
         return list(zip(self.vertices[:-1], self.vertices[1:]))

+    @property
+    def np_vertices(self):
+        return [np.array(vertice) for vertice in self.vertices]
+
     def __init__(self, vertices: Union[List[Tuple[int]], None] = None):
         assert any((isinstance(vertices, list), vertices is None))
         if vertices is not None:
@@ -1,9 +0,0 @@
-from pathlib import Path
-_ROOT = Path('..')
-
-HOMOTOPIC = 0
-ALTERNATIVE = 1
-
-_key_1 = 'eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5haSIsImFwaV91cmwiOiJodHRwczovL3VpLm'
-_key_2 = '5lcHR1bmUuYWkiLCJhcGlfa2V5IjoiZmI0OGMzNzUtOTg1NS00Yzg2LThjMzYtMWFiYjUwMDUyMjVlIn0='
-NEPTUNE_KEY = _key_1 + _key_2
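Review note: the deleted module above carried a base64-encoded Neptune API token in version control; its replacement, lib/variables.py at the bottom of this commit, drops the key. The usual pattern is to read such credentials from the environment, e.g. (variable name illustrative):

    import os

    # Keep the credential out of the repository entirely.
    NEPTUNE_KEY = os.environ.get('NEPTUNE_API_TOKEN', '')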
@@ -5,6 +5,7 @@ from collections import defaultdict
 from configparser import ConfigParser
 from pathlib import Path

+from lib.models.homotopy_classification.cnn_based import ConvHomDetector
 from lib.utils.model_io import ModelParameters

@@ -24,6 +25,15 @@ class Config(ConfigParser):
     # for section in self.default_sections:
     #     self.__setattr__(section, property(lambda x :x._get_namespace_for_section(section))

+    @property
+    def model_class(self):
+        model_dict = dict(classifier_cnn=ConvHomDetector)
+        try:
+            return model_dict[self.get('model', 'type')]
+        except KeyError as e:
+            raise KeyError(rf'The model alias you provided ("{self.get("model", "type")}") does not exist! \n'
+                           f'Try one of these:\n{list(model_dict.keys())}')
+
     @property
     def main(self):
         return self._get_namespace_for_section('main')
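Review note: a nit in the new model_class property: the message is an rf-string, so the trailing \n is two literal characters (raw mode disables the escape), and the caught exception e is never used. Dropping the r prefix restores the intended newline:

    alias = 'typo_cnn'
    raw = rf'Alias "{alias}" does not exist! \n'  # raw: ends in backslash + n
    plain = f'Alias "{alias}" does not exist!\n'  # ends in a real newline
    assert raw.endswith('\\n') and plain.endswith('\n')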
@@ -16,6 +16,12 @@ class Logger(LightningLoggerBase):
         else:
             return self.neptunelogger.experiment

+    @property
+    def log_dir(self):
+        if self.debug:
+            return Path(self.outpath)
+        return Path(self.experiment.log_dir).parent
+
     @property
     def name(self):
         return self.config.model.type
@@ -50,10 +56,9 @@ class Logger(LightningLoggerBase):
         self.debug = debug
         self.config = config
         self._testtube_kwargs = dict(save_dir=self.outpath, version=self.version, name=self.name)
-        self._neptune_kwargs = dict(offline_mode= self.debug,
+        self._neptune_kwargs = dict(offline_mode=self.debug,
                                     api_key=self.config.project.neptune_key,
                                     project_name=self.project_name,
                                     name=self.name,
                                     upload_source_files=list())
         self.neptunelogger = NeptuneLogger(**self._neptune_kwargs)
         self.testtubelogger = TestTubeLogger(**self._testtube_kwargs)
@@ -67,3 +72,6 @@ class Logger(LightningLoggerBase):
         self.neptunelogger.log_metrics(metrics, step_num)
         self.testtubelogger.log_metrics(metrics, step_num)
         pass
+
+    def log_config_as_ini(self):
+        self.config.write(self.log_dir)
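Review note: a caveat on the new log_config_as_ini: ConfigParser.write expects an open, writable file object, and log_dir above is a directory. Assuming Config keeps ConfigParser's write, the call presumably wants to look like this (file name illustrative):

    from configparser import ConfigParser
    from pathlib import Path

    config = ConfigParser()
    config['main'] = {'debug': 'True'}

    log_dir = Path('.')  # stand-in for Logger.log_dir
    with (log_dir / 'config.ini').open('w') as handle:
        config.write(handle)  # write() takes a file handle, not a Path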
lib/variables.py (new file, 5 lines)
@@ -0,0 +1,5 @@
+from pathlib import Path
+_ROOT = Path('..')
+
+HOMOTOPIC = 0
+ALTERNATIVE = 1