CNN Model Body

Steffen Illium
2020-02-16 21:00:07 +01:00
parent 1ce8d5993b
commit 2e60b19fa6
9 changed files with 412 additions and 318 deletions

View File

@@ -2,6 +2,8 @@
   <dictionary name="steffen">
     <words>
       <w>conv</w>
+      <w>homotopic</w>
+      <w>hyperparamter</w>
       <w>numlayers</w>
     </words>
   </dictionary>

.idea/webResources.xml generated Normal file
View File

@@ -0,0 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="WebResourcesPaths">
<contentEntries>
<entry url="file://$PROJECT_DIR$">
<entryData>
<resourceRoots>
<path value="file://$PROJECT_DIR$/res" />
<path value="file://$PROJECT_DIR$/data" />
</resourceRoots>
</entryData>
</entry>
</contentEntries>
</component>
</project>

View File

@@ -0,0 +1,21 @@
from PIL import Image, ImageDraw
import numpy as np


def are_homotopic(map_array, trajectory, other_trajectory):
    # Build a closed polygon: one trajectory forward, the other reversed.
    polyline = trajectory.vertices.copy()
    polyline.extend(reversed(other_trajectory.vertices))

    # numpy shapes are (height, width); PIL expects the size as (width, height).
    height, width = map_array.shape
    img = Image.new('L', (width, height), 0)
    ImageDraw.Draw(img).polygon(polyline, outline=1, fill=1)

    # Homotopic iff the area enclosed by the two routes contains no obstacle
    # (non-zero) pixel of map_array.
    a = (np.array(img) * map_array).sum()
    return a < 1
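A quick sanity check of this helper (a sketch, not part of the commit): non-zero pixels of map_array count as obstacles, and two routes are homotopic when the polygon spanned between them encloses none. The SimpleNamespace stand-ins below only mimic the `vertices` attribute of lib.objects.trajectory.Trajectory.

from types import SimpleNamespace
import numpy as np

obstacle_map = np.zeros((8, 8), dtype=int)  # non-zero pixels mark obstacles
obstacle_map[4, 4] = 1                      # single obstacle in the middle

left = SimpleNamespace(vertices=[(1, 1), (1, 6), (6, 6)])   # route around one side
right = SimpleNamespace(vertices=[(1, 1), (6, 1), (6, 6)])  # route around the other

print(are_homotopic(obstacle_map, left, right))  # False: the obstacle lies between them
# With the obstacle removed (or outside the enclosed area) the call returns True.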

View File

@@ -1,125 +1,132 @@
 from pathlib import Path
 import copy
 from math import sqrt
+from random import choice
+
 import numpy as np
 from PIL import Image, ImageDraw
 import networkx as nx
 from matplotlib import pyplot as plt
 
 from lib.objects.trajectory import Trajectory
 
 
 class Map(object):
-    white = [1, 255]
-    black = [0]
+
+    # This setting is for Img mode "L" aka GreyScale Image; values: 0-255
+    white = 255
+    black = 0
 
     def __copy__(self):
         return copy.deepcopy(self)
 
     @property
     def shape(self):
         return self.map_array.shape
 
     @property
     def width(self):
         return self.shape[0]
 
     @property
     def height(self):
         return self.shape[1]
 
     @property
     def as_graph(self):
         return self._G
 
     @property
     def as_array(self):
         return self.map_array
 
     def __init__(self, name='', array_like_map_representation=None):
         self.map_array: np.ndarray = array_like_map_representation
         self.name = name
         pass
 
     def __setattr__(self, key, value):
         super(Map, self).__setattr__(key, value)
         if key == 'map_array' and self.map_array is not None:
             self._G = self._build_graph()
 
     def _build_graph(self, full_neighbors=True):
         graph = nx.Graph()
         # Do checks in order: up - left - upperLeft - lowerLeft
         neighbors = [(0, -1, 1), (-1, 0, 1), (-1, -1, sqrt(2)), (-1, 1, sqrt(2))]
 
         # Check pixels for their color (determine if walkable)
         for idx, value in np.ndenumerate(self.map_array):
-            if value in self.white:
-                y, x = idx
+            if value == self.white:
+                try:
+                    y, x = idx
+                except ValueError:
+                    y, x, channels = idx
+                    idx = (y, x)
                 # IF walkable, add node
                 graph.add_node((y, x), count=0)
                 # Fully connect to all surrounding neighbors
                 for n, (xdif, ydif, weight) in enumerate(neighbors):
                     # Differentiate between 8 and 4 neighbors
                     if not full_neighbors and n >= 2:
                         break
 
                     query_node = (y + ydif, x + xdif)
                     if graph.has_node(query_node):
                         graph.add_edge(idx, query_node, weight=weight)
         return graph
 
     @classmethod
     def from_image(cls, imagepath: Path):
         with Image.open(imagepath) as image:
-            return cls(name=imagepath.name, array_like_map_representation=np.array(image))
+            # Turn the image to single Channel Greyscale
+            if image.mode != 'L':
+                image = image.convert('L')
+            map_array = np.array(image)
+            return cls(name=imagepath.name, array_like_map_representation=map_array)
 
     def simple_trajectory_between(self, start, dest):
         vertices = list(nx.shortest_path(self._G, start, dest))
         trajectory = Trajectory(vertices)
         return trajectory
 
     def get_valid_position(self):
-        not_found, valid_position = True, (-9999, -9999)
-        while not_found:
-            valid_position = int(np.random.choice(self.height, 1)), int(np.random.choice(self.width, 1))
-            if self._G.has_node(valid_position):
-                not_found = False
-            pass
+        valid_position = choice(list(self._G.nodes))
         return valid_position
 
     def get_trajectory_from_vertices(self, *args):
         coords = list()
         for start, dest in zip(args[:-1], args[1:]):
             coords.extend(nx.shortest_path(self._G, start, dest))
         return Trajectory(coords)
 
     def get_random_trajectory(self):
         start = self.get_valid_position()
         dest = self.get_valid_position()
         return self.simple_trajectory_between(start, dest)
 
     def are_homotopic(self, trajectory, other_trajectory):
         if not all(isinstance(x, Trajectory) for x in [trajectory, other_trajectory]):
             raise TypeError
         polyline = trajectory.vertices.copy()
         polyline.extend(reversed(other_trajectory.vertices))
 
         img = Image.new('L', (self.height, self.width), 0)
-        ImageDraw.Draw(img).polygon(polyline, outline=1, fill=1)
+        draw = ImageDraw.Draw(img)
+        draw.polygon(polyline, outline=255, fill=255)
 
-        a = (np.array(img) * self.map_array).sum()
+        a = (np.array(img) * np.where(self.map_array == self.white, 0, 1)).sum()
         if a >= 1:
             return False
         else:
             return True
 
     def draw(self):
         fig, ax = plt.gcf(), plt.gca()
         # The standard colormaps also all have reversed versions.
         # They have the same names with _r tacked on to the end.
         # https://matplotlib.org/api/pyplot_summary.html?highlight=colormaps
         img = ax.imshow(self.as_array, cmap='Greys_r')
         return dict(img=img, fig=fig, ax=ax)
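For orientation, a minimal usage sketch of the reworked Map (an illustration, not part of the commit; the toy array below is an assumption):

import numpy as np
from lib.objects.map import Map

arr = np.full((16, 16), 255, dtype=np.uint8)  # all-white array: every pixel walkable
arr[4:12, 8] = 0                              # black wall segment as obstacle

toy_map = Map(name='toy', array_like_map_representation=arr)
print(toy_map.as_graph.number_of_nodes())     # walkable pixels become graph nodes
trajectory = toy_map.get_random_trajectory()  # shortest path between two random nodes

Since get_valid_position() now draws directly from the graph's node list, it no longer loops over random pixel coordinates until it happens to hit a walkable one.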

View File

@@ -1,121 +1,124 @@
 import multiprocessing as mp
 import pickle
 import shelve
 from collections import defaultdict
 from pathlib import Path
 from typing import Union
 
 from tqdm import trange
 
 from lib.objects.map import Map
+from lib.utils.parallel import run_n_in_parallel
 
 
 class Generator:
     possible_modes = ['one_patching']
 
     def __init__(self, data_root, map_obj, binary=True):
         self.binary: bool = binary
         self.map: Map = map_obj
         self.data_root = Path(data_root)
 
     def generate_n_trajectories_m_alternatives(self, n, m, dataset_name='', **kwargs):
         trajectories_with_alternatives = list()
         for _ in trange(n, desc='Processing Trajectories'):
             trajectory = self.map.get_random_trajectory()
             alternatives, labels = self.generate_n_alternatives(trajectory, m, dataset_name=dataset_name, **kwargs)
-            trajectories_with_alternatives.append(dict(trajectory=trajectory, alternatives=alternatives, labels=labels))
+            if not alternatives or not labels:
+                continue
+            else:
+                trajectories_with_alternatives.append(
+                    dict(trajectory=trajectory, alternatives=alternatives, labels=labels)
+                )
         return trajectories_with_alternatives
 
     def generate_alternatives(self, trajectory, output: Union[mp.Queue, None] = None, mode='one_patching'):
         start, dest = trajectory.endpoints
         if mode == 'one_patching':
             patch = self.map.get_valid_position()
             alternative = self.map.get_trajectory_from_vertices(start, patch, dest)
         else:
             raise RuntimeError(f'mode checking went wrong...')
 
         if output:
             output.put(alternative)
         return alternative
 
     def generate_n_alternatives(self, trajectory, n, dataset_name: Union[str, Path] = '',
-                                mode='one_patching', equal_samples=True):
+                                mode='one_patching', equal_samples=True, binary_check=True):
         assert mode in self.possible_modes, f'Parameter "mode" must be either {self.possible_modes}, but was {mode}.'
-        # Define an output queue
-        output = mp.Queue()
-        # Setup a list of processes that we want to run
-        processes = [mp.Process(target=self.generate_alternatives,
-                                kwargs=dict(trajectory=trajectory, output=output, mode=mode))
-                     for _ in range(n)]
-        # Run processes
-        for p in processes:
-            p.start()
-        # Exit the completed processes
-        for p in processes:
-            p.join()
-        # Get process results from the output queue
-        results = [output.get() for _ in processes]
+
+        # Workers put their results on a queue inside run_n_in_parallel
+        results = run_n_in_parallel(self.generate_alternatives, n, trajectory=trajectory, mode=mode)
 
         # label per homotopic class
         homotopy_classes = defaultdict(list)
         homotopy_classes[0].append(trajectory)
         for i in range(len(results)):
             alternative = results[i]
-            class_not_found, label = True, None
+            class_not_found = True
             # check for homotopy class
             for label in homotopy_classes.keys():
                 if self.map.are_homotopic(homotopy_classes[label][0], alternative):
                     homotopy_classes[label].append(alternative)
                     class_not_found = False
                     break
             if class_not_found:
-                label = len(homotopy_classes)
+                label = 1 if binary_check else len(homotopy_classes)
                 homotopy_classes[label].append(alternative)
 
-        # There should be as much homotopic samples as non-homotopic samples
+        # There should be as many homotopic samples as non-homotopic samples
         if equal_samples:
             homotopy_classes = self._remove_unequal(homotopy_classes)
+            if not homotopy_classes:
+                return None, None
 
         # Compose lists of alternatives with labels
         alternatives, labels = list(), list()
         for key in homotopy_classes.keys():
-            alternatives.extend([homotopy_classes[key]])
+            alternatives.extend(homotopy_classes[key])
             labels.extend([key] * len(homotopy_classes[key]))
 
         # Write to disk
         if dataset_name:
             self.write_to_disk(dataset_name, trajectory, alternatives, labels)
 
         # Return
         return alternatives, labels
 
     def write_to_disk(self, filepath, trajectory, alternatives, labels):
         dataset_name = filepath if filepath.endswith('.pik') else f'{filepath}.pik'
         self.data_root.mkdir(exist_ok=True, parents=True)
         with shelve.open(str(self.data_root / dataset_name), protocol=pickle.HIGHEST_PROTOCOL) as f:
             new_key = len(f)
             f[f'trajectory_{new_key}'] = dict(alternatives=alternatives,
                                               trajectory=trajectory,
                                               labels=labels)
             if 'map' not in f:
                 f['map'] = dict(map=self.map, name=f'map_{self.map.name}')
 
     @staticmethod
     def _remove_unequal(hom_dict):
+        # We argue that there will always be more non-homotopic routes than homotopic alternatives.
+        # TODO: Otherwise introduce a second condition / loop
         hom_dict = hom_dict.copy()
+        if len(hom_dict[0]) <= 1:
+            return None
 
         counter = len(hom_dict)
-        while sum([len(hom_dict[class_id]) for class_id in range(len(hom_dict))]) > len(hom_dict[0]):
-            if counter > len(hom_dict):
+        while sum([len(hom_dict[class_id]) for class_id in range(1, len(hom_dict))]) > len(hom_dict[0]):
+            if counter == 0:
                 counter = len(hom_dict)
             if counter in hom_dict:
                 if len(hom_dict[counter]) == 0:
                     del hom_dict[counter]
                 else:
                     del hom_dict[counter][-1]
             counter -= 1
         return hom_dict

View File

@@ -5,6 +5,8 @@ from collections import defaultdict
 from configparser import ConfigParser
 from pathlib import Path
 
+from lib.utils.model_io import ModelParameters
+
 
 def is_jsonable(x):
     import json
@@ -43,6 +45,10 @@ class Config(ConfigParser):
         return self._get_namespace_for_section('project')
 
     ###################################################
+    @property
+    def model_paramters(self):
+        return ModelParameters(self.model, self.train, self.data)
+
     @property
     def tags(self, ):
         return [f'{key}: {val}' for key, val in self.serializable.items()]

View File

@@ -50,7 +50,7 @@ class Logger(LightningLoggerBase):
         self.debug = debug
         self.config = config
         self._testtube_kwargs = dict(save_dir=self.outpath, version=self.version, name=self.name)
-        self._neptune_kwargs = dict(offline_mode=not self.debug,
+        self._neptune_kwargs = dict(offline_mode=self.debug,
                                     api_key=self.config.project.neptune_key,
                                     project_name=self.project_name,
                                     name=self.name,
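With this one-character change the Neptune logger runs in offline mode exactly when `debug` is set; previously the flag was inverted, so debug runs were the ones pushing to the remote project.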

lib/utils/parallel.py Normal file
View File

@@ -0,0 +1,23 @@
import multiprocessing as mp
import time


def run_n_in_parallel(f, n, **kwargs):
    output = mp.Queue()
    kwargs.update(output=output)
    # Set up a list of processes that we want to run
    processes = [mp.Process(target=f, kwargs=kwargs) for _ in range(n)]
    # Run processes
    results = []
    for p in processes:
        p.start()
    # Collect results from the output queue; every worker is expected to
    # `put` exactly one result, and `get()` blocks until one arrives.
    while len(results) != n:
        time.sleep(1)
        results.extend([output.get() for _ in processes])
    # Exit the completed processes
    for p in processes:
        p.join()
    return results
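A usage sketch for the helper (an assumption-driven example, not part of the commit): the target function must accept an `output` queue keyword and put exactly one result on it, as Generator.generate_alternatives does.

import multiprocessing as mp

from lib.utils.parallel import run_n_in_parallel

def square(x, output: mp.Queue = None):
    result = x * x
    if output is not None:
        output.put(result)
    return result

if __name__ == '__main__':
    print(run_n_in_parallel(square, 4, x=3))  # -> [9, 9, 9, 9]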

main.py
View File

@ -1,71 +1,88 @@
# Imports # Imports
# ============================================================================= # =============================================================================
import os import os
from distutils.util import strtobool from distutils.util import strtobool
from pathlib import Path from pathlib import Path
from argparse import ArgumentParser from argparse import ArgumentParser
import warnings import warnings
from pytorch_lightning import Trainer from pytorch_lightning import Trainer
from torch.utils.data import DataLoader from torch.utils.data import DataLoader
from dataset.dataset import TrajData from dataset.dataset import TrajData
from lib.utils.config import Config from lib.utils.config import Config
from lib.utils.logging import Logger from lib.utils.logging import Logger
warnings.filterwarnings('ignore', category=FutureWarning) warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=UserWarning) warnings.filterwarnings('ignore', category=UserWarning)
_ROOT = Path(__file__).parent _ROOT = Path(__file__).parent
# Paramter Configuration # Paramter Configuration
# ============================================================================= # =============================================================================
# Argument Parser # Argument Parser
main_arg_parser = ArgumentParser(description="parser for fast-neural-style") main_arg_parser = ArgumentParser(description="parser for fast-neural-style")
# Main Parameters # Main Parameters
main_arg_parser.add_argument("--main_debug", type=strtobool, default=False, help="") main_arg_parser.add_argument("--main_debug", type=strtobool, default=False, help="")
main_arg_parser.add_argument("--main_eval", type=strtobool, default=False, help="") main_arg_parser.add_argument("--main_eval", type=strtobool, default=False, help="")
main_arg_parser.add_argument("--main_seed", type=int, default=69, help="") main_arg_parser.add_argument("--main_seed", type=int, default=69, help="")
# Data Parameters # Data Parameters
main_arg_parser.add_argument("--data_worker", type=int, default=10, help="") main_arg_parser.add_argument("--data_worker", type=int, default=10, help="")
main_arg_parser.add_argument("--data_batchsize", type=int, default=100, help="") main_arg_parser.add_argument("--data_batchsize", type=int, default=100, help="")
main_arg_parser.add_argument("--data_root", type=str, default='../data/rpoot', help="") main_arg_parser.add_argument("--data_root", type=str, default='../data/rpoot', help="")
# Transformations # Transformations
main_arg_parser.add_argument("--transformations_to_tensor", type=strtobool, default=False, help="") main_arg_parser.add_argument("--transformations_to_tensor", type=strtobool, default=False, help="")
# Transformations # Transformations
main_arg_parser.add_argument("--train_outpath", type=str, default="output", help="") main_arg_parser.add_argument("--train_outpath", type=str, default="output", help="")
main_arg_parser.add_argument("--train_version", type=strtobool, required=False, help="") main_arg_parser.add_argument("--train_version", type=strtobool, required=False, help="")
main_arg_parser.add_argument("--train_epochs", type=int, default=10, help="") main_arg_parser.add_argument("--train_epochs", type=int, default=10, help="")
main_arg_parser.add_argument("--train_batch_size", type=int, default=512, help="") main_arg_parser.add_argument("--train_batch_size", type=int, default=512, help="")
main_arg_parser.add_argument("--train_lr", type=float, default=0.002, help="") main_arg_parser.add_argument("--train_lr", type=float, default=0.002, help="")
# Model # Model
main_arg_parser.add_argument("--model_type", type=str, default="LeNetAE", help="") main_arg_parser.add_argument("--model_type", type=str, default="LeNetAE", help="")
main_arg_parser.add_argument("--model_activation", type=str, default="relu", help="") main_arg_parser.add_argument("--model_activation", type=str, default="relu", help="")
main_arg_parser.add_argument("--model_filters", type=str, default="[32, 16, 4]", help="") main_arg_parser.add_argument("--model_filters", type=str, default="[32, 16, 4]", help="")
main_arg_parser.add_argument("--model_use_bias", type=strtobool, default=True, help="") main_arg_parser.add_argument("--model_use_bias", type=strtobool, default=True, help="")
main_arg_parser.add_argument("--model_use_norm", type=strtobool, default=True, help="") main_arg_parser.add_argument("--model_use_norm", type=strtobool, default=True, help="")
main_arg_parser.add_argument("--model_dropout", type=float, default=0.00, help="") main_arg_parser.add_argument("--model_dropout", type=float, default=0.00, help="")
# Project # Project
main_arg_parser.add_argument("--project_name", type=str, default='traj-gen', help="") main_arg_parser.add_argument("--project_name", type=str, default='traj-gen', help="")
main_arg_parser.add_argument("--project_owner", type=str, default='si11ium', help="") main_arg_parser.add_argument("--project_owner", type=str, default='si11ium', help="")
main_arg_parser.add_argument("--project_neptune_key", type=str, default=os.getenv('NEPTUNE_KEY'), help="") main_arg_parser.add_argument("--project_neptune_key", type=str, default=os.getenv('NEPTUNE_KEY'), help="")
# Parse it # Parse it
args = main_arg_parser.parse_args() args = main_arg_parser.parse_args()
config = Config.read_namespace(args) config = Config.read_namespace(args)
# Trainer loading ################
# ============================================================================= # TESTING ONLY #
trainer = Trainer(logger=Logger(config, debug=True)) # =============================================================================
hparams = config.model_paramters
dataset = TrajData('data', mapname='tate', alternatives=100, trajectories=10000)
if __name__ == '__main__': dataloader = DataLoader(dataset=dataset.train_dataset, shuffle=True,
print(next(iter(train_dataloader))) batch_size=hparams.data_param.batchsize,
pass num_workers=hparams.data_param.worker)
# Logger
# =============================================================================
logger = Logger(config, debug=True)
# Trainer
# =============================================================================
trainer = Trainer(logger=logger)
# Model
# =============================================================================
model = None
if __name__ == '__main__':
next(iter(dataloader))
pass