From 2305c8e54a2054205f89460d2a35eb938c0da30c Mon Sep 17 00:00:00 2001 From: Steffen Illium Date: Fri, 13 Mar 2020 21:52:33 +0100 Subject: [PATCH] fig clf inserted and not resize on kld --- .gitignore | 1 + datasets/mnist.py | 29 ++ datasets/trajectory_dataset.py | 93 ++++--- lib/models/generators/cnn.py | 301 ++++++++------------- lib/models/generators/cnn_discriminated.py | 116 ++++++++ lib/objects/map.py | 2 +- lib/utils/config.py | 4 +- lib/utils/logging.py | 2 +- lib/utils/tools.py | 1 + lib/variables.py | 8 +- lib/visualization/generator_eval.py | 109 ++++++-- main.py | 16 +- res/shapes/inverted_1.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_10.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_2.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_3.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_4.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_5.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_6.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_7.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_8.bmp | Bin 30054 -> 0 bytes res/shapes/inverted_9.bmp | Bin 30054 -> 0 bytes res/shapes/shapes_1.bmp | Bin 1662 -> 0 bytes res/shapes/shapes_10.bmp | Bin 30138 -> 0 bytes res/shapes/shapes_2.bmp | Bin 1662 -> 0 bytes res/shapes/shapes_3.bmp | Bin 1662 -> 0 bytes res/shapes/shapes_3.png | Bin 831 -> 0 bytes res/shapes/shapes_4.bmp | Bin 1662 -> 0 bytes res/shapes/shapes_5.bmp | Bin 1662 -> 0 bytes res/shapes/shapes_6.bmp | Bin 1662 -> 0 bytes res/shapes/shapes_7.bmp | Bin 30054 -> 0 bytes res/shapes/shapes_8.bmp | Bin 30054 -> 0 bytes res/shapes/shapes_9.bmp | Bin 30054 -> 0 bytes 33 files changed, 403 insertions(+), 279 deletions(-) create mode 100644 datasets/mnist.py create mode 100644 lib/models/generators/cnn_discriminated.py delete mode 100644 res/shapes/inverted_1.bmp delete mode 100644 res/shapes/inverted_10.bmp delete mode 100644 res/shapes/inverted_2.bmp delete mode 100644 res/shapes/inverted_3.bmp delete mode 100644 res/shapes/inverted_4.bmp delete mode 100644 res/shapes/inverted_5.bmp delete mode 100644 res/shapes/inverted_6.bmp delete mode 100644 res/shapes/inverted_7.bmp delete mode 100644 res/shapes/inverted_8.bmp delete mode 100644 res/shapes/inverted_9.bmp delete mode 100644 res/shapes/shapes_1.bmp delete mode 100644 res/shapes/shapes_10.bmp delete mode 100644 res/shapes/shapes_2.bmp delete mode 100644 res/shapes/shapes_3.bmp delete mode 100644 res/shapes/shapes_3.png delete mode 100644 res/shapes/shapes_4.bmp delete mode 100644 res/shapes/shapes_5.bmp delete mode 100644 res/shapes/shapes_6.bmp delete mode 100644 res/shapes/shapes_7.bmp delete mode 100644 res/shapes/shapes_8.bmp delete mode 100644 res/shapes/shapes_9.bmp diff --git a/.gitignore b/.gitignore index 04b95f7..473201b 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ # User-specific stuff .idea/** +res/** # CMake cmake-build-*/ diff --git a/datasets/mnist.py b/datasets/mnist.py new file mode 100644 index 0000000..66fe06f --- /dev/null +++ b/datasets/mnist.py @@ -0,0 +1,29 @@ +from torchvision.datasets import MNIST +import numpy as np + + +class MyMNIST(MNIST): + + @property + def map_shapes_max(self): + return np.asarray(self.test_dataset[0][0]).shape + + def __init__(self, *args, **kwargs): + super(MyMNIST, self).__init__('res', train=False, download=True) + pass + + def __getitem__(self, item): + image = super(MyMNIST, self).__getitem__(item) + return np.expand_dims(np.asarray(image[0]), axis=0).astype(np.float32), image[1] + + @property + def train_dataset(self): + return self.__class__('res', train=True, 
download=True) + + @property + def test_dataset(self): + return self.__class__('res', train=False, download=True) + + @property + def val_dataset(self): + return self.__class__('res', train=False, download=True) diff --git a/datasets/trajectory_dataset.py b/datasets/trajectory_dataset.py index 2b62a24..f48113c 100644 --- a/datasets/trajectory_dataset.py +++ b/datasets/trajectory_dataset.py @@ -1,6 +1,9 @@ import shelve +from collections import defaultdict from pathlib import Path -from typing import Union, List +from typing import Union + +from torchvision.transforms import Normalize import multiprocessing as mp @@ -24,16 +27,17 @@ class TrajDataShelve(Dataset): return self[0][0].shape def __init__(self, file_path, **kwargs): + assert Path(file_path).exists() super(TrajDataShelve, self).__init__() self._mutex = mp.Lock() self.file_path = str(file_path) - def __len__(self): self._mutex.acquire() with shelve.open(self.file_path) as d: length = len(d) - self._mutex.release() + d.close() + self._mutex.release() return length def seed(self): @@ -43,12 +47,20 @@ class TrajDataShelve(Dataset): self._mutex.acquire() with shelve.open(self.file_path) as d: sample = d[str(item)] - self._mutex.release() + d.close() + self._mutex.release() return sample class TrajDataset(Dataset): + @property + def _last_label_init(self): + d = defaultdict(lambda: -1) + d['generator_hom_all_in_map'] = V.ALTERNATIVE + d['generator_alt_all_in_map'] = V.HOMOTOPIC + return d[self.mode] + @property def map_shape(self): return self.map.as_array.shape @@ -57,17 +69,18 @@ class TrajDataset(Dataset): length=100000, mode='separated_arrays', embedding_size=None, preserve_equal_samples=False, **kwargs): super(TrajDataset, self).__init__() - assert mode.lower() in ['generator_all_in_map', 'generator_hom_all_in_map' - 'classifier_all_in_map'] - self.normalized = normalized + assert mode.lower() in ['generator_all_in_map', 'generator_hom_all_in_map', 'generator_alt_all_in_map', + 'ae_no_label_in_map', + 'generator_alt_no_label_in_map', 'classifier_all_in_map', 'vae_no_label_in_map'] + self.normalize = Normalize(0.5, 0.5) if normalized else lambda x: x self.preserve_equal_samples = preserve_equal_samples self.mode = mode self.mapname = mapname if mapname.endswith('.bmp') else f'{mapname}.bmp' self.maps_root = maps_root self._len = length - self.last_label = V.ALTERNATIVE if 'hom' in self.mode else choice([-1, V.ALTERNATIVE, V.HOMOTOPIC]) + self.last_label = self._last_label_init - self.map = Map(self.mapname).from_image(self.maps_root / self.mapname, embedding_size=embedding_size) + self.map = Map.from_image(self.maps_root / self.mapname, embedding_size=embedding_size) def __len__(self): return self._len @@ -82,6 +95,7 @@ class TrajDataset(Dataset): map_array = torch.as_tensor(self.map.as_array).float() return (map_array, trajectory_space), label + # Produce an alternative. 
while True: trajectory = self.map.get_random_trajectory() alternative = self.map.generate_alternative(trajectory) @@ -91,18 +105,19 @@ class TrajDataset(Dataset): else: break - self.last_label = label if self.mode != ['generator_hom_all_in_map'] else V.ALTERNATIVE - if self.mode.lower() in ['classifier_all_in_map', 'generator_all_in_map']: + self.last_label = label if self._last_label_init == V.ANY else self._last_label_init[self.mode] + if 'in_map' in self.mode.lower(): map_array = self.map.as_array trajectory = trajectory.draw_in_array(self.map_shape) alternative = alternative.draw_in_array(self.map_shape) label_as_array = np.full_like(map_array, label) - if self.normalized: - map_array = map_array / V.WHITE - trajectory = trajectory / V.WHITE - alternative = alternative / V.WHITE + if self.mode == 'generator_all_in_map': return np.concatenate((map_array, trajectory, label_as_array)), alternative + elif self.mode in ['vae_no_label_in_map', 'ae_no_label_in_map']: + return np.sum((map_array, trajectory, alternative), axis=0), 0 + elif self.mode in ['generator_alt_no_label_in_map', 'generator_hom_no_label_in_map']: + return np.concatenate((map_array, trajectory)), alternative elif self.mode == 'classifier_all_in_map': return np.concatenate((map_array, trajectory, alternative)), label @@ -119,13 +134,13 @@ class TrajDataset(Dataset): class TrajData(object): @property def map_shapes(self): - return [dataset.map_shape for dataset in self._train_dataset.datasets] + return [dataset.map_shape for dataset in self.train_dataset.datasets] @property def map_shapes_max(self): shapes = self.map_shapes shape_list = list(map(max, zip(*shapes))) - if '_all_in_map' in self.mode: + if '_all_in_map' in self.mode and not self.preprocessed: shape_list[0] += 2 return shape_list @@ -139,14 +154,13 @@ class TrajData(object): self.mode = mode self.maps_root = Path(map_root) self.length = length - self._test_dataset = self._load_datasets('train') - self._val_dataset = self._load_datasets('val') - self._train_dataset = self._load_datasets('test') + self.test_dataset = self._load_datasets('test') + self.val_dataset = self._load_datasets('val') + self.train_dataset = self._load_datasets('train') def _load_datasets(self, dataset_type=''): map_files = list(self.maps_root.glob('*.bmp')) - equal_split = int(self.length // len(map_files)) or 1 # find max image size among available maps: max_map_size = (1, ) + tuple(reversed(tuple(map(max, *[Image.open(map_file).size for map_file in map_files])))) @@ -156,10 +170,11 @@ class TrajData(object): preprocessed_map_names = [p.name for p in preprocessed_map_files] datasets = [] for map_file in map_files: - new_pik_name = f'{dataset_type}_{str(map_file.name)[:-3]}.pik' + equal_split = int(self.length // len(map_files)) or 5 + new_pik_name = f'{self.mode}_{map_file.name[:-4]}_{dataset_type}.pik' if dataset_type != 'train': - equal_split *= 0.01 - if not [f'{new_pik_name[:-3]}.bmp' in preprocessed_map_names]: + equal_split = max(int(equal_split * 0.01), 10) + if not new_pik_name in preprocessed_map_names: traj_dataset = TrajDataset(maps_root=self.maps_root, mapname=map_file.name, length=equal_split, mode=self.mode, embedding_size=max_map_size, normalized=self.normalized, preserve_equal_samples=True) @@ -168,6 +183,9 @@ class TrajData(object): dataset = TrajDataShelve(map_file.parent / new_pik_name) datasets.append(dataset) return ConcatDataset(datasets) + + # Set the equal split so that all maps are visited with the same frequency + equal_split = int(self.length // len(map_files)) or 
5 return ConcatDataset([TrajDataset(maps_root=self.maps_root, mapname=map_file.name, length=equal_split, mode=self.mode, embedding_size=max_map_size, normalized=self.normalized, preserve_equal_samples=True) @@ -185,29 +203,14 @@ class TrajData(object): def dump_n(self, file_path, traj_dataset: TrajDataset, n=100000): assert str(file_path).endswith('.pik') - processes = mp.cpu_count() - 1 mutex = mp.Lock() - with mp.Pool(processes) as pool: - async_results = [pool.apply_async(traj_dataset.__getitem__, kwds=dict(item=i)) for i in range(n)] + for i in tqdm(range(n), total=n, desc=f'Generating {n} Samples'): + sample = traj_dataset[i] + mutex.acquire() + write_to_shelve(file_path, sample) + mutex.release() - for result_obj in tqdm(async_results, total=n, desc=f'Generating {n} Samples'): - sample = result_obj.get() - mutex.acquire() - write_to_shelve(file_path, sample) - mutex.release() - print(f'{n} samples sucessfully dumped to "{file_path}"!') - - @property - def train_dataset(self): - return self._train_dataset - - @property - def val_dataset(self): - return self._val_dataset - - @property - def test_dataset(self): - return self._test_dataset + print(f'{n} samples successfully dumped to "{file_path}"!') def get_datasets(self): return self._train_dataset, self._val_dataset, self._test_dataset diff --git a/lib/models/generators/cnn.py b/lib/models/generators/cnn.py index ac55354..076f1a6 100644 --- a/lib/models/generators/cnn.py +++ b/lib/models/generators/cnn.py @@ -1,19 +1,22 @@ -from random import choices, seed -import numpy as np - -import torch from functools import reduce from operator import mul +from random import choices, choice + +import torch + from torch import nn from torch.optim import Adam +from torchvision.datasets import MNIST +from datasets.mnist import MyMNIST from datasets.trajectory_dataset import TrajData -from lib.evaluation.classification import ROCEvaluation from lib.modules.blocks import ConvModule, ResidualModule, DeConvModule from lib.modules.utils import LightningBaseModule, Flatten import matplotlib.pyplot as plt +import lib.variables as V +from lib.visualization.generator_eval import GeneratorVisualizer class CNNRouteGeneratorModel(LightningBaseModule): @@ -24,48 +27,71 @@ class CNNRouteGeneratorModel(LightningBaseModule): return Adam(self.parameters(), lr=self.hparams.train_param.lr) def training_step(self, batch_xy, batch_nb, *args, **kwargs): - batch_x, alternative = batch_xy + batch_x, target = batch_xy generated_alternative, z, mu, logvar = self(batch_x) - element_wise_loss = self.criterion(generated_alternative, alternative) - # see Appendix B from VAE paper: - # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 - # https://arxiv.org/abs/1312.6114 - # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) + target = batch_x if 'ae' in self.hparams.data_param.mode else target + element_wise_loss = self.criterion(generated_alternative, target) - kld_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) - # Dimensional Resizing TODO: Does This make sense? Sanity Check it! - # kld_loss /= reduce(mul, self.in_shape) - # kld_loss *= self.hparams.data_param.dataset_length / self.hparams.train_param.batch_size * 100 + if 'vae' in self.hparams.data_param.mode: + # see Appendix B from VAE paper: + # Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR, 2014 + # https://arxiv.org/abs/1312.6114 + # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) + kld_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) + # Dimensional Resizing TODO: Does This make sense? Sanity Check it! + # kld_loss /= reduce(mul, self.in_shape) + # kld_loss *= self.hparams.data_param.dataset_length / self.hparams.train_param.batch_size - loss = kld_loss + element_wise_loss + loss = kld_loss + element_wise_loss + else: + loss = element_wise_loss + kld_loss = 0 return dict(loss=loss, log=dict(element_wise_loss=element_wise_loss, loss=loss, kld_loss=kld_loss)) def _test_val_step(self, batch_xy, batch_nb, *args): batch_x, _ = batch_xy - map_array = batch_x[:, 0].unsqueeze(1) - trajectory = batch_x[:, 1].unsqueeze(1) - labels = batch_x[:, 2].unsqueeze(1).max(dim=-1).values.max(-1).values + if 'vae' in self.hparams.data_param.mode: + z, mu, logvar = self.encode(batch_x) + else: + z = self.encode(batch_x) + mu, logvar = z, z - _, mu, _ = self.encode(batch_x) generated_alternative = self.generate(mu) - return dict(maps=map_array, trajectories=trajectory, batch_nb=batch_nb, labels=labels, - generated_alternative=generated_alternative, pred_label=-1) + return_dict = dict(input=batch_x, batch_nb=batch_nb, output=generated_alternative, z=z, mu=mu, logvar=logvar) + + if 'hom' in self.hparams.data_param.mode: + labels = torch.full((batch_x.shape[0], 1), V.HOMOTOPIC) + elif 'alt' in self.hparams.data_param.mode: + labels = torch.full((batch_x.shape[0], 1), V.ALTERNATIVE) + elif 'vae' in self.hparams.data_param.mode: + labels = torch.full((batch_x.shape[0], 1), V.ANY) + elif 'ae' in self.hparams.data_param.mode: + labels = torch.full((batch_x.shape[0], 1), V.ANY) + else: + labels = batch_x[:, 2].unsqueeze(1).max(dim=-1).values.max(-1).values + + return_dict.update(labels=self._move_to_model_device(labels)) + return return_dict def _test_val_epoch_end(self, outputs, test=False): - val_restul_dict = self.generate_random() + plt.close('all') - from lib.visualization.generator_eval import GeneratorVisualizer - g = GeneratorVisualizer(**val_restul_dict) - fig = g.draw() + g = GeneratorVisualizer(choice(outputs)) + fig = g.draw_io_bundle() self.logger.log_image(f'{self.name}_Output', fig, step=self.global_step) plt.clf() + fig = g.draw_latent() + self.logger.log_image(f'{self.name}_Latent', fig, step=self.global_step) + plt.clf() + return dict(epoch=self.current_epoch) def on_epoch_start(self): - self.dataset.seed(self.logger.version) + # self.dataset.seed(self.logger.version) # torch.random.manual_seed(self.logger.version) # np.random.seed(self.logger.version) + pass def validation_step(self, *args): return self._test_val_step(*args) @@ -82,19 +108,23 @@ class CNNRouteGeneratorModel(LightningBaseModule): def __init__(self, *params, issubclassed=False): super(CNNRouteGeneratorModel, self).__init__(*params) - if not issubclassed: + if False: # Dataset - self.dataset = TrajData(self.hparams.data_param.map_root, mode='generator_all_in_map', + self.dataset = TrajData(self.hparams.data_param.map_root, + mode=self.hparams.data_param.mode, preprocessed=self.hparams.data_param.use_preprocessed, length=self.hparams.data_param.dataset_length, normalized=True) - self.criterion = nn.MSELoss() + self.criterion = nn.MSELoss() + + self.dataset = MyMNIST() # Additional Attributes # ####################################################### self.in_shape = self.dataset.map_shapes_max self.use_res_net = self.hparams.model_param.use_res_net self.lat_dim = self.hparams.model_param.lat_dim - 
self.feature_dim = self.lat_dim * 10 + self.feature_dim = self.lat_dim + self.out_channels = 1 if 'generator' in self.hparams.data_param.mode else self.in_shape[0] ######################################################## # NN Nodes @@ -119,7 +149,7 @@ class CNNRouteGeneratorModel(LightningBaseModule): conv_filters=self.hparams.model_param.filters[1], use_norm=self.hparams.model_param.use_norm, use_bias=self.hparams.model_param.use_bias) - self.enc_conv_1b = ConvModule(self.enc_conv_1a.shape, conv_kernel=3, conv_stride=2, conv_padding=0, + self.enc_conv_1b = ConvModule(self.enc_conv_1a.shape, conv_kernel=3, conv_stride=1, conv_padding=0, conv_filters=self.hparams.model_param.filters[1], use_norm=self.hparams.model_param.use_norm, use_bias=self.hparams.model_param.use_bias) @@ -137,20 +167,8 @@ class CNNRouteGeneratorModel(LightningBaseModule): use_norm=self.hparams.model_param.use_norm, use_bias=self.hparams.model_param.use_bias) - self.enc_res_3 = ResidualModule(self.enc_conv_2b.shape, ConvModule, 2, conv_kernel=7, conv_stride=1, - conv_padding=3, conv_filters=self.hparams.model_param.filters[2], - use_norm=self.hparams.model_param.use_norm, - use_bias=self.hparams.model_param.use_bias) - self.enc_conv_3a = ConvModule(self.enc_res_3.shape, conv_kernel=7, conv_stride=1, conv_padding=0, - conv_filters=self.hparams.model_param.filters[2], - use_norm=self.hparams.model_param.use_norm, - use_bias=self.hparams.model_param.use_bias) - self.enc_conv_3b = ConvModule(self.enc_conv_3a.shape, conv_kernel=7, conv_stride=1, conv_padding=0, - conv_filters=self.hparams.model_param.filters[2], - use_norm=self.hparams.model_param.use_norm, - use_bias=self.hparams.model_param.use_bias) - - self.enc_flat = Flatten(self.enc_conv_3b.shape) + last_conv_shape = self.enc_conv_2b.shape + self.enc_flat = Flatten(last_conv_shape) self.enc_lin_1 = nn.Linear(self.enc_flat.shape, self.feature_dim) # @@ -160,46 +178,43 @@ class CNNRouteGeneratorModel(LightningBaseModule): # # Variational Bottleneck - self.mu = nn.Linear(self.feature_dim, self.lat_dim) - self.logvar = nn.Linear(self.feature_dim, self.lat_dim) + if 'vae' in self.hparams.data_param.mode: + self.mu = nn.Linear(self.feature_dim, self.lat_dim) + self.logvar = nn.Linear(self.feature_dim, self.lat_dim) + + # + # Linear Bottleneck + else: + self.z = nn.Linear(self.feature_dim, self.lat_dim) # # Alternative Generator - self.gen_lin_1 = nn.Linear(self.hparams.model_param.lat_dim, self.feature_dim) + self.gen_lin_1 = nn.Linear(self.lat_dim, self.enc_flat.shape) - self.gen_lin_2 = nn.Linear(self.feature_dim, self.enc_flat.shape) + # self.gen_lin_2 = nn.Linear(self.feature_dim, self.enc_flat.shape) - self.reshape_to_last_conv = Flatten(self.enc_flat.shape, self.enc_conv_3b.shape) + self.reshape_to_last_conv = Flatten(self.enc_flat.shape, last_conv_shape) - self.gen_deconv_1a = DeConvModule(self.enc_conv_3b.shape, self.hparams.model_param.filters[2], - conv_padding=0, conv_kernel=11, conv_stride=1, - use_norm=self.hparams.model_param.use_norm) - self.gen_deconv_1b = DeConvModule(self.gen_deconv_1a.shape, self.hparams.model_param.filters[2], - conv_padding=0, conv_kernel=9, conv_stride=2, + self.gen_deconv_1a = DeConvModule(last_conv_shape, self.hparams.model_param.filters[2], + conv_padding=1, conv_kernel=9, conv_stride=1, use_norm=self.hparams.model_param.use_norm) - self.gen_deconv_2a = DeConvModule(self.gen_deconv_1b.shape, self.hparams.model_param.filters[1], - conv_padding=0, conv_kernel=7, conv_stride=1, - use_norm=self.hparams.model_param.use_norm) - 
self.gen_deconv_2b = DeConvModule(self.gen_deconv_2a.shape, self.hparams.model_param.filters[1], - conv_padding=0, conv_kernel=7, conv_stride=1, + self.gen_deconv_2a = DeConvModule(self.gen_deconv_1a.shape, self.hparams.model_param.filters[1], + conv_padding=1, conv_kernel=7, conv_stride=1, use_norm=self.hparams.model_param.use_norm) - self.gen_deconv_3a = DeConvModule(self.gen_deconv_2b.shape, self.hparams.model_param.filters[0], - conv_padding=1, conv_kernel=5, conv_stride=1, - use_norm=self.hparams.model_param.use_norm) - self.gen_deconv_3b = DeConvModule(self.gen_deconv_3a.shape, self.hparams.model_param.filters[0], - conv_padding=1, conv_kernel=4, conv_stride=1, - use_norm=self.hparams.model_param.use_norm) - - self.gen_deconv_out = DeConvModule(self.gen_deconv_3b.shape, 1, activation=None, + self.gen_deconv_out = DeConvModule(self.gen_deconv_2a.shape, self.out_channels, activation=None, conv_padding=0, conv_kernel=3, conv_stride=1, use_norm=self.hparams.model_param.use_norm) def forward(self, batch_x): # # Encode - z, mu, logvar = self.encode(batch_x) + if 'vae' in self.hparams.data_param.mode: + z, mu, logvar = self.encode(batch_x) + else: + z = self.encode(batch_x) + mu, logvar = z, z # # Generate @@ -220,148 +235,46 @@ class CNNRouteGeneratorModel(LightningBaseModule): combined_tensor = self.enc_res_2(combined_tensor) if self.use_res_net else combined_tensor combined_tensor = self.enc_conv_2a(combined_tensor) combined_tensor = self.enc_conv_2b(combined_tensor) - combined_tensor = self.enc_res_3(combined_tensor) if self.use_res_net else combined_tensor - combined_tensor = self.enc_conv_3a(combined_tensor) - combined_tensor = self.enc_conv_3b(combined_tensor) + # combined_tensor = self.enc_res_3(combined_tensor) if self.use_res_net else combined_tensor + # combined_tensor = self.enc_conv_3a(combined_tensor) + # combined_tensor = self.enc_conv_3b(combined_tensor) combined_tensor = self.enc_flat(combined_tensor) combined_tensor = self.enc_lin_1(combined_tensor) - combined_tensor = self.enc_lin_2(combined_tensor) - combined_tensor = self.enc_norm(combined_tensor) combined_tensor = self.activation(combined_tensor) + combined_tensor = self.enc_lin_2(combined_tensor) combined_tensor = self.enc_norm(combined_tensor) combined_tensor = self.activation(combined_tensor) # + # Variational # Parameter and Sampling - mu = self.mu(combined_tensor) - logvar = self.logvar(combined_tensor) - z = self.reparameterize(mu, logvar) - return z, mu, logvar + if 'vae' in self.hparams.data_param.mode: + mu = self.mu(combined_tensor) + logvar = self.logvar(combined_tensor) + z = self.reparameterize(mu, logvar) + return z, mu, logvar + else: + # + # Linear Bottleneck + z = self.z(combined_tensor) + return z def generate(self, z): alt_tensor = self.gen_lin_1(z) alt_tensor = self.activation(alt_tensor) - alt_tensor = self.gen_lin_2(alt_tensor) - alt_tensor = self.activation(alt_tensor) + # alt_tensor = self.gen_lin_2(alt_tensor) + # alt_tensor = self.activation(alt_tensor) alt_tensor = self.reshape_to_last_conv(alt_tensor) alt_tensor = self.gen_deconv_1a(alt_tensor) - alt_tensor = self.gen_deconv_1b(alt_tensor) + alt_tensor = self.gen_deconv_2a(alt_tensor) - alt_tensor = self.gen_deconv_2b(alt_tensor) - alt_tensor = self.gen_deconv_3a(alt_tensor) - alt_tensor = self.gen_deconv_3b(alt_tensor) + + # alt_tensor = self.gen_deconv_3a(alt_tensor) + # alt_tensor = self.gen_deconv_3b(alt_tensor) alt_tensor = self.gen_deconv_out(alt_tensor) # alt_tensor = self.activation(alt_tensor) - alt_tensor = self.sigmoid(alt_tensor) 
+ # alt_tensor = self.sigmoid(alt_tensor) return alt_tensor - - def generate_random(self, n=12): - - samples, alternatives = zip(*[self.dataset.test_dataset[choice] - for choice in choices(range(self.dataset.length), k=n)]) - samples = self._move_to_model_device(torch.stack([torch.as_tensor(x) for x in samples])) - alternatives = self._move_to_model_device(torch.stack([torch.as_tensor(x) for x in alternatives])) - - return self._test_val_step((samples, alternatives), -9999) - - -class CNNRouteGeneratorDiscriminated(CNNRouteGeneratorModel): - - name = 'CNNRouteGeneratorDiscriminated' - - def training_step(self, batch_xy, batch_nb, *args, **kwargs): - batch_x, label = batch_xy - - generated_alternative, z, mu, logvar = self(batch_x) - map_array, trajectory = batch_x - - map_stack = torch.cat((map_array, trajectory, generated_alternative), dim=1) - pred_label = self.discriminator(map_stack) - discriminated_bce_loss = self.criterion(pred_label, label.float().unsqueeze(-1)) - - # see Appendix B from VAE paper: - # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014 - # https://arxiv.org/abs/1312.6114 - # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) - kld_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) - # Dimensional Resizing - kld_loss /= reduce(mul, self.in_shape) - - loss = (kld_loss + discriminated_bce_loss) / 2 - return dict(loss=loss, log=dict(loss=loss, - discriminated_bce_loss=discriminated_bce_loss, - kld_loss=kld_loss) - ) - - def _test_val_step(self, batch_xy, batch_nb, *args): - batch_x, label = batch_xy - - generated_alternative, z, mu, logvar = self(batch_x) - map_array, trajectory = batch_x - - map_stack = torch.cat((map_array, trajectory, generated_alternative), dim=1) - pred_label = self.discriminator(map_stack) - - discriminated_bce_loss = self.criterion(pred_label, label.float().unsqueeze(-1)) - return dict(discriminated_bce_loss=discriminated_bce_loss, batch_nb=batch_nb, - pred_label=pred_label, label=label, generated_alternative=generated_alternative) - - def validation_step(self, *args): - return self._test_val_step(*args) - - def validation_epoch_end(self, outputs: list): - return self._test_val_epoch_end(outputs) - - def _test_val_epoch_end(self, outputs, test=False): - evaluation = ROCEvaluation(plot_roc=True) - pred_label = torch.cat([x['pred_label'] for x in outputs]) - labels = torch.cat([x['label'] for x in outputs]).unsqueeze(1) - mean_losses = torch.stack([x['discriminated_bce_loss'] for x in outputs]).mean() - - # Sci-py call ROC eval call is eval(true_label, prediction) - roc_auc, tpr, fpr = evaluation(labels.cpu().numpy(), pred_label.cpu().numpy(), ) - if test: - # self.logger.log_metrics(score_dict) - self.logger.log_image(f'{self.name}_ROC-Curve', plt.gcf(), step=self.global_step) - plt.clf() - - maps, trajectories, labels, val_restul_dict = self.generate_random() - - from lib.visualization.generator_eval import GeneratorVisualizer - g = GeneratorVisualizer(maps, trajectories, labels, val_restul_dict) - fig = g.draw() - self.logger.log_image(f'{self.name}_Output', fig, step=self.global_step) - plt.clf() - - return dict(mean_losses=mean_losses, roc_auc=roc_auc, epoch=self.current_epoch) - - def test_step(self, *args): - return self._test_val_step(*args) - - def test_epoch_end(self, outputs): - return self._test_val_epoch_end(outputs, test=True) - - @property - def discriminator(self): - if self._disc is None: - raise RuntimeError('Set the Discriminator first; "set_discriminator(disc_model)') - return self._disc - - def 
set_discriminator(self, disc_model): - if self._disc is not None: - raise RuntimeError('Discriminator has already been set... What are trying to do?') - self._disc = disc_model - - def __init__(self, *params): - raise NotImplementedError - super(CNNRouteGeneratorDiscriminated, self).__init__(*params, issubclassed=True) - - self._disc = None - - self.criterion = nn.BCELoss() - - self.dataset = TrajData(self.hparams.data_param.map_root, mode='just_route', preprocessed=True, - length=self.hparams.data_param.dataset_length, normalized=True) diff --git a/lib/models/generators/cnn_discriminated.py b/lib/models/generators/cnn_discriminated.py new file mode 100644 index 0000000..9857a01 --- /dev/null +++ b/lib/models/generators/cnn_discriminated.py @@ -0,0 +1,116 @@ +from random import choices, seed +import numpy as np + +import torch +from functools import reduce +from operator import mul + +from torch import nn +from torch.optim import Adam + +from datasets.trajectory_dataset import TrajData +from lib.evaluation.classification import ROCEvaluation +from lib.models.generators.cnn import CNNRouteGeneratorModel +from lib.modules.blocks import ConvModule, ResidualModule, DeConvModule +from lib.modules.utils import LightningBaseModule, Flatten + +import matplotlib.pyplot as plt + + +class CNNRouteGeneratorDiscriminated(CNNRouteGeneratorModel): + + name = 'CNNRouteGeneratorDiscriminated' + + def training_step(self, batch_xy, batch_nb, *args, **kwargs): + batch_x, label = batch_xy + + generated_alternative, z, mu, logvar = self(batch_x) + map_array, trajectory = batch_x + + map_stack = torch.cat((map_array, trajectory, generated_alternative), dim=1) + pred_label = self.discriminator(map_stack) + discriminated_bce_loss = self.criterion(pred_label, label.float().unsqueeze(-1)) + + # see Appendix B from VAE paper: + # Kingma and Welling. Auto-Encoding Variational Bayes. 
ICLR, 2014 + # https://arxiv.org/abs/1312.6114 + # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2) + kld_loss = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) + # Dimensional Resizing + kld_loss /= reduce(mul, self.in_shape) + + loss = (kld_loss + discriminated_bce_loss) / 2 + return dict(loss=loss, log=dict(loss=loss, + discriminated_bce_loss=discriminated_bce_loss, + kld_loss=kld_loss) + ) + + def _test_val_step(self, batch_xy, batch_nb, *args): + batch_x, label = batch_xy + + generated_alternative, z, mu, logvar = self(batch_x) + map_array, trajectory = batch_x + + map_stack = torch.cat((map_array, trajectory, generated_alternative), dim=1) + pred_label = self.discriminator(map_stack) + + discriminated_bce_loss = self.criterion(pred_label, label.float().unsqueeze(-1)) + return dict(discriminated_bce_loss=discriminated_bce_loss, batch_nb=batch_nb, + pred_label=pred_label, label=label, generated_alternative=generated_alternative) + + def validation_step(self, *args): + return self._test_val_step(*args) + + def validation_epoch_end(self, outputs: list): + return self._test_val_epoch_end(outputs) + + def _test_val_epoch_end(self, outputs, test=False): + evaluation = ROCEvaluation(plot_roc=True) + pred_label = torch.cat([x['pred_label'] for x in outputs]) + labels = torch.cat([x['label'] for x in outputs]).unsqueeze(1) + mean_losses = torch.stack([x['discriminated_bce_loss'] for x in outputs]).mean() + + # Sci-py call ROC eval call is eval(true_label, prediction) + roc_auc, tpr, fpr = evaluation(labels.cpu().numpy(), pred_label.cpu().numpy(), ) + if test: + # self.logger.log_metrics(score_dict) + self.logger.log_image(f'{self.name}_ROC-Curve', plt.gcf(), step=self.global_step) + plt.clf() + + maps, trajectories, labels, val_restul_dict = self.generate_random() + + from lib.visualization.generator_eval import GeneratorVisualizer + g = GeneratorVisualizer(maps, trajectories, labels, val_restul_dict) + fig = g.draw() + self.logger.log_image(f'{self.name}_Output', fig, step=self.global_step) + plt.clf() + + return dict(mean_losses=mean_losses, roc_auc=roc_auc, epoch=self.current_epoch) + + def test_step(self, *args): + return self._test_val_step(*args) + + def test_epoch_end(self, outputs): + return self._test_val_epoch_end(outputs, test=True) + + @property + def discriminator(self): + if self._disc is None: + raise RuntimeError('Set the Discriminator first; "set_discriminator(disc_model)') + return self._disc + + def set_discriminator(self, disc_model): + if self._disc is not None: + raise RuntimeError('Discriminator has already been set... 
What are trying to do?') + self._disc = disc_model + + def __init__(self, *params): + raise NotImplementedError + super(CNNRouteGeneratorDiscriminated, self).__init__(*params, issubclassed=True) + + self._disc = None + + self.criterion = nn.BCELoss() + + self.dataset = TrajData(self.hparams.data_param.map_root, mode='just_route', preprocessed=True, + length=self.hparams.data_param.dataset_length, normalized=True) diff --git a/lib/objects/map.py b/lib/objects/map.py index 46a0423..726d292 100644 --- a/lib/objects/map.py +++ b/lib/objects/map.py @@ -189,5 +189,5 @@ class MapStorage(UserDict): ) for map_file in map_files: - current_map = Map().from_image(map_file, embedding_size=self.max_map_size) + current_map = Map.from_image(map_file, embedding_size=self.max_map_size) self.__setitem__(map_file.name, current_map) diff --git a/lib/utils/config.py b/lib/utils/config.py index ee790e7..673bcfb 100644 --- a/lib/utils/config.py +++ b/lib/utils/config.py @@ -5,7 +5,9 @@ from collections import defaultdict from configparser import ConfigParser from pathlib import Path -from lib.models.generators.cnn import CNNRouteGeneratorModel, CNNRouteGeneratorDiscriminated +from lib.models.generators.cnn import CNNRouteGeneratorModel +from lib.models.generators.cnn_discriminated import CNNRouteGeneratorDiscriminated + from lib.models.homotopy_classification.cnn_based import ConvHomDetector from lib.utils.model_io import ModelParameters from lib.utils.transforms import AsArray diff --git a/lib/utils/logging.py b/lib/utils/logging.py index deb3d38..8050687 100644 --- a/lib/utils/logging.py +++ b/lib/utils/logging.py @@ -37,7 +37,7 @@ class Logger(LightningLoggerBase): @property def outpath(self): # ToDo: Add further path modification such as dataset config etc. - return Path(self.config.train.outpath) + return Path(self.config.train.outpath) / self.config.data.mode def __init__(self, config: Config): """ diff --git a/lib/utils/tools.py b/lib/utils/tools.py index 6516bd8..594fff4 100644 --- a/lib/utils/tools.py +++ b/lib/utils/tools.py @@ -9,6 +9,7 @@ def write_to_shelve(file_path, value): with shelve.open(str(file_path), protocol=pickle.HIGHEST_PROTOCOL) as f: new_key = str(len(f)) f[new_key] = value + f.close() def load_from_shelve(file_path, key): diff --git a/lib/variables.py b/lib/variables.py index ab97da8..9b27a6c 100644 --- a/lib/variables.py +++ b/lib/variables.py @@ -1,9 +1,15 @@ from pathlib import Path _ROOT = Path('..') +# Labels for classes HOMOTOPIC = 1 ALTERNATIVE = 0 +ANY = -1 + +# Colors for img files WHITE = 255 BLACK = 0 -DPI = 100 +# Variables for plotting +PADDING = 0.25 +DPI = 50 diff --git a/lib/visualization/generator_eval.py b/lib/visualization/generator_eval.py index 1241717..b895816 100644 --- a/lib/visualization/generator_eval.py +++ b/lib/visualization/generator_eval.py @@ -1,53 +1,106 @@ +from collections import defaultdict + import matplotlib.pyplot as plt +import matplotlib.cm as cmaps from mpl_toolkits.axisartist.axes_grid import ImageGrid +from sklearn.cluster import Birch, DBSCAN, KMeans +from sklearn.decomposition import PCA +from sklearn.manifold import TSNE + import lib.variables as V +import numpy as np class GeneratorVisualizer(object): - def __init__(self, **kwargs): - # val_results = dict(discriminated_bce_loss, batch_nb, pred_label, label, generated_alternative) - self.alternatives = kwargs.get('generated_alternative') - self.labels = kwargs.get('labels') - self.trajectories = kwargs.get('trajectories') - self.maps = kwargs.get('maps') + def __init__(self, outputs, k=8): 
+        d = defaultdict(list)
+        for key in outputs.keys():
+            try:
+                d[key] = outputs[key][:k].cpu().numpy()
+            except AttributeError:
+                d[key] = outputs[key][:k]
+            except TypeError:
+                self.batch_nb = outputs[key]
+        for key in d.keys():
+            self.__setattr__(key, d[key])
-        self._map_width, self._map_height = self.maps[0].squeeze().shape
+        # val_results = dict(discriminated_bce_loss, batch_nb, pred_label, label, generated_alternative)
+        self._map_width, self._map_height = self.input.shape[1], self.input.shape[2]
         self.column_dict_list = self._build_column_dict_list()
         self._cols = len(self.column_dict_list)
         self._rows = len(self.column_dict_list[0])
+        self.colormap = cmaps.tab20
+
     def _build_column_dict_list(self):
         trajectories = []
-        non_hom_alternatives = []
-        hom_alternatives = []
+        alternatives = []
-        for idx in range(self.alternatives.shape[0]):
-            image = (self.alternatives[idx]).cpu().numpy().squeeze()
-            label = self.labels[idx].item()
-            # Dirty and Quick hack incomming.
-            if label == V.HOMOTOPIC:
-                hom_alternatives.append(dict(image=image, label='Homotopic'))
-                non_hom_alternatives.append(None)
-            else:
-                non_hom_alternatives.append(dict(image=image, label='NonHomotopic'))
-                hom_alternatives.append(None)
-        for idx in range(max(len(hom_alternatives), len(non_hom_alternatives))):
-            image = (self.maps[idx] + self.trajectories[idx]).cpu().numpy().squeeze()
+        for idx in range(self.output.shape[0]):
+            image = (self.output[idx]).squeeze()
+            label = 'Homotopic' if self.labels[idx].item() == V.HOMOTOPIC else 'Alternative'
+            alternatives.append(dict(image=image, label=label))
+
+        for idx in range(len(alternatives)):
+            image = (self.input[idx]).squeeze()
             label = 'original'
             trajectories.append(dict(image=image, label=label))
-        return trajectories, hom_alternatives, non_hom_alternatives
+        return trajectories, alternatives
-    def draw(self):
-        padding = 0.25
-        additional_size = self._cols * padding + 3 * padding
-        width = (self._map_width * self._cols) / V.DPI + additional_size
-        height = (self._map_height * self._rows) / V.DPI + additional_size
+    @staticmethod
+    def cluster_data(data):
+
+        cluster = Birch()
+
+        labels = cluster.fit_predict(data)
+        return labels
+
+    def draw_latent(self):
+        plt.close('all')
+        clusterer = KMeans(10)
+        try:
+            labels = clusterer.fit_predict(self.logvar)
+        except ValueError:
+            fig = plt.figure()
+            return fig
+        if self.z.shape[-1] > 2:
+            fig, axs = plt.subplots(ncols=2, nrows=1)
+            transformers = [TSNE(2), PCA(2)]
+            for idx, transformer in enumerate(transformers):
+                transformed = transformer.fit_transform(self.z)
+
+                colored = self.colormap(labels)
+                ax = axs[idx]
+                ax.scatter(x=transformed[:, 0], y=transformed[:, 1], c=colored)
+                ax.set_title(transformer.__class__.__name__)
+                ax.set_xlim(np.min(transformed[:, 0])*1.1, np.max(transformed[:, 0]*1.1))
+                ax.set_ylim(np.min(transformed[:, 1]*1.1), np.max(transformed[:, 1]*1.1))
+        elif self.z.shape[-1] == 2:
+            fig, axs = plt.subplots()
+
+            # TODO: Build transformation for lat_dim_size >= 3
+            print('All Predictions successfully Gathered and Shaped ')
+            axs.set_xlim(np.min(self.z[:, 0]), np.max(self.z[:, 0]))
+            axs.set_ylim(np.min(self.z[:, 1]), np.max(self.z[:, 1]))
+            # ToDo: Insert Normalization
+            colored = self.colormap(labels)
+            plt.scatter(self.z[:, 0], self.z[:, 1], c=colored)
+        else:
+            raise NotImplementedError("Latent Dimensions cannot be one-dimensional (yet).")
+
+        return fig
+
+    def draw_io_bundle(self):
+        width, height = self._cols * 5, self._rows * 5
+        additional_size = self._cols * V.PADDING + 3 * V.PADDING
+        # width = (self._map_width * self._cols) / V.DPI + additional_size
+        # height = (self._map_height * self._rows) / V.DPI + additional_size
         fig = plt.figure(figsize=(width, height), dpi=V.DPI)
         grid = ImageGrid(fig, 111,  # similar to subplot(111)
                          nrows_ncols=(self._rows, self._cols),
-                         axes_pad=padding,  # pad between axes in inch.
+                         axes_pad=V.PADDING,  # pad between axes in inch.
                          )
         for idx in range(len(grid.axes_all)):
diff --git a/main.py b/main.py
index 84fcbde..5b20fe4 100644
--- a/main.py
+++ b/main.py
@@ -33,12 +33,13 @@ main_arg_parser.add_argument("--main_seed", type=int, default=69, help="")
 # Data Parameters
 main_arg_parser.add_argument("--data_worker", type=int, default=10, help="")
-main_arg_parser.add_argument("--data_dataset_length", type=int, default=100000, help="")
+main_arg_parser.add_argument("--data_dataset_length", type=int, default=10000, help="")
 main_arg_parser.add_argument("--data_root", type=str, default='data', help="")
 main_arg_parser.add_argument("--data_map_root", type=str, default='res/shapes', help="")
 main_arg_parser.add_argument("--data_normalized", type=strtobool, default=True, help="")
 main_arg_parser.add_argument("--data_use_preprocessed", type=strtobool, default=True, help="")
+main_arg_parser.add_argument("--data_mode", type=str, default='ae_no_label_in_map', help="")
 # Transformations
 main_arg_parser.add_argument("--transformations_to_tensor", type=strtobool, default=False, help="")
@@ -46,7 +47,7 @@ main_arg_parser.add_argument("--transformations_to_tensor", type=strtobool, defa
 # Transformations
 main_arg_parser.add_argument("--train_outpath", type=str, default="output", help="")
 main_arg_parser.add_argument("--train_version", type=strtobool, required=False, help="")
-main_arg_parser.add_argument("--train_epochs", type=int, default=20, help="")
+main_arg_parser.add_argument("--train_epochs", type=int, default=200, help="")
 main_arg_parser.add_argument("--train_batch_size", type=int, default=164, help="")
 main_arg_parser.add_argument("--train_lr", type=float, default=0.002, help="")
 main_arg_parser.add_argument("--train_num_sanity_val_steps", type=int, default=0, help="")
@@ -54,9 +55,9 @@ main_arg_parser.add_argument("--train_num_sanity_val_steps", type=int, default=0
 # Model
 main_arg_parser.add_argument("--model_type", type=str, default="CNNRouteGenerator", help="")
 main_arg_parser.add_argument("--model_activation", type=str, default="elu", help="")
-main_arg_parser.add_argument("--model_filters", type=str, default="[16, 32, 64]", help="")
+main_arg_parser.add_argument("--model_filters", type=str, default="[16, 32, 32]", help="")
 main_arg_parser.add_argument("--model_classes", type=int, default=2, help="")
-main_arg_parser.add_argument("--model_lat_dim", type=int, default=8, help="")
+main_arg_parser.add_argument("--model_lat_dim", type=int, default=4, help="")
 main_arg_parser.add_argument("--model_use_bias", type=strtobool, default=True, help="")
 main_arg_parser.add_argument("--model_use_norm", type=strtobool, default=True, help="")
 main_arg_parser.add_argument("--model_use_res_net", type=strtobool, default=False, help="")
@@ -101,7 +102,7 @@ def run_lightning_loop(config_obj):
     model.init_weights(torch.nn.init.xavier_normal_)
     if model.name == 'CNNRouteGeneratorDiscriminated':
         # ToDo: Make this dependent on the used seed
-        path = Path(Path(config_obj.train.outpath) / 'classifier_cnn' / 'version_0')
+        path = logger.outpath / 'classifier_cnn' / 'version_0'
         disc_model = SavedLightningModels.load_checkpoint(path).restore()
         model.set_discriminator(disc_model)
@@ -111,13 +112,12 @@ def run_lightning_loop(config_obj):
                      show_progress_bar=True,
                      weights_save_path=logger.log_dir,
                      gpus=[0] if torch.cuda.is_available() else None,
-                     check_val_every_n_epoch=1,
-                     num_sanity_val_steps=config_obj.train.num_sanity_val_steps,
+                     check_val_every_n_epoch=10,
+                     # num_sanity_val_steps=config_obj.train.num_sanity_val_steps,
                      # row_log_interval=(model.n_train_batches * 0.1),  # TODO: Better Value / Setting
                      # log_save_interval=(model.n_train_batches * 0.2),  # TODO: Better Value / Setting
                      checkpoint_callback=checkpoint_callback,
                      logger=logger,
-                     val_percent_check=0.025,
                      fast_dev_run=config_obj.main.debug,
                      early_stop_callback=None
                      )

[The remainder of the patch deletes 21 binary image files from res/shapes/ -- inverted_1.bmp through inverted_10.bmp, shapes_1.bmp through shapes_10.bmp, and shapes_3.png -- each as a "deleted file mode 100644" entry whose GIT binary patch data is omitted here; the full file list appears in the diffstat at the top of this patch.]
MpIbjKF!cie0|vi{kN^Mx diff --git a/res/shapes/shapes_9.bmp b/res/shapes/shapes_9.bmp deleted file mode 100644 index a89033561ed7e2559683ad1165c22dc42cd54ce1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 30054 zcmeH_L5>|a3`Ct<=n&TLig5&YuvY0r60jC`zsF`Gb4|HcG`;MfQjP{Q37k zKR*8aVgLSSfB*XY`^%4CKm6Z6=O2&HfBt>&5BJtJ53Hw1vf*Vl58l@Pb=Mob>ZQc? zcg3K8R~!``Z%72I60D6Jo{|;7+bW_`C`VRmovHLY+(Z(olSv6jQkXv(T+&JHQjft> zmBfX#X98D7EEJxj>x)`>>y$ zHM!38!E$L{Dv#Ob$a%zMbjOqHOdnh!%}eDm+Z;JhFqy`u^Kn#hJRXnXs8U6S+1O07 zt7F_9@@`I7)JgV>W=VY{q(9k4?Z8wp)z#7v83F8BVPYye?2&0Q64IW8MMrq#NH9LT zLBS#;fITBqao8h#GD=9MnijbGj43+ZNYP|Z{hnB!_i!4s?huRGv5|-oaYn(4FID!O zrdlLoc@?KI>kcv3KFx*miO3t0Y3xkkHey6zj+!RGDSgqzVu`@=j9GJp*?vlSr6-BV zXyeY#idZ zwM3?X@ZIN;l5X=w(+}(R_;e&B6)^!SPbsNH_L#P4FB?;GwmgeGF*eY5O`vHXofTYk zq?v`HyZyqnr;{>upF$XWa%!4@jOtI8G+ob*$n8!By_}|L_bG&l7y%E`bnR0LpGHA! zRZH7a+_Ro?rwR8(2xiYI5F?`%Bhr1@ReyTJXm!2H$YR?R6CioU^AgxcF;#IqZlc2H zOtFnvcdr1+vwKg%=Ob$)AmzzO$jY) zo1VQcxQK1^_cYr~k4gkf$WcS?$Np`Hi&#Rx>3o{=mW_ZrY;M{|*F`Mfc8bTu{%KGm z2`?m{Nvb|#;T2uk*zI2p=6rWWGxK0&nv^e%rn^RM)8HyFT4ereRpgWM;wW}Tr&|@V zQ_fB6y0vw)_;uaFVs;yorQ1CWN@si;+%;K}>limDj~tnDZ!F~WnS0_5iNo^}?3odd zY5JMH-)>9%q0ISqU-7`f-5=`xU#t7eC%@I%|6N?vy`*A#Nd-R6`1Y91{jQI2eXg2Mmvm!1shbsusU($I=GUbSx+v%I;=(x^zak{-aFR4$R zabv7v1UMcsg zX^E3jShj0UJ0Ae#um?7B#b`Ty@}@azT7dhWviiP7Npts;yQY=M!SR_ax-ybQSE?S% z%FW&99mjCd9sPH7^5&7gd+(^f-7}izKEEX=H>ND7E>>+s^5%<$7vuSD#VK!C)x{MJ zlk%mk!HlnFPw~8ARWIEj4=5K9NJe<}DnFh5~czs^^*OIGhEU-=gD ztGd$vB(r)y`O2N-S9PWTNoMta@=xCR3cL92tNIo0f8X%_1>V2F`xkis0{`1z;6JG+ B`{V!s