BandwiseBinaryClassifier is work in progress; TODO: Shape Piping.
@@ -1,22 +1,15 @@
 from typing import Union
 import warnings
 
 import torch
 from torch import nn
-from ml_lib.modules.utils import AutoPad, Interpolate
+from ml_lib.modules.utils import AutoPad, Interpolate, ShapeMixin
 
 
 #
 # Sub - Modules
 ###################
 
-class ConvModule(nn.Module):
-
-    @property
-    def shape(self):
-        x = torch.randn(self.in_shape).unsqueeze(0)
-        output = self(x)
-        return output.shape[1:]
+class ConvModule(ShapeMixin, nn.Module):
 
     def __init__(self, in_shape, conv_filters, conv_kernel, activation: nn.Module = nn.ELU, pooling_size=None,
                  bias=True, norm=False, dropout: Union[int, float] = 0,
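The Shape Piping TODO in the commit message is what ShapeMixin enables here: each block reports the shape of its own output, so it can be fed straight into the next block's in_shape. A minimal sketch, assuming the ConvModule constructor shown above; the module path and the filter/kernel values are illustrative, not part of the commit:

from ml_lib.modules.blocks import ConvModule   # assumed path of the file patched above

conv_1 = ConvModule(in_shape=(1, 128, 128), conv_filters=16, conv_kernel=3)
conv_2 = ConvModule(in_shape=conv_1.shape, conv_filters=32, conv_kernel=3)   # shape piped forward

Accessing conv_1.shape runs one dummy forward pass through conv_1 (see the ShapeMixin hunk further down), so the second block adapts automatically when the first one changes.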
@@ -51,13 +44,7 @@ class ConvModule(nn.Module):
         return tensor
 
 
-class DeConvModule(nn.Module):
-
-    @property
-    def shape(self):
-        x = torch.randn(self.in_shape).unsqueeze(0)
-        output = self(x)
-        return output.shape[1:]
+class DeConvModule(ShapeMixin, nn.Module):
 
     def __init__(self, in_shape, conv_filters, conv_kernel, conv_stride=1, conv_padding=0,
                  dropout: Union[int, float] = 0, autopad=0,
@@ -91,13 +78,7 @@ class DeConvModule(nn.Module):
         return tensor
 
 
-class ResidualModule(nn.Module):
-
-    @property
-    def shape(self):
-        x = torch.randn(self.in_shape).unsqueeze(0)
-        output = self(x)
-        return output.shape[1:]
+class ResidualModule(ShapeMixin, nn.Module):
 
     def __init__(self, in_shape, module_class, n, activation=None, **module_parameters):
         assert n >= 1
@@ -118,13 +99,7 @@ class ResidualModule(nn.Module):
         return tensor
 
 
-class RecurrentModule(nn.Module):
-
-    @property
-    def shape(self):
-        x = torch.randn(self.in_shape).unsqueeze(0)
-        output = self(x)
-        return output.shape[1:]
+class RecurrentModule(ShapeMixin, nn.Module):
 
     def __init__(self, in_shape, hidden_size, num_layers=1, cell_type=nn.GRU, bias=True, dropout=0):
         super(RecurrentModule, self).__init__()
@@ -1,23 +0,0 @@
-from typing import List
-
-import torch
-from torch import nn
-
-from ml_lib.modules.utils import FlipTensor
-from ml_lib.objects.map import MapStorage, Map
-from ml_lib.objects.trajectory import Trajectory
-
-
-class BinaryHomotopicLoss(nn.Module):
-    def __init__(self, map_storage: MapStorage):
-        super(BinaryHomotopicLoss, self).__init__()
-        self.map_storage = map_storage
-        self.flipper = FlipTensor()
-
-    def forward(self, x: torch.Tensor, y: torch.Tensor, mapnames: str):
-        maps: List[Map] = [self.map_storage[mapname] for mapname in mapnames]
-        for basemap in maps:
-            basemap = basemap.as_2d_array
-
-
-
@@ -4,6 +4,8 @@
 import torch
 from torch import nn
 
+from ml_lib.modules.utils import ShapeMixin
+
 
 class Generator(nn.Module):
     @property
@@ -112,12 +114,7 @@ class UnitGenerator(Generator):
         return tensor
 
 
-class BaseEncoder(nn.Module):
-    @property
-    def shape(self):
-        x = torch.randn(self.in_shape).unsqueeze(0)
-        output = self(x)
-        return output.shape[1:]
+class BaseEncoder(ShapeMixin, nn.Module):
 
     # noinspection PyUnresolvedReferences
     def __init__(self, in_shape, lat_dim=256, use_bias=True, use_norm=False, dropout: Union[int, float] = 0,
@@ -1,5 +1,3 @@
-from copy import deepcopy
-
 from abc import ABC
 from pathlib import Path
 
@@ -24,6 +22,15 @@ class F_x(object):
         return x
 
 
+class ShapeMixin:
+
+    @property
+    def shape(self):
+        x = torch.randn(self.in_shape).unsqueeze(0)
+        output = self(x)
+        return output.shape[1:]
+
+
 # Utility - Modules
 ###################
 class Flatten(nn.Module):
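ShapeMixin centralizes the shape probe that the hunks above delete from each block: any module that sets self.in_shape can report its output shape through one dummy forward pass. A self-contained sketch; the Dummy class and the 1x8x8 input are illustrative only, not part of the commit:

import torch
from torch import nn

from ml_lib.modules.utils import ShapeMixin


class Dummy(ShapeMixin, nn.Module):
    def __init__(self, in_shape):
        super(Dummy, self).__init__()
        self.in_shape = in_shape                      # ShapeMixin.shape reads this attribute
        self.conv = nn.Conv2d(in_shape[0], 4, kernel_size=3)

    def forward(self, x):
        return self.conv(x)


print(Dummy((1, 8, 8)).shape)                         # torch.Size([4, 6, 6]), inferred by a dummy forward pass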
@@ -100,7 +107,7 @@ class LightningBaseModule(pl.LightningModule, ABC):
 
     @classmethod
     def name(cls):
-        raise NotImplementedError('Give your model a name!')
+        return cls.__name__
 
     @property
     def shape(self):
@@ -218,3 +225,62 @@ class FlipTensor(nn.Module):
         idx = torch.as_tensor(idx).long()
         inverted_tensor = x.index_select(self.dim, idx)
         return inverted_tensor
+
+
+class AutoPadToShape(object):
+    def __init__(self, shape):
+        self.shape = shape
+
+    def __call__(self, x):
+        if not torch.is_tensor(x):
+            x = torch.as_tensor(x)
+        if x.shape == self.shape:
+            return x
+        embedding = torch.zeros(self.shape)
+        embedding[: x.shape] = x
+        return embedding
+
+    def __repr__(self):
+        return f'AutoPadTransform({self.shape})'
+
+
+class HorizontalSplitter(nn.Module):
+
+    def __init__(self, in_shape, n):
+        super(HorizontalSplitter, self).__init__()
+        assert len(in_shape) == 3
+        self.n = n
+        self.in_shape = in_shape
+
+        self.channel, self.height, self.width = self.in_shape
+        self.new_height = (self.height // self.n_horizontal_splits) + 1 if self.height % self.n != 0 else 0
+
+        self.shape = (self.channel, self.new_height, self.width)
+        self.autopad = AutoPadToShape(self.shape)
+
+    def foward(self, x):
+        n_blocks = list()
+        for block_idx in range(self.n):
+            start = (self.channel, block_idx * self.height, self.width)
+            end = (self.channel, (block_idx + 1) * self.height, self.width)
+            block = self.autopad(x[start:end])
+            n_blocks.append(block)
+
+        return tuple(n_blocks)
+
+
+class HorizontalMerger(nn.Module):
+
+    @property
+    def shape(self):
+        merged_shape = self.in_shape[0], self.in_shape[1] * self.n, self.in_shape[2]
+        return merged_shape
+
+    def __init__(self, in_shape, n):
+        super(HorizontalMerger, self).__init__()
+        assert len(in_shape) == 3
+        self.n = n
+        self.in_shape = in_shape
+
+    def forward(self, x):
+        return torch.cat(x, dim=-2)
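HorizontalSplitter and HorizontalMerger are the band-wise groundwork for the BandwiseBinaryClassifier named in the commit message, but the splitter is clearly still work in progress: foward is a typo for forward, self.n_horizontal_splits is never defined (presumably self.n), new_height collapses to 0 whenever the height divides evenly, and x[start:end] slices a tensor with tuples. One possible reading of the intent, splitting a (batch, channel, height, width) input into n equal-height bands and zero-padding the last band with torch.nn.functional.pad instead of the committed AutoPadToShape; a sketch under those assumptions, not the committed code:

import torch
from torch import nn


class HorizontalSplitterSketch(nn.Module):
    # Illustrative rewrite: split a (batch, c, h, w) tensor into n horizontal
    # bands of equal height, zero-padding the last band when h % n != 0.
    def __init__(self, in_shape, n):
        super(HorizontalSplitterSketch, self).__init__()
        assert len(in_shape) == 3
        self.n = n
        self.channel, self.height, self.width = in_shape
        self.new_height = (self.height + n - 1) // n               # ceil(h / n)
        self.shape = (self.channel, self.new_height, self.width)

    def forward(self, x):
        blocks = []
        for idx in range(self.n):
            band = x[:, :, idx * self.new_height:(idx + 1) * self.new_height, :]
            if band.shape[-2] < self.new_height:                    # last band may come up short
                band = torch.nn.functional.pad(band, (0, 0, 0, self.new_height - band.shape[-2]))
            blocks.append(band)
        return tuple(blocks)


x = torch.randn(2, 1, 10, 16)                                       # height 10, n=4 -> 4 bands of height 3
bands = HorizontalSplitterSketch((1, 10, 16), n=4)(x)
merged = torch.cat(bands, dim=-2)                                    # what HorizontalMerger.forward does

HorizontalMerger.shape already reports in_shape[1] * n for the merged height, which matches this padded round trip rather than the original input height.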