New Model, Many Changes
@@ -4,7 +4,7 @@ from torch import nn
 from torch.nn import ModuleList

 from ml_lib.modules.blocks import ConvModule, LinearModule
-from ml_lib.modules.util import (LightningBaseModule, HorizontalSplitter, HorizontalMerger)
+from ml_lib.modules.util import (LightningBaseModule, Splitter, Merger)
 from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)
@@ -33,7 +33,7 @@ class BandwiseConvClassifier(BinaryMaskDatasetMixin,

         # Modules
         # =============================================================================
-        self.split = HorizontalSplitter(self.in_shape, self.n_band_sections)
+        self.split = Splitter(self.in_shape, self.n_band_sections)

         k = 3
         self.band_list = ModuleList()
@@ -48,7 +48,7 @@ class BandwiseConvClassifier(BinaryMaskDatasetMixin,
             # last_shape = self.conv_list[-1].shape
             self.band_list.append(conv_list)

-        self.merge = HorizontalMerger(self.band_list[-1][-1].shape, self.n_band_sections)
+        self.merge = Merger(self.band_list[-1][-1].shape, self.n_band_sections)

         self.full_1 = LinearModule(self.merge.shape, self.params.lat_dim, **self.params.module_kwargs)
         self.full_2 = LinearModule(self.full_1.shape, self.full_1.shape * 2, **self.params.module_kwargs)
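The two bandwise classifiers only swap the renamed utility modules (HorizontalSplitter/HorizontalMerger to Splitter/Merger); the per-band convolution logic is untouched. A minimal sketch of the split/merge pattern these classes appear to rely on — NaiveSplitter and NaiveMerger are illustrative stand-ins, not the ml_lib implementations:

import torch
from torch import nn


class NaiveSplitter(nn.Module):
    """Cut a (B, C, H, W) input into n horizontal bands along the height axis."""
    def __init__(self, n_sections):
        super().__init__()
        self.n_sections = n_sections

    def forward(self, x):
        return torch.chunk(x, self.n_sections, dim=-2)


class NaiveMerger(nn.Module):
    """Flatten each band's features and concatenate them into one vector per sample."""
    def forward(self, band_outputs):
        return torch.cat([t.flatten(start_dim=1) for t in band_outputs], dim=-1)


x = torch.randn(4, 1, 64, 128)    # e.g. a batch of single-channel spectrogram-like inputs
bands = NaiveSplitter(4)(x)       # 4 tensors of shape (4, 1, 16, 128)
merged = NaiveMerger()(bands)     # shape (4, 4 * 1 * 16 * 128) = (4, 8192)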
@@ -5,7 +5,7 @@ from torch import nn
 from torch.nn import ModuleList

 from ml_lib.modules.blocks import ConvModule, LinearModule
-from ml_lib.modules.util import (LightningBaseModule, Flatten, HorizontalSplitter)
+from ml_lib.modules.util import (LightningBaseModule, Splitter)
 from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)
@@ -69,7 +69,7 @@ class BandwiseConvMultiheadClassifier(BinaryMaskDatasetMixin,

         # Modules
         # =============================================================================
-        self.split = HorizontalSplitter(self.in_shape, self.n_band_sections)
+        self.split = Splitter(self.in_shape, self.n_band_sections)

         self.band_list = ModuleList()
         for band in range(self.n_band_sections):
@@ -1,16 +1,19 @@
-import variables as V
+from argparse import Namespace
+
+import warnings

 import torch
 from torch import nn

 from einops import rearrange, repeat

 from ml_lib.modules.blocks import TransformerModule
-from ml_lib.modules.util import (LightningBaseModule, AutoPadToShape)
+from ml_lib.modules.util import (LightningBaseModule, AutoPadToShape, F_x)
 from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)

 MIN_NUM_PATCHES = 16


 class VisualTransformer(BinaryMaskDatasetMixin,
                         BaseDataloadersMixin,
                         BaseTrainMixin,
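F_x is newly imported here and is used below as the fallback for patch_to_embedding when params.embedding_size is falsy. Its ml_lib source is not part of this diff; the assumption is that it acts as a shape-annotated pass-through, roughly like this stand-in:

from torch import nn


class FxLike(nn.Module):
    """Illustrative stand-in for F_x: returns its input unchanged, but records an
    output shape the way other ml_lib modules expose a .shape attribute."""
    def __init__(self, out_shape):
        super().__init__()
        self.shape = out_shape

    def forward(self, x):
        return x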
@@ -22,69 +25,83 @@ class VisualTransformer(BinaryMaskDatasetMixin,
     def __init__(self, hparams):
         super(VisualTransformer, self).__init__(hparams)

-        self.in_shape = self.dataset.train_dataset.sample_shape
-        assert len(self.in_shape) == 3, 'There need to be three Dimensions'
-        channels, height, width = self.in_shape
-
-        # Automatic Image Shaping
-        image_size = (max(height, width) // self.params.patch_size) * self.params.patch_size
-        self.image_size = image_size + self.params.patch_size if image_size < max(height, width) else image_size
-
-        # This should be obsolete
-        assert self.image_size % self.params.patch_size == 0, 'image dimensions must be divisible by the patch size'
-
-        num_patches = (self.image_size // self.params.patch_size) ** 2
-        patch_dim = channels * self.params.patch_size ** 2
-        assert num_patches > MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for ' \
-                                              f'attention. Try decreasing your patch size'
+        # Dataset
+        # =============================================================================
+        self.dataset = self.build_dataset()
+
+        self.in_shape = self.dataset.train_dataset.sample_shape
+        assert len(self.in_shape) == 3, 'There need to be three Dimensions'
+        channels, height, width = self.in_shape
+
+        # Model Paramters
+        # =============================================================================
+        # Additional parameters
+        self.attention_dim = self.params.features
+        self.embed_dim = self.params.embedding_size
+
+        # Automatic Image Shaping
+        self.patch_size = self.params.patch_size
+        image_size = (max(height, width) // self.patch_size) * self.patch_size
+        self.image_size = image_size + self.patch_size if image_size < max(height, width) else image_size
+
+        # This should be obsolete
+        assert self.image_size % self.patch_size == 0, 'image dimensions must be divisible by the patch size'
+
+        num_patches = (self.image_size // self.patch_size) ** 2
+        patch_dim = channels * self.patch_size ** 2
+        assert num_patches >= MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for ' + \
+                                               f'attention. Try decreasing your patch size'
+
+        # Correct the Embedding Dim
+        if not self.embed_dim % self.params.heads == 0:
+            self.embed_dim = (self.embed_dim // self.params.heads) * self.params.heads
+            message = ('Embedding Dimension was fixed to be devideable by the number' +
+                       f' of attention heads, is now: {self.embed_dim}')
+            for func in print, warnings.warn:
+                func(message)

         # Utility Modules
         self.autopad = AutoPadToShape((self.image_size, self.image_size))

         # Modules with Parameters
-        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, self.attention_dim), False)
-        self.embedding = nn.Linear(patch_dim, self.attention_dim)
-        self.cls_token = nn.Parameter(torch.randn(1, 1, self.attention_dim), False)
-        self.dropout = nn.Dropout(self.params.dropout)
+        self.transformer = TransformerModule(in_shape=self.embed_dim, hidden_size=self.params.lat_dim,
+                                             n_heads=self.params.heads, num_layers=self.params.attn_depth,
+                                             dropout=self.params.dropout, use_norm=self.params.use_norm,
+                                             activation=self.params.activation_as_string
+                                             )

-        self.transformer = TransformerModule(self.attention_dim, self.params.attn_depth, self.params.heads,
-                                             self.params.lat_dim, self.params.dropout)
+        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, self.embed_dim))
+        self.patch_to_embedding = nn.Linear(patch_dim, self.embed_dim) if self.params.embedding_size \
+            else F_x(self.embed_dim)
+        self.cls_token = nn.Parameter(torch.randn(1, 1, self.embed_dim))
+        self.dropout = nn.Dropout(self.params.dropout)

         self.to_cls_token = nn.Identity()

         self.mlp_head = nn.Sequential(
-            nn.LayerNorm(self.attention_dim),
-            nn.Linear(self.attention_dim, self.params.lat_dim),
+            nn.LayerNorm(self.embed_dim),
+            nn.Linear(self.embed_dim, self.params.lat_dim),
             nn.GELU(),
             nn.Dropout(self.params.dropout),
-            nn.Linear(self.params.lat_dim, V.NUM_CLASSES)
+            nn.Linear(self.params.lat_dim, 1),
+            nn.Sigmoid()
         )

     def forward(self, x, mask=None):
         """
-        :param tensor: the sequence to the encoder (required).
+        :param x: the sequence to the encoder (required).
         :param mask: the mask for the src sequence (optional).
         :return:
         """

         tensor = self.autopad(x)
         p = self.params.patch_size
-        # 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p
-        tensor = torch.reshape(x, (-1, self.image_size * self.image_size, p * p * self.in_shape[0]))
+        tensor = rearrange(tensor, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)

         tensor = self.patch_to_embedding(tensor)
         b, n, _ = tensor.shape

-        # '() n d -> b n d', b = b
-        cls_tokens = tensor.repeat(self.cls_token, b)
+        cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)

         tensor = torch.cat((cls_tokens, tensor), dim=1)
         tensor += self.pos_embedding[:, :(n + 1)]
         tensor = self.dropout(tensor)
@@ -93,4 +110,4 @@ class VisualTransformer(BinaryMaskDatasetMixin,

         tensor = self.to_cls_token(tensor[:, 0])
         tensor = self.mlp_head(tensor)
-        return tensor
+        return Namespace(main_out=tensor)
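The automatic image shaping above rounds the longer input side up to the next multiple of patch_size before padding, and the new embedding-dim correction rounds embedding_size down to a multiple of the head count. A worked example of that arithmetic — the concrete numbers are illustrative, not values from the project's hparams:

height, width, channels = 128, 200, 1
patch_size = 16

image_size = (max(height, width) // patch_size) * patch_size   # 192
if image_size < max(height, width):                            # pad up rather than crop
    image_size += patch_size                                    # 208

num_patches = (image_size // patch_size) ** 2                   # 13 ** 2 = 169 >= MIN_NUM_PATCHES
patch_dim = channels * patch_size ** 2                          # 256 values per square patch

heads, embed_dim = 6, 100
if embed_dim % heads != 0:
    embed_dim = (embed_dim // heads) * heads                    # 96, now divisible by the head count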

models/transformer_model_sequential.py (new file, 114 lines)
@@ -0,0 +1,114 @@
+from argparse import Namespace
+
+import warnings
+
+import torch
+from torch import nn
+
+from einops import repeat
+
+from ml_lib.modules.blocks import TransformerModule
+from ml_lib.modules.util import (LightningBaseModule, AutoPadToShape, F_x, SlidingWindow)
+from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
+                                BaseDataloadersMixin)
+
+MIN_NUM_PATCHES = 16
+
+
+class SequentialVisualTransformer(BinaryMaskDatasetMixin,
+                                  BaseDataloadersMixin,
+                                  BaseTrainMixin,
+                                  BaseValMixin,
+                                  BaseOptimizerMixin,
+                                  LightningBaseModule
+                                  ):
+
+    def __init__(self, hparams):
+        super(SequentialVisualTransformer, self).__init__(hparams)
+
+        # Dataset
+        # =============================================================================
+        self.dataset = self.build_dataset()
+
+        self.in_shape = self.dataset.train_dataset.sample_shape
+        assert len(self.in_shape) == 3, 'There need to be three Dimensions'
+        channels, height, width = self.in_shape
+
+        # Model Paramters
+        # =============================================================================
+        # Additional parameters
+        self.embed_dim = self.params.embedding_size
+        self.patch_size = self.params.patch_size
+        self.height = height
+
+        # Automatic Image Shaping
+        image_size = (max(height, width) // self.patch_size) * self.patch_size
+        self.image_size = image_size + self.patch_size if image_size < max(height, width) else image_size
+
+        # This should be obsolete
+        assert self.image_size % self.patch_size == 0, 'image dimensions must be divisible by the patch size'
+
+        num_patches = (self.image_size // self.patch_size) ** 2
+        patch_dim = channels * self.patch_size * self.image_size
+        assert num_patches >= MIN_NUM_PATCHES, f'your number of patches ({num_patches}) is way too small for ' + \
+                                               f'attention. Try decreasing your patch size'
+
+        # Correct the Embedding Dim
+        if not self.embed_dim % self.params.heads == 0:
+            self.embed_dim = (self.embed_dim // self.params.heads) * self.params.heads
+            message = ('Embedding Dimension was fixed to be devideable by the number' +
+                       f' of attention heads, is now: {self.embed_dim}')
+            for func in print, warnings.warn:
+                func(message)
+
+        # Utility Modules
+        self.autopad = AutoPadToShape((self.image_size, self.image_size))
+        self.dropout = nn.Dropout(self.params.dropout)
+        self.slider = SlidingWindow((self.image_size, self.patch_size), keepdim=False)
+
+        # Modules with Parameters
+        self.transformer = TransformerModule(in_shape=self.embed_dim, hidden_size=self.params.lat_dim,
+                                             n_heads=self.params.heads, num_layers=self.params.attn_depth,
+                                             dropout=self.params.dropout, use_norm=self.params.use_norm,
+                                             activation=self.params.activation_as_string
+                                             )
+
+        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, self.embed_dim))
+        self.patch_to_embedding = nn.Linear(patch_dim, self.embed_dim) if self.params.embedding_size \
+            else F_x(self.embed_dim)
+        self.cls_token = nn.Parameter(torch.randn(1, 1, self.embed_dim))
+        self.to_cls_token = nn.Identity()
+
+        self.mlp_head = nn.Sequential(
+            nn.LayerNorm(self.embed_dim),
+            nn.Linear(self.embed_dim, self.params.lat_dim),
+            nn.GELU(),
+            nn.Dropout(self.params.dropout),
+            nn.Linear(self.params.lat_dim, 1),
+            nn.Sigmoid()
+        )
+
+    def forward(self, x, mask=None):
+        """
+        :param x: the sequence to the encoder (required).
+        :param mask: the mask for the src sequence (optional).
+        :return:
+        """
+        tensor = self.autopad(x)
+        tensor = self.slider(tensor)
+
+        tensor = self.patch_to_embedding(tensor)
+        b, n, _ = tensor.shape
+
+        # cls_tokens = repeat(self.cls_token, '() n d -> b n d', b = b)
+        cls_tokens = self.cls_token.repeat((b, 1, 1))
+
+        tensor = torch.cat((cls_tokens, tensor), dim=1)
+        tensor += self.pos_embedding[:, :(n + 1)]
+        tensor = self.dropout(tensor)
+
+        tensor = self.transformer(tensor, mask)
+
+        tensor = self.to_cls_token(tensor[:, 0])
+        tensor = self.mlp_head(tensor)
+        return Namespace(main_out=tensor)
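SlidingWindow itself is not shown in this commit, so the exact patching is an assumption; given patch_dim = channels * patch_size * image_size and the (image_size, patch_size) window, the sequential model appears to feed full-height vertical strips of width patch_size to the transformer instead of square patches. A rough equivalent using plain torch.unfold:

import torch

batch, channels, image_size, patch_size = 2, 1, 96, 16
padded = torch.randn(batch, channels, image_size, image_size)   # stand-in for the autopad output

# non-overlapping windows over the width axis: (B, C, H, n_strips, patch_size)
strips = padded.unfold(dimension=-1, size=patch_size, step=patch_size)
# one token per strip, flattened to channels * image_size * patch_size values
tokens = strips.permute(0, 3, 1, 2, 4).reshape(batch, -1, channels * image_size * patch_size)
print(tokens.shape)   # torch.Size([2, 6, 1536])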