import inspect

from argparse import Namespace

from torch import nn

from ml_lib.metrics.multi_class_classification import MultiClassScores
from ml_lib.modules.blocks import LinearModule
from ml_lib.modules.model_parts import CNNEncoder
from ml_lib.modules.util import (LightningBaseModule)

from util.module_mixins import CombinedModelMixins
class CNNBaseline(CombinedModelMixins,
                  LightningBaseModule
                  ):
    """CNN classification baseline: a `CNNEncoder` feature extractor followed by a
    single `LinearModule` classification head whose output is passed through Softmax.
    """

    def __init__(self, in_shape, n_classes, weight_init, activation, use_bias, use_norm, dropout, lat_dim, features,
                 filters, lr, weight_decay, sto_weight_avg, lr_warm_restart_epochs, opt_reset_interval, loss):
        """Build the encoder and classifier.

        :param in_shape: input shape as (channels, height, width) — asserted below.
        :param n_classes: number of output classes for the classifier head.

        All remaining arguments are hyperparameters that are captured wholesale
        into the params dict handed to the Lightning base module.
        """
        # TODO: Move this to parent class, or make it much easier to access....
        # Capture every constructor argument (except `self`) by introspecting the
        # __init__ signature, so the base module receives the full hparams dict.
        a = dict(locals())
        params = {arg: a[arg] for arg in inspect.signature(self.__init__).parameters.keys() if arg != 'self'}
        super(CNNBaseline, self).__init__(params)

        # Model
        # =============================================================================
        # Additional parameters
        self.in_shape = in_shape
        assert len(self.in_shape) == 3, 'There need to be three Dimensions'

        # Modules with Parameters
        self.encoder = CNNEncoder(in_shape=self.in_shape, **self.params.module_kwargs)

        # BUGFIX: copy the kwargs before overriding the activation. The original
        # code aliased self.params.module_kwargs and mutated it in place, which
        # leaked nn.Softmax into any later consumer of module_kwargs.
        module_kwargs = dict(self.params.module_kwargs)
        # NOTE(review): nn.Softmax without an explicit `dim` is deprecated and may
        # pick an unintended axis — confirm LinearModule instantiates it with dim.
        module_kwargs.update(activation=nn.Softmax)
        self.classifier = LinearModule(self.encoder.shape, n_classes, **module_kwargs)

    def forward(self, x, mask=None, return_attn_weights=False):
        """Encode `x` and classify the resulting features.

        :param x: the sequence to the encoder (required).
        :param mask: the mask for the src sequence (optional, unused here).
        :param return_attn_weights: accepted for interface compatibility (unused).
        :return: Namespace with the classifier output under `main_out`.
        """
        tensor = self.encoder(x)
        tensor = self.classifier(tensor)
        return Namespace(main_out=tensor)

    def additional_scores(self, outputs):
        """Compute multi-class classification metrics for the given step outputs."""
        return MultiClassScores(self)(outputs)