diff --git a/examples/multi_run.py b/examples/multi_run.py
index 3d0e377..9577a4b 100644
--- a/examples/multi_run.py
+++ b/examples/multi_run.py
@@ -15,10 +15,10 @@ if __name__ == '__main__':
 
     # Model Settings
     config = Config().read_namespace(args)
-    # use_bias, activation, model, use_norm, max_epochs, filters
+    # bias, activation, model, norm, max_epochs, filters
     cnn_classifier = dict(train_epochs=10, model_use_bias=True, model_use_norm=True, model_activation='leaky_relu',
                           model_type='classifier_cnn', model_filters=[16, 32, 64], data_batchsize=512)
-    # use_bias, activation, model, use_norm, max_epochs, sr, feature_mixed_dim, filters
+    # bias, activation, model, norm, max_epochs, sr, feature_mixed_dim, filters
 
     for arg_dict in [cnn_classifier]:
         for seed in range(5):
diff --git a/modules/blocks.py b/modules/blocks.py
index ac103ac..95bfe44 100644
--- a/modules/blocks.py
+++ b/modules/blocks.py
@@ -19,7 +19,7 @@ class ConvModule(nn.Module):
         return output.shape[1:]
 
     def __init__(self, in_shape, conv_filters, conv_kernel, activation: nn.Module = nn.ELU, pooling_size=None,
-                 use_bias=True, use_norm=False, dropout: Union[int, float] = 0,
+                 bias=True, norm=False, dropout: Union[int, float] = 0,
                  conv_class=nn.Conv2d, conv_stride=1, conv_padding=0, **kwargs):
         super(ConvModule, self).__init__()
         warnings.warn(f'The following arguments have been ignored: \n {list(kwargs.keys())}')
@@ -37,14 +37,13 @@ class ConvModule(nn.Module):
         # Modules
         self.dropout = nn.Dropout2d(dropout) if dropout else lambda x: x
         self.pooling = nn.MaxPool2d(pooling_size) if pooling_size else lambda x: x
-        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04) if use_norm else lambda x: x
-        self.conv = conv_class(in_channels, self.conv_filters, self.conv_kernel, bias=use_bias,
+        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04) if norm else lambda x: x
+        self.conv = conv_class(in_channels, self.conv_filters, self.conv_kernel, bias=bias,
                                padding=self.padding, stride=self.stride
                                )
 
     def forward(self, x):
         x = self.norm(x)
-
         tensor = self.conv(x)
         tensor = self.dropout(tensor)
         tensor = self.pooling(tensor)
@@ -63,7 +62,7 @@ class DeConvModule(nn.Module):
     def __init__(self, in_shape, conv_filters, conv_kernel, conv_stride=1, conv_padding=0,
                  dropout: Union[int, float] = 0, autopad=0,
                  activation: Union[None, nn.Module] = nn.ReLU, interpolation_scale=0,
-                 use_bias=True, use_norm=False):
+                 bias=True, norm=False):
         super(DeConvModule, self).__init__()
         in_channels, height, width = in_shape[0], in_shape[1], in_shape[2]
         self.padding = conv_padding
@@ -74,9 +73,9 @@
 
         self.autopad = AutoPad() if autopad else lambda x: x
         self.interpolation = Interpolate(scale_factor=interpolation_scale) if interpolation_scale else lambda x: x
-        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04) if use_norm else lambda x: x
+        self.norm = nn.BatchNorm2d(in_channels, eps=1e-04) if norm else lambda x: x
         self.dropout = nn.Dropout2d(dropout) if dropout else lambda x: x
-        self.de_conv = nn.ConvTranspose2d(in_channels, self.conv_filters, self.conv_kernel, bias=use_bias,
+        self.de_conv = nn.ConvTranspose2d(in_channels, self.conv_filters, self.conv_kernel, bias=bias,
                                           padding=self.padding, stride=self.stride)
         self.activation = activation() if activation else lambda x: x
 
@@ -127,16 +126,16 @@ class RecurrentModule(nn.Module):
         output = self(x)
         return output.shape[1:]
 
-    def __init__(self, in_shape, hidden_size, num_layers=1, cell_type=nn.GRU, use_bias=True, dropout=0):
+    def __init__(self, in_shape, hidden_size, num_layers=1, cell_type=nn.GRU, bias=True, dropout=0):
         super(RecurrentModule, self).__init__()
-        self.use_bias = use_bias
+        self.bias = bias
         self.num_layers = num_layers
         self.in_shape = in_shape
         self.hidden_size = hidden_size
         self.dropout = dropout
         self.rnn = cell_type(self.in_shape[-1] * self.in_shape[-2], hidden_size,
                              num_layers=num_layers,
-                             bias=self.use_bias,
+                             bias=self.bias,
                              batch_first=True,
                              dropout=self.dropout)
 
diff --git a/modules/model_parts.py b/modules/model_parts.py
index b1a4ee2..806ce63 100644
--- a/modules/model_parts.py
+++ b/modules/model_parts.py
@@ -61,7 +61,7 @@ class Generator(nn.Module):
         self.deconv4 = DeConvModule(self.deconv3.shape, conv_filters=out_channels,
                                     conv_kernel=3,
                                     conv_padding=1,
-                                    # normalize=use_norm,
+                                    # normalize=norm,
                                     activation=self.out_activation
                                     )
 
diff --git a/modules/utils.py b/modules/utils.py
index c307b9d..111f4a2 100644
--- a/modules/utils.py
+++ b/modules/utils.py
@@ -1,3 +1,5 @@
+from copy import deepcopy
+
 from abc import ABC
 
 from pathlib import Path
@@ -109,7 +111,7 @@ class LightningBaseModule(pl.LightningModule, ABC):
 
     def __init__(self, hparams):
         super(LightningBaseModule, self).__init__()
-        self.hparams = hparams
+        self.hparams = deepcopy(hparams)
 
         # Data loading
         # =============================================================================
diff --git a/utils/config.py b/utils/config.py
index 14819b7..e7f48fc 100644
--- a/utils/config.py
+++ b/utils/config.py
@@ -66,6 +66,7 @@ class Config(ConfigParser, ABC):
     @property
     def project(self):
         return self._get_namespace_for_section('project')
+
     ###################################################
 
     @property
diff --git a/utils/model_io.py b/utils/model_io.py
index 0452188..43ed268 100644
--- a/utils/model_io.py
+++ b/utils/model_io.py
@@ -10,6 +10,21 @@ from torch import nn
 
 # Hyperparamter Object
 class ModelParameters(Mapping, Namespace):
+    @property
+    def module_paramters(self):
+        paramter_mapping = dict()
+        paramter_mapping.update(self.model_param.__dict__)
+
+        paramter_mapping.update(
+            dict(
+                activation=self._activations[paramter_mapping['activation']]
+            )
+        )
+
+        del paramter_mapping['in_shape']
+
+        return paramter_mapping
+
     def __getitem__(self, k):
         # k: _KT -> _VT_co
         return self.__dict__[k]
@@ -22,6 +37,10 @@ class ModelParameters(Mapping, Namespace):
         # -> Iterator[_T_co]
         return iter(list(self.__dict__.keys()))
 
+    def __delitem__(self, key):
+        self.__dict__.__delitem__(key)
+        return True
+
     _activations = dict(
         leaky_relu=nn.LeakyReLU,
         relu=nn.ReLU,
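
Notes on the changes above (illustrative sketches, not part of the diff):

The keyword rename turns use_bias/use_norm into bias/norm across ConvModule, DeConvModule and RecurrentModule, matching the bias= keyword of the underlying torch layers. A minimal construction sketch; the (3, 32, 32) in_shape, the batch size of 8 and the 0.2 dropout are assumptions for illustration, not values from the diff:

    import torch
    from torch import nn
    from modules.blocks import ConvModule

    # Assumed layout: in_shape is (channels, height, width); the module derives
    # its input channel count from it for both BatchNorm2d and the convolution.
    conv = ConvModule(in_shape=(3, 32, 32), conv_filters=16, conv_kernel=3,
                      activation=nn.ELU, bias=True, norm=True, dropout=0.2)
    out = conv(torch.randn(8, 3, 32, 32))  # kernel 3, no padding: (8, 16, 30, 30)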
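
The deepcopy(hparams) change in LightningBaseModule guards against the aliasing that multi_run.py can otherwise produce: several runs built from one namespace that the loop keeps mutating. A self-contained sketch of the failure mode, with a plain Namespace standing in for the real hparams object:

    from argparse import Namespace
    from copy import deepcopy

    shared = Namespace(model_bias=True, seed=0)

    run_a = shared            # plain assignment: run_a aliases shared
    run_b = deepcopy(shared)  # deep copy: run_b owns an independent snapshot

    shared.seed = 1           # the multi-run loop prepares the next run

    print(run_a.seed)  # 1: the aliased hparams changed retroactively
    print(run_b.seed)  # 0: the copied hparams are unaffected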
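
The new module_paramters property (spelled as in the source) flattens the model_param sub-namespace into constructor kwargs, swapping the activation string for the matching class in _activations and dropping in_shape; __delitem__ adds mapping-style deletion. A usage sketch; the model_param fields other than activation and in_shape are assumptions, as is the commented-out consumer:

    from argparse import Namespace
    from utils.model_io import ModelParameters

    params = ModelParameters(model_param=Namespace(in_shape=(3, 32, 32),
                                                   activation='leaky_relu',
                                                   filters=[16, 32, 64],
                                                   bias=True, norm=True))

    kwargs = params.module_paramters  # activation -> nn.LeakyReLU, in_shape removed
    # model = ClassifierCNN(**kwargs)  # hypothetical consumer of the kwargs

    del params['model_param']         # routed through the new __delitem__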