Parameter Adjustments and Ensemble Model Implementation
@@ -0,0 +1,3 @@
from models.bandwise_conv_multihead_classifier import BandwiseConvMultiheadClassifier
from models.bandwise_conv_classifier import BandwiseConvClassifier
from models.conv_classifier import ConvClassifier
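These three imports re-export the new classifier classes at package level (the file this hunk belongs to is not named in this view; it is most likely the models package `__init__`). A minimal sketch of how such re-exports are typically consumed, resolving a model class by name; the `model_name` field and the registry are illustrative assumptions, not part of this commit:

from models import (BandwiseConvMultiheadClassifier, BandwiseConvClassifier,
                    ConvClassifier)

# Hypothetical lookup table (class name -> class); not part of this commit.
MODEL_REGISTRY = {cls.__name__: cls for cls in
                  (BandwiseConvMultiheadClassifier, BandwiseConvClassifier, ConvClassifier)}

def build_model(hparams):
    # hparams.model_name is an assumed field, used only for this illustration.
    return MODEL_REGISTRY[hparams.model_name](hparams)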
@@ -1,99 +0,0 @@
from argparse import Namespace

from torch import nn
from torch.nn import ModuleDict

from torchvision.transforms import Compose, ToTensor

from ml_lib.audio_toolset.audio_io import AudioToMel, NormalizeLocal, PowerToDB, MelToImage
from ml_lib.modules.blocks import ConvModule
from ml_lib.modules.utils import (LightningBaseModule, Flatten, BaseModuleMixin_Dataloaders,
                                  HorizontalSplitter, HorizontalMerger)
from models.module_mixins import BaseOptimizerMixin, BaseTrainMixin, BaseValMixin


class BandwiseBinaryClassifier(BaseModuleMixin_Dataloaders,
                               BaseTrainMixin,
                               BaseValMixin,
                               BaseOptimizerMixin,
                               LightningBaseModule
                               ):

    def __init__(self, hparams):
        super(BandwiseBinaryClassifier, self).__init__(hparams)

        # Dataset and Dataloaders
        # =============================================================================
        # Transforms
        transforms = Compose([AudioToMel(n_mels=32), MelToImage(), ToTensor(), NormalizeLocal()])
        # Datasets
        from datasets.binar_masks import BinaryMasksDataset
        self.dataset = Namespace(
            **dict(
                train_dataset=BinaryMasksDataset(self.params.root, setting='train', transforms=transforms),
                val_dataset=BinaryMasksDataset(self.params.root, setting='devel', transforms=transforms),
                test_dataset=BinaryMasksDataset(self.params.root, setting='test', transforms=transforms),
            )
        )

        # Model Parameters
        # =============================================================================
        # Additional parameters
        self.in_shape = self.dataset.train_dataset.sample_shape
        self.conv_filters = self.params.filters
        self.criterion = nn.BCELoss()
        self.n_band_sections = 5

        # Modules
        self.split = HorizontalSplitter(self.in_shape, self.n_band_sections)
        self.conv_dict = ModuleDict()

        self.conv_dict.update({f"conv_1_{band_section}":
                               ConvModule(self.split.shape, self.conv_filters[0], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )
        self.conv_dict.update({f"conv_2_{band_section}":
                               ConvModule(self.conv_dict['conv_1_1'].shape, self.conv_filters[1], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )
        self.conv_dict.update({f"conv_3_{band_section}":
                               ConvModule(self.conv_dict['conv_2_1'].shape, self.conv_filters[2], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )

        self.merge = HorizontalMerger(self.conv_dict['conv_3_1'].shape, self.n_band_sections)

        self.flat = Flatten(self.merge.shape)

        self.full_1 = nn.Linear(self.flat.shape, self.params.lat_dim, self.params.bias)
        self.full_2 = nn.Linear(self.full_1.out_features, self.full_1.out_features // 2, self.params.bias)

        self.full_out = nn.Linear(self.full_2.out_features, 1, self.params.bias)

        # Utility Modules
        self.dropout = nn.Dropout2d(self.params.dropout) if self.params.dropout else lambda x: x
        self.activation = self.params.activation()
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, **kwargs):
        tensors = self.split(batch)
        for idx, tensor in enumerate(tensors):
            tensors[idx] = self.conv_dict[f"conv_1_{idx}"](tensor)
        for idx, tensor in enumerate(tensors):
            tensors[idx] = self.conv_dict[f"conv_2_{idx}"](tensor)
        for idx, tensor in enumerate(tensors):
            tensors[idx] = self.conv_dict[f"conv_3_{idx}"](tensor)

        tensor = self.merge(tensors)
        tensor = self.flat(tensor)
        tensor = self.full_1(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_2(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_out(tensor)
        tensor = self.sigmoid(tensor)
        return tensor
models/bandwise_conv_classifier.py (new file, 86 lines)
@@ -0,0 +1,86 @@
from argparse import Namespace

from torch import nn
from torch.nn import ModuleDict, ModuleList

from ml_lib.modules.blocks import ConvModule
from ml_lib.modules.utils import (LightningBaseModule, Flatten, HorizontalSplitter,
                                  HorizontalMerger)
from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
                                BaseDataloadersMixin)


class BandwiseConvClassifier(BinaryMaskDatasetFunction,
                             BaseDataloadersMixin,
                             BaseTrainMixin,
                             BaseValMixin,
                             BaseOptimizerMixin,
                             LightningBaseModule
                             ):

    def __init__(self, hparams):
        super(BandwiseConvClassifier, self).__init__(hparams)

        # Dataset
        # =============================================================================
        self.dataset = self.build_dataset()

        # Model Parameters
        # =============================================================================
        # Additional parameters
        self.in_shape = self.dataset.train_dataset.sample_shape
        self.conv_filters = self.params.filters
        self.criterion = nn.BCELoss()
        self.n_band_sections = 4

        # Modules
        # =============================================================================
        self.split = HorizontalSplitter(self.in_shape, self.n_band_sections)
        self.conv_dict = ModuleDict()

        self.conv_dict.update({f"conv_1_{band_section}":
                               ConvModule(self.split.shape, self.conv_filters[0], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )
        self.conv_dict.update({f"conv_2_{band_section}":
                               ConvModule(self.conv_dict['conv_1_1'].shape, self.conv_filters[1], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )
        self.conv_dict.update({f"conv_3_{band_section}":
                               ConvModule(self.conv_dict['conv_2_1'].shape, self.conv_filters[2], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )

        self.merge = HorizontalMerger(self.conv_dict['conv_3_1'].shape, self.n_band_sections)
        # Flatten the merged feature map before the fully connected stack;
        # `self.flat` is referenced by `full_1` below and by `forward`.
        self.flat = Flatten(self.merge.shape)

        self.full_1 = nn.Linear(self.flat.shape, self.params.lat_dim, self.params.bias)
        self.full_2 = nn.Linear(self.full_1.out_features, self.full_1.out_features // 2, self.params.bias)

        self.full_out = nn.Linear(self.full_2.out_features, 1, self.params.bias)

        # Utility Modules
        self.dropout = nn.Dropout2d(self.params.dropout) if self.params.dropout else lambda x: x
        self.activation = self.params.activation()
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, **kwargs):
        tensors = self.split(batch)
        for idx, tensor in enumerate(tensors):
            tensors[idx] = self.conv_dict[f"conv_1_{idx}"](tensor)
        for idx, tensor in enumerate(tensors):
            tensors[idx] = self.conv_dict[f"conv_2_{idx}"](tensor)
        for idx, tensor in enumerate(tensors):
            tensors[idx] = self.conv_dict[f"conv_3_{idx}"](tensor)

        tensor = self.merge(tensors)
        tensor = self.flat(tensor)
        tensor = self.full_1(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_2(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_out(tensor)
        tensor = self.sigmoid(tensor)
        return Namespace(main_out=tensor)
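`HorizontalSplitter` and `HorizontalMerger` come from the external ml_lib package and are not shown in this commit. As a rough mental model only (a sketch, not ml_lib's actual implementation), they behave like chunking and re-concatenating the mel (frequency) axis of a (batch, channel, n_mels, time) spectrogram:

import torch

# Sketch only: approximate the splitter/merger pair with torch.chunk/torch.cat
# along the mel axis. ml_lib's real modules also track output shapes via `.shape`.
def split_horizontal(tensor, n_sections):
    return list(torch.chunk(tensor, n_sections, dim=-2))

def merge_horizontal(tensors):
    return torch.cat(tensors, dim=-2)

x = torch.randn(4, 1, 32, 128)      # 32-mel spectrogram "images", batch of 4
bands = split_horizontal(x, 4)      # four 8-mel bands (n_band_sections = 4 above)
assert merge_horizontal(bands).shape == x.shape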
models/bandwise_conv_multihead_classifier.py (new file, 107 lines)
@@ -0,0 +1,107 @@
from argparse import Namespace
from collections import defaultdict

import torch
from torch import nn
from torch.nn import ModuleDict, ModuleList
from torchcontrib.optim import SWA

from ml_lib.modules.blocks import ConvModule
from ml_lib.modules.utils import (LightningBaseModule, Flatten, HorizontalSplitter)
from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
                                BaseDataloadersMixin)


class BandwiseConvMultiheadClassifier(BinaryMaskDatasetFunction,
                                      BaseDataloadersMixin,
                                      BaseTrainMixin,
                                      BaseValMixin,
                                      BaseOptimizerMixin,
                                      LightningBaseModule
                                      ):

    def training_step(self, batch_xy, batch_nb, *args, **kwargs):
        batch_x, batch_y = batch_xy
        y = self(batch_x)
        y, bands_y = y.main_out, y.bands
        bands_y_losses = [self.criterion(band_y, batch_y) for band_y in bands_y]
        return_dict = {f'band_{band_idx}_loss': band_y for band_idx, band_y in enumerate(bands_y_losses)}
        overall_loss = self.criterion(y, batch_y)
        combined_loss = overall_loss + torch.stack(bands_y_losses).sum()
        return_dict.update(loss=combined_loss, overall_loss=overall_loss)
        return return_dict

    def validation_step(self, batch_xy, batch_idx, *args, **kwargs):
        batch_x, batch_y = batch_xy
        y = self(batch_x)
        y, bands_y = y.main_out, y.bands
        bands_y_losses = [self.criterion(band_y, batch_y) for band_y in bands_y]
        return_dict = {f'band_{band_idx}_val_loss': band_y for band_idx, band_y in enumerate(bands_y_losses)}
        overall_loss = self.criterion(y, batch_y)
        combined_loss = overall_loss + torch.stack(bands_y_losses).sum()

        val_abs_loss = self.absolute_loss(y, batch_y)
        return_dict.update(val_bce_loss=combined_loss, val_abs_loss=val_abs_loss,
                           batch_idx=batch_idx, y=y, batch_y=batch_y
                           )
        return return_dict

    def __init__(self, hparams):
        super(BandwiseConvMultiheadClassifier, self).__init__(hparams)

        # Dataset
        # =============================================================================
        self.dataset = self.build_dataset()

        # Model Parameters
        # =============================================================================
        # Additional parameters
        self.in_shape = self.dataset.train_dataset.sample_shape
        self.conv_filters = self.params.filters
        self.criterion = nn.BCELoss()
        self.n_band_sections = 8

        # Modules
        # =============================================================================
        self.split = HorizontalSplitter(self.in_shape, self.n_band_sections)
        self.conv_dict = ModuleDict()

        self.conv_dict.update({f"conv_1_{band_section}":
                               ConvModule(self.split.shape, self.conv_filters[0], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )
        self.conv_dict.update({f"conv_2_{band_section}":
                               ConvModule(self.conv_dict['conv_1_1'].shape, self.conv_filters[1], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )
        self.conv_dict.update({f"conv_3_{band_section}":
                               ConvModule(self.conv_dict['conv_2_1'].shape, self.conv_filters[2], 3, conv_stride=1,
                                          **self.params.module_kwargs)
                               for band_section in range(self.n_band_sections)}
                              )

        self.flat = Flatten(self.conv_dict['conv_3_1'].shape)
        self.bandwise_latent_list = ModuleList([
            nn.Linear(self.flat.shape, self.params.lat_dim, self.params.bias) for _ in range(self.n_band_sections)])
        self.bandwise_classifier_list = ModuleList([nn.Linear(self.params.lat_dim, 1, self.params.bias)
                                                    for _ in range(self.n_band_sections)])

        self.full_out = nn.Linear(self.n_band_sections, 1, self.params.bias)

        # Utility Modules
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, **kwargs):
        tensors = self.split(batch)
        for idx, tensor in enumerate(tensors):
            tensor = self.conv_dict[f"conv_1_{idx}"](tensor)
            tensor = self.conv_dict[f"conv_2_{idx}"](tensor)
            tensor = self.conv_dict[f"conv_3_{idx}"](tensor)
            tensor = self.flat(tensor)
            tensor = self.bandwise_latent_list[idx](tensor)
            tensor = self.bandwise_classifier_list[idx](tensor)
            tensors[idx] = self.sigmoid(tensor)
        tensor = torch.cat(tensors, dim=1)
        tensor = self.full_out(tensor)
        tensor = self.sigmoid(tensor)
        return Namespace(main_out=tensor, bands=tensors)
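A worked illustration of the multihead loss above, with dummy shapes (all values made up): each band head emits its own sigmoid probability, and the combined training loss is the overall BCE plus the sum of the eight per-band BCEs.

import torch
from torch import nn

criterion = nn.BCELoss()
batch_y = torch.randint(0, 2, (16, 1)).float()   # dummy binary targets
main_out = torch.rand(16, 1)                     # stand-in for y.main_out
bands = [torch.rand(16, 1) for _ in range(8)]    # stand-ins for y.bands

band_losses = [criterion(band_y, batch_y) for band_y in bands]
combined_loss = criterion(main_out, batch_y) + torch.stack(band_losses).sum()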
@@ -1,79 +0,0 @@
from argparse import Namespace

from torch import nn

from torchvision.transforms import Compose, ToTensor

from ml_lib.audio_toolset.audio_io import AudioToMel, NormalizeLocal, PowerToDB, MelToImage
from ml_lib.modules.blocks import ConvModule
from ml_lib.modules.utils import LightningBaseModule, Flatten, BaseModuleMixin_Dataloaders
from models.module_mixins import BaseOptimizerMixin, BaseTrainMixin, BaseValMixin


class BinaryClassifier(BaseModuleMixin_Dataloaders,
                       BaseTrainMixin,
                       BaseValMixin,
                       BaseOptimizerMixin,
                       LightningBaseModule
                       ):

    def __init__(self, hparams):
        super(BinaryClassifier, self).__init__(hparams)

        # Dataset and Dataloaders
        # =============================================================================
        # Transforms
        transforms = Compose([AudioToMel(), MelToImage(), ToTensor(), NormalizeLocal()])
        # Datasets
        from datasets.binar_masks import BinaryMasksDataset
        self.dataset = Namespace(
            **dict(
                train_dataset=BinaryMasksDataset(self.params.root, setting='train', transforms=transforms),
                val_dataset=BinaryMasksDataset(self.params.root, setting='devel', transforms=transforms),
                test_dataset=BinaryMasksDataset(self.params.root, setting='test', transforms=transforms),
            )
        )

        # Model Parameters
        # =============================================================================
        # Additional parameters
        self.in_shape = self.dataset.train_dataset.sample_shape
        self.conv_filters = self.params.filters
        self.criterion = nn.BCELoss()

        # Modules with Parameters
        self.conv_1 = ConvModule(self.in_shape, self.conv_filters[0], 3, conv_stride=2, **self.params.module_kwargs)
        self.conv_1b = ConvModule(self.conv_1.shape, self.conv_filters[0], 1, conv_stride=1, **self.params.module_kwargs)
        self.conv_2 = ConvModule(self.conv_1b.shape, self.conv_filters[1], 5, conv_stride=2, **self.params.module_kwargs)
        self.conv_2b = ConvModule(self.conv_2.shape, self.conv_filters[1], 1, conv_stride=1, **self.params.module_kwargs)
        self.conv_3 = ConvModule(self.conv_2b.shape, self.conv_filters[2], 7, conv_stride=2, **self.params.module_kwargs)
        self.conv_3b = ConvModule(self.conv_3.shape, self.conv_filters[2], 1, conv_stride=1, **self.params.module_kwargs)

        self.flat = Flatten(self.conv_3b.shape)
        self.full_1 = nn.Linear(self.flat.shape, self.params.lat_dim, self.params.bias)
        self.full_2 = nn.Linear(self.full_1.out_features, self.full_1.out_features // 2, self.params.bias)

        self.full_out = nn.Linear(self.full_2.out_features, 1, self.params.bias)

        # Utility Modules
        self.dropout = nn.Dropout2d(self.params.dropout) if self.params.dropout else lambda x: x
        self.activation = self.params.activation()
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, **kwargs):
        tensor = self.conv_1(batch)
        tensor = self.conv_1b(tensor)
        tensor = self.conv_2(tensor)
        tensor = self.conv_2b(tensor)
        tensor = self.conv_3(tensor)
        tensor = self.conv_3b(tensor)
        tensor = self.flat(tensor)
        tensor = self.full_1(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_2(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_out(tensor)
        tensor = self.sigmoid(tensor)
        return tensor
models/conv_classifier.py (new file, 75 lines)
@@ -0,0 +1,75 @@
from argparse import Namespace

from torch import nn
from torch.nn import ModuleList

from ml_lib.modules.blocks import ConvModule
from ml_lib.modules.utils import LightningBaseModule, Flatten
from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
                                BaseDataloadersMixin)


class ConvClassifier(BinaryMaskDatasetFunction,
                     BaseDataloadersMixin,
                     BaseTrainMixin,
                     BaseValMixin,
                     BaseOptimizerMixin,
                     LightningBaseModule
                     ):

    def __init__(self, hparams):
        super(ConvClassifier, self).__init__(hparams)

        # Dataset
        # =============================================================================
        self.dataset = self.build_dataset()

        # Model Parameters
        # =============================================================================
        # Additional parameters
        self.in_shape = self.dataset.train_dataset.sample_shape
        self.conv_filters = self.params.filters
        self.criterion = nn.BCELoss()

        # Modules with Parameters
        self.conv_list = ModuleList()
        last_shape = self.in_shape
        k = 3  # Base Kernel Value
        for filters in self.conv_filters:
            self.conv_list.append(ConvModule(last_shape, filters, (k, k * 2), conv_stride=2,
                                             **self.params.module_kwargs))
            last_shape = self.conv_list[-1].shape
            self.conv_list.append(ConvModule(last_shape, filters, 1, conv_stride=1, **self.params.module_kwargs))
            last_shape = self.conv_list[-1].shape
            self.conv_list.append(ConvModule(last_shape, 1, 1, conv_stride=1, **self.params.module_kwargs))
            last_shape = self.conv_list[-1].shape
            k = k + 2

        self.flat = Flatten(self.conv_list[-1].shape)
        self.full_1 = nn.Linear(self.flat.shape, self.params.lat_dim, self.params.bias)
        self.full_2 = nn.Linear(self.full_1.out_features, self.full_1.out_features * 2, self.params.bias)
        self.full_3 = nn.Linear(self.full_2.out_features, self.full_2.out_features // 2, self.params.bias)

        self.full_out = nn.Linear(self.full_3.out_features, 1, self.params.bias)

        # Utility Modules
        self.dropout = nn.Dropout2d(self.params.dropout) if self.params.dropout else lambda x: x
        self.activation = self.params.activation()
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, **kwargs):
        tensor = batch
        for conv in self.conv_list:
            tensor = conv(tensor)
        tensor = self.flat(tensor)
        tensor = self.full_1(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_2(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_3(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_out(tensor)
        tensor = self.sigmoid(tensor)
        return Namespace(main_out=tensor)
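Writing out the kernel schedule built in `ConvClassifier.__init__`: the base kernel `k` starts at 3 and grows by 2 per filter stage, each stage using a rectangular `(k, 2k)` kernel (wider along the time axis) followed by two 1x1 convolutions. The filter values below are an assumed example; `params.filters` is configured elsewhere.

filters = [32, 64, 128]  # assumed example value for params.filters
k = 3
for f in filters:
    print(f"stage: {f:>3} filters, kernel {(k, k * 2)}, stride 2, then two 1x1 convs")
    k += 2
# stage:  32 filters, kernel (3, 6), stride 2, then two 1x1 convs
# stage:  64 filters, kernel (5, 10), stride 2, then two 1x1 convs
# stage: 128 filters, kernel (7, 14), stride 2, then two 1x1 convs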
models/ensemble.py (new file, 55 lines)
@@ -0,0 +1,55 @@
from argparse import Namespace
from pathlib import Path

import torch
from torch import nn
from torch.nn import ModuleList

from ml_lib.modules.utils import LightningBaseModule
from ml_lib.utils.config import Config
from ml_lib.utils.model_io import SavedLightningModels
from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
                                BaseDataloadersMixin)


class Ensemble(BinaryMaskDatasetFunction,
               BaseDataloadersMixin,
               BaseTrainMixin,
               BaseValMixin,
               BaseOptimizerMixin,
               LightningBaseModule
               ):

    def __init__(self, hparams):
        super(Ensemble, self).__init__(hparams)

        # Dataset
        # =============================================================================
        self.dataset = self.build_dataset()

        # Model Parameters
        # =============================================================================
        # Additional parameters
        self.in_shape = self.dataset.train_dataset.sample_shape
        self.conv_filters = self.params.filters
        self.criterion = nn.BCELoss()

        # Pre-trained models
        out_path = Path('output') / self.params.secondary_type
        # exp_paths = list(out_path.rglob(f'*{self.params.exp_fingerprint}'))
        exp_paths = list(out_path.rglob('*e87b8f455ba134504b1ae17114ac2a2a'))
        config_ini_files = sum([list(exp_path.rglob('config.ini')) for exp_path in exp_paths], [])

        self.model_list = ModuleList()

        configs = [Config() for _ in range(len(config_ini_files))]
        for config, ini_file in zip(configs, config_ini_files):
            config.read_file(ini_file.open('r'))
            model = SavedLightningModels.load_checkpoint(models_root_path=config.exp_path / config.version).restore()
            self.model_list.append(model)

    def forward(self, batch, **kwargs):
        ys = [model(batch).main_out for model in self.model_list]
        tensor = torch.stack(ys).mean(dim=0)

        return Namespace(main_out=tensor)
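`Ensemble.forward` amounts to soft voting: it averages the members' sigmoid outputs, and thresholding at 0.5 happens later in the validation mixin. A self-contained sketch with stand-in members (the constant-output lambdas are obviously not real models):

import torch

members = [lambda x, p=p: torch.full((x.shape[0], 1), p) for p in (0.2, 0.6, 0.9)]
batch = torch.randn(4, 1, 32, 128)
mean_out = torch.stack([m(batch) for m in members]).mean(dim=0)
print(float(mean_out[0]))  # ~0.567 -> above the later 0.5 threshold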
@@ -1,55 +0,0 @@
import sklearn.metrics
import torch
import numpy as np
from torch.nn import L1Loss
from torch.optim import Adam


class BaseOptimizerMixin:

    def configure_optimizers(self):
        return Adam(params=self.parameters(), lr=self.params.lr)


class BaseTrainMixin:

    def training_step(self, batch_xy, batch_nb, *args, **kwargs):
        batch_x, batch_y = batch_xy
        y = self(batch_x)
        loss = self.criterion(y, batch_y)
        return dict(loss=loss)

    def training_epoch_end(self, outputs):
        mean_train_loss = torch.mean(torch.stack([output['loss'] for output in outputs]))
        return dict(log=dict(mean_train_loss=mean_train_loss))


class BaseValMixin:

    absolute_loss = L1Loss()

    def validation_step(self, batch_xy, batch_idx, *args, **kwargs):
        batch_x, batch_y = batch_xy
        y = self(batch_x)
        val_loss = self.criterion(y, batch_y)
        absolute_error = self.absolute_loss(y, batch_y)
        return dict(val_loss=val_loss, absolute_error=absolute_error, batch_idx=batch_idx, y=y, batch_y=batch_y)

    def validation_epoch_end(self, outputs):
        overall_val_loss = torch.mean(torch.stack([output['val_loss'] for output in outputs]))
        mean_absolute_error = torch.mean(torch.stack([output['absolute_error'] for output in outputs]))

        # UnweightedAverageRecall
        y_true = torch.cat([output['batch_y'] for output in outputs]).cpu().numpy()
        y_pred = torch.cat([output['y'] for output in outputs]).squeeze().cpu().numpy()

        y_pred = (y_pred >= 0.5).astype(np.float32)

        uar_score = sklearn.metrics.recall_score(y_true, y_pred, labels=[0, 1], average='macro',
                                                 sample_weight=None, zero_division='warn')

        return dict(
            log=dict(mean_val_loss=overall_val_loss,
                     mean_absolute_error=mean_absolute_error,
                     uar_score=uar_score)
        )
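The UAR score computed in `validation_epoch_end` is macro-averaged recall over the two classes, so both classes count equally regardless of their frequency. A tiny worked example:

import numpy as np
import sklearn.metrics

y_true = np.array([0, 0, 0, 1, 1])
y_pred = np.array([0, 1, 0, 1, 1])  # predictions already thresholded at 0.5
uar = sklearn.metrics.recall_score(y_true, y_pred, labels=[0, 1], average='macro')
print(uar)  # recall(0) = 2/3, recall(1) = 2/2 -> (0.6667 + 1.0) / 2 = 0.8333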