from argparse import Namespace

from torch import nn
from torchvision.transforms import Compose, ToTensor

from ml_lib.audio_toolset.audio_io import AudioToMel, NormalizeLocal, PowerToDB, MelToImage
from ml_lib.modules.blocks import ConvModule
from ml_lib.modules.utils import LightningBaseModule, Flatten, BaseModuleMixin_Dataloaders

from models.module_mixins import BaseOptimizerMixin, BaseTrainMixin, BaseValMixin


class BinaryClassifier(BaseModuleMixin_Dataloaders,
                       BaseTrainMixin,
                       BaseValMixin,
                       BaseOptimizerMixin,
                       LightningBaseModule
                       ):
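    """Binary mask classifier.

    Three strided ConvModule stages (each followed by a 1x1 refinement block)
    extract features from Mel-spectrogram images; a small fully connected head
    with a sigmoid output yields a per-sample probability trained with BCELoss.
    """
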
    def __init__(self, hparams):
        super(BinaryClassifier, self).__init__(hparams)

        # Dataset and Dataloaders
        # =============================================================================
        # Transforms
        transforms = Compose([AudioToMel(), MelToImage(), ToTensor(), NormalizeLocal()])

        # Datasets
        from datasets.binar_masks import BinaryMasksDataset
        self.dataset = Namespace(
            **dict(
                train_dataset=BinaryMasksDataset(self.params.root, setting='train', transforms=transforms),
                val_dataset=BinaryMasksDataset(self.params.root, setting='devel', transforms=transforms),
                test_dataset=BinaryMasksDataset(self.params.root, setting='test', transforms=transforms),
            )
        )

        # Model Parameters
        # =============================================================================
        # Additional parameters
        self.in_shape = self.dataset.train_dataset.sample_shape
        self.conv_filters = self.params.filters
        self.criterion = nn.BCELoss()

        # Modules with Parameters
        # Each ConvModule exposes its output shape via ``.shape``, so every stage
        # below is sized from the output of the previous one.
        self.conv_1 = ConvModule(self.in_shape, self.conv_filters[0], 3, conv_stride=2, **self.params.module_kwargs)
        self.conv_1b = ConvModule(self.conv_1.shape, self.conv_filters[0], 1, conv_stride=1, **self.params.module_kwargs)
        self.conv_2 = ConvModule(self.conv_1b.shape, self.conv_filters[1], 5, conv_stride=2, **self.params.module_kwargs)
        self.conv_2b = ConvModule(self.conv_2.shape, self.conv_filters[1], 1, conv_stride=1, **self.params.module_kwargs)
        self.conv_3 = ConvModule(self.conv_2b.shape, self.conv_filters[2], 7, conv_stride=2, **self.params.module_kwargs)
        self.conv_3b = ConvModule(self.conv_3.shape, self.conv_filters[2], 1, conv_stride=1, **self.params.module_kwargs)

        self.flat = Flatten(self.conv_3b.shape)
        self.full_1 = nn.Linear(self.flat.shape, self.params.lat_dim, self.params.bias)
        self.full_2 = nn.Linear(self.full_1.out_features, self.full_1.out_features // 2, self.params.bias)

        self.full_out = nn.Linear(self.full_2.out_features, 1, self.params.bias)

        # Utility Modules
        self.dropout = nn.Dropout2d(self.params.dropout) if self.params.dropout else lambda x: x
        self.activation = self.params.activation()
        self.sigmoid = nn.Sigmoid()

    def forward(self, batch, **kwargs):
        # Convolutional feature extractor
        tensor = self.conv_1(batch)
        tensor = self.conv_1b(tensor)
        tensor = self.conv_2(tensor)
        tensor = self.conv_2b(tensor)
        tensor = self.conv_3(tensor)
        tensor = self.conv_3b(tensor)

        # Fully connected head with sigmoid output for BCELoss
        tensor = self.flat(tensor)
        tensor = self.full_1(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_2(tensor)
        tensor = self.activation(tensor)
        tensor = self.dropout(tensor)
        tensor = self.full_out(tensor)
        tensor = self.sigmoid(tensor)
        return tensor
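

# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the training pipeline):
# it presumes the BinaryMasksDataset files exist under ``root`` and that
# ``LightningBaseModule`` exposes the passed hyperparameters as ``self.params``
# (pass a Namespace instead of a dict if the base module expects one).
# All field values below are illustrative placeholders.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch

    example_hparams = dict(
        root='data/binary_masks',   # assumed dataset location
        filters=[16, 32, 64],       # one entry per conv stage (conv_1..conv_3)
        lat_dim=128,                # width of the first fully connected layer
        bias=True,
        dropout=0.0,                # 0 disables dropout; >0 enables nn.Dropout2d
        activation=nn.ReLU,         # stored as a class, instantiated in __init__
        module_kwargs=dict(),       # extra arguments forwarded to every ConvModule
    )

    model = BinaryClassifier(example_hparams)
    dummy_batch = torch.randn(4, *model.in_shape)   # batch shaped like dataset samples
    print(model(dummy_batch).shape)                 # expected: torch.Size([4, 1])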