requirements

Si11ium 2020-05-14 23:08:36 +02:00
parent 407df15bbf
commit e7d1a4895a
9 changed files with 52 additions and 38 deletions

View File

@@ -30,12 +30,12 @@ main_arg_parser.add_argument("--data_n_fft", type=int, default=512, help="")
 main_arg_parser.add_argument("--data_mixup", type=strtobool, default=False, help="")
 # Transformation Parameters
-main_arg_parser.add_argument("--data_loudness_ratio", type=float, default=0.4, help="")
-main_arg_parser.add_argument("--data_shift_ratio", type=float, default=0.3, help="")
-main_arg_parser.add_argument("--data_noise_ratio", type=float, default=0.4, help="")
-main_arg_parser.add_argument("--data_mask_ratio", type=float, default=0.2, help="")
-main_arg_parser.add_argument("--data_speed_ratio", type=float, default=0.3, help="")
-main_arg_parser.add_argument("--data_speed_factor", type=float, default=0.7, help="")
+main_arg_parser.add_argument("--data_loudness_ratio", type=float, default=0, help="")  # 0.4
+main_arg_parser.add_argument("--data_shift_ratio", type=float, default=0, help="")  # 0.3
+main_arg_parser.add_argument("--data_noise_ratio", type=float, default=0, help="")  # 0.4
+main_arg_parser.add_argument("--data_mask_ratio", type=float, default=0.2, help="")  # 0.2
+main_arg_parser.add_argument("--data_speed_ratio", type=float, default=0.3, help="")  # 0.3
+main_arg_parser.add_argument("--data_speed_factor", type=float, default=0.7, help="")  # 0.7
 # Training Parameters
 main_arg_parser.add_argument("--train_outpath", type=str, default="output", help="")
@@ -49,8 +49,8 @@ main_arg_parser.add_argument("--train_lr", type=float, default=1e-4, help="")
 main_arg_parser.add_argument("--train_num_sanity_val_steps", type=int, default=0, help="")
 # Model Parameters
-main_arg_parser.add_argument("--model_type", type=str, default="CC", help="")
-main_arg_parser.add_argument("--model_secondary_type", type=str, default="CC", help="")
+main_arg_parser.add_argument("--model_type", type=str, default="BCMC", help="")
+main_arg_parser.add_argument("--model_secondary_type", type=str, default="BCMC", help="")
 main_arg_parser.add_argument("--model_weight_init", type=str, default="xavier_normal_", help="")
 main_arg_parser.add_argument("--model_activation", type=str, default="leaky_relu", help="")
 main_arg_parser.add_argument("--model_filters", type=str, default="[32, 64, 128, 64]", help="")

View File

@@ -47,9 +47,10 @@ class BinaryMasksDataset(Dataset):
             filename, label = row.strip().split(',')
             labeldict[filename] = self._to_label[label.lower()] if not self.setting == 'test' else filename
         if self.stretch and self.setting == V.DATA_OPTIONS.train:
-            labeldict.update({f'X_{key}': val for key, val in labeldict.items()})
-            labeldict.update({f'X_X_{key}': val for key, val in labeldict.items()})
-            labeldict.update({f'X_X_X_{key}': val for key, val in labeldict.items()})
+            additional_dict = ({f'X_{key}': val for key, val in labeldict.items()})
+            additional_dict.update({f'X_X_{key}': val for key, val in labeldict.items()})
+            additional_dict.update({f'X_X_X_{key}': val for key, val in labeldict.items()})
+            labeldict.update(additional_dict)
         return labeldict

     def __len__(self):
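Note on the change above: the old code updated labeldict in place, so each successive comprehension also iterated over the keys added by the previous one and the prefixes compounded. Collecting the prefixed copies in additional_dict first and merging once yields exactly the original key plus the three intended X_/X_X_/X_X_X_ variants. A toy illustration (made-up key and value, not the dataset's real labels):

# Old behaviour: in-place updates compound the prefixes (7 keys from 1 original).
old = {'a.wav': 1}
old.update({f'X_{k}': v for k, v in old.items()})       # adds X_a.wav
old.update({f'X_X_{k}': v for k, v in old.items()})     # adds X_X_a.wav, X_X_X_a.wav
old.update({f'X_X_X_{k}': v for k, v in old.items()})   # adds keys up to X_X_X_X_X_X_a.wav
print(len(old))  # 7

# New behaviour: build the additions from the untouched dict, then merge once (4 keys).
new = {'a.wav': 1}
additional = {f'X_{k}': v for k, v in new.items()}
additional.update({f'X_X_{k}': v for k, v in new.items()})
additional.update({f'X_X_X_{k}': v for k, v in new.items()})
new.update(additional)
print(sorted(new))  # ['X_X_X_a.wav', 'X_X_a.wav', 'X_a.wav', 'a.wav']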

View File

@@ -5,7 +5,7 @@ from tqdm import tqdm
 import variables as V
 from torch.utils.data import DataLoader, Dataset
-from torchvision.transforms import Compose
+from torchvision.transforms import Compose, RandomApply
 from ml_lib.audio_toolset.audio_io import AudioToMel, NormalizeLocal, MelToImage
@@ -13,6 +13,7 @@ from ml_lib.audio_toolset.audio_io import AudioToMel, NormalizeLocal, MelToImage
 # =============================================================================
 # Transforms
+from ml_lib.audio_toolset.mel_augmentation import NoiseInjection, LoudnessManipulator, ShiftTime, MaskAug
 from ml_lib.utils.logging import Logger
 from ml_lib.utils.model_io import SavedLightningModels
 from ml_lib.utils.transforms import ToTensor
@@ -28,8 +29,18 @@ def prepare_dataloader(config_obj):
                               AudioToMel(sr=config_obj.data.sr, n_mels=config_obj.data.n_mels, n_fft=config_obj.data.n_fft,
                                          hop_length=config_obj.data.hop_length), MelToImage()])
     transforms = Compose([NormalizeLocal(), ToTensor()])
+    aug_transforms = Compose([
+        RandomApply([
+            NoiseInjection(config_obj.data.noise_ratio),
+            LoudnessManipulator(config_obj.data.loudness_ratio),
+            ShiftTime(config_obj.data.shift_ratio),
+            MaskAug(config_obj.data.mask_ratio),
+        ], p=0.6),
+        # Utility
+        NormalizeLocal(), ToTensor()
+    ])
-    dataset: Dataset = BinaryMasksDataset(config_obj.data.root, setting='test',
+    dataset: Dataset = BinaryMasksDataset(config_obj.data.root, setting='train',
                                           mel_transforms=mel_transforms, transforms=transforms
                                           )
     # noinspection PyTypeChecker
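For context on the aug_transforms block added above: torchvision's RandomApply wraps a list of transforms and, with probability p, applies all of them in order (otherwise none), so the four mel augmentations fire together 60% of the time before normalisation. A self-contained sketch with stand-in callables (the real NoiseInjection, LoudnessManipulator, ShiftTime and MaskAug come from ml_lib and are not reproduced here):

import torch
from torchvision.transforms import Compose, RandomApply

# Stand-ins for the mel augmentations, only to show RandomApply's all-or-nothing behaviour.
add_noise = lambda x: x + 0.01 * torch.randn_like(x)
shift_loudness = lambda x: x * 1.1

aug_transforms = Compose([
    RandomApply([add_noise, shift_loudness], p=0.6),  # both applied with prob. 0.6, neither otherwise
    lambda x: (x - x.mean()) / (x.std() + 1e-8),      # stand-in for the local normalisation step
])

mel = torch.rand(1, 64, 128)  # fake mel "image": (channels, n_mels, time)
out = aug_transforms(mel)
print(out.shape)  # torch.Size([1, 64, 128])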
@@ -49,9 +60,9 @@ def restore_logger_and_model(config_obj):
 if __name__ == '__main__':
     outpath = Path('output')
-    model_type = 'BandwiseConvMultiheadClassifier'
-    parameters = 'BCMC_9c70168a5711c269b33701f1650adfb9/'
-    version = 'version_1'
+    model_type = 'CC'
+    parameters = 'CC_213adb16e46592c5a405abfbd693835e/'
+    version = 'version_41'
     config_filename = 'config.ini'
     inference_out = 'manual_test_out.csv'

View File

@@ -5,11 +5,11 @@ from torch.nn import ModuleList
 from ml_lib.modules.blocks import ConvModule, LinearModule
 from ml_lib.modules.utils import (LightningBaseModule, HorizontalSplitter, HorizontalMerger)
-from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
+from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)


-class BandwiseConvClassifier(BinaryMaskDatasetFunction,
+class BandwiseConvClassifier(BinaryMaskDatasetMixin,
                              BaseDataloadersMixin,
                              BaseTrainMixin,
                              BaseValMixin,

View File

@@ -6,11 +6,11 @@ from torch.nn import ModuleList
 from ml_lib.modules.blocks import ConvModule, LinearModule
 from ml_lib.modules.utils import (LightningBaseModule, Flatten, HorizontalSplitter)
-from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
+from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)


-class BandwiseConvMultiheadClassifier(BinaryMaskDatasetFunction,
+class BandwiseConvMultiheadClassifier(BinaryMaskDatasetMixin,
                                       BaseDataloadersMixin,
                                       BaseTrainMixin,
                                       BaseValMixin,
@@ -42,7 +42,7 @@ class BandwiseConvMultiheadClassifier(BinaryMaskDatasetFunction,
         return_dict = {f'band_{band_idx}_val_loss': band_y for band_idx, band_y in enumerate(bands_y_losses)}
         last_bce_loss = self.bce_loss(y, batch_y)
-        return_dict.update(last_bce_loss=last_bce_loss)
+        return_dict.update(last_val_bce_loss=last_bce_loss)
         bands_y_losses.append(last_bce_loss)
         combined_loss = torch.stack(bands_y_losses).mean()
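The hunk above only renames the logged key (last_bce_loss to last_val_bce_loss); the combined loss is still the mean over the per-band losses with the final BCE term appended. Minimal sketch of that reduction with dummy values:

import torch

# Per-band validation losses plus the last (full-model) BCE loss, averaged into one scalar.
bands_y_losses = [torch.tensor(0.7), torch.tensor(0.4), torch.tensor(0.5)]
last_bce_loss = torch.tensor(0.6)
bands_y_losses.append(last_bce_loss)
combined_loss = torch.stack(bands_y_losses).mean()
print(combined_loss)  # tensor(0.5500)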
@@ -76,7 +76,7 @@ class BandwiseConvMultiheadClassifier(BinaryMaskDatasetFunction,
             last_shape = self.split.shape
             conv_list = ModuleList()
             for filters in self.conv_filters:
-                conv_list.append(ConvModule(last_shape, filters, (k,k), conv_stride=(1, 1),
+                conv_list.append(ConvModule(last_shape, filters, (k, k), conv_stride=(2, 2), conv_padding=2,
                                             **self.params.module_kwargs))
                 last_shape = conv_list[-1].shape
                 # self.conv_list.append(ConvModule(last_shape, 1, 1, conv_stride=1, **self.params.module_kwargs))
@@ -84,10 +84,10 @@ class BandwiseConvMultiheadClassifier(BinaryMaskDatasetFunction,
             self.band_list.append(conv_list)
         self.bandwise_deep_list_1 = ModuleList([
-            LinearModule(self.band_list[0][-1].shape, self.params.lat_dim * 4, **self.params.module_kwargs)
+            LinearModule(self.band_list[0][-1].shape, self.params.lat_dim, **self.params.module_kwargs)
             for _ in range(self.n_band_sections)])
         self.bandwise_deep_list_2 = ModuleList([
-            LinearModule(self.params.lat_dim * 4, self.params.lat_dim * 2, **self.params.module_kwargs)
+            LinearModule(self.params.lat_dim, self.params.lat_dim * 2, **self.params.module_kwargs)
             for _ in range(self.n_band_sections)])
         self.bandwise_latent_list = ModuleList([
             LinearModule(self.params.lat_dim * 2, self.params.lat_dim, **self.params.module_kwargs)
@@ -96,7 +96,7 @@ class BandwiseConvMultiheadClassifier(BinaryMaskDatasetFunction,
             LinearModule(self.params.lat_dim, 1, bias=self.params.bias, activation=nn.Sigmoid)
             for _ in range(self.n_band_sections)])
-        self.full_1 = LinearModule(self.n_band_sections, self.params.lat_dim * 4, **self.params.module_kwargs)
+        self.full_1 = LinearModule(self.n_band_sections, self.params.lat_dim, **self.params.module_kwargs)
         self.full_2 = LinearModule(self.full_1.shape, self.params.lat_dim * 2, **self.params.module_kwargs)
         self.full_3 = LinearModule(self.full_2.shape, self.params.lat_dim, **self.params.module_kwargs)
         self.full_out = LinearModule(self.full_3.shape, 1, bias=self.params.bias, activation=nn.Sigmoid)
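The convolution change above switches the band-wise blocks from stride (1, 1) to stride (2, 2) with padding 2, so each block now downsamples its band, while the following linear widths are reduced from lat_dim * 4 to lat_dim. A plain-PyTorch sketch of the shape effect (ConvModule is project code; nn.Conv2d stands in for it here, with an assumed k = 3 and a made-up input size):

import torch
import torch.nn as nn

# With kernel k, stride 2 and padding 2 each block roughly halves the spatial size:
# out = floor((in + 2*pad - k) / stride) + 1
k = 3  # assumed kernel size, purely illustrative
conv = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(k, k), stride=(2, 2), padding=2)
x = torch.rand(1, 1, 64, 128)  # (batch, channels, n_mels, time) -- made-up band size
print(conv(x).shape)  # torch.Size([1, 32, 33, 65])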

View File

@@ -5,11 +5,11 @@ from torch.nn import ModuleList
 from ml_lib.modules.blocks import ConvModule, LinearModule
 from ml_lib.modules.utils import LightningBaseModule
-from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
+from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)


-class ConvClassifier(BinaryMaskDatasetFunction,
+class ConvClassifier(BinaryMaskDatasetMixin,
                      BaseDataloadersMixin,
                      BaseTrainMixin,
                      BaseValMixin,

View File

@@ -8,17 +8,17 @@ from torch.nn import ModuleList
 from ml_lib.modules.utils import LightningBaseModule
 from ml_lib.utils.config import Config
 from ml_lib.utils.model_io import SavedLightningModels
-from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
+from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)


-class Ensemble(BinaryMaskDatasetFunction,
+class Ensemble(BinaryMaskDatasetMixin,
               BaseDataloadersMixin,
               BaseTrainMixin,
               BaseValMixin,
               BaseOptimizerMixin,
               LightningBaseModule
               ):

    def __init__(self, hparams):
        super(Ensemble, self).__init__(hparams)

View File

@@ -5,11 +5,11 @@ from torch.nn import ModuleList
 from ml_lib.modules.blocks import ConvModule, LinearModule, ResidualModule
 from ml_lib.modules.utils import LightningBaseModule
-from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetFunction,
+from util.module_mixins import (BaseOptimizerMixin, BaseTrainMixin, BaseValMixin, BinaryMaskDatasetMixin,
                                 BaseDataloadersMixin)


-class ResidualConvClassifier(BinaryMaskDatasetFunction,
+class ResidualConvClassifier(BinaryMaskDatasetMixin,
                              BaseDataloadersMixin,
                              BaseTrainMixin,
                              BaseValMixin,
@@ -45,6 +45,8 @@ class ResidualConvClassifier(BinaryMaskDatasetFunction,
             last_shape = self.conv_list[-1].shape
             self.conv_list.append(ConvModule(last_shape, filters, (k, k), conv_stride=(2, 2), conv_padding=2,
                                              **self.params.module_kwargs))
+            for param in self.conv_list[-1].parameters():
+                param.requires_grad = False
             last_shape = self.conv_list[-1].shape

         self.full_1 = LinearModule(self.conv_list[-1].shape, self.params.lat_dim, **self.params.module_kwargs)
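The two added lines freeze the just-appended ConvModule: with requires_grad = False its weights are excluded from gradient computation, so only the remaining layers are updated. A small stand-alone sketch of the same mechanism with plain torch modules (illustrative layer sizes, not the project's shapes):

import torch
import torch.nn as nn

# Freeze a block, then build the optimizer from the still-trainable parameters only.
frozen_block = nn.Conv2d(32, 64, kernel_size=(3, 3), stride=(2, 2), padding=2)
for param in frozen_block.parameters():
    param.requires_grad = False

head = nn.Linear(64, 1)
trainable = [p for p in list(frozen_block.parameters()) + list(head.parameters()) if p.requires_grad]
optimizer = torch.optim.Adam(trainable, lr=1e-4)
print(len(trainable))  # 2 -- only the head's weight and bias remain trainable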

View File

@@ -105,7 +105,7 @@ class BaseValMixin:
         return summary_dict


-class BinaryMaskDatasetFunction:
+class BinaryMaskDatasetMixin:

     def build_dataset(self):
         assert isinstance(self, LightningBaseModule)
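The rename from BinaryMaskDatasetFunction to BinaryMaskDatasetMixin, applied consistently across the model files above, reflects the mixin pattern used here: small classes contribute methods such as build_dataset, and each classifier combines them with LightningBaseModule through multiple inheritance. A toy illustration with made-up class names and bodies, not the project's code:

class DatasetMixin:
    def build_dataset(self):
        # In the real mixin this would construct the BinaryMasksDataset splits.
        return f"dataset for {self.__class__.__name__}"


class TrainMixin:
    def training_step(self, batch, batch_idx):
        return {"loss": 0.0}


class BaseModule:
    def __init__(self, hparams):
        self.hparams = hparams


class ToyClassifier(DatasetMixin, TrainMixin, BaseModule):
    pass


model = ToyClassifier(hparams={})
print(model.build_dataset())  # "dataset for ToyClassifier"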