adjustment for CCS

Steffen Illium 2021-03-19 17:16:38 +01:00
parent 74a2603c79
commit 0a0a1cdcb5
5 changed files with 27 additions and 22 deletions

View File

@@ -19,9 +19,6 @@ sr = 16000
 hop_length = 128
 n_fft = 256
-sample_segment_len=50
-sample_hop_len=20
-random_apply_chance = 0.7
 loudness_ratio = 0.0
 shift_ratio = 0.3
@@ -51,7 +48,7 @@ use_norm = True
 dropout = 0.2
 lat_dim = 32
 features = 64
-filters = [16, 32, 64, 128]
+filters = [16, 32, 64]
 [VisualTransformer]
 weight_init = xavier_normal_
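The shortened filters list drops the deepest convolution stage. A toy reading of that change, assuming (the model code is not shown in this hunk) that CNNBaseline builds one conv stage per list entry:

```python
# Hypothetical sketch: one conv stage per filters entry; dropping 128
# removes the deepest stage. Kernel size and pooling are illustrative.
import torch.nn as nn

def conv_stack(filters, in_channels=1):
    layers, c_in = [], in_channels
    for c_out in filters:
        layers += [nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
                   nn.ReLU(), nn.MaxPool2d(2)]
        c_in = c_out
    return nn.Sequential(*layers)

print(conv_stack([16, 32, 64]))  # three stages instead of four
```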

View File

@@ -14,10 +14,12 @@ class CNNBaseline(CombinedModelMixins,
                   LightningBaseModule
                   ):
-    def __init__(self, in_shape, n_classes, weight_init, activation, use_bias, use_norm, dropout, lat_dim, features,
+    def __init__(self, in_shape, n_classes, weight_init, activation,
+                 use_bias, use_norm, dropout, lat_dim, features,
                  filters,
                  lr, weight_decay, sto_weight_avg, lr_warm_restart_epochs, opt_reset_interval,
-                 loss, scheduler):
+                 loss, scheduler, lr_scheduler_parameter
+                 ):
         # TODO: Move this to parent class, or make it much easier to access....
         a = dict(locals())
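The `a = dict(locals())` line that the TODO refers to snapshots every constructor argument in one go. A minimal standalone sketch of the pattern, with illustrative names:

```python
# Minimal sketch of the locals()-capture pattern (names are illustrative;
# in the repo a mixin presumably stores these as self.params).
class Model:
    def __init__(self, lr, dropout, lat_dim):
        # dict(locals()) taken first thing in __init__ captures every
        # constructor argument (plus 'self', which we drop) at once.
        params = dict(locals())
        params.pop('self')
        self.params = params

m = Model(lr=1e-3, dropout=0.2, lat_dim=32)
print(m.params)  # {'lr': 0.001, 'dropout': 0.2, 'lat_dim': 32}
```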

View File

@@ -21,7 +21,7 @@ class VisualTransformer(CombinedModelMixins,
                         ):
     def __init__(self, in_shape, n_classes, weight_init, activation,
-                 embedding_size, heads, attn_depth, patch_size, use_residual,
+                 embedding_size, heads, attn_depth, patch_size, use_residual, variable_length,
                  use_bias, use_norm, dropout, lat_dim, loss, scheduler, mlp_dim, head_dim,
                  lr, weight_decay, sto_weight_avg, lr_scheduler_parameter, opt_reset_interval):
@@ -88,15 +88,18 @@ class VisualTransformer(CombinedModelMixins,
         tensor = self.autopad(x)
         p = self.params.patch_size
-        tensor = rearrange(tensor, 'b c (h p1) (w p2) -> b (w h) (p1 p2 c)', p1=p, p2=p)
+        tensor = rearrange(tensor, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
         b, n, _ = tensor.shape
+        if self.params.variable_length and mask is None:
             # mask
             lengths = torch.count_nonzero(tensor, dim=-1)
             mask = (lengths == torch.zeros_like(lengths))
             # CLS-token awareness
             # mask = torch.cat((torch.zeros(b, 1), mask), dim=-1)
             # mask = repeat(mask, 'b n -> b n', h=self.params.heads)
+        else:
+            mask = mask
         tensor = self.patch_to_embedding(tensor)
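Two changes land here: the patch enumeration order flips from column-major (`(w h)`) to row-major (`(h w)`), and, when `variable_length` is set, a padding mask is derived from all-zero patches. A self-contained sketch with toy shapes:

```python
# Sketch of the patch-ordering fix and the padding mask, using einops/torch
# directly. Shapes are illustrative, not the model's real configuration.
import torch
from einops import rearrange

x = torch.randn(2, 1, 8, 8)                      # (batch, channel, height, width)
p = 4                                            # patch size
# '(h w)' enumerates patches row by row; the previous '(w h)' walked them
# column by column, changing the sequence order the transformer sees.
patches = rearrange(x, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
print(patches.shape)                             # torch.Size([2, 4, 16])

# A patch that comes purely from zero padding is all zeros, so counting
# non-zero entries per patch flags the padded sequence positions.
lengths = torch.count_nonzero(patches, dim=-1)   # (b, n)
mask = (lengths == torch.zeros_like(lengths))    # True where the patch is padding
```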

View File

@@ -14,21 +14,24 @@ if __name__ == '__main__':
         model_name=['VisualTransformer'],
         batch_size=[50],
         max_epochs=[200],
-        random_apply_chance=[0.3],  # trial.suggest_float('random_apply_chance', 0.1, 0.5, step=0.1),
+        variable_length=[False],
+        sample_segment_len=[40],
+        sample_hop_len=[15],
+        random_apply_chance=[0.5],  # trial.suggest_float('random_apply_chance', 0.1, 0.5, step=0.1),
         loudness_ratio=[0],  # trial.suggest_float('loudness_ratio', 0.0, 0.5, step=0.1),
         shift_ratio=[0.3],  # trial.suggest_float('shift_ratio', 0.0, 0.5, step=0.1),
         noise_ratio=[0.3],  # trial.suggest_float('noise_ratio', 0.0, 0.5, step=0.1),
         mask_ratio=[0.3],  # trial.suggest_float('mask_ratio', 0.0, 0.5, step=0.1),
-        lr=[2e-3],  # trial.suggest_uniform('lr', 1e-3, 3e-3),
+        lr=[1e-3],  # trial.suggest_uniform('lr', 1e-3, 3e-3),
         dropout=[0.2],  # trial.suggest_float('dropout', 0.0, 0.3, step=0.05),
         lat_dim=[32],  # 2 ** trial.suggest_int('lat_dim', 1, 5, step=1),
         mlp_dim=[16],  # 2 ** trial.suggest_int('mlp_dim', 1, 5, step=1),
         head_dim=[6],  # 2 ** trial.suggest_int('head_dim', 1, 5, step=1),
         patch_size=[12],  # trial.suggest_int('patch_size', 6, 12, step=3),
-        attn_depth=[10],  # trial.suggest_int('attn_depth', 2, 14, step=4),
+        attn_depth=[12],  # trial.suggest_int('attn_depth', 2, 14, step=4),
         heads=[6],  # trial.suggest_int('heads', 2, 16, step=2),
-        scheduler=['CosineAnnealingWarmRestarts'],  # trial.suggest_categorical('scheduler', [None, 'LambdaLR']),
-        lr_scheduler_parameter=[5],  # [0.98],
+        scheduler=['LambdaLR'],  # trial.suggest_categorical('scheduler', [None, 'LambdaLR']),
+        lr_scheduler_parameter=[0.95],  # [0.98],
         embedding_size=[30],  # trial.suggest_int('embedding_size', 12, 64, step=12),
         loss=['ce_loss'],
         sampler=['WeightedRandomSampler'],
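The scheduler swap trades cosine warm restarts for a plain decaying LambdaLR. Assuming `lr_scheduler_parameter` is fed to LambdaLR as the per-epoch decay base (and was `T_0` for the old cosine variant), which this hunk does not show, the two settings compare roughly like this:

```python
# Hedged sketch of the old vs. new scheduler settings; how the project wires
# lr_scheduler_parameter into the scheduler is an assumption here.
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=1e-3)

# Old setting: cosine annealing with a warm restart every 5 epochs.
# sched = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(opt, T_0=5)

# New setting: exponential-style decay, lr_epoch = lr0 * 0.95 ** epoch.
sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda epoch: 0.95 ** epoch)

for epoch in range(3):
    opt.step()
    sched.step()
    print(epoch, sched.get_last_lr())  # [0.00095], [0.0009025], ...
```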
@@ -40,7 +43,7 @@ if __name__ == '__main__':
     permutations_dicts = [dict(zip(keys, v)) for v in itertools.product(*values)]
     for permutations_dict in tqdm(permutations_dicts, total=len(permutations_dicts)):
         # Parse commandline args, read config and get model
-        cmd_args, found_data_class, found_model_class = parse_comandline_args_add_defaults(
+        cmd_args, found_data_class, found_model_class, found_seed = parse_comandline_args_add_defaults(
             '_parameters.ini', overrides=permutations_dict)
         hparams = dict(**cmd_args)
@@ -50,6 +53,6 @@ if __name__ == '__main__':
         # RUN
         # ---------------------------------------
         print(f'Running Loop, parameters are: {permutations_dict}')
-        run_lightning_loop(hparams, found_data_class, found_model_class)
+        run_lightning_loop(hparams, found_data_class, found_model_class, seed=found_seed)
         print(f'Done, parameters were: {permutations_dict}')
         pass
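For reference, the `permutations_dicts` expansion in the loop above is a plain Cartesian product over the option lists; a toy version:

```python
# Toy version of the grid expansion used above: a dict of option lists
# becomes one override dict per combination via itertools.product.
import itertools

grid = dict(lr=[1e-3, 2e-3], heads=[4, 6])
keys, values = zip(*grid.items())
permutations_dicts = [dict(zip(keys, v)) for v in itertools.product(*values)]
print(permutations_dicts)
# [{'lr': 0.001, 'heads': 4}, {'lr': 0.001, 'heads': 6},
#  {'lr': 0.002, 'heads': 4}, {'lr': 0.002, 'heads': 6}]
```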

View File

@@ -148,7 +148,7 @@ class TestMixin:
         class_names = {val: key for val, key in enumerate(['background', 'chimpanze', 'geunon', 'mandrille', 'redcap'])}
         df = pd.DataFrame(data=dict(filename=[Path(x).name for x in sorted_y.keys()],
-                                    prediction=y_max.cpu().numpy()))
+                                    prediction=[class_names[x.item()] for x in y_max.cpu()]))
         result_file = Path(self.logger.log_dir / 'predictions.csv')
         if result_file.exists():
             try:
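The CSV now stores label strings instead of raw argmax indices. A self-contained sketch of the mapping, with illustrative filenames and predictions:

```python
# Standalone sketch of the prediction-naming change: argmax indices are
# mapped to label strings before writing the CSV (labels from the snippet;
# filenames and the y_max tensor are made up for illustration).
import pandas as pd
import torch

class_names = {val: key for val, key in enumerate(
    ['background', 'chimpanze', 'geunon', 'mandrille', 'redcap'])}

y_max = torch.tensor([0, 3, 1])            # pretend argmax over class logits
df = pd.DataFrame(dict(
    filename=['a.wav', 'b.wav', 'c.wav'],
    prediction=[class_names[x.item()] for x in y_max.cpu()]))
print(df)  # predictions column holds 'background', 'mandrille', 'chimpanze'
```

One note on the unchanged context line: `Path(self.logger.log_dir / 'predictions.csv')` only works if `log_dir` is already a `pathlib.Path`; with a plain string the `/` operator would raise a TypeError.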