adjustment for CCS
parent 74a2603c79
commit 0a0a1cdcb5
@@ -19,9 +19,6 @@ sr = 16000
 hop_length = 128
 n_fft = 256
 
-sample_segment_len=50
-sample_hop_len=20
-
 random_apply_chance = 0.7
 loudness_ratio = 0.0
 shift_ratio = 0.3
@@ -51,7 +48,7 @@ use_norm = True
 dropout = 0.2
 lat_dim = 32
 features = 64
-filters = [16, 32, 64, 128]
+filters = [16, 32, 64]
 
 [VisualTransformer]
 weight_init = xavier_normal_
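For orientation, a minimal sketch of how an INI file like this is typically loaded; the file name _parameters.ini is taken from the multi_run.py call further down, while the CNNBaseline section name and the use of the standard library's configparser with ast.literal_eval are assumptions for illustration.

# Hypothetical config reader; section/key names mirror the hunks above.
from ast import literal_eval
from configparser import ConfigParser

config = ConfigParser()
config.read('_parameters.ini')

filters = literal_eval(config['CNNBaseline']['filters'])  # "[16, 32, 64]" -> [16, 32, 64]
dropout = config['CNNBaseline'].getfloat('dropout')       # -> 0.2
weight_init = config['VisualTransformer']['weight_init']  # -> 'xavier_normal_'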
@@ -14,10 +14,12 @@ class CNNBaseline(CombinedModelMixins,
                   LightningBaseModule
                   ):
 
-    def __init__(self, in_shape, n_classes, weight_init, activation, use_bias, use_norm, dropout, lat_dim, features,
+    def __init__(self, in_shape, n_classes, weight_init, activation,
+                 use_bias, use_norm, dropout, lat_dim, features,
                  filters,
                  lr, weight_decay, sto_weight_avg, lr_warm_restart_epochs, opt_reset_interval,
-                 loss, scheduler):
+                 loss, scheduler, lr_scheduler_parameter
+                 ):
 
         # TODO: Move this to parent class, or make it much easieer to access....
         a = dict(locals())
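The a = dict(locals()) line that the TODO complains about captures every named constructor argument in a single dict, since locals() at the top of __init__ contains exactly the bound parameters. A self-contained sketch of the idiom:

# Demonstrates the locals() capture used above, outside the project code.
class Model:
    def __init__(self, lr, dropout, lat_dim):
        a = dict(locals())  # {'self': ..., 'lr': ..., 'dropout': ..., 'lat_dim': ...}
        a.pop('self')       # the instance reference is not a hyperparameter
        self.params = a

m = Model(lr=1e-3, dropout=0.2, lat_dim=32)
print(m.params)  # {'lr': 0.001, 'dropout': 0.2, 'lat_dim': 32}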
@@ -21,7 +21,7 @@ class VisualTransformer(CombinedModelMixins,
                         ):
 
     def __init__(self, in_shape, n_classes, weight_init, activation,
-                 embedding_size, heads, attn_depth, patch_size, use_residual,
+                 embedding_size, heads, attn_depth, patch_size, use_residual, variable_length,
                  use_bias, use_norm, dropout, lat_dim, loss, scheduler, mlp_dim, head_dim,
                  lr, weight_decay, sto_weight_avg, lr_scheduler_parameter, opt_reset_interval):
 
@@ -88,15 +88,18 @@ class VisualTransformer(CombinedModelMixins,
         tensor = self.autopad(x)
         p = self.params.patch_size
 
-        tensor = rearrange(tensor, 'b c (h p1) (w p2) -> b (w h) (p1 p2 c)', p1=p, p2=p)
+        tensor = rearrange(tensor, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
         b, n, _ = tensor.shape
 
-        # mask
-        lengths = torch.count_nonzero(tensor, dim=-1)
-        mask = (lengths == torch.zeros_like(lengths))
-        # CLS-token awareness
-        # mask = torch.cat((torch.zeros(b, 1), mask), dim=-1)
-        # mask = repeat(mask, 'b n -> b n', h=self.params.heads)
+        if self.params.variable_length and mask is None:
+            # mask
+            lengths = torch.count_nonzero(tensor, dim=-1)
+            mask = (lengths == torch.zeros_like(lengths))
+            # CLS-token awareness
+            # mask = torch.cat((torch.zeros(b, 1), mask), dim=-1)
+            # mask = repeat(mask, 'b n -> b n', h=self.params.heads)
+        else:
+            mask = mask
 
         tensor = self.patch_to_embedding(tensor)
 
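Two fixes land in this hunk: the einops pattern now orders patches row-major, (h w), instead of column-major, (w h), and the zero-patch padding mask is only computed when variable_length is enabled and no mask was passed in. A toy reproduction of both behaviors (shapes are illustrative, not the model's real configuration):

# Toy check of patch ordering and of the count_nonzero padding mask.
import torch
from einops import rearrange

x = torch.arange(2 * 2 * 4 * 4, dtype=torch.float32).reshape(2, 2, 4, 4)  # b c h w
col_first = rearrange(x, 'b c (h p1) (w p2) -> b (w h) (p1 p2 c)', p1=2, p2=2)
row_first = rearrange(x, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=2, p2=2)
assert not torch.equal(col_first, row_first)  # same patches, different sequence order

# A patch that is entirely zero (autopad filler) is flagged True in the mask.
patches = torch.cat([row_first[0], torch.zeros(2, 8)])  # append two padded patches
lengths = torch.count_nonzero(patches, dim=-1)
mask = (lengths == torch.zeros_like(lengths))
print(mask)  # True only for the two all-zero rows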
multi_run.py (17 changes)
@@ -14,21 +14,24 @@ if __name__ == '__main__':
         model_name=['VisualTransformer'],
         batch_size=[50],
         max_epochs=[200],
-        random_apply_chance=[0.3], # trial.suggest_float('random_apply_chance', 0.1, 0.5, step=0.1),
+        variable_length=[False],
+        sample_segment_len=[40],
+        sample_hop_len=[15],
+        random_apply_chance=[0.5], # trial.suggest_float('random_apply_chance', 0.1, 0.5, step=0.1),
         loudness_ratio=[0], # trial.suggest_float('loudness_ratio', 0.0, 0.5, step=0.1),
         shift_ratio=[0.3], # trial.suggest_float('shift_ratio', 0.0, 0.5, step=0.1),
         noise_ratio=[0.3], # trial.suggest_float('noise_ratio', 0.0, 0.5, step=0.1),
         mask_ratio=[0.3], # trial.suggest_float('mask_ratio', 0.0, 0.5, step=0.1),
-        lr=[2e-3], # trial.suggest_uniform('lr', 1e-3, 3e-3),
+        lr=[1e-3], # trial.suggest_uniform('lr', 1e-3, 3e-3),
         dropout=[0.2], # trial.suggest_float('dropout', 0.0, 0.3, step=0.05),
         lat_dim=[32], # 2 ** trial.suggest_int('lat_dim', 1, 5, step=1),
         mlp_dim=[16], # 2 ** trial.suggest_int('mlp_dim', 1, 5, step=1),
         head_dim=[6], # 2 ** trial.suggest_int('head_dim', 1, 5, step=1),
         patch_size=[12], # trial.suggest_int('patch_size', 6, 12, step=3),
-        attn_depth=[10], # trial.suggest_int('attn_depth', 2, 14, step=4),
+        attn_depth=[12], # trial.suggest_int('attn_depth', 2, 14, step=4),
         heads=[6], # trial.suggest_int('heads', 2, 16, step=2),
-        scheduler=['CosineAnnealingWarmRestarts'], # trial.suggest_categorical('scheduler', [None, 'LambdaLR']),
-        lr_scheduler_parameter=[5], # [0.98],
+        scheduler=['LambdaLR'], # trial.suggest_categorical('scheduler', [None, 'LambdaLR']),
+        lr_scheduler_parameter=[0.95], # [0.98],
         embedding_size=[30], # trial.suggest_int('embedding_size', 12, 64, step=12),
         loss=['ce_loss'],
         sampler=['WeightedRandomSampler'],
@@ -40,7 +43,7 @@ if __name__ == '__main__':
     permutations_dicts = [dict(zip(keys, v)) for v in itertools.product(*values)]
     for permutations_dict in tqdm(permutations_dicts, total=len(permutations_dicts)):
         # Parse comandline args, read config and get model
-        cmd_args, found_data_class, found_model_class = parse_comandline_args_add_defaults(
+        cmd_args, found_data_class, found_model_class, found_seed = parse_comandline_args_add_defaults(
             '_parameters.ini', overrides=permutations_dict)
 
         hparams = dict(**cmd_args)
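The grid expansion here is plain itertools.product over the value lists of the parameter dict above; each combination becomes one flat override dict. In isolation:

# Grid expansion in isolation: dict of lists in, one dict per combination out.
import itertools

grid = dict(lr=[1e-3, 2e-3], heads=[4, 6], scheduler=['LambdaLR'])
keys, values = zip(*grid.items())
permutations_dicts = [dict(zip(keys, v)) for v in itertools.product(*values)]
print(len(permutations_dicts))  # 2 * 2 * 1 = 4
print(permutations_dicts[0])    # {'lr': 0.001, 'heads': 4, 'scheduler': 'LambdaLR'}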
@@ -50,6 +53,6 @@ if __name__ == '__main__':
         # RUN
         # ---------------------------------------
         print(f'Running Loop, parameters are: {permutations_dict}')
-        run_lightning_loop(hparams, found_data_class, found_model_class)
+        run_lightning_loop(hparams, found_data_class, found_model_class, seed=found_seed)
         print(f'Done, parameters were: {permutations_dict}')
         pass
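found_seed is now returned by parse_comandline_args_add_defaults and forwarded to run_lightning_loop, so every grid run starts from a known RNG state. What the callee does with the seed is not shown in this diff; a hedged sketch, assuming PyTorch Lightning's seed_everything helper and a made-up fallback value:

# Hypothetical receiving side of the new seed argument (not project code).
from pytorch_lightning import seed_everything

def run_lightning_loop_sketch(hparams, data_class, model_class, seed=None):
    seed_everything(seed if seed is not None else 42)  # seeds python, numpy and torch RNGs
    # ... build data_class/model_class from hparams and fit ...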
@@ -148,7 +148,7 @@ class TestMixin:
         class_names = {val: key for val, key in enumerate(['background', 'chimpanze', 'geunon', 'mandrille', 'redcap'])}
 
         df = pd.DataFrame(data=dict(filename=[Path(x).name for x in sorted_y.keys()],
-                                    prediction=y_max.cpu().numpy()))
+                                    prediction=[class_names[x.item()] for x in y_max.cpu()]))
         result_file = Path(self.logger.log_dir / 'predictions.csv')
         if result_file.exists():
             try:
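The changed line turns raw argmax indices into readable labels before they reach predictions.csv; despite the val/key naming, the enumerate comprehension builds an index-to-label map. In isolation:

# Index-to-label mapping as used in the new prediction column.
import torch

class_names = {val: key for val, key in enumerate(['background', 'chimpanze', 'geunon', 'mandrille', 'redcap'])}
y_max = torch.tensor([0, 3, 1])  # e.g. argmax over class logits
print([class_names[x.item()] for x in y_max.cpu()])  # ['background', 'mandrille', 'chimpanze']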