inference restored

steffen 2020-05-16 08:18:27 +02:00
parent ca1599e17b
commit 511b8064cd
3 changed files with 14 additions and 19 deletions


@@ -32,11 +32,11 @@ main_arg_parser.add_argument("--data_stretch", type=strtobool, default=True, hel
 # Transformation Parameters
 main_arg_parser.add_argument("--data_loudness_ratio", type=float, default=0, help="")  # 0.4
-main_arg_parser.add_argument("--data_shift_ratio", type=float, default=0, help="")  # 0.3
+main_arg_parser.add_argument("--data_shift_ratio", type=float, default=0.3, help="")  # 0.3
 main_arg_parser.add_argument("--data_noise_ratio", type=float, default=0, help="")  # 0.4
 main_arg_parser.add_argument("--data_mask_ratio", type=float, default=0, help="")  # 0.2
-main_arg_parser.add_argument("--data_speed_ratio", type=float, default=0.3, help="")  # 0.3
-main_arg_parser.add_argument("--data_speed_factor", type=float, default=0.7, help="")  # 0.7
+main_arg_parser.add_argument("--data_speed_ratio", type=float, default=0, help="")  # 0.3
+main_arg_parser.add_argument("--data_speed_factor", type=float, default=0, help="")  # 0.7
 # Model Parameters
 main_arg_parser.add_argument("--model_type", type=str, default="RCC", help="")


@@ -27,7 +27,6 @@ from datasets.binar_masks import BinaryMasksDataset
 def prepare_dataloader(config_obj):
     mel_transforms = Compose([
-        Speed(0, 0),
         AudioToMel(sr=config_obj.data.sr, n_mels=config_obj.data.n_mels, n_fft=config_obj.data.n_fft,
                    hop_length=config_obj.data.hop_length),
         MelToImage()])
@@ -40,16 +39,15 @@ def prepare_dataloader(config_obj):
         NormalizeLocal(), ToTensor()
     ])
-    dataset: Dataset = BinaryMasksDataset(config_obj.data.root, setting='train',
-                                          mel_transforms=mel_transforms, transforms=aug_transforms
+    dataset: Dataset = BinaryMasksDataset(config_obj.data.root, setting='test',
+                                          mel_transforms=mel_transforms, transforms=transforms
                                           )
     # noinspection PyTypeChecker
     return DataLoader(dataset, batch_size=None, num_workers=0, shuffle=False)

-def restore_logger_and_model(config_obj):
-    logger = Logger(config_obj)
-    model = SavedLightningModels.load_checkpoint(models_root_path=logger.log_dir, n=-2)
+def restore_logger_and_model(log_dir):
+    model = SavedLightningModels.load_checkpoint(models_root_path=log_dir, n=-2)
     model = model.restore()
     if torch.cuda.is_available():
         model.cuda()
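After this change restore_logger_and_model only needs the directory that holds the saved checkpoints, not a config object or Logger. A rough usage sketch under that assumption (the path and sample_batch below are illustrative placeholders):

    from pathlib import Path
    import torch

    log_dir = Path('output/CC/CC_fd2020a7ead9d5c80609a7364741f24b/version_40')  # illustrative path

    model = restore_logger_and_model(log_dir)  # loads the n=-2 checkpoint, moves it to GPU if available
    model.eval()                               # switch off dropout / batch-norm updates for inference
    with torch.no_grad():                      # no gradients are needed when only predicting
        prediction = model(sample_batch)       # sample_batch: a placeholder input batch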
@@ -63,23 +61,18 @@ if __name__ == '__main__':
-    model_type = 'CC'
-    parameters = 'CC_213adb16e46592c5a405abfbd693835e/'
-    version = 'version_41'
+    model_path = Path('/home/steffen/projects/inter_challenge_2020/output/CC/CC_fd2020a7ead9d5c80609a7364741f24b/version_40')
     config_filename = 'config.ini'
     inference_out = 'manual_test_out.csv'
     config = MConfig()
-    config.read_file((outpath / model_type / parameters / version / config_filename).open('r'))
+    config.read_file((Path(model_path) / config_filename).open('r'))
     test_dataloader = prepare_dataloader(config)
     p = Plotter(outpath)
     from matplotlib import pyplot as plt
     d = test_dataloader.dataset[100][0].squeeze()
     plt.imshow(d)
     p.save_current_figure('100')
-    loaded_model = restore_logger_and_model(config)
+    loaded_model = restore_logger_and_model(model_path)
     loaded_model.eval()
-    with (outpath / model_type / parameters / version / inference_out).open(mode='w') as outfile:
+    with (model_path / inference_out).open(mode='w') as outfile:
         outfile.write(f'file_name,prediction\n')
         for batch in tqdm(test_dataloader, total=len(test_dataloader)):
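The body of this loop lies outside the hunk. As a hedged sketch only, a CSV-writing inference loop of this shape typically looks something like the following; the batch layout, the .cuda() handling and the decision rule are assumptions, not the repository's code:

    for batch in tqdm(test_dataloader, total=len(test_dataloader)):
        batch_x, file_name = batch                    # assumed layout: (features, file name)
        if torch.cuda.is_available():
            batch_x = batch_x.cuda()                  # keep the data on the same device as the model
        with torch.no_grad():
            y = loaded_model(batch_x)                 # raw model output; exact shape is model-specific
        prediction = y.argmax(-1).item()              # placeholder decision rule
        outfile.write(f'{file_name},{prediction}\n')  # one "file_name,prediction" row per sample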


@@ -16,6 +16,8 @@ if __name__ == '__main__':
             continue
         out_file = (model_path / metric_file_name)
         for paramter_configuration in model_path.iterdir():
+            if not model_path.is_dir():
+                continue
             uar_scores = defaultdict(list)
             for metric_file in paramter_configuration.rglob(metric_file_name):
                 with metric_file.open('r') as f:
@@ -37,7 +39,7 @@ if __name__ == '__main__':
                     metric_dict[header].append(value)
             for score, func in zip(['mean', 'max', 'median', 'std'], [np.mean, np.max, np.median, np.std]):
                 try:
-                    uar_scores[score].append(func(np.asarray(metric_dict['uar_score'])).round(2))
+                    uar_scores[score].append(round(func(np.asarray(metric_dict['uar_score'])) * 100, 2))
                 except ValueError as e:
                     print(e)
                     pass
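The last change switches the aggregated UAR from a raw fraction rounded to two decimals to a percentage rounded to two decimals. A small worked example of the difference (the scores are illustrative values):

    import numpy as np

    uar = np.asarray([0.612, 0.637, 0.661])   # example per-run UAR scores
    old_style = np.mean(uar).round(2)         # previous code:  0.64
    new_style = round(np.mean(uar) * 100, 2)  # this commit:   63.67
    print(old_style, new_style)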