Debugging: validation and testing

This commit is contained in:
Si11ium
2020-03-10 08:40:40 +01:00
parent 48f4d84bf3
commit 21e7e31805
3 changed files with 10 additions and 11 deletions

View File

@@ -57,11 +57,11 @@ class TrajDataset(Dataset):
if self.mode.lower() in ['all_in_map', 'separated_arrays']:
map_array = torch.as_tensor(self.map.as_array).float()
if self.mode == 'separated_arrays':
return (map_array, trajectory.draw_in_array(self.map_shape), int(label)), \
alternative.draw_in_array(self.map_shape)
return (map_array, torch.as_tensor(trajectory.draw_in_array(self.map_shape)).float(), int(label)), \
torch.as_tensor(alternative.draw_in_array(self.map_shape)).float()
else:
return torch.cat((map_array, trajectory.draw_in_array(self.map_shape),
alternative.draw_in_array(self.map_shape))), int(label)
return torch.cat((map_array, torch.as_tensor(trajectory.draw_in_array(self.map_shape)).float(),
torch.as_tensor(alternative.draw_in_array(self.map_shape)).float())), int(label)
elif self.mode == 'vectors':
return trajectory.vertices, alternative.vertices, label, self.mapname

View File

@@ -78,14 +78,14 @@ class CNNRouteGeneratorModel(LightningBaseModule):
roc_auc, tpr, fpr = evaluation(labels.cpu().numpy(), pred_label.cpu().numpy(), )
if test:
# self.logger.log_metrics(score_dict)
self.logger.log_image(f'{self.name}_ROC-Curve_E{self.current_epoch}', plt.gcf())
self.logger.log_image(f'{self.name}_ROC-Curve', plt.gcf())
plt.clf()
maps, trajectories, labels, val_restul_dict = self.generate_random()
from lib.visualization.generator_eval import GeneratorVisualizer
g = GeneratorVisualizer(maps, trajectories, labels, val_restul_dict)
fig = g.draw()
self.logger.log_image(f'{self.name}_Output_E{self.current_epoch}', fig)
self.logger.log_image(f'{self.name}_Output', fig)
return dict(mean_losses=mean_losses, roc_auc=roc_auc, epoch=self.current_epoch)

View File

@@ -28,12 +28,12 @@ main_arg_parser = ArgumentParser(description="parser for fast-neural-style")
# Main Parameters
main_arg_parser.add_argument("--main_debug", type=strtobool, default=False, help="")
main_arg_parser.add_argument("--main_eval", type=strtobool, default=True, help="")
main_arg_parser.add_argument("--main_eval", type=strtobool, default=False, help="")
main_arg_parser.add_argument("--main_seed", type=int, default=69, help="")
# Data Parameters
main_arg_parser.add_argument("--data_worker", type=int, default=10, help="")
main_arg_parser.add_argument("--data_dataset_length", type=int, default=100000, help="")
main_arg_parser.add_argument("--data_dataset_length", type=int, default=10000, help="")
main_arg_parser.add_argument("--data_root", type=str, default='data', help="")
main_arg_parser.add_argument("--data_map_root", type=str, default='res/shapes', help="")
@@ -43,7 +43,7 @@ main_arg_parser.add_argument("--transformations_to_tensor", type=strtobool, defa
# Transformations
main_arg_parser.add_argument("--train_outpath", type=str, default="output", help="")
main_arg_parser.add_argument("--train_version", type=strtobool, required=False, help="")
main_arg_parser.add_argument("--train_epochs", type=int, default=12, help="")
main_arg_parser.add_argument("--train_epochs", type=int, default=10, help="")
main_arg_parser.add_argument("--train_batch_size", type=int, default=256, help="")
main_arg_parser.add_argument("--train_lr", type=float, default=0.002, help="")
@@ -123,8 +123,7 @@ def run_lightning_loop(config_obj):
model.save_to_disk(logger.log_dir)
# Evaluate It
if config_obj.main.eval:
trainer.test()
trainer.test()
return model