bugs fixed, binary datasets working
@@ -96,39 +96,27 @@ class ValMixin:
        for file_name in sorted_y:
            sorted_y.update({file_name: torch.stack(sorted_y[file_name])})

        #y_mean = torch.stack(
        #    [torch.mean(x, dim=0, keepdim=True) if x.shape[0] > 1 else x for x in sorted_y.values()]
        #).squeeze()

        #if y_mean.ndim == 1:
        #    y_mean = y_mean.unsqueeze(0)
        #if sorted_batch_y.ndim == 1:
        #    sorted_batch_y = sorted_batch_y.unsqueeze(-1)
        #
        #mean_vote_loss = self.ce_loss(y_mean, sorted_batch_y)
        #summary_dict.update(val_mean_vote_loss=mean_vote_loss)

        if self.params.n_classes <= 2:
            mean_sorted_y = torch.stack([x.mean(dim=0) if x.shape[0] > 1 else x for x in sorted_y.values()])
            mean_sorted_y = torch.stack([x.mean(dim=0) if x.shape[0] > 1 else x for x in sorted_y.values()]).squeeze()
            max_vote_loss = self.bce_loss(mean_sorted_y.float(), sorted_batch_y.float())
            # Sklearn Scores
            additional_scores = self.additional_scores(dict(y=mean_sorted_y, batch_y=sorted_batch_y))

        else:
            y_max = torch.stack(
                [torch.argmax(x.mean(dim=0)) if x.shape[0] > 1 else torch.argmax(x) for x in sorted_y.values()]
            ).squeeze()
            y_one_hot = torch.nn.functional.one_hot(y_max, num_classes=self.params.n_classes).float()
            max_vote_loss = self.ce_loss(y_one_hot, sorted_batch_y)
            summary_dict.update(val_max_vote_loss=max_vote_loss)
            # Sklearn Scores
            additional_scores = self.additional_scores(dict(y=y_one_hot, batch_y=sorted_batch_y))

        summary_dict.update(val_max_vote_loss=max_vote_loss, **additional_scores)

        summary_dict.update({f'mean_{key}': torch.mean(torch.stack([output[key]
                                                                    for output in outputs]))
                             for key in keys if 'loss' in key}
                            )
        # Sklearn Scores
        if self.params.n_classes <= 2:
            additional_scores = self.additional_scores(dict(y=y_max, batch_y=sorted_batch_y))
        else:
            additional_scores = self.additional_scores(dict(y=y_one_hot, batch_y=sorted_batch_y))
        summary_dict.update(**additional_scores)

        pl_metrics, pl_images = self.metrics.compute_and_prepare()
        self.metrics.reset()
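The ValMixin hunk above averages the per-window predictions of each file and scores that "vote" against one label per file. Below is a minimal standalone sketch of that aggregation, not the repository's code: it assumes sorted_y maps file names to lists of per-window prediction tensors (logits for the multi-class case, sigmoid scores for the binary case), and all file names, labels and shapes are made up for illustration.

# Sketch only: per-file mean-vote aggregation, under the assumptions stated above.
import torch

# --- multi-class case (e.g. n_classes = 5, per-window logits) ---
n_classes = 5
sorted_y = {
    'a.wav': [torch.randn(n_classes) for _ in range(3)],   # three windows for this file
    'b.wav': [torch.randn(n_classes)],                      # single window
}
sorted_batch_y = torch.tensor([2, 4])                       # one class label per file

y_mean = torch.stack([torch.stack(p).mean(dim=0)            # average the window logits
                      for p in sorted_y.values()])          # shape: (files, n_classes)
y_max = y_mean.argmax(dim=-1)                                # majority ("max") vote per file
ce_loss = torch.nn.functional.cross_entropy(y_mean, sorted_batch_y)

# --- binary case (single sigmoid score per window) ---
sorted_y_bin = {
    'c.wav': [torch.sigmoid(torch.randn(1)) for _ in range(4)],
    'd.wav': [torch.sigmoid(torch.randn(1))],
}
labels_bin = torch.tensor([1., 0.])
scores = torch.stack([torch.stack(p).mean(dim=0)             # mean vote per file
                      for p in sorted_y_bin.values()]).squeeze(-1)
bce_loss = torch.nn.functional.binary_cross_entropy(scores, labels_bin)

Averaging before the loss yields exactly one prediction per file, which is what lets the binary BCE path and the multi-class CE path in the hunk share the same per-file ground truth.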
@@ -166,19 +154,20 @@ class TestMixin:
        for file_name in sorted_y:
            sorted_y.update({file_name: torch.stack(sorted_y[file_name])})

        y_max = torch.stack(
            [torch.argmax(x.mean(dim=0)) if x.shape[0] > 1 else torch.argmax(x) for x in sorted_y.values()]
        ).squeeze().cpu()
        if self.params.n_classes == 5:

        if self.params.n_classes > 2:
            pred = torch.stack(
                [torch.argmax(x.mean(dim=0)) if x.shape[0] > 1 else torch.argmax(x) for x in sorted_y.values()]
            ).squeeze().cpu()
            class_names = {val: key for val, key in
                           enumerate(['background', 'chimpanze', 'geunon', 'mandrille', 'redcap'])}
        elif self.params.n_classes == 2:
            class_names = {val: key for val, key in enumerate(['negative', 'positive'])}
        else:
            raise AttributeError('n_classes has to be any of: [2, 5]')
            pred = torch.stack([x.mean(dim=0) if x.shape[0] > 1 else x for x in sorted_y.values()]).squeeze()
            class_names = {val: key for val, key in enumerate(['negative', 'positive'])}

        df = pd.DataFrame(data=dict(filename=[Path(x).name for x in sorted_y.keys()],
                                    prediction=[class_names[x.item()] for x in y_max.cpu()]))
                                    prediction=[class_names[x.item()] for x in pred.cpu()]))
        result_file = Path(self.logger.log_dir / 'predictions.csv')
        if result_file.exists():
            try:
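For the TestMixin hunk, here is a hedged sketch of how the prediction CSV can be assembled for the binary case; the 0.5 threshold, the dummy scores and the output path are assumptions, not taken from the commit. The point it illustrates is that a mean sigmoid score has to be turned into an integer index before it can look up a class name.

# Sketch only: per-file binary predictions written to a CSV, assumptions as stated above.
from pathlib import Path
import pandas as pd
import torch

file_scores = {'rec_001.wav': torch.tensor(0.83),    # mean sigmoid score per file
               'rec_002.wav': torch.tensor(0.12)}

class_names = dict(enumerate(['negative', 'positive']))
# Threshold the probability to get an integer index into the name mapping.
pred = (torch.stack(list(file_scores.values())) > 0.5).long()

df = pd.DataFrame(dict(filename=[Path(x).name for x in file_scores],
                       prediction=[class_names[p.item()] for p in pred]))

result_file = Path('logs') / 'predictions.csv'        # assumed output location
result_file.parent.mkdir(parents=True, exist_ok=True)
df.to_csv(result_file, index=False)

The dict(enumerate([...])) mapping mirrors the multi-class branch of the hunk, where argmax already yields the integer index that keys the class-name lookup.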