Urban 8k Train running with newest Lightning and PyTorch

parent 93103aba01 → commit f6156c6cde
@@ -1,6 +1,6 @@
 from typing import Union

-import torch
+import numpy as np

 try:
     import librosa
@@ -53,7 +53,7 @@ class NormalizeLocal(object):
     def __repr__(self):
         return f'{self.__class__.__name__}({self.__dict__})'

-    def __call__(self, x: torch.Tensor):
+    def __call__(self, x: np.ndarray):
         mean = x.mean()
         std = x.std() + 0.0001

@@ -62,8 +62,8 @@ class NormalizeLocal(object):
         # Numpy Version
         x = (x - mean) / std

-        x[torch.isnan(x)] = 0
-        x[torch.isinf(x)] = 0
+        x[np.isnan(x)] = 0
+        x[np.isinf(x)] = 0

         return x
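For reference, the normalizer now works purely on numpy arrays. A minimal runnable sketch of the migrated class (only the lines visible in this diff are certain; the docstring and the usage line are mine):

```python
import numpy as np


class NormalizeLocal(object):
    """Zero-mean / unit-variance normalization over the whole spectrogram."""

    def __call__(self, x: np.ndarray) -> np.ndarray:
        mean = x.mean()
        std = x.std() + 0.0001      # epsilon keeps the division finite
        x = (x - mean) / std
        x[np.isnan(x)] = 0          # a constant input would otherwise yield NaN
        x[np.isinf(x)] = 0
        return x


mel = np.random.rand(64, 400).astype(np.float32)   # hypothetical mel spectrogram
normed = NormalizeLocal()(mel)
```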
@@ -76,13 +76,13 @@ class NormalizeMelband(object):
     def __repr__(self):
         return f'{self.__class__.__name__}({self.__dict__})'

-    def __call__(self, x: torch.Tensor):
+    def __call__(self, x: np.ndarray):
         mean = x.mean(-1).unsqueeze(-1)
         std = x.std(-1).unsqueeze(-1)

         x = x.__sub__(mean).__div__(std)
-        x[torch.isnan(x)] = 0
-        x[torch.isinf(x)] = 0
+        x[np.isnan(x)] = 0
+        x[np.isinf(x)] = 0
         return x
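One caveat worth flagging: the annotation now says `np.ndarray`, but `.unsqueeze(-1)` and `__div__` are torch / Python 2 idioms that a plain numpy array does not provide, so this method would raise AttributeError on real numpy input. A numpy-native equivalent (my sketch, not part of the commit) would be:

```python
import numpy as np


def normalize_melband(x: np.ndarray) -> np.ndarray:
    mean = x.mean(-1, keepdims=True)   # per-band mean over the time axis
    std = x.std(-1, keepdims=True)     # per-band std over the time axis
    x = (x - mean) / std
    x[np.isnan(x)] = 0                 # zero-variance bands divide by zero
    x[np.isinf(x)] = 0
    return x
```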
@@ -100,8 +100,6 @@ class LibrosaAudioToMel(object):
         self.power_to_db = power_to_db

     def __call__(self, y):
-        import numpy as np
-
         mel = librosa.feature.melspectrogram(y, **self.mel_kwargs)
         if self.amplitude_to_db:
             mel = librosa.amplitude_to_db(mel, ref=np.max)
@@ -121,7 +119,6 @@ class PowerToDB(object):
         return f'{self.__class__.__name__}({self.__dict__})'

     def __call__(self, x):
-        import numpy as np
         if self.running_max is not None:
             self.running_max = max(np.max(x), self.running_max)
         return librosa.power_to_db(x, ref=self.running_max)
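Both of these hunks simply hoist the function-local `import numpy as np` to the module top (see the first hunk). For context, a standalone sketch of the PowerToDB idea, assuming `running_max` is initialised to a small positive float in the `__init__` that the diff does not show: the loudest power seen so far becomes the dB reference, so successive frames are scaled consistently.

```python
import numpy as np
import librosa


class RunningMaxPowerToDB:
    """Sketch only: track a running maximum and use it as the dB reference."""

    def __init__(self):
        self.running_max = 1e-10        # assumed initial value

    def __call__(self, x: np.ndarray) -> np.ndarray:
        self.running_max = max(np.max(x), self.running_max)
        return librosa.power_to_db(x, ref=self.running_max)
```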
@@ -148,11 +145,11 @@ class MelToImage(object):

     def __call__(self, x):
         # Source to Solution: https://stackoverflow.com/a/57204349
-        mels = torch.log(x + 1e-9)  # add small number to avoid log(0)
+        mels = np.log(x + 1e-9)  # add small number to avoid log(0)

         # min-max scale to fit inside 8-bit range
-        img = scale_minmax(mels, 0, 255).int()
-        img = torch.flip(img, dims=(0,))  # put low frequencies at the bottom in image
-        img = torch.as_tensor(255) - img  # invert. make black==more energy
-        img = img.float()
+        img = scale_minmax(mels, 0, 255)
+        img = np.flip(img)  # put low frequencies at the bottom in image
+        img = 255 - img  # invert. make black==more energy
+        img = img.astype(np.float)
         return img
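A self-contained numpy sketch of the migrated pipeline, assuming `scale_minmax` is the usual min-max rescaler from the linked Stack Overflow answer. Two details deserve a second look: `np.flip(img)` without an axis flips every axis, whereas the old `torch.flip(img, dims=(0,))` flipped only the frequency axis (passing `axis=0` would mirror the previous behaviour), and `np.float` is deprecated since NumPy 1.20 in favour of `float` or an explicit `np.float32`.

```python
import numpy as np


def scale_minmax(x, lo=0.0, hi=1.0):
    # assumed implementation, following the referenced answer
    x_std = (x - x.min()) / (x.max() - x.min())
    return x_std * (hi - lo) + lo


def mel_to_image(x: np.ndarray) -> np.ndarray:
    mels = np.log(x + 1e-9)         # small offset avoids log(0)
    img = scale_minmax(mels, 0, 255)
    img = np.flip(img, axis=0)      # low frequencies at the bottom of the image
    img = 255 - img                 # invert: black == more energy
    return img.astype(np.float32)
```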
@@ -35,7 +35,7 @@ class _AudioToMelDataset(Dataset, ABC):
         self.audio_augmentations = audio_augmentations

         self.dataset = TorchMelDataset(self.mel_file_path, sample_segment_len, sample_hop_len, label,
-                                       self.audio_file_duration, mel_kwargs['sample_rate'], mel_kwargs['hop_length'],
+                                       self.audio_file_duration, mel_kwargs['sr'], mel_kwargs['hop_length'],
                                        mel_kwargs['n_mels'], transform=mel_augmentations)

     def _build_mel(self):
@@ -70,7 +70,7 @@ class LibrosaAudioToMelDataset(_AudioToMelDataset):
         audio_file_path = Path(audio_file_path)
         # audio_file, sampling_rate = librosa.load(self.audio_path, sr=sampling_rate)
         mel_kwargs = kwargs.get('mel_kwargs', dict())
-        mel_kwargs.update(sr=mel_kwargs.get('sr', None) or librosa.get_samplerate(self.audio_path))
+        mel_kwargs.update(sr=mel_kwargs.get('sr', None) or librosa.get_samplerate(audio_file_path))
         kwargs.update(mel_kwargs=mel_kwargs)

         super(LibrosaAudioToMelDataset, self).__init__(audio_file_path, *args, **kwargs)
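Both dataset fixes touch the same dictionary: librosa takes the sample rate under the key `sr`, so the dataset now reads `mel_kwargs['sr']` instead of a separate `'sample_rate'` entry, and `librosa.get_samplerate` is called on the local `audio_file_path` because `self.audio_path` does not exist until `super().__init__` has run. A hypothetical `mel_kwargs` showing the keys the code actually reads:

```python
# Values here are made up; the keys are the ones used in the diff.
mel_kwargs = dict(sr=22050, n_fft=1024, hop_length=256, n_mels=64)
# mel = librosa.feature.melspectrogram(y, **mel_kwargs)
```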
@@ -84,11 +84,14 @@ class LibrosaAudioToMelDataset(_AudioToMelDataset):
         if self.reset:
             self.mel_file_path.unlink(missing_ok=True)
         if not self.mel_file_path.exists():
+            lockfile = Path(str(self.mel_file_path).replace(self.mel_file_path.suffix, '.lock'))
             self.mel_file_path.parent.mkdir(parents=True, exist_ok=True)
+            lockfile.touch(exist_ok=False)
             raw_sample, _ = librosa.core.load(self.audio_path, sr=self.sampling_rate)
             mel_sample = self._mel_transform(raw_sample)
             with self.mel_file_path.open('wb') as mel_file:
                 pickle.dump(mel_sample, mel_file, protocol=pickle.HIGHEST_PROTOCOL)
+            lockfile.unlink(missing_ok=False)
         else:
             pass
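The new lockfile makes the cache build a once-only operation across DataLoader workers: `touch(exist_ok=False)` raises FileExistsError for every process except the first one to claim the lock. A minimal sketch of the pattern (`build_sample` stands in for the librosa load plus mel transform above; unlike the committed code, the sketch also swallows the lock collision and releases the lock in a `finally`):

```python
import pickle
from pathlib import Path


def build_mel_once(mel_file_path: Path, build_sample) -> None:
    lockfile = mel_file_path.with_suffix('.lock')
    mel_file_path.parent.mkdir(parents=True, exist_ok=True)
    try:
        lockfile.touch(exist_ok=False)   # atomically claim the build
    except FileExistsError:
        return                           # another worker is already building
    try:
        with mel_file_path.open('wb') as mel_file:
            pickle.dump(build_sample(), mel_file, protocol=pickle.HIGHEST_PROTOCOL)
    finally:
        lockfile.unlink()                # release even if the build fails
```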
@@ -13,7 +13,7 @@ class TorchMelDataset(Dataset):
         super(TorchMelDataset, self).__init__()
         self.sampling_rate = sampling_rate
         self.audio_file_len = audio_file_len
-        self.padding = AutoPadToShape((1, n_mels, sub_segment_len)) if auto_pad_to_shape else None
+        self.padding = AutoPadToShape((n_mels, sub_segment_len)) if auto_pad_to_shape else None
         self.path = Path(mel_path)
         self.sub_segment_len = sub_segment_len
         self.mel_hop_len = mel_hop_len
@@ -29,7 +29,7 @@ class TorchMelDataset(Dataset):
         with self.path.open('rb') as mel_file:
             mel_spec = pickle.load(mel_file, fix_imports=True)
         start = self.offsets[item]
-        snippet = mel_spec[:, :, start: start + self.sub_segment_len]
+        snippet = mel_spec[:, start: start + self.sub_segment_len]
         if self.transform:
             snippet = self.transform(snippet)
         if self.padding:
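Both hunks drop the leading channel axis: the cached spectrograms are now treated as 2-D `(n_mels, time)`, so the auto-pad target shape and the time slice each lose one dimension. In numpy terms:

```python
import numpy as np

mel_spec = np.zeros((64, 1000))    # hypothetical cached mel: (n_mels, time)
start, sub_segment_len = 200, 50
snippet = mel_spec[:, start: start + sub_segment_len]
assert snippet.shape == (64, 50)   # the old 3-D layout produced (1, 64, 50)
```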
metrics/__init__.py  (new file, 0 lines)

metrics/_base_score.py  (new file, 13 lines)
@@ -0,0 +1,13 @@
+from abc import ABC
+
+
+class _BaseScores(ABC):
+
+    def __init__(self, lightning_model):
+        self.model = lightning_model
+        pass
+
+    def __call__(self, outputs):
+        # summary_dict = dict()
+        # return summary_dict
+        raise NotImplementedError
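The contract is deliberately small: a subclass keeps a reference to the LightningModule and reduces the collected step outputs to a flat summary dict in `__call__`. A hypothetical subclass for illustration (the `'loss'` output key is an assumption):

```python
class MeanLossScore(_BaseScores):
    def __call__(self, outputs):
        losses = [float(output['loss']) for output in outputs]
        return dict(mean_loss=sum(losses) / max(len(losses), 1))
```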
metrics/binary_class_classifictaion.py  (new file, 56 lines)
@ -0,0 +1,56 @@
|
||||
import numpy as np
|
||||
|
||||
import torch
|
||||
from sklearn.ensemble import IsolationForest
|
||||
from sklearn.metrics import recall_score, roc_auc_score, average_precision_score
|
||||
|
||||
from ml_lib.metrics._base_score import _BaseScores
|
||||
|
||||
|
||||
class BinaryScores(_BaseScores):
|
||||
|
||||
def __init__(self, *args):
|
||||
super(BinaryScores, self).__init__(*args)
|
||||
|
||||
def __call__(self, outputs):
|
||||
summary_dict = dict()
|
||||
|
||||
# Additional Score like the unweighted Average Recall:
|
||||
#########################
|
||||
# UnweightedAverageRecall
|
||||
y_true = torch.cat([output['batch_y'] for output in outputs]) .cpu().numpy()
|
||||
y_pred = torch.cat([output['element_wise_recon_error'] for output in outputs]).squeeze().cpu().numpy()
|
||||
|
||||
# How to apply a threshold manualy
|
||||
# y_pred = (y_pred >= 0.5).astype(np.float32)
|
||||
|
||||
# How to apply a threshold by IF (Isolation Forest)
|
||||
clf = IsolationForest(random_state=self.model.seed)
|
||||
y_score = clf.fit_predict(y_pred.reshape(-1,1))
|
||||
y_score = (np.asarray(y_score) == -1).astype(np.float32)
|
||||
|
||||
uar_score = recall_score(y_true, y_score, labels=[0, 1], average='macro',
|
||||
sample_weight=None, zero_division='warn')
|
||||
summary_dict.update(dict(uar_score=uar_score))
|
||||
#########################
|
||||
# Precission
|
||||
precision_score = average_precision_score(y_true, y_score)
|
||||
summary_dict.update(dict(precision_score=precision_score))
|
||||
|
||||
#########################
|
||||
# AUC
|
||||
try:
|
||||
auc_score = roc_auc_score(y_true=y_true, y_score=y_score)
|
||||
summary_dict.update(dict(auc_score=auc_score))
|
||||
except ValueError:
|
||||
summary_dict.update(dict(auc_score=-1))
|
||||
|
||||
#########################
|
||||
# pAUC
|
||||
try:
|
||||
pauc = roc_auc_score(y_true=y_true, y_score=y_score, max_fpr=0.15)
|
||||
summary_dict.update(dict(pauc_score=pauc))
|
||||
except ValueError:
|
||||
summary_dict.update(dict(pauc_score=-1))
|
||||
|
||||
return summary_dict
|
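A standalone look at the IsolationForest thresholding used above: `fit_predict` labels outliers as -1, which `BinaryScores` maps to the positive class, so large reconstruction errors become predicted anomalies without a hand-picked cutoff. Toy data, values made up:

```python
import numpy as np
from sklearn.ensemble import IsolationForest

recon_errors = np.array([0.10, 0.12, 0.11, 5.00, 0.09]).reshape(-1, 1)
clf = IsolationForest(random_state=42)
y_score = (clf.fit_predict(recon_errors) == -1).astype(np.float32)
# The clear outlier (5.00) should come back as 1.0, the rest as 0.0.
```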
metrics/multi_class_classification.py  (new file, 129 lines)
@ -0,0 +1,129 @@
|
||||
from itertools import cycle
|
||||
|
||||
import numpy as np
|
||||
import torch
|
||||
from sklearn.metrics import f1_score, roc_curve, auc, roc_auc_score, ConfusionMatrixDisplay, confusion_matrix
|
||||
|
||||
from ml_lib.metrics._base_score import _BaseScores
|
||||
from ml_lib.utils.tools import to_one_hot
|
||||
|
||||
from matplotlib import pyplot as plt
|
||||
|
||||
|
||||
class MultiClassScores(_BaseScores):
|
||||
|
||||
def __init__(self, *args):
|
||||
super(MultiClassScores, self).__init__(*args)
|
||||
pass
|
||||
|
||||
def __call__(self, outputs):
|
||||
summary_dict = dict()
|
||||
#######################################################################################
|
||||
# Additional Score - UAR - ROC - Conf. Matrix - F1
|
||||
#######################################################################################
|
||||
#
|
||||
# INIT
|
||||
y_true = torch.cat([output['batch_y'] for output in outputs]).cpu().numpy()
|
||||
y_true_one_hot = to_one_hot(y_true, self.model.n_classes)
|
||||
|
||||
y_pred = torch.cat([output['y'] for output in outputs]).squeeze().cpu().float().numpy()
|
||||
y_pred_max = np.argmax(y_pred, axis=1)
|
||||
|
||||
class_names = {val: key for key, val in self.model.dataset.test_dataset.classes.items()}
|
||||
######################################################################################
|
||||
#
|
||||
# F1 SCORE
|
||||
micro_f1_score = f1_score(y_true, y_pred_max, labels=None, pos_label=1, average='micro', sample_weight=None,
|
||||
zero_division=True)
|
||||
macro_f1_score = f1_score(y_true, y_pred_max, labels=None, pos_label=1, average='macro', sample_weight=None,
|
||||
zero_division=True)
|
||||
summary_dict.update(dict(micro_f1_score=micro_f1_score, macro_f1_score=macro_f1_score))
|
||||
|
||||
#######################################################################################
|
||||
#
|
||||
# ROC Curve
|
||||
|
||||
# Compute ROC curve and ROC area for each class
|
||||
fpr = dict()
|
||||
tpr = dict()
|
||||
roc_auc = dict()
|
||||
for i in range(self.model.n_classes):
|
||||
fpr[i], tpr[i], _ = roc_curve(y_true_one_hot[:, i], y_pred[:, i])
|
||||
roc_auc[i] = auc(fpr[i], tpr[i])
|
||||
|
||||
# Compute micro-average ROC curve and ROC area
|
||||
fpr["micro"], tpr["micro"], _ = roc_curve(y_true_one_hot.ravel(), y_pred.ravel())
|
||||
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
|
||||
|
||||
# First aggregate all false positive rates
|
||||
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(self.model.n_classes)]))
|
||||
|
||||
# Then interpolate all ROC curves at this points
|
||||
mean_tpr = np.zeros_like(all_fpr)
|
||||
for i in range(self.model.n_classes):
|
||||
mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
|
||||
|
||||
# Finally average it and compute AUC
|
||||
mean_tpr /= self.model.n_classes
|
||||
|
||||
fpr["macro"] = all_fpr
|
||||
tpr["macro"] = mean_tpr
|
||||
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
|
||||
|
||||
# Plot all ROC curves
|
||||
plt.figure()
|
||||
plt.plot(fpr["micro"], tpr["micro"],
|
||||
label=f'micro ROC ({round(roc_auc["micro"], 2)})',
|
||||
color='deeppink', linestyle=':', linewidth=4)
|
||||
|
||||
plt.plot(fpr["macro"], tpr["macro"],
|
||||
label=f'macro ROC({round(roc_auc["macro"], 2)})',
|
||||
color='navy', linestyle=':', linewidth=4)
|
||||
|
||||
colors = cycle(['firebrick', 'orangered', 'gold', 'olive', 'limegreen', 'aqua',
|
||||
'dodgerblue', 'slategrey', 'royalblue', 'indigo', 'fuchsia'], )
|
||||
|
||||
for i, color in zip(range(self.model.n_classes), colors):
|
||||
plt.plot(fpr[i], tpr[i], color=color, lw=2, label=f'{class_names[i]} ({round(roc_auc[i], 2)})')
|
||||
|
||||
plt.plot([0, 1], [0, 1], 'k--', lw=2)
|
||||
plt.xlim([0.0, 1.0])
|
||||
plt.ylim([0.0, 1.05])
|
||||
plt.xlabel('False Positive Rate')
|
||||
plt.ylabel('True Positive Rate')
|
||||
plt.legend(loc="lower right")
|
||||
|
||||
self.model.logger.log_image('ROC', image=plt.gcf(), step=self.model.current_epoch)
|
||||
# self.model.logger.log_image('ROC', image=plt.gcf(), step=self.model.current_epoch, ext='pdf')
|
||||
plt.clf()
|
||||
|
||||
#######################################################################################
|
||||
#
|
||||
# ROC AUC SCORE
|
||||
|
||||
try:
|
||||
macro_roc_auc_ovr = roc_auc_score(y_true_one_hot, y_pred, multi_class="ovr",
|
||||
average="macro")
|
||||
summary_dict.update(macro_roc_auc_ovr=macro_roc_auc_ovr)
|
||||
except ValueError:
|
||||
micro_roc_auc_ovr = roc_auc_score(y_true_one_hot, y_pred, multi_class="ovr",
|
||||
average="micro")
|
||||
summary_dict.update(micro_roc_auc_ovr=micro_roc_auc_ovr)
|
||||
|
||||
#######################################################################################
|
||||
#
|
||||
# Confusion matrix
|
||||
|
||||
cm = confusion_matrix([class_names[x] for x in y_true], [class_names[x] for x in y_pred_max],
|
||||
labels=[class_names[key] for key in class_names.keys()],
|
||||
normalize='all')
|
||||
disp = ConfusionMatrixDisplay(confusion_matrix=cm,
|
||||
display_labels=[class_names[i] for i in range(self.model.n_classes)]
|
||||
)
|
||||
disp.plot(include_values=True)
|
||||
|
||||
self.model.logger.log_image('Confusion_Matrix', image=disp.figure_, step=self.model.current_epoch)
|
||||
# self.model.logger.log_image('Confusion_Matrix', image=disp.figure_, step=self.model.current_epoch, ext='pdf')
|
||||
|
||||
plt.close('all')
|
||||
return summary_dict
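The macro-average ROC above is built by pooling every class's FPR grid and interpolating each per-class TPR onto it before averaging. The same three steps in isolation, with toy curves:

```python
import numpy as np

fpr = {0: np.array([0.0, 0.2, 1.0]), 1: np.array([0.0, 0.5, 1.0])}
tpr = {0: np.array([0.0, 0.8, 1.0]), 1: np.array([0.0, 0.6, 1.0])}

all_fpr = np.unique(np.concatenate([fpr[i] for i in fpr]))   # pooled FPR grid
mean_tpr = np.zeros_like(all_fpr)
for i in fpr:
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])           # resample each TPR
mean_tpr /= len(fpr)                                         # average the curves
# (all_fpr, mean_tpr) is the macro ROC; auc(all_fpr, mean_tpr) gives roc_auc["macro"].
```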