bringing branches up to date

This commit is contained in:
Steffen Illium
2021-02-15 11:39:54 +01:00
parent 010176e80b
commit a966321576
11 changed files with 216 additions and 197 deletions

View File

@@ -10,24 +10,31 @@ from ml_lib.audio_toolset.audio_io import LibrosaAudioToMel, MelToImage
from ml_lib.audio_toolset.mel_dataset import TorchMelDataset
class _AudioToMelDataset(Dataset, ABC):
import librosa
class LibrosaAudioToMelDataset(Dataset):
@property
def audio_file_duration(self):
raise NotImplementedError
return librosa.get_duration(sr=self.mel_kwargs.get('sr', None), filename=self.audio_path)
@property
def sampling_rate(self):
raise NotImplementedError
return self.mel_kwargs.get('sr', None)
def __init__(self, audio_file_path, label, sample_segment_len=0, sample_hop_len=0, reset=False,
audio_augmentations=None, mel_augmentations=None, mel_kwargs=None, **kwargs):
self.ignored_kwargs = kwargs
super(LibrosaAudioToMelDataset, self).__init__()
# audio_file, sampling_rate = librosa.load(self.audio_path, sr=sampling_rate)
mel_kwargs.update(sr=mel_kwargs.get('sr', None) or librosa.get_samplerate(audio_file_path))
self.mel_kwargs = mel_kwargs
self.reset = reset
self.audio_path = Path(audio_file_path)
mel_folder_suffix = self.audio_path.parent.parent.name
self.mel_file_path = Path(str(self.audio_path)
.replace(mel_folder_suffix, f'{mel_folder_suffix}_mel_folder')
.replace(self.audio_path.suffix, '.npy'))
@@ -38,59 +45,25 @@ class _AudioToMelDataset(Dataset, ABC):
self.audio_file_duration, mel_kwargs['sr'], mel_kwargs['hop_length'],
mel_kwargs['n_mels'], transform=mel_augmentations)
def _build_mel(self):
raise NotImplementedError
def __getitem__(self, item):
try:
return self.dataset[item]
except FileNotFoundError:
assert self._build_mel()
return self.dataset[item]
def __len__(self):
return len(self.dataset)
import librosa
class LibrosaAudioToMelDataset(_AudioToMelDataset):
@property
def audio_file_duration(self):
return librosa.get_duration(sr=self.mel_kwargs.get('sr', None), filename=self.audio_path)
@property
def sampling_rate(self):
return self.mel_kwargs.get('sr', None)
def __init__(self, audio_file_path, *args, **kwargs):
audio_file_path = Path(audio_file_path)
# audio_file, sampling_rate = librosa.load(self.audio_path, sr=sampling_rate)
mel_kwargs = kwargs.get('mel_kwargs', dict())
mel_kwargs.update(sr=mel_kwargs.get('sr', None) or librosa.get_samplerate(audio_file_path))
kwargs.update(mel_kwargs=mel_kwargs)
super(LibrosaAudioToMelDataset, self).__init__(audio_file_path, *args, **kwargs)
self._mel_transform = Compose([LibrosaAudioToMel(**mel_kwargs),
MelToImage()
])
def _build_mel(self):
def __getitem__(self, item):
return self.dataset[item]
def __len__(self):
return len(self.dataset)
def build_mel(self):
if self.reset:
self.mel_file_path.unlink(missing_ok=True)
if not self.mel_file_path.exists():
lockfile = Path(str(self.mel_file_path).replace(self.mel_file_path.suffix, '.lock'))
self.mel_file_path.parent.mkdir(parents=True, exist_ok=True)
lockfile.touch(exist_ok=False)
raw_sample, _ = librosa.core.load(self.audio_path, sr=self.sampling_rate)
mel_sample = self._mel_transform(raw_sample)
with self.mel_file_path.open('wb') as mel_file:
pickle.dump(mel_sample, mel_file, protocol=pickle.HIGHEST_PROTOCOL)
lockfile.unlink(missing_ok=False)
else:
pass

View File

@@ -11,13 +11,16 @@ class TorchMelDataset(Dataset):
def __init__(self, mel_path, sub_segment_len, sub_segment_hop_len, label, audio_file_len,
sampling_rate, mel_hop_len, n_mels, transform=None, auto_pad_to_shape=True):
super(TorchMelDataset, self).__init__()
self.sampling_rate = sampling_rate
self.audio_file_len = audio_file_len
self.padding = AutoPadToShape((n_mels, sub_segment_len)) if auto_pad_to_shape and sub_segment_len else None
self.sampling_rate = int(sampling_rate)
self.audio_file_len = int(audio_file_len)
if auto_pad_to_shape and sub_segment_len:
self.padding = AutoPadToShape((int(n_mels), int(sub_segment_len)))
else:
self.padding = None
self.path = Path(mel_path)
self.sub_segment_len = sub_segment_len
self.mel_hop_len = mel_hop_len
self.sub_segment_hop_len = sub_segment_hop_len
self.sub_segment_len = int(sub_segment_len)
self.mel_hop_len = int(mel_hop_len)
self.sub_segment_hop_len = int(sub_segment_hop_len)
self.n = int((self.sampling_rate / self.mel_hop_len) * self.audio_file_len + 1)
if self.sub_segment_len and self.sub_segment_hop_len:
self.offsets = list(range(0, self.n - self.sub_segment_len, self.sub_segment_hop_len))
@@ -27,8 +30,6 @@ class TorchMelDataset(Dataset):
self.transform = transform
def __getitem__(self, item):
while Path(str(self.path).replace(self.path.suffix, '.lock')).exists():
time.sleep(0.01)
with self.path.open('rb') as mel_file:
mel_spec = pickle.load(mel_file, fix_imports=True)
start = self.offsets[item]
@@ -38,7 +39,7 @@ class TorchMelDataset(Dataset):
snippet = self.transform(snippet)
if self.padding:
snippet = self.padding(snippet)
return snippet, self.label
return self.path.__str__(), snippet, self.label
def __len__(self):
return len(self.offsets)