Initial ComParE commit, Primate Dataset

This commit is contained in:
Steffen Illium
2021-02-15 09:26:21 +01:00
commit 94d3c701b5
6 changed files with 306 additions and 0 deletions

View File

@@ -0,0 +1,34 @@
import numpy as np
import torch
import pytorch_lightning as pl
import librosa
import pandas as pd
import variables as v
from tqdm import tqdm
# Target sample rate used for the duration computation.
sr = 16000
# Every raw wav file of the primates dataset.
wavs = list((v.PRIMATES_Root / 'wav').glob('*.wav'))

if __name__ == '__main__':
    # Per-file audio duration in seconds.
    durations = [librosa.get_duration(filename=str(wav), sr=sr) for wav in tqdm(wavs)]

    stats = (np.mean(durations), np.std(durations), np.min(durations), np.max(durations))
    print('Mean duration: {:.3f}s\tstd: {:.3f}s\tmin: {:.3f}s\t max: {:.3f}s'.format(*stats))

    # Combine the label csv files of all three splits into a single frame.
    lab_dir = v.PRIMATES_Root / 'lab'
    split_frames = [pd.read_csv(lab_dir / name) for name in ('train.csv', 'devel.csv', 'test.csv')]
    csv = pd.concat(split_frames)

    # Class distribution over the whole dataset.
    print(csv.groupby('label').count())
    # Duration deciles - useful for choosing a segment length.
    print([np.quantile(durations, q=q) for q in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]])

View File

@@ -0,0 +1,121 @@
from multiprocessing.pool import ApplyResult
from pathlib import Path
from typing import List
from torch.utils.data import DataLoader, ConcatDataset
from torchvision.transforms import Compose, RandomApply
from tqdm import tqdm
from ml_lib.audio_toolset.audio_io import NormalizeLocal
from ml_lib.audio_toolset.audio_to_mel_dataset import LibrosaAudioToMelDataset
from ml_lib.audio_toolset.mel_augmentation import NoiseInjection, LoudnessManipulator, ShiftTime, MaskAug
from ml_lib.utils._basedatamodule import _BaseDataModule, DATA_OPTION_test, DATA_OPTION_train, DATA_OPTION_devel
from ml_lib.utils.transforms import ToTensor
import multiprocessing as mp
# The three dataset splits this module handles.
data_options = [DATA_OPTION_test, DATA_OPTION_train, DATA_OPTION_devel]

# Map primate call labels to integer class ids; the list order defines the id.
_label_names = ['background', 'chimpanze', 'geunon', 'mandrille', 'redcap']
class_names = {name: idx for idx, name in enumerate(_label_names)}
class PrimatesLibrosaDatamodule(_BaseDataModule):
    """Lightning datamodule for the ComParE Primates dataset.

    Reads the split definitions from the csv files under ``<root>/primates/lab``
    and wraps every wav file in a :class:`LibrosaAudioToMelDataset`; the splits
    are exposed through the usual train/val/test dataloaders.
    """

    @property
    def mel_folder(self):
        # Target folder for pre-computed mel spectrograms.
        return self.root / 'mel_folder'

    @property
    def wav_folder(self):
        # Folder holding the raw wav files.
        return self.root / 'wav'

    def __init__(self, root, batch_size, num_worker, sr, n_mels, n_fft, hop_length,
                 sample_segment_len=40, sample_hop_len=15):
        """
        :param root: dataset root; the primate data is expected under ``<root>/primates``
        :param batch_size: dataloader batch size
        :param num_worker: number of dataloader / pool worker processes
        :param sr: audio sample rate
        :param n_mels: number of mel bands
        :param n_fft: fft window size
        :param hop_length: stft hop length
        :param sample_segment_len: segment length (in mel frames) per sample
        :param sample_hop_len: hop (in mel frames) between consecutive segments
        """
        super(PrimatesLibrosaDatamodule, self).__init__()
        self.sample_hop_len = sample_hop_len
        self.sample_segment_len = sample_segment_len
        self.num_worker = num_worker
        self.batch_size = batch_size
        self.root = Path(root) / 'primates'
        # Mel Transforms - will be pushed with all other paramters by self.__dict__ to subdataset-class
        self.mel_kwargs = dict(sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)

        # Utility transforms, applied to every split.
        self.utility_transforms = Compose([NormalizeLocal(), ToTensor()])

        # Data Augmentations - training split only (see _build_subdataset).
        self.mel_augmentations = Compose([
            # ToDo: HP Search this parameters, make it adjustable from outside
            RandomApply([NoiseInjection(0.2)], p=0.3),
            RandomApply([LoudnessManipulator(0.5)], p=0.3),
            RandomApply([ShiftTime(0.4)], p=0.3),
            RandomApply([MaskAug(0.2)], p=0.3),
            self.utility_transforms])

    # Train Dataloader
    def train_dataloader(self):
        return DataLoader(dataset=self.datasets[DATA_OPTION_train], shuffle=True,
                          batch_size=self.batch_size, pin_memory=True,
                          num_workers=self.num_worker)

    # Validation Dataloader
    def val_dataloader(self):
        return DataLoader(dataset=self.datasets[DATA_OPTION_devel], shuffle=False, pin_memory=True,
                          batch_size=self.batch_size, num_workers=self.num_worker)

    # Test Dataloader
    def test_dataloader(self):
        return DataLoader(dataset=self.datasets[DATA_OPTION_test], shuffle=False,
                          batch_size=self.batch_size, pin_memory=True,
                          num_workers=self.num_worker)

    def _load_rows(self, data_option):
        """Return the data rows of the split's csv file (header excluded)."""
        with open(Path(self.root) / 'lab' / f'{data_option}.csv', mode='r') as f:
            # Exclude the header
            _ = next(f)
            return list(f)

    def _build_subdataset(self, row, build=False):
        """Build a single-file mel dataset from one csv row (``<file>,<label>``).

        :param row: raw csv line
        :param build: when True, eagerly compute and persist the mel spectrogram
        """
        slice_file_name, class_name = row.strip().split(',')
        # -1 marks an unknown label (e.g. the unlabeled test split).
        class_id = class_names.get(class_name, -1)
        audio_file_path = self.wav_folder / slice_file_name

        # DATA OPTION DIFFERENTIATION !!!!!!!!!!! - Begin
        # BUGFIX: copy __dict__ - the original passed self.__dict__ itself, so the
        # update() below permanently replaced the datamodule's own mel_augmentations
        # the first time a devel/test file was processed, silently disabling
        # augmentation for all subsequently built train samples.
        kwargs = dict(self.__dict__)
        if any([x in slice_file_name for x in [DATA_OPTION_devel, DATA_OPTION_test]]):
            # devel/test samples must never be augmented.
            kwargs.update(mel_augmentations=self.utility_transforms)
        # DATA OPTION DIFFERENTIATION !!!!!!!!!!! - End

        mel_dataset = LibrosaAudioToMelDataset(audio_file_path, class_id, **kwargs)
        if build:
            assert mel_dataset.build_mel()
        return mel_dataset

    def prepare_data(self, *args, **kwargs):
        """Pre-compute all mel spectrograms in parallel and assemble the split datasets."""
        datasets = dict()
        for data_option in data_options:
            all_rows = self._load_rows(data_option)
            with mp.Pool(processes=self.num_worker) as pool:
                with tqdm(total=len(all_rows)) as pbar:
                    # BUGFIX: the original called the unbound classmethod-style
                    # tqdm.update() (its own FIXME), which raises a TypeError.
                    # apply_async with a callback gives a live per-file progress bar
                    # while preserving the row order in the collected results.
                    async_results = [pool.apply_async(self._build_subdataset, (row, True),
                                                      callback=lambda _: pbar.update())
                                     for row in all_rows]
                    dataset = [result.get() for result in async_results]
            datasets[data_option] = ConcatDataset(dataset)
        self.datasets = datasets
        return datasets

    def setup(self, stag=None):
        """Build the split datasets sequentially (assumes mels are already built).

        NOTE(review): ``stag`` is presumably a typo of lightning's ``stage``
        argument; kept as-is since lightning passes it positionally.
        """
        datasets = dict()
        for data_option in data_options:
            dataset = [self._build_subdataset(row) for row in self._load_rows(data_option)]
            datasets[data_option] = ConcatDataset(dataset)
        self.datasets = datasets
        return datasets