ResidualModule and New Parameters, Speed Manipulation
commit dfe2db342f (parent f6c6726509)
@@ -1,58 +1,19 @@
 import librosa
 import numpy as np


-class NoiseInjection(object):
+class Speed(object):

-    def __init__(self, noise_factor: float, sigma=0.5, mu=0.5):
-        assert noise_factor > 0, f'noise_factor has to be greater than 0, but was: {noise_factor}.'
-        self.mu = mu
-        self.sigma = sigma
-        self.noise_factor = noise_factor
+    def __init__(self, max_ratio=0.3, speed_factor=1):
+        self.speed_factor = speed_factor
+        self.max_ratio = max_ratio

-    def __call__(self, x: np.ndarray):
-        noise = np.random.normal(loc=self.mu, scale=self.sigma, size=x.shape)
-        augmented_data = x + self.noise_factor * noise
-        # Cast back to same data type
-        augmented_data = augmented_data.astype(x.dtype)
-        return augmented_data
-
-
-class LoudnessManipulator(object):
-
-    def __init__(self, max_factor: float):
-        assert 1 > max_factor > 0, f'max_factor has to be within [0, 1], but was: {max_factor}.'
-        self.max_factor = max_factor
-
-    def __call__(self, x: np.ndarray):
-        augmented_data = x + x * (np.random.random() * self.max_factor)
-        # Cast back to same data type
-        augmented_data = augmented_data.astype(x.dtype)
-        return augmented_data
-
-
-class ShiftTime(object):
-
-    valid_shifts = ['right', 'left', 'any']
-
-    def __init__(self, max_shift_ratio: float, shift_direction: str = 'any'):
-        assert 1 > max_shift_ratio > 0, f'max_shift_ratio has to be within [0, 1], but was: {max_shift_ratio}.'
-        assert shift_direction.lower() in self.valid_shifts, f'shift_direction has to be one of: {self.valid_shifts}'
-        self.max_shift_ratio = max_shift_ratio
-        self.shift_direction = shift_direction.lower()
-
-    def __call__(self, x: np.ndarray):
-        shift = np.random.randint(max(int(self.max_shift_ratio * x.shape[-1]), 1))
-        if self.shift_direction == 'right':
-            shift = -1 * shift
-        elif self.shift_direction == 'any':
-            direction = np.random.choice([1, -1], 1)
-            shift = direction * shift
-        augmented_data = np.roll(x, shift)
-        # Set to silence for heading/tailing
-        shift = int(shift)
-        if shift > 0:
-            augmented_data[:shift] = 0
-        else:
-            augmented_data[shift:] = 0
-        return augmented_data
+    def __call__(self, x):
+        start = int(np.random.randint(0, x.shape[-1], 1))
+        end = min(int((np.random.uniform(0, self.max_ratio, 1) * x.shape[-1]) + start), x.shape[-1])
+        try:
+            speed_factor = float(np.random.uniform(min(self.speed_factor, 1), max(self.speed_factor, 1), 1))
+            aug_data = librosa.effects.time_stretch(x[start:end], speed_factor)
+            return np.concatenate((x[:start], aug_data, x[end:]), axis=0)[:x.shape[-1]]
+        except ValueError:
+            return x
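The new Speed transform time-stretches one randomly chosen slice of the waveform by a factor drawn between speed_factor and 1, then truncates the result so it never exceeds the original length. A minimal usage sketch (illustrative only; it assumes the Speed class from the hunk above is importable, and a librosa version that accepts the stretch rate as a positional argument, as the diff does):

import numpy as np

wave = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16000)).astype(np.float32)  # 1 s test tone
slow_down = Speed(max_ratio=0.3, speed_factor=0.7)    # stretch at most 30% of the signal
augmented = slow_down(wave)
assert augmented.shape[-1] <= wave.shape[-1]          # output is clipped to the input length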
audio_toolset/mel_augmentation.py (new file, 101 lines)
@@ -0,0 +1,101 @@
+from typing import Union
+
+import numpy as np
+
+
+class NoiseInjection(object):
+
+    def __init__(self, noise_factor: float, sigma=0.5, mu=0.5):
+        assert noise_factor >= 0, f'noise_factor has to be greater than or equal to 0, but was: {noise_factor}.'
+        self.mu = mu
+        self.sigma = sigma
+        self.noise_factor = noise_factor
+
+    def __call__(self, x: np.ndarray):
+        if self.noise_factor:
+            noise = np.random.normal(loc=self.mu, scale=self.sigma, size=x.shape)
+            augmented_data = x + self.noise_factor * noise
+            # Cast back to same data type
+            augmented_data = augmented_data.astype(x.dtype)
+            return augmented_data
+        else:
+            return x
+
+
+class LoudnessManipulator(object):
+
+    def __init__(self, max_factor: float):
+        assert 1 > max_factor >= 0, f'max_factor has to be within [0, 1), but was: {max_factor}.'
+        self.max_factor = max_factor
+
+    def __call__(self, x: np.ndarray):
+        if self.max_factor:
+            augmented_data = x + x * (np.random.random() * self.max_factor)
+            # Cast back to same data type
+            augmented_data = augmented_data.astype(x.dtype)
+            return augmented_data
+        else:
+            return x
+
+
+class ShiftTime(object):
+
+    valid_shifts = ['right', 'left', 'any']
+
+    def __init__(self, max_shift_ratio: float, shift_direction: str = 'any'):
+        assert 1 > max_shift_ratio >= 0, f'max_shift_ratio has to be within [0, 1), but was: {max_shift_ratio}.'
+        assert shift_direction.lower() in self.valid_shifts, f'shift_direction has to be one of: {self.valid_shifts}'
+        self.max_shift_ratio = max_shift_ratio
+        self.shift_direction = shift_direction.lower()
+
+    def __call__(self, x: np.ndarray):
+        if self.max_shift_ratio:
+            shift = np.random.randint(max(int(self.max_shift_ratio * x.shape[-1]), 1))
+            if self.shift_direction == 'right':
+                shift = -1 * shift
+            elif self.shift_direction == 'any':
+                direction = np.random.choice([1, -1], 1)
+                shift = direction * shift
+            augmented_data = np.roll(x, shift)
+            # Set to silence for heading/tailing
+            shift = int(shift)
+            if shift > 0:
+                augmented_data[:shift] = 0
+            else:
+                augmented_data[shift:] = 0
+            return augmented_data
+        else:
+            return x
+
+
+class MaskAug(object):
+
+    w_idx = -1
+    h_idx = -2
+
+    def __init__(self, duration_ratio_max=0.3, mask_with_noise=True):
+        assertion = f'"duration_ratio_max" has to be within [0, 1), but was: {duration_ratio_max}'
+        if isinstance(duration_ratio_max, (tuple, list)):
+            assert all([0 <= max_val < 1 for max_val in duration_ratio_max]), assertion
+        if isinstance(duration_ratio_max, (float, int)):
+            assert 0 <= duration_ratio_max < 1, assertion
+        super().__init__()
+
+        self.mask_with_noise = mask_with_noise
+        self.duration_ratio_max = duration_ratio_max if isinstance(duration_ratio_max, (tuple, list)) \
+            else (duration_ratio_max, duration_ratio_max)
+
+    def __call__(self, x):
+        for dim in (self.w_idx, self.h_idx):
+            if self.duration_ratio_max[dim]:
+                start = int(np.random.choice(x.shape[dim], 1))
+                v_max = x.shape[dim] * self.duration_ratio_max[dim]
+                size = int(np.random.randint(0, v_max, 1))
+                end = int(min(start + size, x.shape[dim]))
+                size = end - start
+                if dim == self.w_idx:
+                    x[:, start:end] = np.random.random((x.shape[self.h_idx], size)) if self.mask_with_noise else 0
+                else:
+                    x[start:end, :] = np.random.random((size, x.shape[self.w_idx])) if self.mask_with_noise else 0
+        return x
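The new module keeps the waveform transforms (now no-ops when their factor is 0) and adds MaskAug, which blanks or noise-fills one random block along the time axis and one along the mel axis, in the spirit of SpecAugment. A hedged end-to-end sketch (array shapes and factors below are illustrative, not taken from the commit):

import numpy as np

wave = np.random.randn(16000).astype(np.float32)   # mono waveform
mel = np.random.rand(64, 128).astype(np.float32)   # (n_mels, time_frames)

for transform in (NoiseInjection(0.01), LoudnessManipulator(0.3), ShiftTime(0.2)):
    wave = transform(wave)

mask = MaskAug(duration_ratio_max=0.2, mask_with_noise=True)
mel = mask(mel)   # masks up to 20% of each axis; note the array is modified in place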
@@ -15,7 +15,7 @@ DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 ###################
 class LinearModule(ShapeMixin, nn.Module):

-    def __init__(self, in_shape, out_features, activation=None, bias=True,
+    def __init__(self, in_shape, out_features, bias=True, activation=None,
                  norm=False, dropout: Union[int, float] = 0, **kwargs):
         warnings.warn(f'The following arguments have been ignored: \n {list(kwargs.keys())}')
         super(LinearModule, self).__init__()
@@ -25,10 +25,11 @@ class LinearModule(ShapeMixin, nn.Module):
         self.dropout = nn.Dropout(dropout) if dropout else F_x(self.flat.shape)
         self.norm = nn.BatchNorm1d(self.flat.shape) if norm else F_x(self.flat.shape)
         self.linear = nn.Linear(self.flat.shape, out_features, bias=bias)
-        self.activation = activation() or F_x(self.linear.out_features)
+        self.activation = activation() if activation else F_x(self.linear.out_features)

     def forward(self, x):
         tensor = self.flat(x)
         tensor = self.dropout(tensor)
         tensor = self.norm(tensor)
         tensor = self.linear(tensor)
         tensor = self.activation(tensor)
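The activation change matters because the old expression called activation() unconditionally: with the default activation=None that is None(), a TypeError, so the F_x fallback after `or` was never reached. The conditional form only instantiates the activation class when one is passed. A small illustration of the failure mode (stand-alone, not from the commit):

activation = None
try:
    _ = activation() or (lambda t: t)   # old pattern: calling None raises before the fallback applies
except TypeError as err:
    print(err)                          # 'NoneType' object is not callable
fallback = activation() if activation else (lambda t: t)   # new pattern: clean fallback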
@@ -108,12 +109,16 @@ class DeConvModule(ShapeMixin, nn.Module):


 class ResidualModule(ShapeMixin, nn.Module):

-    def __init__(self, in_shape, module_class, n, activation=None, **module_parameters):
+    def __init__(self, in_shape, module_class, n, **module_parameters):
         assert n >= 1
         super(ResidualModule, self).__init__()
         self.in_shape = in_shape
         module_parameters.update(in_shape=in_shape)
-        self.activation = activation() if activation else lambda x: x
+        self.activation = module_parameters.get('activation', None)
+        if self.activation is not None:
+            self.activation = self.activation()
+        else:
+            self.activation = F_x(self.in_shape)
         self.residual_block = nn.ModuleList([module_class(**module_parameters) for _ in range(n)])
         assert self.in_shape == self.shape, f'The in_shape: {self.in_shape} - must match the out_shape: {self.shape}.'
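With this change the activation is read out of **module_parameters, so a single keyword reaches both ResidualModule and the wrapped module_class. A hedged usage sketch (parameter values are illustrative; it assumes LinearModule, shown earlier in this commit, as the wrapped block, and matching in/out sizes so the shape assert above holds):

import torch.nn as nn

# in_shape must equal the block's output shape, enforced by the assert in __init__
residual = ResidualModule(in_shape=128, module_class=LinearModule, n=2,
                          out_features=128, activation=nn.ReLU)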
@@ -18,12 +18,14 @@ class ShapeMixin:

     @property
     def shape(self):
         assert isinstance(self, (LightningBaseModule, nn.Module))

-        x = torch.randn(self.in_shape)
-        # This is needed for BatchNorm shape checking
-        x = torch.stack((x, x))
-        output = self(x)
-        return output.shape[1:] if len(output.shape[1:]) > 1 else output.shape[-1]
+        if self.in_shape is not None:
+            x = torch.randn(self.in_shape)
+            # This is needed for BatchNorm shape checking
+            x = torch.stack((x, x))
+            output = self(x)
+            return output.shape[1:] if len(output.shape[1:]) > 1 else output.shape[-1]
+        else:
+            return -1


 class F_x(ShapeMixin, nn.Module):