add more modularity

This commit is contained in:
Robert Müller 2020-03-18 17:53:52 +01:00
parent 55402d219c
commit f4606a7f6c
4 changed files with 58 additions and 51 deletions

6
cfg.py
View File

@@ -2,11 +2,11 @@ from pathlib import Path
import torch
BATCH_SIZE = 128
NUM_EPOCHS = 1
NUM_WORKERS = 0
NUM_EPOCHS = 10
NUM_WORKERS = 4
NUM_SEGMENTS = 5
NUM_SEGMENT_HOPS = 2
SEEDS = [42, 1337]
ALL_DATASET_PATHS = list((Path(__file__).parent.absolute() / 'data' / 'mimii').glob('*/'))
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

89
main.py
View File

@@ -1,60 +1,57 @@
import numpy as np
from tqdm import tqdm
from cfg import *
from mimii import MIMII
from models.ae import AE
import torch.nn as nn
import torch.optim as optim
import random
if __name__ == '__main__':
import numpy as np
from tqdm import tqdm
from cfg import *
from mimii import MIMII
from models.ae import AE
import torch.nn as nn
import torch.optim as optim
import random
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
random.seed(42)
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
random.seed(42)
dataset_path = ALL_DATASET_PATHS[0]
print(f'Training on {dataset_path.name}')
mimii = MIMII(dataset_path=dataset_path, machine_id=0)
mimii.to(DEVICE)
#mimii.preprocess(n_fft=1024, hop_length=256, n_mels=80, center=False, power=2.0)
dataset_path = ALL_DATASET_PATHS[0]
print(f'Training on {dataset_path.name}')
mimii = MIMII(dataset_path=dataset_path, machine_id=0)
mimii.to(DEVICE)
#mimii.preprocess(n_fft=1024, hop_length=256, n_mels=80, center=False, power=2.0)
dl = mimii.train_dataloader(
segment_len=NUM_SEGMENTS,
hop_len=NUM_SEGMENT_HOPS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
shuffle=True
)
dl = mimii.train_dataloader(
segment_len=NUM_SEGMENTS,
hop_len=NUM_SEGMENT_HOPS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
shuffle=True
)
model = AE(400).to(DEVICE)
model.init_weights()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
model = AE(400).to(DEVICE)
model.init_weights()
optimizer = optim.Adam(model.parameters(), lr=0.001)
beta_1 = 0.00
beta_2 = 0.0
for epoch in range(NUM_EPOCHS):
print(f'EPOCH #{epoch+1}')
losses = []
for batch in tqdm(dl):
data, labels = batch
data = data.to(DEVICE)
for epoch in range(NUM_EPOCHS):
print(f'EPOCH #{epoch+1}')
losses = []
for batch in tqdm(dl):
data, labels = batch
data = data.to(DEVICE)
loss = model.train_loss(data)
y_hat, y = model(data)
loss = criterion(y_hat, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.item())
print(f'Loss: {np.mean(losses)}')
losses.append(loss.item())
print(f'Loss: {np.mean(losses)}')
auc = mimii.evaluate_model(model, NUM_SEGMENTS, NUM_SEGMENTS)
print(f'AUC: {auc}')
auc = mimii.evaluate_model(model, NUM_SEGMENTS, NUM_SEGMENTS)
print(f'AUC: {auc}')

View File

@@ -92,8 +92,7 @@ class MIMII(object):
data, labels = batch
data = data.to(self.device)
y_hat, y = f(data)
preds = torch.sum((y_hat - y) ** 2, dim=tuple(range(1, y_hat.dim())))
preds = f.test_loss(data)
file_preds += preds.cpu().data.tolist()
y_true.append(labels.max().item())

View File

@@ -24,6 +24,17 @@ class AE(nn.Module):
x = data.view(data.shape[0], -1)
return self.net(x), x
def train_loss(self, data):
    """Return the scalar MSE training loss for one batch.

    Runs the forward pass (which yields the reconstruction ``y_hat`` and
    the flattened target ``y``) and measures their mean squared error.

    Args:
        data: input batch tensor; forwarded unchanged to ``self.forward``.

    Returns:
        A 0-dim tensor holding the mean squared reconstruction error.
    """
    y_hat, y = self.forward(data)
    # Functional API — avoids allocating a new (stateless) nn.MSELoss
    # module object on every training step.
    return nn.functional.mse_loss(y_hat, y)
def test_loss(self, data):
y_hat, y = self.forward(data)
preds = torch.sum((y_hat - y) ** 2, dim=tuple(range(1, y_hat.dim())))
return preds
def init_weights(self):
def _weight_init(m):
if hasattr(m, 'weight'):