requirements

commit 407df15bbf
parent f3335d508a

main.py (2 lines changed)
@@ -50,7 +50,7 @@ def run_lightning_loop(config_obj):
     show_progress_bar=True,
     weights_save_path=logger.log_dir,
     gpus=[0] if torch.cuda.is_available() else None,
-    check_val_every_n_epoch=5,
+    check_val_every_n_epoch=10,
     # num_sanity_val_steps=config_obj.train.num_sanity_val_steps,
     # row_log_interval=(model.n_train_batches * 0.1),  # TODO: Better Value / Setting
     # log_save_interval=(model.n_train_batches * 0.2),  # TODO: Better Value / Setting
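For context: these keyword arguments configure the PyTorch Lightning Trainer built inside run_lightning_loop, so the change makes validation run every 10th epoch instead of every 5th. A minimal sketch of such a Trainer call under the pinned pytorch-lightning==0.7.3 (logger, model and max_epochs are assumptions here, not taken from this repository):

    import torch
    from pytorch_lightning import Trainer

    trainer = Trainer(
        logger=logger,                                     # experiment logger created earlier (assumed)
        show_progress_bar=True,                            # progress-bar flag of the 0.7.x Trainer
        weights_save_path=logger.log_dir,                  # store checkpoints next to the logs
        gpus=[0] if torch.cuda.is_available() else None,   # use the first GPU when one is available
        check_val_every_n_epoch=10,                        # validate every 10th epoch (was 5 before this commit)
        max_epochs=100,                                     # hypothetical value; the real one would come from config_obj
    )
    trainer.fit(model)                                     # model is assumed to be a LightningModule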
requirements (new file, 80 lines)
@@ -0,0 +1,80 @@
absl-py==0.9.0
attrs==19.3.0
audioread==2.1.8
bravado==10.6.0
bravado-core==5.17.0
cachetools==4.1.0
certifi==2020.4.5.1
cffi==1.14.0
chardet==3.0.4
click==7.1.1
cycler==0.10.0
decorator==4.4.2
future==0.18.2
gitdb==4.0.4
GitPython==3.1.1
google-auth==1.13.1
google-auth-oauthlib==0.4.1
grpcio==1.28.1
idna==2.9
imageio==2.8.0
importlib-metadata==1.6.0
joblib==0.14.1
jsonpointer==2.0
jsonref==0.2
jsonschema==3.2.0
kiwisolver==1.2.0
librosa==0.7.2
llvmlite==0.31.0
Markdown==3.2.1
matplotlib==3.2.1
monotonic==1.5
msgpack==1.0.0
msgpack-python==0.5.6
natsort==7.0.1
neptune-client==0.4.109
numba==0.48.0
numpy==1.18.2
oauthlib==3.1.0
pandas==1.0.3
Pillow==7.1.1
protobuf==3.11.3
psutil==5.7.0
py3nvml==0.2.6
pyasn1==0.4.8
pyasn1-modules==0.2.8
pycparser==2.20
PyJWT==1.7.1
pyparsing==2.4.7
pyrsistent==0.16.0
python-dateutil==2.8.1
pytorch-lightning==0.7.3
pytz==2019.3
PyYAML==5.3.1
requests==2.23.0
requests-oauthlib==1.3.0
resampy==0.2.2
rfc3987==1.3.8
rsa==4.0
scikit-learn==0.22.2.post1
scipy==1.4.1
simplejson==3.17.0
six==1.14.0
smmap==3.0.2
SoundFile==0.10.3.post1
strict-rfc3339==0.7
swagger-spec-validator==2.5.0
tensorboard==2.2.0
tensorboard-plugin-wit==1.6.0.post3
test-tube==0.7.5
torch==1.4.0
torchcontrib==0.0.2
torchvision==0.5.0
tqdm==4.45.0
typing-extensions==3.7.4.2
urllib3==1.25.8
webcolors==1.11.1
websocket-client==0.57.0
Werkzeug==1.0.1
xmltodict==0.12.0
zipp==3.1.0
util/metric_reader.py (new file, 50 lines)
@@ -0,0 +1,50 @@
import csv
from collections import defaultdict
from pathlib import Path

import numpy as np

from util.config import MConfig


outpath = Path('..', 'output')
metric_file_name = 'metrics.csv'
config_file_name = 'config.ini'


if __name__ == '__main__':
    for model_path in outpath.iterdir():
        out_file = (model_path / metric_file_name)
        for parameter_configuration in model_path.iterdir():
            uar_scores = defaultdict(list)
            for metric_file in parameter_configuration.rglob(metric_file_name):
                with metric_file.open('r') as f:
                    # Collect this run's hyperparameters from the config.ini next to its metrics.csv.
                    config = MConfig()
                    with (metric_file.parent / config_file_name).open('r') as c:
                        config.read_file(c)
                    for key, val in config.data.__dict__.items():
                        uar_scores[key].append(val)

                    # Parse the metrics.csv written during training; strip the trailing
                    # newline so the last column name and value stay clean.
                    headers = f.readline().strip().split(',')
                    metric_dict = defaultdict(list)
                    for line in f:
                        values = line.strip().split(',')
                        for header, value in zip(headers, values):
                            if value:
                                try:
                                    metric_dict[header].append(float(value))
                                except ValueError:
                                    metric_dict[header].append(value)
                    # Aggregate the run's UAR scores; np.max/np.median raise ValueError on empty input.
                    for score, func in zip(['mean', 'max', 'median', 'std'],
                                           [np.mean, np.max, np.median, np.std]):
                        try:
                            uar_scores[score].append(func(np.asarray(metric_dict['uar_score'])).round(2))
                        except ValueError as e:
                            print(e)

            # Append one summary row per run to the model-level metrics.csv.
            file_existed = out_file.exists()
            with out_file.open('a+') as f:
                headers = list(uar_scores.keys())

                writer = csv.DictWriter(f, delimiter=',', lineterminator='\n', fieldnames=headers)
                if not file_existed:
                    writer.writeheader()  # file doesn't exist yet, write a header
                for row_idx in range(len(uar_scores['mean'])):
                    writer.writerow({key: uar_scores[key][row_idx] for key in headers})
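For orientation, this is the directory layout the reader assumes, inferred from the paths used above (the concrete directory names are placeholders):

    output/
        <model_name>/                     # the aggregated metrics.csv is appended here
            <parameter_configuration>/
                .../metrics.csv           # per-run metrics written during training
                .../config.ini            # per-run hyperparameters, read via MConfig

Each metrics.csv found under a parameter configuration contributes one row to the model-level summary: that run's config values plus the mean/max/median/std of its uar_score column.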