Compare commits

...

14 Commits

Author SHA1 Message Date
Steffen Illium
ec67c9092f Delete New Text Document.txt 2023-11-14 16:21:29 +01:00
Steffen Illium
5706b62c67 Delete literature directory 2023-11-14 16:21:13 +01:00
Steffen Illium
5194816044 Delete paper directory 2023-11-14 16:20:48 +01:00
Si11ium
de85f45e6b Bug fixes: TaskingSoup can now have n entities per layer, where n is the size of the task input. Entity task input and soup task input need to be of the same size. 2019-07-14 17:29:45 +02:00
Si11ium
9bbe5df2b2 TaskingSoup, TaskingSoupExperiment 2019-07-03 09:17:20 +02:00
Si11ium
320c5c26bc TaskDecorator, Tasks and Experiments 2019-06-26 13:40:34 +02:00
Si11ium
a12577465c Wrong function call resolved 2019-06-18 08:03:06 +02:00
Si11ium
93bbda54a1 Bug resolved in Particle.is_zero()
Now at normal execution times
2019-06-18 08:02:10 +02:00
Si11ium
4b7999479f Refactor - Weight class to weight toolkit (faster) 2019-06-17 15:14:15 +02:00
Si11ium
5dfbfcaa20 Refactor:
Step 6: Experiments
2019-06-14 18:32:38 +02:00
Si11ium
4a81279b58 Refactor:
Step 4 - Aggregating Neural Networks
Step 5 - Training Neural Networks
2019-06-14 09:55:51 +02:00
Si11ium
9189759320 Refactor:
Step 4 - Aggregating Neural Networks
Step 5 - Training Neural Networks
2019-06-10 18:27:52 +02:00
Si11ium
203c5b45e3 Refactor:
Step 4 - Aggregating Neural Networks
Step 5 - Training Neural Networks
2019-06-08 21:28:38 +02:00
Si11ium
50f7f84084 Refactor:
Step 1 - Introduction of Weight object for global weight operations
Step 2 - Cleanup
Step 3 - Redone Weightwise network updates in clean numpy code
2019-06-06 21:57:22 +02:00
30 changed files with 1149 additions and 1597 deletions

View File

@@ -1,96 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from typing import List
from collections import defaultdict
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def plot_bars(names_bars_tuple, filename='histogram_plot'):
# categorical
ryb = cl.scales['10']['div']['RdYlBu']
names, bars = names_bars_tuple
situations = list(bars[0].keys())
names = ['Weightwise', 'Aggregating', 'Recurrent'] # [name.split(' ')[0] for name in names]
data_dict = {}
for idx, name in enumerate(names):
data_dict[name] = bars[idx]
data = []
for idx, situation in enumerate(situations):
bar = go.Bar(
y=[data_dict[name][situation] for name in names],
# x=[key for key in data_dict[name].keys()],
x=names,
name=situation,
showlegend=True,
)
data.append(bar)
layout = dict(xaxis=dict(title="Networks", titlefont=dict(size=20)),
barmode='stack',
# height=400, width=400,
# margin=dict(l=20, r=20, t=20, b=20)
legend=dict(orientation="h", x=0.05)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
bars = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
plotting_function((names, bars), filename='{}.html'.format(absolut_file_or_folder[:-5]))
else:
pass
# This was not a file I should look for.
else:
# This was either another file type or Plot.html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_bars, files_to_look_for=['all_counters.dill'])
# , 'all_names.dill', 'all_notable_nets.dill'])

View File

@@ -1,129 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from typing import List
from collections import defaultdict
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def plot_box(exp: Experiment, filename='histogram_plot'):
# categorical
ryb = cl.scales['10']['div']['RdYlBu']
data = []
for d in range(exp.depth):
names = ['D 10e-{}'.format(d)] * exp.trials
data.extend(names)
trace_list = []
vergence_box = go.Box(
y=exp.ys,
x=data,
name='Time to Vergence',
boxpoints=False,
showlegend=True,
marker=dict(
color=ryb[3]
),
)
fixpoint_box = go.Box(
y=exp.zs,
x=data,
name='Time as Fixpoint',
boxpoints=False,
showlegend=True,
marker=dict(
color=ryb[-1]
),
)
trace_list.extend([vergence_box, fixpoint_box])
layout = dict(title='{}'.format('Known Fixpoint Variation'),
titlefont=dict(size=30),
legend=dict(
orientation="h",
x=.1, y=-0.1,
font=dict(
size=20,
color='black'
),
),
boxmode='group',
boxgap=0,
# barmode='group',
bargap=0,
xaxis=dict(showgrid=False,
zeroline=True,
tickangle=0,
showticklabels=True),
yaxis=dict(
title='Steps',
zeroline=False,
titlefont=dict(
size=30
)
),
# height=400, width=400,
margin=dict(t=50)
)
fig = go.Figure(data=trace_list, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except AttributeError:
pass
else:
pass
# This was not a file I should look for.
else:
# This was either another file type or Plot.html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_box, files_to_look_for=['experiment.dill'])
# , 'all_names.dill', 'all_notable_nets.dill'])

View File

@@ -2,77 +2,136 @@ import os
import time
import dill
from tqdm import tqdm
import copy
from copy import copy
from tensorflow.python.keras import backend as K
from abc import ABC, abstractmethod
class Experiment:
class IllegalArgumentError(ValueError):
pass
class Experiment(ABC):
@staticmethod
def from_dill(path):
with open(path, "rb") as dill_file:
return dill.load(dill_file)
def __init__(self, name=None, ident=None):
self.experiment_id = '{}_{}'.format(ident or '', time.time())
@staticmethod
def reset_model():
K.clear_session()
def __init__(self, name=None, ident=None, **kwargs):
self.experiment_id = f'{ident or ""}_{time.time()}'
self.experiment_name = name or 'unnamed_experiment'
self.next_iteration = 0
self.log_messages = []
self.historical_particles = {}
self.iteration = 0
self.log_messages = list()
self.historical_particles = dict()
self.params = dict(exp_iterations=100, application_steps=100, prints=True, trains_per_application=100)
self.with_params(**kwargs)
def __copy__(self, *args, **kwargs):
params = self.params
params.update(name=self.experiment_name)
params.update(**kwargs)
self_copy = self.__class__(*args, **params)
return self_copy
def __enter__(self):
self.dir = os.path.join('experiments', 'exp-{name}-{id}-{it}'.format(
name=self.experiment_name, id=self.experiment_id, it=self.next_iteration)
)
self.dir = os.path.join('experiments', f'exp-{self.experiment_name}-{self.experiment_id}-{self.iteration}')
os.makedirs(self.dir)
print("** created {dir} **".format(dir=self.dir))
print(f'** created {self.dir} **')
return self
def __exit__(self, exc_type, exc_value, traceback):
self.save(experiment=self.without_particles())
self.save_log()
self.next_iteration += 1
# Clean Exit
self.reset_all()
# self.iteration += 1  # moved from here into run_exp
def with_params(self, **kwargs):
# Make them your own
self.params.update(kwargs)
return self
def log(self, message, **kwargs):
self.log_messages.append(message)
print(message, **kwargs)
def save_log(self, log_name="log"):
with open(os.path.join(self.dir, "{name}.txt".format(name=log_name)), "w") as log_file:
with open(os.path.join(self.dir, f"{log_name}.txt"), "w") as log_file:
for log_message in self.log_messages:
print(str(log_message), file=log_file)
def __copy__(self):
copy_ = Experiment(name=self.experiment_name,)
copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
attr not in ['particles', 'historical_particles']}
return copy_
def without_particles(self):
self_copy = copy.copy(self)
# self_copy.particles = [particle.states for particle in self.particles]
self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
self_copy = copy(self)
# Check if attribute exists
if hasattr(self, 'historical_particles'):
# Check if it is empty.
if self.historical_particles:
# Do the Update
# self_copy.particles = [particle.states for particle in self.particles]
self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
return self_copy
def save(self, **kwargs):
for name, value in kwargs.items():
with open(os.path.join(self.dir, "{name}.dill".format(name=name)), "wb") as dill_file:
with open(os.path.join(self.dir, f"{name}.dill"), "wb") as dill_file:
dill.dump(value, dill_file)
def reset_log(self):
self.log_messages = list()
@abstractmethod
def run_net(self, net, **kwargs):
raise NotImplementedError
pass
def run_exp(self, network_generator, reset_model=False, **kwargs):
# INFO: run_id needs to be greater than 0 so that the exp stores the trajectories!
for run_id in range(self.params.get('exp_iterations')):
network = network_generator()
self.run_net(network, **kwargs)
self.historical_particles[run_id] = network
if self.params.get('prints'):
print("Fixpoint? " + str(network.is_fixpoint()))
self.iteration += 1
if reset_model:
self.reset_model()
def reset_all(self):
self.reset_log()
self.reset_model()
class FixpointExperiment(Experiment):
def __init__(self, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super().__init__(**kwargs)
self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
self.interesting_fixpoints = []
def run_net(self, net, step_limit=100, run_id=0):
i = 0
while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
net.self_attack()
i += 1
if run_id:
def run_exp(self, network_generator, logging=True, reset_model=False, **kwargs):
kwargs.update(reset_model=False)
super(FixpointExperiment, self).run_exp(network_generator, **kwargs)
if logging:
self.log(self.counters)
if reset_model:
self.reset_model()
def run_net(self, net, **kwargs):
if len(kwargs):
raise IllegalArgumentError
for i in range(self.params.get('application_steps')):
if net.is_diverged() or net.is_fixpoint():
break
net.set_weights(net.apply_to_weights(net.get_weights()))
if self.iteration and hasattr(net, 'save_state'):
net.save_state(time=i)
self.count(net)
@@ -90,31 +149,129 @@ class FixpointExperiment(Experiment):
else:
self.counters['other'] += 1
def reset_counters(self):
for key in self.counters.keys():
self.counters[key] = 0
return True
def reset_all(self):
super(FixpointExperiment, self).reset_all()
self.reset_counters()
class MixedFixpointExperiment(FixpointExperiment):
def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0):
def __init__(self, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super(MixedFixpointExperiment, self).__init__(**kwargs)
i = 0
while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
net.self_attack()
with tqdm(postfix=["Loss", dict(value=0)]) as bar:
for _ in range(trains_per_application):
loss = net.compiled().train()
bar.postfix[1]["value"] = loss
def run_net(self, net, **kwargs):
assert hasattr(net, 'train'), 'This Network must be trainable, i.e. use the "TrainingNeuralNetworkDecorator"!'
for application in range(self.params.get('application_steps')):
epoch_num = self.params.get('trains_per_application') * application
net.set_weights(net.apply_to_weights(net.get_weights()))
if net.is_diverged() or net.is_fixpoint():
break
barformat = "Experiment Iteration: {postfix[iteration]} | "
barformat += "Evolution Step:{postfix[step]}| "
barformat += "Training Epoch:{postfix[epoch]}| "
barformat += "Loss: {postfix[loss]} | {bar}"
with tqdm(total=self.params.get('trains_per_application'),
postfix={'step': 0, 'loss': 0, 'iteration': self.iteration, 'epoch': 0, None: None},
bar_format=barformat) as bar:
# This iterates trains_per_application times; the addition is just for epoch enumeration
for epoch in range(epoch_num, epoch_num + self.params.get('trains_per_application')):
if net.is_diverged():
print('Network diverged to either inf or nan... breaking')
break
loss = net.train(epoch=epoch)
if epoch % 10 == 0:
bar.postfix.update(step=application, epoch=epoch, loss=loss, iteration=self.iteration)
bar.update()
i += 1
if run_id:
epoch_num += 1
if self.iteration and hasattr(net, 'save_state'):
net.save_state()
self.count(net)
class TaskExperiment(MixedFixpointExperiment):
def __init__(self, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super(TaskExperiment, self).__init__(**kwargs)
def run_exp(self, network_generator, reset_model=False, logging=True, **kwargs):
kwargs.update(reset_model=False, logging=logging)
super(FixpointExperiment, self).run_exp(network_generator, **kwargs)
if reset_model:
self.reset_model()
pass
def run_net(self, net, **kwargs):
assert hasattr(net, 'evaluate')
super(TaskExperiment, self).run_net(net, **kwargs)
# Get Performance without Training
task_performance = net.evaluate(*net.get_samples(task_samples=True),
batchsize=net.get_amount_of_weights())
self_performance = net.evaluate(*net.get_samples(self_samples=True),
batchsize=net.get_amount_of_weights())
current_performance = dict(task_performance=task_performance,
self_performance=self_performance,
counters=self.counters, id=self.iteration)
self.log(current_performance)
pass
class SoupExperiment(Experiment):
pass
class IdentLearningExperiment(Experiment):
def __init__(self):
super(IdentLearningExperiment, self).__init__(name=self.__class__.__name__)
def __init__(self, soup_generator, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
self.soup_generator = soup_generator
super(SoupExperiment, self).__init__(**kwargs)
def run_exp(self, network_generator, **kwargs):
for i in range(self.params.get('exp_iterations')):
soup = self.soup_generator()
soup.seed()
for _ in tqdm(range(self.params.get('application_steps'))):
soup.evolve()
self.log(soup.count())
self.save(soup=soup.without_particles())
K.clear_session()
def run_net(self, net, **kwargs):
raise NotImplementedError
pass
class TaskingSoupExperiment(Experiment):
def __init__(self, soup_generator, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super(TaskingSoupExperiment, self).__init__(**kwargs)
self.soup_generator = soup_generator
def __copy__(self):
super(TaskingSoupExperiment, self).__copy__(self.soup_generator)
def run_exp(self, **kwargs):
for i in range(self.params.get('exp_iterations')):
soup = self.soup_generator()
soup.seed()
for _ in tqdm(range(self.params.get('application_steps'))):
soup.evolve()
self.log(soup.count())
self.save(soup=soup.without_particles())
K.clear_session()
def run_net(self, net, **kwargs):
raise NotImplementedError()
pass
if __name__ == '__main__':
pass
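# Minimal usage sketch of the refactored API above (illustrative values;
# ParticleDecorator and WeightwiseNeuralNetwork come from network.py):
#
#     net_generator = lambda: ParticleDecorator(
#         WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))
#     with FixpointExperiment(name='demo', exp_iterations=10) as exp:
#         exp.run_exp(net_generator)  # one fresh net per iteration, self-applied until fixpoint or divergence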

View File

@@ -1,118 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def line_plot(names_exp_tuple, filename='lineplot'):
names, line_dict_list = names_exp_tuple
names = ['Weightwise', 'Aggregating', 'Recurrent']
if False:
data = []
base_scale = cl.scales['10']['div']['RdYlGn']
scale = cl.interp(base_scale, len(line_dict_list) + 1) # Map color scale to N bins
for ld_id, line_dict in enumerate(line_dict_list):
for data_point in ['ys', 'zs']:
trace = go.Scatter(
x=line_dict['xs'],
y=line_dict[data_point],
name='{} {}zero-fixpoints'.format(names[ld_id], 'non-' if data_point == 'zs' else ''),
line=dict(
# color=scale[ld_id],
width=5,
# dash='dash' if data_point == 'ys' else ''
),
)
data.append(trace)
if True:
data = []
base_scale = cl.scales['10']['div']['RdYlGn']
scale = cl.interp(base_scale, len(line_dict_list) + 1) # Map color scale to N bins
for ld_id, line_dict in enumerate(line_dict_list):
trace = go.Scatter(
x=line_dict['xs'],
y=line_dict['ys'],
name=names[ld_id],
line=dict( # color=scale[ld_id],
width=5
),
)
data.append(trace)
layout = dict(xaxis=dict(title='Trains per self-application', titlefont=dict(size=20)),
yaxis=dict(title='Average amount of fixpoints found',
titlefont=dict(size=20),
# type='log',
# range=[0, 2]
),
legend=dict(orientation='h', x=0.3, y=-0.3),
# height=800, width=800,
margin=dict(b=0)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
try:
plotting_function((names, exp), filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
# This was either another file type or Plot.html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, line_plot, ["all_data.dill"])

View File

@@ -1,191 +0,0 @@
import tensorflow as tf
from keras.models import Sequential, Model
from keras.layers import SimpleRNN, Dense
from keras.layers import Input, TimeDistributed
from tqdm import tqdm
import time
import os
import dill
from experiment import Experiment
import itertools
from typing import Union
import numpy as np
class Network(object):
def __init__(self, features, cells, layers, bias=False, recurrent=False):
self.features = features
self.cells = cells
self.num_layer = layers
bias_params = cells if bias else 0
# Recurrent network
if recurrent:
# First RNN
p_layer_1 = (self.features * self.cells + self.cells ** 2 + bias_params)
# All other RNN Layers
p_layer_n = (self.cells * self.cells + self.cells ** 2 + bias_params) * (self.num_layer - 1)
else:
# First Dense
p_layer_1 = (self.features * self.cells + bias_params)
# All other Dense Layers
p_layer_n = (self.cells * self.cells + bias_params) * (self.num_layer - 1)
# Final Dense
p_layer_out = self.features * self.cells + bias_params
self.parameters = np.sum([p_layer_1, p_layer_n, p_layer_out])
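# Worked example of the parameter count above (features=2, cells=2, layers=2,
# bias=False, dense): p_layer_1 = 2*2 = 4, p_layer_n = (2*2)*(2-1) = 4,
# p_layer_out = 2*2 = 4, so self.parameters = 12.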
# Build network
cell = SimpleRNN if recurrent else Dense
self.inputs, x = Input(shape=(self.parameters // self.features,
self.features) if recurrent else (self.features,)), None
for layer in range(self.num_layer):
if recurrent:
x = SimpleRNN(self.cells, activation=None, use_bias=False,
return_sequences=True)(self.inputs if layer == 0 else x)
else:
x = Dense(self.cells, activation=None, use_bias=False,
)(self.inputs if layer == 0 else x)
self.outputs = Dense(self.features if recurrent else 1, activation=None, use_bias=False)(x)
print('Network initialized, i haz {p} params @:{e}Features: {f}{e}Cells: {c}{e}Layers: {l}'.format(
p=self.parameters, l=self.num_layer, c=self.cells, f=self.features, e='\n{}'.format(' ' * 5))
)
pass
def get_inputs(self):
return self.inputs
def get_outputs(self):
return self.outputs
class _BaseNetwork(Model):
def __init__(self, **kwargs):
super(_BaseNetwork, self).__init__(**kwargs)
# This is dirty
self.features = None
def get_weights_flat(self):
weights = super().get_weights()
flat = np.asarray(np.concatenate([x.flatten() for x in weights]))
return flat
def step(self, x):
pass
def step_other(self, other: Union[Sequential, Model]) -> bool:
pass
def get_parameter_count(self):
return np.sum([np.prod(x.shape) for x in self.get_weights()])
def train_on_batch(self, *args, **kwargs):
raise NotImplementedError
def compile(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def mean_abs_error(labels, predictions):
return np.mean(np.abs(predictions - labels), axis=-1)
@staticmethod
def mean_sqrd_error(labels, predictions):
return np.mean(np.square(predictions - labels), axis=-1)
class RecurrentNetwork(_BaseNetwork):
def __init__(self, network: Network, *args, **kwargs):
super().__init__(inputs=network.inputs, outputs=network.outputs)
self.features = network.features
self.parameters = network.parameters
assert self.parameters == self.get_parameter_count()
def step(self, x):
shaped = np.reshape(x, (1, -1, self.features))
return self.predict(shaped).flatten()
def fit(self, epochs=500, **kwargs):
losses = []
with tqdm(total=epochs, ascii=True,
desc='Type: {t}'. format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for _ in range(epochs):
x = self.get_weights_flat()
y = self.step(x)
weights = self.get_weights()
global_idx = 0
for idx, weight_matrix in enumerate(weights):
flattened = weight_matrix.flatten()
new_weights = y[global_idx:global_idx + flattened.shape[0]]
weights[idx] = np.reshape(new_weights, weight_matrix.shape)
global_idx += flattened.shape[0]
losses.append(self.mean_sqrd_error(y.flatten(), self.get_weights_flat()))
self.set_weights(weights)
bar.postfix[1]["value"] = losses[-1]
bar.update()
return losses
class FeedForwardNetwork(_BaseNetwork):
def __init__(self, network:Network, **kwargs):
super().__init__(inputs=network.inputs, outputs=network.outputs, **kwargs)
self.features = network.features
self.parameters = network.parameters
self.num_layer = network.num_layer
self.num_cells = network.cells
# assert self.parameters == self.get_parameter_count()
def step(self, x):
return self.predict(x)
def step_other(self, x):
return self.predict(x)
def fit(self, epochs=500, **kwargs):
losses = []
with tqdm(total=epochs, ascii=True,
desc='Type: {t} @ Epoch:'. format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for _ in range(epochs):
all_weights = self.get_weights_flat()
cell_idx = np.apply_along_axis(lambda x: x/self.num_cells, 0, np.arange(int(self.get_parameter_count())))
xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)
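# xc has shape (n_params, 2): column 0 holds the current flat weight values,
# column 1 the weight index scaled by 1/num_cells, so the network maps
# (weight, position) -> new weight.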
y = self.step(xc)
weights = self.get_weights()
global_idx = 0
for idx, weight_matrix in enumerate(weights):
# UPDATE THE WEIGHTS
flattened = weight_matrix.flatten()
new_weights = y[global_idx:global_idx + flattened.shape[0], 0]
weights[idx] = np.reshape(new_weights, weight_matrix.shape)
global_idx += flattened.shape[0]
losses.append(self.mean_sqrd_error(y[:, 0].flatten(), self.get_weights_flat()))
self.set_weights(weights)
bar.postfix[1]["value"] = losses[-1]
bar.update()
return losses
if __name__ == '__main__':
with Experiment() as exp:
features, cells, layers = 2, 2, 2
use_recurrent = False
if use_recurrent:
network = Network(features, cells, layers, recurrent=use_recurrent)
r = RecurrentNetwork(network)
loss = r.fit(epochs=10)
exp.save(rnet=r)
else:
network = Network(features, cells, layers, recurrent=use_recurrent)
ff = FeedForwardNetwork(network)
loss = ff.fit(epochs=10)
exp.save(ffnet=ff)
print(loss)

File diff suppressed because it is too large.

View File

@@ -0,0 +1,66 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
class DataPlotter:
def __init__(self, path=None):
self.path = path or os.getcwd()
pass
def search_and_apply(self, plotting_function, files_to_look_for=None, absolut_file_or_folder=None):
absolut_file_or_folder = absolut_file_or_folder or self.path
files_to_look_for = files_to_look_for or list()
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
self.search_and_apply(plotting_function, files_to_look_for=files_to_look_for,
absolut_file_or_folder=sub_file_or_folder.path)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists(
'{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
try:
plotting_function((names, exp), filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
# This was either another FilyType or Plot.html already exists.
pass
if __name__ == '__main__':
plotter = DataPlotter()
pass

View File

@@ -0,0 +1,109 @@
import os
from collections import defaultdict
# noinspection PyUnresolvedReferences
from soup import Soup
from experiment import TaskExperiment
from argparse import ArgumentParser
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
import numpy as np
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def line_plot(exp: TaskExperiment, filename='lineplot'):
assert isinstance(exp, TaskExperiment), 'This has to be a TaskExperiment!'
traces, data = [], defaultdict(list)
color_scale = cl.scales['3']['div']['RdYlBu']
# Sort data per Key
for message in exp.log_messages:
for key in message.keys():
try:
data[key].append(-0.1 if np.isnan(message[key]) or np.isinf(message[key]) else message[key])
except TypeError:  # non-numeric entries such as the counters dict
data[key].append(message[key])
for line_id, key in enumerate(data.keys()):
if key not in ['counters', 'id']:
trace = go.Scatter(
x=[x for x in range(len(data[key]))],
y=data[key],
name=key,
line=dict(
color=color_scale[line_id],
width=5
),
)
traces.append(trace)
else:
continue
layout = dict(xaxis=dict(title='Trains per self-application', titlefont=dict(size=20)),
yaxis=dict(title='Average amount of fixpoints found',
titlefont=dict(size=20),
# type='log',
# range=[0, 2]
),
legend=dict(orientation='h', x=0.3, y=-0.3),
# height=800, width=800,
margin=dict(b=0)
)
fig = go.Figure(data=traces, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=None, override=False):
# ToDo: Clean this Mess
assert os.path.exists(absolut_file_or_folder), f'The given path does not exist! Given: {absolut_file_or_folder}'
files_to_look_for = files_to_look_for or list()
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function,
files_to_look_for=files_to_look_for, override=override)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for or not files_to_look_for:
if not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])) or override:
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
# Plot.html already exists.
pass
else:
# This was a wrong file type.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, line_plot, override=True)

View File

@@ -4,16 +4,16 @@ import os
# Concat top level dir to sys.path (the module search path)
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend as K
def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=[]):
def count(counters, net, notable_nets: list=None):
notable_nets = notable_nets or list()
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
@@ -32,7 +32,7 @@ def count(counters, net, notable_nets=[]):
if __name__ == '__main__':
with Experiment('applying_fixpoint') as exp:
with FixpointExperiment(name='applying_fixpoint') as exp:
exp.trials = 50
exp.run_count = 100
exp.epsilon = 1e-4
@@ -41,7 +41,7 @@ if __name__ == '__main__':
for use_bias in [False]:
net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
# net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
all_counters = []
all_notable_nets = []
all_names = []
@@ -51,14 +51,14 @@ if __name__ == '__main__':
for _ in tqdm(range(exp.trials)):
net = ParticleDecorator(net_generator())
net.with_params(epsilon=exp.epsilon)
name = str(net.net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
name = str(net.name) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
for run_id in range(exp.run_count):
loss = net.self_attack()
count(counters, net, notable_nets)
all_counters += [counters]
all_notable_nets += [notable_nets]
all_names += [name]
K.clear_session()
exp.reset_model()
exp.save(all_counters=all_counters)
exp.save(trajectorys=exp.without_particles())
# net types reached in the end

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +0,0 @@
TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.2, 5.2, 7.4, 8.1, 9.1, 9.6, 9.8, 10.0, 9.9, 9.9]}

File diff suppressed because one or more lines are too long

Binary file not shown.

(Before: image, 207 KiB)

View File

@@ -3,16 +3,18 @@ import os
# Concat top level dir to sys.path (the module search path)
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend
import tensorflow.python.keras.backend as K
def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=[]):
def count(counters, net, notable_nets=None):
notable_nets = notable_nets or []
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
@@ -52,7 +54,7 @@ if __name__ == '__main__':
net = ParticleDecorator(net)
name = str(net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
count(counters, net, notable_nets)
keras.backend.clear_session()
K.clear_session()
all_counters += [counters]
# all_notable_nets += [notable_nets]
all_names += [name]

View File

@@ -5,12 +5,11 @@ import os
# Concat top level dir to sys.path (the module search path)
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
from soup import prng
import keras.backend
import tensorflow.python.keras.backend as K
from statistics import mean
@@ -85,7 +84,7 @@ if __name__ == '__main__':
exp.ys += [time_to_something]
# time steps still regarded as the initial fix-point
exp.zs += [time_as_fixpoint]
keras.backend.clear_session()
K.clear_session()
current_scale /= 10.0
for d in range(exp.depth):
exp.log('variation 10e-' + str(d))

View File

@@ -6,13 +6,12 @@ sys.path += os.path.join('..', '.')
from typing import Tuple
from util import *
from experiment import *
from network import *
from soup import *
import keras.backend
from tensorflow.python.keras import backend as K
from statistics import mean
avg = mean
@@ -28,7 +27,7 @@ def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, soup, notable_nets=[]):
def count(counters, soup, notable_nets=None):
"""
Count the occurrences of the types of weight trajectories.
@@ -40,6 +39,7 @@ def count(counters, soup, notable_nets=[]):
:return: Both the counter dictionary and the list of interesting nets.
"""
notable_nets = notable_nets or list()
for net in soup.particles:
if net.is_diverged():
counters['divergent'] += 1
@@ -59,7 +59,7 @@ def count(counters, soup, notable_nets=[]):
if __name__ == '__main__':
with SoupExperiment('learn-from-soup') as exp:
with SoupExperiment(name='learn-from-soup') as exp:
exp.soup_size = 10
exp.soup_life = 100
exp.trials = 10
@@ -83,14 +83,14 @@ if __name__ == '__main__':
counters = generate_counters()
results = []
for _ in tqdm(range(exp.trials)):
soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity)
soup.seed()
name = str(soup.particles[0].net.__class__.__name__) + " activation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
name = str(soup.particles[0].name) + " activation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
for time in range(exp.soup_life):
soup.evolve()
count(counters, soup, notable_nets)
keras.backend.clear_session()
K.clear_session()
xs += [learn_from_severity]
ys += [float(counters['fix_zero']) / float(exp.trials)]

View File

@@ -6,12 +6,9 @@ from typing import Tuple
# Concat top level dir to sys.path (the module search path)
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend
def generate_counters():
"""
@@ -23,7 +20,7 @@ def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=[]):
def count(counters, net, notable_nets=None):
"""
Count the occurrences of the types of weight trajectories.
@@ -34,7 +31,7 @@ def count(counters, net, notable_nets=[]):
:rtype Tuple[dict, list]
:return: Both the counter dictionary and the list of interesting nets.
"""
notable_nets = notable_nets or list()
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
@@ -85,7 +82,7 @@ if __name__ == '__main__':
if net.is_diverged() or net.is_fixpoint():
break
count(counters, net, notable_nets)
keras.backend.clear_session()
exp.reset_model()
xs += [trains_per_selfattack]
ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
all_names += [name]

View File

@@ -6,12 +6,11 @@ sys.path += os.path.join('..', '.')
from typing import Tuple
from util import *
from experiment import *
from network import *
from soup import *
import keras.backend
import tensorflow.python.keras.backend as K
def generate_counters():
@@ -24,7 +23,7 @@ def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, soup, notable_nets=[]):
def count(counters, soup, notable_nets=None):
"""
Count the occurrences of the types of weight trajectories.
@@ -36,6 +35,7 @@ def count(counters, soup, notable_nets=[]):
:return: Both the counter dictionary and the list of interesting nets.
"""
notable_nets = notable_nets or list()
for net in soup.particles:
if net.is_diverged():
counters['divergent'] += 1
@@ -89,7 +89,7 @@ if __name__ == '__main__':
for _ in range(exp.soup_life):
soup.evolve()
count(counters, soup, notable_nets)
keras.backend.clear_session()
K.clear_session()
xs += [trains_per_selfattack]
ys += [float(counters['fix_zero']) / float(exp.trials)]

View File

@@ -104,7 +104,7 @@ if __name__ == '__main__':
for run_id in range(10):
net = TrainingNeuralNetworkDecorator(FFTNeuralNetwork(2, width=2, depth=2))\
.with_params(epsilon=0.0001, activation='sigmoid')
exp.run_net(net, 500, 10)
exp.run_net(net)
net.print_weights()

View File

@@ -10,23 +10,23 @@ from experiment import *
if __name__ == '__main__':
if True:
with SoupExperiment("soup") as exp:
for run_id in range(1):
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
.with_keras_params(activation='linear').with_params(epsilon=0.0001)
# net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')
# net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
soup = Soup(20, net_generator).with_params(remove_divergent=True, remove_zero=True,
train=30,
learn_from_rate=-1)
soup.seed()
for _ in tqdm(range(100)):
soup.evolve()
exp.log(soup.count())
# you can access soup.historical_particles[particle_uid].states[time_step]['loss']
# or soup.historical_particles[particle_uid].states[time_step]['weights']
# from soup.dill
exp.save(soup=soup.without_particles())
with SoupExperiment(name="soup") as exp:
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
.with_keras_params(activation='linear').with_params(epsilon=0.0001)
# net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')
# net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
soup = Soup(20, net_generator).with_params(remove_divergent=True, remove_zero=True,
train=30,
learn_from_rate=-1)
soup.seed()
for _ in tqdm(range(100)):
soup.evolve()
exp.log(soup.count())
# you can access soup.historical_particles[particle_uid].states[time_step]['loss']
# or soup.historical_particles[particle_uid].states[time_step]['weights']
# from soup.dill
exp.save(soup=soup.without_particles())
K.clear_session()

View File

@@ -4,16 +4,16 @@ import os
# Concat top level dir to sys.path (the module search path)
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend as K
import tensorflow.python.keras.backend as K
def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=[]):
def count(counters, net, notable_nets=None):
notable_nets = notable_nets or list()
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():

View File

@@ -1,12 +1,55 @@
import random
from tensorflow.python.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.python.keras.layers import Input, Layer, Concatenate, RepeatVector, Reshape
from tensorflow.python.keras.models import Sequential, Model
from tensorflow.python.keras import backend as K
from network import *
from typing import List, Tuple
# Functions and Operators
from operator import mul
from functools import reduce
from itertools import accumulate
import numpy as np
from task import Task, TaskAdditionOfN
from copy import copy, deepcopy
from network import ParticleDecorator, WeightwiseNeuralNetwork, TrainingNeuralNetworkDecorator, \
EarlyStoppingByInfNanLoss
from experiment import TaskingSoupExperiment
from math import sqrt
def prng():
return random.random()
class SlicingLayer(Layer):
def __init__(self):
self.kernel = None  # created in build()
self.inputs = None  # set in build()
super(SlicingLayer, self).__init__()
def build(self, input_shape):
# Create a trainable weight variable for this layer.
self.kernel = None
self.inputs = input_shape[-1]
super(SlicingLayer, self).build(input_shape) # Be sure to call this at the end
def call(self, x, **kwargs):
concats = [Concatenate()([x[:, i][..., None]] * self.inputs) for i in range(x.shape[-1].value)]
return concats
def compute_output_shape(self, input_shape):
return [Concatenate()([(None, 1)] * 4) for _ in range(input_shape[-1])]
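# Behaviour sketch: for an input of shape (batch, k) with k == self.inputs,
# call() returns k tensors, each of shape (batch, k), where tensor i is the
# i-th input column repeated k times.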
class Soup(object):
def __init__(self, size, generator, **kwargs):
@@ -14,24 +57,29 @@ class Soup(object):
self.generator = generator
self.particles = []
self.historical_particles = {}
self.params = dict(attacking_rate=0.1, learn_from_rate=0.1, train=0, learn_from_severity=1)
self.params.update(kwargs)
self.soup_params = dict(attacking_rate=0.1, learn_from_rate=0.1, train=0, learn_from_severity=1)
self.soup_params.update(kwargs)
self.time = 0
self.is_seeded = False
self.is_compiled = False
def __len__(self):
return len(self.particles)
def __copy__(self):
copy_ = Soup(self.size, self.generator, **self.params)
copy_ = Soup(self.size, self.generator, **self.soup_params)
copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
attr not in ['particles', 'historical_particles']}
return copy_
def without_particles(self):
self_copy = copy.copy(self)
self_copy = copy(self)
# self_copy.particles = [particle.states for particle in self.particles]
self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
return self_copy
def with_params(self, **kwargs):
self.params.update(kwargs)
def with_soup_params(self, **kwargs):
self.soup_params.update(kwargs)
return self
def generate_particle(self):
@@ -43,9 +91,13 @@ class Soup(object):
return self.historical_particles.get(uid, otherwise)
def seed(self):
self.particles = []
for _ in range(self.size):
self.particles += [self.generate_particle()]
if not self.is_seeded:
self.particles = []
for _ in range(self.size):
self.particles += [self.generate_particle()]
else:
print('already seeded!')
self.is_seeded = True
return self
def evolve(self, iterations=1):
@@ -53,33 +105,36 @@ class Soup(object):
self.time += 1
for particle_id, particle in enumerate(self.particles):
description = {'time': self.time}
if prng() < self.params.get('attacking_rate'):
if prng() < self.soup_params.get('attacking_rate'):
other_particle_id = int(prng() * len(self.particles))
other_particle = self.particles[other_particle_id]
particle.attack(other_particle)
description['action'] = 'attacking'
description['counterpart'] = other_particle.get_uid()
if prng() < self.params.get('learn_from_rate'):
if prng() < self.soup_params.get('learn_from_rate'):
other_particle_id = int(prng() * len(self.particles))
other_particle = self.particles[other_particle_id]
for _ in range(self.params.get('learn_from_severity', 1)):
for _ in range(self.soup_params.get('learn_from_severity', 1)):
particle.learn_from(other_particle)
description['action'] = 'learn_from'
description['counterpart'] = other_particle.get_uid()
for _ in range(self.params.get('train', 0)):
particle.compiled()
for _ in range(self.soup_params.get('train', 0)):
# callbacks on save_state are broken for TrainingNeuralNetwork
loss = particle.train(store_states=False)
description['fitted'] = self.params.get('train', 0)
description['fitted'] = self.soup_params.get('train', 0)
description['loss'] = loss
description['action'] = 'train_self'
description['counterpart'] = None
if self.params.get('remove_divergent') and particle.is_diverged():
if self.soup_params.get('remove_divergent') and particle.is_diverged():
new_particle = self.generate_particle()
self.particles[particle_id] = new_particle
description['action'] = 'divergent_dead'
description['counterpart'] = new_particle.get_uid()
if self.params.get('remove_zero') and particle.is_zero():
if self.soup_params.get('remove_zero') and particle.is_zero():
new_particle = self.generate_particle()
self.particles[particle_id] = new_particle
description['action'] = 'zero_dead'
@@ -108,40 +163,214 @@ class Soup(object):
print(particle.is_fixpoint())
if __name__ == '__main__':
if False:
with SoupExperiment() as exp:
for run_id in range(1):
net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
# net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
# net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
soup.seed()
for _ in tqdm(range(1000)):
soup.evolve()
exp.log(soup.count())
exp.save(soup=soup.without_particles())
class TaskingSoup(Soup):
@staticmethod
def weights_to_flat_array(weights: List[np.ndarray]) -> np.ndarray:
return np.concatenate([d.ravel() for d in weights])
@staticmethod
def reshape_flat_array(array, shapes: List[Tuple[int]]) -> List[np.ndarray]:
# Same thing, but with an additional np call
# sizes: List[int] = [int(np.prod(shape)) for shape in shapes]
sizes = [reduce(mul, shape) for shape in shapes]
# Split the incoming array into slices for layers
slices = [array[x: y] for x, y in zip(accumulate([0] + sizes), accumulate(sizes))]
# reshape them in accordance to the given shapes
weights = [np.reshape(weight_slice, shape) for weight_slice, shape in zip(slices, shapes)]
return weights
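# Round-trip sketch for the two helpers above (illustrative shapes):
#     flat = TaskingSoup.weights_to_flat_array([np.ones((2, 3)), np.zeros((3,))])
#     flat.shape                                        # -> (9,)
#     parts = TaskingSoup.reshape_flat_array(flat, [(2, 3), (3,)])
#     [p.shape for p in parts]                          # -> [(2, 3), (3,)]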
def __init__(self, population_size: int, task: Task, particle_generator, sparsity_rate=0.1, use_bias=False,
safe=True, **kwargs):
if safe:
input_shape_error_message = f'The population size must be divisible by {task.input_shape[-1]}'
assert population_size % task.input_shape[-1] == 0, input_shape_error_message
assert population_size % 2 == 0, 'The population size needs to be of even value'
super(TaskingSoup, self).__init__(population_size, particle_generator, **kwargs)
self.task = task
self.model: Sequential
self.network_params = dict(sparsity_rate=sparsity_rate, early_nan_stopping=True, use_bias=use_bias,
depth=population_size // task.input_shape[-1])
self.network_params.update(kwargs.get('network_params', {}))
self.compile_params = dict(loss='mse', optimizer='sgd')
self.compile_params.update(kwargs.get('compile_params', {}))
def with_network_params(self, **params):
self.network_params.update(params)
def _generate_model(self):
particle_idx_list = list(range(len(self)))
particles_per_layer = len(self) // self.network_params.get('depth')
task_input = Input(self.task.input_shape, name='Task_Input')
# First layer, which is connected to the input layer and independently trainable / not trainable at all.
input_neurons = particles_per_layer * self.task.output_shape
x = Dense(input_neurons, use_bias=self.network_params.get('use_bias'))(task_input)
x = SlicingLayer()(x)
for layer_num in range(self.network_params.get('depth')):
# These need to be tensors, because the particles come as Keras models that are applied here
x = [self.particles[layer_num*particles_per_layer + i].get_model()(x[i]) for
i in range(particles_per_layer)]
x = [RepeatVector(particles_per_layer)(x[i]) for i in range(particles_per_layer)]
x = [Reshape((particles_per_layer,))(x[i]) for i in range(particles_per_layer)]
x = Concatenate()(x)
x = Dense(self.task.output_shape, use_bias=self.network_params.get('use_bias'), activation='linear')(x)
model = Model(inputs=task_input, outputs=x)
return model
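# Note: with depth = population_size // task.input_shape[-1] (see __init__),
# particles_per_layer evaluates to task.input_shape[-1], i.e. n particles per
# layer where n is the size of the task input (cf. the commit message above).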
def get_weights(self):
return self.model.get_weights()
def set_weights(self, weights: List[np.ndarray]):
self.model.set_weights(weights)
def set_intermediate_weights(self, weights: List[np.ndarray]):
all_weights = self.get_weights()
all_weights[1:-1] = weights
self.set_weights(all_weights)
def get_intermediate_weights(self):
return self.get_weights()[1:-1]
def seed(self):
K.clear_session()
self.is_compiled = False
super(TaskingSoup, self).seed()
self.model = self._generate_model()
pass
def compile_model(self, **kwargs):
if not self.is_compiled:
compile_params = deepcopy(self.compile_params)
compile_params.update(kwargs)
return self.model.compile(**compile_params)
else:
raise BrokenPipeError('This model is already compiled! Something went wrong in the pipeline!')
def get_total_weight_amount(self):
if self.is_seeded:
return sum([x.get_amount_of_weights() for x in self.particles])
else:
return 0
def get_shapes(self):
return [x.shape for x in self.get_weights()]
def get_intermediate_shapes(self):
weights = [x.shape for x in self.get_weights()]
return weights[2:-2] if self.network_params.get('use_bias') else weights[1:-1]
def predict(self, x):
return self.model.predict(x)
def evolve(self, iterations=1):
for iteration in range(iterations):
super(TaskingSoup, self).evolve(iterations=1)
self.train_particles()
def get_particle_weights(self):
return np.concatenate([x.get_weights_flat() for x in self.particles])
def get_particle_input_shape(self):
if self.is_seeded:
return tuple([x if x else -1 for x in self.particles[0].get_model().input_shape])
else:
return -1
def set_particle_weights(self, weights):
particle_weight_shape = self.particles[0].shapes(self.particles[0].get_weights())
sizes = [x.get_amount_of_weights() for x in self.particles]
flat_weights = self.weights_to_flat_array(weights)
slices = [flat_weights[x: y] for x, y in zip(accumulate([0] + sizes), accumulate(sizes))]
for particle, slice in zip(self.particles, slices):
new_weights = self.reshape_flat_array(slice, particle_weight_shape)
particle.set_weights(new_weights)
return True
def compiled(self, **kwargs):
if not self.is_compiled:
self.compile_model(**kwargs)
self.is_compiled = True
return self
def train(self, batchsize=1, epoch=0):
self.compiled()
x, y = self.task.get_samples()
callbacks = []
if self.network_params.get('early_nan_stopping'):
callbacks.append(EarlyStoppingByInfNanLoss())
# 'or' does not work on empty lists
callbacks = callbacks if callbacks else None
"""
Please Note:
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached."""
history = self.model.fit(x=x, y=y, initial_epoch=epoch, epochs=epoch + 1, verbose=0,
batch_size=batchsize, callbacks=callbacks)
return history.history['loss'][-1]
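# Consequence of the Keras note above: each call to train() fits exactly one
# epoch, since initial_epoch=epoch and epochs=epoch + 1 ("final epoch" index).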
def train_particles(self, **kwargs):
self.compiled()
weights = self.get_particle_weights()
shaped_weights = self.reshape_flat_array(weights, self.get_intermediate_shapes())
self.set_intermediate_weights(shaped_weights)
_ = self.train(**kwargs) # This returns the loss values
new_weights = self.get_intermediate_weights()
self.set_particle_weights(new_weights)
return
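# Round-trip performed above: particle weights -> intermediate layers of the
# joint model -> one training step on the task -> updated intermediate
# weights -> written back into the particles.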
if __name__ == '__main__':
if True:
with SoupExperiment("soup") as exp:
for run_id in range(1):
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2))\
.with_keras_params(activation='linear').with_params(epsilon=0.0001)
# net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
# .with_keras_params(activation='linear')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
soup.seed()
for _ in tqdm(range(100)):
soup.evolve()
exp.log(soup.count())
# you can access soup.historical_particles[particle_uid].states[time_step]['loss']
# or soup.historical_particles[particle_uid].states[time_step]['weights']
# from soup.dill
exp.save(soup=soup.without_particles())
from task import TaskAdditionOfN
net_generator = lambda: TrainingNeuralNetworkDecorator(
WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
)
soup_generator = lambda: TaskingSoup(20, TaskAdditionOfN(4), net_generator)
with TaskingSoupExperiment(soup_generator, name='solving_soup') as exp:
exp.run_exp(reset_model=False)
if False:
soup_generator = lambda: Soup(10, net_generator).with_soup_params(remove_divergent=True, remove_zero=True)
with SoupExperiment(soup_generator, name='soup') as exp:
net_generator = lambda: TrainingNeuralNetworkDecorator(
WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
)
exp.run_exp(net_generator)
# net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
# net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
if False:
soup_generator = lambda: Soup(10, net_generator).with_soup_params(remove_divergent=True, remove_zero=True)
with SoupExperiment(soup_generator, name='soup') as exp:
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
.with_keras_params(activation='linear').with_params(epsilon=0.0001)
exp.run_exp(net_generator)
# net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
# .with_keras_params(activation='linear')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()

code/task.py Normal file

@ -0,0 +1,32 @@
from abc import ABC, abstractmethod
from typing import Tuple

import numpy as np


class Task(ABC):
    def __init__(self, input_shape, output_shape, **kwargs):
        assert all(x not in kwargs for x in ('input_shape', 'output_shape')), 'Duplicated arguments were given'
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.batchsize = kwargs.get('batchsize', 100)

    @abstractmethod
    def get_samples(self) -> Tuple[np.ndarray, np.ndarray]:
        raise NotImplementedError


class TaskAdditionOfN(Task):
    def __init__(self, n: int, input_shape=(4,), output_shape=1, **kwargs):
        assert all(x not in kwargs for x in ('input_shape', 'output_shape')), 'Duplicated arguments were given'
        assert n <= input_shape[0], f'You cannot add more values (n={n}) than your input is long (in={input_shape}).'
        kwargs.update(input_shape=input_shape, output_shape=output_shape)
        super(TaskAdditionOfN, self).__init__(**kwargs)
        self.n = n

    def get_samples(self) -> Tuple[np.ndarray, np.ndarray]:
        # Only the first n columns carry signal; the remaining columns stay zero.
        x = np.zeros((self.batchsize, *self.input_shape))
        x[:, :self.n] = np.random.standard_normal((self.batchsize, self.n)) * 0.5
        y = np.sum(x, axis=1)
        return x, y
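A quick usage sketch (illustrative, using the defaults above): TaskAdditionOfN(4) generates batches whose targets are the sums of the first four input entries.

    task = TaskAdditionOfN(4)
    x, y = task.get_samples()
    assert x.shape == (100, 4)                # batchsize defaults to 100
    assert np.allclose(y, x.sum(axis=1))      # targets are the row sums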


@ -1,111 +0,0 @@
from experiment import *
from network import *
from soup import *
import numpy as np
class LearningNeuralNetwork(NeuralNetwork):
@staticmethod
def mean_reduction(weights, features):
single_dim_weights = np.hstack([w.flatten() for w in weights])
shaped_weights = np.reshape(single_dim_weights, (1, features, -1))
x = np.mean(shaped_weights, axis=-1)
return x
@staticmethod
def fft_reduction(weights, features):
single_dim_weights = np.hstack([w.flatten() for w in weights])
x = np.fft.fft(single_dim_weights, n=features)[None, ...]
return x
@staticmethod
def random_reduction(_, features):
x = np.random.rand(features)[None, ...]
return x
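    # Illustrative (weight shapes assumed): each reduction maps an arbitrary list
    # of weight matrices onto a fixed-length feature vector of shape (1, features):
    #
    #   w = [np.ones((4, 2)), np.ones((2, 1))]
    #   LearningNeuralNetwork.mean_reduction(w, features=2).shape   # -> (1, 2)
    #   LearningNeuralNetwork.fft_reduction(w, features=2).shape    # -> (1, 2)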
def __init__(self, width, depth, features, **kwargs):
        raise DeprecationWarning  # deprecated: instantiation always raises, so the code below is unreachable
super().__init__(**kwargs)
self.width = width
self.depth = depth
self.features = features
self.compile_params = dict(loss='mse', optimizer='sgd')
self.model = Sequential()
self.model.add(Dense(units=self.width, input_dim=self.features, **self.keras_params))
for _ in range(self.depth - 1):
self.model.add(Dense(units=self.width, **self.keras_params))
self.model.add(Dense(units=self.features, **self.keras_params))
self.model.compile(**self.compile_params)
def apply_to_weights(self, old_weights, **kwargs):
        raise NotImplementedError  # unfinished: the reduction below was never wired in
        # reduced = kwargs.get('reduction', self.fft_reduction)(old_weights, self.features)
# build aggregations from old_weights
weights = self.get_weights_flat()
# call network
old_aggregation = self.aggregate_fft(weights, self.aggregates)
new_aggregation = self.apply(old_aggregation)
# generate list of new weights
new_weights_list = self.deaggregate_identically(new_aggregation, self.get_amount_of_weights())
new_weights_list = self.get_shuffler()(new_weights_list)
# write back new weights
new_weights = self.fill_weights(old_weights, new_weights_list)
# return results
if self.params.get("print_all_weight_updates", False) and not self.is_silent():
print("updated old weight aggregations " + str(old_aggregation))
print("to new weight aggregations " + str(new_aggregation))
print("resulting in network weights ...")
print(self.__class__.weights_to_string(new_weights))
return new_weights
def with_compile_params(self, **kwargs):
self.compile_params.update(kwargs)
return self
def learn(self, epochs, reduction, batchsize=1):
with tqdm(total=epochs, ascii=True,
desc='Type: {t} @ Epoch:'.format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for epoch in range(epochs):
old_weights = self.get_weights()
x = reduction(old_weights, self.features)
                save_state_callback = SaveStateCallback(self, epoch=epoch)
                # Keras expects `callbacks` to be a list of callbacks.
                history = self.model.fit(x=x, y=x, verbose=0, batch_size=batchsize,
                                         callbacks=[save_state_callback])
bar.postfix[1]["value"] = history.history['loss'][-1]
bar.update()
def vary(e=0.0, f=0.0):
return [
np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
np.array([[1.0+e], [0.0+f]], dtype=np.float32)
]
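# For example (illustrative): vary(0.01, 0.0) nudges only the leading weight of
# each layer away from the identity fixpoint:
#   [array([[1.01, 0.], [0., 0.], [0., 0.], [0., 0.]]),
#    array([[1.01, 0.], [0., 0.]]),
#    array([[1.01], [0.]])]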
if __name__ == '__main__':
net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
if False:
net.set_weights([
np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0], [0.0]], dtype=np.float32)
])
print(net.get_weights())
net.self_attack(100)
print(net.get_weights())
print(net.is_fixpoint())
if True:
net.set_weights(vary(0.01, 0.0))
print(net.get_weights())
for _ in range(5):
net.self_attack()
print(net.get_weights())
print(net.is_fixpoint())


@ -1,39 +0,0 @@
class PrintingObject:
    class SilenceSignal:
def __init__(self, obj, value):
self.obj = obj
self.new_silent = value
def __enter__(self):
self.old_silent = self.obj.get_silence()
self.obj.set_silence(self.new_silent)
def __exit__(self, exception_type, exception_value, traceback):
self.obj.set_silence(self.old_silent)
def __init__(self):
self.silent = True
def is_silent(self):
return self.silent
def get_silence(self):
return self.is_silent()
def set_silence(self, value=True):
self.silent = value
return self
def unset_silence(self):
self.silent = False
return self
def with_silence(self, value=True):
self.set_silence(value)
return self
def silence(self, value=True):
return self.__class__.SilenceSignal(self, value)
def _print(self, *args, **kwargs):
if not self.silent:
print(*args, **kwargs)
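A short usage sketch (illustrative): silence(False) temporarily un-silences an object and restores the previous state on exit.

    obj = PrintingObject()                  # silent by default
    with obj.silence(False):
        obj._print('visible inside the with-block')
    obj._print('suppressed again afterwards')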


@ -1,283 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def build_from_soup_or_exp(soup):
particles = soup.historical_particles
particle_list = []
for particle in particles.values():
particle_dict = dict(
trajectory=[event['weights'] for event in particle],
time=[event['time'] for event in particle],
action=[event.get('action', None) for event in particle],
counterpart=[event.get('counterpart', None) for event in particle]
)
if any([x is not None for x in particle_dict['counterpart']]):
print('counterpart')
particle_list.append(particle_dict)
return particle_list
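# Each returned entry describes one particle, e.g. (keys from above, values illustrative):
#   {'trajectory': [weights_t0, weights_t1, ...], 'time': [0, 1, ...],
#    'action': [...], 'counterpart': [...]}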
def plot_latent_trajectories(soup_or_experiment, filename='latent_trajectory_plot'):
assert isinstance(soup_or_experiment, (Experiment, Soup))
    colors = cl.scales['11']['div']['RdYlGn']
    data_dict = build_from_soup_or_exp(soup_or_experiment)
    scale = cl.interp(colors, len(data_dict) + 1)  # map color scale to N bins
    # t-SNE has no separate transform step, so each trajectory is embedded on its own below
    transformer = TSNE()
    for particle_dict in data_dict:
        array = np.asarray([np.hstack([x.flatten() for x in timestamp]).flatten()
                            for timestamp in particle_dict['trajectory']])
        particle_dict['trajectory'] = array
# Transform data accordingly and plot it
data = []
for p_id, particle_dict in enumerate(data_dict):
        transformed = transformer.fit_transform(particle_dict['trajectory'])
line_trace = go.Scatter(
x=transformed[:, 0],
y=transformed[:, 1],
            text='Particle - {}'.format(p_id),
line=dict(color=scale[p_id]),
# legendgroup='Position -{}'.format(pos),
name='Particle - {}'.format(p_id),
showlegend=True,
# hoverinfo='text',
mode='lines')
line_start = go.Scatter(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
marker=dict(
color='rgb(255, 0, 0)',
size=4
),
showlegend=False
)
line_end = go.Scatter(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
marker=dict(
color='rgb(0, 0, 0)',
size=4
),
showlegend=False
)
data.extend([line_trace, line_start, line_end])
    layout = dict(title='{} - Latent Trajectory Movement'.format('Soup'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
# import plotly.io as pio
# pio.write_image(fig, filename)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
def norm(val, a=0, b=0.25):
return (val - a) / (b - a)
data_list = build_from_soup_or_exp(soup_or_experiment)
if not data_list:
return
base_scale = cl.scales['9']['div']['RdYlGn']
# base_scale = cl.scales['9']['qual']['Set1']
scale = cl.interp(base_scale, len(data_list)+1) # Map color scale to N bins
# Fit the embedding space
transformer = PCA(n_components=2)
array = []
for particle_dict in data_list:
array.append(particle_dict['trajectory'])
transformer.fit(np.vstack(array))
# Transform data accordingly and plot it
data = []
for p_id, particle_dict in enumerate(data_list):
transformed = transformer.transform(particle_dict['trajectory'])
line_trace = go.Scatter3d(
x=transformed[:, 0],
y=transformed[:, 1],
z=np.asarray(particle_dict['time']),
            text='Particle: {}<br> It had {} lives.'.format(p_id, len(particle_dict['trajectory'])),
line=dict(
color=scale[p_id],
width=4
),
# legendgroup='Particle - {}'.format(p_id),
name='Particle -{}'.format(p_id),
showlegend=False,
hoverinfo='text',
mode='lines')
        line_start = go.Scatter3d(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
                                  z=[particle_dict['time'][0]],
marker=dict(
color='rgb(255, 0, 0)',
size=4
),
showlegend=False
)
        line_end = go.Scatter3d(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
                                z=[particle_dict['time'][-1]],
marker=dict(
color='rgb(0, 0, 0)',
size=4
),
showlegend=False
)
data.extend([line_trace, line_start, line_end])
axis_layout = dict(gridcolor='rgb(255, 255, 255)',
gridwidth=3,
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
titlefont=dict(
color='black',
size=30
)
)
layout = go.Layout(scene=dict(
# aspectratio=dict(x=2, y=2, z=2),
xaxis=dict(title='Transformed X', **axis_layout),
yaxis=dict(title='Transformed Y', **axis_layout),
zaxis=dict(title='Epoch', **axis_layout)),
# title='{} - Latent Trajectory Movement'.format('Soup'),
width=1024, height=1024,
margin=dict(l=0, r=0, b=0, t=0)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename, validate=True)
pass
def plot_histogram(bars_dict_list, filename='histogram_plot'):
# catagorical
ryb = cl.scales['10']['div']['RdYlBu']
data = []
    for bar_id, bars_dict in enumerate(bars_dict_list):
hist = go.Histogram(
histfunc="count",
y=bars_dict.get('value', 14),
x=bars_dict.get('name', 'gimme a name'),
showlegend=False,
marker=dict(
color=ryb[bar_id]
),
)
data.append(hist)
    layout = dict(title='{} Histogram Plot'.format('Experiment'),
                  height=400, width=400, margin=dict(l=0, r=0, t=0, b=0))
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def line_plot(line_dict_list, filename='lineplot'):
# lines with standard deviation
# Transform data accordingly and plot it
data = []
rdylgn = cl.scales['10']['div']['RdYlGn']
rdylgn_background = [scale + (0.4,) for scale in cl.to_numeric(rdylgn)]
for line_id, line_dict in enumerate(line_dict_list):
name = line_dict.get('name', 'gimme a name')
upper_bound = go.Scatter(
name='Upper Bound',
x=line_dict['x'],
y=line_dict['upper_y'],
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
fillcolor=rdylgn_background[line_id],
)
trace = go.Scatter(
x=line_dict['x'],
y=line_dict['main_y'],
mode='lines',
name=name,
            line=dict(color=rdylgn[line_id]),
fillcolor=rdylgn_background[line_id],
fill='tonexty')
        lower_bound = go.Scatter(
            name='Lower Bound',
            x=line_dict['x'],
            y=line_dict['lower_y'],
            marker=dict(color="#444"),
            line=dict(width=0),
            fillcolor=rdylgn_background[line_id],
            fill='tonexty',
            mode='lines')
data.extend([upper_bound, trace, lower_bound])
    layout = dict(title='{} Line Plot'.format('Experiment'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=None):
    files_to_look_for = files_to_look_for or []
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
            # This was either another file type, or the plot .html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_latent_trajectories_3D, ["trajectorys.dill", "soup.dill"])
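For reference, a direct invocation sketch (illustrative path, bypassing the CLI):

    search_and_apply('/path/to/experiments', plot_latent_trajectories_3D,
                     files_to_look_for=['trajectorys.dill', 'soup.dill'])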