TaskingSoup, TaskingSoupExperiment

This commit is contained in:
Si11ium 2019-07-03 09:17:20 +02:00
parent 320c5c26bc
commit 9bbe5df2b2
7 changed files with 504 additions and 136 deletions

View File

@@ -27,12 +27,21 @@ class Experiment(ABC):
def __init__(self, name=None, ident=None, **kwargs):
self.experiment_id = f'{ident or ""}_{time.time()}'
self.experiment_name = name or 'unnamed_experiment'
self.next_iteration = 0
self.iteration = 0
self.log_messages = list()
self.historical_particles = dict()
self.params = dict(exp_iterations=100, application_steps=100, prints=True, trains_per_application=100)
self.with_params(**kwargs)
def __copy__(self):
self_copy = self.__class__(name=self.experiment_name, **self.params)
self_copy.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
attr not in ['particles', 'historical_particles']}
return self_copy
def __enter__(self):
self.dir = os.path.join('experiments', f'exp-{self.experiment_name}-{self.experiment_id}-{self.next_iteration}')
self.dir = os.path.join('experiments', f'exp-{self.experiment_name}-{self.experiment_id}-{self.iteration}')
os.makedirs(self.dir)
print(f'** created {self.dir} **')
return self
@@ -40,7 +49,14 @@ class Experiment(ABC):
def __exit__(self, exc_type, exc_value, traceback):
self.save(experiment=self.without_particles())
self.save_log()
self.next_iteration += 1
# Clean Exit
self.reset_all()
# self.iteration += 1  # moved from here into run_exp
def with_params(self, **kwargs):
# Make them your own
self.params.update(kwargs)
return self
def log(self, message, **kwargs):
self.log_messages.append(message)
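The refactor above replaces the old per-call arguments (exp_iterations, step_limit, trains_per_application) with a single params dict that is configured fluently through with_params. A minimal sketch of the resulting call pattern, mirroring the __main__ usage further down in this commit (net_generator stands for any zero-argument network factory):

    # Configure once, then run without positional experiment arguments.
    with TaskExperiment().with_params(exp_iterations=30,
                                      application_steps=10,
                                      trains_per_application=1000) as exp:
        exp.run_exp(net_generator, reset_model=True)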
@@ -51,12 +67,6 @@ class Experiment(ABC):
for log_message in self.log_messages:
print(str(log_message), file=log_file)
def __copy__(self):
self_copy = self.__class__(name=self.experiment_name,)
self_copy.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
attr not in ['particles', 'historical_particles']}
return self_copy
def without_particles(self):
self_copy = copy.copy(self)
# self_copy.particles = [particle.states for particle in self.particles]
@@ -68,23 +78,28 @@ class Experiment(ABC):
with open(os.path.join(self.dir, f"{name}.dill"), "wb") as dill_file:
dill.dump(value, dill_file)
def reset_log(self):
self.log_messages = list()
@abstractmethod
def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
def run_net(self, net, **kwargs):
raise NotImplementedError
pass
def run_exp(self, network_generator, exp_iterations, step_limit=100, prints=False, reset_model=False, **kwargs):
def run_exp(self, network_generator, reset_model=False, **kwargs):
# INFO: run_id needs to be greater than 0 so that the exp stores the trajectories!
for run_id in range(exp_iterations):
for run_id in range(self.params.get('exp_iterations')):
network = network_generator()
self.run_net(network, step_limit, run_id=run_id + 1, **kwargs)
self.run_net(network, **kwargs)
self.historical_particles[run_id] = network
if prints:
if self.params.get('prints'):
print("Fixpoint? " + str(network.is_fixpoint()))
if reset_model:
self.reset_model()
self.iteration += 1
if reset_model:
self.reset_model()
def reset_all(self):
self.reset_log()
self.reset_model()
@@ -96,22 +111,22 @@ class FixpointExperiment(Experiment):
self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
self.interesting_fixpoints = []
def run_exp(self, network_generator, exp_iterations, logging=True, reset_model=False, **kwargs):
def run_exp(self, network_generator, logging=True, reset_model=False, **kwargs):
kwargs.update(reset_model=False)
super(FixpointExperiment, self).run_exp(network_generator, exp_iterations, **kwargs)
super(FixpointExperiment, self).run_exp(network_generator, **kwargs)
if logging:
self.log(self.counters)
if reset_model:
self.reset_model()
def run_net(self, net, step_limit=100, run_id=0, **kwargs):
def run_net(self, net, **kwargs):
if len(kwargs):
raise IllegalArgumentError
for i in range(step_limit):
for i in range(self.params.get('application_steps')):
if net.is_diverged() or net.is_fixpoint():
break
net.set_weights(net.apply_to_weights(net.get_weights()))
if run_id:
if self.iteration and hasattr(net, 'save_state'):
net.save_state(time=i)
self.count(net)
@@ -145,21 +160,32 @@ class MixedFixpointExperiment(FixpointExperiment):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super(MixedFixpointExperiment, self).__init__(**kwargs)
def run_net(self, net, step_limit=100, run_id=0, trains_per_application=100, **kwargs):
def run_net(self, net, **kwargs):
assert hasattr(net, 'train'), 'This Network must be trainable, i.e. use the "TrainingNeuralNetworkDecorator"!'
for evolution_step in range(step_limit):
for application in range(self.params.get('application_steps')):
epoch_num = self.params.get('trains_per_application') * application
net.set_weights(net.apply_to_weights(net.get_weights()))
if net.is_diverged() or net.is_fixpoint():
break
epoch_num = run_id * trains_per_application * evolution_step
with tqdm(postfix={"epoch": 0, "loss": 0, None: None},
bar_format="This Epoch:{postfix[epoch]} Loss: {postfix[loss]}%|{r_bar}") as bar:
for epoch in range(epoch_num, epoch_num + trains_per_application):
barformat = "Experiment Iteration: {postfix[iteration]} | "
barformat += "Evolution Step:{postfix[step]}| "
barformat += "Training Epoch:{postfix[epoch]}| "
barformat += "Loss: {postfix[loss]} | {bar}"
with tqdm(total=self.params.get('trains_per_application'),
postfix={'step': 0, 'loss': 0, 'iteration': self.iteration, 'epoch': 0, None: None},
bar_format=barformat) as bar:
# This iterates trains_per_application times; the offset is only for epoch enumeration
for epoch in range(epoch_num, epoch_num + self.params.get('trains_per_application')):
if net.is_diverged():
print('Network diverged to either inf or nan... breaking')
break
loss = net.train(epoch=epoch)
bar.postfix.update(epoch=epoch, loss=loss)
if epoch % 10 == 0:
bar.postfix.update(step=application, epoch=epoch, loss=loss, iteration=self.iteration)
bar.update()
if run_id and hasattr(net, 'save_state'):
epoch_num += 1
if self.iteration and hasattr(net, 'save_state'):
net.save_state()
self.count(net)
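A note on the tqdm usage above: it relies on a tqdm detail where a postfix dict containing a None key cannot be unpacked by the internal set_postfix(**postfix) call, so the dict is kept raw and bar_format can index it as {postfix[key]}, with bar.postfix.update(...) mutating it in place. A standalone sketch of the same pattern:

    from tqdm import tqdm

    # The None key keeps postfix a plain dict instead of a formatted string.
    with tqdm(total=100, postfix={'loss': 0.0, None: None},
              bar_format="Loss: {postfix[loss]} |{bar}{r_bar}") as bar:
        for step in range(100):
            bar.postfix.update(loss=round(1.0 / (step + 1), 4))
            bar.update()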
@@ -169,60 +195,71 @@ class TaskExperiment(MixedFixpointExperiment):
def __init__(self, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super(TaskExperiment, self).__init__(**kwargs)
self.task_performance = []
self.self_performance = []
def run_exp(self, network_generator, exp_iterations, logging=True, reset_model=False, **kwargs):
def run_exp(self, network_generator, logging=True, reset_model=False, **kwargs):
kwargs.update(reset_model=False, logging=logging)
super(FixpointExperiment, self).run_exp(network_generator, exp_iterations, **kwargs)
super(FixpointExperiment, self).run_exp(network_generator, **kwargs)
if reset_model:
self.reset_model()
pass
def run_net(self, net, step_limit=100, run_id=0, **kwargs):
def run_net(self, net, **kwargs):
assert hasattr(net, 'evaluate')
kwargs.update(step_limit=step_limit, run_id=run_id)
super(TaskExperiment, self).run_net(net, **kwargs)
# Get Performance without Training
selfX, selfY = net.get_samples(self_samples=True)
task_performance = net.evaluate(*net.get_samples(task_samples=True),
batchsize=net.get_amount_of_weights())
self_performance = net.evaluate(*net.get_samples(self_samples=True),
batchsize=net.get_amount_of_weights())
self.task_performance.append(net.evaluate(*net.get_samples(task_samples=True),
batchsize=net.get_amount_of_weights()))
self.self_performance.append(net.evaluate(*net.get_samples(self_samples=True),
batchsize=net.get_amount_of_weights()))
current_performance = dict(task_performance=task_performance,
self_performance=self_performance,
counters=self.counters, id=self.iteration)
self.log(current_performance)
pass
class SoupExperiment(Experiment):
def __init__(self, **kwargs):
def __init__(self, soup_generator, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
self.soup_generator = soup_generator
super(SoupExperiment, self).__init__(**kwargs)
def run_exp(self, network_generator, exp_iterations,
soup_generator=None, soup_iterations=0, prints=False, **kwargs):
for i in range(soup_iterations):
if not soup_generator:
raise ValueError('A Soup Generator needs to be given!')
soup = soup_generator()
def run_exp(self, network_generator, **kwargs):
for i in range(self.params.get('exp_iterations')):
soup = self.soup_generator()
soup.seed()
for _ in tqdm(range(exp_iterations)):
for _ in tqdm(range(self.params.get('application_steps'))):
soup.evolve()
self.log(soup.count())
self.save(soup=soup.without_particles())
K.clear_session()
def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
def run_net(self, net, **kwargs):
raise NotImplementedError
pass
class IdentLearningExperiment(Experiment):
class TaskingSoupExperiment(Experiment):
def __init__(self, **kwargs):
def __init__(self, soup_generator, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super(IdentLearningExperiment, self).__init__(**kwargs)
self.soup_generator = soup_generator
super(TaskingSoupExperiment, self).__init__(**kwargs)
def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
def run_exp(self, network_generator, **kwargs):
for i in range(self.params.get('exp_iterations')):
soup = self.soup_generator()
soup.seed()
for _ in tqdm(range(self.params.get('application_steps'))):
soup.evolve()
self.log(soup.count())
self.save(soup=soup.without_particles())
K.clear_session()
def run_net(self, net, **kwargs):
raise NotImplementedError()
pass

View File

@@ -3,6 +3,7 @@ import numpy as np
from abc import abstractmethod, ABC
from typing import List, Union, Tuple
from types import FunctionType
import warnings
# Functions and Operators
from operator import mul
@@ -38,6 +39,25 @@ class SaveStateCallback(Callback):
return
class EarlyStoppingByInfNanLoss(Callback):
def __init__(self, monitor='loss', verbose=0):
super(EarlyStoppingByInfNanLoss, self).__init__()
self.monitor = monitor
self.verbose = verbose
def on_epoch_end(self, epoch, logs: dict = None):
logs = logs or dict()
current = logs.get(self.monitor)
if current is None:
warnings.warn(f'Early stopping requires {self.monitor} available!', RuntimeWarning)
return
if np.isnan(current) or np.isinf(current):
if self.verbose > 0:
print(f'Epoch {epoch}: early stopping, loss is inf or nan')
self.model.stop_training = True
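A minimal usage sketch for the new callback; the model and data here are placeholders, not part of this repository:

    import numpy as np
    from tensorflow.python.keras.models import Sequential
    from tensorflow.python.keras.layers import Dense

    model = Sequential([Dense(1, input_shape=(4,), activation='linear')])
    model.compile(loss='mse', optimizer='sgd')
    x = np.random.standard_normal((32, 4))
    y = x.sum(axis=1)
    # Training stops as soon as the monitored loss becomes inf or nan.
    model.fit(x, y, epochs=10, verbose=0,
              callbacks=[EarlyStoppingByInfNanLoss(monitor='loss', verbose=1)])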
class NeuralNetwork(ABC):
"""
This is the Base Network Class, including abstract functions that must be implemented.
@@ -84,7 +104,7 @@ class NeuralNetwork(ABC):
def __init__(self, **params):
super().__init__()
self.params = dict(epsilon=0.00000000000001)
self.params = dict(epsilon=0.00000000000001, early_nan_stopping=True, store_states=False)
self.params.update(params)
self.name = params.get('name', self.__class__.__name__)
self.keras_params = dict(activation='linear', use_bias=False)
@@ -233,21 +253,23 @@ class ParticleDecorator:
def get_states(self):
return self.states
def attack(self, other_network):
def attack(self, other_network, iterations: int = 1):
"""
Set a network's weights based on the output of the application of my function to its weights.
"Alter a network's weights based on my evaluation"
:param other_network:
:param iterations:
:return:
"""
other_network.set_weights(self.apply_to_network(other_network))
for _ in range(iterations):
other_network.set_weights(self.apply_to_network(other_network))
return self
def self_attack(self, iterations=1):
def self_attack(self, iterations: int = 1):
"""
Set my weights based on the output of the application of my function to my own weights.
"Alter my network weights based on my evaluation"
:param other_network:
:param iterations:
:return:
"""
for _ in range(iterations):
@@ -255,7 +277,7 @@ class ParticleDecorator:
return self
class TaskDecorator(TaskAdditionOf2):
class TaskDecorator(ParticleTaskAdditionOf2):
def __init__(self, network, **kwargs):
super(TaskDecorator, self).__init__(**kwargs)
@@ -271,12 +293,21 @@ class TaskDecorator(TaskAdditionOf2):
if task_samples:
return super(TaskDecorator, self).get_samples()
elif self_samples:
return self.network.get_samples()
elif prng() >= kwargs.get('split', 0.5):
return super(TaskDecorator, self).get_samples()
else:
return self.network.get_samples()
self_x, self_y = self.network.get_samples()
task_x, task_y = super(TaskDecorator, self).get_samples()
amount_of_weights = self.network.get_amount_of_weights()
# Overwrite half of the self-replication samples with task samples (no duplicate indices).
random_idx = np.random.choice(np.arange(amount_of_weights), amount_of_weights // 2, replace=False)
self_x[random_idx] = task_x[random_idx]
self_y[random_idx] = task_y[random_idx]
return self_x, self_y
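The intent of the mixing above is a batch in which half of the self-replication samples are replaced by task samples. A small standalone sketch of that intent (shapes illustrative):

    import numpy as np

    self_x = np.zeros((10, 4))  # stand-in for self-replication inputs
    task_x = np.ones((10, 4))   # stand-in for task inputs

    # Overwrite half of the rows, chosen without replacement.
    idx = np.random.choice(np.arange(len(self_x)), len(self_x) // 2, replace=False)
    self_x[idx] = task_x[idx]
    assert self_x.sum() == (len(self_x) // 2) * self_x.shape[1]  # exactly half are task rows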
class WeightwiseNeuralNetwork(NeuralNetwork):
@@ -304,7 +335,7 @@ class WeightwiseNeuralNetwork(NeuralNetwork):
# normalize [layer, cell, position]
for idx in range(1, sample.shape[1]):
sample[:, idx] = sample[:, idx] / np.max(sample[:, idx])
return sample, sample
return sample, sample[:, 0]
def apply_to_weights(self, weights) -> List[np.ndarray]:
# ToDo: Insert DocString
@@ -427,6 +458,7 @@ class AggregatingNeuralNetwork(NeuralNetwork):
class RecurrentNeuralNetwork(NeuralNetwork):
def __init__(self, width, depth, **kwargs):
raise NotImplementedError
super().__init__(**kwargs)
self.features = 1
self.width = width
@@ -510,10 +542,17 @@ class TrainingNeuralNetworkDecorator:
self.model_compiled = True
return self
def train(self, batchsize=1, store_states=False, epoch=0):
def train(self, batchsize=1, epoch=0):
self.compiled()
x, y = self.network.get_samples()
savestatecallback = [SaveStateCallback(network=self, epoch=epoch)] if store_states else None
callbacks = []
if self.get_params().get('store_states'):
callbacks.append(SaveStateCallback(network=self, epoch=epoch))
if self.get_params().get('early_nan_stopping'):
callbacks.append(EarlyStoppingByInfNanLoss())
# Pass None instead of an empty callback list
callbacks = callbacks if callbacks else None
"""
Please Note:
@@ -526,7 +565,7 @@ class TrainingNeuralNetworkDecorator:
given by `epochs`, but merely until the epoch
of index `epochs` is reached."""
history = self.network.model.fit(x=x, y=y, initial_epoch=epoch, epochs=epoch+1, verbose=0,
batch_size=batchsize, callbacks=savestatecallback)
batch_size=batchsize, callbacks=callbacks)
return history.history['loss'][-1]
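Since fit is invoked with initial_epoch=epoch and epochs=epoch + 1, every train() call runs exactly one epoch and the epoch argument merely numbers it. The resulting outer loop looks like this, net being any compiled TrainingNeuralNetworkDecorator instance:

    losses = []
    for epoch in range(100):
        loss = net.train(epoch=epoch)  # one epoch per call, labeled by `epoch`
        losses.append(loss)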
def learn_from(self, other_network, batchsize=1):
@@ -558,11 +597,10 @@ if __name__ == '__main__':
if True:
# WeightWise Neural Network
net_generator = lambda: TrainingNeuralNetworkDecorator(TaskDecorator(
WeightwiseNeuralNetwork(width=2, depth=2))).with_keras_params(activation='linear')
with TaskExperiment() as exp:
exp.run_exp(net_generator, 10, trains_per_application=10)
exp.reset_all()
with TaskExperiment().with_params(application_steps=10, trains_per_application=1000, exp_iterations=30) as exp:
net_generator = lambda: TrainingNeuralNetworkDecorator(TaskDecorator(
WeightwiseNeuralNetwork(width=4, depth=3))).with_keras_params(activation='linear')
exp.run_exp(net_generator, reset_model=True)
if False:
# Aggregating Neural Network
@@ -585,8 +623,7 @@ if __name__ == '__main__':
# ok so this works quite reliably
run_count = 1000
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(
width=2, depth=2).with_params(epsilon=0.0001, steplimit=2, trains_per_application=10
)).with_keras_params(optimizer='sgd')
width=2, depth=2).with_params(epsilon=0.0001)).with_keras_params(optimizer='sgd')
with MixedFixpointExperiment().with_params(exp_iterations=1) as exp:
for run_id in tqdm(range(run_count+1)):
exp.run_exp(net_generator)
@@ -600,7 +637,7 @@ if __name__ == '__main__':
net = TrainingNeuralNetworkDecorator(
AggregatingNeuralNetwork(4, width=2, depth=2).with_params(epsilon=0.1e-6))
for run_id in tqdm(range(run_count+1)):
loss = net.compiled().train()
current_loss = net.compiled().train()
if run_id % 100 == 0:
net.print_weights()
old_aggs, _ = net.get_aggregated_weights()
@@ -609,7 +646,7 @@ if __name__ == '__main__':
print("new weights agg: " + str(new_aggs))
print("Fixpoint? " + str(net.is_fixpoint()))
print("Fixpoint after Agg? " + str(fp))
print("Loss " + str(loss))
print("Loss " + str(current_loss))
print()
if False:
@@ -620,10 +657,10 @@ if __name__ == '__main__':
net = TrainingNeuralNetworkDecorator(RecurrentNeuralNetwork(width=2, depth=2)
).with_keras_params(optimizer='sgd', activation='linear')
for run_id in tqdm(range(run_count+1)):
loss = net.compiled().train()
current_loss = net.compiled().train()
if run_id % 500 == 0:
net.print_weights()
# print(net.apply_to_network(net))
print("Fixpoint? " + str(net.is_fixpoint()))
print("Loss " + str(loss))
print("Loss " + str(current_loss))
print()

View File

@@ -0,0 +1,66 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
class DataPlotter:
def __init__(self, path=None):
self.path = path or os.getcwd()
pass
def search_and_apply(self, plotting_function, files_to_look_for=None, absolut_file_or_folder=None):
absolut_file_or_folder = absolut_file_or_folder or self.path
files_to_look_for = files_to_look_for or list()
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
self.search_and_apply(plotting_function, files_to_look_for=files_to_look_for,
absolut_file_or_folder=sub_file_or_folder.path)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists(
'{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
try:
plotting_function((names, exp), filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
# This was either another file type or the plot .html already exists.
pass
if __name__ == '__main__':
plotter = DataPlotter()
pass

View File

@@ -0,0 +1,109 @@
import os
from collections import defaultdict
# noinspection PyUnresolvedReferences
from soup import Soup
from experiment import TaskExperiment
from argparse import ArgumentParser
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
import numpy as np
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def line_plot(exp: TaskExperiment, filename='lineplot'):
assert isinstance(exp, TaskExperiment), ' This has to be a TaskExperiment!'
traces, data = [], defaultdict(list)
color_scale = cl.scales['3']['div']['RdYlBu']
# Sort data per Key
for message in exp.log_messages:
for key in message.keys():
try:
data[key].append(-0.1 if np.isnan(message[key]) or np.isinf(message[key]) else message[key])
except TypeError:
data[key].append(message[key])
for line_id, key in enumerate(data.keys()):
if key not in ['counters', 'id']:
trace = go.Scatter(
x=[x for x in range(len(data[key]))],
y=data[key],
name=key,
line=dict(
color=color_scale[line_id],
width=5
),
)
traces.append(trace)
else:
continue
layout = dict(xaxis=dict(title='Trains per self-application', titlefont=dict(size=20)),
yaxis=dict(title='Average amount of fixpoints found',
titlefont=dict(size=20),
# type='log',
# range=[0, 2]
),
legend=dict(orientation='h', x=0.3, y=-0.3),
# height=800, width=800,
margin=dict(b=0)
)
fig = go.Figure(data=traces, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=None, override=False):
# ToDo: Clean this Mess
assert os.path.exists(absolut_file_or_folder), f'The given path does not exist! Given: {absolut_file_or_folder}'
files_to_look_for = files_to_look_for or list()
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function,
files_to_look_for=files_to_look_for, override=override)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for or not files_to_look_for:
if not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])) or override:
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
# Plot.html already exists.
pass
else:
# This was a wrong file type.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, line_plot, override=True)

View File

@@ -104,7 +104,7 @@ if __name__ == '__main__':
for run_id in range(10):
net = TrainingNeuralNetworkDecorator(FFTNeuralNetwork(2, width=2, depth=2))\
.with_params(epsilon=0.0001, activation='sigmoid')
exp.run_net(net, 500, 10)
exp.run_net(net)
net.print_weights()

View File

@@ -1,30 +1,30 @@
import random
from operator import mul
from functools import reduce
from tensorflow.python.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.python.keras import backend as K
from network import *
from math import sqrt
def prng():
return random.random()
class Soup(object):
def __init__(self, size, generator, **kwargs):
self.size = size
self.generator = generator
self.particles = []
self.historical_particles = {}
self.params = dict(attacking_rate=0.1, learn_from_rate=0.1, train=0, learn_from_severity=1)
self.params.update(kwargs)
self.soup_params = dict(attacking_rate=0.1, learn_from_rate=0.1, train=0, learn_from_severity=1)
self.soup_params.update(kwargs)
self.time = 0
self.is_seeded = False
self.is_compiled = False
def __copy__(self):
copy_ = Soup(self.size, self.generator, **self.params)
copy_ = Soup(self.size, self.generator, **self.soup_params)
copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
attr not in ['particles', 'historical_particles']}
return copy_
@@ -35,18 +35,18 @@ class Soup(object):
self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
return self_copy
def with_params(self, **kwargs):
self.params.update(kwargs)
def with_soup_params(self, **kwargs):
self.soup_params.update(kwargs)
return self
def generate_particle(self):
new_particle = ParticleDecorator(self.generator())
self.historical_particles[new_particle.get_uid()] = new_particle
return new_particle
def get_particle(self, uid, otherwise=None):
return self.historical_particles.get(uid, otherwise)
def seed(self):
if not self.is_seeded:
self.particles = []
@@ -55,43 +55,43 @@ class Soup(object):
else:
print('already seeded!')
self.is_seeded = True
return self
return self
def evolve(self, iterations=1):
for _ in range(iterations):
self.time += 1
for particle_id, particle in enumerate(self.particles):
description = {'time': self.time}
if prng() < self.params.get('attacking_rate'):
if prng() < self.soup_params.get('attacking_rate'):
other_particle_id = int(prng() * len(self.particles))
other_particle = self.particles[other_particle_id]
particle.attack(other_particle)
description['action'] = 'attacking'
description['counterpart'] = other_particle.get_uid()
if prng() < self.params.get('learn_from_rate'):
if prng() < self.soup_params.get('learn_from_rate'):
other_particle_id = int(prng() * len(self.particles))
other_particle = self.particles[other_particle_id]
for _ in range(self.params.get('learn_from_severity', 1)):
for _ in range(self.soup_params.get('learn_from_severity', 1)):
particle.learn_from(other_particle)
description['action'] = 'learn_from'
description['counterpart'] = other_particle.get_uid()
for _ in range(self.params.get('train', 0)):
for _ in range(self.soup_params.get('train', 0)):
# callbacks on save_state are broken for TrainingNeuralNetwork
loss = particle.train()  # store_states is now a network param, no longer a train() kwarg
description['fitted'] = self.params.get('train', 0)
description['fitted'] = self.soup_params.get('train', 0)
description['loss'] = loss
description['action'] = 'train_self'
description['counterpart'] = None
if self.params.get('remove_divergent') and particle.is_diverged():
if self.soup_params.get('remove_divergent') and particle.is_diverged():
new_particle = self.generate_particle()
self.particles[particle_id] = new_particle
description['action'] = 'divergent_dead'
description['counterpart'] = new_particle.get_uid()
if self.params.get('remove_zero') and particle.is_zero():
if self.soup_params.get('remove_zero') and particle.is_zero():
new_particle = self.generate_particle()
self.particles[particle_id] = new_particle
description['action'] = 'zero_dead'
@@ -113,7 +113,7 @@ class Soup(object):
else:
counters['other'] += 1
return counters
def print_all(self):
for particle in self.particles:
particle.print_weights()
@@ -122,62 +122,171 @@ class Soup(object):
class SolvingSoup(Soup):
def __init__(self, task: Task, particle_amount: int, particle_generator, depth: int=None, **kwargs):
super(SolvingSoup, self).__init__(particle_amount, particle_generator, **kwargs)
self.model = Sequential()
self.depth = depth or particle_amount - 1
self.task = task
@staticmethod
def weights_to_flat_array(weights: List[np.ndarray]) -> np.ndarray:
return np.concatenate([d.ravel() for d in weights])
self.network_params = dict()
@staticmethod
def reshape_flat_array(array, shapes: List[Tuple[int]]) -> List[np.ndarray]:
# Same thing, but with an additional np call
# sizes: List[int] = [int(np.prod(shape)) for shape in shapes]
sizes = [reduce(mul, shape) for shape in shapes]
# Split the incoming array into slices for layers
slices = [array[x: y] for x, y in zip(accumulate([0] + sizes), accumulate(sizes))]
# reshape them in accordance to the given shapes
weights = [np.reshape(weight_slice, shape) for weight_slice, shape in zip(slices, shapes)]
return weights
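A worked example of reshape_flat_array for a tiny two-layer shape list (values illustrative):

    import numpy as np

    flat = np.arange(10)     # 10 weights in one flat array
    shapes = [(2, 3), (4,)]  # 2*3 + 4 == 10

    layers = SolvingSoup.reshape_flat_array(flat, shapes)
    assert [w.shape for w in layers] == [(2, 3), (4,)]
    # layers[0] holds flat[0:6] reshaped to (2, 3); layers[1] holds flat[6:10]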
def __init__(self, population_size: int, task: Task, particle_generator, **kwargs):
super(SolvingSoup, self).__init__(population_size, particle_generator, **kwargs)
self.task = task
self.model: Sequential
self.network_params = dict(sparsity_rate=0.1, early_nan_stopping=True)
self.compile_params = dict(loss='mse', optimizer='sgd')
self.compile_params.update(kwargs.get('compile_params', {}))
def with_network_params(self, **params):
self.network_params.update(params)
return self
def _generate_model(self):
model = Sequential()
weights, last_weights = self.get_total_weight_amount(), 0
while weights:
n = int(sqrt(weights))
this_weights = int(sqrt(weights / n))
if not this_weights:
break
if not model.layers:
# First Input layer
model.add(Dense(this_weights, activation='linear', input_shape=self.task.input_shape))
else:
# Intermediate Layers
model.add(Dense(this_weights, activation='linear'))
model.add(BatchNormalization())
model.add(Dropout(rate=self.network_params.get('sparsity_rate')))
weights -= this_weights * last_weights
last_weights = this_weights
# Last Layer
model.add(Dense(self.task.output_shape[0]))  # Dense expects an int unit count
return model
def get_weights(self):
return self.model.get_weights()
def set_weights(self, weights: List[np.ndarray]):
self.model.set_weights(weights)
def set_intermediate_weights(self, weights: List[np.ndarray]):
all_weights = self.get_weights()
all_weights[1:-1] = weights
self.set_weights(all_weights)
def seed(self):
super(SolvingSoup, self).seed()
# Static First Layer
self.model.add(Dense(self.network_params.get('first_layer_units', 10), input_shape=self.task.input_shape))
self.model.add(BatchNormalization())
for layer_num in range(self.depth):
# ToDo !!!!!!!!!!
self.model.add(Dense())
self.model.add(Dropout(rate=self.params.get('sparsity_rate', 0.1)))
has_to_be_zero =
if has_to_be_zero:
raise ValueError(f'This Combination does not Work!, There are still {has_to_be_zero} unnassigned Weights!')
self.model.add(Dense(left_over_units))
self.model.add(Dense(self.task.output_shape))
K.clear_session()
self.model = self._generate_model()
pass
def compile_model(self, **kwargs):
compile_params = copy.deepcopy(self.compile_params)
compile_params.update(kwargs)
return self.model.compile(**compile_params)
if not self.is_compiled:
compile_params = copy.deepcopy(self.compile_params)
compile_params.update(kwargs)
return self.model.compile(**compile_params)
else:
raise BrokenPipeError('This Model is already compiled! Something went wrong in the Pipeline!')
def get_total_weight_amount(self):
if self.is_seeded:
return sum([x.get_amount_of_weights() for x in self.particles])
else:
return 0
def get_shapes(self):
return [x.shape for x in self.get_weights()]
def get_intermediate_shapes(self):
weights = [x.shape for x in self.get_weights()]
return weights[1:-1]
def predict(self, x):
return self.model.predict(x)
def evolve(self, **kwargs):
super(SolvingSoup, self).evolve(iterations=1)
def get_particle_weights(self):
return np.concatenate([x.get_weights_flat() for x in self.particles])
def set_particle_weights(self, weights):
particle_weight_shape = self.particles[0].shapes(self.particles[0].get_weights())
sizes = [x.get_amount_of_weights() for x in self.particles]
flat_weights = self.weights_to_flat_array(weights)
slices = [flat_weights[x: y] for x, y in zip(accumulate([0] + sizes), accumulate(sizes))]
for particle, weight in zip(self.particles, slices):
particle.set_weights(self.reshape_flat_array(weight, particle_weight_shape))
return True
def compiled(self, **kwargs):
if not self.is_compiled:
self.seed()
self.compile_model(**kwargs)
self.is_compiled = True
return self
def train(self, batchsize=1, epoch=0):
self.compiled()
x, y = self.task.get_samples()
callbacks = []
if self.network_params.get('early_nan_stopping'):
callbacks.append(EarlyStoppingByInfNanLoss())
# Pass None instead of an empty callback list
callbacks = callbacks if callbacks else None
"""
Please Note:
epochs: Integer. Number of epochs to train the model.
An epoch is an iteration over the entire `x` and `y`
data provided.
Note that in conjunction with `initial_epoch`,
`epochs` is to be understood as "final epoch".
The model is not trained for a number of iterations
given by `epochs`, but merely until the epoch
of index `epochs` is reached."""
history = self.model.fit(x=x, y=y, initial_epoch=epoch, epochs=epoch + 1, verbose=0,
batch_size=batchsize, callbacks=callbacks)
return history.history['loss'][-1]
def train_at_particle_level(self):
self.compiled()
weights = self.get_particle_weights()
shaped_weights = self.reshape_flat_array(weights, self.get_intermediate_shapes())
self.set_intermediate_weights(shaped_weights)
return
if __name__ == '__main__':
if True:
with SoupExperiment(name='soup') as exp:
from task import ParticleTaskAdditionOf2
soup_generator = lambda: SolvingSoup(20, ParticleTaskAdditionOf2(), net_generator)
with SoupExperiment(soup_generator, name='solving_soup') as exp:
net_generator = lambda: TrainingNeuralNetworkDecorator(
WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
)
soup_generator = lambda: Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True)
exp.run_exp(net_generator, 10, soup_generator, 1, False)
exp.run_exp(net_generator)
if True:
soup_generator = lambda: Soup(10, net_generator).with_soup_params(remove_divergent=True, remove_zero=True)
with SoupExperiment(soup_generator, name='soup') as exp:
net_generator = lambda: TrainingNeuralNetworkDecorator(
WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
)
exp.run_exp(net_generator)
# net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
# net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
@@ -185,12 +294,12 @@ if __name__ == '__main__':
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
if True:
with SoupExperiment(name='soup') as exp:
soup_generator = lambda: Soup(10, net_generator).with_soup_params(remove_divergent=True, remove_zero=True)
with SoupExperiment(soup_generator, name='soup') as exp:
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
.with_keras_params(activation='linear').with_params(epsilon=0.0001)
soup_generator = lambda: Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
exp.run_exp(net_generator, 10, soup_generator, 1, False)
exp.run_exp(net_generator)
# net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
# .with_keras_params(activation='linear')\

View File

@@ -15,14 +15,24 @@ class Task(ABC):
raise NotImplementedError
class TaskAdditionOf2(Task):
class ParticleTaskAdditionOf2(Task):
def __init__(self, **kwargs):
super(TaskAdditionOf2, self).__init__(input_shape=(4,), output_shape=(1, ), **kwargs)
super(ParticleTaskAdditionOf2, self).__init__(input_shape=(4,), output_shape=(1, ), **kwargs)
def get_samples(self) -> Tuple[np.ndarray, np.ndarray]:
x = np.zeros((self.batchsize, *self.input_shape))
x[:, :2] = np.random.standard_normal((self.batchsize, 2)) * 0.5
y = np.zeros_like(x)
y[:, -1] = np.sum(x, axis=1)
return x, y
y = np.sum(x, axis=1)
return x, y
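For reference, get_samples now returns one scalar target per sample instead of the old zero-padded vector. A quick shape check, assuming Task forwards a batchsize kwarg as the attribute use above suggests:

    import numpy as np

    task = ParticleTaskAdditionOf2(batchsize=8)  # batchsize assumed to be a Task kwarg
    x, y = task.get_samples()
    assert x.shape == (8, 4) and y.shape == (8,)
    assert np.allclose(y, x.sum(axis=1))  # y is the row sum of x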
class SoupTask(Task):
def __init__(self, input_shape, output_shape):
super(SoupTask, self).__init__(input_shape, output_shape)
pass
def get_samples(self) -> Tuple[np.ndarray, np.ndarray]:
raise NotImplementedError
# ToDo: Continue here.