Compare commits

...

7 Commits
master ... TF20

Author SHA1 Message Date
Steffen Illium
a3cba1188f Delete New Text Document.txt 2023-11-14 16:22:29 +01:00
Steffen Illium
2562b3d00b Delete literature directory 2023-11-14 16:22:19 +01:00
Steffen Illium
71d16ef939 Delete paper directory 2023-11-14 16:21:57 +01:00
Si11ium
4a81279b58 Refactor:
Step 4 - Aggregating Neural Networks
Step 5 - Training Neural Networks
2019-06-14 09:55:51 +02:00
Si11ium
9189759320 Refactor:
Step 4 - Aggregating Neural Networks
Step 5 - Training Neural Networks
2019-06-10 18:27:52 +02:00
Si11ium
203c5b45e3 Refactor:
Step 4 - Aggregating Neural Networks
Step 5 - Training Neural Networks
2019-06-08 21:28:38 +02:00
Si11ium
50f7f84084 Refactor:
Step 1 - Introduction of Weight object for global weight operations
Step2 - Cleanup
Step 3 - Redone WEightwise network updates in clean numpy code
2019-06-06 21:57:22 +02:00
17 changed files with 413 additions and 1448 deletions

View File

@@ -1,96 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from typing import List
from collections import defaultdict
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def plot_bars(names_bars_tuple, filename='histogram_plot'):
    # categorical
ryb = cl.scales['10']['div']['RdYlBu']
names, bars = names_bars_tuple
situations = list(bars[0].keys())
names = ['Weightwise', 'Aggregating', 'Recurrent'] # [name.split(' ')[0] for name in names]
data_dict = {}
for idx, name in enumerate(names):
data_dict[name] = bars[idx]
data = []
for idx, situation in enumerate(situations):
bar = go.Bar(
y=[data_dict[name][situation] for name in names],
# x=[key for key in data_dict[name].keys()],
x=names,
name=situation,
showlegend=True,
)
data.append(bar)
layout = dict(xaxis=dict(title="Networks", titlefont=dict(size=20)),
barmode='stack',
# height=400, width=400,
# margin=dict(l=20, r=20, t=20, b=20)
legend=dict(orientation="h", x=0.05)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
        if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):  # compare against the full path the plot is written to
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
bars = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
plotting_function((names, bars), filename='{}.html'.format(absolut_file_or_folder[:-5]))
else:
pass
            # This was not a file I should look for.
else:
        # This was either another file type, or the plot .html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_bars, files_to_look_for=['all_counters.dill'])
# , 'all_names.dill', 'all_notable_nets.dill'])
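
Note: the script above walks a results directory recursively and renders one stacked-bar plot per matching dill file. A typical invocation might look like the following; the deleted file's name is not shown in this compare view, so plot_histograms.py is only a placeholder:

    # hypothetical script name; -i/-o are the flags defined in build_args()
    python plot_histograms.py -i experiments/exp-my-experiment-123 -o out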

View File

@@ -1,129 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from typing import List
from collections import defaultdict
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def plot_box(exp: Experiment, filename='histogram_plot'):
    # categorical
ryb = cl.scales['10']['div']['RdYlBu']
data = []
for d in range(exp.depth):
names = ['D 10e-{}'.format(d)] * exp.trials
data.extend(names)
trace_list = []
vergence_box = go.Box(
y=exp.ys,
x=data,
name='Time to Vergence',
boxpoints=False,
showlegend=True,
marker=dict(
color=ryb[3]
),
)
fixpoint_box = go.Box(
y=exp.zs,
x=data,
name='Time as Fixpoint',
boxpoints=False,
showlegend=True,
marker=dict(
color=ryb[-1]
),
)
trace_list.extend([vergence_box, fixpoint_box])
layout = dict(title='{}'.format('Known Fixpoint Variation'),
titlefont=dict(size=30),
legend=dict(
orientation="h",
x=.1, y=-0.1,
font=dict(
size=20,
color='black'
),
),
boxmode='group',
boxgap=0,
# barmode='group',
bargap=0,
xaxis=dict(showgrid=False,
zeroline=True,
tickangle=0,
showticklabels=True),
yaxis=dict(
title='Steps',
zeroline=False,
titlefont=dict(
size=30
)
),
# height=400, width=400,
margin=dict(t=50)
)
fig = go.Figure(data=trace_list, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
        if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):  # compare against the full path the plot is written to
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except AttributeError:
pass
else:
pass
            # This was not a file I should look for.
else:
        # This was either another file type, or the plot .html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_box, files_to_look_for=['experiment.dill'])
# , 'all_names.dill', 'all_notable_nets.dill'])

View File

@@ -4,48 +4,54 @@ import dill
 from tqdm import tqdm
 import copy
+from tensorflow.python.keras import backend as K
+from abc import ABC, abstractmethod
-class Experiment:
+class Experiment(ABC):
     @staticmethod
     def from_dill(path):
         with open(path, "rb") as dill_file:
             return dill.load(dill_file)
+    @staticmethod
+    def reset_model():
+        K.clear_session()
     def __init__(self, name=None, ident=None):
-        self.experiment_id = '{}_{}'.format(ident or '', time.time())
+        self.experiment_id = f'{ident or ""}_{time.time()}'
         self.experiment_name = name or 'unnamed_experiment'
         self.next_iteration = 0
-        self.log_messages = []
-        self.historical_particles = {}
+        self.log_messages = list()
+        self.historical_particles = dict()
     def __enter__(self):
-        self.dir = os.path.join('experiments', 'exp-{name}-{id}-{it}'.format(
-            name=self.experiment_name, id=self.experiment_id, it=self.next_iteration)
-        )
+        self.dir = os.path.join('experiments', f'exp-{self.experiment_name}-{self.experiment_id}-{self.next_iteration}')
         os.makedirs(self.dir)
-        print("** created {dir} **".format(dir=self.dir))
+        print(f'** created {self.dir} **')
         return self
     def __exit__(self, exc_type, exc_value, traceback):
         self.save(experiment=self.without_particles())
         self.save_log()
         self.next_iteration += 1
     def log(self, message, **kwargs):
         self.log_messages.append(message)
         print(message, **kwargs)
     def save_log(self, log_name="log"):
-        with open(os.path.join(self.dir, "{name}.txt".format(name=log_name)), "w") as log_file:
+        with open(os.path.join(self.dir, f"{log_name}.txt"), "w") as log_file:
             for log_message in self.log_messages:
                 print(str(log_message), file=log_file)
     def __copy__(self):
-        copy_ = Experiment(name=self.experiment_name,)
-        copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
+        self_copy = self.__class__(name=self.experiment_name,)
+        self_copy.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
                           attr not in ['particles', 'historical_particles']}
-        return copy_
+        return self_copy
     def without_particles(self):
         self_copy = copy.copy(self)
@@ -55,19 +61,39 @@ class Experiment:
     def save(self, **kwargs):
         for name, value in kwargs.items():
-            with open(os.path.join(self.dir, "{name}.dill".format(name=name)), "wb") as dill_file:
+            with open(os.path.join(self.dir, f"{name}.dill"), "wb") as dill_file:
                 dill.dump(value, dill_file)
+    @abstractmethod
+    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
+        raise NotImplementedError
+        pass
+    def run_exp(self, network_generator, exp_iterations, prints=False, **kwargs):
+        # INFO Run_ID needs to be more than 0, so that exp stores the trajectories!
+        for run_id in range(exp_iterations):
+            network = network_generator()
+            self.run_net(network, 100, run_id=run_id + 1, **kwargs)
+            self.historical_particles[run_id] = network
+            if prints:
+                print("Fixpoint? " + str(network.is_fixpoint()))
+        self.reset_model()
+    def reset_all(self):
+        self.reset_model()
 class FixpointExperiment(Experiment):
+    if kwargs.get('logging', False):
+        self.log(self.counters)
     def __init__(self, **kwargs):
         kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
         super().__init__(**kwargs)
         self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
         self.interesting_fixpoints = []
-    def run_net(self, net, step_limit=100, run_id=0):
+    def run_net(self, net, step_limit=100, run_id=0, **kwargs):
         i = 0
         while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
             net.self_attack()
@@ -90,31 +116,56 @@ class FixpointExperiment(Experiment):
         else:
             self.counters['other'] += 1
+    def reset_counters(self):
+        for key in self.counters.keys():
+            self.counters[key] = 0
+        return True
+    def reset_all(self):
+        super(FixpointExperiment, self).reset_all()
+        self.reset_counters()
 class MixedFixpointExperiment(FixpointExperiment):
-    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0):
-        i = 0
-        while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
+    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
+        for i in range(step_limit):
+            if net.is_diverged() or net.is_fixpoint():
+                break
             net.self_attack()
             with tqdm(postfix=["Loss", dict(value=0)]) as bar:
                 for _ in range(trains_per_application):
                     loss = net.compiled().train()
                     bar.postfix[1]["value"] = loss
                     bar.update()
-            i += 1
         if run_id:
             net.save_state()
         self.count(net)
 class SoupExperiment(Experiment):
-    pass
+    def __init__(self, **kwargs):
+        super(SoupExperiment, self).__init__(name=kwargs.get('name', self.__class__.__name__))
+    def run_exp(self, network_generator, exp_iterations, soup_generator=None, soup_iterations=0, prints=False):
+        for i in range(soup_iterations):
+            soup = soup_generator()
+            soup.seed()
+            for _ in tqdm(exp_iterations):
+                soup.evolve()
+            self.log(soup.count())
+        self.save(soup=soup.without_particles())
+    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
+        raise NotImplementedError
+        pass
 class IdentLearningExperiment(Experiment):
     def __init__(self):
         super(IdentLearningExperiment, self).__init__(name=self.__class__.__name__)
-    pass
+    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
+        pass
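
The net effect of this hunk: the per-run loop that each __main__ block used to carry now lives in Experiment.run_exp, which takes a factory and calls network_generator() once per iteration. A minimal sketch of the new calling convention (a lambda factory is assumed here, since run_exp invokes its argument):

    from experiment import FixpointExperiment
    from network import ParticleDecorator, WeightwiseNeuralNetwork

    # one fresh, decorated network per iteration
    net_generator = lambda: ParticleDecorator(
        WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))

    with FixpointExperiment() as exp:
        exp.run_exp(net_generator, 10, logging=True)
        exp.reset_all()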

View File

@@ -1,118 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold import TSNE  # t_sne is a private module; use the public import paths
from sklearn.decomposition import PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def line_plot(names_exp_tuple, filename='lineplot'):
names, line_dict_list = names_exp_tuple
names = ['Weightwise', 'Aggregating', 'Recurrent']
if False:
data = []
base_scale = cl.scales['10']['div']['RdYlGn']
scale = cl.interp(base_scale, len(line_dict_list) + 1) # Map color scale to N bins
for ld_id, line_dict in enumerate(line_dict_list):
for data_point in ['ys', 'zs']:
trace = go.Scatter(
x=line_dict['xs'],
y=line_dict[data_point],
name='{} {}zero-fixpoints'.format(names[ld_id], 'non-' if data_point == 'zs' else ''),
line=dict(
# color=scale[ld_id],
width=5,
# dash='dash' if data_point == 'ys' else ''
),
)
data.append(trace)
if True:
data = []
base_scale = cl.scales['10']['div']['RdYlGn']
scale = cl.interp(base_scale, len(line_dict_list) + 1) # Map color scale to N bins
for ld_id, line_dict in enumerate(line_dict_list):
trace = go.Scatter(
x=line_dict['xs'],
y=line_dict['ys'],
name=names[ld_id],
line=dict( # color=scale[ld_id],
width=5
),
)
data.append(trace)
layout = dict(xaxis=dict(title='Trains per self-application', titlefont=dict(size=20)),
yaxis=dict(title='Average amount of fixpoints found',
titlefont=dict(size=20),
# type='log',
# range=[0, 2]
),
legend=dict(orientation='h', x=0.3, y=-0.3),
# height=800, width=800,
margin=dict(b=0)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
try:
plotting_function((names, exp), filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
        # This was either another file type, or the plot .html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, line_plot, ["all_data.dill"])

View File

@@ -1,191 +0,0 @@
import tensorflow as tf
from keras.models import Sequential, Model
from keras.layers import SimpleRNN, Dense
from keras.layers import Input, TimeDistributed
from tqdm import tqdm
import time
import os
import dill
from experiment import Experiment
import itertools
from typing import Union
import numpy as np
class Network(object):
def __init__(self, features, cells, layers, bias=False, recurrent=False):
self.features = features
self.cells = cells
self.num_layer = layers
bias_params = cells if bias else 0
# Recurrent network
if recurrent:
# First RNN
p_layer_1 = (self.features * self.cells + self.cells ** 2 + bias_params)
# All other RNN Layers
p_layer_n = (self.cells * self.cells + self.cells ** 2 + bias_params) * (self.num_layer - 1)
else:
# First Dense
p_layer_1 = (self.features * self.cells + bias_params)
# All other Dense Layers
p_layer_n = (self.cells * self.cells + bias_params) * (self.num_layer - 1)
# Final Dense
p_layer_out = self.features * self.cells + bias_params
self.parameters = np.sum([p_layer_1, p_layer_n, p_layer_out])
# Build network
cell = SimpleRNN if recurrent else Dense
self.inputs, x = Input(shape=(self.parameters // self.features,
self.features) if recurrent else (self.features,)), None
for layer in range(self.num_layer):
if recurrent:
x = SimpleRNN(self.cells, activation=None, use_bias=False,
return_sequences=True)(self.inputs if layer == 0 else x)
else:
x = Dense(self.cells, activation=None, use_bias=False,
)(self.inputs if layer == 0 else x)
self.outputs = Dense(self.features if recurrent else 1, activation=None, use_bias=False)(x)
print('Network initialized, i haz {p} params @:{e}Features: {f}{e}Cells: {c}{e}Layers: {l}'.format(
p=self.parameters, l=self.num_layer, c=self.cells, f=self.features, e='\n{}'.format(' ' * 5))
)
pass
def get_inputs(self):
return self.inputs
def get_outputs(self):
return self.outputs
class _BaseNetwork(Model):
def __init__(self, **kwargs):
super(_BaseNetwork, self).__init__(**kwargs)
# This is dirty
self.features = None
def get_weights_flat(self):
weights = super().get_weights()
flat = np.asarray(np.concatenate([x.flatten() for x in weights]))
return flat
def step(self, x):
pass
def step_other(self, other: Union[Sequential, Model]) -> bool:
pass
def get_parameter_count(self):
return np.sum([np.prod(x.shape) for x in self.get_weights()])
def train_on_batch(self, *args, **kwargs):
raise NotImplementedError
def compile(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def mean_abs_error(labels, predictions):
return np.mean(np.abs(predictions - labels), axis=-1)
@staticmethod
def mean_sqrd_error(labels, predictions):
return np.mean(np.square(predictions - labels), axis=-1)
class RecurrentNetwork(_BaseNetwork):
def __init__(self, network: Network, *args, **kwargs):
super().__init__(inputs=network.inputs, outputs=network.outputs)
self.features = network.features
self.parameters = network.parameters
assert self.parameters == self.get_parameter_count()
def step(self, x):
shaped = np.reshape(x, (1, -1, self.features))
return self.predict(shaped).flatten()
def fit(self, epochs=500, **kwargs):
losses = []
with tqdm(total=epochs, ascii=True,
desc='Type: {t}'. format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for _ in range(epochs):
x = self.get_weights_flat()
y = self.step(x)
weights = self.get_weights()
global_idx = 0
for idx, weight_matrix in enumerate(weights):
flattened = weight_matrix.flatten()
new_weights = y[global_idx:global_idx + flattened.shape[0]]
weights[idx] = np.reshape(new_weights, weight_matrix.shape)
global_idx += flattened.shape[0]
losses.append(self.mean_sqrd_error(y.flatten(), self.get_weights_flat()))
self.set_weights(weights)
bar.postfix[1]["value"] = losses[-1]
bar.update()
return losses
class FeedForwardNetwork(_BaseNetwork):
def __init__(self, network:Network, **kwargs):
super().__init__(inputs=network.inputs, outputs=network.outputs, **kwargs)
self.features = network.features
self.parameters = network.parameters
self.num_layer = network.num_layer
self.num_cells = network.cells
# assert self.parameters == self.get_parameter_count()
def step(self, x):
return self.predict(x)
def step_other(self, x):
return self.predict(x)
def fit(self, epochs=500, **kwargs):
losses = []
with tqdm(total=epochs, ascii=True,
desc='Type: {t} @ Epoch:'. format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for _ in range(epochs):
all_weights = self.get_weights_flat()
cell_idx = np.apply_along_axis(lambda x: x/self.num_cells, 0, np.arange(int(self.get_parameter_count())))
xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)
y = self.step(xc)
weights = self.get_weights()
global_idx = 0
for idx, weight_matrix in enumerate(weights):
# UPDATE THE WEIGHTS
flattened = weight_matrix.flatten()
new_weights = y[global_idx:global_idx + flattened.shape[0], 0]
weights[idx] = np.reshape(new_weights, weight_matrix.shape)
global_idx += flattened.shape[0]
losses.append(self.mean_sqrd_error(y[:, 0].flatten(), self.get_weights_flat()))
self.set_weights(weights)
bar.postfix[1]["value"] = losses[-1]
bar.update()
return losses
if __name__ == '__main__':
with Experiment() as exp:
features, cells, layers = 2, 2, 2
use_recurrent = False
if use_recurrent:
network = Network(features, cells, layers, recurrent=use_recurrent)
r = RecurrentNetwork(network)
loss = r.fit(epochs=10)
exp.save(rnet=r)
else:
network = Network(features, cells, layers, recurrent=use_recurrent)
ff = FeedForwardNetwork(network)
loss = ff.fit(epochs=10)
exp.save(ffnet=ff)
print(loss)
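
As a quick sanity check of the parameter-count formula in Network.__init__, take the values used in the __main__ block above (features=2, cells=2, layers=2, bias=False) for the dense, non-recurrent case:

    p_layer_1   = features * cells + bias_params               = 2 * 2 + 0 = 4
    p_layer_n   = (cells * cells + bias_params) * (layers - 1) = 4 * 1     = 4
    p_layer_out = features * cells + bias_params               = 2 * 2 + 0 = 4
    parameters  = 4 + 4 + 4 = 12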

View File

@@ -1,11 +1,13 @@
 import numpy as np
+from abc import abstractmethod, ABC
+from typing import List, Union
+from types import FunctionType
-from keras.models import Sequential
-from keras.callbacks import Callback
-from keras.layers import SimpleRNN, Dense
-import keras.backend as K
+from tensorflow.python.keras.models import Sequential
+from tensorflow.python.keras.callbacks import Callback
+from tensorflow.python.keras.layers import SimpleRNN, Dense
+from tensorflow.python.keras import backend as K
-from util import *
 from experiment import *
 # Suppress warnings and info messages
@@ -13,12 +15,12 @@ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 class SaveStateCallback(Callback):
-    def __init__(self, net, epoch=0):
+    def __init__(self, network, epoch=0):
         super(SaveStateCallback, self).__init__()
-        self.net = net
+        self.net = network
         self.init_epoch = epoch
-    def on_epoch_end(self, epoch, logs={}):
+    def on_epoch_end(self, epoch, logs=None):
         description = dict(time=epoch+self.init_epoch)
         description['action'] = 'train_self'
         description['counterpart'] = None
@@ -26,52 +28,110 @@ class SaveStateCallback(Callback):
         return
-class NeuralNetwork(PrintingObject):
+class Weights:
     @staticmethod
-    def weights_to_string(weights):
-        s = ""
-        for layer_id, layer in enumerate(weights):
-            for cell_id, cell in enumerate(layer):
-                s += "[ "
-                for weight_id, weight in enumerate(cell):
-                    s += str(weight) + " "
-                s += "]"
-            s += "\n"
-        return s
-    @staticmethod
-    def are_weights_diverged(network_weights):
-        for layer_id, layer in enumerate(network_weights):
-            for cell_id, cell in enumerate(layer):
-                for weight_id, weight in enumerate(cell):
-                    if np.isnan(weight):
-                        return True
-                    if np.isinf(weight):
-                        return True
-        return False
-    @staticmethod
-    def are_weights_within(network_weights, lower_bound, upper_bound):
-        for layer_id, layer in enumerate(network_weights):
-            for cell_id, cell in enumerate(layer):
-                for weight_id, weight in enumerate(cell):
-                    # could be a chain comparison "lower_bound <= weight <= upper_bound"
-                    if not (lower_bound <= weight and weight <= upper_bound):
-                        return False
-        return True
-    @staticmethod
-    def fill_weights(old_weights, new_weights_list):
-        new_weights = copy.deepcopy(old_weights)
-        current_weight_id = 0
-        for layer_id, layer in enumerate(new_weights):
-            for cell_id, cell in enumerate(layer):
-                for weight_id, weight in enumerate(cell):
-                    new_weight = new_weights_list[current_weight_id]
-                    new_weights[layer_id][cell_id][weight_id] = new_weight
-                    current_weight_id += 1
-        return new_weights
+    def __reshape_flat_array__(array, shapes):
+        sizes: List[int] = [int(np.prod(shape)) for shape in shapes]
+        # Split the incoming array into slices for layers
+        slices = [array[x: y] for x, y in zip(np.cumsum([0]+sizes), np.cumsum([0]+sizes)[1:])]
+        # reshape them in accordance with the given shapes
+        weights = [np.reshape(weight_slice, shape) for weight_slice, shape in zip(slices, shapes)]
+        return weights
+    def __init__(self, weight_vector: Union[List[np.ndarray], np.ndarray], flat_array_shape=None):
+        """
+        Weight class, for easy manipulation of weight vectors from Keras models
+        :param weight_vector: A numpy array holding weights
+        :type weight_vector: List[np.ndarray]
+        """
+        self.__iter_idx = [0, 0]
+        if flat_array_shape:
+            weight_vector = self.__reshape_flat_array__(weight_vector, flat_array_shape)
+        self.layers = weight_vector
+        # TODO: implement a way to access the cells directly
+        # self.cells = len(self)
+        # TODO: implement a way to access the weights directly
+        # self.weights = self.to_flat_array() ?
+    def __iter__(self):
+        self.__iter_idx = [0, 0]
+        return self
+    def __getitem__(self, item):
+        return self.layers[item]
+    def max(self):
+        return np.max(self.layers)
+    def avg(self):
+        return np.average(self.layers)
+    def __len__(self):
+        return sum([x.size for x in self.layers])
+    def shapes(self):
+        return [x.shape for x in self.layers]
+    def num_layers(self):
+        return len(self.layers)
+    def __copy__(self):
+        return copy.deepcopy(self)
+    def __next__(self):
+        # ToDo: Check iteration progress over layers
+        # ToDo: There is still a problem with iteration; currently the cell level is the last loop stage.
+        # Do we need this?
+        if self.__iter_idx[0] >= len(self.layers):
+            if self.__iter_idx[1] >= len(self.layers[self.__iter_idx[0]]):
+                raise StopIteration
+        result = self.layers[self.__iter_idx[0]][self.__iter_idx[1]]
+        if self.__iter_idx[1] >= len(self.layers[self.__iter_idx[0]]):
+            self.__iter_idx[0] += 1
+            self.__iter_idx[1] = 0
+        else:
+            self.__iter_idx[1] += 1
+        return result
+    def __repr__(self):
+        return f'Weights({self.to_flat_array().tolist()})'
+    def to_flat_array(self) -> np.ndarray:
+        return np.hstack([weight.flatten() for weight in self.layers])
+    def from_flat_array(self, array):
+        new_weights = self.__reshape_flat_array__(array, self.shapes())
+        self.layers = new_weights  # persist the reshaped layers so in-place ops like shuffle() take effect
+        return new_weights
+    def shuffle(self):
+        flat = self.to_flat_array()
+        np.random.shuffle(flat)
+        self.from_flat_array(flat)
+        return True
+    def are_diverged(self):
+        return any([np.isnan(x).any() for x in self.layers]) or any([np.isinf(x).any() for x in self.layers])
+    def are_within_bounds(self, lower_bound: float, upper_bound: float):
+        # True iff every weight lies within [lower_bound, upper_bound]
+        return all([((lower_bound <= x) & (x <= upper_bound)).all() for x in self.layers])
+    def aggregate_by(self, func: FunctionType, num_aggregates):
+        collection_sizes = len(self) // num_aggregates
+        weights = self.to_flat_array()[:collection_sizes * num_aggregates].reshape((num_aggregates, -1))
+        aggregated_weights = func(weights, num_aggregates)
+        left_overs = self.to_flat_array()[collection_sizes * num_aggregates:]
+        return aggregated_weights, left_overs
+class NeuralNetwork(ABC):
+    """
+    This is the Base Network Class, including abstract functions that must be implemented.
+    """
     def __init__(self, **params):
         super().__init__()
@@ -79,14 +139,12 @@ class NeuralNetwork(PrintingObject):
         self.params.update(params)
         self.keras_params = dict(activation='linear', use_bias=False)
         self.states = []
+        self.model: Sequential
-    def get_model(self):
-        raise NotImplementedError
-    def get_params(self):
+    def get_params(self) -> dict:
         return self.params
-    def get_keras_params(self):
+    def get_keras_params(self) -> dict:
         return self.keras_params
     def with_params(self, **kwargs):
@@ -97,96 +155,101 @@ class NeuralNetwork(PrintingObject):
         self.keras_params.update(kwargs)
         return self
-    def get_weights(self):
-        return self.model.get_weights()
+    def get_weights(self) -> Weights:
+        return Weights(self.model.get_weights())
-    def get_weights_flat(self):
-        return np.hstack([weight.flatten() for weight in self.get_weights()])
+    def get_weights_flat(self) -> np.ndarray:
+        return self.get_weights().to_flat_array()
-    def set_weights(self, new_weights):
-        return self.model.set_weights(new_weights)
+    def set_weights(self, new_weights: Weights):
+        return self.model.set_weights(new_weights.layers)
-    def apply_to_weights(self, old_weights):
-        raise NotImplementedError
+    @abstractmethod
+    def get_samples(self):
+        # TODO: add a docstring, telling the user what this does, e.g. what is a sample?
+        raise NotImplementedError
-    def apply_to_network(self, other_network):
+    @abstractmethod
+    def apply_to_weights(self, old_weights) -> Weights:
+        # TODO: add a docstring, telling the user what this does, e.g. what is applied?
+        raise NotImplementedError
+    def apply_to_network(self, other_network) -> Weights:
+        # TODO: add a docstring, telling the user what this does, e.g. what is applied?
         new_weights = self.apply_to_weights(other_network.get_weights())
         return new_weights
     def attack(self, other_network):
+        # TODO: add a docstring, telling the user what this does, e.g. what is an attack?
         other_network.set_weights(self.apply_to_network(other_network))
         return self
     def fuck(self, other_network):
+        # TODO: add a docstring, telling the user what this does, e.g. what is fucking?
         self.set_weights(self.apply_to_network(other_network))
         return self
     def self_attack(self, iterations=1):
+        # TODO: add a docstring, telling the user what this does, e.g. what is self attack?
         for _ in range(iterations):
             self.attack(self)
         return self
     def meet(self, other_network):
+        # TODO: add a docstring, telling the user what this does, e.g. what is meeting?
         new_other_network = copy.deepcopy(other_network)
         return self.attack(new_other_network)
     def is_diverged(self):
-        return self.are_weights_diverged(self.get_weights())
+        return self.get_weights().are_diverged()
     def is_zero(self, epsilon=None):
         epsilon = epsilon or self.get_params().get('epsilon')
-        return self.are_weights_within(self.get_weights(), -epsilon, epsilon)
+        return self.get_weights().are_within_bounds(-epsilon, epsilon)
-    def is_fixpoint(self, degree=1, epsilon=None):
+    def is_fixpoint(self, degree: int = 1, epsilon: float = None) -> bool:
         assert degree >= 1, "degree must be >= 1"
         epsilon = epsilon or self.get_params().get('epsilon')
-        old_weights = self.get_weights()
-        new_weights = copy.deepcopy(old_weights)
+        new_weights = copy.deepcopy(self.get_weights())
         for _ in range(degree):
             new_weights = self.apply_to_weights(new_weights)
-            if NeuralNetwork.are_weights_diverged(new_weights):
-                return False
-        for layer_id, layer in enumerate(old_weights):
-            for cell_id, cell in enumerate(layer):
-                for weight_id, weight in enumerate(cell):
-                    new_weight = new_weights[layer_id][cell_id][weight_id]
-                    if abs(new_weight - weight) >= epsilon:
-                        return False
-        return True
-    def repr_weights(self, weights=None):
-        return self.weights_to_string(weights or self.get_weights())
+            if new_weights.are_diverged():
+                return False
+        biggerEpsilon = (np.abs(new_weights.to_flat_array() - self.get_weights().to_flat_array()) >= epsilon).any()
+        # Boolean value needs to be flipped to answer "is_fixpoint"
+        return not biggerEpsilon
     def print_weights(self, weights=None):
-        print(self.repr_weights(weights))
+        print(weights or self.get_weights())
 class ParticleDecorator:
     next_uid = 0
-    def __init__(self, net):
+    def __init__(self, network):
+        # ToDo: Add DocString, What does it do?
         self.uid = self.__class__.next_uid
         self.__class__.next_uid += 1
-        self.net = net
+        self.network = network
         self.states = []
-        self.save_state(time=0,
-                        action='init',
-                        counterpart=None
-                        )
+        self.save_state(time=0, action='init', counterpart=None)
     def __getattr__(self, name):
-        return getattr(self.net, name)
+        return getattr(self.network, name)
     def get_uid(self):
         return self.uid
     def make_state(self, **kwargs):
-        weights = self.net.get_weights_flat()
-        if any(np.isinf(weights)) or any(np.isnan(weights)):
+        if self.network.is_diverged():
             return None
-        state = {'class': self.net.__class__.__name__, 'weights': weights}
+        state = {'class': self.network.__class__.__name__, 'weights': self.network.get_weights_flat()}
         state.update(kwargs)
         return state
@@ -196,15 +259,16 @@ class ParticleDecorator:
             self.states += [state]
         else:
             pass
+        return True
     def update_state(self, number, **kwargs):
         raise NotImplementedError('Result is vague')
-        if number < len(self.states):
-            self.states[number] = self.make_state(**kwargs)
-        else:
-            for i in range(len(self.states), number):
-                self.states += [None]
-            self.states += self.make_state(**kwargs)
+        # if number < len(self.states):
+        #     self.states[number] = self.make_state(**kwargs)
+        # else:
+        #     for i in range(len(self.states), number):
+        #         self.states += [None]
+        #     self.states += self.make_state(**kwargs)
     def get_states(self):
         return self.states
@@ -212,114 +276,78 @@ class ParticleDecorator:
 class WeightwiseNeuralNetwork(NeuralNetwork):
-    @staticmethod
-    def normalize_id(value, norm):
-        if norm > 1:
-            return float(value) / float(norm)
-        else:
-            return float(value)
     def __init__(self, width, depth, **kwargs):
+        # ToDo: Insert Docstring
         super().__init__(**kwargs)
-        self.width = width
-        self.depth = depth
+        self.width: int = width
+        self.depth: int = depth
         self.model = Sequential()
         self.model.add(Dense(units=self.width, input_dim=4, **self.keras_params))
         for _ in range(self.depth-1):
             self.model.add(Dense(units=self.width, **self.keras_params))
         self.model.add(Dense(units=1, **self.keras_params))
-    def get_model(self):
-        return self.model
-    def apply(self, *inputs):
-        stuff = np.transpose(np.array([[inputs[0]], [inputs[1]], [inputs[2]], [inputs[3]]]))
-        return self.model.predict(stuff)[0][0]
+    def apply(self, inputs):
+        # TODO: Write about it... What does it do?
+        return self.model.predict(inputs)
+    def get_samples(self):
+        weights = self.get_weights()
+        sample = np.asarray([
+            [weight, idx, *x] for idx, layer in enumerate(weights.layers) for x, weight in np.ndenumerate(layer)
+        ])
+        # normalize [layer, cell, position]
+        for idx in range(1, sample.shape[1]):
+            sample[:, idx] = sample[:, idx] / np.max(sample[:, idx])
+        return sample, sample
-    @classmethod
-    def compute_all_duplex_weight_points(cls, old_weights):
-        points = []
-        normal_points = []
-        max_layer_id = len(old_weights) - 1
-        for layer_id, layer in enumerate(old_weights):
-            max_cell_id = len(layer) - 1
-            for cell_id, cell in enumerate(layer):
-                max_weight_id = len(cell) - 1
-                for weight_id, weight in enumerate(cell):
-                    normal_layer_id = cls.normalize_id(layer_id, max_layer_id)
-                    normal_cell_id = cls.normalize_id(cell_id, max_cell_id)
-                    normal_weight_id = cls.normalize_id(weight_id, max_weight_id)
-                    points += [[weight, layer_id, cell_id, weight_id]]
-                    normal_points += [[weight, normal_layer_id, normal_cell_id, normal_weight_id]]
-        return points, normal_points
-    @classmethod
-    def compute_all_weight_points(cls, all_weights):
-        return cls.compute_all_duplex_weight_points(all_weights)[0]
-    @classmethod
-    def compute_all_normal_weight_points(cls, all_weights):
-        return cls.compute_all_duplex_weight_points(all_weights)[1]
-    def apply_to_weights(self, old_weights):
-        new_weights = copy.deepcopy(self.get_weights())
-        for (weight_point, normal_weight_point) in zip(*self.__class__.compute_all_duplex_weight_points(old_weights)):
-            weight, layer_id, cell_id, weight_id = weight_point
-            _, normal_layer_id, normal_cell_id, normal_weight_id = normal_weight_point
-            new_weight = self.apply(*normal_weight_point)
-            new_weights[layer_id][cell_id][weight_id] = new_weight
-            if self.params.get("print_all_weight_updates", False) and not self.is_silent():
-                print("updated old weight {weight}\t @ ({layer},{cell},{weight_id}) "
-                      "to new value {new_weight}\t calling @ ({normal_layer},{normal_cell},{normal_weight_id})").format(
-                    weight=weight, layer=layer_id, cell=cell_id, weight_id=weight_id, new_weight=new_weight,
-                    normal_layer=normal_layer_id, normal_cell=normal_cell_id, normal_weight_id=normal_weight_id)
-        return new_weights
+    def apply_to_weights(self, weights) -> Weights:
+        # ToDo: Insert DocString
+        # Transform the weight matrix into a horizontal stack as: array([[weight, layer, cell, position], ...])
+        transformed_weights = self.get_samples()[0]
+        new_weights = self.apply(transformed_weights)
+        # use the original weight shape to transform the new tensor
+        return Weights(new_weights, flat_array_shape=weights.shapes())
-    def compute_samples(self):
-        samples = []
-        for normal_weight_point in self.compute_all_normal_weight_points(self.get_weights()):
-            weight, normal_layer_id, normal_cell_id, normal_weight_id = normal_weight_point
-            sample = np.transpose(np.array([[weight], [normal_layer_id], [normal_cell_id], [normal_weight_id]]))
-            samples += [sample[0]]
-        samples_array = np.asarray(samples)
-        return samples_array, samples_array[:, 0]
 class AggregatingNeuralNetwork(NeuralNetwork):
     @staticmethod
-    def aggregate_average(weights):
-        total = 0
-        count = 0
-        for weight in weights:
-            total += float(weight)
-            count += 1
-        return total / float(count)
+    def aggregate_fft(array: np.ndarray, aggregates: int):
+        flat = array.flatten()
+        # noinspection PyTypeChecker
+        fft_reduction = np.fft.fftn(flat, aggregates)
+        return fft_reduction
     @staticmethod
-    def aggregate_max(weights):
-        max_found = weights[0]
-        for weight in weights:
-            max_found = weight > max_found and weight or max_found
-        return max_found
+    def aggregate_average(array, _):
+        return np.average(array, axis=1)
+    @staticmethod
+    def aggregate_max(array, _):
+        return np.max(array, axis=1)
     @staticmethod
     def deaggregate_identically(aggregate, amount):
-        return [aggregate for _ in range(amount)]
+        # ToDo: Find a better way than using a hardcoded [0]
+        return np.hstack([aggregate for _ in range(amount)])[0]
     @staticmethod
-    def shuffle_not(weights_list):
-        return weights_list
+    def shuffle_not(weights: Weights):
+        """
+        Doesn't do a thing. f(x)
+        :param weights: A List of Weights
+        :type weights: Weights
+        :return: The same old weights.
+        :rtype: Weights
+        """
+        return weights
     @staticmethod
-    def shuffle_random(weights_list):
-        import random
-        random.shuffle(weights_list)
-        return weights_list
+    def shuffle_random(weights: Weights):
+        assert weights.shuffle()
+        return weights
     def __init__(self, aggregates, width, depth, **kwargs):
         super().__init__(**kwargs)
@@ -332,9 +360,6 @@ class AggregatingNeuralNetwork(NeuralNetwork):
         self.model.add(Dense(units=width, **self.keras_params))
         self.model.add(Dense(units=self.aggregates, **self.keras_params))
-    def get_model(self):
-        return self.model
     def get_aggregator(self):
         return self.params.get('aggregator', self.aggregate_average)
@@ -345,180 +370,58 @@ class AggregatingNeuralNetwork(NeuralNetwork):
         return self.params.get('shuffler', self.shuffle_not)
     def get_amount_of_weights(self):
-        total_weights = 0
-        for layer_id, layer in enumerate(self.get_weights()):
-            for cell_id, cell in enumerate(layer):
-                for weight_id, weight in enumerate(cell):
-                    total_weights += 1
-        return total_weights
+        return len(self.get_weights())
-    def apply(self, *inputs):
-        stuff = np.transpose(np.array([[inputs[i]] for i in range(self.aggregates)]))
-        return self.model.predict(stuff)[0]
+    def apply(self, inputs):
+        # You need to add a dimension here... "..." copies array values
+        return self.model.predict(inputs[None, ...])
-    def apply_to_weights(self, old_weights):
-        # build aggregations from old_weights
-        collection_size = self.get_amount_of_weights() // self.aggregates
-        collections, leftovers = self.collect_weights(old_weights, collection_size)
-        # call network
-        old_aggregations = [self.get_aggregator()(collection) for collection in collections]
-        new_aggregations = self.apply(*old_aggregations)
-        # generate list of new weights
-        new_weights_list = []
-        for aggregation_id, aggregation in enumerate(new_aggregations):
-            if aggregation_id == self.aggregates - 1:
-                new_weights_list += self.get_deaggregator()(aggregation, collection_size + leftovers)
-            else:
-                new_weights_list += self.get_deaggregator()(aggregation, collection_size)
-        new_weights_list = self.get_shuffler()(new_weights_list)
-        # write back new weights
-        new_weights = self.fill_weights(old_weights, new_weights_list)
-        # return results
-        if self.params.get("print_all_weight_updates", False) and not self.is_silent():
-            print("updated old weight aggregations " + str(old_aggregations))
-            print("to new weight aggregations " + str(new_aggregations))
-            print("resulting in network weights ...")
-            print(self.weights_to_string(new_weights))
-        return new_weights
-    @staticmethod
-    def collect_weights(all_weights, collection_size):
-        collections = []
-        next_collection = []
-        current_weight_id = 0
-        for layer_id, layer in enumerate(all_weights):
-            for cell_id, cell in enumerate(layer):
-                for weight_id, weight in enumerate(cell):
-                    next_collection += [weight]
-                    if (current_weight_id + 1) % collection_size == 0:
-                        collections += [next_collection]
-                        next_collection = []
-                    current_weight_id += 1
-        collections[-1] += next_collection
-        leftovers = len(next_collection)
-        return collections, leftovers
-    def get_collected_weights(self):
-        collection_size = self.get_amount_of_weights() // self.aggregates
-        return self.collect_weights(self.get_weights(), collection_size)
     def get_aggregated_weights(self):
-        collections, leftovers = self.get_collected_weights()
-        aggregations = [self.get_aggregator()(collection) for collection in collections]
-        return aggregations, leftovers
+        return self.get_weights().aggregate_by(self.get_aggregator(), self.aggregates)
-    def compute_samples(self):
+    def apply_to_weights(self, old_weights) -> Weights:
+        # build aggregations of old_weights
+        old_aggregations, leftovers = self.get_aggregated_weights()
+        # call network
+        new_aggregations = self.apply(old_aggregations)
+        collection_sizes = self.get_amount_of_weights() // self.aggregates
+        new_aggregations = self.deaggregate_identically(new_aggregations, collection_sizes)
+        # generate new weights
+        # only include leftovers if there are some, then convert them to Weights based on the old shape
+        new_weights = Weights(new_aggregations if not leftovers.shape[0] else np.hstack((new_aggregations, leftovers)),
+                              flat_array_shape=old_weights.shapes())
+        # maybe shuffle
+        new_weights = self.get_shuffler()(new_weights)
+        return new_weights
+    def get_samples(self):
         aggregations, _ = self.get_aggregated_weights()
-        sample = np.transpose(np.array([[aggregations[i]] for i in range(self.aggregates)]))
-        return [sample], [sample]
+        # What did that do?
+        # sample = np.transpose(np.array([[aggregations[i]] for i in range(self.aggregates)]))
+        return aggregations, aggregations
     def is_fixpoint_after_aggregation(self, degree=1, epsilon=None):
         assert degree >= 1, "degree must be >= 1"
         epsilon = epsilon or self.get_params().get('epsilon')
-        old_weights = self.get_weights()
         old_aggregations, _ = self.get_aggregated_weights()
-        new_weights = copy.deepcopy(old_weights)
+        new_weights = copy.deepcopy(self.get_weights())
         for _ in range(degree):
             new_weights = self.apply_to_weights(new_weights)
-            if NeuralNetwork.are_weights_diverged(new_weights):
+            if new_weights.are_diverged():
                 return False
-        collection_size = self.get_amount_of_weights() // self.aggregates
-        collections, leftovers = self.__class__.collect_weights(new_weights, collection_size)
-        new_aggregations = [self.get_aggregator()(collection) for collection in collections]
-        for aggregation_id, old_aggregation in enumerate(old_aggregations):
-            new_aggregation = new_aggregations[aggregation_id]
-            if abs(new_aggregation - old_aggregation) >= epsilon:
-                return False, new_aggregations
-        return True, new_aggregations
+        new_aggregations, leftovers = self.get_aggregated_weights()
+        # ToDo: Explain this, why are you additionally checking tolerances of aggregated weights?
+        biggerEpsilon = (np.abs(np.asarray(old_aggregations) - np.asarray(new_aggregations)) >= epsilon).any()
+        # Boolean value has to be flipped to answer the question.
+        return True, not biggerEpsilon
-class FFTNeuralNetwork(NeuralNetwork):
-    @staticmethod
-    def aggregate_fft(weights, dims):
-        flat = np.hstack([weight.flatten() for weight in weights])
-        fft_reduction = np.fft.fftn(flat, dims)[None, ...]
-        return fft_reduction
-    @staticmethod
-    def deaggregate_identically(aggregate, dims):
-        fft_inverse = np.fft.ifftn(aggregate, dims)
-        return fft_inverse
-    @staticmethod
-    def shuffle_not(weights_list):
-        return weights_list
-    @staticmethod
-    def shuffle_random(weights_list):
-        import random
-        random.shuffle(weights_list)
-        return weights_list
-    def __init__(self, aggregates, width, depth, **kwargs):
-        super().__init__(**kwargs)
-        self.aggregates = aggregates
-        self.width = width
-        self.depth = depth
-        self.model = Sequential()
-        self.model.add(Dense(units=width, input_dim=self.aggregates, **self.keras_params))
-        for _ in range(depth-1):
-            self.model.add(Dense(units=width, **self.keras_params))
-        self.model.add(Dense(units=self.aggregates, **self.keras_params))
-    def get_model(self):
-        return self.model
-    def get_shuffler(self):
-        return self.params.get('shuffler', self.shuffle_not)
-    def get_amount_of_weights(self):
-        total_weights = 0
-        for layer_id, layer in enumerate(self.get_weights()):
-            for cell_id, cell in enumerate(layer):
-                for weight_id, weight in enumerate(cell):
-                    total_weights += 1
-        return total_weights
-    def apply(self, inputs):
-        sample = np.asarray(inputs)
-        return self.model.predict(sample)[0]
-    def apply_to_weights(self, old_weights):
-        # build aggregations from old_weights
-        weights = self.get_weights_flat()
-        # call network
-        old_aggregation = self.aggregate_fft(weights, self.aggregates)
-        new_aggregation = self.apply(old_aggregation)
-        # generate list of new weights
-        new_weights_list = self.deaggregate_identically(new_aggregation, self.get_amount_of_weights())
-        new_weights_list = self.get_shuffler()(new_weights_list)
-        # write back new weights
-        new_weights = self.fill_weights(old_weights, new_weights_list)
-        # return results
-        if self.params.get("print_all_weight_updates", False) and not self.is_silent():
-            print("updated old weight aggregations " + str(old_aggregation))
-            print("to new weight aggregations " + str(new_aggregation))
-            print("resulting in network weights ...")
-            print(self.__class__.weights_to_string(new_weights))
-        return new_weights
-    def compute_samples(self):
-        weights = self.get_weights()
-        sample = np.asarray(weights)[None, ...]
-        return [sample], [sample]
 class RecurrentNeuralNetwork(NeuralNetwork):
@@ -534,9 +437,6 @@ class RecurrentNeuralNetwork(NeuralNetwork):
         self.model.add(SimpleRNN(units=width, return_sequences=True, **self.keras_params))
         self.model.add(SimpleRNN(units=self.features, return_sequences=True, **self.keras_params))
-    def get_model(self):
-        return self.model
     def apply(self, *inputs):
         stuff = np.transpose(np.array([[[inputs[i]] for i in range(len(inputs))]]))
         return self.model.predict(stuff)[0].flatten()
@@ -574,22 +474,22 @@ class RecurrentNeuralNetwork(NeuralNetwork):
         return sample, sample
-class TrainingNeuralNetworkDecorator():
+class TrainingNeuralNetworkDecorator:
-    def __init__(self, net, **kwargs):
-        self.net = net
+    def __init__(self, network):
+        self.network = network
         self.compile_params = dict(loss='mse', optimizer='sgd')
         self.model_compiled = False
     def __getattr__(self, name):
-        return getattr(self.net, name)
+        return getattr(self.network, name)
     def with_params(self, **kwargs):
-        self.net.with_params(**kwargs)
+        self.network.with_params(**kwargs)
         return self
     def with_keras_params(self, **kwargs):
-        self.net.with_keras_params(**kwargs)
+        self.network.with_keras_params(**kwargs)
         return self
     def get_compile_params(self):
@@ -602,7 +502,7 @@ class TrainingNeuralNetworkDecorator:
     def compile_model(self, **kwargs):
         compile_params = copy.deepcopy(self.compile_params)
         compile_params.update(kwargs)
-        return self.net.model.compile(**compile_params)
+        return self.network.model.compile(**compile_params)
     def compiled(self, **kwargs):
         if not self.model_compiled:
@@ -612,72 +512,64 @@ class TrainingNeuralNetworkDecorator:
     def train(self, batchsize=1, store_states=True, epoch=0):
         self.compiled()
-        x, y = self.net.compute_samples()
-        savestatecallback = [SaveStateCallback(net=self, epoch=epoch)] if store_states else None
-        history = self.net.model.fit(x=x, y=y, epochs=epoch+1, verbose=0, batch_size=batchsize, callbacks=savestatecallback, initial_epoch=epoch)
+        x, y = self.network.get_samples()
+        savestatecallback = [SaveStateCallback(network=self, epoch=epoch)] if store_states else None
+        history = self.network.model.fit(x=x, y=y, epochs=epoch+1, verbose=0,
+                                         batch_size=batchsize, callbacks=savestatecallback,
+                                         initial_epoch=epoch)
         return history.history['loss'][-1]
     def learn_from(self, other_network, batchsize=1):
         self.compiled()
         other_network.compiled()
-        x, y = other_network.net.compute_samples()
-        history = self.net.model.fit(x=x, y=y, verbose=0, batch_size=batchsize)
+        x, y = other_network.network.get_samples()
+        history = self.network.model.fit(x=x, y=y, verbose=0, batch_size=batchsize)
         return history.history['loss'][-1]
 if __name__ == '__main__':
-    def run_exp(net, prints=False):
-        # INFO Run_ID needs to be more than 0, so that exp stores the trajectories!
-        exp.run_net(net, 100, run_id=run_id + 1)
-        exp.historical_particles[run_id] = net
-        if prints:
-            print("Fixpoint? " + str(net.is_fixpoint()))
-            print("Loss " + str(loss))
     if True:
         # WeightWise Neural Network
-        with FixpointExperiment() as exp:
-            for run_id in tqdm(range(100)):
-                net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2) \
-                    .with_keras_params(activation='linear'))
-                run_exp(net)
-                K.clear_session()
-            exp.log(exp.counters)
+        net_generator = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))
+        with FixpointExperiment() as exp:
+            exp.run_exp(net_generator, 10, logging=True)
+            exp.reset_all()
-    if True:
+    if False:
         # Aggregating Neural Network
-        with FixpointExperiment() as exp:
-            for run_id in tqdm(range(100)):
-                net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2) \
-                    .with_keras_params())
-                run_exp(net)
-                K.clear_session()
-            exp.log(exp.counters)
+        net_generator = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params())
+        with FixpointExperiment() as exp:
+            exp.run_exp(net_generator, 10, logging=True)
+            exp.reset_all()
-    if True:
-        #FFT Neural Network
-        with FixpointExperiment() as exp:
-            for run_id in tqdm(range(100)):
-                net = ParticleDecorator(FFTNeuralNetwork(aggregates=4, width=2, depth=2) \
-                    .with_keras_params(activation='linear'))
-                run_exp(net)
-                K.clear_session()
-            exp.log(exp.counters)
+    if False:
+        # FFT Aggregation
+        net_generator = lambda: ParticleDecorator(
+            AggregatingNeuralNetwork(
+                aggregates=4, width=2, depth=2, aggregator=AggregatingNeuralNetwork.aggregate_fft
+            ).with_keras_params(activation='linear'))
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(10)):
+                exp.run_exp(net_generator, 1)
+                exp.log(exp.counters)
+                exp.reset_model()
+            exp.reset_all()
     if True:
         # ok so this works quite reliably
-        with FixpointExperiment() as exp:
-            for i in range(1):
-                run_count = 1000
-                net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
-                net.with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
-                for run_id in tqdm(range(run_count+1)):
-                    net.compiled()
-                    loss = net.train(epoch=run_id)
-                    if run_id % 100 == 0:
-                        run_exp(net)
-                    K.clear_session()
+        run_count = 10000
+        net_generator = TrainingNeuralNetworkDecorator(
+            ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2))
+        ).with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
+        with MixedFixpointExperiment() as exp:
+            for run_id in tqdm(range(run_count+1)):
+                exp.run_exp(net_generator, 1)
+                if run_id % 100 == 0:
+                    exp.run_net(net_generator, 1)
+                K.clear_session()
     if False:
         with FixpointExperiment() as exp:
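
The Weights class added above is a thin wrapper around the list of per-layer arrays returned by model.get_weights(): to_flat_array() and __reshape_flat_array__() invert each other, and aggregate_by() reduces the flat vector to a few numbers plus the leftovers that did not fit evenly. A small round-trip sketch (plain numpy, assuming the classes are importable from network):

    import numpy as np
    from network import Weights, AggregatingNeuralNetwork

    layers = [np.arange(6, dtype=float).reshape(2, 3), np.arange(3, dtype=float).reshape(3, 1)]
    w = Weights(layers)
    flat = w.to_flat_array()                               # flat vector of all 9 weights
    restored = Weights(flat, flat_array_shape=w.shapes())  # back to shapes [(2, 3), (3, 1)]

    # 9 weights -> 4 averages over 2 weights each, plus 1 leftover weight
    aggregated, left_overs = w.aggregate_by(AggregatingNeuralNetwork.aggregate_average, 4)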

View File

@@ -4,7 +4,6 @@ import os
 # Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
-from util import *
 from experiment import *
 from network import *

View File

@@ -3,16 +3,18 @@ import os
 # Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
-from util import *
 from experiment import *
 from network import *
-import keras.backend
+import tensorflow.python.keras.backend as K

 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

-def count(counters, net, notable_nets=[]):
+def count(counters, net, notable_nets=None):
+    notable_nets = notable_nets or []
     if net.is_diverged():
         counters['divergent'] += 1
     elif net.is_fixpoint():

@@ -52,7 +54,7 @@ if __name__ == '__main__':
         net = ParticleDecorator(net)
         name = str(net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
         count(counters, net, notable_nets)
-        keras.backend.clear_session()
+        K.clear_session()
         all_counters += [counters]
         # all_notable_nets += [notable_nets]
         all_names += [name]
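The notable_nets change that repeats across these files fixes a classic Python pitfall: a mutable default argument is evaluated once, at function definition, so every call that omits the argument shares the same list across calls. A minimal demonstration of the bug and of the fix used here:

def count_buggy(item, acc=[]):  # the [] is created once, at def time
    acc.append(item)
    return acc

def count_fixed(item, acc=None):  # fresh list on every call
    acc = acc or []
    acc.append(item)
    return acc

count_buggy('a')
print(count_buggy('b'))  # ['a', 'b'] -- state leaked from the first call
count_fixed('a')
print(count_fixed('b'))  # ['b']

Note that the "acc = acc or []" idiom also replaces an explicitly passed empty list with a new one; that is harmless here, since an empty notable_nets carries no information, but "if acc is None:" is the stricter form.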

View File

@@ -5,12 +5,11 @@ import os
 # Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
-from util import *
 from experiment import *
 from network import *
 from soup import prng
-import keras.backend
+import tensorflow.python.keras.backend as K
 from statistics import mean

@@ -85,7 +84,7 @@ if __name__ == '__main__':
             exp.ys += [time_to_something]
             # time steps still regarded as the initial fix-point
             exp.zs += [time_as_fixpoint]
-            keras.backend.clear_session()
+            K.clear_session()
         current_scale /= 10.0
     for d in range(exp.depth):
         exp.log('variation 10e-' + str(d))
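Two related changes recur in every script of this branch: the import moves from the standalone keras.backend to the backend bundled with TensorFlow, and clear_session() is called after every trial. The session reset matters because each trial builds a brand-new Keras model into the default graph; without it, graph state accumulates over hundreds of trials and model construction gets progressively slower. A sketch of the pattern, written against the public tensorflow.keras path (this branch uses the private tensorflow.python.keras.backend module, which works on TF 2.0 but is not a stable API):

import tensorflow.keras.backend as K
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense

for trial in range(100):
    model = Sequential([Dense(2, input_dim=2, activation='linear')])
    # ... run one self-application/training trial with `model` ...
    K.clear_session()  # drop accumulated graph state before the next trial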

View File

@@ -6,13 +6,12 @@ sys.path += os.path.join('..', '.')
 from typing import Tuple
-from util import *
 from experiment import *
 from network import *
 from soup import *
-import keras.backend
+import tensorflow.python.keras.backend as K
 from statistics import mean

 avg = mean

@@ -28,7 +27,7 @@ def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

-def count(counters, soup, notable_nets=[]):
+def count(counters, soup, notable_nets=None):
     """
     Count the occurrences of the types of weight trajectories.

@@ -40,6 +39,7 @@ def count(counters, soup, notable_nets=[]):
     :return: Both the counter dictionary and the list of interesting nets.
     """
+    notable_nets = notable_nets or list()
     for net in soup.particles:
         if net.is_diverged():
             counters['divergent'] += 1

@@ -90,7 +90,7 @@ if __name__ == '__main__':
                 for time in range(exp.soup_life):
                     soup.evolve()
                 count(counters, soup, notable_nets)
-                keras.backend.clear_session()
+                K.clear_session()
             xs += [learn_from_severity]
             ys += [float(counters['fix_zero']) / float(exp.trials)]

View File

@@ -6,11 +6,10 @@ from typing import Tuple
 # Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
-from util import *
 from experiment import *
 from network import *
-import keras.backend
+import tensorflow.python.keras.backend as K

 def generate_counters():

@@ -23,7 +22,7 @@ def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

-def count(counters, net, notable_nets=[]):
+def count(counters, net, notable_nets=None):
     """
     Count the occurrences of the types of weight trajectories.

@@ -34,7 +33,7 @@ def count(counters, net, notable_nets=[]):
     :rtype Tuple[dict, list]
     :return: Both the counter dictionary and the list of interesting nets.
     """
+    notable_nets = notable_nets or list()
     if net.is_diverged():
         counters['divergent'] += 1
     elif net.is_fixpoint():

View File

@@ -6,12 +6,11 @@ sys.path += os.path.join('..', '.')
 from typing import Tuple
-from util import *
 from experiment import *
 from network import *
 from soup import *
-import keras.backend
+import tensorflow.python.keras.backend as K

 def generate_counters():

@@ -24,7 +23,7 @@ def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

-def count(counters, soup, notable_nets=[]):
+def count(counters, soup, notable_nets=None):
     """
     Count the occurrences of the types of weight trajectories.

@@ -36,6 +35,7 @@ def count(counters, soup, notable_nets=[]):
     :return: Both the counter dictionary and the list of interesting nets.
     """
+    notable_nets = notable_nets or list()
     for net in soup.particles:
         if net.is_diverged():
             counters['divergent'] += 1

@@ -89,7 +89,7 @@ if __name__ == '__main__':
         for _ in range(exp.soup_life):
             soup.evolve()
         count(counters, soup, notable_nets)
-        keras.backend.clear_session()
+        K.clear_session()
     xs += [trains_per_selfattack]
     ys += [float(counters['fix_zero']) / float(exp.trials)]

View File

@@ -4,16 +4,16 @@ import os
 # Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
-from util import *
 from experiment import *
 from network import *
-import keras.backend as K
+import tensorflow.python.keras.backend as K

 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

-def count(counters, net, notable_nets=[]):
+def count(counters, net, notable_nets=None):
+    notable_nets = notable_nets or list()
     if net.is_diverged():
         counters['divergent'] += 1
     elif net.is_fixpoint():

View File

@@ -109,39 +109,29 @@ class Soup(object):
 if __name__ == '__main__':
-    if False:
-        with SoupExperiment() as exp:
-            for run_id in range(1):
-                net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                # net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
-                #                         .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
-                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
-                soup.seed()
-                for _ in tqdm(range(1000)):
-                    soup.evolve()
-                exp.log(soup.count())
-                exp.save(soup=soup.without_particles())
+    if True:
+        net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+        soup_generator = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
+        exp = SoupExperiment()
+        exp.run_exp(net_generator, 1000, soup_generator, 1, False)
+
+        # net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+        # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
+        #                         .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
+        # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()

     if True:
-        with SoupExperiment("soup") as exp:
-            for run_id in range(1):
-                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2))\
-                    .with_keras_params(activation='linear').with_params(epsilon=0.0001)
-                # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
-                #                         .with_keras_params(activation='linear')\
-                #                         .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
-                # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
-                #                         .with_keras_params(activation='linear')\
-                #                         .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
-                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
-                soup.seed()
-                for _ in tqdm(range(100)):
-                    soup.evolve()
-                exp.log(soup.count())
-                # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
-                # or soup.historical_particles[particle_uid].states[time_step]['weights']
-                # from soup.dill
-                exp.save(soup=soup.without_particles())
+        net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
+            .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+        soup_generator = lambda: Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
+        exp = SoupExperiment(name="soup")
+        exp.run_exp(net_generator, 100, soup_generator, 1, False)
+
+        # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
+        #                         .with_keras_params(activation='linear')\
+        #                         .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
+        # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
+        #                         .with_keras_params(activation='linear')\
+        #                         .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
+        # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
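The removed lines document what SoupExperiment.run_exp now encapsulates: seed a population of networks, let randomly chosen particles interact for a number of steps, log the counter summary, and save the soup without its particle histories. A toy version of that loop, with stand-in classes (ToySoup and the object() factory are illustrative only, not the repository's Soup):

import random

class ToySoup:
    def __init__(self, size, net_generator):
        # seed: build the initial population from the factory
        self.particles = [net_generator() for _ in range(size)]

    def evolve(self):
        attacker, victim = random.sample(self.particles, 2)
        # ... attacker applies itself to victim's weights; divergent or
        #     all-zero particles get replaced according to the soup's params ...

soup = ToySoup(100, net_generator=lambda: object())
for _ in range(1000):
    soup.evolve()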

View File

@@ -1,111 +0,0 @@
from experiment import *
from network import *
from soup import *
import numpy as np
class LearningNeuralNetwork(NeuralNetwork):
@staticmethod
def mean_reduction(weights, features):
single_dim_weights = np.hstack([w.flatten() for w in weights])
shaped_weights = np.reshape(single_dim_weights, (1, features, -1))
x = np.mean(shaped_weights, axis=-1)
return x
@staticmethod
def fft_reduction(weights, features):
single_dim_weights = np.hstack([w.flatten() for w in weights])
x = np.fft.fft(single_dim_weights, n=features)[None, ...]
return x
@staticmethod
def random_reduction(_, features):
x = np.random.rand(features)[None, ...]
return x
def __init__(self, width, depth, features, **kwargs):
raise DeprecationWarning
super().__init__(**kwargs)
self.width = width
self.depth = depth
self.features = features
self.compile_params = dict(loss='mse', optimizer='sgd')
self.model = Sequential()
self.model.add(Dense(units=self.width, input_dim=self.features, **self.keras_params))
for _ in range(self.depth - 1):
self.model.add(Dense(units=self.width, **self.keras_params))
self.model.add(Dense(units=self.features, **self.keras_params))
self.model.compile(**self.compile_params)
def apply_to_weights(self, old_weights, **kwargs):
reduced = kwargs.get('reduction', self.fft_reduction)()
raise NotImplementedError
# build aggregations from old_weights
weights = self.get_weights_flat()
# call network
old_aggregation = self.aggregate_fft(weights, self.aggregates)
new_aggregation = self.apply(old_aggregation)
# generate list of new weights
new_weights_list = self.deaggregate_identically(new_aggregation, self.get_amount_of_weights())
new_weights_list = self.get_shuffler()(new_weights_list)
# write back new weights
new_weights = self.fill_weights(old_weights, new_weights_list)
# return results
if self.params.get("print_all_weight_updates", False) and not self.is_silent():
print("updated old weight aggregations " + str(old_aggregation))
print("to new weight aggregations " + str(new_aggregation))
print("resulting in network weights ...")
print(self.__class__.weights_to_string(new_weights))
return new_weights
def with_compile_params(self, **kwargs):
self.compile_params.update(kwargs)
return self
def learn(self, epochs, reduction, batchsize=1):
with tqdm(total=epochs, ascii=True,
desc='Type: {t} @ Epoch:'.format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for epoch in range(epochs):
old_weights = self.get_weights()
x = reduction(old_weights, self.features)
savestateCallback = SaveStateCallback(self, epoch=epoch)
history = self.model.fit(x=x, y=x, verbose=0, batch_size=batchsize, callbacks=savestateCallback)
bar.postfix[1]["value"] = history.history['loss'][-1]
bar.update()
def vary(e=0.0, f=0.0):
return [
np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
np.array([[1.0+e], [0.0+f]], dtype=np.float32)
]
if __name__ == '__main__':
net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
if False:
net.set_weights([
np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0], [0.0]], dtype=np.float32)
])
print(net.get_weights())
net.self_attack(100)
print(net.get_weights())
print(net.is_fixpoint())
if True:
net.set_weights(vary(0.01, 0.0))
print(net.get_weights())
for _ in range(5):
net.self_attack()
print(net.get_weights())
print(net.is_fixpoint())
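Before it was deleted, the interesting piece of this file was the reduction family: each static method compresses a network's full weight set into a fixed number of input features. The fft_reduction step in isolation, runnable as-is with stand-in weight shapes:

import numpy as np

# Flatten all weight arrays into one vector, then keep the first
# `features` Fourier coefficients as a fixed-size summary.
weights = [np.ones((4, 2)), np.ones((2, 2)), np.ones((2, 1))]
single_dim_weights = np.hstack([w.flatten() for w in weights])
features = 4
x = np.fft.fft(single_dim_weights, n=features)[None, ...]
print(x.shape)  # (1, 4), complex-valued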

View File

@@ -1,39 +0,0 @@
class PrintingObject:
class SilenceSignal():
def __init__(self, obj, value):
self.obj = obj
self.new_silent = value
def __enter__(self):
self.old_silent = self.obj.get_silence()
self.obj.set_silence(self.new_silent)
def __exit__(self, exception_type, exception_value, traceback):
self.obj.set_silence(self.old_silent)
def __init__(self):
self.silent = True
def is_silent(self):
return self.silent
def get_silence(self):
return self.is_silent()
def set_silence(self, value=True):
self.silent = value
return self
def unset_silence(self):
self.silent = False
return self
def with_silence(self, value=True):
self.set_silence(value)
return self
def silence(self, value=True):
return self.__class__.SilenceSignal(self, value)
def _print(self, *args, **kwargs):
if not self.silent:
print(*args, **kwargs)
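For reference, the silence() context manager above was used like this (hypothetical usage reconstructed from the class definition, not from a caller in the repository):

obj = PrintingObject().with_silence(False)
obj._print('visible')
with obj.silence(True):       # __enter__ saves the old flag and sets the new one
    obj._print('suppressed')
obj._print('visible again')   # __exit__ restored the previous flag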

View File

@@ -1,283 +0,0 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold.t_sne import TSNE, PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def build_from_soup_or_exp(soup):
particles = soup.historical_particles
particle_list = []
for particle in particles.values():
particle_dict = dict(
trajectory=[event['weights'] for event in particle],
time=[event['time'] for event in particle],
action=[event.get('action', None) for event in particle],
counterpart=[event.get('counterpart', None) for event in particle]
)
if any([x is not None for x in particle_dict['counterpart']]):
print('counterpart')
particle_list.append(particle_dict)
return particle_list
def plot_latent_trajectories(soup_or_experiment, filename='latent_trajectory_plot'):
assert isinstance(soup_or_experiment, (Experiment, Soup))
bupu = cl.scales['11']['div']['RdYlGn']
data_dict = build_from_soup_or_exp(soup_or_experiment)
scale = cl.interp(bupu, len(data_dict)+1) # Map color scale to N bins
# Fit the embedding space
transformer = TSNE()
for particle_dict in data_dict:
array = np.asarray([np.hstack([x.flatten() for x in timestamp]).flatten()
for timestamp in particle_dict['trajectory']])
particle_dict['trajectory'] = array
transformer.fit(array)
# Transform data accordingly and plot it
data = []
for p_id, particle_dict in enumerate(data_dict):
transformed = transformer._fit(np.asarray(particle_dict['trajectory']))
line_trace = go.Scatter(
x=transformed[:, 0],
y=transformed[:, 1],
text='Hovertext goes here'.format(),
line=dict(color=scale[p_id]),
# legendgroup='Position -{}'.format(pos),
name='Particle - {}'.format(p_id),
showlegend=True,
# hoverinfo='text',
mode='lines')
line_start = go.Scatter(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
marker=dict(
color='rgb(255, 0, 0)',
size=4
),
showlegend=False
)
line_end = go.Scatter(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
marker=dict(
color='rgb(0, 0, 0)',
size=4
),
showlegend=False
)
data.extend([line_trace, line_start, line_end])
layout = dict(title='{} - Latent Trajectory Movement'.format('Penis'),
height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
# import plotly.io as pio
# pio.write_image(fig, filename)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
def norm(val, a=0, b=0.25):
return (val - a) / (b - a)
data_list = build_from_soup_or_exp(soup_or_experiment)
if not data_list:
return
base_scale = cl.scales['9']['div']['RdYlGn']
# base_scale = cl.scales['9']['qual']['Set1']
scale = cl.interp(base_scale, len(data_list)+1) # Map color scale to N bins
# Fit the embedding space
transformer = PCA(n_components=2)
array = []
for particle_dict in data_list:
array.append(particle_dict['trajectory'])
transformer.fit(np.vstack(array))
# Transform data accordingly and plot it
data = []
for p_id, particle_dict in enumerate(data_list):
transformed = transformer.transform(particle_dict['trajectory'])
line_trace = go.Scatter3d(
x=transformed[:, 0],
y=transformed[:, 1],
z=np.asarray(particle_dict['time']),
text='Particle: {}<br> It had {} lives.'.format(p_id, len(particle_dict['trajectory'])),
line=dict(
color=scale[p_id],
width=4
),
# legendgroup='Particle - {}'.format(p_id),
name='Particle -{}'.format(p_id),
showlegend=False,
hoverinfo='text',
mode='lines')
line_start = go.Scatter3d(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
z=np.asarray(particle_dict['time'][0]),
marker=dict(
color='rgb(255, 0, 0)',
size=4
),
showlegend=False
)
line_end = go.Scatter3d(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
z=np.asarray(particle_dict['time'][-1]),
marker=dict(
color='rgb(0, 0, 0)',
size=4
),
showlegend=False
)
data.extend([line_trace, line_start, line_end])
axis_layout = dict(gridcolor='rgb(255, 255, 255)',
gridwidth=3,
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
titlefont=dict(
color='black',
size=30
)
)
layout = go.Layout(scene=dict(
# aspectratio=dict(x=2, y=2, z=2),
xaxis=dict(title='Transformed X', **axis_layout),
yaxis=dict(title='Transformed Y', **axis_layout),
zaxis=dict(title='Epoch', **axis_layout)),
# title='{} - Latent Trajectory Movement'.format('Soup'),
width=1024, height=1024,
margin=dict(l=0, r=0, b=0, t=0)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename, validate=True)
pass
def plot_histogram(bars_dict_list, filename='histogram_plot'):
# catagorical
ryb = cl.scales['10']['div']['RdYlBu']
data = []
for bar_id, bars_dict in bars_dict_list:
hist = go.Histogram(
histfunc="count",
y=bars_dict.get('value', 14),
x=bars_dict.get('name', 'gimme a name'),
showlegend=False,
marker=dict(
color=ryb[bar_id]
),
)
data.append(hist)
layout=dict(title='{} Histogram Plot'.format('Experiment Name Penis'),
height=400, width=400, margin=dict(l=0, r=0, t=0, b=0))
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def line_plot(line_dict_list, filename='lineplot'):
# lines with standard deviation
# Transform data accordingly and plot it
data = []
rdylgn = cl.scales['10']['div']['RdYlGn']
rdylgn_background = [scale + (0.4,) for scale in cl.to_numeric(rdylgn)]
for line_id, line_dict in enumerate(line_dict_list):
name = line_dict.get('name', 'gimme a name')
upper_bound = go.Scatter(
name='Upper Bound',
x=line_dict['x'],
y=line_dict['upper_y'],
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
fillcolor=rdylgn_background[line_id],
)
trace = go.Scatter(
x=line_dict['x'],
y=line_dict['main_y'],
mode='lines',
name=name,
line=dict(color=line_id),
fillcolor=rdylgn_background[line_id],
fill='tonexty')
lower_bound = go.Scatter(
name='Lower Bound',
x=line_dict['x'],
y=line_dict['lower_y'],
marker=dict(color="#444"),
line=dict(width=0),
mode='lines')
data.extend([upper_bound, trace, lower_bound])
layout=dict(title='{} Line Plot'.format('Experiment Name Penis'),
height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
# This was either another file type or the plot's .html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_latent_trajectories_3D, ["trajectorys.dill", "soup.dill"])
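One detail of this deleted plotting module deserves a note: it imported PCA from sklearn.manifold.t_sne, a private module path that only ever worked incidentally and is gone in modern scikit-learn; PCA's public home is sklearn.decomposition. The embedding step the module performed, as a self-contained sketch with random stand-in trajectories:

import numpy as np
from sklearn.decomposition import PCA

# Three particles, 50 time steps each, 10 flattened weights per step.
trajectories = [np.random.rand(50, 10) for _ in range(3)]

transformer = PCA(n_components=2)
transformer.fit(np.vstack(trajectories))  # fit one shared embedding space
embedded = [transformer.transform(t) for t in trajectories]
print(embedded[0].shape)  # (50, 2): x/y coordinates, plotted against time as z

Fitting the transformer once on all stacked trajectories, rather than per particle, is what makes the projected paths comparable in a single plot.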