Box and stuff
.gitignore (vendored) · 3 changes
@@ -3,9 +3,6 @@
 
 
 ### Local Datasets ###
-code/raw_runs
-code/processed_runs
-experiments/
 
 
 ### Data ###
@@ -60,52 +60,6 @@ def plot_histogram(bars_dict_list: List[dict], filename='histogram_plot'):
     pass
 
 
-def line_plot(line_dict_list, filename='lineplot'):
-    # lines with standard deviation
-    # Transform data accordingly and plot it
-    data = []
-    rdylgn = cl.scales['10']['div']['RdYlGn']
-    rdylgn_background = [scale + (0.4,) for scale in cl.to_numeric(rdylgn)]
-    for line_id, line_dict in enumerate(line_dict_list):
-        name = line_dict.get('name', 'gimme a name')
-
-        upper_bound = go.Scatter(
-            name='Upper Bound',
-            x=line_dict['x'],
-            y=line_dict['upper_y'],
-            mode='lines',
-            marker=dict(color="#444"),
-            line=dict(width=0),
-            fillcolor=rdylgn_background[line_id],
-        )
-
-        trace = go.Scatter(
-            x=line_dict['x'],
-            y=line_dict['main_y'],
-            mode='lines',
-            name=name,
-            line=dict(color=line_id),
-            fillcolor=rdylgn_background[line_id],
-            fill='tonexty')
-
-        lower_bound = go.Scatter(
-            name='Lower Bound',
-            x=line_dict['x'],
-            y=line_dict['lower_y'],
-            marker=dict(color="#444"),
-            line=dict(width=0),
-            mode='lines')
-
-        data.extend([upper_bound, trace, lower_bound])
-
-    layout=dict(title='{} Line Plot'.format('Experiment Name Penis'),
-                height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
-
-    fig = go.Figure(data=data, layout=layout)
-    pl.offline.plot(fig, auto_open=True, filename=filename)
-    pass
-
-
 def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
     if os.path.isdir(absolut_file_or_folder):
         for sub_file_or_folder in os.scandir(absolut_file_or_folder):
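Note: the removed line_plot drew each curve with a shaded deviation band by stacking scatter traces, where fill='tonexty' shades between a trace and the trace listed just before it. A minimal self-contained sketch of that plotly pattern, with made-up values; the lower/middle/upper ordering below is the one plotly expects, which the removed code only partially applied (its lower bound carried no fill):

import plotly as pl
import plotly.graph_objs as go

x = [0, 1, 2, 3]
lower = go.Scatter(x=x, y=[0, 1, 2, 3], mode='lines', line=dict(width=0), showlegend=False)
mid = go.Scatter(x=x, y=[1, 2, 3, 4], mode='lines', name='mean',
                 fill='tonexty', fillcolor='rgba(68, 68, 68, 0.3)')
upper = go.Scatter(x=x, y=[2, 3, 4, 5], mode='lines', line=dict(width=0),
                   fill='tonexty', fillcolor='rgba(68, 68, 68, 0.3)', showlegend=False)
# Each 'tonexty' trace fills down to the trace listed immediately before it.
fig = go.Figure(data=[lower, mid, upper], layout=dict(title='Deviation Band Sketch'))
pl.offline.plot(fig, auto_open=False, filename='band_sketch.html')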
code/box_plots.py · new file · 114 lines
@@ -0,0 +1,114 @@
+import os
+
+from experiment import Experiment
+# noinspection PyUnresolvedReferences
+from soup import Soup
+from typing import List
+
+from collections import defaultdict
+
+from argparse import ArgumentParser
+import numpy as np
+
+import plotly as pl
+import plotly.graph_objs as go
+
+import colorlover as cl
+
+import dill
+
+
+def build_args():
+    arg_parser = ArgumentParser()
+    arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
+    arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
+    return arg_parser.parse_args()
+
+
+def plot_box(exp: Experiment, filename='histogram_plot'):
+    # catagorical
+    ryb = cl.scales['10']['div']['RdYlBu']
+
+    data = []
+
+    for d in range(exp.depth):
+        names = ['D 10e-{}'.format(d)] * exp.trials
+        data.extend(names)
+
+    trace_list = []
+
+    vergence_box = go.Box(
+        y=exp.ys,
+        x=data,
+        name='Time to Vergence',
+        boxpoints=False,
+        showlegend=True,
+        marker=dict(
+            color=ryb[3]
+        ),
+    )
+    fixpoint_box = go.Box(
+        y=exp.zs,
+        x=data,
+        name='Time as Fixpoint',
+        boxpoints=False,
+        showlegend=True,
+        marker=dict(
+            color=ryb[-1]
+        ),
+    )
+
+    trace_list.extend([vergence_box, fixpoint_box])
+
+    layout = dict(title='{} Histogram Plot'.format('Experiment Name Penis'),
+                  boxmode='group',
+                  boxgap=0,
+                  # barmode='group',
+                  bargap=0,
+                  xaxis=dict(showgrid=False,
+                             zeroline=True,
+                             tickangle=0,
+                             showticklabels=True),
+                  yaxis=dict(
+                      title='Occurences',
+                      zeroline=False)
+                  # height=400, width=400,
+                  # margin=dict(l=20, r=20, t=20, b=20)
+                  )
+
+    fig = go.Figure(data=trace_list, layout=layout)
+    pl.offline.plot(fig, auto_open=True, filename=filename)
+    pass
+
+
+def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
+    if os.path.isdir(absolut_file_or_folder):
+        for sub_file_or_folder in os.scandir(absolut_file_or_folder):
+            search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
+    elif absolut_file_or_folder.endswith('.dill'):
+        file_or_folder = os.path.split(absolut_file_or_folder)[-1]
+        if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(file_or_folder[:-5])):
+            print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
+                                                                             file=absolut_file_or_folder)
+                  )
+
+            with open(absolut_file_or_folder, 'rb') as in_f:
+                exp = dill.load(in_f)
+
+            plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
+
+        else:
+            pass
+            # This was not a file i should look for.
+    else:
+        # This was either another FilyType or Plot.html alerady exists.
+        pass
+
+
+if __name__ == '__main__':
+    args = build_args()
+    in_file = args.in_file[0]
+    out_file = args.out_file
+
+    search_and_apply(in_file, plot_box, files_to_look_for=['experiment.dill'])
+    # , 'all_names.dill', 'all_notable_nets.dill'])
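Note: the new box_plots.py walks a results tree recursively and renders one grouped box plot per experiment.dill it finds, so the intended invocation is along the lines of (the experiments/ path is illustrative, not taken from this commit):

python code/box_plots.py -i experiments/

Grouping works by giving every go.Box trace the same categorical x values and setting boxmode='group' in the layout; a minimal sketch with made-up values, assuming only plotly is installed:

import plotly as pl
import plotly.graph_objs as go

x = ['D 10e-0'] * 3 + ['D 10e-1'] * 3  # one category label per sample
box_a = go.Box(x=x, y=[3, 4, 5, 5, 6, 7], name='Time to Vergence', boxpoints=False)
box_b = go.Box(x=x, y=[0, 0, 1, 1, 2, 3], name='Time as Fixpoint', boxpoints=False)
# boxmode='group' places the two traces side by side within each x category.
fig = go.Figure(data=[box_a, box_b], layout=dict(title='Grouped Box Sketch', boxmode='group'))
pl.offline.plot(fig, auto_open=False, filename='grouped_box_sketch.html')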
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
+{'divergent': 38, 'fix_zero': 62, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
+{'divergent': 0, 'fix_zero': 100, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -0,0 +1 @@
+{'divergent': 3, 'fix_zero': 97, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
@@ -180,7 +180,7 @@ class ParticleDecorator:
 
     def make_state(self, **kwargs):
         weights = self.net.get_weights_flat()
-        if any(np.isinf(weights)):
+        if any(np.isinf(weights)) or any(np.isnan(weights)):
             return None
         state = {'class': self.net.__class__.__name__, 'weights': weights}
         state.update(kwargs)
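Note: this fix matters because NaN weights previously slipped past the isinf guard and produced unusable particle states. np.isinf(w) or np.isnan(w) is exactly the negation of np.isfinite(w), so an equivalent one-line guard would be (a sketch, not the repository's code):

import numpy as np

weights = np.array([1.0, float('nan')])
if not np.all(np.isfinite(weights)):  # False for both inf and NaN entries
    state = None  # skip recording this state, as make_state now does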
@@ -609,8 +609,8 @@ class TrainingNeuralNetworkDecorator():
    def train(self, batchsize=1, store_states=True, epoch=0):
        self.compiled()
        x, y = self.net.compute_samples()
-       savestatecallback = SaveStateCallback(net=self, epoch=epoch) if store_states else None
-       history = self.net.model.fit(x=x, y=y, verbose=0, batch_size=batchsize, callbacks=[savestatecallback] if store_states else None, initial_epoch=epoch)
+       savestatecallback = [SaveStateCallback(net=self, epoch=epoch)] if store_states else None
+       history = self.net.model.fit(x=x, y=y, epochs=epoch+1, verbose=0, batch_size=batchsize, callbacks=savestatecallback, initial_epoch=epoch)
        return history.history['loss'][-1]
 
    def learn_from(self, other_network, batchsize=1):
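Note: two things changed in train. The callback is now wrapped into a list where it is created, since Keras expects callbacks as a list; and the old call never set epochs, so once initial_epoch reached 1 the default epochs=1 meant fit trained nothing at all. With epochs=epoch+1, each call advances the model exactly one epoch. A minimal sketch of the pattern, assuming a compiled Keras model and arrays x, y (all names here are placeholders):

def train_one_epoch(model, x, y, epoch, callback=None):
    callbacks = [callback] if callback is not None else None  # fit wants a list
    history = model.fit(x=x, y=y, verbose=0,
                        initial_epoch=epoch, epochs=epoch + 1,  # run epoch -> epoch+1
                        callbacks=callbacks)
    return history.history['loss'][-1]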
@@ -628,36 +628,37 @@ if __name__ == '__main__':
         exp.run_net(net, 100, run_id=run_id + 1)
         exp.historical_particles[run_id] = net
         if prints:
-            # print(net.apply_to_network(net))
             print("Fixpoint? " + str(net.is_fixpoint()))
             print("Loss " + str(loss))
-        K.clear_session()
 
-    if False:
+    if True:
        # WeightWise Neural Network
        with FixpointExperiment() as exp:
            for run_id in tqdm(range(100)):
                net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2) \
                                        .with_keras_params(activation='linear'))
                run_exp(net)
+               K.clear_session()
            exp.log(exp.counters)
 
-    if False:
+    if True:
        # Aggregating Neural Network
        with FixpointExperiment() as exp:
            for run_id in tqdm(range(100)):
                net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2) \
                                        .with_keras_params())
                run_exp(net)
+               K.clear_session()
            exp.log(exp.counters)
 
-    if False:
+    if True:
        #FFT Neural Network
        with FixpointExperiment() as exp:
            for run_id in tqdm(range(100)):
                net = ParticleDecorator(FFTNeuralNetwork(aggregates=4, width=2, depth=2) \
                                        .with_keras_params(activation='linear'))
                run_exp(net)
+               K.clear_session()
            exp.log(exp.counters)
 
    if True:
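Note: the K.clear_session() calls added after each run_exp keep these loops usable. With standalone Keras on the TensorFlow backend, every network built in a loop adds nodes to one global graph, so iterations get slower and leak memory unless the session is reset. A minimal sketch of the idiom, assuming standalone keras with the TensorFlow backend:

import numpy as np
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense

for run_id in range(3):
    model = Sequential([Dense(2, input_dim=2)])  # fresh graph nodes each iteration
    model.compile(optimizer='sgd', loss='mse')
    model.fit(np.zeros((4, 2)), np.zeros((4, 2)), verbose=0)
    K.clear_session()  # drop the accumulated graph before the next run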
@@ -665,13 +666,14 @@ if __name__ == '__main__':
        with FixpointExperiment() as exp:
            for i in range(1):
                run_count = 1000
-               net = ParticleDecorator(TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
+               net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
                net.with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
                for run_id in tqdm(range(run_count+1)):
                    net.compiled()
                    loss = net.train(epoch=run_id)
                    if run_id % 100 == 0:
                        run_exp(net)
+               K.clear_session()
 
    if False:
        with FixpointExperiment() as exp:
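Note on the reordered wrappers: TrainingNeuralNetworkDecorator(ParticleDecorator(net)) puts the trainer outermost, so calls like net.train() and with_params() resolve on the training decorator while particle bookkeeping still wraps the raw network underneath. This behavior hinges on attribute delegation; a generic sketch of that pattern (class names here are illustrative, not the repository's implementation):

class Delegator:
    def __init__(self, net):
        self.net = net

    def __getattr__(self, name):
        # Only consulted when normal lookup fails locally, so each wrapper
        # adds its own methods and forwards everything else inward.
        return getattr(self.net, name)

class ParticleWrapper(Delegator):
    def describe(self):
        return 'particle(' + self.net.describe() + ')'

class TrainerWrapper(Delegator):
    def train(self):
        return 'trained ' + self.describe()

class Net:
    def describe(self):
        return 'net'

print(TrainerWrapper(ParticleWrapper(Net())).train())  # -> trained particle(net)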
@@ -689,6 +691,7 @@ if __name__ == '__main__':
            print("Fixpoint after Agg? " + str(fp))
            print("Loss " + str(loss))
            print()
+
    if False:
        # this explodes in our faces completely... NAN everywhere
        # TODO: Wtf is happening here?
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,36 @@
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
Binary file not shown.
File diff suppressed because one or more lines are too long
@@ -0,0 +1,30 @@
+variation 10e-0
+avg time to vergence 3.72
+avg time as fixpoint 0
+variation 10e-1
+avg time to vergence 5.13
+avg time as fixpoint 0
+variation 10e-2
+avg time to vergence 6.53
+avg time as fixpoint 0
+variation 10e-3
+avg time to vergence 8.09
+avg time as fixpoint 0
+variation 10e-4
+avg time to vergence 9.81
+avg time as fixpoint 0.06
+variation 10e-5
+avg time to vergence 11.43
+avg time as fixpoint 1.51
+variation 10e-6
+avg time to vergence 13.15
+avg time as fixpoint 3.34
+variation 10e-7
+avg time to vergence 14.57
+avg time as fixpoint 4.79
+variation 10e-8
+avg time to vergence 22.41
+avg time as fixpoint 12.37
+variation 10e-9
+avg time to vergence 26.17
+avg time as fixpoint 16.11
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,8 @@
+ParticleDecorator activiation='linear' use_bias=False
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 19, 'fix_sec': 0, 'other': 1}
+
+
+ParticleDecorator activiation='linear' use_bias=False
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 20}
+
+
@@ -28,34 +28,38 @@ def count(counters, net, notable_nets=[]):
        counters['other'] += 1
    return counters, notable_nets
 
-with Experiment('fixpoint-density') as exp:
-    exp.trials = 100
-    exp.epsilon = 1e-4
-    net_generators = []
-    for activation in ['linear', 'sigmoid', 'relu']:
-        net_generators += [lambda activation=activation: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-        net_generators += [lambda activation=activation: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-        net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-    all_counters = []
-    all_notable_nets = []
-    all_names = []
-    for net_generator_id, net_generator in enumerate(net_generators):
-        counters = generate_counters()
-        notable_nets = []
-        for _ in tqdm(range(exp.trials)):
-            net = net_generator().with_params(epsilon=exp.epsilon)
-            name = str(net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
-            count(counters, net, notable_nets)
-            keras.backend.clear_session()
-        all_counters += [counters]
-        all_notable_nets += [notable_nets]
-        all_names += [name]
-    exp.save(all_counters=all_counters)
-    exp.save(all_notable_nets=all_notable_nets)
-    exp.save(all_names=all_names)
-    for exp_id, counter in enumerate(all_counters):
-        exp.log(all_names[exp_id])
-        exp.log(all_counters[exp_id])
-        exp.log('\n')
-
-print('Done')
+if __name__ == '__main__':
+    with Experiment('fixpoint-density') as exp:
+        exp.trials = 100
+        exp.epsilon = 1e-4
+        net_generators = []
+        for activation in ['linear', 'sigmoid', 'relu']:
+            net_generators += [lambda activation=activation: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+            net_generators += [lambda activation=activation: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+            net_generators += [lambda activation=activation: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+            # net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+        all_counters = []
+        all_notable_nets = []
+        all_names = []
+        for net_generator_id, net_generator in enumerate(net_generators):
+            counters = generate_counters()
+            notable_nets = []
+            for _ in tqdm(range(exp.trials)):
+                net = net_generator().with_params(epsilon=exp.epsilon)
+                net = ParticleDecorator(net)
+                name = str(net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
+                count(counters, net, notable_nets)
+                keras.backend.clear_session()
+            all_counters += [counters]
+            all_notable_nets += [notable_nets]
+            all_names += [name]
+        exp.save(all_counters=all_counters)
+        exp.save(all_notable_nets=all_notable_nets)
+        exp.save(all_names=all_names)
+        for exp_id, counter in enumerate(all_counters):
+            exp.log(all_names[exp_id])
+            exp.log(all_counters[exp_id])
+            exp.log('\n')
+
+    print('Done')
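Note: the net_generators lists above depend on the lambda activation=activation: ... idiom. Default arguments are evaluated when the lambda is created, so each generator freezes the loop value it was built with; a bare closure would see only the final value of activation after the loop ends. A short demonstration:

makers_late = [lambda: a for a in ['linear', 'sigmoid', 'relu']]
makers_bound = [lambda a=a: a for a in ['linear', 'sigmoid', 'relu']]
print([f() for f in makers_late])   # ['relu', 'relu', 'relu']
print([f() for f in makers_bound])  # ['linear', 'sigmoid', 'relu']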
@@ -5,7 +5,6 @@ import os
 # Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
-
 
 from util import *
 from experiment import *
 from network import *
@@ -16,19 +15,22 @@ import keras.backend
 
 from statistics import mean
 avg = mean
 
 
 def generate_fixpoint_weights():
     return [
         np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
         np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
         np.array([[1.0], [0.0]], dtype=np.float32)
     ]
 
 
 def generate_fixpoint_net():
     net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
     net.set_weights(generate_fixpoint_weights())
     return net
 
 
 def vary(old_weights, e=1.0):
     new_weights = copy.deepcopy(old_weights)
     for layer_id, layer in enumerate(new_weights):
@@ -40,45 +42,49 @@ def vary(old_weights, e=1.0):
                new_weights[layer_id][cell_id][weight_id] = weight - prng() * e
    return new_weights
 
-with Experiment('known-fixpoint-variation') as exp:
-    exp.depth = 10
-    exp.trials = 100
-    exp.max_steps = 100
-    exp.epsilon = 1e-4
-    exp.xs = []
-    exp.ys = []
-    exp.zs = []
-    exp.notable_nets = []
-    current_scale = 1.0
-    for _ in range(exp.depth):
-        print('variation scale ' + str(current_scale))
-        for _ in tqdm(range(exp.trials)):
-            net = generate_fixpoint_net().with_params(epsilon=exp.epsilon)
-            net.set_weights(vary(net.get_weights(), current_scale))
-            time_to_something = 0
-            time_as_fixpoint = 0
-            still_fixpoint = True
-            for _ in range(exp.max_steps):
-                net.self_attack()
-                if net.is_zero() or net.is_diverged():
-                    break
-                if net.is_fixpoint():
-                    if still_fixpoint:
-                        time_as_fixpoint += 1
-                    else:
-                        print('remarkable')
-                        exp.notable_nets += [net.get_weights()]
-                        still_fixpoint = True
-                else:
-                    still_fixpoint = False
-                time_to_something += 1
-            exp.xs += [current_scale]
-            exp.ys += [time_to_something] #time steps taken to reach divergence or zero (reaching another fix-point is basically never happening)
-            exp.zs += [time_as_fixpoint] #time steps still regarded as sthe initial fix-point
-            keras.backend.clear_session()
-        current_scale /= 10.0
-    for d in range(exp.depth):
-        exp.log('variation 10e-' + str(d))
-        exp.log('avg time to vergence ' + str(avg(exp.ys[d*exp.trials:(d+1)*exp.trials])))
-        exp.log('avg time as fixpoint ' + str(avg(exp.zs[d*exp.trials:(d+1)*exp.trials])))
+if __name__ == '__main__':
+    with Experiment('known-fixpoint-variation') as exp:
+        exp.depth = 10
+        exp.trials = 100
+        exp.max_steps = 100
+        exp.epsilon = 1e-4
+        exp.xs = []
+        exp.ys = []
+        exp.zs = []
+        exp.notable_nets = []
+        current_scale = 1.0
+        for _ in range(exp.depth):
+            print('variation scale ' + str(current_scale))
+            for _ in tqdm(range(exp.trials)):
+                net = generate_fixpoint_net().with_params(epsilon=exp.epsilon)
+                net = ParticleDecorator(net)
+                net.set_weights(vary(net.get_weights(), current_scale))
+                time_to_something = 0
+                time_as_fixpoint = 0
+                still_fixpoint = True
+                for _ in range(exp.max_steps):
+                    net.self_attack()
+                    if net.is_zero() or net.is_diverged():
+                        break
+                    if net.is_fixpoint():
+                        if still_fixpoint:
+                            time_as_fixpoint += 1
+                        else:
+                            print('remarkable')
+                            exp.notable_nets += [net.get_weights()]
+                            still_fixpoint = True
+                    else:
+                        still_fixpoint = False
+                    time_to_something += 1
+                exp.xs += [current_scale]
+                # time steps taken to reach divergence or zero (reaching another fix-point is basically never happening)
+                exp.ys += [time_to_something]
+                # time steps still regarded as sthe initial fix-point
+                exp.zs += [time_as_fixpoint]
+                keras.backend.clear_session()
+            current_scale /= 10.0
+        for d in range(exp.depth):
+            exp.log('variation 10e-' + str(d))
+            exp.log('avg time to vergence ' + str(avg(exp.ys[d*exp.trials:(d+1) * exp.trials])))
+            exp.log('avg time as fixpoint ' + str(avg(exp.zs[d*exp.trials:(d+1) * exp.trials])))
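Note: the logging loop at the end recovers per-scale averages from the flat result lists by slicing: trial j at depth d is stored at index d*exp.trials + j, so exp.ys[d*exp.trials:(d+1)*exp.trials] is exactly the block of trials run at scale 10e-d. A tiny check of the indexing, assuming depth 2 and 3 trials:

from statistics import mean
ys = [1, 2, 3, 10, 20, 30]  # depth-0 trials, then depth-1 trials
trials = 3
print([mean(ys[d*trials:(d+1)*trials]) for d in range(2)])  # [2, 20]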
@@ -3,6 +3,9 @@ import os
 
 from typing import Tuple
 
+# Concat top Level dir to system environmental variables
+sys.path += os.path.join('..', '.')
+
 from util import *
 from experiment import *
 from network import *
@@ -10,10 +13,6 @@ from network import *
 import keras.backend
 
 
-# Concat top Level dir to system environmental variables
-sys.path += os.path.join('..', '.')
-
-
 def generate_counters():
     """
     Initial build of the counter dict, to store counts.
@@ -51,46 +50,52 @@ def count(counters, net, notable_nets=[]):
        counters['other'] += 1
    return counters, notable_nets
 
+if __name__ == '__main__':
+
    with Experiment('mixed-self-fixpoints') as exp:
        exp.trials = 20
        exp.selfattacks = 4
        exp.trains_per_selfattack_values = [100 * i for i in range(11)]
        exp.epsilon = 1e-4
        net_generators = []
-       for activation in ['linear', 'sigmoid', 'relu']:
+       for activation in ['linear']: # , 'sigmoid', 'relu']:
            for use_bias in [False]:
                net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-               # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-               # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+               net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+               # net_generators += [lambda activation=activation, use_bias=use_bias: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+               # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
 
        all_names = []
        all_data = []
-       for net_generator_id, net_generator in enumerate(net_generators):
-           xs = []
-           ys = []
-           for trains_per_selfattack in exp.trains_per_selfattack_values:
-               counters = generate_counters()
-               notable_nets = []
-               for _ in tqdm(range(exp.trials)):
-                   net = TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon)
-                   name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
-                   for selfattack_id in range(exp.selfattacks):
-                       net.self_attack()
-                       for train_id in range(trains_per_selfattack):
-                           loss = net.compiled().train(epoch=selfattack_id*trains_per_selfattack+train_id)
-                           if net.is_diverged() or net.is_fixpoint():
-                               break
-                   count(counters, net, notable_nets)
-                   keras.backend.clear_session()
-               xs += [trains_per_selfattack]
-               ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
-           all_names += [name]
-           all_data += [{'xs':xs, 'ys':ys}] #xs: how many trains per self-attack from exp.trains_per_selfattack_values, ys: average amount of fixpoints found
-
-       exp.save(all_names=all_names)
-       exp.save(all_data=all_data)
-       for exp_id, name in enumerate(all_names):
-           exp.log(all_names[exp_id])
-           exp.log(all_data[exp_id])
-           exp.log('\n')
+       for net_generator_id, net_generator in enumerate(net_generators):
+           xs = []
+           ys = []
+           for trains_per_selfattack in exp.trains_per_selfattack_values:
+               counters = generate_counters()
+               notable_nets = []
+               for _ in tqdm(range(exp.trials)):
+                   net = ParticleDecorator(net_generator())
+                   net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon)
+                   name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
+                   for selfattack_id in range(exp.selfattacks):
+                       net.self_attack()
+                       for train_id in range(trains_per_selfattack):
+                           loss = net.compiled().train(epoch=selfattack_id*trains_per_selfattack+train_id)
+                           if net.is_diverged() or net.is_fixpoint():
+                               break
+                   count(counters, net, notable_nets)
+                   keras.backend.clear_session()
+               xs += [trains_per_selfattack]
+               ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
+           all_names += [name]
+           # xs: how many trains per self-attack from exp.trains_per_selfattack_values
+           # ys: average amount of fixpoints found
+           all_data += [{'xs': xs, 'ys': ys}]
+
+       exp.save(all_names=all_names)
+       exp.save(all_data=all_data)
+       for exp_id, name in enumerate(all_names):
+           exp.log(all_names[exp_id])
+           exp.log(all_data[exp_id])
+           exp.log('\n')
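Note: each y value appended above is the fraction of trials that ended in any fixpoint, read off the shared counter dict. The arithmetic in isolation, with made-up counts:

counters = {'divergent': 3, 'fix_zero': 12, 'fix_other': 5, 'fix_sec': 0, 'other': 0}
trials = 20
fixpoint_rate = float(counters['fix_zero'] + counters['fix_other']) / float(trials)
print(fixpoint_rate)  # 0.85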
@@ -8,7 +8,7 @@ from util import *
 from experiment import *
 from network import *
 
-import keras.backend
+import keras.backend as K
 
 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
@@ -29,36 +29,40 @@ def count(counters, net, notable_nets=[]):
        counters['other'] += 1
    return counters, notable_nets
 
-with Experiment('training_fixpoint') as exp:
-    exp.trials = 5
-    exp.run_count = 500
-    exp.epsilon = 1e-4
-    net_generators = []
-    for activation in ['linear', 'sigmoid', 'relu']:
-        for use_bias in [False]:
-            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-    all_counters = []
-    all_notable_nets = []
-    all_names = []
-    for net_generator_id, net_generator in enumerate(net_generators):
-        counters = generate_counters()
-        notable_nets = []
-        for _ in tqdm(range(exp.trials)):
-            net = TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon)
-            name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
-            for run_id in range(exp.run_count):
-                loss = net.compiled().train(epoch=run_id+1)
-            count(counters, net, notable_nets)
-            keras.backend.clear_session()
-        all_counters += [counters]
-        all_notable_nets += [notable_nets]
-        all_names += [name]
-    exp.save(all_counters=all_counters) #net types reached in the end
-    exp.save(all_notable_nets=all_notable_nets)
-    exp.save(all_names=all_names) #experiment setups
-    for exp_id, counter in enumerate(all_counters):
-        exp.log(all_names[exp_id])
-        exp.log(all_counters[exp_id])
-        exp.log('\n')
+if __name__ == '__main__':
+
+    with Experiment('training_fixpoint') as exp:
+        exp.trials = 20
+        exp.run_count = 500
+        exp.epsilon = 1e-4
+        net_generators = []
+        for activation in ['linear']: # , 'sigmoid', 'relu']:
+            for use_bias in [False]:
+                net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+                net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+                # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+        all_counters = []
+        all_notable_nets = []
+        all_names = []
+        for net_generator_id, net_generator in enumerate(net_generators):
+            counters = generate_counters()
+            notable_nets = []
+            for _ in tqdm(range(exp.trials)):
+                net = ParticleDecorator(net_generator())
+                net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon)
+                name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
+                for run_id in range(exp.run_count):
+                    loss = net.compiled().train(epoch=run_id+1)
+                count(counters, net, notable_nets)
+            all_counters += [counters]
+            all_notable_nets += [notable_nets]
+            all_names += [name]
+            K.clear_session()
+        exp.save(all_counters=all_counters) #net types reached in the end
+        # exp.save(all_notable_nets=all_notable_nets)
+        exp.save(all_names=all_names) #experiment setups
+        for exp_id, counter in enumerate(all_counters):
+            exp.log(all_names[exp_id])
+            exp.log(all_counters[exp_id])
+            exp.log('\n')
@@ -68,7 +68,8 @@ class Soup(object):
        description['counterpart'] = other_particle.get_uid()
        for _ in range(self.params.get('train', 0)):
            particle.compiled()
-           loss = particle.train(store_states=False) #callbacks on save_state are broken for TrainingNeuralNetwork
+           # callbacks on save_state are broken for TrainingNeuralNetwork
+           loss = particle.train(store_states=False)
        description['fitted'] = self.params.get('train', 0)
        description['loss'] = loss
        description['action'] = 'train_self'
@@ -34,6 +34,8 @@ def build_from_soup_or_exp(soup):
            action=[event.get('action', None) for event in particle],
            counterpart=[event.get('counterpart', None) for event in particle]
        )
+       if any([x is not None for x in particle_dict['counterpart']]):
+           print('counterpart')
        particle_list.append(particle_dict)
    return particle_list
 
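Note: the added check in build_from_soup_or_exp is a debugging probe that prints once per particle whose history records at least one counterpart. The list comprehension inside any() is unnecessary; a generator expression gives the same truth value and short-circuits without building the intermediate list:

counterparts = [None, None, 'uid-7']
print(any(x is not None for x in counterparts))  # True, stops at the first hit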