soup trajectory and box plot
parent 1e5bec814d
commit 090546520e
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,12 +0,0 @@
-ParticleDecorator activiation='linear' use_bias='False'
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
-
-
-ParticleDecorator activiation='sigmoid' use_bias='False'
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
-
-
-ParticleDecorator activiation='relu' use_bias='False'
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
-
-
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-TrainingNeuralNetworkDecorator activiation='sigmoid' use_bias=False
-{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
-
-
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-ParticleDecorator activiation='linear' use_bias=False
-{'xs': [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000], 'ys': [0.9, 0.95, 1.0, 0.95, 0.9, 0.95, 0.85, 0.8, 0.85, 0.85, 0.75]}
-
-
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
-{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.4, 0.2, 0.3, 0.2, 0.3, 0.3, 0.5, 0.3, 0.9, 0.6, 0.2], 'zs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
-
-
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-ParticleDecorator activiation='linear' use_bias=False
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 20}
-
-
@@ -60,7 +60,16 @@ def plot_box(exp: Experiment, filename='histogram_plot'):

     trace_list.extend([vergence_box, fixpoint_box])

-    layout = dict(title='{} Histogram Plot'.format('Experiment Name Penis'),
+    layout = dict(title='{}'.format('Known Fixpoint Variation'),
+                  titlefont=dict(size=30),
+                  legend=dict(
+                      orientation="h",
+                      x=.1, y=-0.1,
+                      font=dict(
+                          size=20,
+                          color='black'
+                      ),
+                  ),
                   boxmode='group',
                   boxgap=0,
                   # barmode='group',
@@ -71,9 +80,13 @@ def plot_box(exp: Experiment, filename='histogram_plot'):
                       showticklabels=True),
                   yaxis=dict(
                       title='Occurences',
-                      zeroline=False)
+                      zeroline=False,
+                      titlefont=dict(
+                          size=30
+                      )
+                  ),
                   # height=400, width=400,
                   # margin=dict(l=20, r=20, t=20, b=20)
                   margin=dict(t=50)
                   )

     fig = go.Figure(data=trace_list, layout=layout)
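The two hunks above retitle the grouped box plot and enlarge its fonts. As a hedged, minimal sketch of the same layout pattern outside the repo (plotly offline API assumed; the trace values here are invented for illustration, not experiment output):

```python
import plotly as pl
import plotly.graph_objs as go

# Two example box traces standing in for vergence_box / fixpoint_box.
vergence_box = go.Box(y=[3.6, 5.0, 6.5, 8.0], name='time to vergence')
fixpoint_box = go.Box(y=[0.0, 0.0, 1.4, 3.2], name='time as fixpoint')

layout = dict(title='{}'.format('Known Fixpoint Variation'),
              titlefont=dict(size=30),
              legend=dict(orientation="h", x=.1, y=-0.1,
                          font=dict(size=20, color='black')),
              boxmode='group',
              boxgap=0)

fig = go.Figure(data=[vergence_box, fixpoint_box], layout=layout)
pl.offline.plot(fig, auto_open=False, filename='histogram_plot.html')
```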
@@ -94,8 +107,10 @@ def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_fo
        with open(absolut_file_or_folder, 'rb') as in_f:
            exp = dill.load(in_f)

-        plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
+        try:
+            plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
+        except AttributeError:
+            pass

    else:
        pass
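The guard added here skips pickled experiments that predate the attributes the plotting function expects, instead of crashing the whole sweep. A minimal standalone sketch of the pattern (function name invented; dill is the serializer the file already uses, and the `[:-5]` strips a '.dill' suffix):

```python
import dill

def plot_if_compatible(path, plotting_function):
    # Load a pickled Experiment; older pickles may lack attributes
    # the plotting function needs, so skip those quietly.
    with open(path, 'rb') as in_f:
        exp = dill.load(in_f)
    try:
        plotting_function(exp, filename='{}.html'.format(path[:-5]))
    except AttributeError:
        pass  # stale pickle without the fields this plot needs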
@@ -171,6 +171,10 @@ class ParticleDecorator:
        self.__class__.next_uid += 1
        self.net = net
        self.states = []
+        self.save_state(time=0,
+                        action='train_self',
+                        counterpart=None
+                        )

    def __getattr__(self, name):
        return getattr(self.net, name)
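ParticleDecorator forwards unknown attribute lookups to the wrapped net via `__getattr__`, so a decorated network keeps its full interface while the decorator records states. A toy illustration of that delegation (the `Net` class here is invented for the example):

```python
class Net:
    def predict(self):
        return 42

class ParticleDecorator:
    def __init__(self, net):
        self.net = net
        self.states = []

    def __getattr__(self, name):
        # Only called when normal lookup fails, so self.net and
        # self.states resolve directly while net methods pass through.
        return getattr(self.net, name)

decorated = ParticleDecorator(Net())
assert decorated.predict() == 42
```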
Binary file not shown.
@@ -1 +0,0 @@
-{'divergent': 0, 'fix_zero': 10, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
code/setups/experiments/Known-Fixpoint-Variation/log.txt
Normal file (30 lines)
@@ -0,0 +1,30 @@
+variation 10e-0
+avg time to vergence 3.63
+avg time as fixpoint 0
+variation 10e-1
+avg time to vergence 5.02
+avg time as fixpoint 0
+variation 10e-2
+avg time to vergence 6.46
+avg time as fixpoint 0
+variation 10e-3
+avg time to vergence 8.04
+avg time as fixpoint 0
+variation 10e-4
+avg time to vergence 9.61
+avg time as fixpoint 0.04
+variation 10e-5
+avg time to vergence 11.23
+avg time as fixpoint 1.38
+variation 10e-6
+avg time to vergence 12.99
+avg time as fixpoint 3.23
+variation 10e-7
+avg time to vergence 14.58
+avg time as fixpoint 4.84
+variation 10e-8
+avg time to vergence 21.95
+avg time as fixpoint 11.91
+variation 10e-9
+avg time to vergence 26.45
+avg time as fixpoint 16.47
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1 +0,0 @@
-{'divergent': 6, 'fix_zero': 4, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
Binary file not shown.
@@ -36,11 +36,11 @@ if __name__ == '__main__':
    exp.trials = 100000
    exp.epsilon = 1e-4
    net_generators = []
-    for activation in ['linear', 'sigmoid', 'relu']:
+    for activation in ['linear']:
        net_generators += [lambda activation=activation: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
        net_generators += [lambda activation=activation: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-        #net_generators += [lambda activation=activation: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-        net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+        # net_generators += [lambda activation=activation: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+        # net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
    all_counters = []
    all_notable_nets = []
    all_names = []
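The `lambda activation=activation: ...` idiom in these generator lists is deliberate: a default argument freezes the current loop value, whereas a bare closure would late-bind and leave every generator pointing at the last activation. A small self-contained demonstration:

```python
late = [lambda: act for act in ['linear', 'sigmoid', 'relu']]
bound = [lambda act=act: act for act in ['linear', 'sigmoid', 'relu']]

print([f() for f in late])   # ['relu', 'relu', 'relu'] -- all see the final value
print([f() for f in bound])  # ['linear', 'sigmoid', 'relu'] -- captured per iteration
```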
@@ -60,8 +60,8 @@ if __name__ == '__main__':
    net_generators = []
    for activation in ['linear']:  # , 'sigmoid', 'relu']:
        for use_bias in [False]:
-            # net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
            # net_generators += [lambda activation=activation, use_bias=use_bias: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
            # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]

@@ -19,14 +19,15 @@ if __name__ == '__main__':

    if True:
        # WeightWise Neural Network
-        with FixpointExperiment() as exp:
-            for run_id in tqdm(range(10)):
-                net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)
-                                        .with_keras_params(activation='linear'))
-                run_exp(net)
-                K.clear_session()
-            exp.log(exp.counters)
-            exp.save(trajectorys=exp.without_particles())
+        for _ in range(10):
+            with FixpointExperiment() as exp:
+                for run_id in tqdm(range(20)):
+                    net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)
+                                            .with_keras_params(activation='linear'))
+                    run_exp(net)
+                    K.clear_session()
+                exp.log(exp.counters)
+                exp.save(trajectorys=exp.without_particles())

    if False:
        # Aggregating Neural Network
@@ -40,8 +40,8 @@ if __name__ == '__main__':
    for activation in ['linear']:  # , 'sigmoid', 'relu']:
        for use_bias in [False]:
            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
    all_counters = []
    all_notable_nets = []
    all_names = []
@@ -59,7 +59,9 @@ if __name__ == '__main__':
            all_notable_nets += [notable_nets]
            all_names += [name]
            K.clear_session()
-        exp.save(all_counters=all_counters)  # net types reached in the end
+        exp.save(all_counters=all_counters)
+        exp.save(trajectorys=exp.without_particles())
+        # net types reached in the end
        # exp.save(all_notable_nets=all_notable_nets)
        exp.save(all_names=all_names)  # experiment setups
        for exp_id, counter in enumerate(all_counters):
@@ -50,7 +50,7 @@ def plot_latent_trajectories(soup_or_experiment, filename='latent_trajectory_plo
    transformer = TSNE()
    for particle_dict in data_dict:
        array = np.asarray([np.hstack([x.flatten() for x in timestamp]).flatten()
-                           for timestamp in particle_dict['trajectory']])
+                            for timestamp in particle_dict['trajectory']])
        particle_dict['trajectory'] = array
        transformer.fit(array)
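The realigned comprehension above flattens each timestamp's list of per-layer weight arrays into one row before the TSNE fit. A minimal numpy sketch with made-up shapes (a fake trajectory of 3 timestamps, each holding a 2x2 kernel and a length-2 bias):

```python
import numpy as np

trajectory = [[np.ones((2, 2)), np.zeros(2)] for _ in range(3)]

# One flat row per timestamp: 4 kernel weights + 2 bias weights.
array = np.asarray([np.hstack([x.flatten() for x in timestamp]).flatten()
                    for timestamp in trajectory])
print(array.shape)  # (3, 6)
```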
@@ -134,20 +134,20 @@ def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
                             mode='lines')

        line_start = go.Scatter3d(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
-                                z=np.asarray(particle_dict['time'][0]),
-                                marker=dict(
-                                    color='rgb(255, 0, 0)',
-                                    size=4
-                                ),
-                                showlegend=False
-                                )
+                                  z=np.asarray(particle_dict['time'][0]),
+                                  marker=dict(
+                                      color='rgb(255, 0, 0)',
+                                      size=4
+                                  ),
+                                  showlegend=False
+                                  )

        line_end = go.Scatter3d(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
-                              z=np.asarray(particle_dict['time'][-1]),
-                              marker=dict(
-                                  color='rgb(0, 0, 0)',
-                                  size=4
-                              ),
+                                z=np.asarray(particle_dict['time'][-1]),
+                                marker=dict(
+                                    color='rgb(0, 0, 0)',
+                                    size=4
+                                ),
                                showlegend=False
                                )
@@ -156,14 +156,18 @@ def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
    axis_layout = dict(gridcolor='rgb(255, 255, 255)',
                       zerolinecolor='rgb(255, 255, 255)',
                       showbackground=True,
-                       backgroundcolor='rgb(230, 230,230)'
+                       backgroundcolor='rgb(230, 230,230)',
+                       titlefont=dict(
+                           color='black',
+                           size=30
+                       )
                       )

    layout = go.Layout(scene=dict(
        # aspectratio=dict(x=2, y=2, z=2),
-        xaxis=dict(tickwidth=1, title='Transformed X', **axis_layout),
-        yaxis=dict(tickwidth=1, title='Transformed Y', **axis_layout),
-        zaxis=dict(tickwidth=1, title='Epoch', **axis_layout)),
+        xaxis=dict(title='Transformed X', **axis_layout),
+        yaxis=dict(title='Transformed Y', **axis_layout),
+        zaxis=dict(title='Epoch', **axis_layout)),
        # title='{} - Latent Trajectory Movement'.format('Soup'),

        width=1024, height=1024,
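Sharing one `axis_layout` dict and splatting it with `**` into each axis keeps the three 3D axes styled identically, with per-axis keys like `title` layered on top. A compact sketch of that reuse (plotly 3.x-era API assumed, matching this file's `titlefont` usage):

```python
import plotly.graph_objs as go

axis_layout = dict(gridcolor='rgb(255, 255, 255)',
                   zerolinecolor='rgb(255, 255, 255)',
                   showbackground=True,
                   backgroundcolor='rgb(230, 230,230)',
                   titlefont=dict(color='black', size=30))

# Each axis gets the shared styling plus its own title.
layout = go.Layout(scene=dict(
    xaxis=dict(title='Transformed X', **axis_layout),
    yaxis=dict(title='Transformed Y', **axis_layout),
    zaxis=dict(title='Epoch', **axis_layout)),
    width=1024, height=1024)
```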
@@ -171,7 +175,7 @@ def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
    )

    fig = go.Figure(data=data, layout=layout)
-    pl.offline.plot(fig, auto_open=True, filename=filename)
+    pl.offline.plot(fig, auto_open=True, filename=filename, validate=True)
    pass
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,12 +0,0 @@
-ParticleDecorator activiation='linear' use_bias='False'
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
-
-
-ParticleDecorator activiation='sigmoid' use_bias='False'
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
-
-
-ParticleDecorator activiation='relu' use_bias='False'
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
-
-
@@ -1,30 +0,0 @@
-variation 10e-0
-avg time to vergence 3.65
-avg time as fixpoint 0
-variation 10e-1
-avg time to vergence 5.07
-avg time as fixpoint 0
-variation 10e-2
-avg time to vergence 6.49
-avg time as fixpoint 0
-variation 10e-3
-avg time to vergence 7.97
-avg time as fixpoint 0
-variation 10e-4
-avg time to vergence 9.81
-avg time as fixpoint 0.04
-variation 10e-5
-avg time to vergence 11.4
-avg time as fixpoint 1.51
-variation 10e-6
-avg time to vergence 13.14
-avg time as fixpoint 3.26
-variation 10e-7
-avg time to vergence 14.63
-avg time as fixpoint 4.95
-variation 10e-8
-avg time to vergence 21.35
-avg time as fixpoint 11.47
-variation 10e-9
-avg time to vergence 26.36
-avg time as fixpoint 16.3
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-TrainingNeuralNetworkDecorator activiation='sigmoid' use_bias=False
-{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.0, 3.4, 7.0, 8.3, 9.3, 9.9, 9.5, 9.7, 9.9, 10.0]}
-
-
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
-{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2, 0.0, 0.0], 'zs': [0.0, 0.0, 0.6, 2.2, 3.5, 4.8, 5.6, 7.1, 8.3, 7.5, 9.0]}
-
-
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-ParticleDecorator activiation='linear' use_bias=False
-{'xs': [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000], 'ys': [0.4, 0.45, 0.7, 0.9, 1.0, 0.9, 0.9, 1.0, 0.9, 0.9, 0.9]}
-
-
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-ParticleDecorator activiation='linear' use_bias=False
-{'divergent': 0, 'fix_zero': 0, 'fix_other': 19, 'fix_sec': 0, 'other': 1}
-
-