diff --git a/.gitignore b/.gitignore
index e52f977..baba1d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,9 +3,6 @@
 
 ### Local Datasets ###
-code/raw_runs
-code/processed_runs
-experiments/
 
 ### Data ###
diff --git a/code/bar_plot.py b/code/bar_plot.py
index ebefa73..f1718f4 100644
--- a/code/bar_plot.py
+++ b/code/bar_plot.py
@@ -60,52 +60,6 @@ def plot_histogram(bars_dict_list: List[dict], filename='histogram_plot'):
     pass
 
 
-def line_plot(line_dict_list, filename='lineplot'):
-    # lines with standard deviation
-    # Transform data accordingly and plot it
-    data = []
-    rdylgn = cl.scales['10']['div']['RdYlGn']
-    rdylgn_background = [scale + (0.4,) for scale in cl.to_numeric(rdylgn)]
-    for line_id, line_dict in enumerate(line_dict_list):
-        name = line_dict.get('name', 'gimme a name')
-
-        upper_bound = go.Scatter(
-            name='Upper Bound',
-            x=line_dict['x'],
-            y=line_dict['upper_y'],
-            mode='lines',
-            marker=dict(color="#444"),
-            line=dict(width=0),
-            fillcolor=rdylgn_background[line_id],
-        )
-
-        trace = go.Scatter(
-            x=line_dict['x'],
-            y=line_dict['main_y'],
-            mode='lines',
-            name=name,
-            line=dict(color=line_id),
-            fillcolor=rdylgn_background[line_id],
-            fill='tonexty')
-
-        lower_bound = go.Scatter(
-            name='Lower Bound',
-            x=line_dict['x'],
-            y=line_dict['lower_y'],
-            marker=dict(color="#444"),
-            line=dict(width=0),
-            mode='lines')
-
-        data.extend([upper_bound, trace, lower_bound])
-
-    layout=dict(title='{} Line Plot'.format('Experiment Name Penis'),
-                height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
-
-    fig = go.Figure(data=data, layout=layout)
-    pl.offline.plot(fig, auto_open=True, filename=filename)
-    pass
-
-
 def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
     if os.path.isdir(absolut_file_or_folder):
         for sub_file_or_folder in os.scandir(absolut_file_or_folder):
diff --git a/code/box_plots.py b/code/box_plots.py
new file mode 100644
index 0000000..8cf643a
--- /dev/null
+++ b/code/box_plots.py
@@ -0,0 +1,114 @@
+import os
+
+from experiment import Experiment
+# noinspection PyUnresolvedReferences
+from soup import Soup
+from typing import List
+
+from collections import defaultdict
+
+from argparse import ArgumentParser
+import numpy as np
+
+import plotly as pl
+import plotly.graph_objs as go
+
+import colorlover as cl
+
+import dill
+
+
+def build_args():
+    arg_parser = ArgumentParser()
+    arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
+    arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
+    return arg_parser.parse_args()
+
+
+def plot_box(exp: Experiment, filename='box_plot'):
+    # categorical
+    ryb = cl.scales['10']['div']['RdYlBu']
+
+    data = []
+
+    for d in range(exp.depth):
+        names = ['D 10e-{}'.format(d)] * exp.trials
+        data.extend(names)
+
+    trace_list = []
+
+    vergence_box = go.Box(
+        y=exp.ys,
+        x=data,
+        name='Time to Vergence',
+        boxpoints=False,
+        showlegend=True,
+        marker=dict(
+            color=ryb[3]
+        ),
+    )
+    fixpoint_box = go.Box(
+        y=exp.zs,
+        x=data,
+        name='Time as Fixpoint',
+        boxpoints=False,
+        showlegend=True,
+        marker=dict(
+            color=ryb[-1]
+        ),
+    )
+
+    trace_list.extend([vergence_box, fixpoint_box])
+
+    layout = dict(title='{} Box Plot'.format('Experiment Name'),
+                  boxmode='group',
+                  boxgap=0,
+                  # barmode='group',
+                  bargap=0,
+                  xaxis=dict(showgrid=False,
+                             zeroline=True,
+                             tickangle=0,
+                             showticklabels=True),
+                  yaxis=dict(
+                      title='Occurrences',
+                      zeroline=False)
+                  # height=400, width=400,
+                  # margin=dict(l=20, r=20, t=20, b=20)
+                  )
+
+    fig = go.Figure(data=trace_list, layout=layout)
+    pl.offline.plot(fig, auto_open=True, filename=filename)
+    pass
+
+
+def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
+    if os.path.isdir(absolut_file_or_folder):
+        for sub_file_or_folder in os.scandir(absolut_file_or_folder):
+            search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
+    elif absolut_file_or_folder.endswith('.dill'):
+        file_or_folder = os.path.split(absolut_file_or_folder)[-1]
+        if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(file_or_folder[:-5])):
+            print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
+                                                                             file=absolut_file_or_folder)
+                  )
+
+            with open(absolut_file_or_folder, 'rb') as in_f:
+                exp = dill.load(in_f)
+
+            plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
+
+        else:
+            pass
+            # This was not a file we are looking for.
+    else:
+        # This was either another file type or the plot .html already exists.
+        pass
+
+
+if __name__ == '__main__':
+    args = build_args()
+    in_file = args.in_file[0]
+    out_file = args.out_file
+
+    search_and_apply(in_file, plot_box, files_to_look_for=['experiment.dill'])
+    # , 'all_names.dill', 'all_notable_nets.dill'])
diff --git a/code/experiments/exp-FixpointExperiment-_6511565650566781-0/experiment.dill b/code/experiments/exp-FixpointExperiment-_6511565650566781-0/experiment.dill
new file mode 100644
index 0000000..6e47662
Binary files /dev/null and b/code/experiments/exp-FixpointExperiment-_6511565650566781-0/experiment.dill differ
diff --git a/code/experiments/exp-FixpointExperiment-_6511565650566781-0/experiment.html b/code/experiments/exp-FixpointExperiment-_6511565650566781-0/experiment.html
new file mode 100644
index 0000000..5c82edd
--- /dev/null
+++ b/code/experiments/exp-FixpointExperiment-_6511565650566781-0/experiment.html
@@ -0,0 +1,7 @@
+<!-- generated Plotly plot markup -->
\ No newline at end of file
diff --git a/code/experiments/exp-FixpointExperiment-_6511565650566781-0/log.txt b/code/experiments/exp-FixpointExperiment-_6511565650566781-0/log.txt
new file mode 100644
index 0000000..a6561c8
--- /dev/null
+++ b/code/experiments/exp-FixpointExperiment-_6511565650566781-0/log.txt
@@ -0,0 +1 @@
+{'divergent': 38, 'fix_zero': 62, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
diff --git a/code/experiments/exp-FixpointExperiment-_6511565800569721-0/experiment.dill b/code/experiments/exp-FixpointExperiment-_6511565800569721-0/experiment.dill
new file mode 100644
index 0000000..d6cb497
Binary files /dev/null and b/code/experiments/exp-FixpointExperiment-_6511565800569721-0/experiment.dill differ
diff --git a/code/experiments/exp-FixpointExperiment-_6511565800569721-0/experiment.html b/code/experiments/exp-FixpointExperiment-_6511565800569721-0/experiment.html
new file mode 100644
index 0000000..64dc661
--- /dev/null
+++ b/code/experiments/exp-FixpointExperiment-_6511565800569721-0/experiment.html
@@ -0,0 +1,7 @@
+<!-- generated Plotly plot markup -->
\ No newline at end of file
diff --git a/code/experiments/exp-FixpointExperiment-_6511565800569721-0/log.txt b/code/experiments/exp-FixpointExperiment-_6511565800569721-0/log.txt
new file mode 100644
index 0000000..ade4520
--- /dev/null
+++ b/code/experiments/exp-FixpointExperiment-_6511565800569721-0/log.txt
@@ -0,0 +1 @@
+{'divergent': 0, 'fix_zero': 100, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
diff --git a/code/experiments/exp-FixpointExperiment-_6511565864900101-0/experiment.dill b/code/experiments/exp-FixpointExperiment-_6511565864900101-0/experiment.dill
new file mode 100644
index 0000000..d20c644
Binary files /dev/null and b/code/experiments/exp-FixpointExperiment-_6511565864900101-0/experiment.dill differ
diff --git a/code/experiments/exp-FixpointExperiment-_6511565864900101-0/experiment.html b/code/experiments/exp-FixpointExperiment-_6511565864900101-0/experiment.html
new file mode 100644
index 0000000..bab412e
--- /dev/null
+++ b/code/experiments/exp-FixpointExperiment-_6511565864900101-0/experiment.html
@@ -0,0 +1,7 @@
+<!-- generated Plotly plot markup -->
\ No newline at end of file
diff --git a/code/experiments/exp-FixpointExperiment-_6511565864900101-0/log.txt b/code/experiments/exp-FixpointExperiment-_6511565864900101-0/log.txt
new file mode 100644
index 0000000..e69de29
diff --git a/code/experiments/exp-FixpointExperiment-_813945717034465-0/experiment.dill b/code/experiments/exp-FixpointExperiment-_813945717034465-0/experiment.dill
new file mode 100644
index 0000000..d0f1e6f
Binary files /dev/null and b/code/experiments/exp-FixpointExperiment-_813945717034465-0/experiment.dill differ
diff --git a/code/experiments/exp-FixpointExperiment-_813945717034465-0/experiment.html b/code/experiments/exp-FixpointExperiment-_813945717034465-0/experiment.html
new file mode 100644
index 0000000..4cf8e46
--- /dev/null
+++ b/code/experiments/exp-FixpointExperiment-_813945717034465-0/experiment.html
@@ -0,0 +1,7 @@
+<!-- generated Plotly plot markup -->
\ No newline at end of file
diff --git a/code/experiments/exp-FixpointExperiment-_813945717034465-0/log.txt b/code/experiments/exp-FixpointExperiment-_813945717034465-0/log.txt
new file mode 100644
index 0000000..9db47d7
--- /dev/null
+++ b/code/experiments/exp-FixpointExperiment-_813945717034465-0/log.txt
@@ -0,0 +1 @@
+{'divergent': 3, 'fix_zero': 97, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
diff --git a/code/network.py b/code/network.py
index 9e735e7..c5c881a 100644
--- a/code/network.py
+++ b/code/network.py
@@ -180,7 +180,7 @@ class ParticleDecorator:
 
     def make_state(self, **kwargs):
         weights = self.net.get_weights_flat()
-        if any(np.isinf(weights)):
+        if any(np.isinf(weights)) or any(np.isnan(weights)):
             return None
         state = {'class': self.net.__class__.__name__, 'weights': weights}
         state.update(kwargs)
@@ -609,8 +609,8 @@ class TrainingNeuralNetworkDecorator():
     def train(self, batchsize=1, store_states=True, epoch=0):
         self.compiled()
         x, y = self.net.compute_samples()
-        savestatecallback = SaveStateCallback(net=self, epoch=epoch) if store_states else None
-        history = self.net.model.fit(x=x, y=y, verbose=0, batch_size=batchsize, callbacks=[savestatecallback] if store_states else None, initial_epoch=epoch)
+        savestatecallback = [SaveStateCallback(net=self, epoch=epoch)] if store_states else None
+        history = self.net.model.fit(x=x, y=y, epochs=epoch+1, verbose=0, batch_size=batchsize, callbacks=savestatecallback, initial_epoch=epoch)
         return history.history['loss'][-1]
 
     def learn_from(self, other_network, batchsize=1):
@@ -628,36 +628,37 @@ if __name__ == '__main__':
            exp.run_net(net, 100, run_id=run_id + 1)
            exp.historical_particles[run_id] = net
            if prints:
-               # print(net.apply_to_network(net))
                print("Fixpoint? " + str(net.is_fixpoint()))
                print("Loss " + str(loss))
-           K.clear_session()
 
-    if False:
+    if True:
         # WeightWise Neural Network
         with FixpointExperiment() as exp:
             for run_id in tqdm(range(100)):
                 net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2) \
                                         .with_keras_params(activation='linear'))
                 run_exp(net)
+                K.clear_session()
             exp.log(exp.counters)
 
-    if False:
+    if True:
         # Aggregating Neural Network
         with FixpointExperiment() as exp:
             for run_id in tqdm(range(100)):
                 net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2) \
                                         .with_keras_params())
                 run_exp(net)
+                K.clear_session()
             exp.log(exp.counters)
 
-    if False:
+    if True:
        #FFT Neural Network
        with FixpointExperiment() as exp:
            for run_id in tqdm(range(100)):
                net = ParticleDecorator(FFTNeuralNetwork(aggregates=4, width=2, depth=2) \
                                        .with_keras_params(activation='linear'))
                run_exp(net)
+               K.clear_session()
            exp.log(exp.counters)
 
     if True:
@@ -665,13 +666,14 @@ if __name__ == '__main__':
         with FixpointExperiment() as exp:
             for i in range(1):
                 run_count = 1000
-                net = ParticleDecorator(TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
+                net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
                 net.with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
                 for run_id in tqdm(range(run_count+1)):
                     net.compiled()
                     loss = net.train(epoch=run_id)
                     if run_id % 100 == 0:
                         run_exp(net)
+                    K.clear_session()
 
     if False:
         with FixpointExperiment() as exp:
             for i in range(1):
@@ -689,6 +691,7 @@ if __name__ == '__main__':
                 print("Fixpoint after Agg? " + str(fp))
                 print("Loss " + str(loss))
                 print()
+
     if False:
         # this explodes in our faces completely... NAN everywhere
         # TODO: Wtf is happening here?
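Note on the `TrainingNeuralNetworkDecorator.train()` fix above: Keras interprets `epochs` as the *final* epoch index and trains over the half-open range `[initial_epoch, epochs)`, so the old call (a growing `initial_epoch` against the default `epochs=1`) would run zero epochs once `epoch` reached 1 and leave `history.history` without a `'loss'` entry; `callbacks` also expects a list, not a bare callback. A minimal standalone sketch of the corrected resume pattern, using a toy model and random data rather than the repo's network classes:

```python
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

# Toy stand-in for self.net.model; any compiled Keras model behaves the same.
model = Sequential([Dense(1, input_dim=2)])
model.compile(optimizer='sgd', loss='mse')
x, y = np.random.rand(8, 2), np.random.rand(8, 1)

for epoch in range(3):
    # epochs=epoch + 1 makes fit() execute exactly one epoch per call,
    # while initial_epoch keeps the epoch counter (and any callbacks) aligned.
    history = model.fit(x=x, y=y, verbose=0, batch_size=1,
                        initial_epoch=epoch, epochs=epoch + 1)
    print(epoch, history.history['loss'][-1])  # exactly one loss entry per call
```

Each call appends exactly one entry to `history.history['loss']`, which is why `train()` can safely return `history.history['loss'][-1]`.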
diff --git a/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_counters.dill b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_counters.dill
new file mode 100644
index 0000000..2737dd3
Binary files /dev/null and b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_counters.dill differ
diff --git a/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_names.dill b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_names.dill
new file mode 100644
index 0000000..fae31be
Binary files /dev/null and b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_names.dill differ
diff --git a/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_notable_nets.dill b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_notable_nets.dill
new file mode 100644
index 0000000..29f9ce6
Binary files /dev/null and b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/all_notable_nets.dill differ
diff --git a/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/experiment.dill b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/experiment.dill
new file mode 100644
index 0000000..3a1d826
Binary files /dev/null and b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/experiment.dill differ
diff --git a/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/log.txt b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/log.txt
new file mode 100644
index 0000000..38c9f9f
--- /dev/null
+++ b/code/setups/experiments/exp-fixpoint-density-_6511547300443771-0/log.txt
@@ -0,0 +1,36 @@
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
diff --git a/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/experiment.dill b/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/experiment.dill
new file mode 100644
index 0000000..c6a0f35
Binary files /dev/null and b/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/experiment.dill differ
diff --git a/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/experiment.html b/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/experiment.html
new file mode 100644
index 0000000..05a41e6
--- /dev/null
+++ b/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/experiment.html
@@ -0,0 +1,7 @@
+<!-- generated Plotly plot markup -->
\ No newline at end of file
diff --git a/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/log.txt b/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/log.txt
new file mode 100644
index 0000000..e9917e1
--- /dev/null
+++ b/code/setups/experiments/exp-known-fixpoint-variation-_813943796847257-0/log.txt
@@ -0,0 +1,30 @@
+variation 10e-0
+avg time to vergence 3.72
+avg time as fixpoint 0
+variation 10e-1
+avg time to vergence 5.13
+avg time as fixpoint 0
+variation 10e-2
+avg time to vergence 6.53
+avg time as fixpoint 0
+variation 10e-3
+avg time to vergence 8.09
+avg time as fixpoint 0
+variation 10e-4
+avg time to vergence 9.81
+avg time as fixpoint 0.06
+variation 10e-5
+avg time to vergence 11.43
+avg time as fixpoint 1.51
+variation 10e-6
+avg time to vergence 13.15
+avg time as fixpoint 3.34
+variation 10e-7
+avg time to vergence 14.57
+avg time as fixpoint 4.79
+variation 10e-8
+avg time to vergence 22.41
+avg time as fixpoint 12.37
+variation 10e-9
+avg time to vergence 26.17
+avg time as fixpoint 16.11
diff --git a/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/all_counters.dill b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/all_counters.dill
new file mode 100644
index 0000000..5213178
Binary files /dev/null and b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/all_counters.dill differ
diff --git a/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/all_names.dill b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/all_names.dill
new file mode 100644
index 0000000..3cda739
Binary files /dev/null and b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/all_names.dill differ
diff --git a/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/experiment.dill b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/experiment.dill
new file mode 100644
index 0000000..3d89df2
Binary files /dev/null and b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/experiment.dill differ
diff --git a/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/log.txt b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/log.txt
new file mode 100644
index 0000000..17413f9
--- /dev/null
+++ b/code/setups/experiments/exp-training_fixpoint-_813946210831437-0/log.txt
@@ -0,0 +1,8 @@
+ParticleDecorator activiation='linear' use_bias=False
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 19, 'fix_sec': 0, 'other': 1}
+
+
+ParticleDecorator activiation='linear' use_bias=False
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 20}
+
+
diff --git a/code/setups/fixpoint-density.py b/code/setups/fixpoint-density.py
index 8331cd5..2f1aa4c 100644
--- a/code/setups/fixpoint-density.py
+++ b/code/setups/fixpoint-density.py
@@ -28,34 +28,38 @@ def count(counters, net, notable_nets=[]):
             counters['other'] += 1
     return counters, notable_nets
 
-with Experiment('fixpoint-density') as exp:
-    exp.trials = 100
-    exp.epsilon = 1e-4
-    net_generators = []
-    for activation in ['linear', 'sigmoid', 'relu']:
-        net_generators += [lambda activation=activation: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-        net_generators += [lambda activation=activation: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-        net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-    all_counters = []
-    all_notable_nets = []
-    all_names = []
-    for net_generator_id, net_generator in enumerate(net_generators):
-        counters = generate_counters()
-        notable_nets = []
-        for _ in tqdm(range(exp.trials)):
-            net = net_generator().with_params(epsilon=exp.epsilon)
-            name = str(net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
-            count(counters, net, notable_nets)
-            keras.backend.clear_session()
-        all_counters += [counters]
-        all_notable_nets += [notable_nets]
-        all_names += [name]
-    exp.save(all_counters=all_counters)
-    exp.save(all_notable_nets=all_notable_nets)
-    exp.save(all_names=all_names)
-    for exp_id, counter in enumerate(all_counters):
-        exp.log(all_names[exp_id])
-        exp.log(all_counters[exp_id])
-        exp.log('\n')
-print('Done')
+if __name__ == '__main__':
+    with Experiment('fixpoint-density') as exp:
+        exp.trials = 100
+        exp.epsilon = 1e-4
+        net_generators = []
+        for activation in ['linear', 'sigmoid', 'relu']:
+            net_generators += [lambda activation=activation: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+            net_generators += [lambda activation=activation: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+            net_generators += [lambda activation=activation: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+            # net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+        all_counters = []
+        all_notable_nets = []
+        all_names = []
+        for net_generator_id, net_generator in enumerate(net_generators):
+            counters = generate_counters()
+            notable_nets = []
+            for _ in tqdm(range(exp.trials)):
+                net = net_generator().with_params(epsilon=exp.epsilon)
+                net = ParticleDecorator(net)
+                name = str(net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
+                count(counters, net, notable_nets)
+                keras.backend.clear_session()
+            all_counters += [counters]
+            all_notable_nets += [notable_nets]
+            all_names += [name]
+        exp.save(all_counters=all_counters)
+        exp.save(all_notable_nets=all_notable_nets)
+        exp.save(all_names=all_names)
+        for exp_id, counter in enumerate(all_counters):
+            exp.log(all_names[exp_id])
+            exp.log(all_counters[exp_id])
+            exp.log('\n')
+
+    print('Done')
diff --git a/code/setups/known-fixpoint-variation.py b/code/setups/known-fixpoint-variation.py
index dd1f4b9..7f51e3f 100644
--- a/code/setups/known-fixpoint-variation.py
+++ b/code/setups/known-fixpoint-variation.py
@@ -5,7 +5,6 @@ import os
 # Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
-
 from util import *
 from experiment import *
 from network import *
@@ -16,19 +15,22 @@ import keras.backend
 from statistics import mean
 
 avg = mean
-
+
+
 def generate_fixpoint_weights():
     return [
         np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
         np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
         np.array([[1.0], [0.0]], dtype=np.float32)
     ]
-
+
+
 def generate_fixpoint_net():
     net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
     net.set_weights(generate_fixpoint_weights())
     return net
 
+
 def vary(old_weights, e=1.0):
     new_weights = copy.deepcopy(old_weights)
     for layer_id, layer in enumerate(new_weights):
@@ -40,45 +42,49 @@ def vary(old_weights, e=1.0):
                 new_weights[layer_id][cell_id][weight_id] = weight - prng() * e
     return new_weights
 
-with Experiment('known-fixpoint-variation') as exp:
-    exp.depth = 10
-    exp.trials = 100
-    exp.max_steps = 100
-    exp.epsilon = 1e-4
-    exp.xs = []
-    exp.ys = []
-    exp.zs = []
-    exp.notable_nets = []
-    current_scale = 1.0
-    for _ in range(exp.depth):
-        print('variation scale ' + str(current_scale))
-        for _ in tqdm(range(exp.trials)):
-            net = generate_fixpoint_net().with_params(epsilon=exp.epsilon)
-            net.set_weights(vary(net.get_weights(), current_scale))
-            time_to_something = 0
-            time_as_fixpoint = 0
-            still_fixpoint = True
-            for _ in range(exp.max_steps):
-                net.self_attack()
-                if net.is_zero() or net.is_diverged():
-                    break
-                if net.is_fixpoint():
-                    if still_fixpoint:
-                        time_as_fixpoint += 1
+
+if __name__ == '__main__':
+    with Experiment('known-fixpoint-variation') as exp:
+        exp.depth = 10
+        exp.trials = 100
+        exp.max_steps = 100
+        exp.epsilon = 1e-4
+        exp.xs = []
+        exp.ys = []
+        exp.zs = []
+        exp.notable_nets = []
+        current_scale = 1.0
+        for _ in range(exp.depth):
+            print('variation scale ' + str(current_scale))
+            for _ in tqdm(range(exp.trials)):
+                net = generate_fixpoint_net().with_params(epsilon=exp.epsilon)
+                net = ParticleDecorator(net)
+                net.set_weights(vary(net.get_weights(), current_scale))
+                time_to_something = 0
+                time_as_fixpoint = 0
+                still_fixpoint = True
+                for _ in range(exp.max_steps):
+                    net.self_attack()
+                    if net.is_zero() or net.is_diverged():
+                        break
+                    if net.is_fixpoint():
+                        if still_fixpoint:
+                            time_as_fixpoint += 1
+                        else:
+                            print('remarkable')
+                            exp.notable_nets += [net.get_weights()]
+                            still_fixpoint = True
                     else:
-                    print('remarkable')
-                    exp.notable_nets += [net.get_weights()]
-                    still_fixpoint = True
-                else:
-                    still_fixpoint = False
-                    time_to_something += 1
-            exp.xs += [current_scale]
-            exp.ys += [time_to_something] #time steps taken to reach divergence or zero (reaching another fix-point is basically never happening)
-            exp.zs += [time_as_fixpoint] #time steps still regarded as sthe initial fix-point
-            keras.backend.clear_session()
-        current_scale /= 10.0
-    for d in range(exp.depth):
-        exp.log('variation 10e-' + str(d))
-        exp.log('avg time to vergence ' + str(avg(exp.ys[d*exp.trials:(d+1)*exp.trials])))
-        exp.log('avg time as fixpoint ' + str(avg(exp.zs[d*exp.trials:(d+1)*exp.trials])))
-
+                        still_fixpoint = False
+                        time_to_something += 1
+                exp.xs += [current_scale]
+                # time steps taken to reach divergence or zero (reaching another fix-point basically never happens)
+                exp.ys += [time_to_something]
+                # time steps still regarded as the initial fix-point
+                exp.zs += [time_as_fixpoint]
+                keras.backend.clear_session()
+            current_scale /= 10.0
+        for d in range(exp.depth):
+            exp.log('variation 10e-' + str(d))
+            exp.log('avg time to vergence ' + str(avg(exp.ys[d*exp.trials:(d+1) * exp.trials])))
+            exp.log('avg time as fixpoint ' + str(avg(exp.zs[d*exp.trials:(d+1) * exp.trials])))
diff --git a/code/setups/mixed-self-fixpoints.py b/code/setups/mixed-self-fixpoints.py
index d956bb0..c8564fb 100644
--- a/code/setups/mixed-self-fixpoints.py
+++ b/code/setups/mixed-self-fixpoints.py
@@ -3,6 +3,9 @@ import os
 
 from typing import Tuple
 
+# Prepend the top-level dir to the module search path
+sys.path += [os.path.join('..', '.')]
+
 from util import *
 from experiment import *
 from network import *
@@ -10,10 +13,6 @@ from network import *
 
 import keras.backend
 
-# Concat top Level dir to system environmental variables
-sys.path += os.path.join('..', '.')
-
-
 def generate_counters():
     """
     Initial build of the counter dict, to store counts.
@@ -51,46 +50,52 @@ def count(counters, net, notable_nets=[]):
             counters['other'] += 1
     return counters, notable_nets
 
+if __name__ == '__main__':
 
-with Experiment('mixed-self-fixpoints') as exp:
-    exp.trials = 20
-    exp.selfattacks = 4
-    exp.trains_per_selfattack_values = [100 * i for i in range(11)]
-    exp.epsilon = 1e-4
-    net_generators = []
-    for activation in ['linear', 'sigmoid', 'relu']:
-        for use_bias in [False]:
-            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+    with Experiment('mixed-self-fixpoints') as exp:
+        exp.trials = 20
+        exp.selfattacks = 4
+        exp.trains_per_selfattack_values = [100 * i for i in range(11)]
+        exp.epsilon = 1e-4
+        net_generators = []
+        for activation in ['linear']:  # , 'sigmoid', 'relu']:
+            for use_bias in [False]:
+                net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+                net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+                # net_generators += [lambda activation=activation, use_bias=use_bias: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+                # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
 
-    all_names = []
-    all_data = []
-    for net_generator_id, net_generator in enumerate(net_generators):
-        xs = []
-        ys = []
-        for trains_per_selfattack in exp.trains_per_selfattack_values:
-            counters = generate_counters()
-            notable_nets = []
-            for _ in tqdm(range(exp.trials)):
-                net = TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon)
-                name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
-                for selfattack_id in range(exp.selfattacks):
-                    net.self_attack()
-                    for train_id in range(trains_per_selfattack):
-                        loss = net.compiled().train(epoch=selfattack_id*trains_per_selfattack+train_id)
-                        if net.is_diverged() or net.is_fixpoint():
-                            break
-                count(counters, net, notable_nets)
-                keras.backend.clear_session()
-            xs += [trains_per_selfattack]
-            ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
-        all_names += [name]
-        all_data += [{'xs':xs, 'ys':ys}] #xs: how many trains per self-attack from exp.trains_per_selfattack_values, ys: average amount of fixpoints found
+        all_names = []
+        all_data = []
-    exp.save(all_names=all_names)
-    exp.save(all_data=all_data)
-    for exp_id, name in enumerate(all_names):
-        exp.log(all_names[exp_id])
-        exp.log(all_data[exp_id])
-        exp.log('\n')
+        for net_generator_id, net_generator in enumerate(net_generators):
+            xs = []
+            ys = []
+            for trains_per_selfattack in exp.trains_per_selfattack_values:
+                counters = generate_counters()
+                notable_nets = []
+                for _ in tqdm(range(exp.trials)):
+                    net = ParticleDecorator(net_generator())
+                    net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon)
+                    name = str(net.net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
+                    for selfattack_id in range(exp.selfattacks):
+                        net.self_attack()
+                        for train_id in range(trains_per_selfattack):
+                            loss = net.compiled().train(epoch=selfattack_id*trains_per_selfattack+train_id)
+                            if net.is_diverged() or net.is_fixpoint():
+                                break
+                    count(counters, net, notable_nets)
+                    keras.backend.clear_session()
+                xs += [trains_per_selfattack]
+                ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
+            all_names += [name]
+            # xs: how many trains per self-attack, from exp.trains_per_selfattack_values
+            # ys: average fraction of nets that ended as fixpoints
+            all_data += [{'xs': xs, 'ys': ys}]
+
+        exp.save(all_names=all_names)
+        exp.save(all_data=all_data)
+        for exp_id, name in enumerate(all_names):
+            exp.log(all_names[exp_id])
+            exp.log(all_data[exp_id])
+            exp.log('\n')
diff --git a/code/setups/training-fixpoints.py b/code/setups/training-fixpoints.py
index 9deb65c..c44bc80 100644
--- a/code/setups/training-fixpoints.py
+++ b/code/setups/training-fixpoints.py
@@ -8,7 +8,7 @@ from util import *
 from experiment import *
 from network import *
 
-import keras.backend
+import keras.backend as K
 
 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
@@ -29,36 +29,40 @@ def count(counters, net, notable_nets=[]):
             counters['other'] += 1
     return counters, notable_nets
 
-with Experiment('training_fixpoint') as exp:
-    exp.trials = 5
-    exp.run_count = 500
-    exp.epsilon = 1e-4
-    net_generators = []
-    for activation in ['linear', 'sigmoid', 'relu']:
-        for use_bias in [False]:
-            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-    all_counters = []
-    all_notable_nets = []
-    all_names = []
-    for net_generator_id, net_generator in enumerate(net_generators):
-        counters = generate_counters()
-        notable_nets = []
-        for _ in tqdm(range(exp.trials)):
-            net = TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon)
-            name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
-            for run_id in range(exp.run_count):
-                loss = net.compiled().train(epoch=run_id+1)
-            count(counters, net, notable_nets)
-            keras.backend.clear_session()
-        all_counters += [counters]
-        all_notable_nets += [notable_nets]
-        all_names += [name]
-    exp.save(all_counters=all_counters) #net types reached in the end
-    exp.save(all_notable_nets=all_notable_nets)
-    exp.save(all_names=all_names) #experiment setups
-    for exp_id, counter in enumerate(all_counters):
-        exp.log(all_names[exp_id])
-        exp.log(all_counters[exp_id])
-        exp.log('\n')
\ No newline at end of file
+
+if __name__ == '__main__':
+
+    with Experiment('training_fixpoint') as exp:
+        exp.trials = 20
+        exp.run_count = 500
+        exp.epsilon = 1e-4
+        net_generators = []
+        for activation in ['linear']:  # , 'sigmoid', 'relu']:
+            for use_bias in [False]:
+                net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+                net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+                # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+        all_counters = []
+        all_notable_nets = []
+        all_names = []
+        for net_generator_id, net_generator in enumerate(net_generators):
+            counters = generate_counters()
+            notable_nets = []
+            for _ in tqdm(range(exp.trials)):
+                net = ParticleDecorator(net_generator())
+                net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon)
+                name = str(net.net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
+                for run_id in range(exp.run_count):
+                    loss = net.compiled().train(epoch=run_id+1)
+                count(counters, net, notable_nets)
+            all_counters += [counters]
+            all_notable_nets += [notable_nets]
+            all_names += [name]
+            K.clear_session()
+        exp.save(all_counters=all_counters)  # net types reached in the end
+        # exp.save(all_notable_nets=all_notable_nets)
+        exp.save(all_names=all_names)  # experiment setups
+        for exp_id, counter in enumerate(all_counters):
+            exp.log(all_names[exp_id])
+            exp.log(all_counters[exp_id])
+            exp.log('\n')
diff --git a/code/soup.py b/code/soup.py
index 4185d49..d016601 100644
--- a/code/soup.py
+++ b/code/soup.py
@@ -68,7 +68,8 @@ class Soup(object):
                     description['counterpart'] = other_particle.get_uid()
                 for _ in range(self.params.get('train', 0)):
                     particle.compiled()
-                    loss = particle.train(store_states=False) #callbacks on save_state are broken for TrainingNeuralNetwork
+                    # callbacks on save_state are broken for TrainingNeuralNetwork
+                    loss = particle.train(store_states=False)
                     description['fitted'] = self.params.get('train', 0)
                     description['loss'] = loss
                     description['action'] = 'train_self'
diff --git a/code/visualization.py b/code/visualization.py
index 7f23bd7..4327a9d 100644
--- a/code/visualization.py
+++ b/code/visualization.py
@@ -34,6 +34,8 @@ def build_from_soup_or_exp(soup):
             action=[event.get('action', None) for event in particle],
             counterpart=[event.get('counterpart', None) for event in particle]
         )
+        if any([x is not None for x in particle_dict['counterpart']]):
+            print('counterpart')
         particle_list.append(particle_dict)
     return particle_list
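A pattern recurring across these setups is the wrapping order: `net = ParticleDecorator(net_generator())` followed by `net = TrainingNeuralNetworkDecorator(net)` (and, in `network.py`, `TrainingNeuralNetworkDecorator(ParticleDecorator(...))`). This only works if each decorator forwards unknown attribute lookups to the net it wraps. A minimal sketch of that delegation pattern; the class and method names below are illustrative stand-ins, not the repo's actual implementations:

```python
class Delegator:
    """Base for decorators that forward unknown attributes to the wrapped net."""
    def __init__(self, net):
        self.net = net

    def __getattr__(self, name):
        # __getattr__ is only called when normal lookup fails, so each
        # decorator's own methods win and everything else resolves inward.
        return getattr(self.net, name)


class ParticleLike(Delegator):
    def make_state(self):
        return {'weights': self.net.get_weights()}


class TrainerLike(Delegator):
    def train(self):
        return sum(self.net.get_weights())


class ToyNet:
    def get_weights(self):
        return [0.5, -0.25]


net = TrainerLike(ParticleLike(ToyNet()))
print(net.train())        # found on TrainerLike directly
print(net.make_state())   # delegated inward to ParticleLike
print(net.get_weights())  # delegated all the way to ToyNet
```

Placing the training decorator outermost means its `train()`/`compiled()` shadow everything else, while particle bookkeeping and the base net stay reachable through delegation.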