bar plots
		| @@ -44,8 +44,9 @@ def plot_bars(names_bars_tuple, filename='histogram_plot'): | ||||
|         ) | ||||
|         data.append(bar) | ||||
|  | ||||
|     layout = dict(title='{} Histogram Plot'.format('Experiment Name Penis'), | ||||
|                   barmode='stack' | ||||
|     layout = dict(title='{} Histogram Plot'.format('Learn Severity'), | ||||
|                   xaxis=dict(title="Learn Severity", titlefont=dict(size=25)), | ||||
|                   # barmode='stack' | ||||
|                   # height=400, width=400, | ||||
|                   # margin=dict(l=20, r=20, t=20, b=20) | ||||
|                   ) | ||||
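For reference, the axis styling added above uses the dict-style plotly API that this commit relies on elsewhere (in plotly 4+ the same option moved to xaxis.title.font). A minimal self-contained sketch, with made-up bar values:

    import plotly as pl
    import plotly.graph_objs as go

    # hypothetical data, for illustration only
    data = [go.Bar(x=[0, 10, 20], y=[3, 5, 2], name='example')]
    layout = dict(title='Learn Severity Histogram Plot',
                  xaxis=dict(title='Learn Severity', titlefont=dict(size=25)))
    pl.offline.plot(go.Figure(data=data, layout=layout), filename='histogram_plot.html')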
| @@ -88,5 +89,5 @@ if __name__ == '__main__': | ||||
|     in_file = args.in_file[0] | ||||
|     out_file = args.out_file | ||||
|  | ||||
|     search_and_apply(in_file, plot_bars, files_to_look_for=['all_counters.dill']) | ||||
|     search_and_apply(in_file, plot_bars, files_to_look_for=['all_data.dill']) | ||||
|     # , 'all_names.dill', 'all_notable_nets.dill']) | ||||
|   | ||||
| @@ -62,7 +62,8 @@ class Experiment: | ||||
| class FixpointExperiment(Experiment): | ||||
|  | ||||
|     def __init__(self, **kwargs): | ||||
|         super().__init__(name=self.__class__.__name__, **kwargs) | ||||
|         kwargs['name'] =  self.__class__.__name__ if 'name' not in kwargs else kwargs['name'] | ||||
|         super().__init__(**kwargs) | ||||
|         self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0) | ||||
|         self.interesting_fixpoints = [] | ||||
|  | ||||
|   | ||||
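The default-name logic added in this hunk can be written more compactly with dict.setdefault; an equivalent sketch with the same behavior:

    def __init__(self, **kwargs):
        # fall back to the class name only when no explicit name was given
        kwargs.setdefault('name', self.__class__.__name__)
        super().__init__(**kwargs)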
							
								
								
									
code/line_plots.py (new file, 90 lines)
							| @@ -0,0 +1,90 @@ | ||||
| import os | ||||
|  | ||||
| from experiment import Experiment | ||||
| # noinspection PyUnresolvedReferences | ||||
| from soup import Soup | ||||
|  | ||||
| from argparse import ArgumentParser | ||||
| import numpy as np | ||||
|  | ||||
| import plotly as pl | ||||
| import plotly.graph_objs as go | ||||
|  | ||||
| import colorlover as cl | ||||
|  | ||||
| import dill | ||||
|  | ||||
| from sklearn.manifold import TSNE | ||||
| from sklearn.decomposition import PCA | ||||
|  | ||||
|  | ||||
| def build_args(): | ||||
|     arg_parser = ArgumentParser() | ||||
|     arg_parser.add_argument('-i', '--in_file', nargs=1, type=str) | ||||
|     arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str) | ||||
|     return arg_parser.parse_args() | ||||
|  | ||||
|  | ||||
| def line_plot(names_exp_tuple, filename='lineplot'): | ||||
|  | ||||
|     names, line_dict_list = names_exp_tuple | ||||
|  | ||||
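|     # note: this fixed label overrides the names unpacked from names_exp_tuple | ||||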
|     names = "Weightwise Neural Network" | ||||
|  | ||||
|     data = [] | ||||
|     base_scale = cl.scales['10']['div']['RdYlGn'] | ||||
|     scale = cl.interp(base_scale, len(line_dict_list) + 1)  # Map color scale to N bins | ||||
|     for ld_id, line_dict in enumerate(line_dict_list): | ||||
|         trace = go.Scatter( | ||||
|             x=line_dict['xs'], | ||||
|             y=line_dict['ys'], | ||||
|             # mode='lines', | ||||
|             name=names, | ||||
|             line=dict(color=scale[ld_id]), | ||||
|         ) | ||||
|  | ||||
|         data.append(trace) | ||||
|  | ||||
|     layout = dict(title='{} Weight Wise Mixed '.format(''), | ||||
|                   # height=800, width=800, margin=dict(l=0, r=0, t=0, b=0) | ||||
|                   ) | ||||
|  | ||||
|     fig = go.Figure(data=data, layout=layout) | ||||
|     pl.offline.plot(fig, auto_open=True, filename=filename) | ||||
|     pass | ||||
|  | ||||
|  | ||||
| def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]): | ||||
|     if os.path.isdir(absolut_file_or_folder): | ||||
|         for sub_file_or_folder in os.scandir(absolut_file_or_folder): | ||||
|             search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for) | ||||
|     elif absolut_file_or_folder.endswith('.dill'): | ||||
|         file_or_folder = os.path.split(absolut_file_or_folder)[-1] | ||||
|         if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])): | ||||
|             print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__, | ||||
|                                                                              file=absolut_file_or_folder) | ||||
|                   ) | ||||
|             with open(absolut_file_or_folder, 'rb') as in_f: | ||||
|                 exp = dill.load(in_f) | ||||
|  | ||||
|             names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill') | ||||
|             with open(names_dill_location, 'rb') as in_f: | ||||
|                 names = dill.load(in_f) | ||||
|  | ||||
|             try: | ||||
|                 plotting_function((names, exp), filename='{}.html'.format(absolut_file_or_folder[:-5])) | ||||
|             except ValueError: | ||||
|                 pass | ||||
|             except AttributeError: | ||||
|                 pass | ||||
|         else: | ||||
|             # This was either another file type, or the plot .html already exists. | ||||
|             pass | ||||
|  | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|     args = build_args() | ||||
|     in_file = args.in_file[0] | ||||
|     out_file = args.out_file | ||||
|  | ||||
|     search_and_apply(in_file, line_plot, ["all_data.dill"]) | ||||
|  | ||||
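Run directly, the script walks a result directory and writes one HTML plot next to each all_data.dill it finds (skipping those that already have one). A hypothetical invocation; the experiments/ path is an illustration, not part of the commit:

    python code/line_plots.py -i experiments/ -o out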
| @@ -172,7 +172,7 @@ class ParticleDecorator: | ||||
|         self.net = net | ||||
|         self.states = [] | ||||
|         self.save_state(time=0, | ||||
|                         action= 'train_self', | ||||
|                         action='init', | ||||
|                         counterpart=None | ||||
|         ) | ||||
|  | ||||
|   | ||||
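save_state itself is not shown in this diff. As an assumption only, it presumably appends one snapshot per event to self.states, along these lines (hypothetical sketch, not the repository's code):

    def save_state(self, **kwargs):
        # assumption: each state is a plain dict of the fields passed above
        self.states += [kwargs]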
										
Binary file not shown.
| @@ -1 +0,0 @@ | ||||
| {'divergent': 0, 'fix_zero': 0, 'fix_other': 13, 'fix_sec': 0, 'other': 7} | ||||
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
							| @@ -0,0 +1,4 @@ | ||||
| TrainingNeuralNetworkDecorator activiation='linear' use_bias=False | ||||
| {'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.2, 5.2, 7.4, 8.1, 9.1, 9.6, 9.8, 10.0, 9.9, 9.9]} | ||||
|  | ||||
|  | ||||
										
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
							| After Width: | Height: | Size: 207 KiB | 
										
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
							| @@ -0,0 +1,4 @@ | ||||
| TrainingNeuralNetworkDecorator activiation='linear' use_bias=False | ||||
| {'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 0.0, 0.6, 1.8, 2.7, 5.1, 5.8, 7.8, 8.5, 9.0, 8.8]} | ||||
|  | ||||
|  | ||||
										
Binary file not shown.
Binary file not shown.
Binary file not shown.
							| @@ -0,0 +1,12 @@ | ||||
| WeightwiseNeuralNetwork activiation='linear' use_bias=False | ||||
| {'xs': [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500], 'ys': [0.2, 0.3, 0.15, 0.55, 0.7, 0.85, 0.8, 0.95, 0.9, 1.0, 1.0]} | ||||
|  | ||||
|  | ||||
| AggregatingNeuralNetwork activiation='linear' use_bias=False | ||||
| {'xs': [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500], 'ys': [1.0, 0.95, 1.0, 1.0, 0.95, 0.9, 0.8, 1.0, 0.85, 1.0, 0.9]} | ||||
|  | ||||
|  | ||||
| RecurrentNeuralNetwork activiation='linear' use_bias=False | ||||
| {'xs': [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500], 'ys': [0.05, 0.0, 0.05, 0.0, 0.0, 0.1, 0.1, 0.05, 0.1, 0.0, 0.0]} | ||||
|  | ||||
|  | ||||
										
Binary file not shown.
Binary file not shown.
Binary file not shown.
							| @@ -0,0 +1,12 @@ | ||||
| WeightwiseNeuralNetwork activiation='linear' use_bias=False | ||||
| {'divergent': 0, 'fix_zero': 0, 'fix_other': 50, 'fix_sec': 0, 'other': 0} | ||||
|  | ||||
|  | ||||
| AggregatingNeuralNetwork activiation='linear' use_bias=False | ||||
| {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 50} | ||||
|  | ||||
|  | ||||
| RecurrentNeuralNetwork activiation='linear' use_bias=False | ||||
| {'divergent': 38, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 12} | ||||
|  | ||||
|  | ||||
										
Binary file not shown.
Binary file not shown.
							| @@ -0,0 +1 @@ | ||||
| {'divergent': 11, 'fix_zero': 9, 'fix_other': 0, 'fix_sec': 0, 'other': 0} | ||||
										
Binary file not shown.
File diff suppressed because one or more lines are too long
							| @@ -57,52 +57,54 @@ def count(counters, soup, notable_nets=[]): | ||||
|     return counters, notable_nets | ||||
|  | ||||
|  | ||||
| with SoupExperiment('learn-from-soup') as exp: | ||||
|     exp.soup_size = 10 | ||||
|     exp.soup_life = 100 | ||||
|     exp.trials = 10 | ||||
|     exp.learn_from_severity_values = [10 * i for i in range(11)] | ||||
|     exp.epsilon = 1e-4 | ||||
|     net_generators = [] | ||||
|     for activation in ['sigmoid']: #['linear', 'sigmoid', 'relu']: | ||||
|         for use_bias in [False]: | ||||
|             # net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|             net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|             # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
| if __name__ == '__main__': | ||||
|  | ||||
|     all_names = [] | ||||
|     all_data = [] | ||||
|     for net_generator_id, net_generator in enumerate(net_generators): | ||||
|         xs = [] | ||||
|         ys = [] | ||||
|         zs = [] | ||||
|         notable_nets = [] | ||||
|         for learn_from_severity in exp.learn_from_severity_values: | ||||
|             counters = generate_counters() | ||||
|             results = [] | ||||
|             for _ in tqdm(range(exp.trials)): | ||||
|                 soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon)) | ||||
|                 soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity) | ||||
|                 soup.seed() | ||||
|                 name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias')) | ||||
|                 for time in range(exp.soup_life): | ||||
|                     soup.evolve() | ||||
|                 count(counters, soup, notable_nets) | ||||
|                 keras.backend.clear_session() | ||||
|     with SoupExperiment('learn-from-soup') as exp: | ||||
|         exp.soup_size = 10 | ||||
|         exp.soup_life = 100 | ||||
|         exp.trials = 10 | ||||
|         exp.learn_from_severity_values = [10 * i for i in range(11)] | ||||
|         exp.epsilon = 1e-4 | ||||
|         net_generators = [] | ||||
|         for activation in ['linear']:  # ['sigmoid', 'linear', 'relu']: | ||||
|             for use_bias in [False]: | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|  | ||||
|             xs += [learn_from_severity] | ||||
|             ys += [float(counters['fix_zero']) / float(exp.trials)] | ||||
|             zs += [float(counters['fix_other']) / float(exp.trials)]         | ||||
|         all_names += [name] | ||||
|         # xs: learn_from_intensity according to exp.learn_from_intensity_values | ||||
|         # ys: zero-fixpoints after life time | ||||
|         # zs: non-zero-fixpoints after life time | ||||
|         all_data += [{'xs':xs, 'ys':ys, 'zs':zs}] | ||||
|         all_names = [] | ||||
|         all_data = [] | ||||
|         for net_generator_id, net_generator in enumerate(net_generators): | ||||
|             xs = [] | ||||
|             ys = [] | ||||
|             zs = [] | ||||
|             notable_nets = [] | ||||
|             for learn_from_severity in exp.learn_from_severity_values: | ||||
|                 counters = generate_counters() | ||||
|                 results = [] | ||||
|                 for _ in tqdm(range(exp.trials)): | ||||
|                     soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon)) | ||||
|                     soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity) | ||||
|                     soup.seed() | ||||
|                     name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias')) | ||||
|                     for time in range(exp.soup_life): | ||||
|                         soup.evolve() | ||||
|                     count(counters, soup, notable_nets) | ||||
|                     keras.backend.clear_session() | ||||
|  | ||||
|     exp.save(all_names=all_names) | ||||
|     exp.save(all_data=all_data) | ||||
|     exp.save(soup=soup.without_particles()) | ||||
|     for exp_id, name in enumerate(all_names): | ||||
|         exp.log(all_names[exp_id]) | ||||
|         exp.log(all_data[exp_id]) | ||||
|         exp.log('\n') | ||||
|                 xs += [learn_from_severity] | ||||
|                 ys += [float(counters['fix_zero']) / float(exp.trials)] | ||||
|                 zs += [float(counters['fix_other']) / float(exp.trials)] | ||||
|             all_names += [name] | ||||
|             # xs: learn_from_intensity according to exp.learn_from_intensity_values | ||||
|             # ys: zero-fixpoints after life time | ||||
|             # zs: non-zero-fixpoints after life time | ||||
|             all_data += [{'xs':xs, 'ys':ys, 'zs':zs}] | ||||
|  | ||||
|         exp.save(all_names=all_names) | ||||
|         exp.save(all_data=all_data) | ||||
|         exp.save(soup=soup.without_particles()) | ||||
|         for exp_id, name in enumerate(all_names): | ||||
|             exp.log(all_names[exp_id]) | ||||
|             exp.log(all_data[exp_id]) | ||||
|             exp.log('\n') | ||||
|   | ||||
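The hunk above leaves the experiment body unchanged and only indents it under an if __name__ == '__main__': guard, so importing the file no longer runs the whole soup experiment as a side effect. The pattern in isolation, with a hypothetical run_experiment() standing in for the indented block:

    if __name__ == '__main__':
        # executes only when the file is run as a script, not on import
        run_experiment()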
| @@ -55,15 +55,15 @@ if __name__ == '__main__': | ||||
|     with Experiment('mixed-self-fixpoints') as exp: | ||||
|         exp.trials = 20 | ||||
|         exp.selfattacks = 4 | ||||
|         exp.trains_per_selfattack_values = [100 * i for i in range(11)] | ||||
|         exp.trains_per_selfattack_values = [50 * i for i in range(11)] | ||||
|         exp.epsilon = 1e-4 | ||||
|         net_generators = [] | ||||
|         for activation in ['linear']:  # , 'sigmoid', 'relu']: | ||||
|             for use_bias in [False]: | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|  | ||||
|         all_names = [] | ||||
|         all_data = [] | ||||
| @@ -77,7 +77,7 @@ if __name__ == '__main__': | ||||
|                 for _ in tqdm(range(exp.trials)): | ||||
|                     net = ParticleDecorator(net_generator()) | ||||
|                     net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon) | ||||
|                     name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias')) | ||||
|                     name = str(net.net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias')) | ||||
|                     for selfattack_id in range(exp.selfattacks): | ||||
|                         net.self_attack() | ||||
|                         for train_id in range(trains_per_selfattack): | ||||
|   | ||||
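The one-hop fix above (net.net to net.net.net) follows from the decorator stacking: TrainingNeuralNetworkDecorator wraps ParticleDecorator, which wraps the actual network, so a single .net only reaches the ParticleDecorator. A hypothetical helper that unwraps any decorator depth (an illustration, not code from this repository):

    def innermost_net(obj):
        # follow .net links until the object no longer wraps another net
        while hasattr(obj, 'net'):
            obj = obj.net
        return obj

    name = innermost_net(net).__class__.__name__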
| @@ -52,51 +52,57 @@ def count(counters, soup, notable_nets=[]): | ||||
|             counters['other'] += 1 | ||||
|     return counters, notable_nets | ||||
|  | ||||
| if __name__ == '__main__': | ||||
|  | ||||
| with Experiment('mixed-self-fixpoints') as exp: | ||||
|     exp.trials = 10 | ||||
|     exp.soup_size = 10 | ||||
|     exp.soup_life = 5 | ||||
|     exp.trains_per_selfattack_values = [10 * i for i in range(11)] | ||||
|     exp.epsilon = 1e-4 | ||||
|     net_generators = [] | ||||
|     for activation in ['linear']: #['linear', 'sigmoid', 'relu']: | ||||
|         for use_bias in [False]: | ||||
|             # net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|             net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|             # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|     with Experiment('mixed-self-fixpoints') as exp: | ||||
|         exp.trials = 10 | ||||
|         exp.soup_size = 10 | ||||
|         exp.soup_life = 5 | ||||
|         exp.trains_per_selfattack_values = [10 * i for i in range(11)] | ||||
|         exp.epsilon = 1e-4 | ||||
|         net_generators = [] | ||||
|         for activation in ['linear']:  # ['linear', 'sigmoid', 'relu']: | ||||
|             for use_bias in [False]: | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|  | ||||
|     all_names = [] | ||||
|     all_data = [] | ||||
|     for net_generator_id, net_generator in enumerate(net_generators): | ||||
|         xs = [] | ||||
|         ys = [] | ||||
|         zs = [] | ||||
|         for trains_per_selfattack in exp.trains_per_selfattack_values: | ||||
|             counters = generate_counters() | ||||
|             notable_nets = [] | ||||
|             for soup_idx in tqdm(range(exp.trials)): | ||||
|                 soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon)) | ||||
|                 soup.with_params(attacking_rate=0.1, learn_from_rate=-1, train=trains_per_selfattack, learn_from_severity=-1) | ||||
|                 soup.seed() | ||||
|                 name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias')) | ||||
|                 for _ in range(exp.soup_life): | ||||
|                     soup.evolve() | ||||
|                 count(counters, soup, notable_nets) | ||||
|                 keras.backend.clear_session() | ||||
|         all_names = [] | ||||
|         all_data = [] | ||||
|         for net_generator_id, net_generator in enumerate(net_generators): | ||||
|             xs = [] | ||||
|             ys = [] | ||||
|             zs = [] | ||||
|             for trains_per_selfattack in exp.trains_per_selfattack_values: | ||||
|                 counters = generate_counters() | ||||
|                 notable_nets = [] | ||||
|                 for soup_idx in tqdm(range(exp.trials)): | ||||
|                     soup = Soup(exp.soup_size, | ||||
|                                 lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator( | ||||
|                                     net_generator()).with_params(epsilon=exp.epsilon)) | ||||
|                     soup.with_params(attacking_rate=0.1, learn_from_rate=-1, train=trains_per_selfattack, | ||||
|                                      learn_from_severity=-1) | ||||
|                     soup.seed() | ||||
|                     name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str( | ||||
|                         soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str( | ||||
|                         soup.particles[0].get_keras_params().get('use_bias')) | ||||
|                     for _ in range(exp.soup_life): | ||||
|                         soup.evolve() | ||||
|                     count(counters, soup, notable_nets) | ||||
|                     keras.backend.clear_session() | ||||
|  | ||||
|             xs += [trains_per_selfattack] | ||||
|             ys += [float(counters['fix_zero']) / float(exp.trials)] | ||||
|             zs += [float(counters['fix_other']) / float(exp.trials)] | ||||
|         all_names += [name] | ||||
|         # xs: how many trains per self-attack from exp.trains_per_selfattack_values | ||||
|         # ys: average amount of zero-fixpoints found | ||||
|         # zs: average amount of non-zero fixpoints | ||||
|         all_data += [{'xs':xs, 'ys':ys, 'zs':zs}] | ||||
|                 xs += [trains_per_selfattack] | ||||
|                 ys += [float(counters['fix_zero']) / float(exp.trials)] | ||||
|                 zs += [float(counters['fix_other']) / float(exp.trials)] | ||||
|             all_names += [name] | ||||
|             # xs: how many trains per self-attack from exp.trains_per_selfattack_values | ||||
|             # ys: average amount of zero-fixpoints found | ||||
|             # zs: average amount of non-zero fixpoints | ||||
|             all_data += [{'xs': xs, 'ys': ys, 'zs': zs}] | ||||
|  | ||||
|     exp.save(all_names=all_names) | ||||
|     exp.save(all_data=all_data) | ||||
|     for exp_id, name in enumerate(all_names): | ||||
|         exp.log(all_names[exp_id]) | ||||
|         exp.log(all_data[exp_id]) | ||||
|         exp.log('\n') | ||||
|         exp.save(all_names=all_names) | ||||
|         exp.save(all_data=all_data) | ||||
|         for exp_id, name in enumerate(all_names): | ||||
|             exp.log(all_names[exp_id]) | ||||
|             exp.log(all_data[exp_id]) | ||||
|             exp.log('\n') | ||||
|   | ||||
| @@ -19,19 +19,18 @@ if __name__ == '__main__': | ||||
|  | ||||
|     if True: | ||||
|         # WeightWise Neural Network | ||||
|         for _ in range(10): | ||||
|             with FixpointExperiment() as exp: | ||||
|                 for run_id in tqdm(range(20)): | ||||
|                     net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2) | ||||
|                                             .with_keras_params(activation='linear')) | ||||
|                     run_exp(net) | ||||
|                     K.clear_session() | ||||
|                 exp.log(exp.counters) | ||||
|                 exp.save(trajectorys=exp.without_particles()) | ||||
|         with FixpointExperiment(name="weightwise_self_application") as exp: | ||||
|             for run_id in tqdm(range(20)): | ||||
|                 net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2) | ||||
|                                         .with_keras_params(activation='linear')) | ||||
|                 run_exp(net) | ||||
|                 K.clear_session() | ||||
|             exp.log(exp.counters) | ||||
|             exp.save(trajectorys=exp.without_particles()) | ||||
|  | ||||
|     if False: | ||||
|         # Aggregating Neural Network | ||||
|         with FixpointExperiment() as exp: | ||||
|         with FixpointExperiment(name="aggregating_self_application") as exp: | ||||
|             for run_id in tqdm(range(10)): | ||||
|                 net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2) | ||||
|                                         .with_keras_params(activation='linear')) | ||||
| @@ -53,31 +52,33 @@ if __name__ == '__main__': | ||||
|  | ||||
|     if False: | ||||
|         # ok so this works quite reliably | ||||
|         with FixpointExperiment() as exp: | ||||
|         with FixpointExperiment(name="weightwise_learning") as exp: | ||||
|             for i in range(10): | ||||
|                 run_count = 100 | ||||
|                 net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2))) | ||||
|                 net.with_params(epsilon=0.0001).with_keras_params(activation='linear') | ||||
|                 exp.historical_particles[net.get_uid()] = net | ||||
|                 for run_id in tqdm(range(run_count+1)): | ||||
|                     net.compiled() | ||||
|                     loss = net.train(epoch=run_id) | ||||
|                     if run_id % 10 == 0: | ||||
|                         run_exp(net) | ||||
|                     # run_exp(net) | ||||
|                     # net.save_state(time=run_id) | ||||
|                 K.clear_session() | ||||
|             exp.save(trajectorys=exp.without_particles()) | ||||
|  | ||||
|     if False: | ||||
|         # ok so this works quite reliably | ||||
|         with FixpointExperiment() as exp: | ||||
|         with FixpointExperiment(name="aggregating_learning") as exp: | ||||
|             for i in range(10): | ||||
|                 run_count = 100 | ||||
|                 net = TrainingNeuralNetworkDecorator(ParticleDecorator(AggregatingNeuralNetwork(4, width=2, depth=2))) | ||||
|                 net.with_params(epsilon=0.0001).with_keras_params(activation='linear') | ||||
|                 exp.historical_particles[net.get_uid()] = net | ||||
|                 for run_id in tqdm(range(run_count+1)): | ||||
|                     net.compiled() | ||||
|                     loss = net.train(epoch=run_id) | ||||
|                     if run_id % 10 == 0: | ||||
|                         run_exp(net) | ||||
|                     # run_exp(net) | ||||
|                     # net.save_state(time=run_id) | ||||
|                 K.clear_session() | ||||
|             exp.save(trajectorys=exp.without_particles()) | ||||
|  | ||||
|   | ||||
| @@ -40,8 +40,8 @@ if __name__ == '__main__': | ||||
|         for activation in ['linear']:  # , 'sigmoid', 'relu']: | ||||
|             for use_bias in [False]: | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|                 net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)] | ||||
|         all_counters = [] | ||||
|         all_notable_nets = [] | ||||
|         all_names = [] | ||||
| @@ -51,7 +51,7 @@ if __name__ == '__main__': | ||||
|             for _ in tqdm(range(exp.trials)): | ||||
|                 net = ParticleDecorator(net_generator()) | ||||
|                 net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon) | ||||
|                 name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias')) | ||||
|                 name = str(net.net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias')) | ||||
|                 for run_id in range(exp.run_count): | ||||
|                     loss = net.compiled().train(epoch=run_id+1) | ||||
|                 count(counters, net, notable_nets) | ||||
|   | ||||
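A note on the net_generators lists rebuilt here: the lambdas bind activation and use_bias as default arguments (lambda activation=activation, use_bias=use_bias: ...). That is the standard workaround for Python's late-binding closures; without it, every generator created in the loop would see only the final loop values. A minimal illustration:

    late = [lambda: i for i in range(3)]
    print([f() for f in late])    # [2, 2, 2]: every closure sees the last i

    bound = [lambda i=i: i for i in range(3)]
    print([f() for f in bound])   # [0, 1, 2]: defaults capture each value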
| @@ -154,6 +154,7 @@ def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'): | ||||
|         data.extend([line_trace, line_start, line_end]) | ||||
|  | ||||
|     axis_layout = dict(gridcolor='rgb(255, 255, 255)', | ||||
|                        gridwidth=3, | ||||
|                        zerolinecolor='rgb(255, 255, 255)', | ||||
|                        showbackground=True, | ||||
|                        backgroundcolor='rgb(230, 230,230)', | ||||
|   | ||||
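The rest of plot_latent_trajectories_3D is not shown, but an axis dict like the one above is typically applied once per axis of a plotly 3D scene. A sketch of that wiring; the scene block is an assumption, while the axis_layout values come from the hunk:

    axis_layout = dict(gridcolor='rgb(255, 255, 255)',
                       gridwidth=3,
                       zerolinecolor='rgb(255, 255, 255)',
                       showbackground=True,
                       backgroundcolor='rgb(230, 230, 230)')

    layout = go.Layout(scene=dict(xaxis=dict(axis_layout),
                                  yaxis=dict(axis_layout),
                                  zaxis=dict(axis_layout)))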