diff --git a/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_counters.dill b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_counters.dill
new file mode 100644
index 0000000..8ebea28
Binary files /dev/null and b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_counters.dill differ
diff --git a/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_names.dill b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_names.dill
new file mode 100644
index 0000000..7038314
Binary files /dev/null and b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_names.dill differ
diff --git a/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_notable_nets.dill b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_notable_nets.dill
new file mode 100644
index 0000000..482c0e3
Binary files /dev/null and b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/all_notable_nets.dill differ
diff --git a/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/experiment.dill b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/experiment.dill
new file mode 100644
index 0000000..96a9c82
Binary files /dev/null and b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/experiment.dill differ
diff --git a/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/log.txt b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/log.txt
new file mode 100644
index 0000000..bc2a47b
--- /dev/null
+++ b/code/aggregated_experiments/exp-fixpoint-density-_6512072410481991-0/log.txt
@@ -0,0 +1,12 @@
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
diff --git a/code/aggregated_experiments/exp-known-fixpoint-variation-_1628018115073791-0/experiment.dill b/code/aggregated_experiments/exp-known-fixpoint-variation-_1628018115073791-0/experiment.dill
new file mode 100644
index 0000000..555654c
Binary files /dev/null and b/code/aggregated_experiments/exp-known-fixpoint-variation-_1628018115073791-0/experiment.dill differ
diff --git a/code/aggregated_experiments/exp-known-fixpoint-variation-_1628018115073791-0/log.txt b/code/aggregated_experiments/exp-known-fixpoint-variation-_1628018115073791-0/log.txt
new file mode 100644
index 0000000..e69de29
diff --git a/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/all_data.dill b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/all_data.dill
new file mode 100644
index 0000000..4795ab3
Binary files /dev/null and b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/all_data.dill differ
diff --git a/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/all_names.dill b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/all_names.dill
new file mode 100644
index 0000000..f395078
Binary files /dev/null and b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/all_names.dill differ
diff --git a/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/experiment.dill b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/experiment.dill
new file mode 100644
index 0000000..f9c1b64
Binary files /dev/null and b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/experiment.dill differ
diff --git a/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/log.txt b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/log.txt
new file mode 100644
index 0000000..5748bc3
--- /dev/null
+++ b/code/aggregated_experiments/exp-learn-from-soup-_3256036231914285-0/log.txt
@@ -0,0 +1,4 @@
+TrainingNeuralNetworkDecorator activiation='sigmoid' use_bias=False
+{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
+
+
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/all_data.dill b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/all_data.dill
new file mode 100644
index 0000000..877caba
Binary files /dev/null and b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/all_data.dill differ
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/all_names.dill b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/all_names.dill
new file mode 100644
index 0000000..72174ee
Binary files /dev/null and b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/all_names.dill differ
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/experiment.dill b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/experiment.dill
new file mode 100644
index 0000000..7f7bed4
Binary files /dev/null and b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/experiment.dill differ
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/log.txt b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/log.txt
new file mode 100644
index 0000000..4f9fa0b
--- /dev/null
+++ b/code/aggregated_experiments/exp-mixed-self-fixpoints-_6512079230926771-0/log.txt
@@ -0,0 +1,4 @@
+ParticleDecorator activiation='linear' use_bias=False
+{'xs': [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000], 'ys': [0.9, 0.95, 1.0, 0.95, 0.9, 0.95, 0.85, 0.8, 0.85, 0.85, 0.75]}
+
+
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/all_data.dill b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/all_data.dill
new file mode 100644
index 0000000..7b1e54f
Binary files /dev/null and b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/all_data.dill differ
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/all_names.dill b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/all_names.dill
new file mode 100644
index 0000000..364954d
Binary files /dev/null and b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/all_names.dill differ
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/experiment.dill b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/experiment.dill
new file mode 100644
index 0000000..d97321b
Binary files /dev/null and b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/experiment.dill differ
diff --git a/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/log.txt b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/log.txt
new file mode 100644
index 0000000..7ef21bb
--- /dev/null
+++ b/code/aggregated_experiments/exp-mixed-self-fixpoints-_814010367758041-0/log.txt
@@ -0,0 +1,4 @@
+TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
+{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.4, 0.2, 0.3, 0.2, 0.3, 0.3, 0.5, 0.3, 0.9, 0.6, 0.2], 'zs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
+
+
diff --git a/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/all_counters.dill b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/all_counters.dill
new file mode 100644
index 0000000..fbedb6b
Binary files /dev/null and b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/all_counters.dill differ
diff --git a/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/all_names.dill b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/all_names.dill
new file mode 100644
index 0000000..72174ee
Binary files /dev/null and b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/all_names.dill differ
diff --git a/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/experiment.dill b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/experiment.dill
new file mode 100644
index 0000000..e8183bd
Binary files /dev/null and b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/experiment.dill differ
diff --git a/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/log.txt b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/log.txt
new file mode 100644
index 0000000..96a0820
--- /dev/null
+++ b/code/aggregated_experiments/exp-training_fixpoint-_6512086999889633-0/log.txt
@@ -0,0 +1,4 @@
+ParticleDecorator activiation='linear' use_bias=False
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 20}
+
+
diff --git a/code/setups/learn_from_soup.py b/code/setups/learn_from_soup.py
index 0b7fd61..dda0a02 100644
--- a/code/setups/learn_from_soup.py
+++ b/code/setups/learn_from_soup.py
@@ -1,6 +1,7 @@
 import sys
 import os
 
+# Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
 
 from typing import Tuple
@@ -16,9 +17,6 @@ import keras.backend
 from statistics import mean
 avg = mean
 
-# Concat top Level dir to system environmental variables
-sys.path += os.path.join('..', '.')
-
 
 def generate_counters():
     """
@@ -91,6 +89,7 @@ with SoupExperiment('learn-from-soup') as exp:
                 soup.evolve()
             count(counters, soup, notable_nets)
             keras.backend.clear_session()
+        xs += [learn_from_severity]
         ys += [float(counters['fix_zero']) / float(exp.trials)]
         zs += [float(counters['fix_other']) / float(exp.trials)]
@@ -102,6 +101,7 @@
 
     exp.save(all_names=all_names)
     exp.save(all_data=all_data)
+    exp.save(soup=soup.without_particles())
    for exp_id, name in enumerate(all_names):
         exp.log(all_names[exp_id])
         exp.log(all_data[exp_id])
diff --git a/code/setups/mixed-soup.py b/code/setups/mixed-soup.py
index d42c3b1..79b49ad 100644
--- a/code/setups/mixed-soup.py
+++ b/code/setups/mixed-soup.py
@@ -75,7 +75,7 @@ with Experiment('mixed-self-fixpoints') as exp:
     for trains_per_selfattack in exp.trains_per_selfattack_values:
         counters = generate_counters()
         notable_nets = []
-        for _ in tqdm(range(exp.trials)):
+        for soup_idx in tqdm(range(exp.trials)):
             soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
             soup.with_params(attacking_rate=0.1, learn_from_rate=-1, train=trains_per_selfattack, learn_from_severity=-1)
             soup.seed()
@@ -84,6 +84,7 @@ with Experiment('mixed-self-fixpoints') as exp:
                 soup.evolve()
             count(counters, soup, notable_nets)
             keras.backend.clear_session()
+        xs += [trains_per_selfattack]
         ys += [float(counters['fix_zero']) / float(exp.trials)]
         zs += [float(counters['fix_other']) / float(exp.trials)]
 
diff --git a/code/setups/network_trajectorys.py b/code/setups/network_trajectorys.py
new file mode 100644
index 0000000..d2b8676
--- /dev/null
+++ b/code/setups/network_trajectorys.py
@@ -0,0 +1,106 @@
+import sys
+import os
+
+# Concat top Level dir to system environmental variables
+sys.path += os.path.join('..', '.')
+
+from soup import *
+from experiment import *
+
+
+
+if __name__ == '__main__':
+    def run_exp(net, prints=False):
+        # INFO Run_ID needs to be more than 0, so that exp stores the trajectories!
+        exp.run_net(net, 100, run_id=run_id + 1)
+        exp.historical_particles[run_id] = net
+        if prints:
+            print("Fixpoint? " + str(net.is_fixpoint()))
+            print("Loss " + str(loss))
+
+    if True:
+        # WeightWise Neural Network
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(10)):
+                net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)
+                                        .with_keras_params(activation='linear'))
+                run_exp(net)
+                K.clear_session()
+            exp.log(exp.counters)
+
+    if True:
+        # Aggregating Neural Network
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(10)):
+                net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2)
+                                        .with_keras_params(activation='linear'))
+                run_exp(net)
+                K.clear_session()
+            exp.log(exp.counters)
+
+    if True:
+        # FFT Neural Network
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(10)):
+                net = ParticleDecorator(FFTNeuralNetwork(aggregates=4, width=2, depth=2)
+                                        .with_keras_params(activation='linear'))
+                run_exp(net)
+                K.clear_session()
+            exp.log(exp.counters)
+
+    if True:
+        # ok so this works quite reliably
+        with FixpointExperiment() as exp:
+            for i in range(10):
+                run_count = 100
+                net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
+                net.with_params(epsilon=0.0001).with_keras_params(activation='linear')
+                for run_id in tqdm(range(run_count+1)):
+                    net.compiled()
+                    loss = net.train(epoch=run_id)
+                    if run_id % 10 == 0:
+                        run_exp(net)
+                K.clear_session()
+
+    if True:
+        # ok so this works quite reliably
+        with FixpointExperiment() as exp:
+            for i in range(10):
+                run_count = 100
+                net = TrainingNeuralNetworkDecorator(ParticleDecorator(AggregatingNeuralNetwork(4, width=2, depth=2)))
+                net.with_params(epsilon=0.0001).with_keras_params(activation='linear')
+                for run_id in tqdm(range(run_count+1)):
+                    net.compiled()
+                    loss = net.train(epoch=run_id)
+                    if run_id % 10 == 0:
+                        run_exp(net)
+                K.clear_session()
+
+    if False:
+        # this explodes in our faces completely... NAN everywhere
+        # TODO: Wtf is happening here?
+        with FixpointExperiment() as exp:
+            run_count = 10000
+            net = TrainingNeuralNetworkDecorator(RecurrentNeuralNetwork(width=2, depth=2))\
+                .with_params(epsilon=0.1e-2).with_keras_params(optimizer='sgd', activation='linear')
+            for run_id in tqdm(range(run_count+1)):
+                loss = net.compiled().train()
+                if run_id % 500 == 0:
+                    net.print_weights()
+                    # print(net.apply_to_network(net))
+                    print("Fixpoint? " + str(net.is_fixpoint()))
+                    print("Loss " + str(loss))
+                    print()
+
+    if False:
+        # and this gets somewhat interesting... we can still achieve non-trivial fixpoints
+        # over multiple applications when training enough in-between
+        with MixedFixpointExperiment() as exp:
+            for run_id in range(10):
+                net = TrainingNeuralNetworkDecorator(FFTNeuralNetwork(2, width=2, depth=2))\
+                    .with_params(epsilon=0.0001, activation='sigmoid')
+                exp.run_net(net, 500, 10)
+
+                net.print_weights()
+
+                print("Fixpoint? " + str(net.is_fixpoint()))
+        exp.log(exp.counters)
diff --git a/code/setups/soup_trajectorys.py b/code/setups/soup_trajectorys.py
new file mode 100644
index 0000000..c954b7d
--- /dev/null
+++ b/code/setups/soup_trajectorys.py
@@ -0,0 +1,30 @@
+import sys
+import os
+
+# Concat top Level dir to system environmental variables
+sys.path += os.path.join('..', '.')
+
+from soup import *
+from experiment import *
+
+
+if __name__ == '__main__':
+    if True:
+        with SoupExperiment("soup") as exp:
+            for run_id in range(10):
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
+                #     .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
+                #     .with_keras_params(activation='linear')
+                net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
+                    .with_keras_params(activation='linear')
+                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
+                soup.seed()
+                for _ in tqdm(range(100)):
+                    soup.evolve()
+                exp.log(soup.count())
+                # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
+                # or soup.historical_particles[particle_uid].states[time_step]['weights']
+                # from soup.dill
+                exp.save(soup=soup.without_particles())
diff --git a/code/soup.py b/code/soup.py
index d016601..4d608c9 100644
--- a/code/soup.py
+++ b/code/soup.py
@@ -109,10 +109,11 @@ class Soup(object):
 
 
 if __name__ == '__main__':
-    if True:
+    if False:
         with SoupExperiment() as exp:
             for run_id in range(1):
                 net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+                # net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                 # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
                 #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                 # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
@@ -121,21 +122,26 @@ if __name__ == '__main__':
             for _ in tqdm(range(1000)):
                 soup.evolve()
             exp.log(soup.count())
+            exp.save(soup=soup.without_particles())
 
-    if False:
+    if True:
         with SoupExperiment("soup") as exp:
             for run_id in range(1):
-                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)).with_keras_params(
-                    activation='sigmoid').with_params(epsilon=0.0001)
-
-                # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
+                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2))\
+                    .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
+                #     .with_keras_params(activation='linear')\
+                #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
+                #     .with_keras_params(activation='linear')\
                 #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                 # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=10)
+                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
                 soup.seed()
                 for _ in tqdm(range(100)):
                     soup.evolve()
-                    soup.print_all()
                 exp.log(soup.count())
-                exp.save(soup=soup.without_particles())  # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
-                # or soup.historical_particles[particle_uid].states[time_step]['weights'] from soup.dill
+                # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
+                # or soup.historical_particles[particle_uid].states[time_step]['weights']
+                # from soup.dill
+                exp.save(soup=soup.without_particles())
diff --git a/code/visualization.py b/code/visualization.py
index 2a853c7..ea50b90 100644
--- a/code/visualization.py
+++ b/code/visualization.py
@@ -98,6 +98,8 @@ def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
         return (val - a) / (b - a)
 
     data_list = build_from_soup_or_exp(soup_or_experiment)
+    if not data_list:
+        return
 
     bupu = cl.scales['11']['div']['RdYlGn']
     scale = cl.interp(bupu, len(data_list)+1)  # Map color scale to N bins
@@ -260,4 +262,4 @@ if __name__ == '__main__':
 
     in_file = args.in_file[0]
    out_file = args.out_file
-    search_and_apply(in_file, plot_latent_trajectories_3D, ["experiment.dill"])
+    search_and_apply(in_file, plot_latent_trajectories_3D, ["experiment.dill", "soup.dill"])
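
Note on the new soup.dill artifacts: with the visualization.py change above, search_and_apply now also visits "soup.dill", and the soup runs save exp.save(soup=soup.without_particles()). Below is a minimal sketch of how such a dump could be inspected by hand, assuming (as the comments in soup.py state) that the saved object keeps a historical_particles mapping whose per-time-step states carry 'loss' and 'weights' entries; the file path is hypothetical, and treating states as a list is an assumption.

    import dill

    # Hypothetical path to one run's saved soup; exp.save(soup=...) writes soup.dill.
    with open('experiments/exp-soup-0/soup.dill', 'rb') as in_f:
        soup = dill.load(in_f)

    # historical_particles maps particle uid -> particle; each particle records one
    # state per time step, e.g. states[time_step]['loss'] and states[time_step]['weights'].
    for particle_uid, particle in soup.historical_particles.items():
        losses = [state['loss'] for state in particle.states if 'loss' in state]
        print(particle_uid, 'last recorded loss:', losses[-1] if losses else None)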