From fd215be5de222f04c0d2bf5fb80eeb17c2af723a Mon Sep 17 00:00:00 2001
From: Si11ium
Date: Thu, 14 Mar 2019 16:40:29 +0100
Subject: [PATCH] foundations

---
 .../all_data.dill                |   Bin 0 -> 318 bytes
 .../all_names.dill               |   Bin 0 -> 128 bytes
 .../experiment.dill              |   Bin 0 -> 849 bytes
 .../log.txt                      |    8 ++++++++
 code/setups/fixpoint-density.py  |    4 ++--
 code/setups/learn_from_soup.py   |   11 +++++++----
 code/setups/mixed-soup.py        |   12 ++++++------
 7 files changed, 23 insertions(+), 12 deletions(-)
 create mode 100644 code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/all_data.dill
 create mode 100644 code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/all_names.dill
 create mode 100644 code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/experiment.dill
 create mode 100644 code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/log.txt

diff --git a/code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/all_data.dill b/code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/all_data.dill
new file mode 100644
index 0000000000000000000000000000000000000000..61bac0c2c92982b8470cf6c705f695927a3f8372
GIT binary patch
literal 318
zcmZo*jxA)+s4ZmFh+tx1V5lfAWQr|h*6?QVPVqkBtHbC!fzkI1qi+P0?;a*!1!mtC
zX5SagsbIC0#f2=fg{&Ix_IJ*Jz*%?un=?URmb?99V=!>He+d(Lmj(vz_OD@5m?Cdr
z=6nD;1qk5UQ%h?L*)%eMPGcwJv<#q|IDl?~n+`V=lL1!)cO|A8xO3qQsG8K$R6PK+
C+=Oia

literal 0
HcmV?d00001

diff --git a/code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/all_names.dill b/code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/all_names.dill
new file mode 100644
index 0000000000000000000000000000000000000000..3cda739467cf7dd1d3381f3f2171b0593d1cc048
GIT binary patch
literal 128
zcmZo*jxA)+h%jYfUAd~*@swHpH3heo3GIv
V#Q`lhG9%ZTz1f6YsB%L?N)1ld#`zL@Tj@9yVb%Ow(_?&7Ja}gtVV2K=Fo8zL|vyV
z^FbnGt#FntQ!9pCbEzQ}S|%~h`374gI!catACwZupzvxN=WW&^`ngE83?p~~VL}s0
z+7O|ZC$bnm+=yfhBHd_Y3iy!}>hPI}6kv6T3pT6Je6Db@jZ3y~_#OX!aNi8h%;2{f
zuuAZ*5;Uyf)C#^?P-tH&TyEov?YW;njOwH3etI*ix1RfTrzp?;K4yM)i}Kv_aci3S
zF}~*)twL3fL&i)xS8??}T&@lAn%yIN*DtfzBVRT0Rc=@=HVsNm>2z9CS}wtb2D08N
zFpI2pWQsS~KA}mP)I-9~zp~*>#mi~pO*Tv8I+gUNc+0P`1v-v0CA1dd3x&7+8CEYO
zLdJ?yviYRVPehbamhQB1{j&FAbE7>6N=B5(yPmtg_vO6)*eRmilZs*|LlN=8MHm&<
Khj=eLdhiDyYAk^O

literal 0
HcmV?d00001

diff --git a/code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/log.txt b/code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/log.txt
new file mode 100644
index 0000000..b7dd981
--- /dev/null
+++ b/code/setups/experiments/exp-mixed-self-fixpoints-_3255826099701415-0/log.txt
@@ -0,0 +1,8 @@
+ParticleDecorator activiation='linear' use_bias=False
+{'xs': [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000], 'ys': [0.45, 0.4, 0.6, 0.8, 0.95, 0.85, 0.95, 0.85, 0.9, 1.0, 0.8]}
+
+
+ParticleDecorator activiation='linear' use_bias=False
+{'xs': [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000], 'ys': [0.95, 0.9, 0.9, 0.9, 0.95, 0.8, 0.9, 0.9, 0.85, 0.85, 0.9]}
+
+
diff --git a/code/setups/fixpoint-density.py b/code/setups/fixpoint-density.py
index 2f1aa4c..f7ca465 100644
--- a/code/setups/fixpoint-density.py
+++ b/code/setups/fixpoint-density.py
@@ -37,7 +37,7 @@ if __name__ == '__main__':
         for activation in ['linear', 'sigmoid', 'relu']:
             net_generators += [lambda activation=activation: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
             net_generators += [lambda activation=activation: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
-            net_generators += [lambda activation=activation: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
+            # net_generators += [lambda activation=activation: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
             # net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
         all_counters = []
         all_notable_nets = []
@@ -52,7 +52,7 @@ if __name__ == '__main__':
                 count(counters, net, notable_nets)
                 keras.backend.clear_session()
             all_counters += [counters]
-            all_notable_nets += [notable_nets]
+            # all_notable_nets += [notable_nets]
             all_names += [name]
         exp.save(all_counters=all_counters)
         exp.save(all_notable_nets=all_notable_nets)
diff --git a/code/setups/learn_from_soup.py b/code/setups/learn_from_soup.py
index 95822b9..c002b1c 100644
--- a/code/setups/learn_from_soup.py
+++ b/code/setups/learn_from_soup.py
@@ -61,15 +61,15 @@ def count(counters, soup, notable_nets=[]):
 
 with SoupExperiment('learn-from-soup') as exp:
     exp.soup_size = 10
-    exp.soup_life = 100
-    exp.trials = 10
+    exp.soup_life = 1000
+    exp.trials = 20
     exp.learn_from_severity_values = [10 * i for i in range(11)]
     exp.epsilon = 1e-4
     net_generators = []
     for activation in ['sigmoid']: #['linear', 'sigmoid', 'relu']:
         for use_bias in [False]:
             net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
             # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
 
     all_names = []
@@ -95,7 +95,10 @@ with SoupExperiment('learn-from-soup') as exp:
             ys += [float(counters['fix_zero']) / float(exp.trials)]
             zs += [float(counters['fix_other']) / float(exp.trials)]
             all_names += [name]
-            all_data += [{'xs':xs, 'ys':ys, 'zs':zs}] #xs: learn_from_intensity according to exp.learn_from_intensity_values, ys: zero-fixpoints after life time, zs: non-zero-fixpoints after life time
+            # xs: learn_from_intensity according to exp.learn_from_intensity_values
+            # ys: zero-fixpoints after life time
+            # zs: non-zero-fixpoints after life time
+            all_data += [{'xs':xs, 'ys':ys, 'zs':zs}]
 
     exp.save(all_names=all_names)
     exp.save(all_data=all_data)
diff --git a/code/setups/mixed-soup.py b/code/setups/mixed-soup.py
index f3e93c9..70d3f04 100644
--- a/code/setups/mixed-soup.py
+++ b/code/setups/mixed-soup.py
@@ -1,6 +1,7 @@
 import sys
 import os
 
+# Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
 
 from typing import Tuple
@@ -13,10 +14,6 @@
 from soup import *
 import keras.backend
 
-# Concat top Level dir to system environmental variables
-sys.path += os.path.join('..', '.')
-
-
 def generate_counters():
     """
     Initial build of the counter dict, to store counts.
@@ -57,7 +54,7 @@ def count(counters, soup, notable_nets=[]):
 
 
 with Experiment('mixed-self-fixpoints') as exp:
-    exp.trials = 10
+    exp.trials = 100
     exp.soup_size = 10
     exp.soup_life = 5
     exp.trains_per_selfattack_values = [10 * i for i in range(11)]
@@ -91,7 +88,10 @@ with Experiment('mixed-self-fixpoints') as exp:
             ys += [float(counters['fix_zero']) / float(exp.trials)]
             zs += [float(counters['fix_other']) / float(exp.trials)]
             all_names += [name]
-            all_data += [{'xs':xs, 'ys':ys, 'zs':zs}] #xs: how many trains per self-attack from exp.trains_per_selfattack_values, ys: average amount of zero-fixpoints found, zs: average amount of non-zero fixpoints
+            # xs: how many trains per self-attack from exp.trains_per_selfattack_values
+            # ys: average amount of zero-fixpoints found
+            # zs: average amount of non-zero fixpoints
+            all_data += [{'xs':xs, 'ys':ys, 'zs':zs}]
 
     exp.save(all_names=all_names)
     exp.save(all_data=all_data)
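
Note on the generator lambdas that appear in all three setup scripts above: each
net_generators entry is built as "lambda activation=activation, use_bias=use_bias: ..."
so that the current loop value is captured at the moment the lambda is created. A minimal
standalone sketch of why that default-argument idiom is needed (it uses plain strings in
place of the repository's WeightwiseNeuralNetwork and AggregatingNeuralNetwork classes,
so it is illustrative only and not part of the patch):

    def generators_late_binding():
        # Without a default argument the lambdas close over the loop variable itself,
        # so after the loop every generator sees only its final value ('relu').
        generators = []
        for activation in ['linear', 'sigmoid', 'relu']:
            generators += [lambda: activation]
        return [g() for g in generators]

    def generators_value_bound():
        # The default argument freezes the current value of the loop variable per
        # iteration, which is the behavior the setup scripts rely on.
        generators = []
        for activation in ['linear', 'sigmoid', 'relu']:
            generators += [lambda activation=activation: activation]
        return [g() for g in generators]

    print(generators_late_binding())   # ['relu', 'relu', 'relu']
    print(generators_value_bound())    # ['linear', 'sigmoid', 'relu']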