Refactor:
Step 6: Experiments
@@ -7,12 +7,13 @@ sys.path += os.path.join('..', '.')
 from experiment import *
 from network import *
 
 import keras.backend as K
 
 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
 
-def count(counters, net, notable_nets=[]):
+def count(counters, net, notable_nets: list=None):
+    notable_nets = notable_nets or list()
     if net.is_diverged():
         counters['divergent'] += 1
     elif net.is_fixpoint():
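Note on the signature change above: a `[]` default is evaluated once, when the `def` statement runs, so every call that omits `notable_nets` shares (and mutates) the same list across calls and trials. The `None` default plus `notable_nets = notable_nets or list()` restores a fresh list per call. A standalone sketch of the pitfall, not from this repo:

def broken(item, acc=[]):        # the default list is created once, at def time
    acc.append(item)
    return acc

def fixed(item, acc: list = None):
    acc = acc or list()          # fresh list on every call without an argument
    acc.append(item)
    return acc

broken(1); broken(2)             # second call returns [1, 2]: state leaked
fixed(1); fixed(2)               # second call returns [2]

(Strictly, `acc or list()` also replaces an explicitly passed empty list; `if acc is None` would be the stricter test.)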
@@ -31,7 +32,7 @@ def count(counters, net, notable_nets=[]):
 
 if __name__ == '__main__':
 
-    with Experiment('applying_fixpoint') as exp:
+    with FixpointExperiment(name='applying_fixpoint') as exp:
        exp.trials = 50
        exp.run_count = 100
        exp.epsilon = 1e-4
@@ -40,7 +41,7 @@ if __name__ == '__main__':
        for use_bias in [False]:
            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
            net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-           net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+           # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
        all_counters = []
        all_notable_nets = []
        all_names = []
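Note on the `lambda activation=activation, use_bias=use_bias: ...` generators above: Python closures capture loop variables late, so without the default-argument trick every generator built in the loop would see only the final `activation`/`use_bias` values when it is eventually called. A standalone sketch:

fns_late = [lambda: i for i in range(3)]
fns_bound = [lambda i=i: i for i in range(3)]

print([f() for f in fns_late])    # [2, 2, 2] -- all closures share the final i
print([f() for f in fns_bound])   # [0, 1, 2] -- defaults pin i per iteration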
@@ -50,14 +51,14 @@ if __name__ == '__main__':
            for _ in tqdm(range(exp.trials)):
                net = ParticleDecorator(net_generator())
                net.with_params(epsilon=exp.epsilon)
-               name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
+               name = str(net.name) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
                for run_id in range(exp.run_count):
                    loss = net.self_attack()
                count(counters, net, notable_nets)
            all_counters += [counters]
            all_notable_nets += [notable_nets]
            all_names += [name]
-           K.clear_session()
+           exp.reset_model()
        exp.save(all_counters=all_counters)
        exp.save(trajectorys=exp.without_particles())
        # net types reached in the end
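Note on the `K.clear_session()` call replaced above: each trial wraps a freshly built Keras model in `ParticleDecorator`, and without clearing the backend session the underlying TensorFlow graph accumulates nodes across trials and every iteration gets slower. A minimal sketch of the pattern, where `build_model` and `run_trial` are placeholder names and `exp.reset_model()` is assumed to perform the equivalent cleanup (its body is not part of this diff):

import keras.backend as K

for trial in range(50):
    model = build_model()    # placeholder: builds a fresh Keras model
    run_trial(model)         # placeholder: trains / attacks the model
    K.clear_session()        # drop the accumulated TensorFlow graph state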
@@ -1,4 +0,0 @@
-TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
-{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.2, 5.2, 7.4, 8.1, 9.1, 9.6, 9.8, 10.0, 9.9, 9.9]}
-
-
@@ -11,7 +11,7 @@ from network import *
 from soup import *
 
 
-import tensorflow.python.keras.backend as K
+from tensorflow.python.keras import backend as K
 
 from statistics import mean
 avg = mean
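Note on the import change: `tensorflow.python.*` is TensorFlow's private namespace, and `import tensorflow.python.keras.backend as K` binds a submodule path that is not reliably importable across releases; importing the attribute instead is the more robust spelling. The public-API equivalent, if this code were moved off private paths, would presumably be:

# private path used by this commit (works, but not part of the public API):
from tensorflow.python.keras import backend as K
# public spelling in TF 1.13+ / 2.x:
# from tensorflow.keras import backend as K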
@@ -59,7 +59,7 @@ def count(counters, soup, notable_nets=None):
 
 if __name__ == '__main__':
 
-    with SoupExperiment('learn-from-soup') as exp:
+    with SoupExperiment(name='learn-from-soup') as exp:
        exp.soup_size = 10
        exp.soup_life = 100
        exp.trials = 10
@@ -83,10 +83,10 @@ if __name__ == '__main__':
        counters = generate_counters()
        results = []
        for _ in tqdm(range(exp.trials)):
-           soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
+           soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
            soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity)
            soup.seed()
-           name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
+           name = str(soup.particles[0].name) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
            for time in range(exp.soup_life):
                soup.evolve()
            count(counters, soup, notable_nets)
@@ -9,8 +9,6 @@ sys.path += os.path.join('..', '.')
 from experiment import *
 from network import *
 
-import tensorflow.python.keras.backend as K
-
 
 def generate_counters():
     """
@@ -84,7 +82,7 @@ if __name__ == '__main__':
                if net.is_diverged() or net.is_fixpoint():
                    break
                count(counters, net, notable_nets)
-           keras.backend.clear_session()
+           exp.reset_model()
            xs += [trains_per_selfattack]
            ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
        all_names += [name]
@@ -10,23 +10,22 @@ from experiment import *
 
 if __name__ == '__main__':
-    if True:
-        with SoupExperiment("soup") as exp:
-            for run_id in range(1):
-                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
-                    .with_keras_params(activation='linear').with_params(epsilon=0.0001)
-                # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
-                #    .with_keras_params(activation='linear')
-                # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
-                #    .with_keras_params(activation='linear')
-                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                soup = Soup(20, net_generator).with_params(remove_divergent=True, remove_zero=True,
-                                                           train=30,
-                                                           learn_from_rate=-1)
-                soup.seed()
-                for _ in tqdm(range(100)):
-                    soup.evolve()
-                exp.log(soup.count())
-                # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
-                # or soup.historical_particles[particle_uid].states[time_step]['weights']
-                # from soup.dill
-                exp.save(soup=soup.without_particles())
+    with SoupExperiment(name="soup") as exp:
+        net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
+            .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+        # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
+        #    .with_keras_params(activation='linear')
+        # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
+        #    .with_keras_params(activation='linear')
+        # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+        soup = Soup(20, net_generator).with_params(remove_divergent=True, remove_zero=True,
+                                                   train=30,
+                                                   learn_from_rate=-1)
+        soup.seed()
+        for _ in tqdm(range(100)):
+            soup.evolve()
+        exp.log(soup.count())
+        # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
+        # or soup.historical_particles[particle_uid].states[time_step]['weights']
+        # from soup.dill
+        exp.save(soup=soup.without_particles())
+
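The trailing comments describe how a saved run can be inspected. Assuming `exp.save(soup=...)` pickles the object with dill into a `soup.dill` file in the experiment directory, as the comments suggest, a hypothetical inspection sketch (file path and key layout taken from the comments above, not verified against experiment.py):

import dill

with open('soup.dill', 'rb') as f:
    soup = dill.load(f)

particle = soup.historical_particles[0]                  # particle_uid 0, if present
losses = [state['loss'] for state in particle.states]    # loss per time step
weights_t0 = particle.states[0]['weights']               # weights at time step 0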