Refactor: Step 6: Experiments

parent 4a81279b58
commit 5dfbfcaa20
@@ -9,6 +9,10 @@ from tensorflow.python.keras import backend as K
 from abc import ABC, abstractmethod
 
 
+class IllegalArgumentError(ValueError):
+    pass
+
+
 class Experiment(ABC):
 
     @staticmethod
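
Note: IllegalArgumentError subclasses ValueError, so existing "except ValueError" handlers keep working while callers can catch the narrower type. A minimal standalone sketch of the intended use (illustrative, not the project's code):

    class IllegalArgumentError(ValueError):
        pass

    def strict(**kwargs):
        if kwargs:
            raise IllegalArgumentError('unexpected arguments: ' + ', '.join(sorted(kwargs)))

    try:
        strict(foo=1)
    except ValueError as err:  # an IllegalArgumentError is still a ValueError
        print(type(err).__name__, err)
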
@@ -69,23 +73,22 @@ class Experiment(ABC):
         raise NotImplementedError
         pass
 
-    def run_exp(self, network_generator, exp_iterations, prints=False, **kwargs):
+    def run_exp(self, network_generator, exp_iterations, step_limit=100, prints=False, reset_model=False):
         # INFO Run_ID needs to be more than 0, so that exp stores the trajectories!
         for run_id in range(exp_iterations):
             network = network_generator()
-            self.run_net(network, 100, run_id=run_id + 1, **kwargs)
+            self.run_net(network, step_limit, run_id=run_id + 1)
             self.historical_particles[run_id] = network
             if prints:
                 print("Fixpoint? " + str(network.is_fixpoint()))
-        self.reset_model()
+        if reset_model:
+            self.reset_model()
 
     def reset_all(self):
         self.reset_model()
 
 
 class FixpointExperiment(Experiment):
-        if kwargs.get('logging', False):
-            self.log(self.counters)
 
     def __init__(self, **kwargs):
         kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
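
Note: the base run_exp no longer forwards **kwargs to run_net; step_limit and reset_model are now explicit parameters, and run_id starts at 1 so trajectories are stored (see the INFO comment). A hedged usage sketch with illustrative argument values:

    net_generator = lambda: ParticleDecorator(
        WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))

    with FixpointExperiment() as exp:
        exp.run_exp(net_generator, exp_iterations=10, step_limit=100)
        exp.reset_all()
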
@@ -93,11 +96,20 @@ class FixpointExperiment(Experiment):
         self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
         self.interesting_fixpoints = []
 
+    def run_exp(self, network_generator, exp_iterations, logging=True, **kwargs):
+        kwargs.update(reset_model=False)
+        super(FixpointExperiment, self).run_exp(network_generator, exp_iterations, **kwargs)
+        if logging:
+            self.log(self.counters)
+        self.reset_model()
+
     def run_net(self, net, step_limit=100, run_id=0, **kwargs):
-        i = 0
-        while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
+        if len(kwargs):
+            raise IllegalArgumentError
+        for i in range(step_limit):
+            if net.is_diverged() or net.is_fixpoint():
+                break
             net.self_attack()
-            i += 1
         if run_id:
             net.save_state(time=i)
         self.count(net)
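
Note: the hand-maintained while-counter becomes a bounded for with an early break, and unknown keyword arguments now raise IllegalArgumentError instead of being silently ignored. One subtlety of the new form: i stays bound after the loop, which net.save_state(time=i) relies on, but it would be undefined if step_limit were ever 0. A standalone sketch of the control flow:

    def bounded_steps(step_limit, should_stop, step):
        i = 0  # pre-bind so the name exists even when step_limit == 0
        for i in range(step_limit):
            if should_stop():
                break
            step()
        return i

    print(bounded_steps(100, lambda: False, lambda: None))  # 99
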
@@ -128,14 +140,17 @@ class FixpointExperiment(Experiment):
 
 class MixedFixpointExperiment(FixpointExperiment):
 
-    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
+    def __init__(self, **kwargs):
+        super(MixedFixpointExperiment, self).__init__(name=kwargs.get('name', self.__class__.__name__))
+
+    def run_net(self, net, step_limit=100, run_id=0, **kwargs):
         for i in range(step_limit):
             if net.is_diverged() or net.is_fixpoint():
                 break
             net.self_attack()
             with tqdm(postfix=["Loss", dict(value=0)]) as bar:
-                for _ in range(trains_per_application):
-                    loss = net.compiled().train()
+                for _ in range(kwargs.get('trains_per_application', 100)):
+                    loss = net.train()
                     bar.postfix[1]["value"] = loss
                     bar.update()
         if run_id:
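
Note: the tqdm postfix list pairs a label with a mutable dict; mutating bar.postfix[1]['value'] refreshes the displayed loss on each update. This mirrors a pattern from tqdm's documentation (there combined with a custom bar_format). A standalone sketch with a dummy loss in place of net.train():

    from tqdm import tqdm

    with tqdm(postfix=["Loss", dict(value=0)]) as bar:
        for step in range(100):
            loss = 1.0 / (step + 1)          # stand-in for net.train()
            bar.postfix[1]["value"] = loss   # redrawn next to the bar
            bar.update()
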
@@ -164,8 +179,8 @@ class SoupExperiment(Experiment):
 
 class IdentLearningExperiment(Experiment):
 
-    def __init__(self):
-        super(IdentLearningExperiment, self).__init__(name=self.__class__.__name__)
+    def __init__(self, **kwargs):
+        super(IdentLearningExperiment, self).__init__(name=kwargs.get('name', self.__class__.__name__))
 
     def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
        pass
@@ -137,6 +137,7 @@ class NeuralNetwork(ABC):
         super().__init__()
         self.params = dict(epsilon=0.00000000000001)
         self.params.update(params)
+        self.name = params.get('name', self.__class__.__name__)
         self.keras_params = dict(activation='linear', use_bias=False)
         self.states = []
         self.model: Sequential
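
Note: every NeuralNetwork now carries a name, defaulting to its class name, which the scripts below use in place of reaching through net.net.__class__.__name__. Assuming the concrete constructors forward **params (as params.get('name', ...) suggests), usage would look like:

    net = WeightwiseNeuralNetwork(width=2, depth=2, name='ww_2x2')
    print(net.name)  # 'ww_2x2'; falls back to the class name when omitted
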
@@ -532,38 +533,40 @@ if __name__ == '__main__':
 
     if True:
         # WeightWise Neural Network
-        net_generator = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))
+        net_generator = lambda: ParticleDecorator(
+            WeightwiseNeuralNetwork(width=2, depth=2
+                                    ).with_keras_params(activation='linear'))
         with FixpointExperiment() as exp:
             exp.run_exp(net_generator, 10, logging=True)
             exp.reset_all()
 
-    if False:
+    if True:
         # Aggregating Neural Network
-        net_generator = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params())
+        net_generator = lambda: ParticleDecorator(
+            AggregatingNeuralNetwork(aggregates=4, width=2, depth=2
+                                     ).with_keras_params())
         with FixpointExperiment() as exp:
             exp.run_exp(net_generator, 10, logging=True)
-
             exp.reset_all()
 
-    if False:
+    if True:
         # FFT Aggregation
         net_generator = lambda: ParticleDecorator(
             AggregatingNeuralNetwork(
                 aggregates=4, width=2, depth=2, aggregator=AggregatingNeuralNetwork.aggregate_fft
             ).with_keras_params(activation='linear'))
         with FixpointExperiment() as exp:
-            for run_id in tqdm(range(10)):
-                exp.run_exp(net_generator, 1)
-                exp.log(exp.counters)
-                exp.reset_model()
+            exp.run_exp(net_generator, 10)
+            exp.log(exp.counters)
+            exp.reset_model()
             exp.reset_all()
 
     if True:
         # ok so this works quite reliably
         run_count = 10000
-        net_generator = TrainingNeuralNetworkDecorator(
-            ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2))
-        ).with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
+        net_generator = lambda: TrainingNeuralNetworkDecorator(
+            ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)
+                              )).with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
         with MixedFixpointExperiment() as exp:
             for run_id in tqdm(range(run_count+1)):
                 exp.run_exp(net_generator, 1)
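
Note: net_generator used to be a single decorated instance, so every iteration of run_exp re-ran the same network; wrapping construction in a lambda hands run_exp a factory that yields a fresh particle per run. A standalone sketch of the difference:

    class Net:
        instances = 0
        def __init__(self):
            Net.instances += 1

    shared = Net()            # old style: one instance, reused everywhere
    make_net = lambda: Net()  # new style: fresh instance per call

    nets = [make_net() for _ in range(3)]
    print(Net.instances)      # 4: the shared one plus three fresh ones
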
@@ -7,12 +7,13 @@ sys.path += os.path.join('..', '.')
 from experiment import *
 from network import *
 
-import keras.backend as K
 
 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
 
-def count(counters, net, notable_nets=[]):
+
+def count(counters, net, notable_nets: list=None):
+    notable_nets = notable_nets or list()
     if net.is_diverged():
         counters['divergent'] += 1
     elif net.is_fixpoint():
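
Note: notable_nets=[] is the classic mutable-default pitfall: the list is created once at definition time and shared across calls. The None sentinel plus "notable_nets or list()" gives every call its own list (it also replaces an explicitly passed empty list, which is harmless here). Standalone demonstration:

    def bad(item, acc=[]):
        acc.append(item)
        return acc

    def good(item, acc=None):
        acc = acc or []
        acc.append(item)
        return acc

    print(bad(1), bad(2))    # [1, 2] [1, 2] -- one shared list
    print(good(1), good(2))  # [1] [2]       -- fresh list per call
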
@@ -31,7 +32,7 @@ def count(counters, net, notable_nets=[]):
 
 if __name__ == '__main__':
 
-    with Experiment('applying_fixpoint') as exp:
+    with FixpointExperiment(name='applying_fixpoint') as exp:
         exp.trials = 50
         exp.run_count = 100
         exp.epsilon = 1e-4
@@ -40,7 +41,7 @@ if __name__ == '__main__':
         for use_bias in [False]:
             net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
             net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
         all_counters = []
         all_notable_nets = []
         all_names = []
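
Note: the "activation=activation, use_bias=use_bias" defaults are deliberate: default values are evaluated when the lambda is defined, freezing the current loop values into each generator. Without them, every lambda would see only the final loop values. Standalone demonstration:

    late = [lambda: act for act in ('linear', 'sigmoid')]
    bound = [lambda act=act: act for act in ('linear', 'sigmoid')]

    print([f() for f in late])   # ['sigmoid', 'sigmoid'] -- late binding
    print([f() for f in bound])  # ['linear', 'sigmoid']  -- frozen per lambda
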
@@ -50,14 +51,14 @@ if __name__ == '__main__':
         for _ in tqdm(range(exp.trials)):
             net = ParticleDecorator(net_generator())
             net.with_params(epsilon=exp.epsilon)
-            name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
+            name = str(net.name) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
             for run_id in range(exp.run_count):
                 loss = net.self_attack()
                 count(counters, net, notable_nets)
             all_counters += [counters]
             all_notable_nets += [notable_nets]
             all_names += [name]
-            K.clear_session()
+            exp.reset_model()
         exp.save(all_counters=all_counters)
         exp.save(trajectorys=exp.without_particles())
         # net types reached in the end
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
Binary file not shown.
@@ -1,4 +0,0 @@
-TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
-{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.2, 5.2, 7.4, 8.1, 9.1, 9.6, 9.8, 10.0, 9.9, 9.9]}
-
-
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
Image file removed (was 207 KiB).
@@ -11,7 +11,7 @@ from network import *
 from soup import *
 
 
-import tensorflow.python.keras.backend as K
+from tensorflow.python.keras import backend as K
 
 from statistics import mean
 avg = mean
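
Note: "import tensorflow.python.keras.backend as K" reaches through TensorFlow's private module path and can be brittle across TensorFlow versions; "from tensorflow.python.keras import backend as K" is the form this commit standardizes on. Either way, K is wanted here for session cleanup between trials:

    from tensorflow.python.keras import backend as K

    K.clear_session()  # drop graph/session state so repeated trials don't leak memory
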
@@ -59,7 +59,7 @@ def count(counters, soup, notable_nets=None):
 
 if __name__ == '__main__':
 
-    with SoupExperiment('learn-from-soup') as exp:
+    with SoupExperiment(name='learn-from-soup') as exp:
         exp.soup_size = 10
         exp.soup_life = 100
         exp.trials = 10
@@ -83,10 +83,10 @@ if __name__ == '__main__':
         counters = generate_counters()
         results = []
         for _ in tqdm(range(exp.trials)):
-            soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
+            soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
             soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity)
             soup.seed()
-            name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
+            name = str(soup.particles[0].name) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
             for time in range(exp.soup_life):
                 soup.evolve()
                 count(counters, soup, notable_nets)
@@ -9,8 +9,6 @@ sys.path += os.path.join('..', '.')
 from experiment import *
 from network import *
 
-import tensorflow.python.keras.backend as K
-
 
 def generate_counters():
     """
@@ -84,7 +82,7 @@ if __name__ == '__main__':
                     if net.is_diverged() or net.is_fixpoint():
                         break
                 count(counters, net, notable_nets)
-                keras.backend.clear_session()
+                exp.reset_model()
                 xs += [trains_per_selfattack]
                 ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
                 all_names += [name]
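
Note: the scripts stop calling keras.backend.clear_session() directly and defer to exp.reset_model(), centralizing backend cleanup in the Experiment class. The method body is not part of this diff; a plausible sketch under that assumption:

    from tensorflow.python.keras import backend as K

    class Experiment:
        def reset_model(self):
            # release the current Keras graph/session between trials
            K.clear_session()
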
@@ -10,23 +10,22 @@ from experiment import *
 
 if __name__ == '__main__':
-    if True:
-        with SoupExperiment("soup") as exp:
-            for run_id in range(1):
-                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
-                    .with_keras_params(activation='linear').with_params(epsilon=0.0001)
-                # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
-                #     .with_keras_params(activation='linear')
-                # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
-                #     .with_keras_params(activation='linear')
-                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                soup = Soup(20, net_generator).with_params(remove_divergent=True, remove_zero=True,
-                                                           train=30,
-                                                           learn_from_rate=-1)
-                soup.seed()
-                for _ in tqdm(range(100)):
-                    soup.evolve()
-                exp.log(soup.count())
-                # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
-                # or soup.historical_particles[particle_uid].states[time_step]['weights']
-                # from soup.dill
-                exp.save(soup=soup.without_particles())
+    with SoupExperiment(name="soup") as exp:
+        net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
+            .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+        # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
+        #     .with_keras_params(activation='linear')
+        # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
+        #     .with_keras_params(activation='linear')
+        # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+        soup = Soup(20, net_generator).with_params(remove_divergent=True, remove_zero=True,
+                                                   train=30,
+                                                   learn_from_rate=-1)
+        soup.seed()
+        for _ in tqdm(range(100)):
+            soup.evolve()
+        exp.log(soup.count())
+        # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
+        # or soup.historical_particles[particle_uid].states[time_step]['weights']
+        # from soup.dill
+        exp.save(soup=soup.without_particles())
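
Note: the trailing comments describe the trajectory layout inside the saved soup. A minimal loading sketch, assuming exp.save pickles with dill and the file lands at soup.dill inside the experiment directory (both assumptions based on the comments above):

    import dill  # assumed serializer, per the 'from soup.dill' comment

    with open('soup.dill', 'rb') as f:
        soup = dill.load(f)

    uid = next(iter(soup.historical_particles))     # any particle uid
    states = soup.historical_particles[uid].states
    print(states[0]['loss'], states[0]['weights'])  # first recorded time step
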
@@ -111,9 +111,9 @@ class Soup(object):
 if __name__ == '__main__':
     if True:
         net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-        soup_generator = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
+        soup_generator = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True)
         exp = SoupExperiment()
-        exp.run_exp(net_generator, 1000, soup_generator, 1, False)
+        exp.run_exp(net_generator, 10, soup_generator, 1, False)
 
         # net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
         # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
@@ -123,10 +123,10 @@ if __name__ == '__main__':
     if True:
         net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
             .with_keras_params(activation='linear').with_params(epsilon=0.0001)
-        soup_generator = lambda: Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
+        soup_generator = lambda: Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
         exp = SoupExperiment(name="soup")
 
-        exp.run_exp(net_generator, 100, soup_generator, 1, False)
+        exp.run_exp(net_generator, 10, soup_generator, 1, False)
 
         # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
         # .with_keras_params(activation='linear')\