Refactor: Step 6: Experiments
@@ -9,6 +9,10 @@ from tensorflow.python.keras import backend as K
 from abc import ABC, abstractmethod
 
 
+class IllegalArgumentError(ValueError):
+    pass
+
+
 class Experiment(ABC):
 
     @staticmethod
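
The new IllegalArgumentError carries no behavior of its own (its body is just pass); it subclasses ValueError purely to give the stricter argument handling later in this commit a named, catchable signal. A minimal sketch of the pattern as the diff uses it:

    class IllegalArgumentError(ValueError):
        pass

    def run_net(self, net, step_limit=100, run_id=0, **kwargs):
        if len(kwargs):                 # any keyword this method does not understand
            raise IllegalArgumentError  # fail fast instead of silently ignoring it
        ...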
@@ -69,14 +73,15 @@ class Experiment(ABC):
         raise NotImplementedError
         pass
 
-    def run_exp(self, network_generator, exp_iterations, prints=False, **kwargs):
+    def run_exp(self, network_generator, exp_iterations, step_limit=100, prints=False, reset_model=False):
         # INFO Run_ID needs to be more than 0, so that exp stores the trajectories!
         for run_id in range(exp_iterations):
             network = network_generator()
-            self.run_net(network, 100, run_id=run_id + 1, **kwargs)
+            self.run_net(network, step_limit, run_id=run_id + 1)
             self.historical_particles[run_id] = network
             if prints:
                 print("Fixpoint? " + str(network.is_fixpoint()))
-        self.reset_model()
+        if reset_model:
+            self.reset_model()
 
     def reset_all(self):
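
run_exp now spells out step_limit and reset_model in its signature instead of smuggling them through **kwargs into run_net, so every knob is visible at the call site. A hedged usage sketch (the classes are the ones from this commit; the parameter values are illustrative only):

    # a zero-argument factory; run_exp invokes it once per iteration
    net_generator = lambda: ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2))

    with FixpointExperiment() as exp:
        # ten independent runs, at most 100 self-application steps each,
        # printing the fixpoint verdict after every run
        exp.run_exp(net_generator, 10, step_limit=100, prints=True)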
@@ -84,8 +89,6 @@ class Experiment(ABC):
 
 
 class FixpointExperiment(Experiment):
-        if kwargs.get('logging', False):
-            self.log(self.counters)
 
     def __init__(self, **kwargs):
         kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
@@ -93,11 +96,20 @@ class FixpointExperiment(Experiment):
         self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
         self.interesting_fixpoints = []
 
+    def run_exp(self, network_generator, exp_iterations, logging=True, **kwargs):
+        kwargs.update(reset_model=False)
+        super(FixpointExperiment, self).run_exp(network_generator, exp_iterations, **kwargs)
+        if logging:
+            self.log(self.counters)
+        self.reset_model()
+
     def run_net(self, net, step_limit=100, run_id=0, **kwargs):
-        i = 0
-        while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
+        if len(kwargs):
+            raise IllegalArgumentError
+        for i in range(step_limit):
+            if net.is_diverged() or net.is_fixpoint():
+                break
             net.self_attack()
-            i += 1
         if run_id:
             net.save_state(time=i)
         self.count(net)
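
The new FixpointExperiment.run_exp pins reset_model=False before delegating, so the Keras session survives all exp_iterations, and only logs the counters and resets once at the end. run_net itself swaps the manual while/counter loop for for/break and rejects leftover keyword arguments via the IllegalArgumentError introduced above. One edge case worth noting, sketched under the diff's own names:

    # assumption: step_limit >= 1 (the default is 100); with step_limit == 0 the
    # loop body never runs, i is never bound, and net.save_state(time=i) below
    # would raise NameError
    for i in range(step_limit):
        if net.is_diverged() or net.is_fixpoint():
            break              # stop as soon as the net settles or blows up
        net.self_attack()      # one self-application step
    if run_id:
        net.save_state(time=i)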
@@ -128,14 +140,17 @@ class FixpointExperiment(Experiment):
 
 class MixedFixpointExperiment(FixpointExperiment):
 
-    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
+    def __init__(self, **kwargs):
+        super(MixedFixpointExperiment, self).__init__(name=kwargs.get('name', self.__class__.__name__))
+
+    def run_net(self, net, step_limit=100, run_id=0, **kwargs):
         for i in range(step_limit):
             if net.is_diverged() or net.is_fixpoint():
                 break
             net.self_attack()
             with tqdm(postfix=["Loss", dict(value=0)]) as bar:
-                for _ in range(trains_per_application):
-                    loss = net.compiled().train()
+                for _ in range(kwargs.get('trains_per_application', 100)):
+                    loss = net.train()
                     bar.postfix[1]["value"] = loss
                     bar.update()
         if run_id:
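
MixedFixpointExperiment gains its own __init__ (forwarding a default name) and drops trains_per_application from the run_net signature in favor of kwargs.get with the old default of 100. Because the base run_exp no longer forwards extra keywords to run_net, tuning it now means calling run_net directly; an illustrative, hypothetical call:

    # 10 training steps per self-application instead of the default 100
    exp.run_net(net, step_limit=100, run_id=1, trains_per_application=10)

The switch from net.compiled().train() to net.train() suggests train() now handles compilation itself, though the diff does not show that method.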
@@ -164,8 +179,8 @@ class SoupExperiment(Experiment):
 
 class IdentLearningExperiment(Experiment):
 
-    def __init__(self):
-        super(IdentLearningExperiment, self).__init__(name=self.__class__.__name__)
+    def __init__(self, **kwargs):
+        super(IdentLearningExperiment, self).__init__(name=kwargs.get('name', self.__class__.__name__))
 
     def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
         pass
@@ -137,6 +137,7 @@ class NeuralNetwork(ABC):
         super().__init__()
         self.params = dict(epsilon=0.00000000000001)
         self.params.update(params)
+        self.name = params.get('name', self.__class__.__name__)
         self.keras_params = dict(activation='linear', use_bias=False)
         self.states = []
         self.model: Sequential
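
NeuralNetwork instances now expose self.name, defaulting to the class name. Since the constructor apparently collects keyword arguments into params (it calls self.params.update(params)), a name can presumably be passed like any other parameter; an assumed usage:

    net = WeightwiseNeuralNetwork(width=2, depth=2, name='ww_net')  # assumption: ctor takes **params
    print(net.name)  # 'ww_net', or the class name when no name is given

The experiment scripts below rely on this attribute when they switch from net.net.__class__.__name__ to net.name.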
@@ -532,28 +533,30 @@ if __name__ == '__main__':
 
     if True:
         # WeightWise Neural Network
-        net_generator = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))
+        net_generator = lambda : ParticleDecorator(
+            WeightwiseNeuralNetwork(width=2, depth=2
+                                    ).with_keras_params(activation='linear'))
         with FixpointExperiment() as exp:
             exp.run_exp(net_generator, 10, logging=True)
             exp.reset_all()
 
-    if False:
+    if True:
         # Aggregating Neural Network
-        net_generator = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params())
+        net_generator = lambda :ParticleDecorator(
+            AggregatingNeuralNetwork(aggregates=4, width=2, depth=2
+                                     ).with_keras_params())
         with FixpointExperiment() as exp:
             exp.run_exp(net_generator, 10, logging=True)
 
             exp.reset_all()
 
-    if False:
+    if True:
         # FFT Aggregation
         net_generator = lambda: ParticleDecorator(
             AggregatingNeuralNetwork(
                 aggregates=4, width=2, depth=2, aggregator=AggregatingNeuralNetwork.aggregate_fft
             ).with_keras_params(activation='linear'))
         with FixpointExperiment() as exp:
-            for run_id in tqdm(range(10)):
-                exp.run_exp(net_generator, 1)
+            exp.run_exp(net_generator, 10)
             exp.log(exp.counters)
             exp.reset_model()
             exp.reset_all()
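
The first two blocks change net_generator from a pre-built instance to a zero-argument lambda. That matters because run_exp calls network_generator() once per iteration; with a bare instance, every run_id would have operated on (and mutated) the same object instead of a fresh particle. The pattern, condensed:

    # before: one shared, already-constructed network
    # net_generator = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2))

    # after: a factory producing a fresh particle on every call
    net_generator = lambda: ParticleDecorator(
        WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))

    fresh_net = net_generator()  # what run_exp does internally for each run_id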
@@ -561,9 +564,9 @@ if __name__ == '__main__':
     if True:
         # ok so this works quite realiably
         run_count = 10000
-        net_generator = TrainingNeuralNetworkDecorator(
-            ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2))
-        ).with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
+        net_generator = lambda : TrainingNeuralNetworkDecorator(
+            ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)
+        )).with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
         with MixedFixpointExperiment() as exp:
             for run_id in tqdm(range(run_count+1)):
                 exp.run_exp(net_generator, 1)
@@ -7,12 +7,13 @@ sys.path += os.path.join('..', '.')
 from experiment import *
 from network import *
 
-import keras.backend as K
 
 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
 
-def count(counters, net, notable_nets=[]):
+def count(counters, net, notable_nets: list=None):
+    notable_nets = notable_nets or list()
     if net.is_diverged():
         counters['divergent'] += 1
     elif net.is_fixpoint():
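
The count signature change fixes the classic Python mutable-default pitfall: a default of [] is created once, at function definition time, and shared by every call that omits the argument. A self-contained demonstration of the bug the new notable_nets: list=None idiom avoids:

    def count_shared(x, acc=[]):    # one list, created at def time
        acc.append(x)
        return acc

    count_shared(1)                 # [1]
    count_shared(2)                 # [1, 2] -- state leaked between calls

    def count_fresh(x, acc=None):   # the fixed idiom used in the diff
        acc = acc or list()
        acc.append(x)
        return acc

    count_fresh(1)                  # [1]
    count_fresh(2)                  # [2]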
@@ -31,7 +32,7 @@ def count(counters, net, notable_nets=[]):
 
 if __name__ == '__main__':
 
-    with Experiment('applying_fixpoint') as exp:
+    with FixpointExperiment(name='applying_fixpoint') as exp:
         exp.trials = 50
         exp.run_count = 100
         exp.epsilon = 1e-4
@@ -40,7 +41,7 @@ if __name__ == '__main__':
         for use_bias in [False]:
             net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
             net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
-            net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
     all_counters = []
     all_notable_nets = []
     all_names = []
@@ -50,14 +51,14 @@ if __name__ == '__main__':
         for _ in tqdm(range(exp.trials)):
             net = ParticleDecorator(net_generator())
             net.with_params(epsilon=exp.epsilon)
-            name = str(net.net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
+            name = str(net.name) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
             for run_id in range(exp.run_count):
                 loss = net.self_attack()
             count(counters, net, notable_nets)
         all_counters += [counters]
         all_notable_nets += [notable_nets]
         all_names += [name]
-        K.clear_session()
+        exp.reset_model()
     exp.save(all_counters=all_counters)
     exp.save(trajectorys=exp.without_particles())
     # net types reached in the end
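
This script (and the ones below) replace inline K.clear_session() calls with exp.reset_model(), centralizing session teardown on the Experiment class. The diff does not show reset_model itself; a plausible sketch, assuming it merely wraps the backend call the scripts used to make directly:

    from tensorflow.python.keras import backend as K

    def reset_model(self):
        # assumption: frees the current Keras graph/session state
        K.clear_session()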
@@ -1,4 +0,0 @@
-TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
-{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.2, 5.2, 7.4, 8.1, 9.1, 9.6, 9.8, 10.0, 9.9, 9.9]}
-
-
@@ -11,7 +11,7 @@ from network import *
 from soup import *
 
 
-import tensorflow.python.keras.backend as K
+from tensorflow.python.keras import backend as K
 
 from statistics import mean
 avg = mean
@@ -59,7 +59,7 @@ def count(counters, soup, notable_nets=None):
 
 if __name__ == '__main__':
 
-    with SoupExperiment('learn-from-soup') as exp:
+    with SoupExperiment(name='learn-from-soup') as exp:
         exp.soup_size = 10
         exp.soup_life = 100
         exp.trials = 10
@@ -83,10 +83,10 @@ if __name__ == '__main__':
         counters = generate_counters()
         results = []
         for _ in tqdm(range(exp.trials)):
-            soup = Soup(exp.soup_size, lambda net_generator=net_generator,exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
+            soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
             soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity)
             soup.seed()
-            name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
+            name = str(soup.particles[0].name) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
             for time in range(exp.soup_life):
                 soup.evolve()
             count(counters, soup, notable_nets)
@@ -9,8 +9,6 @@ sys.path += os.path.join('..', '.')
 from experiment import *
 from network import *
 
-import tensorflow.python.keras.backend as K
-
 
 def generate_counters():
     """
@@ -84,7 +82,7 @@ if __name__ == '__main__':
                 if net.is_diverged() or net.is_fixpoint():
                     break
                 count(counters, net, notable_nets)
-            keras.backend.clear_session()
+            exp.reset_model()
            xs += [trains_per_selfattack]
             ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
         all_names += [name]
@@ -10,8 +10,7 @@ from experiment import *
 
 if __name__ == '__main__':
     if True:
-        with SoupExperiment("soup") as exp:
-            for run_id in range(1):
+        with SoupExperiment(namne="soup") as exp:
             net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
                 .with_keras_params(activation='linear').with_params(epsilon=0.0001)
             # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
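
Note that the rewritten line passes namne="soup" where every other call site in this commit uses name=. This looks like a typo introduced by the commit; depending on how Experiment.__init__ treats unknown keywords it would either be silently ignored or raise a TypeError. The presumably intended spelling:

    with SoupExperiment(name="soup") as exp:
        ...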
@@ -111,9 +111,9 @@ class Soup(object):
 if __name__ == '__main__':
     if True:
         net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-        soup_generator = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
+        soup_generator = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True)
         exp = SoupExperiment()
-        exp.run_exp(net_generator, 1000, soup_generator, 1, False)
+        exp.run_exp(net_generator, 10, soup_generator, 1, False)
 
         # net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
         # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
@@ -123,10 +123,10 @@ if __name__ == '__main__':
     if True:
         net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
             .with_keras_params(activation='linear').with_params(epsilon=0.0001)
-        soup_generator = lambda: Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
+        soup_generator = lambda: Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
         exp = SoupExperiment(name="soup")
 
-        exp.run_exp(net_generator, 100, soup_generator, 1, False)
+        exp.run_exp(net_generator, 10, soup_generator, 1, False)
 
         # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
         #     .with_keras_params(activation='linear')\