added two more experiments, now we possibly have all of them

commit 5c7a646d69
parent 0bcc25121f
@@ -609,11 +609,11 @@ class TrainingNeuralNetworkDecorator():
     def train(self, batchsize=1, store_states=True, epoch=0):
         self.compiled()
         x, y = self.net.compute_samples()
-        savestatecallback = SaveStateCallback(net=self.net, epoch=epoch) if store_states else None
-        history = self.net.model.fit(x=x, y=y, verbose=0, batch_size=batchsize, callbacks=[savestatecallback], initial_epoch=epoch)
+        savestatecallback = SaveStateCallback(net=self, epoch=epoch) if store_states else None
+        history = self.net.model.fit(x=x, y=y, verbose=0, batch_size=batchsize, callbacks=[savestatecallback] if store_states else None, initial_epoch=epoch)
         return history.history['loss'][-1]

-    def train_other(self, other_network, batchsize=1):
+    def learn_from(self, other_network, batchsize=1):
         self.compiled()
         other_network.compiled()
         x, y = other_network.net.compute_samples()
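The two `+` lines above stop a `None` callback from reaching Keras: with `store_states=False`, the old code still passed `callbacks=[savestatecallback]`, i.e. `[None]`. A minimal, self-contained sketch of the guard (SaveStateCallback is stubbed here, since only its construction matters for the fix):

    class SaveStateCallback:  # stub standing in for the project's callback
        def __init__(self, net, epoch=0):
            self.net, self.epoch = net, epoch

    def callbacks_for(store_states, net, epoch=0):
        """Return a callback list only when states are stored, else None."""
        return [SaveStateCallback(net=net, epoch=epoch)] if store_states else None

    assert callbacks_for(False, net=object()) is None   # old code would yield [None] here
    assert len(callbacks_for(True, net=object())) == 1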
@@ -7,6 +7,8 @@ from util import *
 from experiment import *
 from network import *

+import keras.backend
+
 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

@@ -44,7 +46,7 @@ with Experiment('fixpoint-density') as exp:
             net = net_generator().with_params(epsilon=exp.epsilon)
             name = str(net.__class__.__name__) + " activiation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
             count(counters, net, notable_nets)
-            K.clear_session()
+            keras.backend.clear_session()
         all_counters += [counters]
         all_notable_nets += [notable_nets]
         all_names += [name]
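The switch from the `K.` alias to an explicit `keras.backend.clear_session()` matches the `import keras.backend` lines added above. Clearing the session between trials is standard Keras hygiene: it discards the accumulated graph so that building a fresh model on every loop iteration does not keep growing memory. A minimal sketch of the pattern (model construction elided):

    import keras.backend

    for trial in range(3):
        # ... build, train, and evaluate a fresh model here ...
        keras.backend.clear_session()  # drop the old graph before the next trial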
@@ -11,6 +11,9 @@ from experiment import *
 from network import *
 from soup import prng

+import keras.backend
+
+
 from statistics import mean
 avg = mean

@@ -70,8 +73,9 @@ with Experiment('known-fixpoint-variation') as exp:
                     still_fixpoint = False
                 time_to_something += 1
             exp.xs += [current_scale]
-            exp.ys += [time_to_something]
-            exp.zs += [time_as_fixpoint]
+            exp.ys += [time_to_something]  # time steps taken to reach divergence or zero (reaching another fix-point basically never happens)
+            exp.zs += [time_as_fixpoint]  # time steps still regarded as the initial fix-point
+            keras.backend.clear_session()
             current_scale /= 10.0
         for d in range(exp.depth):
             exp.log('variation 10e-' + str(d))
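For orientation, the sweep implied by the context lines: each depth step shrinks the variation scale by a decade, which is what the `'variation 10e-' + str(d)` log labels record. A tiny sketch (the surrounding loop is assumed, not shown in the hunk):

    current_scale = 1.0
    for d in range(4):  # exp.depth in the setup
        print('variation 10e-' + str(d), current_scale)
        current_scale /= 10.0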
code/setups/learn_from_soup.py (new file, 105 lines)
@@ -0,0 +1,105 @@
+import sys
+import os
+
+sys.path += os.path.join('..', '.')
+
+from typing import Tuple
+
+from util import *
+from experiment import *
+from network import *
+from soup import *
+
+
+import keras.backend
+
+from statistics import mean
+avg = mean
+
+# Concat top level dir to system path
+sys.path += os.path.join('..', '.')
+
+
+def generate_counters():
+    """
+    Initial build of the counter dict, to store counts.
+
+    :rtype: dict
+    :return: dictionary holding counters for: 'divergent', 'fix_zero', 'fix_other', 'fix_sec', 'other'
+    """
+    return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
+
+
+def count(counters, soup, notable_nets=[]):
+    """
+    Count the occurrences of the types of weight trajectories.
+
+    :param counters: A counter dictionary.
+    :param soup: A Soup
+    :param notable_nets: A list to store and save interesting candidates
+
+    :rtype Tuple[dict, list]
+    :return: Both the counter dictionary and the list of interesting nets.
+    """
+
+    for net in soup.particles:
+        if net.is_diverged():
+            counters['divergent'] += 1
+        elif net.is_fixpoint():
+            if net.is_zero():
+                counters['fix_zero'] += 1
+            else:
+                counters['fix_other'] += 1
+        # notable_nets += [net]
+        # elif net.is_fixpoint(2):
+        #     counters['fix_sec'] += 1
+        #     notable_nets += [net]
+        else:
+            counters['other'] += 1
+    return counters, notable_nets
+
+
+with SoupExperiment('learn-from-soup') as exp:
+    exp.soup_size = 10
+    exp.soup_life = 100
+    exp.trials = 10
+    exp.learn_from_severity_values = [10 * i for i in range(11)]
+    exp.epsilon = 1e-4
+    net_generators = []
+    for activation in ['sigmoid']:  # ['linear', 'sigmoid', 'relu']:
+        for use_bias in [False]:
+            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+
+    all_names = []
+    all_data = []
+    for net_generator_id, net_generator in enumerate(net_generators):
+        xs = []
+        ys = []
+        zs = []
+        notable_nets = []
+        for learn_from_severity in exp.learn_from_severity_values:
+            counters = generate_counters()
+            results = []
+            for _ in tqdm(range(exp.trials)):
+                soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
+                soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity)
+                soup.seed()
+                name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
+                for time in range(exp.soup_life):
+                    soup.evolve()
+                count(counters, soup, notable_nets)
+                keras.backend.clear_session()
+            xs += [learn_from_severity]
+            ys += [float(counters['fix_zero']) / float(exp.trials)]
+            zs += [float(counters['fix_other']) / float(exp.trials)]
+        all_names += [name]
+        all_data += [{'xs': xs, 'ys': ys, 'zs': zs}]  # xs: learn_from_severity per exp.learn_from_severity_values, ys: zero fix-points after life time, zs: non-zero fix-points after life time
+
+    exp.save(all_names=all_names)
+    exp.save(all_data=all_data)
+    for exp_id, name in enumerate(all_names):
+        exp.log(all_names[exp_id])
+        exp.log(all_data[exp_id])
+        exp.log('\n')
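One detail in the listing worth calling out: the generator lambdas bind the loop variables as default arguments (`lambda activation=activation, use_bias=use_bias: ...`). Without that, Python's late-binding closures would make every generator see only the final loop values. A standalone illustration of the difference:

    # Late binding: all three closures read x after the loop has finished.
    fns = [lambda: x for x in range(3)]
    print([f() for f in fns])      # [2, 2, 2]

    # Default-argument binding (the pattern used above): each closure
    # captures the value x had at definition time.
    fns = [lambda x=x: x for x in range(3)]
    print([f() for f in fns])      # [0, 1, 2]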
@@ -52,7 +52,7 @@ def count(counters, net, notable_nets=[]):
     return counters, notable_nets


-with Experiment('training_fixpoint') as exp:
+with Experiment('mixed-self-fixpoints') as exp:
     exp.trials = 20
     exp.selfattacks = 4
     exp.trains_per_selfattack_values = [100 * i for i in range(11)]
@@ -86,7 +86,7 @@ with Experiment('training_fixpoint') as exp:
             xs += [trains_per_selfattack]
             ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
         all_names += [name]
-        all_data += [{'xs': xs, 'ys': ys}]
+        all_data += [{'xs': xs, 'ys': ys}]  # xs: trains per self-attack from exp.trains_per_selfattack_values, ys: average number of fixpoints found

     exp.save(all_names=all_names)
     exp.save(all_data=all_data)
code/setups/mixed-soup.py (new file, 101 lines)
@@ -0,0 +1,101 @@
+import sys
+import os
+
+sys.path += os.path.join('..', '.')
+
+from typing import Tuple
+
+from util import *
+from experiment import *
+from network import *
+from soup import *
+
+import keras.backend
+
+
+# Concat top level dir to system path
+sys.path += os.path.join('..', '.')
+
+
+def generate_counters():
+    """
+    Initial build of the counter dict, to store counts.
+
+    :rtype: dict
+    :return: dictionary holding counters for: 'divergent', 'fix_zero', 'fix_other', 'fix_sec', 'other'
+    """
+    return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
+
+
+def count(counters, soup, notable_nets=[]):
+    """
+    Count the occurrences of the types of weight trajectories.
+
+    :param counters: A counter dictionary.
+    :param soup: A Soup
+    :param notable_nets: A list to store and save interesting candidates
+
+    :rtype Tuple[dict, list]
+    :return: Both the counter dictionary and the list of interesting nets.
+    """
+
+    for net in soup.particles:
+        if net.is_diverged():
+            counters['divergent'] += 1
+        elif net.is_fixpoint():
+            if net.is_zero():
+                counters['fix_zero'] += 1
+            else:
+                counters['fix_other'] += 1
+        # notable_nets += [net]
+        # elif net.is_fixpoint(2):
+        #     counters['fix_sec'] += 1
+        #     notable_nets += [net]
+        else:
+            counters['other'] += 1
+    return counters, notable_nets
+
+
+with Experiment('mixed-self-fixpoints') as exp:
+    exp.trials = 10
+    exp.soup_size = 10
+    exp.soup_life = 5
+    exp.trains_per_selfattack_values = [10 * i for i in range(11)]
+    exp.epsilon = 1e-4
+    net_generators = []
+    for activation in ['linear']:  # ['linear', 'sigmoid', 'relu']:
+        for use_bias in [False]:
+            net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+            # net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
+
+    all_names = []
+    all_data = []
+    for net_generator_id, net_generator in enumerate(net_generators):
+        xs = []
+        ys = []
+        zs = []
+        for trains_per_selfattack in exp.trains_per_selfattack_values:
+            counters = generate_counters()
+            notable_nets = []
+            for _ in tqdm(range(exp.trials)):
+                soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
+                soup.with_params(attacking_rate=0.1, learn_from_rate=-1, train=trains_per_selfattack, learn_from_severity=-1)
+                soup.seed()
+                name = str(soup.particles[0].net.__class__.__name__) + " activiation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
+                for _ in range(exp.soup_life):
+                    soup.evolve()
+                count(counters, soup, notable_nets)
+                keras.backend.clear_session()
+            xs += [trains_per_selfattack]
+            ys += [float(counters['fix_zero']) / float(exp.trials)]
+            zs += [float(counters['fix_other']) / float(exp.trials)]
+        all_names += [name]
+        all_data += [{'xs': xs, 'ys': ys, 'zs': zs}]  # xs: trains per self-attack from exp.trains_per_selfattack_values, ys: average number of zero fix-points, zs: average number of non-zero fix-points
+
+    exp.save(all_names=all_names)
+    exp.save(all_data=all_data)
+    for exp_id, name in enumerate(all_names):
+        exp.log(all_names[exp_id])
+        exp.log(all_data[exp_id])
+        exp.log('\n')
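The two new setup files differ mainly in which Soup knob they sweep: learn_from_soup.py disables attacking and varies learn_from_severity, while mixed-soup.py enables attacking and varies self-training. Side by side (parameter values copied from the listings; the sweep points are arbitrary illustration):

    severity, trains = 50, 50  # example points from the 0..100 sweep ranges above

    # learn-from-soup: no attacks, peer learning on, severity swept
    learn_from_params = dict(attacking_rate=-1, learn_from_rate=0.1,
                             train=0, learn_from_severity=severity)

    # mixed-soup: attacks on, peer learning off, self-training swept
    mixed_params = dict(attacking_rate=0.1, learn_from_rate=-1,
                        train=trains, learn_from_severity=-1)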
@@ -8,6 +8,8 @@ from util import *
 from experiment import *
 from network import *

+import keras.backend
+
 def generate_counters():
     return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

@@ -49,12 +51,13 @@ with Experiment('training_fixpoint') as exp:
         for run_id in range(exp.run_count):
             loss = net.compiled().train(epoch=run_id+1)
             count(counters, net, notable_nets)
+            keras.backend.clear_session()
         all_counters += [counters]
         all_notable_nets += [notable_nets]
         all_names += [name]
-    exp.save(all_counters=all_counters)
+    exp.save(all_counters=all_counters)  # net types reached in the end
     exp.save(all_notable_nets=all_notable_nets)
-    exp.save(all_names=all_names)
+    exp.save(all_names=all_names)  # experiment setups
     for exp_id, counter in enumerate(all_counters):
         exp.log(all_names[exp_id])
         exp.log(all_counters[exp_id])
code/soup.py (12 lines changed)
@@ -14,7 +14,7 @@ class Soup(object):
         self.generator = generator
         self.particles = []
         self.historical_particles = {}
-        self.params = dict(attacking_rate=0.1, train_other_rate=0.1, train=0)
+        self.params = dict(attacking_rate=0.1, learn_from_rate=0.1, train=0, learn_from_severity=1)
         self.params.update(kwargs)
         self.time = 0

@@ -59,14 +59,16 @@ class Soup(object):
                 particle.attack(other_particle)
                 description['action'] = 'attacking'
                 description['counterpart'] = other_particle.get_uid()
-            if prng() < self.params.get('train_other_rate') and hasattr(self, 'train_other'):
+            if prng() < self.params.get('learn_from_rate'):
                 other_particle_id = int(prng() * len(self.particles))
                 other_particle = self.particles[other_particle_id]
-                particle.train_other(other_particle)
-                description['action'] = 'train_other'
+                for _ in range(self.params.get('learn_from_severity', 1)):
+                    particle.learn_from(other_particle)
+                description['action'] = 'learn_from'
                 description['counterpart'] = other_particle.get_uid()
             for _ in range(self.params.get('train', 0)):
-                loss = particle.compiled().train()
+                particle.compiled()
+                loss = particle.train(store_states=False)  # callbacks on save_state are broken for TrainingNeuralNetwork
                 description['fitted'] = self.params.get('train', 0)
                 description['loss'] = loss
                 description['action'] = 'train_self'
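To make the new `learn_from_severity` semantics concrete: severity is simply the number of times `learn_from` is applied to the same counterpart in one soup step. A self-contained toy (ToyParticle and its update rule are made up for illustration; only the control flow mirrors the hunk above):

    import random

    prng = random.random  # same convention as soup.prng in the repo

    class ToyParticle:
        """Hypothetical stand-in so the control flow below is runnable."""
        def __init__(self, w=1.0):
            self.w = w
        def learn_from(self, other):
            self.w = 0.5 * (self.w + other.w)  # toy update, not the repo's

    def maybe_learn(particle, particles, params):
        # Mirrors the new branch: one draw decides, severity repeats the step.
        if prng() < params.get('learn_from_rate'):
            other = particles[int(prng() * len(particles))]
            for _ in range(params.get('learn_from_severity', 1)):
                particle.learn_from(other)
            return 'learn_from'
        return None

    ps = [ToyParticle(0.0), ToyParticle(4.0)]
    maybe_learn(ps[0], ps, dict(learn_from_rate=1.0, learn_from_severity=3))
    print(ps[0].w)  # 3.5: higher severity pulls the learner closer to its peer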