This commit is contained in: latest
Binary files (4) not shown.
@@ -0,0 +1,12 @@
+ParticleDecorator activiation='linear' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='sigmoid' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
+ParticleDecorator activiation='relu' use_bias='False'
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 100}
+
+
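These counter dictionaries come from the generate_counters() / count(...) helpers used throughout the setup scripts in this commit; every trial lands in exactly one bucket. A minimal sketch of that bookkeeping, assuming only the five keys visible in the logs (the predicate names in count() other than is_fixpoint() are hypothetical, and the 'fix_sec' bucket is left to the repo's own logic):

def generate_counters():
    # One zeroed bucket per outcome; keys match the logged dicts above.
    return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

def count(counters, soup, notable_nets):
    # Illustrative classification; 'particles' and the is_diverged /
    # is_zero_fixpoint predicates are assumptions, not the repo's API.
    for net in soup.particles:
        if net.is_diverged():
            counters['divergent'] += 1
        elif net.is_zero_fixpoint():
            counters['fix_zero'] += 1
            notable_nets.append(net)
        elif net.is_fixpoint():
            counters['fix_other'] += 1
            notable_nets.append(net)
        else:
            counters['other'] += 1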
Binary files (4) not shown.
@@ -0,0 +1,4 @@
+TrainingNeuralNetworkDecorator activiation='sigmoid' use_bias=False
+{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
+
+
Binary files (3) not shown.
@@ -0,0 +1,4 @@
+ParticleDecorator activiation='linear' use_bias=False
+{'xs': [0, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000], 'ys': [0.9, 0.95, 1.0, 0.95, 0.9, 0.95, 0.85, 0.8, 0.85, 0.85, 0.75]}
+
+
Binary files (3) not shown.
@@ -0,0 +1,4 @@
+TrainingNeuralNetworkDecorator activiation='linear' use_bias=False
+{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.4, 0.2, 0.3, 0.2, 0.3, 0.3, 0.5, 0.3, 0.9, 0.6, 0.2], 'zs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}
+
+
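In these trajectory dicts, xs holds the swept checkpoint positions and, per the accumulation code in the setup hunks further down (ys += [float(counters['fix_zero']) / float(exp.trials)]), ys and zs are the fix_zero and fix_other rates over exp.trials. A quick way to eyeball a logged dict, assuming matplotlib is available (the data is copied from the hunk above):

import matplotlib.pyplot as plt

data = {'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100],
        'ys': [0.4, 0.2, 0.3, 0.2, 0.3, 0.3, 0.5, 0.3, 0.9, 0.6, 0.2],
        'zs': [0.0] * 11}

plt.plot(data['xs'], data['ys'], label='fix_zero rate')
plt.plot(data['xs'], data['zs'], label='fix_other rate')
plt.xlabel('x (sweep value)')
plt.ylabel('rate over trials')
plt.legend()
plt.savefig('trajectory.png')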
Binary files (3) not shown.
@@ -0,0 +1,4 @@
+ParticleDecorator activiation='linear' use_bias=False
+{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 20}
+
+
@@ -1,6 +1,7 @@
 import sys
 import os
 
+# Concat top Level dir to system environmental variables
 sys.path += os.path.join('..', '.')
 
 from typing import Tuple
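One caveat on the path line kept above: sys.path is a list, so += with the string returned by os.path.join('..', '.') extends it with the individual characters '.', '.', '/', '.' rather than with a single path entry. Since '.' (the working directory) still lands on the path, imports keep working when scripts run from the repo root, which may be why this went unnoticed. A more deliberate form would be:

import os
import sys

# Append the parent directory as one sys.path entry instead of
# extending the list with the characters of the string "../.".
sys.path.append(os.path.join('..', '.'))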
@@ -16,9 +17,6 @@ import keras.backend
 from statistics import mean
 avg = mean
 
-# Concat top Level dir to system environmental variables
-sys.path += os.path.join('..', '.')
-
 
 def generate_counters():
     """
@@ -91,6 +89,7 @@ with SoupExperiment('learn-from-soup') as exp:
                 soup.evolve()
             count(counters, soup, notable_nets)
             keras.backend.clear_session()
+
         xs += [learn_from_severity]
         ys += [float(counters['fix_zero']) / float(exp.trials)]
         zs += [float(counters['fix_other']) / float(exp.trials)]
@@ -102,6 +101,7 @@ with SoupExperiment('learn-from-soup') as exp:
 
     exp.save(all_names=all_names)
     exp.save(all_data=all_data)
+    exp.save(soup=soup.without_particles())
    for exp_id, name in enumerate(all_names):
         exp.log(all_names[exp_id])
         exp.log(all_data[exp_id])
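The added save call matches soup_trajectorys.py further down: soup.without_particles() apparently strips the live Keras-backed particles, which serialize poorly, while keeping the recorded histories. A minimal reload of the resulting file, with the soup.dill name taken from the comments in soup_trajectorys.py (the actual path depends on the experiment directory):

import dill

with open('soup.dill', 'rb') as f:
    soup = dill.load(f)
print(list(soup.historical_particles))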
@@ -75,7 +75,7 @@ with Experiment('mixed-self-fixpoints') as exp:
     for trains_per_selfattack in exp.trains_per_selfattack_values:
         counters = generate_counters()
         notable_nets = []
-        for _ in tqdm(range(exp.trials)):
+        for soup_idx in tqdm(range(exp.trials)):
             soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
             soup.with_params(attacking_rate=0.1, learn_from_rate=-1, train=trains_per_selfattack, learn_from_severity=-1)
             soup.seed()
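A detail worth keeping in this hunk: the net_generator=net_generator, exp=exp defaults in the lambda freeze the current values at definition time. Python closures otherwise late-bind, so a soup built in a later iteration would silently pick up whatever those names refer to by then. The standard demonstration:

# Late binding: every closure sees the final value of i.
funcs = [lambda: i for i in range(3)]
print([f() for f in funcs])        # [2, 2, 2]

# Default arguments are evaluated immediately, freezing the value.
funcs = [lambda i=i: i for i in range(3)]
print([f() for f in funcs])        # [0, 1, 2]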
@@ -84,6 +84,7 @@ with Experiment('mixed-self-fixpoints') as exp:
                 soup.evolve()
             count(counters, soup, notable_nets)
             keras.backend.clear_session()
+
         xs += [trains_per_selfattack]
         ys += [float(counters['fix_zero']) / float(exp.trials)]
         zs += [float(counters['fix_other']) / float(exp.trials)]
code/setups/network_trajectorys.py (new file, 106 lines)
@@ -0,0 +1,106 @@
+import sys
+import os
+
+# Concat top Level dir to system environmental variables
+sys.path += os.path.join('..', '.')
+
+from soup import *
+from experiment import *
+
+
+
+if __name__ == '__main__':
+    def run_exp(net, prints=False):
+        # INFO Run_ID needs to be more than 0, so that exp stores the trajectories!
+        exp.run_net(net, 100, run_id=run_id + 1)
+        exp.historical_particles[run_id] = net
+        if prints:
+            print("Fixpoint? " + str(net.is_fixpoint()))
+            print("Loss " + str(loss))
+
+    if True:
+        # WeightWise Neural Network
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(10)):
+                net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)
+                                        .with_keras_params(activation='linear'))
+                run_exp(net)
+                K.clear_session()
+            exp.log(exp.counters)
+
+    if True:
+        # Aggregating Neural Network
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(10)):
+                net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2)
+                                        .with_keras_params(activation='linear'))
+                run_exp(net)
+                K.clear_session()
+            exp.log(exp.counters)
+
+    if True:
+        # FFT Neural Network
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(10)):
+                net = ParticleDecorator(FFTNeuralNetwork(aggregates=4, width=2, depth=2)
+                                        .with_keras_params(activation='linear'))
+                run_exp(net)
+                K.clear_session()
+            exp.log(exp.counters)
+
+    if True:
+        # ok so this works quite reliably
+        with FixpointExperiment() as exp:
+            for i in range(10):
+                run_count = 100
+                net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
+                net.with_params(epsilon=0.0001).with_keras_params(activation='linear')
+                for run_id in tqdm(range(run_count+1)):
+                    net.compiled()
+                    loss = net.train(epoch=run_id)
+                    if run_id % 10 == 0:
+                        run_exp(net)
+                K.clear_session()
+
+    if True:
+        # ok so this works quite reliably
+        with FixpointExperiment() as exp:
+            for i in range(10):
+                run_count = 100
+                net = TrainingNeuralNetworkDecorator(ParticleDecorator(AggregatingNeuralNetwork(4, width=2, depth=2)))
+                net.with_params(epsilon=0.0001).with_keras_params(activation='linear')
+                for run_id in tqdm(range(run_count+1)):
+                    net.compiled()
+                    loss = net.train(epoch=run_id)
+                    if run_id % 10 == 0:
+                        run_exp(net)
+                K.clear_session()
+
+    if False:
+        # this explodes in our faces completely... NAN everywhere
+        # TODO: Wtf is happening here?
+        with FixpointExperiment() as exp:
+            run_count = 10000
+            net = TrainingNeuralNetworkDecorator(RecurrentNeuralNetwork(width=2, depth=2))\
+                .with_params(epsilon=0.1e-2).with_keras_params(optimizer='sgd', activation='linear')
+            for run_id in tqdm(range(run_count+1)):
+                loss = net.compiled().train()
+                if run_id % 500 == 0:
+                    net.print_weights()
+                    # print(net.apply_to_network(net))
+                    print("Fixpoint? " + str(net.is_fixpoint()))
+                    print("Loss " + str(loss))
+                    print()
+    if False:
+        # and this gets somewhat interesting... we can still achieve non-trivial fixpoints
+        # over multiple applications when training enough in-between
+        with MixedFixpointExperiment() as exp:
+            for run_id in range(10):
+                net = TrainingNeuralNetworkDecorator(FFTNeuralNetwork(2, width=2, depth=2))\
+                    .with_params(epsilon=0.0001, activation='sigmoid')
+                exp.run_net(net, 500, 10)
+
+                net.print_weights()
+
+                print("Fixpoint? " + str(net.is_fixpoint()))
+            exp.log(exp.counters)
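Note that run_exp() above only works because it is called from a scope where exp and run_id already exist, and its prints branch reads a loss that the untrained blocks never define (run_exp(net, prints=True) would raise NameError there). A more explicit signature, as a suggestion rather than what the commit does:

def run_exp(exp, net, run_id, loss=None, prints=False):
    # Same behavior, but dependencies are passed in rather than
    # read from the enclosing script scope.
    exp.run_net(net, 100, run_id=run_id + 1)
    exp.historical_particles[run_id] = net
    if prints:
        print("Fixpoint? " + str(net.is_fixpoint()))
        print("Loss " + str(loss))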
code/setups/soup_trajectorys.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+import sys
+import os
+
+# Concat top Level dir to system environmental variables
+sys.path += os.path.join('..', '.')
+
+from soup import *
+from experiment import *
+
+
+if __name__ == '__main__':
+    if True:
+        with SoupExperiment("soup") as exp:
+            for run_id in range(10):
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
+                #     .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
+                #     .with_keras_params(activation='linear')
+                net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
+                    .with_keras_params(activation='linear')
+                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
+                soup.seed()
+                for _ in tqdm(range(100)):
+                    soup.evolve()
+                exp.log(soup.count())
+                # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
+                # or soup.historical_particles[particle_uid].states[time_step]['weights']
+                # from soup.dill
+                exp.save(soup=soup.without_particles())
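The trailing comments document the saved state layout. Pulling one particle's trajectory back out of the pickle looks roughly like this, assuming states behaves as a sequence of per-step dicts exactly as the comments describe:

import dill

with open('soup.dill', 'rb') as f:
    soup = dill.load(f)

# Grab any recorded particle and unpack its per-step history.
particle = next(iter(soup.historical_particles.values()))
losses = [state['loss'] for state in particle.states]
weights = [state['weights'] for state in particle.states]
print(len(losses), losses[:5])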
code/soup.py (26 changed lines)
@@ -109,10 +109,11 @@ class Soup(object):
 
 
 if __name__ == '__main__':
-    if True:
+    if False:
         with SoupExperiment() as exp:
             for run_id in range(1):
                 net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+                # net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                 # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
                 #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                 # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
@@ -121,21 +122,26 @@ if __name__ == '__main__':
             for _ in tqdm(range(1000)):
                 soup.evolve()
             exp.log(soup.count())
+            exp.save(soup=soup.without_particles())
 
-    if False:
+    if True:
         with SoupExperiment("soup") as exp:
             for run_id in range(1):
-                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)).with_keras_params(
-                    activation='sigmoid').with_params(epsilon=0.0001)
-                # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
+                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2))\
+                    .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
+                #     .with_keras_params(activation='linear')\
+                #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
+                # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
+                #     .with_keras_params(activation='linear')\
                 #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                 # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=10)
+                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
                 soup.seed()
                 for _ in tqdm(range(100)):
                     soup.evolve()
-                soup.print_all()
-
                 exp.log(soup.count())
-                exp.save(soup=soup.without_particles())  # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
-                # or soup.historical_particles[particle_uid].states[time_step]['weights'] from soup.dill
+                # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
+                # or soup.historical_particles[particle_uid].states[time_step]['weights']
+                # from soup.dill
+                exp.save(soup=soup.without_particles())
@@ -98,6 +98,8 @@ def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
         return (val - a) / (b - a)
 
     data_list = build_from_soup_or_exp(soup_or_experiment)
+    if not data_list:
+        return
 
     bupu = cl.scales['11']['div']['RdYlGn']
     scale = cl.interp(bupu, len(data_list)+1)  # Map color scale to N bins
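For readers unfamiliar with colorlover (the cl above): scales['11']['div']['RdYlGn'] is an 11-step diverging scale, and cl.interp stretches it to one bin per trajectory so each particle gets its own color; note the variable is named bupu even though the selected scale is RdYlGn. The (val - a) / (b - a) helper then normalizes a value into [0, 1] for bin lookup. The same idea standalone:

import colorlover as cl

scale = cl.interp(cl.scales['11']['div']['RdYlGn'], 25)  # 25 color bins

def normalize(val, a, b):
    return (val - a) / (b - a)

# Map a value from [0.0, 1.0] onto one of the 25 'rgb(...)' strings.
idx = int(normalize(0.42, 0.0, 1.0) * (len(scale) - 1))
print(scale[idx])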
@@ -260,4 +262,4 @@ if __name__ == '__main__':
     in_file = args.in_file[0]
     out_file = args.out_file
 
-    search_and_apply(in_file, plot_latent_trajectories_3D, ["experiment.dill"])
+    search_and_apply(in_file, plot_latent_trajectories_3D, ["experiment.dill", "soup.dill"])
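search_and_apply() itself is not part of this diff; from the call site it evidently walks in_file for the listed dill files and hands each unpickled object to the plot function. A plausible sketch under that reading (only the name, the arguments, and the plot function's filename keyword come from the code above; the body is illustrative):

import os
import dill

def search_and_apply(root, plot_fn, file_names):
    # Find matching .dill files under `root`, unpickle, and plot each.
    for dirpath, _, names in os.walk(root):
        for name in names:
            if name in file_names:
                with open(os.path.join(dirpath, name), 'rb') as f:
                    obj = dill.load(f)
                plot_fn(obj, filename=os.path.join(dirpath, 'plot'))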