tried out some stuff: max aggregation, use_bias=True in the fixpoint run, divergent/zero particle removal in the soup, and a new code/test.py script

Thomas Gabor
2019-03-04 03:38:22 +01:00
parent 025d9cc337
commit 5295b51c28
3 changed files with 54 additions and 6 deletions


@@ -177,6 +177,13 @@ class AggregatingNeuralNetwork(NeuralNetwork):
            count += 1
        return total / float(count)

    @staticmethod
    def aggregate_max(weights):
        max_found = weights[0]
        for weight in weights:
            # plain conditional; the and/or idiom would fall back to max_found
            # when the new maximum is exactly 0.0
            max_found = weight if weight > max_found else max_found
        return max_found

    @staticmethod
    def deaggregate_identically(aggregate, amount):
        return [aggregate for _ in range(amount)]
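As a quick orientation for this hunk: aggregate_max collapses a list of weights to its largest value, and deaggregate_identically expands an aggregate back to a list of the requested length. A minimal round-trip sketch on plain Python floats (assuming the class lives in network.py, as the wildcard import in code/test.py below suggests):

# Round trip through the new aggregator on plain floats, no Keras involved.
from network import AggregatingNeuralNetwork

weights = [0.3, -1.2, 0.7, 0.1]
aggregate = AggregatingNeuralNetwork.aggregate_max(weights)
restored = AggregatingNeuralNetwork.deaggregate_identically(aggregate, len(weights))
print(aggregate, restored)   # 0.7 [0.7, 0.7, 0.7, 0.7]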
@@ -307,11 +314,12 @@ class RecurrentNeuralNetwork(NeuralNetwork):
        return new_weights


if __name__ == '__main__':
    with FixpointExperiment() as exp:
        for run_id in tqdm(range(100)):
            # net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear')
-           net = AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation='linear').with_params(shuffler=AggregatingNeuralNetwork.shuffle_random, print_all_weight_updates=False)
+           net = AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation='linear').with_params(shuffler=AggregatingNeuralNetwork.shuffle_random, print_all_weight_updates=False, use_bias=True)
            # net = RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear').with_params(print_all_weight_updates=True)
            # net.print_weights()
            exp.run_net(net, 100)
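The only change to this run is use_bias=True in with_params. The chained with_keras_params/with_params calls suggest a builder pattern that merges keyword arguments into per-net dictionaries; the sketch below is a hypothetical stand-in for that pattern, since the NeuralNetwork base class itself is not part of this diff and may differ:

# Hypothetical stand-in for the builder-style configuration used above.
class ConfigurableNet:
    def __init__(self):
        self.params = {}
        self.keras_params = {}

    def with_params(self, **kwargs):
        self.params.update(kwargs)        # e.g. use_bias=True, shuffler=...
        return self                       # returning self enables chaining

    def with_keras_params(self, **kwargs):
        self.keras_params.update(kwargs)  # e.g. activation='linear'
        return self


net = ConfigurableNet().with_keras_params(activation='linear').with_params(use_bias=True)
print(net.params)         # {'use_bias': True}

Returning self from each setter is what allows the one-line configuration chains seen throughout this commit.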


@@ -33,6 +33,11 @@ class Soup:
                other_particle_id = int(prng() * len(self.particles))
                other_particle = self.particles[other_particle_id]
                particle.attack(other_particle)
                if self.params.get('remove_divergent') and particle.is_diverged():
                    self.particles[particle_id] = self.generator()
                if self.params.get('remove_zero') and particle.is_zero():
                    self.particles[particle_id] = self.generator()

    def count(self):
        counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
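The two new checks replace a particle with a freshly generated network whenever it has diverged or collapsed to zero, gated by the remove_divergent and remove_zero flags. A short usage sketch with the flag names from this commit (module paths follow the imports in code/test.py; exact constructor arguments may differ):

# Build a small soup with the new pruning flags enabled; during evolve(),
# diverged or all-zero particles are respawned via the generator.
from network import WeightwiseNeuralNetwork
from soup import Soup

net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='sigmoid')
soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True)
soup.seed()
soup.evolve()
print(soup.count())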
@@ -53,9 +58,12 @@ class Soup:
if __name__ == '__main__':
    with SoupExperiment() as exp:
-       for run_id in tqdm(range(1)):
-           net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='linear').with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
-           soup = Soup(100, net_generator)
+       for run_id in range(1):
+           net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='sigmoid').with_params()
+           # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid').with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
+           # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+           soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
            soup.seed()
-           soup.evolve(100)
+           for _ in tqdm(range(100)):
+               soup.evolve()
            exp.log(soup.count())
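Splitting soup.evolve(100) into one hundred single evolve() calls mainly buys a tqdm progress bar; it also opens the door to per-step logging. A purely illustrative, drop-in variation of the loop above (same soup, exp, and tqdm as in that block) that records the population counts after every step instead of only once at the end:

# Variation on the block above: log soup statistics after each evolution step.
for _ in tqdm(range(100)):
    soup.evolve()
    exp.log(soup.count())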

code/test.py (new file, 32 lines added)

@@ -0,0 +1,32 @@
from experiment import *
from network import *
from soup import *
import numpy as np


def vary(e=0.0, f=0.0):
    # Same three arrays as the hard-coded weights below, with e added to the
    # leading 1.0 entries and f added to every 0.0 entry.
    return [
        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
        np.array([[1.0+e], [0.0+f]], dtype=np.float32)
    ]


net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')

if False:
    # Manually toggled block: start from the hard-coded weights and self-attack 100 times.
    net.set_weights([
        np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
        np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
        np.array([[1.0], [0.0]], dtype=np.float32)
    ])
    print(net.get_weights())
    net.self_attack(100)
    print(net.get_weights())
    print(net.is_fixpoint())

if True:
    # Manually toggled block: start from the perturbed weights and apply five single self-attacks.
    net.set_weights(vary(0.01, 0.0))
    print(net.get_weights())
    for _ in range(5):
        net.self_attack()
        print(net.get_weights())
    print(net.is_fixpoint())
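vary() builds the same three weight arrays as the hard-coded block, only shifted by e and f, so the if True block starts the self-attack loop from a slightly perturbed version of those weights. A standalone check of what it returns (reusing vary() from the file above):

# Inspect the arrays produced by vary(): e has been added to the leading 1.0
# entries and f to every 0.0 entry; shapes match the hard-coded weights.
for layer in vary(e=0.01, f=0.0):
    print(layer.shape)
    print(layer)
# expected shapes: (4, 2), (2, 2), (2, 1)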