Added RecurrentNeuralNetwork; did some cleanup.

This commit is contained in:
Thomas Gabor
2019-03-03 03:37:49 +01:00
parent 865e3c4f36
commit c0d250e4f9
2 changed files with 86 additions and 42 deletions

View File

@@ -47,3 +47,32 @@ class Experiment:
for name,value in kwargs.items(): for name,value in kwargs.items():
with open(self.dir + "/" + str(name) + ".dill", "wb") as dill_file: with open(self.dir + "/" + str(name) + ".dill", "wb") as dill_file:
dill.dump(value, dill_file) dill.dump(value, dill_file)
class FixpointExperiment(Experiment):
    """Experiment that repeatedly lets a network attack itself and
    tallies the kind of fixpoint (if any) it settles into.

    Outcome tallies are kept in ``self.counters``; networks that reach a
    non-zero fixpoint are collected in ``self.interesting_fixpoints``.
    """

    def initialize_more(self):
        # One tally per possible outcome of a run.
        self.counters = {
            'divergent': 0,
            'fix_zero': 0,
            'fix_other': 0,
            'fix_sec': 0,
            'other': 0,
        }
        self.interesting_fixpoints = []

    def run_net(self, net, step_limit=100):
        """Self-attack ``net`` until it diverges, reaches a fixpoint,
        or ``step_limit`` steps have elapsed; then record the outcome."""
        for _ in range(step_limit):
            if net.is_diverged() or net.is_fixpoint():
                break
            net.self_attack()
        self.count(net)

    def count(self, net):
        """Classify the final state of ``net`` and bump the matching counter."""
        if net.is_diverged():
            self.counters['divergent'] += 1
            return
        if net.is_fixpoint():
            if net.is_zero():
                self.counters['fix_zero'] += 1
                return
            # Non-trivial fixpoint: keep it and log weights before/after
            # one more self-attack to document its stability.
            self.counters['fix_other'] += 1
            self.interesting_fixpoints.append(net)
            self.log(net.repr_weights())
            net.self_attack()
            self.log(net.repr_weights())
            return
        if net.is_fixpoint(2):
            # Second-order fixpoint (period 2).
            self.counters['fix_sec'] += 1
            return
        self.counters['other'] += 1

View File

@@ -8,7 +8,7 @@ from keras.layers import SimpleRNN, Dense
from keras.layers import Input, TimeDistributed from keras.layers import Input, TimeDistributed
from tqdm import tqdm from tqdm import tqdm
from experiment import Experiment from experiment import FixpointExperiment
def normalize_id(value, norm): def normalize_id(value, norm):
@@ -35,6 +35,8 @@ def are_weights_within(network_weights, lower_bound, upper_bound):
return False return False
return True return True
class NeuralNetwork: class NeuralNetwork:
@staticmethod @staticmethod
@@ -49,12 +51,10 @@ class NeuralNetwork:
s += "\n" s += "\n"
return s return s
def __init__(self, width, depth, **keras_params): def __init__(self, **params):
self.width = width
self.depth = depth
self.params = dict(epsilon=0.00000000000001) self.params = dict(epsilon=0.00000000000001)
self.params.update(params)
self.keras_params = dict(activation='linear', use_bias=False) self.keras_params = dict(activation='linear', use_bias=False)
self.keras_params.update(keras_params)
self.silent = True self.silent = True
def silence(self): def silence(self):
@@ -127,7 +127,9 @@ class NeuralNetwork:
class WeightwiseNeuralNetwork(NeuralNetwork): class WeightwiseNeuralNetwork(NeuralNetwork):
def __init__(self, width, depth, **kwargs): def __init__(self, width, depth, **kwargs):
super().__init__(width, depth, **kwargs) super().__init__(**kwargs)
self.width = width
self.depth = depth
self.model = Sequential() self.model = Sequential()
self.model.add(Dense(units=width, input_dim=4, **self.keras_params)) self.model.add(Dense(units=width, input_dim=4, **self.keras_params))
for _ in range(depth-1): for _ in range(depth-1):
@@ -156,7 +158,6 @@ class WeightwiseNeuralNetwork(NeuralNetwork):
return new_weights return new_weights
class AggregatingNeuralNetwork(NeuralNetwork): class AggregatingNeuralNetwork(NeuralNetwork):
@staticmethod @staticmethod
@@ -173,7 +174,9 @@ class AggregatingNeuralNetwork(NeuralNetwork):
return [aggregate for _ in range(amount)] return [aggregate for _ in range(amount)]
def __init__(self, aggregates, width, depth, **kwargs): def __init__(self, aggregates, width, depth, **kwargs):
super().__init__(width, depth, **kwargs) super().__init__(**kwargs)
self.width = width
self.depth = depth
self.aggregates = aggregates self.aggregates = aggregates
self.aggregator = self.params.get('aggregator', self.__class__.aggregate_average) self.aggregator = self.params.get('aggregator', self.__class__.aggregate_average)
self.deaggregator = self.params.get('deaggregator', self.__class__.deaggregate_identically) self.deaggregator = self.params.get('deaggregator', self.__class__.deaggregate_identically)
@@ -239,41 +242,53 @@ class AggregatingNeuralNetwork(NeuralNetwork):
return new_weights return new_weights
class FixpointExperiment(Experiment):
class RecurrentNeuralNetwork(NeuralNetwork):
def initialize_more(self): def __init__(self, width, depth, **kwargs):
self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0) super().__init__(**kwargs)
self.interesting_fixpoints = [] self.features = 1
def run_net(self, net, step_limit=100): self.width = width
i = 0 self.depth = depth
while i < step_limit and not net.is_diverged() and not net.is_fixpoint(): self.model = Sequential()
net.self_attack() self.model.add(SimpleRNN(units=width, input_dim=self.features, return_sequences=True, **self.keras_params))
i += 1 for _ in range(depth-1):
self.count(net) self.model.add(SimpleRNN(units=width, return_sequences=True, **self.keras_params))
def count(self, net): self.model.add(SimpleRNN(units=self.features, return_sequences=True, **self.keras_params))
if net.is_diverged():
self.counters['divergent'] += 1 def apply(self, *input):
elif net.is_fixpoint(): stuff = np.transpose(np.array([[[input[i]] for i in range(len(input))]]))
if net.is_zero(): return self.model.predict(stuff)[0].flatten()
self.counters['fix_zero'] += 1
else: def apply_to_weights(self, old_weights):
self.counters['fix_other'] += 1 # build list from old weights
self.interesting_fixpoints.append(net) new_weights = copy.deepcopy(old_weights)
self.log(net.repr_weights()) old_weights_list = []
net.self_attack() for layer_id,layer in enumerate(old_weights):
self.log(net.repr_weights()) for cell_id,cell in enumerate(layer):
elif net.is_fixpoint(2): for weight_id,weight in enumerate(cell):
self.counters['fix_sec'] += 1 old_weights_list += [weight]
else: # call network
self.counters['other'] += 1 new_weights_list = self.apply(*old_weights_list)
# write back new weights from list of rnn returns
current_weight_id = 0
for layer_id,layer in enumerate(new_weights):
for cell_id,cell in enumerate(layer):
for weight_id,weight in enumerate(cell):
new_weight = new_weights_list[current_weight_id]
new_weights[layer_id][cell_id][weight_id] = new_weight
current_weight_id += 1
return new_weights
if __name__ == '__main__': if __name__ == '__main__':
if True:
with FixpointExperiment() as exp: with FixpointExperiment() as exp:
for run_id in tqdm(range(100)): for run_id in tqdm(range(100)):
# net = WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear') # net = WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear')
net = AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='linear').with_params(print_all_weight_updates=False) # net = AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='linear').with_params(print_all_weight_updates=False)
exp.run_net(net, 100) net = RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params(print_all_weight_updates=True)
# net.print_weights() # net.print_weights()
exp.log(exp.counters) exp.run_net(net, 100)
exp.log(exp.counters)