Learning Neural Network with FFT feature reduction
@@ -13,11 +13,8 @@ class Experiment:
 
     def __init__(self, name=None, ident=None):
         self.experiment_id = ident or time.time()
-        # TODO repair this path
-        this_file = os.path.realpath(os.getcwd())
-        # What did you want to do here? The number before was nonsensical.
-        self.experiment_name = name or os.path.basename(this_file).split('.')[0]
-        self.base_dir = os.path.join(os.getcwd(), self.experiment_name)
+        self.experiment_name = name or 'experiment'
+        self.base_dir = os.path.join('experiments', self.experiment_name)
         self.next_iteration = 0
         self.log_messages = []
@@ -25,9 +22,8 @@ class Experiment:
         self.dir = os.path.join(self.base_dir, 'experiments', 'exp-{name}-{id}-{it}'.format(
             name=self.experiment_name, id=self.experiment_id, it=self.next_iteration)
         )
-        # Use makedirs for subfolder creation
         os.makedirs(self.dir)
-        # os.mkdir(self.dir)
         print("** created {dir} **".format(dir=self.dir))
         return self
 
@@ -54,7 +50,7 @@ class Experiment:
 class FixpointExperiment(Experiment):
 
     def __init__(self):
-        super().__init__()
+        super().__init__(name=self.__class__.__name__)
         self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
         self.interesting_fixpoints = []
 
@@ -85,3 +81,6 @@ class FixpointExperiment(Experiment):
 
 class SoupExperiment(Experiment):
     pass
+
+class IdentLearningExperiment(Experiment):
+    pass
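The diff implies that Experiment subclasses are used as context managers: the method shown above creates the run directory and returns self, and the main block in code/network.py enters experiments with a with-statement. A minimal usage sketch, assuming only the names this commit itself introduces:

# Hypothetical usage; class and method names are taken from this diff.
with IdentLearningExperiment() as exp:
    exp.log('run directory: ' + exp.dir)  # exp.dir is created on entry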
@@ -824,7 +824,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.6.7"
+   "version": "3.6.8"
   }
  },
 "nbformat": 4,
code/network.py (108 lines changed)
@@ -8,7 +8,7 @@ from keras.layers import SimpleRNN, Dense
 from keras.layers import Input, TimeDistributed
 from tqdm import tqdm
 
-from experiment import FixpointExperiment
+from experiment import FixpointExperiment, IdentLearningExperiment
 
 
 def normalize_id(value, norm):
@@ -36,7 +36,6 @@ def are_weights_within(network_weights, lower_bound, upper_bound):
     return True
 
 
-
 class NeuralNetwork:
 
     @staticmethod
@@ -52,10 +51,12 @@ class NeuralNetwork:
         return s
 
     def __init__(self, **params):
+        self.model = Sequential()
         self.params = dict(epsilon=0.00000000000001)
         self.params.update(params)
         self.keras_params = dict(activation='linear', use_bias=False)
         self.silent = True
+        self.model = None
 
     def silence(self):
         self.silent = True
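As reconstructed, __init__ assigns self.model twice (Sequential() at the top, None at the bottom); the subclasses below call self.model.add(...) right after super().__init__(**kwargs), which only works while the Sequential assignment is in effect. Separately, the base class offers a fluent configuration interface (with_params here, with_keras_params later in this diff); a sketch of how it chains, with illustrative parameter values:

# Illustrative only; epsilon and the print flag are example values.
net = WeightwiseNeuralNetwork(width=2, depth=2) \
    .with_keras_params(activation='linear') \
    .with_params(epsilon=1e-9, print_all_weight_updates=False)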
@@ -64,7 +65,7 @@ class NeuralNetwork:
     def unsilence(self):
         self.silent = False
         return self
 
     def with_params(self, **kwargs):
         self.params.update(kwargs)
         return self
@@ -79,6 +80,10 @@ class NeuralNetwork:
     def set_weights(self, new_weights):
         return self.model.set_weights(new_weights)
 
+    def apply_to_weights(self, old_weights):
+        # Placeholder
+        return old_weights
+
     def apply_to_network(self, other_network):
         new_weights = self.apply_to_weights(other_network.get_weights())
         return new_weights
@@ -110,14 +115,16 @@ class NeuralNetwork:
     def is_fixpoint(self, degree=1, epsilon=None):
         epsilon = epsilon or self.params.get('epsilon')
         old_weights = self.get_weights()
+        assert degree, "Degree cannot be 0 or None"
         self.silence()
         for _ in range(degree):
             new_weights = self.apply_to_network(self)
 
         self.unsilence()
         if are_weights_diverged(new_weights):
             return False
         for layer_id,layer in enumerate(old_weights):
-            for cell_id,cell in enumerate(layer):
+            for cell_id, cell in enumerate(layer):
                 for weight_id,weight in enumerate(cell):
                     new_weight = new_weights[layer_id][cell_id][weight_id]
                     if abs(new_weight - weight) >= epsilon:
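The loop above implements a per-weight epsilon test: a network is a degree-1 fixpoint when applying it to its own weights moves no single weight by epsilon or more. A standalone numpy sketch of the same predicate, assuming weights arrive as a list of per-layer arrays (the function name is hypothetical):

import numpy as np

def is_epsilon_fixpoint(old_weights, new_weights, epsilon=1e-14):
    # True when every single weight moved by strictly less than epsilon.
    return all(
        np.all(np.abs(np.asarray(new_layer) - np.asarray(old_layer)) < epsilon)
        for old_layer, new_layer in zip(old_weights, new_weights)
    )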
@@ -131,17 +138,15 @@ class NeuralNetwork:
             print(self.repr_weights())
 
 
 
 class WeightwiseNeuralNetwork(NeuralNetwork):
 
     def __init__(self, width, depth, **kwargs):
         super().__init__(**kwargs)
         self.width = width
         self.depth = depth
-        self.model = Sequential()
-        self.model.add(Dense(units=width, input_dim=4, **self.keras_params))
-        for _ in range(depth-1):
-            self.model.add(Dense(units=width, **self.keras_params))
+        self.model.add(Dense(units=self.width, input_dim=4, **self.keras_params))
+        for _ in range(self.depth-1):
+            self.model.add(Dense(units=self.width, **self.keras_params))
         self.model.add(Dense(units=1, **self.keras_params))
 
     def apply(self, *input):
@@ -151,16 +156,21 @@ class WeightwiseNeuralNetwork(NeuralNetwork):
     def apply_to_weights(self, old_weights):
         new_weights = copy.deepcopy(old_weights)
         max_layer_id = len(old_weights) - 1
 
         for layer_id,layer in enumerate(old_weights):
             max_cell_id = len(layer) - 1
-            for cell_id,cell in enumerate(layer):
+
+            for cell_id, cell in enumerate(layer):
                 max_weight_id = len(cell) - 1
-                for weight_id,weight in enumerate(cell):
+
+                for weight_id, weight in enumerate(cell):
                     normal_layer_id = normalize_id(layer_id, max_layer_id)
                     normal_cell_id = normalize_id(cell_id, max_cell_id)
                     normal_weight_id = normalize_id(weight_id, max_weight_id)
 
                     new_weight = self.apply(weight, normal_layer_id, normal_cell_id, normal_weight_id)
                     new_weights[layer_id][cell_id][weight_id] = new_weight
 
                     if self.params.get("print_all_weight_updates", False) and not self.silent:
                         print("updated old weight " + str(weight) + "\t @ (" + str(layer_id) + "," + str(cell_id) + "," + str(weight_id) + ") to new value " + str(new_weight) + "\t calling @ (" + str(normal_layer_id) + "," + str(normal_cell_id) + "," + str(normal_weight_id) + ")")
         return new_weights
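normalize_id itself is only context in this diff; its body is not shown. A minimal implementation consistent with the call sites above, mapping an index in [0, max_id] to [0.0, 1.0], would be the following. This is an assumption, not the repository's code:

def normalize_id(value, norm):
    # Assumed behavior: scale an integer index into [0.0, 1.0];
    # treat a degenerate axis (norm == 0) as a single position at 1.0.
    return value / norm if norm else 1.0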
@@ -203,7 +213,6 @@ class AggregatingNeuralNetwork(NeuralNetwork):
         self.aggregates = aggregates
         self.width = width
         self.depth = depth
-        self.model = Sequential()
         self.model.add(Dense(units=width, input_dim=self.aggregates, **self.keras_params))
         for _ in range(depth-1):
             self.model.add(Dense(units=width, **self.keras_params))
@@ -272,8 +281,7 @@ class AggregatingNeuralNetwork(NeuralNetwork):
             print("to new weight aggregations " + str(new_aggregations))
             print("resulting in network weights ...")
             print(self.__class__.weights_to_string(new_weights))
         return new_weights
 
 
-
 class RecurrentNeuralNetwork(NeuralNetwork):
@@ -283,7 +291,6 @@ class RecurrentNeuralNetwork(NeuralNetwork):
         self.features = 1
         self.width = width
         self.depth = depth
-        self.model = Sequential()
         self.model.add(SimpleRNN(units=width, input_dim=self.features, return_sequences=True, **self.keras_params))
         for _ in range(depth-1):
             self.model.add(SimpleRNN(units=width, return_sequences=True, **self.keras_params))
@@ -312,14 +319,67 @@ class RecurrentNeuralNetwork(NeuralNetwork):
                 new_weights[layer_id][cell_id][weight_id] = new_weight
                 current_weight_id += 1
         return new_weights
 
 
+
+class LearningNeuralNetwork(NeuralNetwork):
+
+    @staticmethod
+    def _apply_mean_reduction(self):
+        return
+
+    @staticmethod
+    def _apply_fft_reduction(self):
+        return
+
+    def __init__(self, width, depth, features, mode='fft', **kwargs):
+        super().__init__(**kwargs)
+        self.width = width
+        self.depth = depth
+        self.features = features
+        self.compile_params = dict(loss='mse', optimizer='sgd')
+        self.apply_reduction = self._apply_fft_reduction if mode.lower() == 'fft' else self._apply_mean_reduction
+        self.model = Sequential()
+        self.model.add(Dense(units=self.width, input_dim=self.features, **self.keras_params))
+        for _ in range(self.depth-1):
+            self.model.add(Dense(units=self.width, **self.keras_params))
+        self.model.add(Dense(units=self.features, **self.keras_params))
+        self.model.compile(**self.compile_params)
+
+    def with_compile_params(self, **kwargs):
+        self.compile_params.update(kwargs)
+        return self
+
+    def learn(self, epochs, batchsize=1):
+        with tqdm(total=epochs, ascii=True,
+                  desc='Type: {t} @ Epoch:'.format(t=self.__class__.__name__),
+                  postfix=["Loss", dict(value=0)]) as bar:
+            for epoch in range(epochs):
+                old_weights = self.get_weights()
+                single_dim_weights = np.hstack([w.flatten() for w in old_weights])
+                x = np.fft.fft(single_dim_weights, n=self.features)
+                history = self.model.fit(x=x, y=x)
+                bar.postfix[1]["value"] = history
+                bar.update()
+
+
 if __name__ == '__main__':
-    with FixpointExperiment() as exp:
-        for run_id in tqdm(range(100)):
-            # net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear')
-            net = AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation='linear').with_params(shuffler=AggregatingNeuralNetwork.shuffle_random, print_all_weight_updates=False, use_bias=True)
-            # net = RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear').with_params(print_all_weight_updates=True)
-            # net.print_weights()
-            exp.run_net(net, 100)
-            exp.log(exp.counters)
+    if False:
+        with FixpointExperiment() as exp:
+            for run_id in tqdm(range(100)):
+                # net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear')
+                net = AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation='linear').with_params(shuffler=AggregatingNeuralNetwork.shuffle_random, print_all_weight_updates=False, use_bias=True)
+                # net = RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear').with_params(print_all_weight_updates=True)
+
+                # net.print_weights()
+                exp.run_net(net, 100)
+                exp.log(exp.counters)
+
+    if True:
+        with IdentLearningExperiment() as exp:
+            net = LearningNeuralNetwork(width=2, depth=2, features=2) \
+                .with_keras_params(activation='linear') \
+                .with_params(print_all_weight_updates=False)
+            net.learn(100)
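The core of the commit is the reduction in learn(): the network's own weights are flattened into one vector and compressed to a fixed-length feature vector with np.fft.fft, then the model is trained to reproduce that vector (identity learning). Two details of the added code are easy to miss: np.fft.fft returns complex coefficients, which a Keras Dense layer will not accept directly, and model.fit returns a History object rather than a scalar loss. A standalone sketch under those assumptions (function name hypothetical; magnitudes used as the real-valued view):

import numpy as np

def fft_reduce(weight_arrays, n_features):
    # Flatten all layer weight arrays into one 1-D vector.
    flat = np.hstack([w.flatten() for w in weight_arrays])
    # A truncated/zero-padded FFT yields a fixed-length complex spectrum.
    spectrum = np.fft.fft(flat, n=n_features)
    # Take magnitudes for a real-valued vector and add a batch dimension.
    return np.abs(spectrum).reshape(1, n_features)

# Example: feed the reduced vector to the model as input and target, and
# read the scalar loss from the History object for the progress bar.
# x = fft_reduce(net.get_weights(), net.features)
# history = net.model.fit(x=x, y=x, verbose=0)
# loss = history.history['loss'][-1]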
@@ -23,6 +23,7 @@ if __name__ == '__main__':
     net.self_attack(100)
     print(net.get_weights())
     print(net.is_fixpoint())
 
     if True:
         net.set_weights(vary(0.01, 0.0))
         print(net.get_weights())