Dense Code

Si11ium
2019-03-01 20:14:12 +01:00
parent cfbf341814
commit ee3ac7d41a
2 changed files with 868 additions and 24 deletions

code/fixpoint-2.ipynb (new file, +832 lines; file diff suppressed because one or more lines are too long)


@@ -4,10 +4,11 @@ from keras.layers import SimpleRNN, Dense
 from keras.layers import Input, TimeDistributed
 from tqdm import tqdm
+import itertools
 from typing import Union
 import numpy as np


 class Network(object):
     def __init__(self, features, cells, layers, bias=False, recurrent=False):
         self.features = features
@@ -31,16 +32,17 @@ class Network(object):
         self.parameters = np.sum([p_layer_1, p_layer_n, p_layer_out])
         # Build network
         cell = SimpleRNN if recurrent else Dense
-        self.inputs, x = Input(shape=(self.parameters // self.features, self.features,)), None
+        self.inputs, x = Input(shape=(self.parameters // self.features,
+                                      self.features) if recurrent else (self.features,)), None
         for layer in range(self.num_layer):
             if recurrent:
-                x = SimpleRNN(cells, activation=None, use_bias=False,
+                x = SimpleRNN(self.cells, activation=None, use_bias=False,
                               return_sequences=True)(self.inputs if layer == 0 else x)
             else:
-                x = Dense(cells, activation=None, use_bias=False,
+                x = Dense(self.cells, activation=None, use_bias=False,
                           )(self.inputs if layer == 0 else x)
-        self.outputs = Dense(self.features, activation=None, use_bias=False)(x)
+        self.outputs = Dense(self.features if recurrent else 1, activation=None, use_bias=False)(x)
         print('Network initialized, i haz {p} params @:{e}Features: {f}{e}Cells: {c}{e}Layers: {l}'.format(
             p=self.parameters, l=self.num_layer, c=self.cells, f=self.features, e='\n{}'.format(' ' * 5))
         )
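For orientation, a quick sketch of the two input layouts this hunk distinguishes (plain numpy, values hypothetical): the recurrent network reads the flat weight vector as a sequence of parameters // features timesteps with features values each, while the dense network now takes features values per weight and emits a single output per weight.

    import numpy as np

    features, parameters = 2, 24
    flat = np.zeros(parameters)  # stand-in for a flattened weight vector

    # Recurrent layout: one batch, (parameters // features) timesteps, `features` values each.
    recurrent_x = flat.reshape(1, parameters // features, features)   # shape (1, 12, 2)

    # Dense layout: one sample per weight, `features` values per sample
    # (e.g. the weight itself plus a positional encoding), one output each.
    dense_x = np.zeros((parameters, features))                        # shape (24, 2)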
@@ -65,10 +67,8 @@ class _BaseNetwork(Model):
         flat = np.asarray(np.concatenate([x.flatten() for x in weights]))
         return flat

-    def step(self):
-        flat = self.get_weights_flat()
-        x = np.reshape(flat, (1, -1, self.features))
-        return self.predict(x).flatten()
+    def step(self, x):
+        pass

     def step_other(self, other: Union[Sequential, Model]) -> bool:
         pass
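step() is reduced to a stub in the base class; each subclass now supplies its own encoding of the weight vector, as the two hunks below show. A minimal distillation of the resulting pattern, assuming the class layout shown here:

    class _BaseNetworkSketch:
        # Hypothetical sketch of the refactor: the base class no longer knows
        # how to encode its own weights for self-application.
        def step(self, x):
            raise NotImplementedError  # RecurrentNetwork and FeedForwardNetwork override this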
@@ -98,13 +98,18 @@ class RecurrentNetwork(_BaseNetwork):
         self.parameters = network.parameters
         assert self.parameters == self.get_parameter_count()

+    def step(self, x):
+        shaped = np.reshape(x, (1, -1, self.features))
+        return self.predict(shaped).flatten()
+
     def fit(self, epochs=500, **kwargs):
         losses = []
         with tqdm(total=epochs, ascii=True,
                   desc='Type: {t}'. format(t=self.__class__.__name__),
                   postfix=["Loss", dict(value=0)]) as bar:
             for _ in range(epochs):
-                y = self.step()
+                x = self.get_weights_flat()
+                y = self.step(x)
                 weights = self.get_weights()
                 global_idx = 0
                 for idx, weight_matrix in enumerate(weights):
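What one recurrent self-application step computes, as a standalone sketch (model stands in for the compiled Keras model; the reshape matches the code above):

    import numpy as np

    def recurrent_step(model, flat_weights, features):
        # Fold the flat weight vector into (1, timesteps, features),
        # run it through the network, and flatten the prediction back
        # into a weight vector of the same length.
        shaped = np.reshape(flat_weights, (1, -1, features))
        return model.predict(shaped).flatten()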
@@ -125,7 +130,14 @@ class FeedForwardNetwork(_BaseNetwork):
         self.features = network.features
         self.parameters = network.parameters
         self.num_layer = network.num_layer
-        assert self.parameters == self.get_parameter_count()
+        self.num_cells = network.cells
+        # assert self.parameters == self.get_parameter_count()
+
+    def step(self, x):
+        return self.predict(x)
+
+    def step_other(self, x):
+        return self.predict(x)

     def fit(self, epochs=500, **kwargs):
         losses = []
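In the dense variant, step() and step_other() are the same forward pass; only the origin of the weights differs. A hedged usage sketch (net and other are hypothetical FeedForwardNetwork instances; encode() stands for the per-weight encoding built in fit() below):

    # y_self: the network's own weights read and rewritten through itself.
    y_self = net.step(encode(net.get_weights_flat()))

    # y_other: the same network applied to another network's weights.
    y_other = net.step_other(encode(other.get_weights_flat()))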
@@ -133,30 +145,30 @@ class FeedForwardNetwork(_BaseNetwork):
                   desc='Type: {t} @ Epoch:'. format(t=self.__class__.__name__),
                   postfix=["Loss", dict(value=0)]) as bar:
             for _ in range(epochs):
-                y = self.step()
+                all_weights = self.get_weights_flat()
+                cell_idx = np.apply_along_axis(lambda x: x/self.num_cells, 0, np.arange(int(self.get_parameter_count())))
+                xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)
+                y = self.step(xc)
                 weights = self.get_weights()
-                # This is where i have to apply the aggregator
                 global_idx = 0
+                # This is where the weights are assigned to the new ones
                 for idx, weight_matrix in enumerate(weights):
-                    if self.num_layer == 1:
-                        # In case of dense layers with a single layer, the RNN procedure can be applied
-                        flattened = weight_matrix.flatten()
-                    else:
-                        # In case of multiple layers, a function aggregator has to be applied first.
-                        # possible aggregators are: Mean, Transformation, Spektral analysis
-                        pass
-                    new_weights = y[global_idx:global_idx + flattened.shape[0]]
+                    # UPDATE THE WEIGHTS
+                    flattened = weight_matrix.flatten()
+                    new_weights = y[global_idx:global_idx + flattened.shape[0], 0]
                     weights[idx] = np.reshape(new_weights, weight_matrix.shape)
                     global_idx += flattened.shape[0]
-                losses.append(self.mean_sqrd_error(y.flatten(), self.get_weights_flat()))
+                losses.append(self.mean_sqrd_error(y[:, 0].flatten(), self.get_weights_flat()))
                 self.set_weights(weights)
                 bar.postfix[1]["value"] = losses[-1]
                 bar.update()
         return losses


 if __name__ == '__main__':
     features, cells, layers = 2, 2, 2
     use_recurrent = False
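The input matrix assembled in this fit() loop pairs each weight with a position signal; note the np.apply_along_axis call is equivalent to dividing np.arange by the cell count directly. A compact sketch of the encoding and of how the prediction's first column becomes the next weight vector (numpy only, names hypothetical):

    import numpy as np

    def encode_weights(flat_weights, num_cells):
        # Column 0: current weight values.
        # Column 1: each weight's global index, scaled by the cell count,
        # so the network can distinguish otherwise identical weight values.
        idx = np.arange(flat_weights.shape[0]) / num_cells
        return np.concatenate((flat_weights[..., None], idx[..., None]), axis=1)

    w = np.random.randn(6)               # 6 weights
    x = encode_weights(w, num_cells=2)   # shape (6, 2)
    # After y = model.predict(x), y[:, 0] is read back as the new flat weight vector.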