Bug Resolved in Particle.is_zero()
Now at normal execution times
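
The diff below points at the causes: `WeightToolBox.are_within_bounds` compared `x > upper_bound` where `x < upper_bound` was meant and then tested the mask's `.size` instead of `.any()`, so `is_zero()` held for every network; the experiment loop handed `tqdm` a bare int instead of an iterable; and the Keras session was never cleared between runs. The commit folds `WeightToolBox` back into `NeuralNetwork`, fixes the bounds check, and adds `K.clear_session()` after each run, which is presumably what restores normal execution times.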
@@ -167,10 +167,11 @@ class SoupExperiment(Experiment):
         for i in range(soup_iterations):
             soup = soup_generator()
             soup.seed()
-            for _ in tqdm(exp_iterations):
+            for _ in tqdm(range(exp_iterations)):
                 soup.evolve()
             self.log(soup.count())
             self.save(soup=soup.without_particles())
+            K.clear_session()

     def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0, **kwargs):
         raise NotImplementedError
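
For illustration (not part of the commit): `tqdm` expects an iterable, so the old call with a bare int fails with a TypeError, which is why the loop now wraps the count in `range`.

from tqdm import tqdm

exp_iterations = 100
# tqdm(exp_iterations) raises TypeError: 'int' object is not iterable;
# wrapping the count in range() gives tqdm the iterable it expects.
for _ in tqdm(range(exp_iterations)):
    pass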
code/network.py (146 changed lines)
@@ -3,6 +3,9 @@ from abc import abstractmethod, ABC
 from typing import List, Union, Tuple
 from types import FunctionType

+from operator import mul
+from functools import reduce
+
 from tensorflow.python.keras.models import Sequential
 from tensorflow.python.keras.callbacks import Callback
 from tensorflow.python.keras.layers import SimpleRNN, Dense
@@ -27,17 +30,10 @@ class SaveStateCallback(Callback):
         return


-class WeightToolBox:
-
-    def __init__(self):
-        """
-        Weight class, for easy manipulation of weight vectors from Keras models
-        """
-
-        # TODO: implement a way to access the cells directly
-        # self.cells = len(self)
-        # TODO: implement a way to access the weights directly
-        # self.weights = self.to_flat_array() ?
+class NeuralNetwork(ABC):
+    """
+    This is the Base Network Class, including abstract functions that must be implemented.
+    """

     @staticmethod
     def max(weights: List[np.ndarray]):
@@ -48,11 +44,15 @@ class WeightToolBox:
         return np.average(weights)

     @staticmethod
-    def weight_amount(weights: List[np.ndarray]):
-        return np.sum([x.size for x in weights])
+    def are_weights_diverged(weights: List[np.ndarray]) -> bool:
+        return any([any((np.isnan(x).any(), np.isinf(x).any())) for x in weights])

     @staticmethod
-    def len(weights: List[np.ndarray]):
+    def are_weights_within_bounds(weights: List[np.ndarray], lower_bound: float, upper_bound: float) -> bool:
+        return any([((lower_bound < x) & (x < upper_bound)).any() for x in weights])
+
+    @staticmethod
+    def weight_amount(weights: List[np.ndarray]):
         return sum([x.size for x in weights])

     @staticmethod
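
A minimal sketch (not repository code) of why the replacement matters: the old `are_within_bounds` inverted the upper-bound comparison and tested the mask's `.size`, which counts elements regardless of their truth value.

import numpy as np

weights = [np.array([0.5, -0.5])]  # clearly not a "zero" network

# old check: (x > upper_bound) is inverted, and .size is the element count,
# so the sum is nonzero and the result is always True
old = bool(sum([((-0.1 < x) & (x > 0.1)).size for x in weights]))

# new check: correct comparison, and .any() inspects the mask's values
new = any([((-0.1 < x) & (x < 0.1)).any() for x in weights])

print(old, new)  # True False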
@@ -64,50 +64,23 @@ class WeightToolBox:
         return len(weights)

     def repr(self, weights: List[np.ndarray]):
-        return f'Weights({self.to_flat_array(weights).tolist()})'
+        return f'Weights({self.weights_to_flat_array(weights).tolist()})'

     @staticmethod
-    def to_flat_array(weights: List[np.ndarray]) -> np.ndarray:
-        return np.hstack([weight.flatten() for weight in weights])
+    def weights_to_flat_array(weights: List[np.ndarray]) -> np.ndarray:
+        return np.concatenate([d.ravel() for d in weights])

     @staticmethod
-    def reshape_flat_array(array, shapes) -> List[np.ndarray]:
+    def reshape_flat_array(array, shapes: List[Tuple[int]]) -> List[np.ndarray]:
         sizes: List[int] = [int(np.prod(shape)) for shape in shapes]

+        sizes = [reduce(mul, shape) for shape in shapes]
         # Split the incoming array into slices for layers
         slices = [array[x: y] for x, y in zip(np.cumsum([0] + sizes), np.cumsum([0] + sizes)[1:])]
         # reshape them in accordance to the given shapes
         weights = [np.reshape(weight_slice, shape) for weight_slice, shape in zip(slices, shapes)]
         return weights

-    def reshape_flat_array_like(self, array, weights: List[np.ndarray]) -> List[np.ndarray]:
-        return self.reshape_flat_array(array, self.shapes(weights))
-
-    def shuffle_weights(self, weights: List[np.ndarray]):
-        flat = self.to_flat_array(weights)
-        np.random.shuffle(flat)
-        return self.reshape_flat_array_like(flat, weights)
-
-    @staticmethod
-    def are_diverged(weights: List[np.ndarray]) -> bool:
-        return any([np.isnan(x).any() for x in weights]) or any([np.isinf(x).any() for x in weights])
-
-    @staticmethod
-    def are_within_bounds(weights: List[np.ndarray], lower_bound: float, upper_bound: float) -> bool:
-        return bool(sum([((lower_bound < x) & (x > upper_bound)).size for x in weights]))
-
-    def aggregate_weights_by(self, weights: List[np.ndarray], func: FunctionType, num_aggregates: int):
-        collection_sizes = self.len(weights) // num_aggregates
-        weights = self.to_flat_array(weights)[:collection_sizes * num_aggregates].reshape((num_aggregates, -1))
-        aggregated_weights = func(weights, num_aggregates)
-        left_overs = self.to_flat_array(weights)[collection_sizes * num_aggregates:]
-        return aggregated_weights, left_overs
-
-
-class NeuralNetwork(ABC):
-    """
-    This is the Base Network Class, including abstract functions that must be implemented.
-    """
-
     def __init__(self, **params):
         super().__init__()
         self.params = dict(epsilon=0.00000000000001)
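
As a sanity check (a standalone sketch, not repository code), the renamed `weights_to_flat_array` and the `reduce(mul, ...)`-based `reshape_flat_array` invert each other:

from functools import reduce
from operator import mul
import numpy as np

def weights_to_flat_array(weights):
    return np.concatenate([d.ravel() for d in weights])

def reshape_flat_array(array, shapes):
    sizes = [reduce(mul, shape) for shape in shapes]
    # split the flat array into one slice per layer, then restore each shape
    slices = [array[x: y] for x, y in zip(np.cumsum([0] + sizes), np.cumsum([0] + sizes)[1:])]
    return [np.reshape(s, shape) for s, shape in zip(slices, shapes)]

layers = [np.arange(6.0).reshape(2, 3), np.arange(3.0)]  # a 2x3 kernel and a bias
flat = weights_to_flat_array(layers)                     # shape (9,)
restored = reshape_flat_array(flat, [w.shape for w in layers])
assert all((a == b).all() for a, b in zip(layers, restored))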
@@ -131,25 +104,21 @@ class NeuralNetwork(ABC):
         self.keras_params.update(kwargs)
         return self

+    def print_weights(self, weights=None):
+        print(self.repr(weights or self.get_weights()))
+
     def get_weights(self) -> List[np.ndarray]:
         return self.model.get_weights()

     def get_weights_flat(self) -> np.ndarray:
-        return weightToolBox.to_flat_array(self.get_weights())
+        return self.weights_to_flat_array(self.get_weights())

+    def reshape_flat_array_like(self, array, weights: List[np.ndarray]) -> List[np.ndarray]:
+        return self.reshape_flat_array(array, self.shapes(weights))
+
     def set_weights(self, new_weights: List[np.ndarray]):
         return self.model.set_weights(new_weights)

-    @abstractmethod
-    def get_samples(self):
-        # TODO: add a docstring telling the user what this does, e.g. what is a sample?
-        raise NotImplementedError
-
-    @abstractmethod
-    def apply_to_weights(self, old_weights) -> List[np.ndarray]:
-        # TODO: add a docstring telling the user what this does, e.g. what is applied?
-        raise NotImplementedError
-
     def apply_to_network(self, other_network) -> List[np.ndarray]:
         # TODO: add a docstring telling the user what this does, e.g. what is applied?
         new_weights = self.apply_to_weights(other_network.get_weights())
@@ -177,11 +146,11 @@ class NeuralNetwork(ABC):
         return self.attack(new_other_network)

     def is_diverged(self):
-        return weightToolBox.are_diverged(self.get_weights())
+        return self.are_weights_diverged(self.get_weights())

     def is_zero(self, epsilon=None):
         epsilon = epsilon or self.get_params().get('epsilon')
-        return weightToolBox.are_within_bounds(self.get_weights(), -epsilon, epsilon)
+        return self.are_weights_within_bounds(self.get_weights(), -epsilon, epsilon)

     def is_fixpoint(self, degree: int = 1, epsilon: float = None) -> bool:
         assert degree >= 1, "degree must be >= 1"
@@ -191,17 +160,38 @@ class NeuralNetwork(ABC):

         for _ in range(degree):
             new_weights = self.apply_to_weights(new_weights)
-            if weightToolBox.are_diverged(new_weights):
+            if self.are_weights_diverged(new_weights):
                 return False

-        biggerEpsilon = (np.abs(weightToolBox.to_flat_array(new_weights) - weightToolBox.to_flat_array(self.get_weights()))
-                         >= epsilon).any()
+        flat_new = self.weights_to_flat_array(new_weights)
+        flat_old = self.weights_to_flat_array(self.get_weights())
+        biggerEpsilon = (np.abs(flat_new - flat_old) >= epsilon).any()

         # Boolean Value needs to be flipped to answer "is_fixpoint"
         return not biggerEpsilon

-    def print_weights(self, weights=None):
-        print(weightToolBox.repr(weights or self.get_weights()))
+    def aggregate_weights_by(self, weights: List[np.ndarray], func: FunctionType, num_aggregates: int):
+        collection_sizes = self.len(weights) // num_aggregates
+        flat = self.weights_to_flat_array(weights)
+        weights = flat[:collection_sizes * num_aggregates].reshape((num_aggregates, -1))
+        left_overs = flat[collection_sizes * num_aggregates:]
+        aggregated_weights = func(weights, num_aggregates)
+        return aggregated_weights, left_overs
+
+    def shuffle_weights(self, weights: List[np.ndarray]):
+        flat = self.weights_to_flat_array(weights)
+        np.random.shuffle(flat)
+        return self.reshape_flat_array_like(flat, weights)
+
+    @abstractmethod
+    def get_samples(self):
+        # TODO: add a docstring telling the user what this does, e.g. what is a sample?
+        raise NotImplementedError
+
+    @abstractmethod
+    def apply_to_weights(self, old_weights) -> List[np.ndarray]:
+        # TODO: add a docstring telling the user what this does, e.g. what is applied?
+        raise NotImplementedError


 class ParticleDecorator:
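
To illustrate the data flow of the relocated `aggregate_weights_by` (values below are invented, and `np.average` over axis 1 stands in for the aggregator the caller passes as `func`):

import numpy as np

flat = np.arange(10.0)                                   # stand-in for the flattened weights
num_aggregates = 4
collection_size = flat.size // num_aggregates            # 2 weights per aggregate

groups = flat[:collection_size * num_aggregates].reshape((num_aggregates, -1))
left_overs = flat[collection_size * num_aggregates:]     # weights that do not divide evenly

aggregates = np.average(groups, axis=1)
print(aggregates, left_overs)                            # [0.5 2.5 4.5 6.5] [8. 9.]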
@@ -281,10 +271,10 @@ class WeightwiseNeuralNetwork(NeuralNetwork):
     def apply_to_weights(self, weights) -> List[np.ndarray]:
         # ToDo: Insert DocString
         # Transform the weight matrix into a horizontal stack, as: array([[weight, layer, cell, position], ...])
-        transformed_weights = self.get_samples(weights)[0]
+        transformed_weights, _ = self.get_samples(weights)
         new_flat_weights = self.apply(transformed_weights)
         # use the original weight shape to transform the new tensor
-        return weightToolBox.reshape_flat_array_like(new_flat_weights, weights)
+        return self.reshape_flat_array_like(new_flat_weights, weights)


 class AggregatingNeuralNetwork(NeuralNetwork):
@@ -306,8 +296,7 @@ class AggregatingNeuralNetwork(NeuralNetwork):

     @staticmethod
     def deaggregate_identically(aggregate, amount):
-        # ToDo: Find a better way than using a hardcoded [0]
-        return np.hstack([aggregate for _ in range(amount)])[0]
+        return np.repeat(aggregate, amount, axis=0)

     @staticmethod
     def shuffle_not(weights: List[np.ndarray]):
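
`np.repeat` replaces the hstack-and-index trick; for illustration (inputs invented):

import numpy as np

aggregates = np.array([[0.5], [2.5]])
# each aggregate is spread back over its whole collection, in order
print(np.repeat(aggregates, 3, axis=0).ravel())  # [0.5 0.5 0.5 2.5 2.5 2.5]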
@@ -321,9 +310,8 @@ class AggregatingNeuralNetwork(NeuralNetwork):
         """
         return weights

-    @staticmethod
-    def shuffle_random(weights: List[np.ndarray]):
-        weights = weightToolBox.shuffle_weights(weights)
+    def shuffle_random(self, weights: List[np.ndarray]):
+        weights = self.shuffle_weights(weights)
         return weights

     def __init__(self, aggregates, width, depth, **kwargs):
@@ -347,14 +335,14 @@ class AggregatingNeuralNetwork(NeuralNetwork):
         return self.params.get('shuffler', self.shuffle_not)

     def get_amount_of_weights(self):
-        return weightToolBox.weight_amount(self.get_weights())
+        return self.weight_amount(self.get_weights())

     def apply(self, inputs):
         # You need to add a dimension here... "..." copies array values
         return self.model.predict(inputs[None, ...])

     def get_aggregated_weights(self):
-        return weightToolBox.aggregate_weights_by(self.get_weights(), self.get_aggregator(), self.aggregates)
+        return self.aggregate_weights_by(self.get_weights(), self.get_aggregator(), self.aggregates)

     def apply_to_weights(self, old_weights) -> List[np.ndarray]:

@@ -367,8 +355,8 @@ class AggregatingNeuralNetwork(NeuralNetwork):
         new_aggregations = self.deaggregate_identically(new_aggregations, collection_sizes)
         # generate new weights
         # only include leftovers if there are some, then convert them to Weights based on the old shape
         complete_weights = new_aggregations if not leftovers.shape[0] else np.hstack((new_aggregations, leftovers))
-        new_weights = weightToolBox.reshape_flat_array_like(complete_weights, old_weights)
+        new_weights = self.reshape_flat_array_like(complete_weights, old_weights)

         # maybe shuffle
         new_weights = self.get_shuffler()(new_weights)
@@ -389,7 +377,7 @@ class AggregatingNeuralNetwork(NeuralNetwork):

         for _ in range(degree):
             new_weights = self.apply_to_weights(new_weights)
-            if weightToolBox.are_diverged(new_weights):
+            if self.are_weights_diverged(new_weights):
                 return False

         new_aggregations, leftovers = self.get_aggregated_weights()
@@ -505,8 +493,6 @@ class TrainingNeuralNetworkDecorator:
         return history.history['loss'][-1]


-weightToolBox = WeightToolBox()
-
 if __name__ == '__main__':

     if False:
@@ -518,7 +504,7 @@ if __name__ == '__main__':
         exp.run_exp(net_generator, 10, logging=True)
         exp.reset_all()

-    if False:
+    if True:
         # Aggregating Neural Network
         net_generator = lambda: ParticleDecorator(
             AggregatingNeuralNetwork(aggregates=4, width=2, depth=2
@@ -29,3 +29,4 @@ if __name__ == '__main__':
     # or soup.historical_particles[particle_uid].states[time_step]['weights']
     # from soup.dill
     exp.save(soup=soup.without_particles())
+    K.clear_session()
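
In graph-mode Keras, every model built in the loop is added to the same default graph until the session is cleared, so iteration time grows run over run. A usage sketch of the pattern the commit adopts (loop body elided):

from tensorflow.python.keras import backend as K

for run in range(3):
    # ... build and evolve a fresh soup of models ...
    # dropping the accumulated graph keeps per-iteration cost constant
    K.clear_session()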
code/soup.py (43 changed lines)
@@ -67,7 +67,6 @@ class Soup(object):
                 description['action'] = 'learn_from'
                 description['counterpart'] = other_particle.get_uid()
                 for _ in range(self.params.get('train', 0)):
-                    particle.compiled()
                     # callbacks on save_state are broken for TrainingNeuralNetwork
                     loss = particle.train(store_states=False)
                 description['fitted'] = self.params.get('train', 0)
@@ -110,28 +109,30 @@ class Soup(object):

 if __name__ == '__main__':
     if True:
-        net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-        soup_generator = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True)
-        exp = SoupExperiment()
-        exp.run_exp(net_generator, 10, soup_generator, 1, False)
+        with SoupExperiment(name='soup') as exp:
+            net_generator = lambda: TrainingNeuralNetworkDecorator(
+                WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+            )
+            soup_generator = lambda: Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True)
+            exp.run_exp(net_generator, 10, soup_generator, 1, False)

         # net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
         # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
         #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
         # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()

     if True:
-        net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
-            .with_keras_params(activation='linear').with_params(epsilon=0.0001)
-        soup_generator = lambda: Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
-        exp = SoupExperiment(name="soup")
+        with SoupExperiment(name='soup') as exp:
+            net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
+                .with_keras_params(activation='linear').with_params(epsilon=0.0001)
+            soup_generator = lambda: Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)

             exp.run_exp(net_generator, 10, soup_generator, 1, False)

         # net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
         #     .with_keras_params(activation='linear')\
         #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
         # net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
         #     .with_keras_params(activation='linear')\
         #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
         # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
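
The `__main__` blocks now run inside `with SoupExperiment(name='soup') as exp:`, which relies on `Experiment` being a context manager. A hypothetical minimal shape of that protocol (the real setup and teardown live in the project's `Experiment` class):

class Experiment:
    def __init__(self, name='experiment'):
        self.name = name

    def __enter__(self):
        # prepare output directories / logging for this run
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # tear down and persist results, even if the run raised
        return False  # do not swallow exceptions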