Bug fixes: TaskingSoup can now have n entities per layer, where n is the size of the task input. The entity task input and the soup task input must be the same size.

Author: Si11ium
Date:   2019-07-14 17:29:45 +02:00
Parent: 9bbe5df2b2
Commit: de85f45e6b

4 changed files with 164 additions and 87 deletions
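The sizing rule the commit message describes, as a minimal standalone sketch (plain Python; the names mirror TaskingSoup.__init__ and _generate_model in the diff below, and the concrete values are illustrative):

    # Sizing rule introduced by this commit (values are illustrative).
    population_size = 20
    task_input_size = 4                             # e.g. TaskAdditionOfN(4) with input_shape=(4,)

    # Enforced by TaskingSoup.__init__ when safe=True:
    assert population_size % task_input_size == 0
    assert population_size % 2 == 0

    depth = population_size // task_input_size      # number of particle layers -> 5
    particles_per_layer = population_size // depth  # entities per layer -> 4, the task input size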

experiment.py

@@ -2,7 +2,7 @@ import os
 import time
 import dill
 from tqdm import tqdm
-import copy
+from copy import copy
 from tensorflow.python.keras import backend as K
@@ -33,11 +33,11 @@ class Experiment(ABC):
         self.params = dict(exp_iterations=100, application_steps=100, prints=True, trains_per_application=100)
         self.with_params(**kwargs)

-    def __copy__(self):
-        self_copy = self.__class__(name=self.experiment_name, **self.params)
-        self_copy.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
-                              attr not in ['particles', 'historical_particles']}
+    def __copy__(self, *args, **kwargs):
+        params = copy(self.params)
+        params.update(name=self.experiment_name)
+        params.update(**kwargs)
+        self_copy = self.__class__(*args, **params)
         return self_copy

     def __enter__(self):
@@ -68,9 +68,14 @@ class Experiment(ABC):
             print(str(log_message), file=log_file)

     def without_particles(self):
-        self_copy = copy.copy(self)
-        # self_copy.particles = [particle.states for particle in self.particles]
-        self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
+        self_copy = copy(self)
+        # Check whether the attribute exists
+        if hasattr(self, 'historical_particles'):
+            # Check whether it is empty
+            if self.historical_particles:
+                # Do the update
+                # self_copy.particles = [particle.states for particle in self.particles]
+                self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
         return self_copy

     def save(self, **kwargs):
@@ -196,7 +201,7 @@ class TaskExperiment(MixedFixpointExperiment):
         kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
         super(TaskExperiment, self).__init__(**kwargs)

-    def run_exp(self, network_generator, logging=True, reset_model=False, **kwargs):
+    def run_exp(self, network_generator, reset_model=False, logging=True, **kwargs):
         kwargs.update(reset_model=False, logging=logging)
         super(FixpointExperiment, self).run_exp(network_generator, **kwargs)
         if reset_model:
@@ -247,10 +252,13 @@ class TaskingSoupExperiment(Experiment):
     def __init__(self, soup_generator, **kwargs):
         kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
-        self.soup_generator = soup_generator
         super(TaskingSoupExperiment, self).__init__(**kwargs)
+        self.soup_generator = soup_generator
+
+    def __copy__(self):
+        return super(TaskingSoupExperiment, self).__copy__(self.soup_generator)

-    def run_exp(self, network_generator, **kwargs):
+    def run_exp(self, **kwargs):
         for i in range(self.params.get('exp_iterations')):
             soup = self.soup_generator()
             soup.seed()
@@ -263,3 +271,7 @@ class TaskingSoupExperiment(Experiment):
     def run_net(self, net, **kwargs):
         raise NotImplementedError()
         pass
+
+
+if __name__ == '__main__':
+    pass

network.py

@@ -1,16 +1,17 @@
 # Libraries
 import numpy as np
 from abc import abstractmethod, ABC
-from typing import List, Union, Tuple
+from typing import List, Tuple
 from types import FunctionType
 import warnings
+import os

 # Functions and Operators
 from operator import mul
 from functools import reduce
 from itertools import accumulate
-from statistics import mean
+from copy import deepcopy
+from random import random as prng

 # Deep learning Framework
 from tensorflow.python.keras.models import Sequential
@@ -18,8 +19,8 @@ from tensorflow.python.keras.callbacks import Callback
 from tensorflow.python.keras.layers import SimpleRNN, Dense

 # Experiment Class
-from experiment import *
-from task import *
+from task import TaskAdditionOfN
+from experiment import TaskExperiment

 # Suppress warnings and info messages
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
@@ -131,8 +132,11 @@ class NeuralNetwork(ABC):
     def get_amount_of_weights(self):
         return self.get_weight_amount(self.get_weights())

+    def get_model(self):
+        return self.model
+
     def get_weights(self) -> List[np.ndarray]:
-        return self.model.get_weights()
+        return self.get_model().get_weights()

     def get_weights_flat(self) -> np.ndarray:
         return self.weights_to_flat_array(self.get_weights())
@@ -163,7 +167,7 @@ class NeuralNetwork(ABC):
         assert degree >= 1, "degree must be >= 1"
         epsilon = epsilon or self.get_params().get('epsilon')

-        new_weights = copy.deepcopy(self.get_weights())
+        new_weights = deepcopy(self.get_weights())
         for _ in range(degree):
             new_weights = self.apply_to_weights(new_weights)
@@ -277,7 +281,7 @@ class ParticleDecorator:
         return self

-class TaskDecorator(ParticleTaskAdditionOf2):
+class TaskDecorator(TaskAdditionOfN):

     def __init__(self, network, **kwargs):
         super(TaskDecorator, self).__init__(**kwargs)
@@ -299,6 +303,7 @@ class TaskDecorator(ParticleTaskAdditionOf2):
         else:
             self_x, self_y = self.network.get_samples()

+        # Superclass = Task
         task_x, task_y = super(TaskDecorator, self).get_samples()

         amount_of_weights = self.network.get_amount_of_weights()
@@ -439,7 +444,7 @@ class AggregatingNeuralNetwork(NeuralNetwork):
         epsilon = epsilon or self.get_params().get('epsilon')

         old_aggregations, _ = self.get_aggregated_weights()
-        new_weights = copy.deepcopy(self.get_weights())
+        new_weights = deepcopy(self.get_weights())
         for _ in range(degree):
             new_weights = self.apply_to_weights(new_weights)
@@ -459,7 +464,7 @@ class RecurrentNeuralNetwork(NeuralNetwork):
     def __init__(self, width, depth, **kwargs):
         raise NotImplementedError
-        super().__init__(**kwargs)
+        super(RecurrentNeuralNetwork, self).__init__()
         self.features = 1
         self.width = width
         self.depth = depth
@@ -475,7 +480,7 @@ class RecurrentNeuralNetwork(NeuralNetwork):
     def apply_to_weights(self, old_weights):
         # build list from old weights
-        new_weights = copy.deepcopy(old_weights)
+        new_weights = deepcopy(old_weights)
         old_weights_list = []
         for layer_id, layer in enumerate(old_weights):
             for cell_id, cell in enumerate(layer):
@@ -532,7 +537,7 @@ class TrainingNeuralNetworkDecorator:
         return self

     def compile_model(self, **kwargs):
-        compile_params = copy.deepcopy(self.compile_params)
+        compile_params = deepcopy(self.compile_params)
         compile_params.update(kwargs)
         return self.network.model.compile(**compile_params)
@@ -599,7 +604,8 @@ if __name__ == '__main__':
     # WeightWise Neural Network
     with TaskExperiment().with_params(application_steps=10, trains_per_application=1000, exp_iterations=30) as exp:
         net_generator = lambda: TrainingNeuralNetworkDecorator(TaskDecorator(
-            WeightwiseNeuralNetwork(width=4, depth=3))).with_keras_params(activation='linear')
+            WeightwiseNeuralNetwork(width=2, depth=2))
+        ).with_keras_params(activation='linear')
         exp.run_exp(net_generator, reset_model=True)

     if False:
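The self-application paths in this file deep-copy the weight lists before calling apply_to_weights. A Keras-free sketch of the aliasing this guards against (the shapes are made up; only the copy semantics matter):

    from copy import copy, deepcopy
    import numpy as np

    weights = [np.ones((2, 2)), np.ones((2,))]

    shallow = copy(weights)      # new list, but the arrays are shared
    shallow[0] += 1.0            # in-place update also hits weights[0]
    print(weights[0][0, 0])      # 2.0 -- the original was mutated

    weights = [np.ones((2, 2)), np.ones((2,))]
    deep = deepcopy(weights)     # the arrays are copied as well
    deep[0] += 1.0               # only the copy changes
    print(weights[0][0, 0])      # 1.0 -- the original is intact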

soup.py

@@ -1,15 +1,55 @@
 import random

 from tensorflow.python.keras.layers import Dense, Dropout, BatchNormalization
+from tensorflow.python.keras.layers import Input, Layer, Concatenate, RepeatVector, Reshape
+from tensorflow.python.keras.models import Sequential, Model
 from tensorflow.python.keras import backend as K
-from network import *
+from typing import List, Tuple
+
+# Functions and Operators
+from operator import mul
+from functools import reduce
+from itertools import accumulate
+
+import numpy as np
+
+from task import Task, TaskAdditionOfN
+from copy import copy, deepcopy
+from network import ParticleDecorator, WeightwiseNeuralNetwork, TrainingNeuralNetworkDecorator, \
+    EarlyStoppingByInfNanLoss
+from experiment import TaskingSoupExperiment
 from math import sqrt


 def prng():
     return random.random()


+class SlicingLayer(Layer):
+
+    def __init__(self):
+        self.kernel: None
+        self.inputs: int
+        super(SlicingLayer, self).__init__()
+
+    def build(self, input_shape):
+        # Create a trainable weight variable for this layer.
+        self.kernel = None
+        self.inputs = input_shape[-1]
+        super(SlicingLayer, self).build(input_shape)  # Be sure to call this at the end
+
+    def call(self, x, **kwargs):
+        concats = [Concatenate()([x[:, i][..., None]] * self.inputs) for i in range(x.shape[-1].value)]
+        return concats
+
+    def compute_output_shape(self, input_shape):
+        return [Concatenate()([(None, 1)] * 4) for _ in range(input_shape[-1])]
+
+
 class Soup(object):

     def __init__(self, size, generator, **kwargs):
@@ -23,6 +63,9 @@ class Soup(object):
         self.is_seeded = False
         self.is_compiled = False

+    def __len__(self):
+        return len(self.particles)
+
     def __copy__(self):
         copy_ = Soup(self.size, self.generator, **self.soup_params)
         copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
@@ -30,7 +73,7 @@ class Soup(object):
         return copy_

     def without_particles(self):
-        self_copy = copy.copy(self)
+        self_copy = copy(self)
         # self_copy.particles = [particle.states for particle in self.particles]
         self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
         return self_copy
@@ -120,7 +163,7 @@ class Soup(object):
             print(particle.is_fixpoint())


-class SolvingSoup(Soup):
+class TaskingSoup(Soup):

     @staticmethod
     def weights_to_flat_array(weights: List[np.ndarray]) -> np.ndarray:
@@ -138,12 +181,21 @@ class SolvingSoup(Soup):
         weights = [np.reshape(weight_slice, shape) for weight_slice, shape in zip(slices, shapes)]
         return weights

-    def __init__(self, population_size: int, task: Task, particle_generator, **kwargs):
-        super(SolvingSoup, self).__init__(population_size, particle_generator, **kwargs)
+    def __init__(self, population_size: int, task: Task, particle_generator, sparsity_rate=0.1, use_bias=False,
+                 safe=True, **kwargs):
+        if safe:
+            input_shape_error_message = f'The population size must be divisible by {task.input_shape[-1]}'
+            assert population_size % task.input_shape[-1] == 0, input_shape_error_message
+            assert population_size % 2 == 0, 'The population size needs to be even'
+        super(TaskingSoup, self).__init__(population_size, particle_generator, **kwargs)
         self.task = task
         self.model: Sequential

-        self.network_params = dict(sparsity_rate=0.1, early_nan_stopping=True)
+        self.network_params = dict(sparsity_rate=sparsity_rate, early_nan_stopping=True, use_bias=use_bias,
+                                   depth=population_size // task.input_shape[-1])
+        self.network_params.update(kwargs.get('network_params', {}))
         self.compile_params = dict(loss='mse', optimizer='sgd')
         self.compile_params.update(kwargs.get('compile_params', {}))
@@ -151,25 +203,24 @@ class SolvingSoup(Soup):
         self.network_params.update(params)

     def _generate_model(self):
-        model = Sequential()
-        weights, last_weights = self.get_total_weight_amount(), 0
-        while weights:
-            n = int(sqrt(weights))
-            this_weights = sqrt(weights / n)
-            if not this_weights:
-                break
-            if not model.layers:
-                # First input layer
-                model.add(Dense(this_weights, activation='linear', input_shape=self.task.input_shape))
-            else:
-                # Intermediate layers
-                model.add(Dense(this_weights, activation='linear'))
-                self.model.add(BatchNormalization())
-                self.model.add(Dropout(rate=self.soup_params.get('sparsity_rate')))
-            weights -= this_weights * last_weights
-            last_weights = this_weights
-        # Last layer
-        model.add(Dense(self.task.output_shape))
+        particle_idx_list = list(range(len(self)))
+        particles_per_layer = len(self) // self.network_params.get('depth')
+        task_input = Input(self.task.input_shape, name='Task_Input')
+        # First layer, which is connected to the input layer and independently trainable / not trainable at all.
+        input_neurons = particles_per_layer * self.task.output_shape
+        x = Dense(input_neurons, use_bias=self.network_params.get('use_bias'))(task_input)
+        x = SlicingLayer()(x)
+
+        for layer_num in range(self.network_params.get('depth')):
+            # These need to be tensors, because the particles come as Keras models that are applicable to tensors
+            x = [self.particles[layer_num * particles_per_layer + i].get_model()(x[i]) for
+                 i in range(particles_per_layer)]
+            x = [RepeatVector(particles_per_layer)(x[i]) for i in range(particles_per_layer)]
+            x = [Reshape((particles_per_layer,))(x[i]) for i in range(particles_per_layer)]
+        x = Concatenate()(x)
+        x = Dense(self.task.output_shape, use_bias=self.network_params.get('use_bias'), activation='linear')(x)
+
+        model = Model(inputs=task_input, outputs=x)
         return model
@@ -183,15 +234,19 @@ class SolvingSoup(Soup):
         all_weights[1:-1] = weights
         self.set_weights(all_weights)

+    def get_intermediate_weights(self):
+        return self.get_weights()[1:-1]
+
     def seed(self):
-        super(SolvingSoup, self).seed()
         K.clear_session()
+        self.is_compiled = False
+        super(TaskingSoup, self).seed()
         self.model = self._generate_model()
         pass

     def compile_model(self, **kwargs):
         if not self.is_compiled:
-            compile_params = copy.deepcopy(self.compile_params)
+            compile_params = deepcopy(self.compile_params)
             compile_params.update(kwargs)
             return self.model.compile(**compile_params)
         else:
@@ -199,7 +254,7 @@ class SolvingSoup(Soup):
     def get_total_weight_amount(self):
         if self.is_seeded:
-            return sum([x.get_amount_of_weights for x in self.particles])
+            return sum([x.get_amount_of_weights() for x in self.particles])
         else:
             return 0
@@ -208,29 +263,37 @@ class SolvingSoup(Soup):
     def get_intermediate_shapes(self):
         weights = [x.shape for x in self.get_weights()]
-        return weights[1:-1]
+        return weights[2:-2] if self.network_params.get('use_bias') else weights[1:-1]

     def predict(self, x):
         return self.model.predict(x)

-    def evolve(self, **kwargs):
-        super(SolvingSoup, self).evolve(iterations=1)
+    def evolve(self, iterations=1):
+        for iteration in range(iterations):
+            super(TaskingSoup, self).evolve(iterations=1)
+            self.train_particles()

     def get_particle_weights(self):
         return np.concatenate([x.get_weights_flat() for x in self.particles])

+    def get_particle_input_shape(self):
+        if self.is_seeded:
+            return tuple([x if x else -1 for x in self.particles[0].get_model().input_shape])
+        else:
+            return -1
+
     def set_particle_weights(self, weights):
         particle_weight_shape = self.particles[0].shapes(self.particles[0].get_weights())
         sizes = [x.get_amount_of_weights() for x in self.particles]
         flat_weights = self.weights_to_flat_array(weights)
         slices = [flat_weights[x: y] for x, y in zip(accumulate([0] + sizes), accumulate(sizes))]
-        for particle, weight in zip((self.particles, slices)):
-            self.reshape_flat_array(weight, particle_weight_shape)
+        for particle, weight_slice in zip(self.particles, slices):
+            new_weights = self.reshape_flat_array(weight_slice, particle_weight_shape)
+            particle.set_weights(new_weights)
         return True

     def compiled(self, **kwargs):
         if not self.is_compiled:
+            self.seed()
             self.compile_model(**kwargs)
             self.is_compiled = True
         return self
@@ -259,27 +322,29 @@ class SolvingSoup(Soup):
                             batch_size=batchsize, callbacks=callbacks)
         return history.history['loss'][-1]

-    def train_at_particle_level(self):
+    def train_particles(self, **kwargs):
         self.compiled()
         weights = self.get_particle_weights()
         shaped_weights = self.reshape_flat_array(weights, self.get_intermediate_shapes())
         self.set_intermediate_weights(shaped_weights)
+        _ = self.train(**kwargs)  # This returns the loss values
+        new_weights = self.get_intermediate_weights()
+        self.set_particle_weights(new_weights)
         return


 if __name__ == '__main__':
     if True:
-        from task import TaskAdditionOf2
-        soup_generator = SolvingSoup(20, ParticleTaskAdditionOf2(), net_generator)
-        with SoupExperiment(soup_generator, name='solving_soup') as exp:
-            net_generator = lambda: TrainingNeuralNetworkDecorator(
-                WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-            )
-            exp.run_exp(net_generator)
-    if True:
+        from task import TaskAdditionOfN
+        net_generator = lambda: TrainingNeuralNetworkDecorator(
+            WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
+        )
+        soup_generator = lambda: TaskingSoup(20, TaskAdditionOfN(4), net_generator)
+        with TaskingSoupExperiment(soup_generator, name='solving_soup') as exp:
+            exp.run_exp(reset_model=False)
+
+    if False:
         soup_generator = lambda: Soup(10, net_generator).with_soup_params(remove_divergent=True, remove_zero=True)
         with SoupExperiment(soup_generator, name='soup') as exp:
             net_generator = lambda: TrainingNeuralNetworkDecorator(
@@ -293,7 +358,7 @@ if __name__ == '__main__':
         # .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
        # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()

-    if True:
+    if False:
         soup_generator = lambda: Soup(10, net_generator).with_soup_params(remove_divergent=True, remove_zero=True)
         with SoupExperiment(soup_generator, name='soup') as exp:
             net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
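For orientation on the new train_particles round trip (particle weights are flattened, loaded into the soup model's intermediate layers, trained, then written back), a minimal numpy-only sketch of the flatten/slice/reshape step; the helper logic mirrors weights_to_flat_array and reshape_flat_array from the diff above, while the layer shapes are made up:

    from itertools import accumulate
    import numpy as np

    def weights_to_flat_array(weights):
        return np.concatenate([w.flatten() for w in weights])

    def reshape_flat_array(flat, shapes):
        sizes = [int(np.prod(shape)) for shape in shapes]
        slices = [flat[x:y] for x, y in zip(accumulate([0] + sizes), accumulate(sizes))]
        return [np.reshape(s, shape) for s, shape in zip(slices, shapes)]

    shapes = [(4, 1), (1, 4), (4, 1)]            # hypothetical intermediate shapes
    weights = [np.random.randn(*s) for s in shapes]

    flat = weights_to_flat_array(weights)        # particle weights -> one flat vector
    restored = reshape_flat_array(flat, shapes)  # flat vector -> layer-shaped weights

    assert all(np.array_equal(a, b) for a, b in zip(weights, restored))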

task.py

@@ -1,12 +1,13 @@
 from abc import ABC, abstractmethod
 import numpy as np
-from typing import Tuple, List, Union
+from typing import Tuple


 class Task(ABC):
     def __init__(self, input_shape, output_shape, **kwargs):
+        assert any([x not in kwargs.keys() for x in ["input_shape", "output_shape"]]), 'Duplicated arguments were given'
         self.input_shape = input_shape
         self.output_shape = output_shape
         self.batchsize = kwargs.get('batchsize', 100)
@@ -15,24 +16,17 @@ class Task(ABC):
         raise NotImplementedError


-class ParticleTaskAdditionOf2(Task):
-    def __init__(self, **kwargs):
-        super(ParticleTaskAdditionOf2, self).__init__(input_shape=(4,), output_shape=(1, ), **kwargs)
+class TaskAdditionOfN(Task):
+    def __init__(self, n: int, input_shape=(4,), output_shape=1, **kwargs):
+        assert any([x not in kwargs.keys() for x in ["input_shape", "output_shape"]]), 'Duplicated arguments were given'
+        assert n <= input_shape[0], f'You cannot add more values (n={n}) than your input is long (in={input_shape}).'
+        kwargs.update(input_shape=input_shape, output_shape=output_shape)
+        super(TaskAdditionOfN, self).__init__(**kwargs)
+        self.n = n

     def get_samples(self) -> Tuple[np.ndarray, np.ndarray]:
         x = np.zeros((self.batchsize, *self.input_shape))
-        x[:, :2] = np.random.standard_normal((self.batchsize, 2)) * 0.5
+        x[:, :self.n] = np.random.standard_normal((self.batchsize, self.n)) * 0.5
         y = np.sum(x, axis=1)
         return x, y
-
-
-class SoupTask(Task):
-    def __init__(self, input_shape, output_shape):
-        super(SoupTask, self).__init__(input_shape, output_shape)
-        pass
-
-    def get_samples(self) -> Tuple[np.ndarray, np.ndarray]:
-        raise NotImplementedError
-
-# ToDo: Continue here.
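A short usage sketch of the new TaskAdditionOfN (assuming the class above and numpy imported as np): with n=2 and the default input_shape=(4,), only the first two columns are populated and the target is their sum.

    task = TaskAdditionOfN(n=2)                    # default input_shape=(4,), output_shape=1
    x, y = task.get_samples()

    print(x.shape, y.shape)                        # (100, 4) (100,) with the default batchsize=100
    assert np.allclose(y, x[:, :2].sum(axis=1))    # only the first n columns are non-zero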