Refactor:
Step 4 - Aggregating Neural Networks
Step 5 - Training Neural Networks
@@ -4,48 +4,48 @@ import dill
 from tqdm import tqdm
 import copy
 
+from abc import ABC, abstractmethod
 
 
-class Experiment:
+class _BaseExperiment(ABC):
 
     @staticmethod
     def from_dill(path):
         with open(path, "rb") as dill_file:
             return dill.load(dill_file)
 
     def __init__(self, name=None, ident=None):
-        self.experiment_id = '{}_{}'.format(ident or '', time.time())
+        self.experiment_id = f'{ident or ""}_{time.time()}'
         self.experiment_name = name or 'unnamed_experiment'
         self.next_iteration = 0
-        self.log_messages = []
-        self.historical_particles = {}
+        self.log_messages = list()
+        self.historical_particles = dict()
 
     def __enter__(self):
-        self.dir = os.path.join('experiments', 'exp-{name}-{id}-{it}'.format(
-            name=self.experiment_name, id=self.experiment_id, it=self.next_iteration)
-        )
+        self.dir = os.path.join('experiments', f'exp-{self.experiment_name}-{self.experiment_id}-{self.next_iteration}')
         os.makedirs(self.dir)
-        print("** created {dir} **".format(dir=self.dir))
+        print(f'** created {self.dir} **')
         return self
 
     def __exit__(self, exc_type, exc_value, traceback):
         self.save(experiment=self.without_particles())
         self.save_log()
         self.next_iteration += 1
 
     def log(self, message, **kwargs):
         self.log_messages.append(message)
         print(message, **kwargs)
 
     def save_log(self, log_name="log"):
-        with open(os.path.join(self.dir, "{name}.txt".format(name=log_name)), "w") as log_file:
+        with open(os.path.join(self.dir, f"{log_name}.txt"), "w") as log_file:
             for log_message in self.log_messages:
                 print(str(log_message), file=log_file)
 
     def __copy__(self):
-        copy_ = Experiment(name=self.experiment_name,)
-        copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
-                          attr not in ['particles', 'historical_particles']}
-        return copy_
+        self_copy = self.__class__(name=self.experiment_name,)
+        self_copy.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
+                              attr not in ['particles', 'historical_particles']}
+        return self_copy
 
     def without_particles(self):
         self_copy = copy.copy(self)
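For orientation: the experiment class is a context manager. __enter__ creates a fresh experiments/exp-<name>-<id>-<iteration> directory and __exit__ pickles the experiment (minus particles) and flushes the log. A minimal usage sketch; the name and log message are illustrative:

    from experiment import Experiment

    with Experiment(name='demo') as exp:
        exp.log('starting run')   # printed and buffered
    # on exit: experiment.dill and log.txt are written to exp.dir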
@@ -55,14 +55,29 @@ class Experiment:
 
     def save(self, **kwargs):
         for name, value in kwargs.items():
-            with open(os.path.join(self.dir, "{name}.dill".format(name=name)), "wb") as dill_file:
+            with open(os.path.join(self.dir, f"{name}.dill"), "wb") as dill_file:
                 dill.dump(value, dill_file)
 
+    @abstractmethod
+    def run_net(self, network, iterations, run_id=0):
+        raise NotImplementedError
+        pass
+
+
+class Experiment(_BaseExperiment):
+
+    def __init__(self, **kwargs):
+        super(Experiment, self).__init__(**kwargs)
+        pass
+
+    def run_net(self, network, iterations, run_id=0):
+        pass
+
 
 class FixpointExperiment(Experiment):
 
     def __init__(self, **kwargs):
         kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
         super().__init__(**kwargs)
         self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
         self.interesting_fixpoints = []
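This hunk splits the old Experiment into an abstract _BaseExperiment, whose single abstract method is run_net, plus a concrete no-op Experiment. A hypothetical subclass only has to supply run_net; the body below is an illustrative sketch, not code from this repository:

    class MyExperiment(_BaseExperiment):

        def run_net(self, network, iterations, run_id=0):
            for _ in range(iterations):
                network.self_attack()   # any per-network training/evaluation step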
@@ -107,14 +122,14 @@ class MixedFixpointExperiment(FixpointExperiment):
             if run_id:
                 net.save_state()
             self.count(net)
 
 
 class SoupExperiment(Experiment):
     pass
 
 
 class IdentLearningExperiment(Experiment):
 
     def __init__(self):
         super(IdentLearningExperiment, self).__init__(name=self.__class__.__name__)
         pass
code/methods.py (deleted, 191 lines)
@@ -1,191 +0,0 @@
-import tensorflow as tf
-from keras.models import Sequential, Model
-from keras.layers import SimpleRNN, Dense
-from keras.layers import Input, TimeDistributed
-from tqdm import tqdm
-import time
-import os
-import dill
-
-from experiment import Experiment
-
-import itertools
-
-from typing import Union
-import numpy as np
-
-
-class Network(object):
-    def __init__(self, features, cells, layers, bias=False, recurrent=False):
-        self.features = features
-        self.cells = cells
-        self.num_layer = layers
-        bias_params = cells if bias else 0
-
-        # Recurrent network
-        if recurrent:
-            # First RNN
-            p_layer_1 = (self.features * self.cells + self.cells ** 2 + bias_params)
-            # All other RNN Layers
-            p_layer_n = (self.cells * self.cells + self.cells ** 2 + bias_params) * (self.num_layer - 1)
-        else:
-            # First Dense
-            p_layer_1 = (self.features * self.cells + bias_params)
-            # All other Dense Layers
-            p_layer_n = (self.cells * self.cells + bias_params) * (self.num_layer - 1)
-        # Final Dense
-        p_layer_out = self.features * self.cells + bias_params
-        self.parameters = np.sum([p_layer_1, p_layer_n, p_layer_out])
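A worked example of the parameter count, with the values used in __main__ below (features=2, cells=2, layers=2, dense, bias=False):

    features, cells, layers = 2, 2, 2
    p_layer_1 = features * cells                 # first Dense:       2 * 2 = 4
    p_layer_n = (cells * cells) * (layers - 1)   # remaining hidden:  2 * 2 * 1 = 4
    p_layer_out = features * cells               # final Dense:       2 * 2 = 4
    assert p_layer_1 + p_layer_n + p_layer_out == 12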
-        # Build network
-        cell = SimpleRNN if recurrent else Dense
-        self.inputs, x = Input(shape=(self.parameters // self.features,
-                                      self.features) if recurrent else (self.features,)), None
-
-        for layer in range(self.num_layer):
-            if recurrent:
-                x = SimpleRNN(self.cells, activation=None, use_bias=False,
-                              return_sequences=True)(self.inputs if layer == 0 else x)
-            else:
-                x = Dense(self.cells, activation=None, use_bias=False,
-                          )(self.inputs if layer == 0 else x)
-        self.outputs = Dense(self.features if recurrent else 1, activation=None, use_bias=False)(x)
-        print('Network initialized, i haz {p} params @:{e}Features: {f}{e}Cells: {c}{e}Layers: {l}'.format(
-            p=self.parameters, l=self.num_layer, c=self.cells, f=self.features, e='\n{}'.format(' ' * 5))
-        )
-        pass
-
-    def get_inputs(self):
-        return self.inputs
-
-    def get_outputs(self):
-        return self.outputs
-
-
-class _BaseNetwork(Model):
-
-    def __init__(self, **kwargs):
-        super(_BaseNetwork, self).__init__(**kwargs)
-        # This is dirty
-        self.features = None
-
-    def get_weights_flat(self):
-        weights = super().get_weights()
-        flat = np.asarray(np.concatenate([x.flatten() for x in weights]))
-        return flat
-
-    def step(self, x):
-        pass
-
-    def step_other(self, other: Union[Sequential, Model]) -> bool:
-        pass
-
-    def get_parameter_count(self):
-        return np.sum([np.prod(x.shape) for x in self.get_weights()])
-
-    def train_on_batch(self, *args, **kwargs):
-        raise NotImplementedError
-
-    def compile(self, *args, **kwargs):
-        raise NotImplementedError
-
-    @staticmethod
-    def mean_abs_error(labels, predictions):
-        return np.mean(np.abs(predictions - labels), axis=-1)
-
-    @staticmethod
-    def mean_sqrd_error(labels, predictions):
-        return np.mean(np.square(predictions - labels), axis=-1)
-
-
-class RecurrentNetwork(_BaseNetwork):
-    def __init__(self, network: Network, *args, **kwargs):
-        super().__init__(inputs=network.inputs, outputs=network.outputs)
-        self.features = network.features
-        self.parameters = network.parameters
-        assert self.parameters == self.get_parameter_count()
-
-    def step(self, x):
-        shaped = np.reshape(x, (1, -1, self.features))
-        return self.predict(shaped).flatten()
-
-    def fit(self, epochs=500, **kwargs):
-        losses = []
-        with tqdm(total=epochs, ascii=True,
-                  desc='Type: {t}'. format(t=self.__class__.__name__),
-                  postfix=["Loss", dict(value=0)]) as bar:
-            for _ in range(epochs):
-                x = self.get_weights_flat()
-                y = self.step(x)
-                weights = self.get_weights()
-                global_idx = 0
-                for idx, weight_matrix in enumerate(weights):
-                    flattened = weight_matrix.flatten()
-                    new_weights = y[global_idx:global_idx + flattened.shape[0]]
-                    weights[idx] = np.reshape(new_weights, weight_matrix.shape)
-                    global_idx += flattened.shape[0]
-                losses.append(self.mean_sqrd_error(y.flatten(), self.get_weights_flat()))
-                self.set_weights(weights)
-                bar.postfix[1]["value"] = losses[-1]
-                bar.update()
-        return losses
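RecurrentNetwork.fit trains without gradients: each epoch feeds the flattened weight vector through the network and writes the prediction back as the new weights; the recorded loss is the squared distance between the output and the current weights, i.e. how far the net is from being a fixpoint of itself. The write-back loop is equivalent to this standalone numpy sketch:

    import numpy as np

    def write_back(weights, y):
        # scatter the flat output vector y back into the weight-matrix shapes
        global_idx = 0
        for idx, weight_matrix in enumerate(weights):
            n = weight_matrix.size
            weights[idx] = np.reshape(y[global_idx:global_idx + n], weight_matrix.shape)
            global_idx += n
        return weights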
-
-
-class FeedForwardNetwork(_BaseNetwork):
-    def __init__(self, network:Network, **kwargs):
-        super().__init__(inputs=network.inputs, outputs=network.outputs, **kwargs)
-        self.features = network.features
-        self.parameters = network.parameters
-        self.num_layer = network.num_layer
-        self.num_cells = network.cells
-        # assert self.parameters == self.get_parameter_count()
-
-    def step(self, x):
-        return self.predict(x)
-
-    def step_other(self, x):
-        return self.predict(x)
-
-    def fit(self, epochs=500, **kwargs):
-        losses = []
-        with tqdm(total=epochs, ascii=True,
-                  desc='Type: {t} @ Epoch:'. format(t=self.__class__.__name__),
-                  postfix=["Loss", dict(value=0)]) as bar:
-            for _ in range(epochs):
-                all_weights = self.get_weights_flat()
-                cell_idx = np.apply_along_axis(lambda x: x/self.num_cells, 0, np.arange(int(self.get_parameter_count())))
-                xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)
-
-                y = self.step(xc)
-
-                weights = self.get_weights()
-                global_idx = 0
-
-                for idx, weight_matrix in enumerate(weights):
-
-                    # UPDATE THE WEIGHTS
-                    flattened = weight_matrix.flatten()
-                    new_weights = y[global_idx:global_idx + flattened.shape[0], 0]
-                    weights[idx] = np.reshape(new_weights, weight_matrix.shape)
-                    global_idx += flattened.shape[0]
-
-                losses.append(self.mean_sqrd_error(y[:, 0].flatten(), self.get_weights_flat()))
-                self.set_weights(weights)
-                bar.postfix[1]["value"] = losses[-1]
-                bar.update()
-        return losses
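FeedForwardNetwork.fit feeds one (weight value, position code) pair per parameter: cell_idx is simply np.arange(n) / num_cells, so the input xc has shape (n, 2), and only column 0 of the prediction is written back as the new weights. The construction is equivalent to:

    import numpy as np

    n = 12                        # parameter count from the worked example above
    all_weights = np.zeros(n)     # stand-in for get_weights_flat()
    cell_idx = np.arange(n) / 2   # num_cells = 2
    xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)
    assert xc.shape == (n, 2)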
-
-
-if __name__ == '__main__':
-    with Experiment() as exp:
-        features, cells, layers = 2, 2, 2
-        use_recurrent = False
-        if use_recurrent:
-            network = Network(features, cells, layers, recurrent=use_recurrent)
-            r = RecurrentNetwork(network)
-            loss = r.fit(epochs=10)
-            exp.save(rnet=r)
-        else:
-            network = Network(features, cells, layers, recurrent=use_recurrent)
-            ff = FeedForwardNetwork(network)
-            loss = ff.fit(epochs=10)
-            exp.save(ffnet=ff)
-        print(loss)
@@ -315,6 +315,7 @@ class AggregatingNeuralNetwork(NeuralNetwork):
     @staticmethod
     def aggregate_fft(array: np.ndarray, aggregates: int):
         flat = array.flatten()
+        # noinspection PyTypeChecker
         fft_reduction = np.fft.fftn(flat, aggregates)
         return fft_reduction
 
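aggregate_fft compresses the flattened weight vector into `aggregates` spectral coefficients; the added inspection hint silences the type checker because the size argument is passed as a plain int. A behaviorally similar 1-D sketch, using np.fft.fft, whose n= keyword is the documented way to request a fixed output length:

    import numpy as np

    flat = np.random.rand(12)     # flattened weights
    aggregates = 4
    fft_reduction = np.fft.fft(flat, n=aggregates)
    assert fft_reduction.shape == (aggregates,)   # complex coefficients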
@@ -542,7 +543,7 @@ if __name__ == '__main__':
     for run_id in tqdm(range(10)):
         net = ParticleDecorator(
             WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear'))
-        run_exp(net)
+        exp.run_exp(net)
         K.clear_session()
     exp.log(exp.counters)
 
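The call-site change run_exp(net) → exp.run_exp(net) indicates that the run loop now lives on the experiment object rather than as a module-level helper; the surrounding context suggests exp is a FixpointExperiment here, since exp.counters is logged right after.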
@@ -90,7 +90,7 @@ if __name__ == '__main__':
         for time in range(exp.soup_life):
             soup.evolve()
         count(counters, soup, notable_nets)
-        keras.backend.clear_session()
+        K.clear_session()
 
     xs += [learn_from_severity]
     ys += [float(counters['fix_zero']) / float(exp.trials)]
@@ -89,7 +89,7 @@ if __name__ == '__main__':
         for _ in range(exp.soup_life):
             soup.evolve()
         count(counters, soup, notable_nets)
-        keras.backend.clear_session()
+        K.clear_session()
 
     xs += [trains_per_selfattack]
     ys += [float(counters['fix_zero']) / float(exp.trials)]
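Both soup scripts now go through the same backend alias. This assumes the conventional import at the top of those files, which is not visible in these hunks:

    from keras import backend as K

    K.clear_session()   # drop the old TF graph between runs to keep memory bounded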
code/test.py (deleted, 111 lines)
@@ -1,111 +0,0 @@
-from experiment import *
-from network import *
-from soup import *
-import numpy as np
-
-
-class LearningNeuralNetwork(NeuralNetwork):
-
-    @staticmethod
-    def mean_reduction(weights, features):
-        single_dim_weights = np.hstack([w.flatten() for w in weights])
-        shaped_weights = np.reshape(single_dim_weights, (1, features, -1))
-        x = np.mean(shaped_weights, axis=-1)
-        return x
-
-    @staticmethod
-    def fft_reduction(weights, features):
-        single_dim_weights = np.hstack([w.flatten() for w in weights])
-        x = np.fft.fft(single_dim_weights, n=features)[None, ...]
-        return x
-
-    @staticmethod
-    def random_reduction(_, features):
-        x = np.random.rand(features)[None, ...]
-        return x
-
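The three reduction helpers map a list of weight matrices to a single feature row of length `features`. An illustrative shape check with two 2x2 matrices and features=4:

    import numpy as np

    weights = [np.ones((2, 2)), np.zeros((2, 2))]
    single_dim = np.hstack([w.flatten() for w in weights])           # shape (8,)
    mean_x = np.mean(np.reshape(single_dim, (1, 4, -1)), axis=-1)    # shape (1, 4)
    fft_x = np.fft.fft(single_dim, n=4)[None, ...]                   # shape (1, 4), complex
    rand_x = np.random.rand(4)[None, ...]                            # shape (1, 4)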
-    def __init__(self, width, depth, features, **kwargs):
-        raise DeprecationWarning
-        super().__init__(**kwargs)
-        self.width = width
-        self.depth = depth
-        self.features = features
-        self.compile_params = dict(loss='mse', optimizer='sgd')
-        self.model = Sequential()
-        self.model.add(Dense(units=self.width, input_dim=self.features, **self.keras_params))
-        for _ in range(self.depth - 1):
-            self.model.add(Dense(units=self.width, **self.keras_params))
-        self.model.add(Dense(units=self.features, **self.keras_params))
-        self.model.compile(**self.compile_params)
-
-    def apply_to_weights(self, old_weights, **kwargs):
-        reduced = kwargs.get('reduction', self.fft_reduction)()
-        raise NotImplementedError
-        # build aggregations from old_weights
-        weights = self.get_weights_flat()
-
-        # call network
-        old_aggregation = self.aggregate_fft(weights, self.aggregates)
-        new_aggregation = self.apply(old_aggregation)
-
-        # generate list of new weights
-        new_weights_list = self.deaggregate_identically(new_aggregation, self.get_amount_of_weights())
-
-        new_weights_list = self.get_shuffler()(new_weights_list)
-
-        # write back new weights
-        new_weights = self.fill_weights(old_weights, new_weights_list)
-
-        # return results
-        if self.params.get("print_all_weight_updates", False) and not self.is_silent():
-            print("updated old weight aggregations " + str(old_aggregation))
-            print("to new weight aggregations " + str(new_aggregation))
-            print("resulting in network weights ...")
-            print(self.weights_to_string(new_weights))
-        return new_weights
-
-    def with_compile_params(self, **kwargs):
-        self.compile_params.update(kwargs)
-        return self
-
-    def learn(self, epochs, reduction, batchsize=1):
-        with tqdm(total=epochs, ascii=True,
-                  desc='Type: {t} @ Epoch:'.format(t=self.__class__.__name__),
-                  postfix=["Loss", dict(value=0)]) as bar:
-            for epoch in range(epochs):
-                old_weights = self.get_weights()
-                x = reduction(old_weights, self.features)
-                savestateCallback = SaveStateCallback(self, epoch=epoch)
-                history = self.model.fit(x=x, y=x, verbose=0, batch_size=batchsize, callbacks=savestateCallback)
-                bar.postfix[1]["value"] = history.history['loss'][-1]
-                bar.update()
-
-
-def vary(e=0.0, f=0.0):
-    return [
-        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
-        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
-        np.array([[1.0+e], [0.0+f]], dtype=np.float32)
-    ]
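Note that vary(0.0, 0.0) reproduces exactly the hard-coded fixpoint weights in the `if False:` block below; e perturbs the identity entry and f the zero entries, so vary(0.01, 0.0) starts the net a small step away from that fixpoint before the self_attack loop probes whether it returns to it.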
-
-
-if __name__ == '__main__':
-
-    net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
-    if False:
-        net.set_weights([
-            np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
-            np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
-            np.array([[1.0], [0.0]], dtype=np.float32)
-        ])
-        print(net.get_weights())
-        net.self_attack(100)
-        print(net.get_weights())
-        print(net.is_fixpoint())
-
-    if True:
-        net.set_weights(vary(0.01, 0.0))
-        print(net.get_weights())
-        for _ in range(5):
-            net.self_attack()
-            print(net.get_weights())
-        print(net.is_fixpoint())