model getter fixed
.gitignore (vendored, 1146 lines): diff suppressed because it is too large.
code/experiment.py
@@ -1,113 +1,113 @@

import os
import time
import dill
from tqdm import tqdm

from collections import defaultdict


class Experiment:

    @staticmethod
    def from_dill(path):
        with open(path, "rb") as dill_file:
            return dill.load(dill_file)

    def __init__(self, name=None, ident=None):
        self.experiment_id = ident or time.time()
        self.experiment_name = name or 'unnamed_experiment'
        self.base_dir = self.experiment_name
        self.next_iteration = 0
        self.log_messages = []
        self.data_storage = defaultdict(list)

    def __enter__(self):
        self.dir = os.path.join(self.base_dir, 'experiments', 'exp-{name}-{id}-{it}'.format(
            name=self.experiment_name, id=self.experiment_id, it=self.next_iteration)
        )
        os.makedirs(self.dir)
        print("** created {dir} **".format(dir=self.dir))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.save(experiment=self)
        self.save_log()
        self.next_iteration += 1

    def log(self, message, **kwargs):
        self.log_messages.append(message)
        print(message, **kwargs)

    def save_log(self, log_name="log"):
        with open(os.path.join(self.dir, "{name}.txt".format(name=log_name)), "w") as log_file:
            for log_message in self.log_messages:
                print(str(log_message), file=log_file)

    def save(self, **kwargs):
        for name, value in kwargs.items():
            with open(os.path.join(self.dir, "{name}.dill".format(name=name)), "wb") as dill_file:
                dill.dump(value, dill_file)

    def add_trajectory_segment(self, run_id, trajectory):
        self.data_storage[run_id].append(trajectory)
        return


class FixpointExperiment(Experiment):

    def __init__(self):
        super().__init__(name=self.__class__.__name__)
        self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
        self.interesting_fixpoints = []

    def run_net(self, net, step_limit=100, run_id=0):
        i = 0
        while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
            net.self_attack()
            i += 1
            if run_id:
                weights = net.get_weights_flat()
                self.add_trajectory_segment(run_id, weights)
        self.count(net)

    def count(self, net):
        if net.is_diverged():
            self.counters['divergent'] += 1
        elif net.is_fixpoint():
            if net.is_zero():
                self.counters['fix_zero'] += 1
            else:
                self.counters['fix_other'] += 1
                self.interesting_fixpoints.append(net.get_weights())
        elif net.is_fixpoint(2):
            self.counters['fix_sec'] += 1
        else:
            self.counters['other'] += 1


class MixedFixpointExperiment(FixpointExperiment):

    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0):
        # TODO Where to place the trajectory storage ?
        # weights = net.get_weights()
        # self.add_trajectory_segment(run_id, weights)

        i = 0
        while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
            net.self_attack()
            with tqdm(postfix=["Loss", dict(value=0)]) as bar:
                for _ in range(trains_per_application):
                    loss = net.compiled().train()
                    bar.postfix[1]["value"] = loss
                    bar.update()
            i += 1
        self.count(net)


class SoupExperiment(Experiment):
    pass


class IdentLearningExperiment(Experiment):
    pass
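A minimal usage sketch for the context-manager flow above (the experiment name and the save key are illustrative; everything else is the API as defined in this file):

    with Experiment(name='demo') as exp:
        exp.log('starting run')          # printed and buffered for log.txt
        exp.save(numbers=[1, 2, 3])      # written to <exp.dir>/numbers.dill
        saved_dir = exp.dir

    # __exit__ also pickles the experiment itself, so it can be restored later:
    restored = Experiment.from_dill(os.path.join(saved_dir, 'experiment.dill'))

Note that each __enter__ creates a fresh exp-<name>-<id>-<iteration> directory and __exit__ bumps next_iteration, so the same Experiment object can be reused for several runs.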
File diff suppressed because one or more lines are too long

code/methods.py (382 lines)
@@ -1,191 +1,191 @@
import tensorflow as tf
from keras.models import Sequential, Model
from keras.layers import SimpleRNN, Dense
from keras.layers import Input, TimeDistributed
from tqdm import tqdm
import time
import os
import dill

from experiment import Experiment

import itertools

from typing import Union
import numpy as np


class Network(object):
    def __init__(self, features, cells, layers, bias=False, recurrent=False):
        self.features = features
        self.cells = cells
        self.num_layer = layers
        bias_params = cells if bias else 0

        # Recurrent network
        if recurrent:
            # First RNN
            p_layer_1 = (self.features * self.cells + self.cells ** 2 + bias_params)
            # All other RNN Layers
            p_layer_n = (self.cells * self.cells + self.cells ** 2 + bias_params) * (self.num_layer - 1)
        else:
            # First Dense
            p_layer_1 = (self.features * self.cells + bias_params)
            # All other Dense Layers
            p_layer_n = (self.cells * self.cells + bias_params) * (self.num_layer - 1)
        # Final Dense
        p_layer_out = self.features * self.cells + bias_params
        self.parameters = np.sum([p_layer_1, p_layer_n, p_layer_out])
        # Build network
        cell = SimpleRNN if recurrent else Dense
        self.inputs, x = Input(shape=(self.parameters // self.features,
                                      self.features) if recurrent else (self.features,)), None

        for layer in range(self.num_layer):
            if recurrent:
                x = SimpleRNN(self.cells, activation=None, use_bias=False,
                              return_sequences=True)(self.inputs if layer == 0 else x)
            else:
                x = Dense(self.cells, activation=None, use_bias=False,
                          )(self.inputs if layer == 0 else x)
        self.outputs = Dense(self.features if recurrent else 1, activation=None, use_bias=False)(x)
        print('Network initialized, i haz {p} params @:{e}Features: {f}{e}Cells: {c}{e}Layers: {l}'.format(
            p=self.parameters, l=self.num_layer, c=self.cells, f=self.features, e='\n{}'.format(' ' * 5))
        )
        pass

    def get_inputs(self):
        return self.inputs

    def get_outputs(self):
        return self.outputs


class _BaseNetwork(Model):

    def __init__(self, **kwargs):
        super(_BaseNetwork, self).__init__(**kwargs)
        # This is dirty
        self.features = None

    def get_weights_flat(self):
        weights = super().get_weights()
        flat = np.asarray(np.concatenate([x.flatten() for x in weights]))
        return flat

    def step(self, x):
        pass

    def step_other(self, other: Union[Sequential, Model]) -> bool:
        pass

    def get_parameter_count(self):
        return np.sum([np.prod(x.shape) for x in self.get_weights()])

    def train_on_batch(self, *args, **kwargs):
        raise NotImplementedError

    def compile(self, *args, **kwargs):
        raise NotImplementedError

    @staticmethod
    def mean_abs_error(labels, predictions):
        return np.mean(np.abs(predictions - labels), axis=-1)

    @staticmethod
    def mean_sqrd_error(labels, predictions):
        return np.mean(np.square(predictions - labels), axis=-1)


class RecurrentNetwork(_BaseNetwork):
    def __init__(self, network: Network, *args, **kwargs):
        super().__init__(inputs=network.inputs, outputs=network.outputs)
        self.features = network.features
        self.parameters = network.parameters
        assert self.parameters == self.get_parameter_count()

    def step(self, x):
        shaped = np.reshape(x, (1, -1, self.features))
        return self.predict(shaped).flatten()

    def fit(self, epochs=500, **kwargs):
        losses = []
        with tqdm(total=epochs, ascii=True,
                  desc='Type: {t}'.format(t=self.__class__.__name__),
                  postfix=["Loss", dict(value=0)]) as bar:
            for _ in range(epochs):
                x = self.get_weights_flat()
                y = self.step(x)
                weights = self.get_weights()
                global_idx = 0
                for idx, weight_matrix in enumerate(weights):
                    flattened = weight_matrix.flatten()
                    new_weights = y[global_idx:global_idx + flattened.shape[0]]
                    weights[idx] = np.reshape(new_weights, weight_matrix.shape)
                    global_idx += flattened.shape[0]
                losses.append(self.mean_sqrd_error(y.flatten(), self.get_weights_flat()))
                self.set_weights(weights)
                bar.postfix[1]["value"] = losses[-1]
                bar.update()
        return losses


class FeedForwardNetwork(_BaseNetwork):
    def __init__(self, network: Network, **kwargs):
        super().__init__(inputs=network.inputs, outputs=network.outputs, **kwargs)
        self.features = network.features
        self.parameters = network.parameters
        self.num_layer = network.num_layer
        self.num_cells = network.cells
        # assert self.parameters == self.get_parameter_count()

    def step(self, x):
        return self.predict(x)

    def step_other(self, x):
        return self.predict(x)

    def fit(self, epochs=500, **kwargs):
        losses = []
        with tqdm(total=epochs, ascii=True,
                  desc='Type: {t} @ Epoch:'.format(t=self.__class__.__name__),
                  postfix=["Loss", dict(value=0)]) as bar:
            for _ in range(epochs):
                all_weights = self.get_weights_flat()
                cell_idx = np.apply_along_axis(lambda x: x / self.num_cells, 0, np.arange(int(self.get_parameter_count())))
                xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)

                y = self.step(xc)

                weights = self.get_weights()
                global_idx = 0

                for idx, weight_matrix in enumerate(weights):

                    # UPDATE THE WEIGHTS
                    flattened = weight_matrix.flatten()
                    new_weights = y[global_idx:global_idx + flattened.shape[0], 0]
                    weights[idx] = np.reshape(new_weights, weight_matrix.shape)
                    global_idx += flattened.shape[0]

                losses.append(self.mean_sqrd_error(y[:, 0].flatten(), self.get_weights_flat()))
                self.set_weights(weights)
                bar.postfix[1]["value"] = losses[-1]
                bar.update()
        return losses


if __name__ == '__main__':
    with Experiment() as exp:
        features, cells, layers = 2, 2, 2
        use_recurrent = False
        if use_recurrent:
            network = Network(features, cells, layers, recurrent=use_recurrent)
            r = RecurrentNetwork(network)
            loss = r.fit(epochs=10)
            exp.save(rnet=r)
        else:
            network = Network(features, cells, layers, recurrent=use_recurrent)
            ff = FeedForwardNetwork(network)
            loss = ff.fit(epochs=10)
            exp.save(ffnet=ff)
        print(loss)
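A quick check of the parameter arithmetic in Network.__init__, using the recurrent configuration with the values from the __main__ block (features=2, cells=2, layers=2, bias=False):

    p_layer_1   = 2 * 2 + 2 ** 2              # = 8, input kernel + recurrent kernel of the first SimpleRNN
    p_layer_n   = (2 * 2 + 2 ** 2) * (2 - 1)  # = 8, every further SimpleRNN layer
    p_layer_out = 2 * 2                       # = 4, final Dense mapping back to the feature width
    # total: 20, which is what RecurrentNetwork's assert compares against get_parameter_count()

In the feed-forward case the output layer is Dense(1), so the actual weight count comes out smaller than the p_layer_out term assumes; that mismatch is presumably why the corresponding assert in FeedForwardNetwork is commented out.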
code/network.py (1396 lines): diff suppressed because it is too large.
File diff suppressed because one or more lines are too long

code/soup.py (208 lines)
@@ -1,104 +1,104 @@
import random
import copy

from tqdm import tqdm

from experiment import *
from network import *


def prng():
    return random.random()


class Soup:

    def __init__(self, size, generator, **kwargs):
        self.size = size
        self.generator = generator
        self.particles = []
        self.params = dict(meeting_rate=0.1, train_other_rate=0.1, train=0)
        self.params.update(kwargs)

    def with_params(self, **kwargs):
        self.params.update(kwargs)
        return self

    def seed(self):
        self.particles = []
        for _ in range(self.size):
            self.particles += [self.generator()]
        return self

    def evolve(self, iterations=1):
        for _ in range(iterations):
            for particle_id, particle in enumerate(self.particles):
                if prng() < self.params.get('meeting_rate'):
                    other_particle_id = int(prng() * len(self.particles))
                    other_particle = self.particles[other_particle_id]
                    particle.attack(other_particle)
                if prng() < self.params.get('train_other_rate'):
                    other_particle_id = int(prng() * len(self.particles))
                    other_particle = self.particles[other_particle_id]
                    particle.train_other(other_particle)
                try:
                    for _ in range(self.params.get('train', 0)):
                        particle.compiled().train()
                except AttributeError:
                    pass
                if self.params.get('remove_divergent') and particle.is_diverged():
                    self.particles[particle_id] = self.generator()
                if self.params.get('remove_zero') and particle.is_zero():
                    self.particles[particle_id] = self.generator()

    def count(self):
        counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
        for particle in self.particles:
            if particle.is_diverged():
                counters['divergent'] += 1
            elif particle.is_fixpoint():
                if particle.is_zero():
                    counters['fix_zero'] += 1
                else:
                    counters['fix_other'] += 1
            elif particle.is_fixpoint(2):
                counters['fix_sec'] += 1
            else:
                counters['other'] += 1
        return counters


class LearningSoup(Soup):

    def __init__(self, *args, **kwargs):
        # forward the positional size/generator arguments as well
        super(LearningSoup, self).__init__(*args, **kwargs)


if __name__ == '__main__':
    if False:
        with SoupExperiment() as exp:
            for run_id in range(1):
                net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
                #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
                soup.seed()
                for _ in tqdm(range(100)):
                    soup.evolve()
                exp.log(soup.count())

    if True:
        with SoupExperiment() as exp:
            for run_id in range(1):
                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)).with_keras_params(
                    activation='linear')
                # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
                #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True).with_params(train=500)
                soup.seed()
                for _ in tqdm(range(10)):
                    soup.evolve()
                exp.log(soup.count())
code/test.py (66 lines)
@@ -1,33 +1,33 @@
from experiment import *
from network import *
from soup import *
import numpy as np


def vary(e=0.0, f=0.0):
    return [
        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
        np.array([[1.0+e], [0.0+f]], dtype=np.float32)
    ]


if __name__ == '__main__':

    net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
    if False:
        net.set_weights([
            np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
            np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
            np.array([[1.0], [0.0]], dtype=np.float32)
        ])
        print(net.get_weights())
        net.self_attack(100)
        print(net.get_weights())
        print(net.is_fixpoint())

    if True:
        net.set_weights(vary(0.01, 0.0))
        print(net.get_weights())
        for _ in range(5):
            net.self_attack()
        print(net.get_weights())
        print(net.is_fixpoint())
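Concretely, vary(0.01, 0.0) reproduces the hard-coded weights from the `if False:` branch with the single leading entry nudged from 1.0 to 1.01 and all other entries left at 0.0; the loop then applies net.self_attack() five times and prints whether the perturbed network has settled back onto a fixpoint.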
code/util.py (76 lines)
@@ -1,39 +1,39 @@
class PrintingObject:

    class SilenceSignal():
        def __init__(self, obj, value):
            self.obj = obj
            self.new_silent = value

        def __enter__(self):
            self.old_silent = self.obj.get_silence()
            self.obj.set_silence(self.new_silent)

        def __exit__(self, exception_type, exception_value, traceback):
            self.obj.set_silence(self.old_silent)

    def __init__(self):
        self.silent = True

    def is_silent(self):
        return self.silent

    def get_silence(self):
        return self.is_silent()

    def set_silence(self, value=True):
        self.silent = value
        return self

    def unset_silence(self):
        self.silent = False
        return self

    def with_silence(self, value=True):
        self.set_silence(value)
        return self

    def silence(self, value=True):
        return self.__class__.SilenceSignal(self, value)

    def _print(self, *args, **kwargs):
        if not self.silent:
            print(*args, **kwargs)
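A short sketch of the intended use of the SilenceSignal context manager (the object here is illustrative):

    obj = PrintingObject()           # silent by default
    obj._print('never shown')
    with obj.silence(False):         # temporarily un-silence
        obj._print('shown inside the with-block only')
    obj._print('silent again')       # old state restored by __exit__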
@@ -1,197 +1,197 @@
import os

from argparse import ArgumentParser
import numpy as np

import plotly as pl
import plotly.graph_objs as go

import colorlover as cl

import dill

from sklearn.manifold.t_sne import TSNE


def build_args():
    arg_parser = ArgumentParser()
    arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
    arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
    return arg_parser.parse_args()


def plot_latent_trajectories(data_dict, filename='latent_trajectory_plot'):

    bupu = cl.scales['9']['seq']['BuPu']
    scale = cl.interp(bupu, len(data_dict)+1)  # Map color scale to N bins

    # Fit the embedding space
    transformer = TSNE()
    for trajectory_id in data_dict:
        transformer.fit(np.asarray(data_dict[trajectory_id]))

    # Transform data accordingly and plot it
    data = []
    for trajectory_id in data_dict:
        transformed = transformer._fit(np.asarray(data_dict[trajectory_id]))
        line_trace = go.Scatter(
            x=transformed[:, 0],
            y=transformed[:, 1],
            text='Hovertext goes here'.format(),
            line=dict(color=scale[trajectory_id]),
            # legendgroup='Position -{}'.format(pos),
            # name='Position -{}'.format(pos),
            showlegend=False,
            # hoverinfo='text',
            mode='lines')
        line_start = go.Scatter(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
                                marker=dict(
                                    color='rgb(255, 0, 0)',
                                    size=4
                                ),
                                showlegend=False
                                )
        line_end = go.Scatter(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
                              marker=dict(
                                  color='rgb(0, 0, 0)',
                                  size=4
                              ),
                              showlegend=False
                              )
        data.extend([line_trace, line_start, line_end])

    layout = dict(title='{} - Latent Trajectory Movement'.format('Penis'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
    # import plotly.io as pio
    # pio.write_image(fig, filename)
    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)
    pass


def plot_latent_trajectories_3D(data_dict, filename='plot'):
    def norm(val, a=0, b=0.25):
        return (val - a) / (b - a)

    bupu = cl.scales['9']['seq']['BuPu']
    scale = cl.interp(bupu, len(data_dict)+1)  # Map color scale to N bins

    max_len = max([len(trajectory) for trajectory in data_dict.values()])

    # Fit the embedding space
    transformer = TSNE()
    for trajectory_id in data_dict:
        transformer.fit(data_dict[trajectory_id])

    # Transform data accordingly and plot it
    data = []
    for trajectory_id in data_dict:
        transformed = transformer._fit(np.asarray(data_dict[trajectory_id]))
        trace = go.Scatter3d(
            x=transformed[:, 0],
            y=transformed[:, 1],
            z=np.arange(transformed.shape[0]),
            text='Hovertext goes here'.format(),
            line=dict(color=scale[trajectory_id]),
            # legendgroup='Position -{}'.format(pos),
            # name='Position -{}'.format(pos),
            showlegend=False,
            # hoverinfo='text',
            mode='lines')
        data.append(trace)

    layout = go.Layout(scene=dict(aspectratio=dict(x=2, y=2, z=1),
                                  xaxis=dict(tickwidth=1, title='Transformed X'),
                                  yaxis=dict(tickwidth=1, title='transformed Y'),
                                  zaxis=dict(tickwidth=1, title='Epoch')),
                       title='{} - Latent Trajectory Movement'.format('Penis'),
                       width=800, height=800,
                       margin=dict(l=0, r=0, b=0, t=0))

    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)
    pass


def plot_histogram(bars_dict_list, filename='histogram_plot'):
    # categorical
    ryb = cl.scales['10']['div']['RdYlBu']

    data = []
    for bar_id, bars_dict in bars_dict_list:
        hist = go.Histogram(
            histfunc="count",
            y=bars_dict.get('value', 14),
            x=bars_dict.get('name', 'gimme a name'),
            showlegend=False,
            marker=dict(
                color=ryb[bar_id]
            ),
        )
        data.append(hist)

    layout = dict(title='{} Histogram Plot'.format('Experiment Name Penis'),
                  height=400, width=400, margin=dict(l=0, r=0, t=0, b=0))

    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)

    pass


def line_plot(line_dict_list, filename='lineplot'):
    # lines with standard deviation
    # Transform data accordingly and plot it
    data = []
    rdylgn = cl.scales['10']['div']['RdYlGn']
    rdylgn_background = [scale + (0.4,) for scale in cl.to_numeric(rdylgn)]
    for line_id, line_dict in enumerate(line_dict_list):
        name = line_dict.get('name', 'gimme a name')

        upper_bound = go.Scatter(
            name='Upper Bound',
            x=line_dict['x'],
            y=line_dict['upper_y'],
            mode='lines',
            marker=dict(color="#444"),
            line=dict(width=0),
            fillcolor=rdylgn_background[line_id],
        )

        trace = go.Scatter(
            x=line_dict['x'],
            y=line_dict['main_y'],
            mode='lines',
            name=name,
            line=dict(color=line_id),
            fillcolor=rdylgn_background[line_id],
            fill='tonexty')

        lower_bound = go.Scatter(
            name='Lower Bound',
            x=line_dict['x'],
            y=line_dict['lower_y'],
            marker=dict(color="#444"),
            line=dict(width=0),
            mode='lines')

        data.extend([upper_bound, trace, lower_bound])

    layout = dict(title='{} Line Plot'.format('Experiment Name Penis'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))

    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)
    pass


if __name__ == '__main__':
    args = build_args()
    in_file = args.in_file[0]
    out_file = args.out_file

    with open(in_file, 'rb') as in_f:
        experiment = dill.load(in_f)
    plot_latent_trajectories_3D(experiment.data_storage)

    print('aha')
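A note on the t-SNE calls above: sklearn.manifold.t_sne is a private module path and _fit a private method, both of which newer scikit-learn releases removed, and since TSNE is non-parametric, fit does not build a reusable mapping, so the warm-up loop over data_dict does not influence the later per-trajectory embeddings. On a current scikit-learn the rough equivalent of the fit/_fit pair would be:

    from sklearn.manifold import TSNE
    transformed = TSNE(n_components=2).fit_transform(np.asarray(data_dict[trajectory_id]))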