model getter fixed

commit 20e9545b02 (parent de6aa68f23)
.gitignore (vendored, 1146 lines changed): diff suppressed because it is too large.
@@ -1,113 +1,113 @@

import os
import time
import dill
from tqdm import tqdm

from collections import defaultdict


class Experiment:

    @staticmethod
    def from_dill(path):
        with open(path, "rb") as dill_file:
            return dill.load(dill_file)

    def __init__(self, name=None, ident=None):
        self.experiment_id = ident or time.time()
        self.experiment_name = name or 'unnamed_experiment'
        self.base_dir = self.experiment_name
        self.next_iteration = 0
        self.log_messages = []
        self.data_storage = defaultdict(list)

    def __enter__(self):
        self.dir = os.path.join(self.base_dir, 'experiments', 'exp-{name}-{id}-{it}'.format(
            name=self.experiment_name, id=self.experiment_id, it=self.next_iteration)
        )

        os.makedirs(self.dir)
        print("** created {dir} **".format(dir=self.dir))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.save(experiment=self)
        self.save_log()
        self.next_iteration += 1

    def log(self, message, **kwargs):
        self.log_messages.append(message)
        print(message, **kwargs)

    def save_log(self, log_name="log"):
        with open(os.path.join(self.dir, "{name}.txt".format(name=log_name)), "w") as log_file:
            for log_message in self.log_messages:
                print(str(log_message), file=log_file)

    def save(self, **kwargs):
        for name, value in kwargs.items():
            with open(os.path.join(self.dir, "{name}.dill".format(name=name)), "wb") as dill_file:
                dill.dump(value, dill_file)

    def add_trajectory_segment(self, run_id, trajectory):
        self.data_storage[run_id].append(trajectory)
        return


class FixpointExperiment(Experiment):

    def __init__(self):
        super().__init__(name=self.__class__.__name__)
        self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
        self.interesting_fixpoints = []

    def run_net(self, net, step_limit=100, run_id=0):
        i = 0
        while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
            net.self_attack()
            i += 1
            if run_id:
                weights = net.get_weights_flat()
                self.add_trajectory_segment(run_id, weights)
        self.count(net)

    def count(self, net):
        if net.is_diverged():
            self.counters['divergent'] += 1
        elif net.is_fixpoint():
            if net.is_zero():
                self.counters['fix_zero'] += 1
            else:
                self.counters['fix_other'] += 1
                self.interesting_fixpoints.append(net.get_weights())
        elif net.is_fixpoint(2):
            self.counters['fix_sec'] += 1
        else:
            self.counters['other'] += 1


class MixedFixpointExperiment(FixpointExperiment):

    def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0):
        # TODO Where to place the trajectory storage ?
        # weights = net.get_weights()
        # self.add_trajectory_segment(run_id, weights)

        i = 0
        while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
            net.self_attack()
            with tqdm(postfix=["Loss", dict(value=0)]) as bar:
                for _ in range(trains_per_application):
                    loss = net.compiled().train()
                    bar.postfix[1]["value"] = loss
                    bar.update()
            i += 1
        self.count(net)


class SoupExperiment(Experiment):
    pass


class IdentLearningExperiment(Experiment):
    pass
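The classes above are meant to be driven through the context-manager protocol: entering creates a fresh run directory, exiting pickles the experiment and writes the log. A minimal sketch of that flow; DummyNet is a hypothetical stand-in for the real networks (defined in code/network.py, whose diff is suppressed in this view), not part of this commit:

class DummyNet:
    # Illustrative stand-in implementing just the interface run_net() expects.
    def is_diverged(self): return False
    def is_fixpoint(self, degree=1): return degree == 1
    def is_zero(self): return False
    def self_attack(self): pass
    def get_weights(self): return []
    def get_weights_flat(self): return []

with FixpointExperiment() as exp:
    # __enter__ created FixpointExperiment/experiments/exp-FixpointExperiment-<id>-0
    exp.run_net(DummyNet(), step_limit=10)
    exp.log(exp.counters)   # printed now, written to log.txt by __exit__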
File diff suppressed because one or more lines are too long
code/methods.py (382 lines changed)
@@ -1,191 +1,191 @@

import tensorflow as tf
from keras.models import Sequential, Model
from keras.layers import SimpleRNN, Dense
from keras.layers import Input, TimeDistributed
from tqdm import tqdm
import time
import os
import dill

from experiment import Experiment

import itertools

from typing import Union
import numpy as np


class Network(object):
    def __init__(self, features, cells, layers, bias=False, recurrent=False):
        self.features = features
        self.cells = cells
        self.num_layer = layers
        bias_params = cells if bias else 0

        # Recurrent network
        if recurrent:
            # First RNN
            p_layer_1 = (self.features * self.cells + self.cells ** 2 + bias_params)
            # All other RNN Layers
            p_layer_n = (self.cells * self.cells + self.cells ** 2 + bias_params) * (self.num_layer - 1)
        else:
            # First Dense
            p_layer_1 = (self.features * self.cells + bias_params)
            # All other Dense Layers
            p_layer_n = (self.cells * self.cells + bias_params) * (self.num_layer - 1)
        # Final Dense
        p_layer_out = self.features * self.cells + bias_params
        self.parameters = np.sum([p_layer_1, p_layer_n, p_layer_out])
        # Build network
        cell = SimpleRNN if recurrent else Dense
        self.inputs, x = Input(shape=(self.parameters // self.features,
                                      self.features) if recurrent else (self.features,)), None

        for layer in range(self.num_layer):
            if recurrent:
                x = SimpleRNN(self.cells, activation=None, use_bias=False,
                              return_sequences=True)(self.inputs if layer == 0 else x)
            else:
                x = Dense(self.cells, activation=None, use_bias=False,
                          )(self.inputs if layer == 0 else x)
        self.outputs = Dense(self.features if recurrent else 1, activation=None, use_bias=False)(x)
        print('Network initialized, i haz {p} params @:{e}Features: {f}{e}Cells: {c}{e}Layers: {l}'.format(
            p=self.parameters, l=self.num_layer, c=self.cells, f=self.features, e='\n{}'.format(' ' * 5))
        )
        pass

    def get_inputs(self):
        return self.inputs

    def get_outputs(self):
        return self.outputs


class _BaseNetwork(Model):

    def __init__(self, **kwargs):
        super(_BaseNetwork, self).__init__(**kwargs)
        # This is dirty
        self.features = None

    def get_weights_flat(self):
        weights = super().get_weights()
        flat = np.asarray(np.concatenate([x.flatten() for x in weights]))
        return flat

    def step(self, x):
        pass

    def step_other(self, other: Union[Sequential, Model]) -> bool:
        pass

    def get_parameter_count(self):
        return np.sum([np.prod(x.shape) for x in self.get_weights()])

    def train_on_batch(self, *args, **kwargs):
        raise NotImplementedError

    def compile(self, *args, **kwargs):
        raise NotImplementedError

    @staticmethod
    def mean_abs_error(labels, predictions):
        return np.mean(np.abs(predictions - labels), axis=-1)

    @staticmethod
    def mean_sqrd_error(labels, predictions):
        return np.mean(np.square(predictions - labels), axis=-1)


class RecurrentNetwork(_BaseNetwork):
    def __init__(self, network: Network, *args, **kwargs):
        super().__init__(inputs=network.inputs, outputs=network.outputs)
        self.features = network.features
        self.parameters = network.parameters
        assert self.parameters == self.get_parameter_count()

    def step(self, x):
        shaped = np.reshape(x, (1, -1, self.features))
        return self.predict(shaped).flatten()

    def fit(self, epochs=500, **kwargs):
        losses = []
        with tqdm(total=epochs, ascii=True,
                  desc='Type: {t}'.format(t=self.__class__.__name__),
                  postfix=["Loss", dict(value=0)]) as bar:
            for _ in range(epochs):
                x = self.get_weights_flat()
                y = self.step(x)
                weights = self.get_weights()
                global_idx = 0
                for idx, weight_matrix in enumerate(weights):
                    flattened = weight_matrix.flatten()
                    new_weights = y[global_idx:global_idx + flattened.shape[0]]
                    weights[idx] = np.reshape(new_weights, weight_matrix.shape)
                    global_idx += flattened.shape[0]
                losses.append(self.mean_sqrd_error(y.flatten(), self.get_weights_flat()))
                self.set_weights(weights)
                bar.postfix[1]["value"] = losses[-1]
                bar.update()
        return losses


class FeedForwardNetwork(_BaseNetwork):
    def __init__(self, network: Network, **kwargs):
        super().__init__(inputs=network.inputs, outputs=network.outputs, **kwargs)
        self.features = network.features
        self.parameters = network.parameters
        self.num_layer = network.num_layer
        self.num_cells = network.cells
        # assert self.parameters == self.get_parameter_count()

    def step(self, x):
        return self.predict(x)

    def step_other(self, x):
        return self.predict(x)

    def fit(self, epochs=500, **kwargs):
        losses = []
        with tqdm(total=epochs, ascii=True,
                  desc='Type: {t} @ Epoch:'.format(t=self.__class__.__name__),
                  postfix=["Loss", dict(value=0)]) as bar:
            for _ in range(epochs):
                all_weights = self.get_weights_flat()
                cell_idx = np.apply_along_axis(lambda x: x / self.num_cells, 0,
                                               np.arange(int(self.get_parameter_count())))
                xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)

                y = self.step(xc)

                weights = self.get_weights()
                global_idx = 0

                for idx, weight_matrix in enumerate(weights):

                    # UPDATE THE WEIGHTS
                    flattened = weight_matrix.flatten()
                    new_weights = y[global_idx:global_idx + flattened.shape[0], 0]
                    weights[idx] = np.reshape(new_weights, weight_matrix.shape)
                    global_idx += flattened.shape[0]

                losses.append(self.mean_sqrd_error(y[:, 0].flatten(), self.get_weights_flat()))
                self.set_weights(weights)
                bar.postfix[1]["value"] = losses[-1]
                bar.update()
        return losses


if __name__ == '__main__':
    with Experiment() as exp:
        features, cells, layers = 2, 2, 2
        use_recurrent = False
        if use_recurrent:
            network = Network(features, cells, layers, recurrent=use_recurrent)
            r = RecurrentNetwork(network)
            loss = r.fit(epochs=10)
            exp.save(rnet=r)
        else:
            network = Network(features, cells, layers, recurrent=use_recurrent)
            ff = FeedForwardNetwork(network)
            loss = ff.fit(epochs=10)
            exp.save(ffnet=ff)
        print(loss)
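Both fit() methods share the same core move: the network's prediction is one flat vector that gets scattered back into the per-layer weight matrices in order. A self-contained sketch of that round-trip (NumPy only; the shapes are chosen for illustration):

import numpy as np

weights = [np.zeros((2, 3)), np.zeros((3, 1))]            # per-layer matrices
y = np.arange(sum(w.size for w in weights), dtype=float)  # flat "prediction"

global_idx = 0
for idx, weight_matrix in enumerate(weights):
    n = weight_matrix.size
    weights[idx] = y[global_idx:global_idx + n].reshape(weight_matrix.shape)
    global_idx += n

# Flattening the result recovers the original vector in the same order.
assert np.array_equal(np.concatenate([w.flatten() for w in weights]), y)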
code/network.py (1396 lines changed): diff suppressed because it is too large.
File diff suppressed because one or more lines are too long
code/soup.py (208 lines changed)
@@ -1,104 +1,104 @@

import random
import copy

from tqdm import tqdm

from experiment import *
from network import *


def prng():
    return random.random()


class Soup:

    def __init__(self, size, generator, **kwargs):
        self.size = size
        self.generator = generator
        self.particles = []
        self.params = dict(meeting_rate=0.1, train_other_rate=0.1, train=0)
        self.params.update(kwargs)

    def with_params(self, **kwargs):
        self.params.update(kwargs)
        return self

    def seed(self):
        self.particles = []
        for _ in range(self.size):
            self.particles += [self.generator()]
        return self

    def evolve(self, iterations=1):
        for _ in range(iterations):
            for particle_id, particle in enumerate(self.particles):
                if prng() < self.params.get('meeting_rate'):
                    other_particle_id = int(prng() * len(self.particles))
                    other_particle = self.particles[other_particle_id]
                    particle.attack(other_particle)
                if prng() < self.params.get('train_other_rate'):
                    other_particle_id = int(prng() * len(self.particles))
                    other_particle = self.particles[other_particle_id]
                    particle.train_other(other_particle)
                try:
                    for _ in range(self.params.get('train', 0)):
                        particle.compiled().train()
                except AttributeError:
                    pass
                if self.params.get('remove_divergent') and particle.is_diverged():
                    self.particles[particle_id] = self.generator()
                if self.params.get('remove_zero') and particle.is_zero():
                    self.particles[particle_id] = self.generator()

    def count(self):
        counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
        for particle in self.particles:
            if particle.is_diverged():
                counters['divergent'] += 1
            elif particle.is_fixpoint():
                if particle.is_zero():
                    counters['fix_zero'] += 1
                else:
                    counters['fix_other'] += 1
            elif particle.is_fixpoint(2):
                counters['fix_sec'] += 1
            else:
                counters['other'] += 1
        return counters


class LearningSoup(Soup):

    def __init__(self, *args, **kwargs):
        # FIX: forward positional args too; the original passed only **kwargs,
        # so LearningSoup(size, generator) raised a TypeError in Soup.__init__.
        super(LearningSoup, self).__init__(*args, **kwargs)


if __name__ == '__main__':
    if False:
        with SoupExperiment() as exp:
            for run_id in range(1):
                net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
                #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
                soup.seed()
                for _ in tqdm(range(100)):
                    soup.evolve()
                exp.log(soup.count())

    if True:
        with SoupExperiment() as exp:
            for run_id in range(1):
                net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)).with_keras_params(
                    activation='linear')
                # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
                #     .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
                # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
                soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True).with_params(train=500)
                soup.seed()
                for _ in tqdm(range(10)):
                    soup.evolve()
                exp.log(soup.count())
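A minimal sketch of the soup life cycle; ToyParticle is a hypothetical stand-in for the network particles from code/network.py and implements only what evolve() and count() touch:

class ToyParticle:
    def is_diverged(self): return False
    def is_zero(self): return False
    def is_fixpoint(self, degree=1): return degree == 1
    def attack(self, other): pass
    def train_other(self, other): pass

soup = Soup(5, ToyParticle).with_params(meeting_rate=0.5)
soup.seed()
soup.evolve(iterations=3)
print(soup.count())   # all five toy particles land in 'fix_other'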
code/test.py (66 lines changed)
@@ -1,33 +1,33 @@

from experiment import *
from network import *
from soup import *
import numpy as np


def vary(e=0.0, f=0.0):
    return [
        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
        np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
        np.array([[1.0+e], [0.0+f]], dtype=np.float32)
    ]


if __name__ == '__main__':

    net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
    if False:
        net.set_weights([
            np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
            np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
            np.array([[1.0], [0.0]], dtype=np.float32)
        ])
        print(net.get_weights())
        net.self_attack(100)
        print(net.get_weights())
        print(net.is_fixpoint())

    if True:
        net.set_weights(vary(0.01, 0.0))
        print(net.get_weights())
        for _ in range(5):
            net.self_attack()
            print(net.get_weights())
        print(net.is_fixpoint())
code/util.py (76 lines changed)
@@ -1,39 +1,39 @@

class PrintingObject:

    class SilenceSignal():
        def __init__(self, obj, value):
            self.obj = obj
            self.new_silent = value

        def __enter__(self):
            self.old_silent = self.obj.get_silence()
            self.obj.set_silence(self.new_silent)

        def __exit__(self, exception_type, exception_value, traceback):
            self.obj.set_silence(self.old_silent)

    def __init__(self):
        self.silent = True

    def is_silent(self):
        return self.silent

    def get_silence(self):
        return self.is_silent()

    def set_silence(self, value=True):
        self.silent = value
        return self

    def unset_silence(self):
        self.silent = False
        return self

    def with_silence(self, value=True):
        self.set_silence(value)
        return self

    def silence(self, value=True):
        return self.__class__.SilenceSignal(self, value)

    def _print(self, *args, **kwargs):
        if not self.silent:
            print(*args, **kwargs)
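For reference, silence() hands back a SilenceSignal that restores the previous flag on exit, so temporary overrides nest safely. A short sketch:

obj = PrintingObject()            # silent by default
obj._print("dropped")             # no output while silent
with obj.silence(False):          # temporarily un-silence
    obj._print("visible inside the with-block")
obj._print("dropped again")       # old flag restored by __exit__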
@@ -1,197 +1,197 @@

import os

from argparse import ArgumentParser
import numpy as np

import plotly as pl
import plotly.graph_objs as go

import colorlover as cl

import dill

# NOTE: private module path kept from the original; in current scikit-learn
# the public import is `from sklearn.manifold import TSNE`.
from sklearn.manifold.t_sne import TSNE


def build_args():
    arg_parser = ArgumentParser()
    arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
    arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
    return arg_parser.parse_args()


def plot_latent_trajectories(data_dict, filename='latent_trajectory_plot'):

    bupu = cl.scales['9']['seq']['BuPu']
    scale = cl.interp(bupu, len(data_dict)+1)  # Map color scale to N bins

    # Fit the embedding space
    transformer = TSNE()
    for trajectory_id in data_dict:
        transformer.fit(np.asarray(data_dict[trajectory_id]))

    # Transform data accordingly and plot it
    data = []
    for trajectory_id in data_dict:
        # `_fit` is a private TSNE method; kept from the original code.
        transformed = transformer._fit(np.asarray(data_dict[trajectory_id]))
        line_trace = go.Scatter(
            x=transformed[:, 0],
            y=transformed[:, 1],
            text='Hovertext goes here'.format(),
            line=dict(color=scale[trajectory_id]),
            # legendgroup='Position -{}'.format(pos),
            # name='Position -{}'.format(pos),
            showlegend=False,
            # hoverinfo='text',
            mode='lines')
        line_start = go.Scatter(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
                                marker=dict(
                                    color='rgb(255, 0, 0)',
                                    size=4
                                ),
                                showlegend=False
                                )
        line_end = go.Scatter(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
                              marker=dict(
                                  color='rgb(0, 0, 0)',
                                  size=4
                              ),
                              showlegend=False
                              )
        data.extend([line_trace, line_start, line_end])

    layout = dict(title='{} - Latent Trajectory Movement'.format('Penis'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
    # import plotly.io as pio
    # pio.write_image(fig, filename)
    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)
    pass


def plot_latent_trajectories_3D(data_dict, filename='plot'):
    def norm(val, a=0, b=0.25):
        return (val - a) / (b - a)

    bupu = cl.scales['9']['seq']['BuPu']
    scale = cl.interp(bupu, len(data_dict)+1)  # Map color scale to N bins

    max_len = max([len(trajectory) for trajectory in data_dict.values()])

    # Fit the embedding space
    transformer = TSNE()
    for trajectory_id in data_dict:
        transformer.fit(data_dict[trajectory_id])

    # Transform data accordingly and plot it
    data = []
    for trajectory_id in data_dict:
        transformed = transformer._fit(np.asarray(data_dict[trajectory_id]))
        trace = go.Scatter3d(
            x=transformed[:, 0],
            y=transformed[:, 1],
            z=np.arange(transformed.shape[0]),
            text='Hovertext goes here'.format(),
            line=dict(color=scale[trajectory_id]),
            # legendgroup='Position -{}'.format(pos),
            # name='Position -{}'.format(pos),
            showlegend=False,
            # hoverinfo='text',
            mode='lines')
        data.append(trace)

    layout = go.Layout(scene=dict(aspectratio=dict(x=2, y=2, z=1),
                                  xaxis=dict(tickwidth=1, title='Transformed X'),
                                  yaxis=dict(tickwidth=1, title='transformed Y'),
                                  zaxis=dict(tickwidth=1, title='Epoch')),
                       title='{} - Latent Trajectory Movement'.format('Penis'),
                       width=800, height=800,
                       margin=dict(l=0, r=0, b=0, t=0))

    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)
    pass


def plot_histogram(bars_dict_list, filename='histogram_plot'):
    # categorical
    ryb = cl.scales['10']['div']['RdYlBu']

    data = []
    # FIX: the original iterated `for bar_id, bars_dict in bars_dict_list`,
    # which unpacks each dict instead of indexing the list.
    for bar_id, bars_dict in enumerate(bars_dict_list):
        hist = go.Histogram(
            histfunc="count",
            y=bars_dict.get('value', 14),
            x=bars_dict.get('name', 'gimme a name'),
            showlegend=False,
            marker=dict(
                color=ryb[bar_id]
            ),
        )
        data.append(hist)

    layout = dict(title='{} Histogram Plot'.format('Experiment Name Penis'),
                  height=400, width=400, margin=dict(l=0, r=0, t=0, b=0))

    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)

    pass


def line_plot(line_dict_list, filename='lineplot'):
    # lines with standard deviation
    # Transform data accordingly and plot it
    data = []
    rdylgn = cl.scales['10']['div']['RdYlGn']
    rdylgn_background = [scale + (0.4,) for scale in cl.to_numeric(rdylgn)]
    for line_id, line_dict in enumerate(line_dict_list):
        name = line_dict.get('name', 'gimme a name')

        upper_bound = go.Scatter(
            name='Upper Bound',
            x=line_dict['x'],
            y=line_dict['upper_y'],
            mode='lines',
            marker=dict(color="#444"),
            line=dict(width=0),
            fillcolor=rdylgn_background[line_id],
        )

        trace = go.Scatter(
            x=line_dict['x'],
            y=line_dict['main_y'],
            mode='lines',
            name=name,
            # FIX: plotly expects a color string here; the original passed the bare index.
            line=dict(color=rdylgn[line_id]),
            fillcolor=rdylgn_background[line_id],
            fill='tonexty')

        lower_bound = go.Scatter(
            name='Lower Bound',
            x=line_dict['x'],
            y=line_dict['lower_y'],
            marker=dict(color="#444"),
            line=dict(width=0),
            mode='lines')

        data.extend([upper_bound, trace, lower_bound])

    layout = dict(title='{} Line Plot'.format('Experiment Name Penis'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))

    fig = go.Figure(data=data, layout=layout)
    pl.offline.plot(fig, auto_open=True, filename=filename)
    pass


if __name__ == '__main__':
    args = build_args()
    in_file = args.in_file[0]
    out_file = args.out_file

    with open(in_file, 'rb') as in_f:
        experiment = dill.load(in_f)
    plot_latent_trajectories_3D(experiment.data_storage)

    print('aha')
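The __main__ block above feeds Experiment.data_storage straight into plot_latent_trajectories_3D(), so the expected input appears to be a mapping from run id to a list of flat weight vectors. A hypothetical shape check (the random data is illustrative only, not something the script produces):

import numpy as np

fake_storage = {
    0: [np.random.rand(10) for _ in range(20)],   # 20 snapshots of 10 weights
    1: [np.random.rand(10) for _ in range(20)],
}
# plot_latent_trajectories_3D(fake_storage)  # would open an offline plotly figure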