plotting
@@ -1,7 +1,6 @@
 import math
 import copy
 import numpy as np
-from tqdm import tqdm
 
 from keras.models import Sequential
 from keras.layers import SimpleRNN, Dense
@@ -43,6 +42,7 @@ class NeuralNetwork(PrintingObject):
         for layer_id, layer in enumerate(network_weights):
             for cell_id, cell in enumerate(layer):
                 for weight_id, weight in enumerate(cell):
+                    # could be a chain comparison "lower_bound <= weight <= upper_bound"
                     if not (lower_bound <= weight and weight <= upper_bound):
                         return False
         return True
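As the new comment says, Python lets the two bound checks be chained, so the guard is equivalent to:

    # chained form of the bounds check above:
    if not (lower_bound <= weight <= upper_bound):
        return False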
@@ -538,6 +538,7 @@ class LearningNeuralNetwork(NeuralNetwork):
         self.depth = depth
         self.features = features
         self.compile_params = dict(loss='mse', optimizer='sgd')
+        self.model = Sequential()
         self.model.add(Dense(units=self.width, input_dim=self.features, **self.keras_params))
         for _ in range(self.depth-1):
             self.model.add(Dense(units=self.width, **self.keras_params))
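The added `self.model = Sequential()` creates the model object that the following `add` calls build up. Stripped of the class machinery, the constructed network looks roughly like this (a sketch with hypothetical width=2, depth=2, features=4, and `keras_params` left out):

    from keras.models import Sequential
    from keras.layers import Dense

    width, depth, features = 2, 2, 4                     # hypothetical values
    model = Sequential()
    model.add(Dense(units=width, input_dim=features))    # first layer fixes the input size
    for _ in range(depth - 1):
        model.add(Dense(units=width))                    # further hidden layers share the width
    model.compile(loss='mse', optimizer='sgd')           # mirrors self.compile_params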
@@ -591,7 +592,7 @@ class TrainingNeuralNetworkDecorator():
     def compile_model(self, **kwargs):
         compile_params = copy.deepcopy(self.compile_params)
         compile_params.update(kwargs)
-        return self.get_model().compile(**compile_params)
+        return self.net.model.compile(**compile_params)
 
     def compiled(self, **kwargs):
         if not self.model_compiled:
@@ -617,7 +618,7 @@ if __name__ == '__main__':
     if False:
         with FixpointExperiment() as exp:
             for run_id in tqdm(range(100)):
-                # net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear')
+                net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear')
                 # net = AggregatingNeuralNetwork(aggregates=4, width=2, depth=2)\
                 # net = FFTNeuralNetwork(aggregates=4, width=2, depth=2) \
                 #       .with_params(print_all_weight_updates=False, use_bias=False)
code/soup.py (27 changed lines)
@@ -1,9 +1,5 @@
 import random
-import copy
 
-from tqdm import tqdm
-
-from experiment import *
 from network import *
 
 
@@ -21,7 +17,19 @@ class Soup:
         self.params = dict(attacking_rate=0.1, train_other_rate=0.1, train=0)
         self.params.update(kwargs)
         self.time = 0
 
+    def __copy__(self):
+        copy_ = Soup(self.size, self.generator, **self.params)
+        copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
+                          attr not in ['particles', 'historical_particles']}
+        return copy_
+
+    def without_particles(self):
+        self_copy = copy.copy(self)
+        # self_copy.particles = [particle.states for particle in self.particles]
+        self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
+        return self_copy
+
     def with_params(self, **kwargs):
         self.params.update(kwargs)
         return self
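`__copy__` rebuilds a soup shell without the particle lists, and `without_particles` swaps the history for the plain per-step state dicts, so the result can be serialized without dragging live Keras models along. A minimal usage sketch (assuming a `net_generator` factory as in the `__main__` block below):

    soup = Soup(10, net_generator).with_params(train=10)
    soup.seed()
    soup.evolve()
    slim = soup.without_particles()
    # slim.historical_particles maps uid -> list of state dicts;
    # the original soup keeps its live particles untouched.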
@@ -94,6 +102,7 @@ class Soup:
         particle.print_weights()
         print(particle.is_fixpoint())
 
+
 class ParticleDecorator:
 
     next_uid = 0
@@ -131,7 +140,6 @@ class ParticleDecorator:
         return self.states
 
 
-
 if __name__ == '__main__':
     if False:
         with SoupExperiment() as exp:
@@ -155,12 +163,11 @@ if __name__ == '__main__':
             # net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
             # .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
             # net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
-            soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=200)
+            soup = Soup(10, net_generator).with_params(remove_divergent=True, remove_zero=True, train=10)
             soup.seed()
-            for _ in tqdm(range(10)):
+            for _ in tqdm(range(100)):
                 soup.evolve()
             soup.print_all()
             exp.log(soup.count())
-            exp.save(soup=soup)  # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
+            exp.save(soup=soup.without_particles())  # you can access soup.historical_particles[particle_uid].states[time_step]['loss']
             # or soup.historical_particles[particle_uid].states[time_step]['weights'] from soup.dill
-
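Because `exp.save` now stores the slimmed copy, the dill file holds plain per-step state dicts instead of live particles. A loading sketch, assuming `exp.save` pickles its kwargs to soup.dill via dill as the inline comment suggests (`particle_uid` and `time_step` are hypothetical placeholders); note that after `without_particles()` each `historical_particles` entry is already the states list, so no `.states` hop is needed:

    import dill

    with open('soup.dill', 'rb') as f:
        soup = dill.load(f)

    states = soup.historical_particles[particle_uid]   # list of per-step state dicts
    loss_at_t = states[time_step]['loss']              # or ['weights'], per the comment above

The hunks that follow belong to the plotting script itself.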
@@ -1,7 +1,8 @@
 import os
-from typing import Union
 
-from experiment import Experiment, SoupExperiment
+from experiment import Experiment
+# noinspection PyUnresolvedReferences
+from soup import Soup
 
 from argparse import ArgumentParser
 import numpy as np
@@ -23,29 +24,42 @@ def build_args():
     return arg_parser.parse_args()
 
 
+def build_from_soup(soup):
+    particles = soup.historical_particles
+    particle_dict = [dict(trajectory=[timestamp['weights'] for timestamp in particle],
+                          fitted=[timestamp['fitted'] for timestamp in particle],
+                          loss=[timestamp['loss'] for timestamp in particle],
+                          time=[timestamp['time'] for timestamp in particle]) for particle in particles.values()]
+    return particle_dict
+
+
 def plot_latent_trajectories(soup_or_experiment, filename='latent_trajectory_plot'):
-    assert isinstance(soup_or_experiment, Union[Experiment, SoupExperiment])
-    bupu = cl.scales['9']['seq']['BuPu']
-    data_dict = soup_or_experiment.data_storage
+    assert isinstance(soup_or_experiment, (Experiment, Soup))
+    bupu = cl.scales['11']['div']['RdYlGn']
+    data_dict = soup_or_experiment.data_storage if isinstance(soup_or_experiment, Experiment) \
+        else build_from_soup(soup_or_experiment)
     scale = cl.interp(bupu, len(data_dict)+1)  # Map color scale to N bins
 
     # Fit the embedding space
     transformer = TSNE()
-    for trajectory_id in data_dict:
-        transformer.fit(np.asarray(data_dict[trajectory_id]))
+    for particle_dict in data_dict:
+        array = np.asarray([np.hstack([x.flatten() for x in timestamp]).flatten()
+                            for timestamp in particle_dict['trajectory']])
+        particle_dict['trajectory'] = array
+        transformer.fit(array)
 
     # Transform data accordingly and plot it
     data = []
-    for trajectory_id in data_dict:
-        transformed = transformer._fit(np.asarray(data_dict[trajectory_id]))
+    for p_id, particle_dict in enumerate(data_dict):
+        transformed = transformer._fit(np.asarray(particle_dict['trajectory']))
         line_trace = go.Scatter(
             x=transformed[:, 0],
             y=transformed[:, 1],
             text='Hovertext goes here'.format(),
-            line=dict(color=scale[trajectory_id]),
+            line=dict(color=scale[p_id]),
             # legendgroup='Position -{}'.format(pos),
-            # name='Position -{}'.format(pos),
-            showlegend=False,
+            name='Particle - {}'.format(p_id),
+            showlegend=True,
             # hoverinfo='text',
             mode='lines')
         line_start = go.Scatter(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
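For orientation: each element `build_from_soup` yields is a plain dict of parallel per-step lists, which the loop above then flattens in place. Roughly (a sketch of the assumed state-dict keys):

    # one entry of build_from_soup(soup), before the in-place flattening above:
    # {'trajectory': [per-layer weight arrays of step 0, step 1, ...],
    #  'fitted': [...], 'loss': [...], 'time': [...]}

Note also that `transformer._fit` is private scikit-learn API and may break across versions; the public spelling of the same step would be something like:

    transformed = TSNE(n_components=2).fit_transform(array)   # array: (n_steps, n_weights)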
@@ -73,34 +87,38 @@ def plot_latent_trajectories(soup_or_experiment, filename='latent_trajectory_plot'):
     pass
 
 
-def plot_latent_trajectories_3D(data_dict, filename='plot'):
+def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
     def norm(val, a=0, b=0.25):
         return (val - a) / (b - a)
 
-    bupu = cl.scales['9']['seq']['BuPu']
+    data_dict = soup_or_experiment.data_storage if isinstance(soup_or_experiment, Experiment) \
+        else build_from_soup(soup_or_experiment)
+
+    bupu = cl.scales['11']['div']['RdYlGn']
     scale = cl.interp(bupu, len(data_dict)+1)  # Map color scale to N bins
 
-    max_len = max([len(trajectory) for trajectory in data_dict.values()])
-
-    # Fit the mebedding space
+    # Fit the embedding space
     transformer = TSNE()
-    for trajectory_id in data_dict:
-        transformer.fit(data_dict[trajectory_id])
+    for particle_dict in data_dict:
+        array = np.asarray([np.hstack([x.flatten() for x in timestamp]).flatten()
+                            for timestamp in particle_dict['trajectory']])
+        particle_dict['trajectory'] = array
+        transformer.fit(array)
 
     # Transform data accordingly and plot it
     data = []
-    for trajectory_id in data_dict:
-        transformed = transformer._fit(np.asarray(data_dict[trajectory_id]))
+    for p_id, particle_dict in enumerate(data_dict):
+        transformed = transformer._fit(particle_dict['trajectory'])
         trace = go.Scatter3d(
             x=transformed[:, 0],
             y=transformed[:, 1],
-            z=np.arange(transformed.shape[0]),
-            text='Hovertext goes here'.format(),
-            line=dict(color=scale[trajectory_id]),
-            # legendgroup='Position -{}'.format(pos),
-            # name='Position -{}'.format(pos),
-            showlegend=False,
-            # hoverinfo='text',
+            z=np.asarray(particle_dict['time']),
+            text='Particle: {}<br> It had {} lives.'.format(p_id, len(particle_dict['trajectory'])),
+            line=dict(color=scale[p_id]),
+            # legendgroup='Particle - {}'.format(p_id),
+            name='Particle - {}'.format(p_id),
+            # showlegend=True,
+            hoverinfo='text',
             mode='lines')
         data.append(trace)
 
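The `np.hstack` line carries the real change in both plot functions: each timestep's weights arrive as a list of per-layer arrays, and hstack-plus-flatten turns them into one fixed-length vector, so TSNE sees a single point per timestep. A standalone sketch with hypothetical layer shapes:

    import numpy as np

    # hypothetical weights for one timestep: a (4, 2) kernel and a (2,) bias
    timestamp = [np.ones((4, 2)), np.zeros(2)]
    point = np.hstack([x.flatten() for x in timestamp]).flatten()
    print(point.shape)   # (10,) -- one flat vector per recorded step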
@@ -109,7 +127,7 @@ def plot_latent_trajectories_3D(data_dict, filename='plot'):
                         yaxis=dict(tickwidth=1, title='transformed Y'),
                         zaxis=dict(tickwidth=1, title='Epoch')),
                     title='{} - Latent Trajectory Movement'.format('Penis'),
-                    width=800, height=800,
+                    # width=0, height=0,
                     margin=dict(l=0, r=0, b=0, t=0))
 
     fig = go.Figure(data=data, layout=layout)
@@ -213,4 +231,4 @@ if __name__ == '__main__':
     in_file = args.in_file[0]
     out_file = args.out_file
 
-    search_and_apply(in_file, plot_latent_trajectories)
+    search_and_apply(in_file, plot_latent_trajectories_3D)