Revert "Merge branch 'cristian_lenta-ba_code' into 'master'"

This reverts merge request !1
Cristian Lenta 2021-05-03 08:37:31 +02:00
parent 46307610bc
commit 5e5bf9d7d9
111 changed files with 8338 additions and 2 deletions

572
.gitignore vendored Normal file
@@ -0,0 +1,572 @@
# Created by https://www.gitignore.io/api/data,linux,macos,python,windows,pycharm,database,jupyternotebook
# Edit at https://www.gitignore.io/?templates=data,linux,macos,python,windows,pycharm,database,jupyternotebook
### Local Datasets ###
/experiments
/setups/experiments
### Data ###
*.csv
*.dat
*.efx
*.gbr
*.key
*.pps
*.ppt
*.pptx
*.sdf
*.tax2010
*.vcf
*.xml
### Database ###
*.accdb
*.db
*.dbf
*.mdb
*.pdb
*.sqlite3
### JupyterNotebook ###
.ipynb_checkpoints
*/.ipynb_checkpoints/*
# Remove previous ipynb_checkpoints
# git rm -r .ipynb_checkpoints/
#
### Linux ###
*~
# temporary files which can be created if a process still has a handle open of a deleted file
.fuse_hidden*
# KDE directory preferences
.directory
# Linux trash folder which might appear on any partition or disk
.Trash-*
# .nfs files are created when an open file is removed but is still being accessed
.nfs*
### macOS ###
# General
.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### PyCharm ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### PyCharm Patch ###
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
# *.iml
# modules.xml
# .idea/misc.xml
# *.ipr
# Sonarlint plugin
.idea/sonarlint
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
### Python Patch ###
.venv/
### Windows ###
# Windows thumbnail cache files
Thumbs.db
ehthumbs.db
ehthumbs_vista.db
# Dump file
*.stackdump
# Folder config file
[Dd]esktop.ini
# Recycle Bin used on file shares
$RECYCLE.BIN/
# Windows Installer files
*.cab
*.msi
*.msix
*.msm
*.msp
# Windows shortcuts
*.lnk
# pycharm
.idea/
#######################################
#### Tex related
## Core latex/pdflatex auxiliary files:
*.aux
*.lof
*.log
*.lot
*.fls
*.out
*.toc
*.fmt
*.fot
*.cb
*.cb2
.*.lb
## Intermediate documents:
*.dvi
*.xdv
*-converted-to.*
# these rules might exclude image files for figures etc.
# *.ps
# *.eps
# *.pdf
## Generated if empty string is given at "Please type another file name for output:"
.pdf
## Bibliography auxiliary files (bibtex/biblatex/biber):
*.bbl
*.bcf
*.blg
*-blx.aux
*-blx.bib
*.run.xml
## Build tool auxiliary files:
*.fdb_latexmk
*.synctex
*.synctex(busy)
*.synctex.gz
*.synctex.gz(busy)
*.pdfsync
## Build tool directories for auxiliary files
# latexrun
latex.out/
## Auxiliary and intermediate files from other packages:
# algorithms
*.alg
*.loa
# achemso
acs-*.bib
# amsthm
*.thm
# beamer
*.nav
*.pre
*.snm
*.vrb
# changes
*.soc
# comment
*.cut
# cprotect
*.cpt
# elsarticle (documentclass of Elsevier journals)
*.spl
# endnotes
*.ent
# fixme
*.lox
# feynmf/feynmp
*.mf
*.mp
*.t[1-9]
*.t[1-9][0-9]
*.tfm
#(r)(e)ledmac/(r)(e)ledpar
*.end
*.?end
*.[1-9]
*.[1-9][0-9]
*.[1-9][0-9][0-9]
*.[1-9]R
*.[1-9][0-9]R
*.[1-9][0-9][0-9]R
*.eledsec[1-9]
*.eledsec[1-9]R
*.eledsec[1-9][0-9]
*.eledsec[1-9][0-9]R
*.eledsec[1-9][0-9][0-9]
*.eledsec[1-9][0-9][0-9]R
# glossaries
*.acn
*.acr
*.glg
*.glo
*.gls
*.glsdefs
# gnuplottex
*-gnuplottex-*
# gregoriotex
*.gaux
*.gtex
# htlatex
*.4ct
*.4tc
*.idv
*.lg
*.trc
*.xref
# hyperref
*.brf
# knitr
*-concordance.tex
# TODO Comment the next line if you want to keep your tikz graphics files
*.tikz
*-tikzDictionary
# listings
*.lol
# makeidx
*.idx
*.ilg
*.ind
*.ist
# minitoc
*.maf
*.mlf
*.mlt
*.mtc[0-9]*
*.slf[0-9]*
*.slt[0-9]*
*.stc[0-9]*
# minted
_minted*
*.pyg
# morewrites
*.mw
# nomencl
*.nlg
*.nlo
*.nls
# pax
*.pax
# pdfpcnotes
*.pdfpc
# sagetex
*.sagetex.sage
*.sagetex.py
*.sagetex.scmd
# scrwfile
*.wrt
# sympy
*.sout
*.sympy
sympy-plots-for-*.tex/
# pdfcomment
*.upa
*.upb
# pythontex
*.pytxcode
pythontex-files-*/
# tcolorbox
*.listing
# thmtools
*.loe
# TikZ & PGF
*.dpth
*.md5
*.auxlock
# todonotes
*.tdo
# vhistory
*.hst
*.ver
# easy-todo
*.lod
# xcolor
*.xcp
# xmpincl
*.xmpi
# xindy
*.xdy
# xypic precompiled matrices
*.xyc
# endfloat
*.ttt
*.fff
# Latexian
TSWLatexianTemp*
## Editors:
# WinEdt
*.bak
*.sav
# Texpad
.texpadtmp
# LyX
*.lyx~
# Kile
*.backup
# KBibTeX
*~[0-9]*
# auto folder when using emacs and auctex
./auto/*
*.el
# expex forward references with \gathertags
*-tags.tex
# standalone packages
*.sta
# End of https://www.gitignore.io/api/data,linux,macos,python,windows,pycharm,database,jupyternotebook

@@ -1,2 +1,2 @@
# cristian_lenta - BA code
# bannana-networks

96
code/bar_plot.py Normal file
@@ -0,0 +1,96 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from typing import List
from collections import defaultdict
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def plot_bars(names_bars_tuple, filename='histogram_plot'):
    # categorical
ryb = cl.scales['10']['div']['RdYlBu']
names, bars = names_bars_tuple
situations = list(bars[0].keys())
names = ['Weightwise', 'Aggregating', 'Recurrent'] # [name.split(' ')[0] for name in names]
data_dict = {}
for idx, name in enumerate(names):
data_dict[name] = bars[idx]
data = []
for idx, situation in enumerate(situations):
bar = go.Bar(
y=[data_dict[name][situation] for name in names],
# x=[key for key in data_dict[name].keys()],
x=names,
name=situation,
showlegend=True,
)
data.append(bar)
layout = dict(xaxis=dict(title="Networks", titlefont=dict(size=20)),
barmode='stack',
# height=400, width=400,
# margin=dict(l=20, r=20, t=20, b=20)
legend=dict(orientation="h", x=0.05)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
        if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
bars = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
plotting_function((names, bars), filename='{}.html'.format(absolut_file_or_folder[:-5]))
        else:
            # This was not a file we should look for.
            pass
    else:
        # This was either another file type or the plot .html already exists.
        pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_bars, files_to_look_for=['all_counters.dill'])
# , 'all_names.dill', 'all_notable_nets.dill'])
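
A minimal usage sketch for plot_bars; the names list and counter values below are hypothetical toy data, invented for illustration:

names = ['Weightwise', 'Aggregating', 'Recurrent']
bars = [{'divergent': 3, 'fix_zero': 5, 'fix_other': 2},   # one counter dict per network type
        {'divergent': 1, 'fix_zero': 8, 'fix_other': 1},
        {'divergent': 6, 'fix_zero': 2, 'fix_other': 2}]
plot_bars((names, bars), filename='toy_histogram.html')    # writes a stacked-bar plot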

129
code/box_plots.py Normal file
@@ -0,0 +1,129 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from typing import List
from collections import defaultdict
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def plot_box(exp: Experiment, filename='histogram_plot'):
    # categorical
ryb = cl.scales['10']['div']['RdYlBu']
data = []
for d in range(exp.depth):
names = ['D 10e-{}'.format(d)] * exp.trials
data.extend(names)
trace_list = []
vergence_box = go.Box(
y=exp.ys,
x=data,
name='Time to Vergence',
boxpoints=False,
showlegend=True,
marker=dict(
color=ryb[3]
),
)
fixpoint_box = go.Box(
y=exp.zs,
x=data,
name='Time as Fixpoint',
boxpoints=False,
showlegend=True,
marker=dict(
color=ryb[-1]
),
)
trace_list.extend([vergence_box, fixpoint_box])
layout = dict(title='{}'.format('Known Fixpoint Variation'),
titlefont=dict(size=30),
legend=dict(
orientation="h",
x=.1, y=-0.1,
font=dict(
size=20,
color='black'
),
),
boxmode='group',
boxgap=0,
# barmode='group',
bargap=0,
xaxis=dict(showgrid=False,
zeroline=True,
tickangle=0,
showticklabels=True),
yaxis=dict(
title='Steps',
zeroline=False,
titlefont=dict(
size=30
)
),
# height=400, width=400,
margin=dict(t=50)
)
fig = go.Figure(data=trace_list, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
        if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except AttributeError:
pass
        else:
            # This was not a file we should look for.
            pass
    else:
        # This was either another file type or the plot .html already exists.
        pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_box, files_to_look_for=['experiment.dill'])
# , 'all_names.dill', 'all_notable_nets.dill'])
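
A minimal sketch of the input plot_box expects: any object exposing depth, trials, ys and zs, where ys and zs hold depth * trials entries. ToyExperiment and its values are hypothetical:

class ToyExperiment:
    depth, trials = 3, 5
    ys = [4, 5, 6, 5, 4, 7, 8, 6, 7, 9, 11, 12, 10, 13, 12]  # time to vergence
    zs = [0, 0, 0, 0, 0, 1, 0, 2, 1, 0, 3, 4, 2, 5, 3]       # time as fixpoint
plot_box(ToyExperiment(), filename='toy_box.html')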

120
code/experiment.py Normal file
@@ -0,0 +1,120 @@
import os
import time
import dill
from tqdm import tqdm
import copy
class Experiment:
@staticmethod
def from_dill(path):
with open(path, "rb") as dill_file:
return dill.load(dill_file)
def __init__(self, name=None, ident=None):
self.experiment_id = '{}_{}'.format(ident or '', time.time())
self.experiment_name = name or 'unnamed_experiment'
self.next_iteration = 0
self.log_messages = []
self.historical_particles = {}
def __enter__(self):
self.dir = os.path.join('experiments', 'exp-{name}-{id}-{it}'.format(
name=self.experiment_name, id=self.experiment_id, it=self.next_iteration)
)
os.makedirs(self.dir)
print("** created {dir} **".format(dir=self.dir))
return self
def __exit__(self, exc_type, exc_value, traceback):
self.save(experiment=self.without_particles())
self.save_log()
self.next_iteration += 1
def log(self, message, **kwargs):
self.log_messages.append(message)
print(message, **kwargs)
def save_log(self, log_name="log"):
with open(os.path.join(self.dir, "{name}.txt".format(name=log_name)), "w") as log_file:
for log_message in self.log_messages:
print(str(log_message), file=log_file)
def __copy__(self):
copy_ = Experiment(name=self.experiment_name,)
copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
attr not in ['particles', 'historical_particles']}
return copy_
def without_particles(self):
self_copy = copy.copy(self)
# self_copy.particles = [particle.states for particle in self.particles]
self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
return self_copy
def save(self, **kwargs):
for name, value in kwargs.items():
with open(os.path.join(self.dir, "{name}.dill".format(name=name)), "wb") as dill_file:
dill.dump(value, dill_file)
class FixpointExperiment(Experiment):
def __init__(self, **kwargs):
kwargs['name'] = self.__class__.__name__ if 'name' not in kwargs else kwargs['name']
super().__init__(**kwargs)
self.counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
self.interesting_fixpoints = []
def run_net(self, net, step_limit=100, run_id=0):
i = 0
while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
net.self_attack()
i += 1
if run_id:
net.save_state(time=i)
self.count(net)
def count(self, net):
if net.is_diverged():
self.counters['divergent'] += 1
elif net.is_fixpoint():
if net.is_zero():
self.counters['fix_zero'] += 1
else:
self.counters['fix_other'] += 1
self.interesting_fixpoints.append(net.get_weights())
elif net.is_fixpoint(2):
self.counters['fix_sec'] += 1
else:
self.counters['other'] += 1
class MixedFixpointExperiment(FixpointExperiment):
def run_net(self, net, trains_per_application=100, step_limit=100, run_id=0):
i = 0
while i < step_limit and not net.is_diverged() and not net.is_fixpoint():
net.self_attack()
with tqdm(postfix=["Loss", dict(value=0)]) as bar:
for _ in range(trains_per_application):
loss = net.compiled().train()
bar.postfix[1]["value"] = loss
bar.update()
i += 1
if run_id:
net.save_state()
self.count(net)
class SoupExperiment(Experiment):
pass
class IdentLearningExperiment(Experiment):
def __init__(self):
super(IdentLearningExperiment, self).__init__(name=self.__class__.__name__)
pass
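
A minimal usage sketch for the Experiment context manager above: entering creates a fresh experiments/exp-<name>-<id>-<iteration> directory, exiting pickles the experiment (without particle objects) and writes the collected log. The 'demo' name and payload are hypothetical:

with Experiment(name='demo') as exp:
    exp.log('step 1 done')        # printed now, collected for log.txt on exit
    exp.save(numbers=[1, 2, 3])   # dumped to <exp.dir>/numbers.dill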

832
code/fixpoint-2.ipynb Normal file

File diff suppressed because one or more lines are too long

118
code/line_plots.py Normal file
@@ -0,0 +1,118 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def line_plot(names_exp_tuple, filename='lineplot'):
names, line_dict_list = names_exp_tuple
names = ['Weightwise', 'Aggregating', 'Recurrent']
if False:
data = []
base_scale = cl.scales['10']['div']['RdYlGn']
scale = cl.interp(base_scale, len(line_dict_list) + 1) # Map color scale to N bins
for ld_id, line_dict in enumerate(line_dict_list):
for data_point in ['ys', 'zs']:
trace = go.Scatter(
x=line_dict['xs'],
y=line_dict[data_point],
name='{} {}zero-fixpoints'.format(names[ld_id], 'non-' if data_point == 'zs' else ''),
line=dict(
# color=scale[ld_id],
width=5,
# dash='dash' if data_point == 'ys' else ''
),
)
data.append(trace)
if True:
data = []
base_scale = cl.scales['10']['div']['RdYlGn']
scale = cl.interp(base_scale, len(line_dict_list) + 1) # Map color scale to N bins
for ld_id, line_dict in enumerate(line_dict_list):
trace = go.Scatter(
x=line_dict['xs'],
y=line_dict['ys'],
name=names[ld_id],
line=dict( # color=scale[ld_id],
width=5
),
)
data.append(trace)
layout = dict(xaxis=dict(title='Trains per self-application', titlefont=dict(size=20)),
yaxis=dict(title='Average amount of fixpoints found',
titlefont=dict(size=20),
# type='log',
# range=[0, 2]
),
legend=dict(orientation='h', x=0.3, y=-0.3),
# height=800, width=800,
margin=dict(b=0)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
names_dill_location = os.path.join(*os.path.split(absolut_file_or_folder)[:-1], 'all_names.dill')
with open(names_dill_location, 'rb') as in_f:
names = dill.load(in_f)
try:
plotting_function((names, exp), filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
    else:
        # This was either another file type or the plot .html already exists.
        pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, line_plot, ["all_data.dill"])
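
A hypothetical toy input for line_plot: one xs/ys dict per network type, matching the three hard-coded names. The values are invented:

names = ['Weightwise', 'Aggregating', 'Recurrent']
lines = [{'xs': [0, 50, 100], 'ys': [0.2, 0.55, 0.9]},
         {'xs': [0, 50, 100], 'ys': [1.0, 0.95, 0.9]},
         {'xs': [0, 50, 100], 'ys': [0.05, 0.0, 0.1]}]
line_plot((names, lines), filename='toy_lines.html')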

191
code/methods.py Normal file
@@ -0,0 +1,191 @@
import tensorflow as tf
from keras.models import Sequential, Model
from keras.layers import SimpleRNN, Dense
from keras.layers import Input, TimeDistributed
from tqdm import tqdm
import time
import os
import dill
from experiment import Experiment
import itertools
from typing import Union
import numpy as np
class Network(object):
def __init__(self, features, cells, layers, bias=False, recurrent=False):
self.features = features
self.cells = cells
self.num_layer = layers
bias_params = cells if bias else 0
# Recurrent network
if recurrent:
# First RNN
p_layer_1 = (self.features * self.cells + self.cells ** 2 + bias_params)
# All other RNN Layers
p_layer_n = (self.cells * self.cells + self.cells ** 2 + bias_params) * (self.num_layer - 1)
else:
# First Dense
p_layer_1 = (self.features * self.cells + bias_params)
# All other Dense Layers
p_layer_n = (self.cells * self.cells + bias_params) * (self.num_layer - 1)
# Final Dense
p_layer_out = self.features * self.cells + bias_params
self.parameters = np.sum([p_layer_1, p_layer_n, p_layer_out])
# Build network
cell = SimpleRNN if recurrent else Dense
self.inputs, x = Input(shape=(self.parameters // self.features,
self.features) if recurrent else (self.features,)), None
for layer in range(self.num_layer):
if recurrent:
x = SimpleRNN(self.cells, activation=None, use_bias=False,
return_sequences=True)(self.inputs if layer == 0 else x)
else:
x = Dense(self.cells, activation=None, use_bias=False,
)(self.inputs if layer == 0 else x)
self.outputs = Dense(self.features if recurrent else 1, activation=None, use_bias=False)(x)
        print('Network initialized with {p} params @:{e}Features: {f}{e}Cells: {c}{e}Layers: {l}'.format(
            p=self.parameters, l=self.num_layer, c=self.cells, f=self.features, e='\n{}'.format(' ' * 5))
        )
pass
def get_inputs(self):
return self.inputs
def get_outputs(self):
return self.outputs
class _BaseNetwork(Model):
def __init__(self, **kwargs):
super(_BaseNetwork, self).__init__(**kwargs)
# This is dirty
self.features = None
def get_weights_flat(self):
weights = super().get_weights()
flat = np.asarray(np.concatenate([x.flatten() for x in weights]))
return flat
def step(self, x):
pass
def step_other(self, other: Union[Sequential, Model]) -> bool:
pass
def get_parameter_count(self):
return np.sum([np.prod(x.shape) for x in self.get_weights()])
def train_on_batch(self, *args, **kwargs):
raise NotImplementedError
def compile(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def mean_abs_error(labels, predictions):
return np.mean(np.abs(predictions - labels), axis=-1)
@staticmethod
def mean_sqrd_error(labels, predictions):
return np.mean(np.square(predictions - labels), axis=-1)
class RecurrentNetwork(_BaseNetwork):
def __init__(self, network: Network, *args, **kwargs):
super().__init__(inputs=network.inputs, outputs=network.outputs)
self.features = network.features
self.parameters = network.parameters
assert self.parameters == self.get_parameter_count()
def step(self, x):
shaped = np.reshape(x, (1, -1, self.features))
return self.predict(shaped).flatten()
def fit(self, epochs=500, **kwargs):
losses = []
with tqdm(total=epochs, ascii=True,
desc='Type: {t}'. format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for _ in range(epochs):
x = self.get_weights_flat()
y = self.step(x)
weights = self.get_weights()
global_idx = 0
for idx, weight_matrix in enumerate(weights):
flattened = weight_matrix.flatten()
new_weights = y[global_idx:global_idx + flattened.shape[0]]
weights[idx] = np.reshape(new_weights, weight_matrix.shape)
global_idx += flattened.shape[0]
losses.append(self.mean_sqrd_error(y.flatten(), self.get_weights_flat()))
self.set_weights(weights)
bar.postfix[1]["value"] = losses[-1]
bar.update()
return losses
class FeedForwardNetwork(_BaseNetwork):
def __init__(self, network:Network, **kwargs):
super().__init__(inputs=network.inputs, outputs=network.outputs, **kwargs)
self.features = network.features
self.parameters = network.parameters
self.num_layer = network.num_layer
self.num_cells = network.cells
# assert self.parameters == self.get_parameter_count()
def step(self, x):
return self.predict(x)
def step_other(self, x):
return self.predict(x)
def fit(self, epochs=500, **kwargs):
losses = []
with tqdm(total=epochs, ascii=True,
desc='Type: {t} @ Epoch:'. format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for _ in range(epochs):
all_weights = self.get_weights_flat()
cell_idx = np.apply_along_axis(lambda x: x/self.num_cells, 0, np.arange(int(self.get_parameter_count())))
xc = np.concatenate((all_weights[..., None], cell_idx[..., None]), axis=1)
y = self.step(xc)
weights = self.get_weights()
global_idx = 0
for idx, weight_matrix in enumerate(weights):
# UPDATE THE WEIGHTS
flattened = weight_matrix.flatten()
new_weights = y[global_idx:global_idx + flattened.shape[0], 0]
weights[idx] = np.reshape(new_weights, weight_matrix.shape)
global_idx += flattened.shape[0]
losses.append(self.mean_sqrd_error(y[:, 0].flatten(), self.get_weights_flat()))
self.set_weights(weights)
bar.postfix[1]["value"] = losses[-1]
bar.update()
return losses
if __name__ == '__main__':
with Experiment() as exp:
features, cells, layers = 2, 2, 2
use_recurrent = False
if use_recurrent:
network = Network(features, cells, layers, recurrent=use_recurrent)
r = RecurrentNetwork(network)
loss = r.fit(epochs=10)
exp.save(rnet=r)
else:
network = Network(features, cells, layers, recurrent=use_recurrent)
ff = FeedForwardNetwork(network)
loss = ff.fit(epochs=10)
exp.save(ffnet=ff)
print(loss)
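
For the dense case, the parameter-count formula above works out as follows (features=2, cells=2, layers=2, no bias); note the matching assert is commented out in FeedForwardNetwork, so this count is not guaranteed to agree with what Keras reports:

# p_layer_1   = features * cells           = 2 * 2         = 4
# p_layer_n   = cells * cells * (layers-1) = 2 * 2 * (2-1) = 4
# p_layer_out = features * cells           = 2 * 2         = 4
# => Network(2, 2, 2).parameters == 12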

726
code/network.py Normal file
@@ -0,0 +1,726 @@
import numpy as np
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.callbacks import Callback
from tensorflow.python.keras.layers import SimpleRNN, Dense
from tensorflow.python.keras import backend as K
from util import *
from experiment import *
# Suppress warnings and info messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class SaveStateCallback(Callback):
def __init__(self, net, epoch=0):
super(SaveStateCallback, self).__init__()
self.net = net
self.init_epoch = epoch
def on_epoch_end(self, epoch, logs={}):
description = dict(time=epoch+self.init_epoch)
description['action'] = 'train_self'
description['counterpart'] = None
self.net.save_state(**description)
return
class NeuralNetwork(PrintingObject):
@staticmethod
def weights_to_string(weights):
s = ""
for layer_id, layer in enumerate(weights):
for cell_id, cell in enumerate(layer):
s += "[ "
for weight_id, weight in enumerate(cell):
s += str(weight) + " "
s += "]"
s += "\n"
return s
@staticmethod
def are_weights_diverged(network_weights):
for layer_id, layer in enumerate(network_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
if np.isnan(weight):
return True
if np.isinf(weight):
return True
return False
@staticmethod
def are_weights_within(network_weights, lower_bound, upper_bound):
for layer_id, layer in enumerate(network_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
                    if not (lower_bound <= weight <= upper_bound):
return False
return True
@staticmethod
def fill_weights(old_weights, new_weights_list):
new_weights = copy.deepcopy(old_weights)
current_weight_id = 0
for layer_id, layer in enumerate(new_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
new_weight = new_weights_list[current_weight_id]
new_weights[layer_id][cell_id][weight_id] = new_weight
current_weight_id += 1
return new_weights
def __init__(self, **params):
super().__init__()
self.params = dict(epsilon=0.00000000000001)
self.params.update(params)
self.keras_params = dict(activation='linear', use_bias=False)
self.states = []
def get_model(self):
raise NotImplementedError
def get_params(self):
return self.params
def get_keras_params(self):
return self.keras_params
def with_params(self, **kwargs):
self.params.update(kwargs)
return self
def with_keras_params(self, **kwargs):
self.keras_params.update(kwargs)
return self
def get_weights(self):
return self.model.get_weights()
def get_weights_flat(self):
return np.hstack([weight.flatten() for weight in self.get_weights()])
def set_weights(self, new_weights):
return self.model.set_weights(new_weights)
def apply_to_weights(self, old_weights):
raise NotImplementedError
def apply_to_network(self, other_network):
new_weights = self.apply_to_weights(other_network.get_weights())
return new_weights
def attack(self, other_network):
other_network.set_weights(self.apply_to_network(other_network))
return self
def fuck(self, other_network):
self.set_weights(self.apply_to_network(other_network))
return self
def self_attack(self, iterations=1):
for _ in range(iterations):
self.attack(self)
return self
def meet(self, other_network):
new_other_network = copy.deepcopy(other_network)
return self.attack(new_other_network)
def is_diverged(self):
return self.are_weights_diverged(self.get_weights())
def is_zero(self, epsilon=None):
epsilon = epsilon or self.get_params().get('epsilon')
return self.are_weights_within(self.get_weights(), -epsilon, epsilon)
def is_fixpoint(self, degree=1, epsilon=None):
assert degree >= 1, "degree must be >= 1"
epsilon = epsilon or self.get_params().get('epsilon')
old_weights = self.get_weights()
new_weights = copy.deepcopy(old_weights)
for _ in range(degree):
new_weights = self.apply_to_weights(new_weights)
if NeuralNetwork.are_weights_diverged(new_weights):
return False
for layer_id, layer in enumerate(old_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
new_weight = new_weights[layer_id][cell_id][weight_id]
if abs(new_weight - weight) >= epsilon:
return False
return True
def repr_weights(self, weights=None):
return self.weights_to_string(weights or self.get_weights())
def print_weights(self, weights=None):
print(self.repr_weights(weights))
class ParticleDecorator:
next_uid = 0
def __init__(self, net):
self.uid = self.__class__.next_uid
self.__class__.next_uid += 1
self.net = net
self.states = []
self.save_state(time=0,
action='init',
counterpart=None
)
def __getattr__(self, name):
return getattr(self.net, name)
def get_uid(self):
return self.uid
def make_state(self, **kwargs):
weights = self.net.get_weights_flat()
if any(np.isinf(weights)) or any(np.isnan(weights)):
return None
state = {'class': self.net.__class__.__name__, 'weights': weights}
state.update(kwargs)
return state
def save_state(self, **kwargs):
state = self.make_state(**kwargs)
if state is not None:
self.states += [state]
else:
pass
def update_state(self, number, **kwargs):
raise NotImplementedError('Result is vague')
if number < len(self.states):
self.states[number] = self.make_state(**kwargs)
else:
for i in range(len(self.states), number):
self.states += [None]
self.states += self.make_state(**kwargs)
def get_states(self):
return self.states
class WeightwiseNeuralNetwork(NeuralNetwork):
@staticmethod
def normalize_id(value, norm):
if norm > 1:
return float(value) / float(norm)
else:
return float(value)
def __init__(self, width, depth, **kwargs):
super().__init__(**kwargs)
self.width = width
self.depth = depth
self.model = Sequential()
self.model.add(Dense(units=self.width, input_dim=4, **self.keras_params))
for _ in range(self.depth-1):
self.model.add(Dense(units=self.width, **self.keras_params))
self.model.add(Dense(units=1, **self.keras_params))
def get_model(self):
return self.model
def apply(self, *inputs):
stuff = np.transpose(np.array([[inputs[0]], [inputs[1]], [inputs[2]], [inputs[3]]]))
return self.model.predict(stuff)[0][0]
@classmethod
def compute_all_duplex_weight_points(cls, old_weights):
points = []
normal_points = []
max_layer_id = len(old_weights) - 1
for layer_id, layer in enumerate(old_weights):
max_cell_id = len(layer) - 1
for cell_id, cell in enumerate(layer):
max_weight_id = len(cell) - 1
for weight_id, weight in enumerate(cell):
normal_layer_id = cls.normalize_id(layer_id, max_layer_id)
normal_cell_id = cls.normalize_id(cell_id, max_cell_id)
normal_weight_id = cls.normalize_id(weight_id, max_weight_id)
points += [[weight, layer_id, cell_id, weight_id]]
normal_points += [[weight, normal_layer_id, normal_cell_id, normal_weight_id]]
return points, normal_points
@classmethod
def compute_all_weight_points(cls, all_weights):
return cls.compute_all_duplex_weight_points(all_weights)[0]
@classmethod
def compute_all_normal_weight_points(cls, all_weights):
return cls.compute_all_duplex_weight_points(all_weights)[1]
def apply_to_weights(self, old_weights):
new_weights = copy.deepcopy(self.get_weights())
for (weight_point, normal_weight_point) in zip(*self.__class__.compute_all_duplex_weight_points(old_weights)):
weight, layer_id, cell_id, weight_id = weight_point
_, normal_layer_id, normal_cell_id, normal_weight_id = normal_weight_point
new_weight = self.apply(*normal_weight_point)
new_weights[layer_id][cell_id][weight_id] = new_weight
if self.params.get("print_all_weight_updates", False) and not self.is_silent():
print("updated old weight {weight}\t @ ({layer},{cell},{weight_id}) "
"to new value {new_weight}\t calling @ ({normal_layer},{normal_cell},{normal_weight_id})").format(
weight=weight, layer=layer_id, cell=cell_id, weight_id=weight_id, new_weight=new_weight,
normal_layer=normal_layer_id, normal_cell=normal_cell_id, normal_weight_id=normal_weight_id)
return new_weights
def compute_samples(self):
samples = []
for normal_weight_point in self.compute_all_normal_weight_points(self.get_weights()):
weight, normal_layer_id, normal_cell_id, normal_weight_id = normal_weight_point
sample = np.transpose(np.array([[weight], [normal_layer_id], [normal_cell_id], [normal_weight_id]]))
samples += [sample[0]]
samples_array = np.asarray(samples)
return samples_array, samples_array[:, 0]
class AggregatingNeuralNetwork(NeuralNetwork):
@staticmethod
def aggregate_average(weights):
total = 0
count = 0
for weight in weights:
total += float(weight)
count += 1
return total / float(count)
@staticmethod
def aggregate_max(weights):
        return max(weights)
@staticmethod
def deaggregate_identically(aggregate, amount):
return [aggregate for _ in range(amount)]
@staticmethod
def shuffle_not(weights_list):
return weights_list
@staticmethod
def shuffle_random(weights_list):
import random
random.shuffle(weights_list)
return weights_list
def __init__(self, aggregates, width, depth, **kwargs):
super().__init__(**kwargs)
self.aggregates = aggregates
self.width = width
self.depth = depth
self.model = Sequential()
self.model.add(Dense(units=width, input_dim=self.aggregates, **self.keras_params))
for _ in range(depth-1):
self.model.add(Dense(units=width, **self.keras_params))
self.model.add(Dense(units=self.aggregates, **self.keras_params))
def get_model(self):
return self.model
def get_aggregator(self):
return self.params.get('aggregator', self.aggregate_average)
def get_deaggregator(self):
return self.params.get('deaggregator', self.deaggregate_identically)
def get_shuffler(self):
return self.params.get('shuffler', self.shuffle_not)
def get_amount_of_weights(self):
total_weights = 0
for layer_id, layer in enumerate(self.get_weights()):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
total_weights += 1
return total_weights
def apply(self, *inputs):
stuff = np.transpose(np.array([[inputs[i]] for i in range(self.aggregates)]))
return self.model.predict(stuff)[0]
def apply_to_weights(self, old_weights):
# build aggregations from old_weights
collection_size = self.get_amount_of_weights() // self.aggregates
collections, leftovers = self.collect_weights(old_weights, collection_size)
# call network
old_aggregations = [self.get_aggregator()(collection) for collection in collections]
new_aggregations = self.apply(*old_aggregations)
# generate list of new weights
new_weights_list = []
for aggregation_id, aggregation in enumerate(new_aggregations):
if aggregation_id == self.aggregates - 1:
new_weights_list += self.get_deaggregator()(aggregation, collection_size + leftovers)
else:
new_weights_list += self.get_deaggregator()(aggregation, collection_size)
new_weights_list = self.get_shuffler()(new_weights_list)
# write back new weights
new_weights = self.fill_weights(old_weights, new_weights_list)
# return results
if self.params.get("print_all_weight_updates", False) and not self.is_silent():
print("updated old weight aggregations " + str(old_aggregations))
print("to new weight aggregations " + str(new_aggregations))
print("resulting in network weights ...")
print(self.weights_to_string(new_weights))
return new_weights
@staticmethod
def collect_weights(all_weights, collection_size):
collections = []
next_collection = []
current_weight_id = 0
for layer_id, layer in enumerate(all_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
next_collection += [weight]
if (current_weight_id + 1) % collection_size == 0:
collections += [next_collection]
next_collection = []
current_weight_id += 1
collections[-1] += next_collection
leftovers = len(next_collection)
return collections, leftovers
def get_collected_weights(self):
collection_size = self.get_amount_of_weights() // self.aggregates
return self.collect_weights(self.get_weights(), collection_size)
def get_aggregated_weights(self):
collections, leftovers = self.get_collected_weights()
aggregations = [self.get_aggregator()(collection) for collection in collections]
return aggregations, leftovers
def compute_samples(self):
aggregations, _ = self.get_aggregated_weights()
sample = np.transpose(np.array([[aggregations[i]] for i in range(self.aggregates)]))
return [sample], [sample]
def is_fixpoint_after_aggregation(self, degree=1, epsilon=None):
assert degree >= 1, "degree must be >= 1"
epsilon = epsilon or self.get_params().get('epsilon')
old_weights = self.get_weights()
old_aggregations, _ = self.get_aggregated_weights()
new_weights = copy.deepcopy(old_weights)
for _ in range(degree):
new_weights = self.apply_to_weights(new_weights)
if NeuralNetwork.are_weights_diverged(new_weights):
return False
collection_size = self.get_amount_of_weights() // self.aggregates
collections, leftovers = self.__class__.collect_weights(new_weights, collection_size)
new_aggregations = [self.get_aggregator()(collection) for collection in collections]
for aggregation_id, old_aggregation in enumerate(old_aggregations):
new_aggregation = new_aggregations[aggregation_id]
if abs(new_aggregation - old_aggregation) >= epsilon:
return False, new_aggregations
return True, new_aggregations
class FFTNeuralNetwork(NeuralNetwork):
@staticmethod
def aggregate_fft(weights, dims):
flat = np.hstack([weight.flatten() for weight in weights])
fft_reduction = np.fft.fftn(flat, dims)[None, ...]
return fft_reduction
@staticmethod
def deaggregate_identically(aggregate, dims):
fft_inverse = np.fft.ifftn(aggregate, dims)
return fft_inverse
@staticmethod
def shuffle_not(weights_list):
return weights_list
@staticmethod
def shuffle_random(weights_list):
import random
random.shuffle(weights_list)
return weights_list
def __init__(self, aggregates, width, depth, **kwargs):
super().__init__(**kwargs)
self.aggregates = aggregates
self.width = width
self.depth = depth
self.model = Sequential()
self.model.add(Dense(units=width, input_dim=self.aggregates, **self.keras_params))
for _ in range(depth-1):
self.model.add(Dense(units=width, **self.keras_params))
self.model.add(Dense(units=self.aggregates, **self.keras_params))
def get_model(self):
return self.model
def get_shuffler(self):
return self.params.get('shuffler', self.shuffle_not)
def get_amount_of_weights(self):
total_weights = 0
for layer_id, layer in enumerate(self.get_weights()):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
total_weights += 1
return total_weights
def apply(self, inputs):
sample = np.asarray(inputs)
return self.model.predict(sample)[0]
def apply_to_weights(self, old_weights):
# build aggregations from old_weights
weights = self.get_weights_flat()
# call network
old_aggregation = self.aggregate_fft(weights, self.aggregates)
new_aggregation = self.apply(old_aggregation)
# generate list of new weights
new_weights_list = self.deaggregate_identically(new_aggregation, self.get_amount_of_weights())
new_weights_list = self.get_shuffler()(new_weights_list)
# write back new weights
new_weights = self.fill_weights(old_weights, new_weights_list)
# return results
if self.params.get("print_all_weight_updates", False) and not self.is_silent():
print("updated old weight aggregations " + str(old_aggregation))
print("to new weight aggregations " + str(new_aggregation))
print("resulting in network weights ...")
print(self.__class__.weights_to_string(new_weights))
return new_weights
def compute_samples(self):
weights = self.get_weights()
sample = np.asarray(weights)[None, ...]
return [sample], [sample]
class RecurrentNeuralNetwork(NeuralNetwork):
def __init__(self, width, depth, **kwargs):
super().__init__(**kwargs)
self.features = 1
self.width = width
self.depth = depth
self.model = Sequential()
self.model.add(SimpleRNN(units=width, input_dim=self.features, return_sequences=True, **self.keras_params))
for _ in range(depth-1):
self.model.add(SimpleRNN(units=width, return_sequences=True, **self.keras_params))
self.model.add(SimpleRNN(units=self.features, return_sequences=True, **self.keras_params))
def get_model(self):
return self.model
def apply(self, *inputs):
stuff = np.transpose(np.array([[[inputs[i]] for i in range(len(inputs))]]))
return self.model.predict(stuff)[0].flatten()
def apply_to_weights(self, old_weights):
# build list from old weights
new_weights = copy.deepcopy(old_weights)
old_weights_list = []
for layer_id, layer in enumerate(old_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
old_weights_list += [weight]
# call network
new_weights_list = self.apply(*old_weights_list)
# write back new weights from list of rnn returns
current_weight_id = 0
for layer_id, layer in enumerate(new_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
new_weight = new_weights_list[current_weight_id]
new_weights[layer_id][cell_id][weight_id] = new_weight
current_weight_id += 1
return new_weights
def compute_samples(self):
# build list from old weights
old_weights_list = []
for layer_id, layer in enumerate(self.get_weights()):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
old_weights_list += [weight]
sample = np.asarray(old_weights_list)[None, ..., None]
return sample, sample
class TrainingNeuralNetworkDecorator():
def __init__(self, net, **kwargs):
self.net = net
self.compile_params = dict(loss='mse', optimizer='sgd')
self.model_compiled = False
def __getattr__(self, name):
return getattr(self.net, name)
def with_params(self, **kwargs):
self.net.with_params(**kwargs)
return self
def with_keras_params(self, **kwargs):
self.net.with_keras_params(**kwargs)
return self
def get_compile_params(self):
return self.compile_params
def with_compile_params(self, **kwargs):
self.compile_params.update(kwargs)
return self
def compile_model(self, **kwargs):
compile_params = copy.deepcopy(self.compile_params)
compile_params.update(kwargs)
return self.net.model.compile(**compile_params)
def compiled(self, **kwargs):
if not self.model_compiled:
self.compile_model(**kwargs)
self.model_compiled = True
return self
def train(self, batchsize=1, store_states=True, epoch=0):
self.compiled()
x, y = self.net.compute_samples()
savestatecallback = [SaveStateCallback(net=self, epoch=epoch)] if store_states else None
history = self.net.model.fit(x=x, y=y, epochs=epoch+1, verbose=0, batch_size=batchsize, callbacks=savestatecallback, initial_epoch=epoch)
return history.history['loss'][-1]
def learn_from(self, other_network, batchsize=1):
self.compiled()
other_network.compiled()
x, y = other_network.net.compute_samples()
history = self.net.model.fit(x=x, y=y, verbose=0, batch_size=batchsize)
return history.history['loss'][-1]
if __name__ == '__main__':
def run_exp(net, prints=False):
# INFO Run_ID needs to be more than 0, so that exp stores the trajectories!
exp.run_net(net, 100, run_id=run_id + 1)
exp.historical_particles[run_id] = net
if prints:
print("Fixpoint? " + str(net.is_fixpoint()))
print("Loss " + str(loss))
if True:
# WeightWise Neural Network
with FixpointExperiment() as exp:
for run_id in tqdm(range(100)):
net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2) \
.with_keras_params(activation='linear'))
run_exp(net)
K.clear_session()
exp.log(exp.counters)
if True:
# Aggregating Neural Network
with FixpointExperiment() as exp:
for run_id in tqdm(range(100)):
net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2) \
.with_keras_params())
run_exp(net)
K.clear_session()
exp.log(exp.counters)
if True:
#FFT Neural Network
with FixpointExperiment() as exp:
for run_id in tqdm(range(100)):
net = ParticleDecorator(FFTNeuralNetwork(aggregates=4, width=2, depth=2) \
.with_keras_params(activation='linear'))
run_exp(net)
K.clear_session()
exp.log(exp.counters)
if True:
        # ok, so this works quite reliably
with FixpointExperiment() as exp:
for i in range(1):
run_count = 1000
net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
net.with_params(epsilon=0.0001).with_keras_params(optimizer='sgd')
for run_id in tqdm(range(run_count+1)):
net.compiled()
loss = net.train(epoch=run_id)
if run_id % 100 == 0:
run_exp(net)
K.clear_session()
if False:
with FixpointExperiment() as exp:
run_count = 1000
net = TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, width=2, depth=2)).with_params(epsilon=0.1e-6)
for run_id in tqdm(range(run_count+1)):
loss = net.compiled().train()
if run_id % 100 == 0:
net.print_weights()
old_aggs, _ = net.net.get_aggregated_weights()
print("old weights agg: " + str(old_aggs))
fp, new_aggs = net.net.is_fixpoint_after_aggregation(epsilon=0.0001)
print("new weights agg: " + str(new_aggs))
print("Fixpoint? " + str(net.is_fixpoint()))
print("Fixpoint after Agg? " + str(fp))
print("Loss " + str(loss))
print()
if False:
        # this explodes in our faces completely... NaN everywhere
# TODO: Wtf is happening here?
with FixpointExperiment() as exp:
run_count = 10000
net = TrainingNeuralNetworkDecorator(RecurrentNeuralNetwork(width=2, depth=2))\
.with_params(epsilon=0.1e-2).with_keras_params(optimizer='sgd', activation='linear')
for run_id in tqdm(range(run_count+1)):
loss = net.compiled().train()
if run_id % 500 == 0:
net.print_weights()
# print(net.apply_to_network(net))
print("Fixpoint? " + str(net.is_fixpoint()))
print("Loss " + str(loss))
print()
if False:
# and this gets somewhat interesting... we can still achieve non-trivial fixpoints
# over multiple applications when training enough in-between
with MixedFixpointExperiment() as exp:
for run_id in range(10):
net = TrainingNeuralNetworkDecorator(FFTNeuralNetwork(2, width=2, depth=2))\
.with_params(epsilon=0.0001, activation='sigmoid')
exp.run_net(net, 500, 10)
net.print_weights()
print("Fixpoint? " + str(net.is_fixpoint()))
exp.log(exp.counters)
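
A minimal sketch of the self-application loop these classes implement: build a weightwise net, let it attack itself a few times, then query its state. The outcome depends on the random initialization:

net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='linear')
net.self_attack(iterations=10)   # repeatedly apply the net to its own weights
print(net.is_fixpoint(), net.is_zero(), net.is_diverged())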

Binary file not shown.

@@ -0,0 +1 @@
{'divergent': 0, 'fix_zero': 0, 'fix_other': 13, 'fix_sec': 0, 'other': 7}

BIN code/results/Soup/soup.dill Normal file (binary file not shown)

File diff suppressed because one or more lines are too long

@@ -0,0 +1,30 @@
[-0.15321673 1.0428386 -0.7245892 -0.04343993 0.42338863 0.02538261
-0.40465942 -0.0242596 -1.226809 -0.8168446 0.26588777 -1.0929432
0.5383322 -0.73875046]
[-0.03072096 -1.369665 -0.357126 -0.21180922 0.3853204 0.22853081
-0.3705557 -0.21977347 -0.6684716 0.12849599 1.0226644 -0.0922638
-0.7828449 -0.6572327 ]
[-1.2444692 0.61213857 0.07965802 0.12361202 0.62641835 0.9720597
0.3863232 0.59948945 1.0857513 0.49231085 -0.5319295 0.29433587
-0.64177823 0.17603302]
[-0.9938292 -0.4438207 -0.03172896 0.06261964 -0.3870194 0.7637992
0.0244509 -0.04825407 0.91551745 -0.78740424 0.29226422 -0.52767307
-0.41744384 0.5567152 ]
[-0.39049304 0.8842579 -0.8447943 -0.19669186 0.7207061 0.16780053
0.3728221 0.08680353 0.7535456 -0.1000197 0.02029054 0.8640245
-0.15881588 1.1905665 ]
[ 1.0482084 0.9248296 -0.26946014 0.57047915 -0.32660747 0.6914731
-0.18025818 0.3816289 -0.69358927 0.21312684 -0.39932403 -0.02991759
-0.83068466 0.45619962]
[ 0.75814664 0.10328437 0.07867077 -0.0743314 -0.53440267 0.50492585
-0.54172474 0.51184535 0.3462249 1.0527638 -0.9503541 0.9235086
-0.1665241 1.1497779 ]
[-0.77187353 1.1105504 0.24265823 0.53782856 -0.34098852 -0.75576884
-0.25396293 -0.56288165 0.3851537 -0.67497945 0.14336896 0.763481
-0.9224985 0.6374753 ]
[-0.79123825 0.68166596 -0.30061013 -0.19360289 0.5632736 0.36276665
0.7470975 0.48115698 0.10046808 -0.8064349 -1.036736 -0.68296516
-1.156437 0.52633154]
[ 0.1788832 -1.5321186 -0.62001514 -0.3870902 0.97524184 0.6088638
-0.08297889 -0.05180515 -0.29096788 0.7519439 0.8803648 0.82771575
-0.854887 0.1742936 ]

Binary file not shown. (new image, 19 KiB)

File diff suppressed because one or more lines are too long

@@ -0,0 +1,12 @@
WeightwiseNeuralNetwork activation='linear' use_bias=False
{'divergent': 23, 'fix_zero': 27, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
AggregatingNeuralNetwork activation='linear' use_bias=False
{'divergent': 4, 'fix_zero': 46, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
RecurrentNeuralNetwork activation='linear' use_bias=False
{'divergent': 46, 'fix_zero': 4, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

File diff suppressed because one or more lines are too long

@@ -0,0 +1,4 @@
TrainingNeuralNetworkDecorator activation='linear' use_bias=False
{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.2, 5.2, 7.4, 8.1, 9.1, 9.6, 9.8, 10.0, 9.9, 9.9]}

File diff suppressed because one or more lines are too long

Binary file not shown. (new image, 207 KiB)

File diff suppressed because one or more lines are too long

@@ -0,0 +1,12 @@
WeightwiseNeuralNetwork activation='linear' use_bias=False
{'xs': [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500], 'ys': [0.2, 0.3, 0.15, 0.55, 0.7, 0.85, 0.8, 0.95, 0.9, 1.0, 1.0]}
AggregatingNeuralNetwork activation='linear' use_bias=False
{'xs': [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500], 'ys': [1.0, 0.95, 1.0, 1.0, 0.95, 0.9, 0.8, 1.0, 0.85, 1.0, 0.9]}
RecurrentNeuralNetwork activation='linear' use_bias=False
{'xs': [0, 50, 100, 150, 200, 250, 300, 350, 400, 450, 500], 'ys': [0.05, 0.0, 0.05, 0.0, 0.0, 0.1, 0.1, 0.05, 0.1, 0.0, 0.0]}

File diff suppressed because one or more lines are too long

@@ -0,0 +1,8 @@
TrainingNeuralNetworkDecorator activation='linear' use_bias=False
{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], 'zs': [0.0, 0.0, 0.7, 1.9, 3.6, 4.3, 6.0, 6.1, 8.3, 7.7, 8.8]}
TrainingNeuralNetworkDecorator activation='linear' use_bias=False
{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.8, 0.4, 0.4, 0.3, 0.2, 0.2, 0.2, 0.2, 0.2, 0.4, 0.3], 'zs': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}

File diff suppressed because one or more lines are too long

@@ -0,0 +1,12 @@
WeightwiseNeuralNetwork activation='linear' use_bias=False
{'divergent': 0, 'fix_zero': 0, 'fix_other': 50, 'fix_sec': 0, 'other': 0}
AggregatingNeuralNetwork activation='linear' use_bias=False
{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 50}
RecurrentNeuralNetwork activation='linear' use_bias=False
{'divergent': 38, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 12}

Binary file not shown.

File diff suppressed because one or more lines are too long

@@ -0,0 +1,30 @@
variation 10e-0
avg time to vergence 3.63
avg time as fixpoint 0
variation 10e-1
avg time to vergence 5.02
avg time as fixpoint 0
variation 10e-2
avg time to vergence 6.46
avg time as fixpoint 0
variation 10e-3
avg time to vergence 8.04
avg time as fixpoint 0
variation 10e-4
avg time to vergence 9.61
avg time as fixpoint 0.04
variation 10e-5
avg time to vergence 11.23
avg time as fixpoint 1.38
variation 10e-6
avg time to vergence 12.99
avg time as fixpoint 3.23
variation 10e-7
avg time to vergence 14.58
avg time as fixpoint 4.84
variation 10e-8
avg time to vergence 21.95
avg time as fixpoint 11.91
variation 10e-9
avg time to vergence 26.45
avg time as fixpoint 16.47

Binary file not shown. (new image, 28 KiB)

Binary file not shown. (new image, 20 KiB)

Binary file not shown. (new image, 42 KiB)

BIN code/results/mixed_soup.png Normal file (new image, 36 KiB; binary file not shown)

Binary file not shown. (new image, 234 KiB)

BIN code/results/newplot(2).png Normal file (new image, 259 KiB; binary file not shown)

@@ -0,0 +1 @@
{'divergent': 0, 'fix_zero': 10, 'fix_other': 0, 'fix_sec': 0, 'other': 0}

File diff suppressed because one or more lines are too long

Binary file not shown. (new image, 224 KiB)

Binary file not shown. (new image, 137 KiB)

Binary file not shown. (new image, 187 KiB)

Binary file not shown. (new image, 155 KiB)

File diff suppressed because one or more lines are too long

BIN code/results/soup1.png Normal file (new image, 266 KiB; binary file not shown)

BIN code/results/soup2.png Normal file (new image, 226 KiB; binary file not shown)

Binary file not shown. (new image, 17 KiB)

@@ -0,0 +1,70 @@
import sys
import os
# Add the top-level dir to the module search path
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend as K
def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=[]):
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
if net.is_zero():
counters['fix_zero'] += 1
else:
counters['fix_other'] += 1
notable_nets += [net]
elif net.is_fixpoint(2):
counters['fix_sec'] += 1
notable_nets += [net]
else:
counters['other'] += 1
return counters, notable_nets
if __name__ == '__main__':
with Experiment('applying_fixpoint') as exp:
exp.trials = 50
exp.run_count = 100
exp.epsilon = 1e-4
net_generators = []
for activation in ['linear']: # , 'sigmoid', 'relu']:
for use_bias in [False]:
net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
all_counters = []
all_notable_nets = []
all_names = []
for net_generator_id, net_generator in enumerate(net_generators):
counters = generate_counters()
notable_nets = []
for _ in tqdm(range(exp.trials)):
net = ParticleDecorator(net_generator())
net.with_params(epsilon=exp.epsilon)
                name = str(net.net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
for run_id in range(exp.run_count):
loss = net.self_attack()
count(counters, net, notable_nets)
all_counters += [counters]
all_notable_nets += [notable_nets]
all_names += [name]
K.clear_session()
exp.save(all_counters=all_counters)
exp.save(trajectorys=exp.without_particles())
# net types reached in the end
# exp.save(all_notable_nets=all_notable_nets)
exp.save(all_names=all_names) #experiment setups
for exp_id, counter in enumerate(all_counters):
exp.log(all_names[exp_id])
exp.log(all_counters[exp_id])
exp.log('\n')
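
A toy run of the helpers above with a single fresh network (the resulting counter depends on the random initialization):

counters = generate_counters()
net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2))
count(counters, net, notable_nets=[])
print(counters)   # e.g. {'divergent': 0, 'fix_zero': 1, ...}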

File diff suppressed because one or more lines are too long

@@ -0,0 +1,4 @@
TrainingNeuralNetworkDecorator activation='linear' use_bias=False
{'xs': [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100], 'ys': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 'zs': [0.0, 1.2, 5.2, 7.4, 8.1, 9.1, 9.6, 9.8, 10.0, 9.9, 9.9]}

File diff suppressed because one or more lines are too long

Binary file not shown. (new image, 207 KiB)

File diff suppressed because one or more lines are too long

@@ -0,0 +1,12 @@
WeightwiseNeuralNetwork activiation='linear' use_bias=False
{'divergent': 0, 'fix_zero': 0, 'fix_other': 50, 'fix_sec': 0, 'other': 0}
AggregatingNeuralNetwork activiation='linear' use_bias=False
{'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 50}
RecurrentNeuralNetwork activation='linear' use_bias=False
{'divergent': 38, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 12}

@@ -0,0 +1 @@
{'divergent': 11, 'fix_zero': 9, 'fix_other': 0, 'fix_sec': 0, 'other': 0}


@@ -0,0 +1,67 @@
import sys
import os
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend
def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=None):
    notable_nets = notable_nets if notable_nets is not None else []  # avoid a shared mutable default
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
if net.is_zero():
counters['fix_zero'] += 1
else:
counters['fix_other'] += 1
notable_nets += [net]
elif net.is_fixpoint(2):
counters['fix_sec'] += 1
notable_nets += [net]
else:
counters['other'] += 1
return counters, notable_nets
if __name__ == '__main__':
with Experiment('fixpoint-density') as exp:
        # NOTE: these settings could/should stay as they are
        # FFT does not work, though
exp.trials = 100000
exp.epsilon = 1e-4
net_generators = []
for activation in ['linear']:
net_generators += [lambda activation=activation: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
net_generators += [lambda activation=activation: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
# net_generators += [lambda activation=activation: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
# net_generators += [lambda activation=activation: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=False)]
all_counters = []
all_notable_nets = []
all_names = []
for net_generator_id, net_generator in enumerate(net_generators):
counters = generate_counters()
notable_nets = []
for _ in tqdm(range(exp.trials)):
net = net_generator().with_params(epsilon=exp.epsilon)
net = ParticleDecorator(net)
            name = str(net.net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias='" + str(net.get_keras_params().get('use_bias')) + "'"
count(counters, net, notable_nets)
keras.backend.clear_session()
all_counters += [counters]
# all_notable_nets += [notable_nets]
all_names += [name]
exp.save(all_counters=all_counters)
exp.save(all_notable_nets=all_notable_nets)
exp.save(all_names=all_names)
for exp_id, counter in enumerate(all_counters):
exp.log(all_names[exp_id])
exp.log(all_counters[exp_id])
exp.log('\n')
print('Done')

@@ -0,0 +1,93 @@
import sys
import os
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
from soup import prng
import keras.backend
from statistics import mean
avg = mean
def generate_fixpoint_weights():
return [
np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0], [0.0]], dtype=np.float32)
]
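# Hand-crafted weights forming a known fixpoint of the width-2, depth-2
# weightwise net built by generate_fixpoint_net() below.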
def generate_fixpoint_net():
    # NOTE: Weightwise is all we can do right now IMO
net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
    # I don't know if this works for aggregating. We don't actually need it, though.
# net = AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation='sigmoid')
net.set_weights(generate_fixpoint_weights())
return net
def vary(old_weights, e=1.0):
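    # Perturb every single weight by a random amount in (-e, e),
    # assuming prng() returns floats in [0, 1).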
new_weights = copy.deepcopy(old_weights)
for layer_id, layer in enumerate(new_weights):
for cell_id, cell in enumerate(layer):
for weight_id, weight in enumerate(cell):
if prng() < 0.5:
new_weights[layer_id][cell_id][weight_id] = weight + prng() * e
else:
new_weights[layer_id][cell_id][weight_id] = weight - prng() * e
return new_weights
if __name__ == '__main__':
with Experiment('known-fixpoint-variation') as exp:
exp.depth = 10
exp.trials = 100
exp.max_steps = 100
exp.epsilon = 1e-4
exp.xs = []
exp.ys = []
exp.zs = []
exp.notable_nets = []
current_scale = 1.0
for _ in range(exp.depth):
print('variation scale ' + str(current_scale))
for _ in tqdm(range(exp.trials)):
net = generate_fixpoint_net().with_params(epsilon=exp.epsilon)
net = ParticleDecorator(net)
net.set_weights(vary(net.get_weights(), current_scale))
time_to_something = 0
time_as_fixpoint = 0
still_fixpoint = True
for _ in range(exp.max_steps):
net.self_attack()
if net.is_zero() or net.is_diverged():
break
if net.is_fixpoint():
if still_fixpoint:
time_as_fixpoint += 1
else:
print('remarkable')
exp.notable_nets += [net.get_weights()]
still_fixpoint = True
else:
still_fixpoint = False
time_to_something += 1
exp.xs += [current_scale]
                # time steps taken to reach divergence or zero (reaching another fixpoint basically never happens)
exp.ys += [time_to_something]
                # time steps still regarded as the initial fixpoint
exp.zs += [time_as_fixpoint]
keras.backend.clear_session()
current_scale /= 10.0
for d in range(exp.depth):
            exp.log('variation scale 1e-' + str(d))
            exp.log('avg time to divergence/zero ' + str(avg(exp.ys[d * exp.trials:(d + 1) * exp.trials])))
            exp.log('avg time as fixpoint ' + str(avg(exp.zs[d * exp.trials:(d + 1) * exp.trials])))

@@ -0,0 +1,110 @@
import sys
import os
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from typing import Tuple
from util import *
from experiment import *
from network import *
from soup import *
import keras.backend
from statistics import mean
avg = mean
def generate_counters():
"""
Initial build of the counter dict, to store counts.
:rtype: dict
    :return: dictionary holding counters for: 'divergent', 'fix_zero', 'fix_other', 'fix_sec', 'other'
"""
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, soup, notable_nets=None):
    """
    Count the occurrences of the types of weight trajectories.
    :param counters: A counter dictionary.
    :param soup: A Soup
    :param notable_nets: A list to store and save interesting candidates
    :rtype: Tuple[dict, list]
    :return: Both the counter dictionary and the list of interesting nets.
    """
    notable_nets = notable_nets if notable_nets is not None else []  # avoid a shared mutable default
for net in soup.particles:
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
if net.is_zero():
counters['fix_zero'] += 1
else:
counters['fix_other'] += 1
# notable_nets += [net]
# elif net.is_fixpoint(2):
# counters['fix_sec'] += 1
# notable_nets += [net]
else:
counters['other'] += 1
return counters, notable_nets
if __name__ == '__main__':
with SoupExperiment('learn-from-soup') as exp:
exp.soup_size = 10
exp.soup_life = 100
exp.trials = 10
exp.learn_from_severity_values = [10 * i for i in range(11)]
exp.epsilon = 1e-4
net_generators = []
for activation in ['linear']: # ['sigmoid', 'linear', 'relu']:
for use_bias in [False]:
net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
# net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
# net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
all_names = []
all_data = []
for net_generator_id, net_generator in enumerate(net_generators):
xs = []
ys = []
zs = []
notable_nets = []
for learn_from_severity in exp.learn_from_severity_values:
counters = generate_counters()
results = []
for _ in tqdm(range(exp.trials)):
                    soup = Soup(exp.soup_size, lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(net_generator()).with_params(epsilon=exp.epsilon))
soup.with_params(attacking_rate=-1, learn_from_rate=0.1, train=0, learn_from_severity=learn_from_severity)
soup.seed()
                    name = str(soup.particles[0].net.__class__.__name__) + " activation='" + str(soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(soup.particles[0].get_keras_params().get('use_bias'))
for time in range(exp.soup_life):
soup.evolve()
count(counters, soup, notable_nets)
keras.backend.clear_session()
xs += [learn_from_severity]
ys += [float(counters['fix_zero']) / float(exp.trials)]
zs += [float(counters['fix_other']) / float(exp.trials)]
all_names += [name]
            # xs: learn_from_severity according to exp.learn_from_severity_values
# ys: zero-fixpoints after life time
# zs: non-zero-fixpoints after life time
all_data += [{'xs':xs, 'ys':ys, 'zs':zs}]
exp.save(all_names=all_names)
exp.save(all_data=all_data)
exp.save(soup=soup.without_particles())
for exp_id, name in enumerate(all_names):
exp.log(all_names[exp_id])
exp.log(all_data[exp_id])
exp.log('\n')

@@ -0,0 +1,101 @@
import sys
import os
from typing import Tuple
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend
def generate_counters():
"""
Initial build of the counter dict, to store counts.
:rtype: dict
    :return: dictionary holding counters for: 'divergent', 'fix_zero', 'fix_other', 'fix_sec', 'other'
"""
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=None):
    """
    Count the occurrences of the types of weight trajectories.
    :param counters: A counter dictionary.
    :param net: A Neural Network
    :param notable_nets: A list to store and save interesting candidates
    :rtype: Tuple[dict, list]
    :return: Both the counter dictionary and the list of interesting nets.
    """
    notable_nets = notable_nets if notable_nets is not None else []  # avoid a shared mutable default
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
if net.is_zero():
counters['fix_zero'] += 1
else:
counters['fix_other'] += 1
notable_nets += [net]
elif net.is_fixpoint(2):
counters['fix_sec'] += 1
notable_nets += [net]
else:
counters['other'] += 1
return counters, notable_nets
if __name__ == '__main__':
with Experiment('mixed-self-fixpoints') as exp:
exp.trials = 20
exp.selfattacks = 4
exp.trains_per_selfattack_values = [50 * i for i in range(11)]
exp.epsilon = 1e-4
net_generators = []
for activation in ['linear']: # , 'sigmoid', 'relu']:
for use_bias in [False]:
net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
# net_generators += [lambda activation=activation, use_bias=use_bias: FFTNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
all_names = []
all_data = []
for net_generator_id, net_generator in enumerate(net_generators):
xs = []
ys = []
for trains_per_selfattack in exp.trains_per_selfattack_values:
counters = generate_counters()
notable_nets = []
for _ in tqdm(range(exp.trials)):
net = ParticleDecorator(net_generator())
net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon)
                    name = str(net.net.net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
for selfattack_id in range(exp.selfattacks):
net.self_attack()
for train_id in range(trains_per_selfattack):
loss = net.compiled().train(epoch=selfattack_id*trains_per_selfattack+train_id)
if net.is_diverged() or net.is_fixpoint():
break
count(counters, net, notable_nets)
keras.backend.clear_session()
xs += [trains_per_selfattack]
ys += [float(counters['fix_zero'] + counters['fix_other']) / float(exp.trials)]
all_names += [name]
# xs: how many trains per self-attack from exp.trains_per_selfattack_values
# ys: average amount of fixpoints found
all_data += [{'xs': xs, 'ys': ys}]
exp.save(all_names=all_names)
exp.save(all_data=all_data)
for exp_id, name in enumerate(all_names):
exp.log(all_names[exp_id])
exp.log(all_data[exp_id])
exp.log('\n')

108
code/setups/mixed-soup.py Normal file
@@ -0,0 +1,108 @@
import sys
import os
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from typing import Tuple
from util import *
from experiment import *
from network import *
from soup import *
import keras.backend
def generate_counters():
"""
Initial build of the counter dict, to store counts.
:rtype: dict
    :return: dictionary holding counters for: 'divergent', 'fix_zero', 'fix_other', 'fix_sec', 'other'
"""
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, soup, notable_nets=None):
    """
    Count the occurrences of the types of weight trajectories.
    :param counters: A counter dictionary.
    :param soup: A Soup
    :param notable_nets: A list to store and save interesting candidates
    :rtype: Tuple[dict, list]
    :return: Both the counter dictionary and the list of interesting nets.
    """
    notable_nets = notable_nets if notable_nets is not None else []  # avoid a shared mutable default
for net in soup.particles:
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
if net.is_zero():
counters['fix_zero'] += 1
else:
counters['fix_other'] += 1
# notable_nets += [net]
# elif net.is_fixpoint(2):
# counters['fix_sec'] += 1
# notable_nets += [net]
else:
counters['other'] += 1
return counters, notable_nets
if __name__ == '__main__':
with Experiment('mixed-soup') as exp:
exp.trials = 10
exp.soup_size = 10
exp.soup_life = 5
exp.trains_per_selfattack_values = [10 * i for i in range(11)]
exp.epsilon = 1e-4
net_generators = []
for activation in ['linear']: # ['linear', 'sigmoid', 'relu']:
for use_bias in [False]:
net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
# net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
all_names = []
all_data = []
for net_generator_id, net_generator in enumerate(net_generators):
xs = []
ys = []
zs = []
for trains_per_selfattack in exp.trains_per_selfattack_values:
counters = generate_counters()
notable_nets = []
for soup_idx in tqdm(range(exp.trials)):
soup = Soup(exp.soup_size,
lambda net_generator=net_generator, exp=exp: TrainingNeuralNetworkDecorator(
net_generator()).with_params(epsilon=exp.epsilon))
soup.with_params(attacking_rate=0.1, learn_from_rate=-1, train=trains_per_selfattack,
learn_from_severity=-1)
soup.seed()
                    name = str(soup.particles[0].net.__class__.__name__) + " activation='" + str(
                        soup.particles[0].get_keras_params().get('activation')) + "' use_bias=" + str(
                        soup.particles[0].get_keras_params().get('use_bias'))
for _ in range(exp.soup_life):
soup.evolve()
count(counters, soup, notable_nets)
keras.backend.clear_session()
xs += [trains_per_selfattack]
ys += [float(counters['fix_zero']) / float(exp.trials)]
zs += [float(counters['fix_other']) / float(exp.trials)]
all_names += [name]
# xs: how many trains per self-attack from exp.trains_per_selfattack_values
# ys: average amount of zero-fixpoints found
# zs: average amount of non-zero fixpoints
all_data += [{'xs': xs, 'ys': ys, 'zs': zs}]
exp.save(all_names=all_names)
exp.save(all_data=all_data)
for exp_id, name in enumerate(all_names):
exp.log(all_names[exp_id])
exp.log(all_data[exp_id])
exp.log('\n')

@@ -0,0 +1,112 @@
import sys
import os
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from soup import *
from experiment import *
if __name__ == '__main__':
    def run_exp(net, prints=False):
        # INFO: run_id needs to be greater than 0, so that exp stores the trajectories!
        exp.run_net(net, 100, run_id=run_id + 1)
        exp.historical_particles[run_id] = net
        if prints:
            print("Fixpoint? " + str(net.is_fixpoint()))
if True:
# WeightWise Neural Network
with FixpointExperiment(name="weightwise_self_application") as exp:
for run_id in tqdm(range(20)):
net = ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)
.with_keras_params(activation='linear'))
run_exp(net)
K.clear_session()
exp.log(exp.counters)
exp.save(trajectorys=exp.without_particles())
if False:
# Aggregating Neural Network
with FixpointExperiment(name="aggregating_self_application") as exp:
for run_id in tqdm(range(10)):
net = ParticleDecorator(AggregatingNeuralNetwork(aggregates=4, width=2, depth=2)
.with_keras_params(activation='linear'))
run_exp(net)
K.clear_session()
exp.log(exp.counters)
exp.save(trajectorys=exp.without_particles())
if False:
#FFT Neural Network
with FixpointExperiment() as exp:
for run_id in tqdm(range(10)):
net = ParticleDecorator(FFTNeuralNetwork(aggregates=4, width=2, depth=2)
.with_keras_params(activation='linear'))
run_exp(net)
K.clear_session()
exp.log(exp.counters)
exp.save(trajectorys=exp.without_particles())
if False:
        # ok so this works quite reliably
with FixpointExperiment(name="weightwise_learning") as exp:
for i in range(10):
run_count = 100
net = TrainingNeuralNetworkDecorator(ParticleDecorator(WeightwiseNeuralNetwork(width=2, depth=2)))
net.with_params(epsilon=0.0001).with_keras_params(activation='linear')
exp.historical_particles[net.get_uid()] = net
for run_id in tqdm(range(run_count+1)):
net.compiled()
loss = net.train(epoch=run_id)
# run_exp(net)
# net.save_state(time=run_id)
K.clear_session()
exp.save(trajectorys=exp.without_particles())
if False:
        # ok so this works quite reliably
with FixpointExperiment(name="aggregating_learning") as exp:
for i in range(10):
run_count = 100
net = TrainingNeuralNetworkDecorator(ParticleDecorator(AggregatingNeuralNetwork(4, width=2, depth=2)))
net.with_params(epsilon=0.0001).with_keras_params(activation='linear')
exp.historical_particles[net.get_uid()] = net
for run_id in tqdm(range(run_count+1)):
net.compiled()
loss = net.train(epoch=run_id)
# run_exp(net)
# net.save_state(time=run_id)
K.clear_session()
exp.save(trajectorys=exp.without_particles())
if False:
        # this explodes in our faces completely... NaN everywhere
        # TODO: what is happening here?
with FixpointExperiment() as exp:
run_count = 10000
net = TrainingNeuralNetworkDecorator(RecurrentNeuralNetwork(width=2, depth=2))\
.with_params(epsilon=0.1e-2).with_keras_params(optimizer='sgd', activation='linear')
for run_id in tqdm(range(run_count+1)):
loss = net.compiled().train()
if run_id % 500 == 0:
net.print_weights()
# print(net.apply_to_network(net))
print("Fixpoint? " + str(net.is_fixpoint()))
print("Loss " + str(loss))
print()
if False:
# and this gets somewhat interesting... we can still achieve non-trivial fixpoints
# over multiple applications when training enough in-between
with MixedFixpointExperiment() as exp:
for run_id in range(10):
net = TrainingNeuralNetworkDecorator(FFTNeuralNetwork(2, width=2, depth=2))\
.with_params(epsilon=0.0001, activation='sigmoid')
exp.run_net(net, 500, 10)
net.print_weights()
print("Fixpoint? " + str(net.is_fixpoint()))
exp.log(exp.counters)

@@ -0,0 +1,32 @@
import sys
import os
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from soup import *
from experiment import *
if __name__ == '__main__':
if True:
with SoupExperiment("soup") as exp:
for run_id in range(1):
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2)) \
.with_keras_params(activation='linear').with_params(epsilon=0.0001)
# net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')
# net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
soup = Soup(20, net_generator).with_params(remove_divergent=True, remove_zero=True,
train=30,
learn_from_rate=-1)
soup.seed()
for _ in tqdm(range(100)):
soup.evolve()
exp.log(soup.count())
# you can access soup.historical_particles[particle_uid].states[time_step]['loss']
# or soup.historical_particles[particle_uid].states[time_step]['weights']
# from soup.dill
exp.save(soup=soup.without_particles())

@@ -0,0 +1,70 @@
import sys
import os
# Add the top-level directory to the module search path
sys.path += [os.path.join('..', '.')]
from util import *
from experiment import *
from network import *
import keras.backend as K
def generate_counters():
return {'divergent': 0, 'fix_zero': 0, 'fix_other': 0, 'fix_sec': 0, 'other': 0}
def count(counters, net, notable_nets=None):
    notable_nets = notable_nets if notable_nets is not None else []  # avoid a shared mutable default
if net.is_diverged():
counters['divergent'] += 1
elif net.is_fixpoint():
if net.is_zero():
counters['fix_zero'] += 1
else:
counters['fix_other'] += 1
notable_nets += [net]
elif net.is_fixpoint(2):
counters['fix_sec'] += 1
notable_nets += [net]
else:
counters['other'] += 1
return counters, notable_nets
if __name__ == '__main__':
with Experiment('training_fixpoint') as exp:
exp.trials = 50
exp.run_count = 1000
exp.epsilon = 1e-4
net_generators = []
for activation in ['linear']: # , 'sigmoid', 'relu']:
for use_bias in [False]:
net_generators += [lambda activation=activation, use_bias=use_bias: WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: AggregatingNeuralNetwork(aggregates=4, width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
net_generators += [lambda activation=activation, use_bias=use_bias: RecurrentNeuralNetwork(width=2, depth=2).with_keras_params(activation=activation, use_bias=use_bias)]
all_counters = []
all_notable_nets = []
all_names = []
for net_generator_id, net_generator in enumerate(net_generators):
counters = generate_counters()
notable_nets = []
for _ in tqdm(range(exp.trials)):
net = ParticleDecorator(net_generator())
net = TrainingNeuralNetworkDecorator(net).with_params(epsilon=exp.epsilon)
                name = str(net.net.net.__class__.__name__) + " activation='" + str(net.get_keras_params().get('activation')) + "' use_bias=" + str(net.get_keras_params().get('use_bias'))
for run_id in range(exp.run_count):
loss = net.compiled().train(epoch=run_id+1)
count(counters, net, notable_nets)
all_counters += [counters]
all_notable_nets += [notable_nets]
all_names += [name]
K.clear_session()
exp.save(all_counters=all_counters)
exp.save(trajectorys=exp.without_particles())
# net types reached in the end
# exp.save(all_notable_nets=all_notable_nets)
        exp.save(all_names=all_names)  # experiment setups
for exp_id, counter in enumerate(all_counters):
exp.log(all_names[exp_id])
exp.log(all_counters[exp_id])
exp.log('\n')

147
code/soup.py Normal file
@@ -0,0 +1,147 @@
import random
from network import *
def prng():
return random.random()
class Soup(object):
def __init__(self, size, generator, **kwargs):
self.size = size
self.generator = generator
self.particles = []
self.historical_particles = {}
self.params = dict(attacking_rate=0.1, learn_from_rate=0.1, train=0, learn_from_severity=1)
self.params.update(kwargs)
self.time = 0
def __copy__(self):
copy_ = Soup(self.size, self.generator, **self.params)
copy_.__dict__ = {attr: self.__dict__[attr] for attr in self.__dict__ if
attr not in ['particles', 'historical_particles']}
return copy_
def without_particles(self):
self_copy = copy.copy(self)
# self_copy.particles = [particle.states for particle in self.particles]
self_copy.historical_particles = {key: val.states for key, val in self.historical_particles.items()}
return self_copy
def with_params(self, **kwargs):
self.params.update(kwargs)
return self
def generate_particle(self):
new_particle = ParticleDecorator(self.generator())
self.historical_particles[new_particle.get_uid()] = new_particle
return new_particle
def get_particle(self, uid, otherwise=None):
return self.historical_particles.get(uid, otherwise)
def seed(self):
self.particles = []
for _ in range(self.size):
self.particles += [self.generate_particle()]
return self
def evolve(self, iterations=1):
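        # One step per particle: maybe attack a random particle, maybe imitate
        # (learn_from) a random particle, optionally self-train, then replace the
        # particle if it diverged or collapsed to zero (when configured).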
for _ in range(iterations):
self.time += 1
for particle_id, particle in enumerate(self.particles):
description = {'time': self.time}
if prng() < self.params.get('attacking_rate'):
other_particle_id = int(prng() * len(self.particles))
other_particle = self.particles[other_particle_id]
particle.attack(other_particle)
description['action'] = 'attacking'
description['counterpart'] = other_particle.get_uid()
if prng() < self.params.get('learn_from_rate'):
other_particle_id = int(prng() * len(self.particles))
other_particle = self.particles[other_particle_id]
for _ in range(self.params.get('learn_from_severity', 1)):
particle.learn_from(other_particle)
description['action'] = 'learn_from'
description['counterpart'] = other_particle.get_uid()
for _ in range(self.params.get('train', 0)):
particle.compiled()
# callbacks on save_state are broken for TrainingNeuralNetwork
loss = particle.train(store_states=False)
description['fitted'] = self.params.get('train', 0)
description['loss'] = loss
description['action'] = 'train_self'
description['counterpart'] = None
if self.params.get('remove_divergent') and particle.is_diverged():
new_particle = self.generate_particle()
self.particles[particle_id] = new_particle
description['action'] = 'divergent_dead'
description['counterpart'] = new_particle.get_uid()
if self.params.get('remove_zero') and particle.is_zero():
new_particle = self.generate_particle()
self.particles[particle_id] = new_particle
                    description['action'] = 'zero_dead'
description['counterpart'] = new_particle.get_uid()
particle.save_state(**description)
def count(self):
counters = dict(divergent=0, fix_zero=0, fix_other=0, fix_sec=0, other=0)
for particle in self.particles:
if particle.is_diverged():
counters['divergent'] += 1
elif particle.is_fixpoint():
if particle.is_zero():
counters['fix_zero'] += 1
else:
counters['fix_other'] += 1
elif particle.is_fixpoint(2):
counters['fix_sec'] += 1
else:
counters['other'] += 1
return counters
def print_all(self):
for particle in self.particles:
particle.print_weights()
print(particle.is_fixpoint())
if __name__ == '__main__':
if False:
with SoupExperiment() as exp:
for run_id in range(1):
net_generator = lambda: WeightwiseNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
# net_generator = lambda: FFTNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
# net_generator = lambda: AggregatingNeuralNetwork(4, 2, 2).with_keras_params(activation='sigmoid')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True)
soup.seed()
for _ in tqdm(range(1000)):
soup.evolve()
exp.log(soup.count())
exp.save(soup=soup.without_particles())
if True:
with SoupExperiment("soup") as exp:
for run_id in range(1):
net_generator = lambda: TrainingNeuralNetworkDecorator(WeightwiseNeuralNetwork(2, 2))\
.with_keras_params(activation='linear').with_params(epsilon=0.0001)
# net_generator = lambda: TrainingNeuralNetworkDecorator(AggregatingNeuralNetwork(4, 2, 2))
# .with_keras_params(activation='linear')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: TrainingNeuralNetworkDecorator(FFTNeuralNetwork(4, 2, 2))\
# .with_keras_params(activation='linear')\
# .with_params(shuffler=AggregatingNeuralNetwork.shuffle_random)
# net_generator = lambda: RecurrentNeuralNetwork(2, 2).with_keras_params(activation='linear').with_params()
soup = Soup(100, net_generator).with_params(remove_divergent=True, remove_zero=True, train=20)
soup.seed()
for _ in tqdm(range(100)):
soup.evolve()
exp.log(soup.count())
# you can access soup.historical_particles[particle_uid].states[time_step]['loss']
# or soup.historical_particles[particle_uid].states[time_step]['weights']
# from soup.dill
exp.save(soup=soup.without_particles())
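
The saved soup.dill can be reloaded for inspection with dill; a minimal sketch, assuming a soup.dill written by exp.save(soup=soup.without_particles()), after which historical_particles maps each uid to its list of state dicts (the path below is hypothetical):

import dill

with open('soup.dill', 'rb') as in_f:  # use your experiment folder
    soup = dill.load(in_f)
for uid, states in soup.historical_particles.items():
    print(uid, states[-1].get('action'), states[-1].get('loss'))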

111
code/test.py Normal file
@@ -0,0 +1,111 @@
from experiment import *
from network import *
from soup import *
import numpy as np
class LearningNeuralNetwork(NeuralNetwork):
@staticmethod
def mean_reduction(weights, features):
single_dim_weights = np.hstack([w.flatten() for w in weights])
shaped_weights = np.reshape(single_dim_weights, (1, features, -1))
x = np.mean(shaped_weights, axis=-1)
return x
@staticmethod
def fft_reduction(weights, features):
single_dim_weights = np.hstack([w.flatten() for w in weights])
x = np.fft.fft(single_dim_weights, n=features)[None, ...]
return x
@staticmethod
def random_reduction(_, features):
x = np.random.rand(features)[None, ...]
return x
def __init__(self, width, depth, features, **kwargs):
raise DeprecationWarning
super().__init__(**kwargs)
self.width = width
self.depth = depth
self.features = features
self.compile_params = dict(loss='mse', optimizer='sgd')
self.model = Sequential()
self.model.add(Dense(units=self.width, input_dim=self.features, **self.keras_params))
for _ in range(self.depth - 1):
self.model.add(Dense(units=self.width, **self.keras_params))
self.model.add(Dense(units=self.features, **self.keras_params))
self.model.compile(**self.compile_params)
def apply_to_weights(self, old_weights, **kwargs):
        raise NotImplementedError
        # reduced = kwargs.get('reduction', self.fft_reduction)()
# build aggregations from old_weights
weights = self.get_weights_flat()
# call network
old_aggregation = self.aggregate_fft(weights, self.aggregates)
new_aggregation = self.apply(old_aggregation)
# generate list of new weights
new_weights_list = self.deaggregate_identically(new_aggregation, self.get_amount_of_weights())
new_weights_list = self.get_shuffler()(new_weights_list)
# write back new weights
new_weights = self.fill_weights(old_weights, new_weights_list)
# return results
if self.params.get("print_all_weight_updates", False) and not self.is_silent():
print("updated old weight aggregations " + str(old_aggregation))
print("to new weight aggregations " + str(new_aggregation))
print("resulting in network weights ...")
print(self.__class__.weights_to_string(new_weights))
return new_weights
def with_compile_params(self, **kwargs):
self.compile_params.update(kwargs)
return self
def learn(self, epochs, reduction, batchsize=1):
with tqdm(total=epochs, ascii=True,
desc='Type: {t} @ Epoch:'.format(t=self.__class__.__name__),
postfix=["Loss", dict(value=0)]) as bar:
for epoch in range(epochs):
old_weights = self.get_weights()
x = reduction(old_weights, self.features)
                save_state_callback = SaveStateCallback(self, epoch=epoch)
                history = self.model.fit(x=x, y=x, verbose=0, batch_size=batchsize, callbacks=[save_state_callback])
bar.postfix[1]["value"] = history.history['loss'][-1]
bar.update()
def vary(e=0.0, f=0.0):
return [
np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
np.array([[1.0+e, 0.0+f], [0.0+f, 0.0+f]], dtype=np.float32),
np.array([[1.0+e], [0.0+f]], dtype=np.float32)
]
if __name__ == '__main__':
net = WeightwiseNeuralNetwork(width=2, depth=2).with_keras_params(activation='sigmoid')
if False:
net.set_weights([
np.array([[1.0, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0, 0.0], [0.0, 0.0]], dtype=np.float32),
np.array([[1.0], [0.0]], dtype=np.float32)
])
print(net.get_weights())
net.self_attack(100)
print(net.get_weights())
print(net.is_fixpoint())
if True:
net.set_weights(vary(0.01, 0.0))
print(net.get_weights())
for _ in range(5):
net.self_attack()
print(net.get_weights())
print(net.is_fixpoint())

39
code/util.py Normal file
@@ -0,0 +1,39 @@
class PrintingObject:
class SilenceSignal():
def __init__(self, obj, value):
self.obj = obj
self.new_silent = value
def __enter__(self):
self.old_silent = self.obj.get_silence()
self.obj.set_silence(self.new_silent)
def __exit__(self, exception_type, exception_value, traceback):
self.obj.set_silence(self.old_silent)
def __init__(self):
self.silent = True
def is_silent(self):
return self.silent
def get_silence(self):
return self.is_silent()
def set_silence(self, value=True):
self.silent = value
return self
def unset_silence(self):
self.silent = False
return self
def with_silence(self, value=True):
self.set_silence(value)
return self
def silence(self, value=True):
return self.__class__.SilenceSignal(self, value)
def _print(self, *args, **kwargs):
if not self.silent:
print(*args, **kwargs)
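
A quick usage sketch for the silence context manager; Worker is a hypothetical subclass, not part of the repo:

class Worker(PrintingObject):
    def work(self):
        self._print('working...')

worker = Worker()            # silent by default
worker.work()                # prints nothing
with worker.silence(False):
    worker.work()            # prints 'working...'
worker.work()                # silent again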

283
code/visualization.py Normal file
@@ -0,0 +1,283 @@
import os
from experiment import Experiment
# noinspection PyUnresolvedReferences
from soup import Soup
from argparse import ArgumentParser
import numpy as np
import plotly as pl
import plotly.graph_objs as go
import colorlover as cl
import dill
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
def build_args():
arg_parser = ArgumentParser()
arg_parser.add_argument('-i', '--in_file', nargs=1, type=str)
arg_parser.add_argument('-o', '--out_file', nargs='?', default='out', type=str)
return arg_parser.parse_args()
def build_from_soup_or_exp(soup):
particles = soup.historical_particles
particle_list = []
for particle in particles.values():
particle_dict = dict(
trajectory=[event['weights'] for event in particle],
time=[event['time'] for event in particle],
action=[event.get('action', None) for event in particle],
counterpart=[event.get('counterpart', None) for event in particle]
)
if any([x is not None for x in particle_dict['counterpart']]):
print('counterpart')
particle_list.append(particle_dict)
return particle_list
def plot_latent_trajectories(soup_or_experiment, filename='latent_trajectory_plot'):
assert isinstance(soup_or_experiment, (Experiment, Soup))
    base_scale = cl.scales['11']['div']['RdYlGn']
    data_dict = build_from_soup_or_exp(soup_or_experiment)
    scale = cl.interp(base_scale, len(data_dict) + 1)  # Map color scale to N bins
    # Fit the embedding space
transformer = TSNE()
for particle_dict in data_dict:
array = np.asarray([np.hstack([x.flatten() for x in timestamp]).flatten()
for timestamp in particle_dict['trajectory']])
particle_dict['trajectory'] = array
transformer.fit(array)
# Transform data accordingly and plot it
data = []
for p_id, particle_dict in enumerate(data_dict):
        transformed = transformer._fit(np.asarray(particle_dict['trajectory']))  # NOTE: uses TSNE's private _fit(); sklearn's TSNE has no public transform()
line_trace = go.Scatter(
x=transformed[:, 0],
y=transformed[:, 1],
            text='Hovertext goes here',
line=dict(color=scale[p_id]),
# legendgroup='Position -{}'.format(pos),
name='Particle - {}'.format(p_id),
showlegend=True,
# hoverinfo='text',
mode='lines')
line_start = go.Scatter(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
marker=dict(
color='rgb(255, 0, 0)',
size=4
),
showlegend=False
)
line_end = go.Scatter(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
marker=dict(
color='rgb(0, 0, 0)',
size=4
),
showlegend=False
)
data.extend([line_trace, line_start, line_end])
    layout = dict(title='{} - Latent Trajectory Movement'.format('Soup'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
# import plotly.io as pio
# pio.write_image(fig, filename)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def plot_latent_trajectories_3D(soup_or_experiment, filename='plot'):
def norm(val, a=0, b=0.25):
return (val - a) / (b - a)
data_list = build_from_soup_or_exp(soup_or_experiment)
if not data_list:
return
base_scale = cl.scales['9']['div']['RdYlGn']
# base_scale = cl.scales['9']['qual']['Set1']
scale = cl.interp(base_scale, len(data_list)+1) # Map color scale to N bins
# Fit the embedding space
transformer = PCA(n_components=2)
array = []
for particle_dict in data_list:
array.append(particle_dict['trajectory'])
transformer.fit(np.vstack(array))
# Transform data accordingly and plot it
data = []
for p_id, particle_dict in enumerate(data_list):
transformed = transformer.transform(particle_dict['trajectory'])
line_trace = go.Scatter3d(
x=transformed[:, 0],
y=transformed[:, 1],
z=np.asarray(particle_dict['time']),
            text='Particle: {}<br> It had {} lives.'.format(p_id, len(particle_dict['trajectory'])),
line=dict(
color=scale[p_id],
width=4
),
# legendgroup='Particle - {}'.format(p_id),
name='Particle -{}'.format(p_id),
showlegend=False,
hoverinfo='text',
mode='lines')
line_start = go.Scatter3d(mode='markers', x=[transformed[0, 0]], y=[transformed[0, 1]],
z=np.asarray(particle_dict['time'][0]),
marker=dict(
color='rgb(255, 0, 0)',
size=4
),
showlegend=False
)
line_end = go.Scatter3d(mode='markers', x=[transformed[-1, 0]], y=[transformed[-1, 1]],
z=np.asarray(particle_dict['time'][-1]),
marker=dict(
color='rgb(0, 0, 0)',
size=4
),
showlegend=False
)
data.extend([line_trace, line_start, line_end])
axis_layout = dict(gridcolor='rgb(255, 255, 255)',
gridwidth=3,
zerolinecolor='rgb(255, 255, 255)',
showbackground=True,
backgroundcolor='rgb(230, 230,230)',
titlefont=dict(
color='black',
size=30
)
)
layout = go.Layout(scene=dict(
# aspectratio=dict(x=2, y=2, z=2),
xaxis=dict(title='Transformed X', **axis_layout),
yaxis=dict(title='Transformed Y', **axis_layout),
zaxis=dict(title='Epoch', **axis_layout)),
# title='{} - Latent Trajectory Movement'.format('Soup'),
width=1024, height=1024,
margin=dict(l=0, r=0, b=0, t=0)
)
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename, validate=True)
pass
def plot_histogram(bars_dict_list, filename='histogram_plot'):
    # categorical
ryb = cl.scales['10']['div']['RdYlBu']
data = []
    for bar_id, bars_dict in enumerate(bars_dict_list):
hist = go.Histogram(
histfunc="count",
y=bars_dict.get('value', 14),
x=bars_dict.get('name', 'gimme a name'),
showlegend=False,
marker=dict(
color=ryb[bar_id]
),
)
data.append(hist)
    layout = dict(title='{} Histogram Plot'.format('Experiment Name'),
                  height=400, width=400, margin=dict(l=0, r=0, t=0, b=0))
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def line_plot(line_dict_list, filename='lineplot'):
# lines with standard deviation
# Transform data accordingly and plot it
data = []
rdylgn = cl.scales['10']['div']['RdYlGn']
rdylgn_background = [scale + (0.4,) for scale in cl.to_numeric(rdylgn)]
for line_id, line_dict in enumerate(line_dict_list):
name = line_dict.get('name', 'gimme a name')
upper_bound = go.Scatter(
name='Upper Bound',
x=line_dict['x'],
y=line_dict['upper_y'],
mode='lines',
marker=dict(color="#444"),
line=dict(width=0),
fillcolor=rdylgn_background[line_id],
)
trace = go.Scatter(
x=line_dict['x'],
y=line_dict['main_y'],
mode='lines',
name=name,
            line=dict(color=rdylgn[line_id]),
fillcolor=rdylgn_background[line_id],
fill='tonexty')
lower_bound = go.Scatter(
name='Lower Bound',
x=line_dict['x'],
y=line_dict['lower_y'],
marker=dict(color="#444"),
line=dict(width=0),
mode='lines')
data.extend([upper_bound, trace, lower_bound])
    layout = dict(title='{} Line Plot'.format('Experiment Name'),
                  height=800, width=800, margin=dict(l=0, r=0, t=0, b=0))
fig = go.Figure(data=data, layout=layout)
pl.offline.plot(fig, auto_open=True, filename=filename)
pass
def search_and_apply(absolut_file_or_folder, plotting_function, files_to_look_for=[]):
if os.path.isdir(absolut_file_or_folder):
for sub_file_or_folder in os.scandir(absolut_file_or_folder):
search_and_apply(sub_file_or_folder.path, plotting_function, files_to_look_for=files_to_look_for)
elif absolut_file_or_folder.endswith('.dill'):
file_or_folder = os.path.split(absolut_file_or_folder)[-1]
if file_or_folder in files_to_look_for and not os.path.exists('{}.html'.format(absolut_file_or_folder[:-5])):
print('Apply Plotting function "{func}" on file "{file}"'.format(func=plotting_function.__name__,
file=absolut_file_or_folder)
)
with open(absolut_file_or_folder, 'rb') as in_f:
exp = dill.load(in_f)
try:
plotting_function(exp, filename='{}.html'.format(absolut_file_or_folder[:-5]))
except ValueError:
pass
except AttributeError:
pass
else:
            # This was either another file type, or the plot .html already exists.
pass
if __name__ == '__main__':
args = build_args()
in_file = args.in_file[0]
out_file = args.out_file
search_and_apply(in_file, plot_latent_trajectories_3D, ["trajectorys.dill", "soup.dill"])
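
Typical invocation (the path is hypothetical): point the script at an experiment folder and it writes an interactive .html next to every matching .dill it finds:

python code/visualization.py -i experiments/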

Some files were not shown because too many files have changed in this diff.