committed Andy Mattausch's code under related/EP/
This commit is contained in:
parent 0105eb6998
commit 3a170d886b
95 related/EP/src/FeatureReduction.py Normal file
@@ -0,0 +1,95 @@
import numpy as np
import numbers


class FeatureReduction():

    def __init__(self, type):
        self.type = type
        self.VecFromWeigths = None

    def calc(self, vec, n):
        self.weigthsToVec(vec)
        return {
            'fft': self.fft(self.VecFromWeigths, n),
            'rfft': self.rfftn(self.VecFromWeigths, n),
            'mean': self.mean(self.VecFromWeigths, n),
            'meanShuffled': self.mean(self.shuffelVec(self.VecFromWeigths, 3), n)
        }[self.type]

    def fft(self, vec, n):
        return np.fft.fft(vec, n)

    def rfftn(self, vec, n):
        return np.fft.rfft(vec, n)

    def shuffelVec(self, vec, mod):
        newVec = np.array([])
        rVec = np.array([])
        i = 0
        while i < len(vec):
            if i % mod == 0:
                newVec = np.append(newVec, vec[i])
            else:
                rVec = np.append(rVec, vec[i])
            i += 1
        if len(newVec) != len(vec):
            newVec = np.append(newVec, self.shuffelVec(rVec, mod))
        return newVec

    def mean(self, vec, n):
        '''
        Splits a vector into n equally sized parts and computes the mean of each part.
        :param vec: input vector as an array with x components
        :param n: the size of the output vector
        :return: vector as an array with n components
        '''
        if n > len(vec):
            raise Exception("n is bigger than len(vec) - no feature reduction available")
        x = len(vec) / n
        result = np.array([])
        factor = 1
        if x - int(x) != 0:
            factor = x - int(x)
        actFactor = factor
        vv = 0
        for value in vec:
            if round(x, 5) <= 1:
                # part boundary reached: close the current part with the fractional
                # share of this value and start the next part with the remainder
                x = len(vec) / n
                vv += actFactor * value
                result = np.append(result, [round(vv / (len(vec) / n), 6)])
                vv = (1 - actFactor) * value
                if round((1 - actFactor), 5) > 0:
                    x -= (1 - actFactor)
                    actFactor += factor
                    if round(actFactor, 5) > 1:
                        actFactor -= 1
                else:
                    actFactor = factor
            else:
                vv += value
                x -= 1
        return result

    def weigthsToVec(self, weights, vec=np.array([])):
        '''
        Keras delivers the weights of a neural network as a multidimensional array. This array
        contains not only the weights of the individual layers but also the state of the output of
        the individual neurons. The weights of a network with one neuron in the input layer, two
        neurons in a hidden layer and one neuron in the output layer, for example, look like this:
        [[[1 2]]
         [0. 0.]
         [[2] [3]]
         [0.]]
        This function converts that representation into a vector, discarding the information about
        the output of the individual neurons. The vector representing the weights described above
        has the form:
        [1, 2, 2, 3]
        :param weights: multidimensional array of weights from Keras
        :return: vector in the form of an array
        '''
        if isinstance(weights, np.float32):
            vec = np.append(vec, weights)
        else:
            for x in weights:
                if isinstance(x[0], np.ndarray):
                    for xx in x:
                        vec = np.append(vec, xx)
        self.VecFromWeigths = vec
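
For illustration, a minimal usage sketch of the class above (not part of this commit; assumes related/EP/src as the working directory, and the weights tuple mimics the Keras layout described in the weigthsToVec docstring):

import numpy as np
from FeatureReduction import FeatureReduction

# Keras-style weights of a 1-2-1 network: kernel, bias, kernel, bias
weights = (np.array([[1., 2.]], dtype=np.float32),
           np.array([0., 0.], dtype=np.float32),
           np.array([[2.], [3.]], dtype=np.float32),
           np.array([0.], dtype=np.float32))

fr = FeatureReduction('mean')
# calc() first flattens the kernels to [1, 2, 2, 3] (biases are dropped),
# then reduces that vector to n components with the selected method
print(fr.calc(weights, 2))  # mean of [1, 2] and of [2, 3] -> [1.5 2.5]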
66 related/EP/src/Functions.py Normal file
@@ -0,0 +1,66 @@
import glob
import random
import numpy as np


def checkFileExists(fileName):
    '''
    Checks whether a file exists; returns its path if it does, otherwise False.
    :param fileName:
    :return: path of the file, or False
    '''
    file = glob.glob(fileName)
    if len(file) > 0:
        return file[0]
    else:
        return False


def calcMeanSquaredError(a, b):
    '''
    Computes the MSE between a and b.
    :param a:
    :param b:
    :return: MSE
    '''
    a = a.astype(float)
    b = b.astype(float)
    mse = ((a - b) ** 2).mean()
    return mse


def calcScale(array):
    '''
    Computes the value range of an array.
    :param array:
    :return:
    '''
    return abs(max(array) - min(array))


def getRandomLayer(tuple, standardDeviation=0.01):
    '''
    Returns random weights for the given layer in the Keras format.
    :param tuple: a tuple describing the layer; (12, 75) is, for example, a layer connecting 12 to 75 neurons
    :param standardDeviation:
    :return: random weights for the layer in the Keras format
    '''
    randomLayer = []
    i = 0
    while i < tuple[0]:
        randomNeuron = []
        x = 0
        while x < tuple[1]:
            randomNeuron.append(getRandomGausNumber(standardDeviation))
            x += 1
        randomLayer.append(randomNeuron)
        i += 1
    randomLayer = [randomLayer]
    randomLayer.append([0.] * tuple[1])
    return randomLayer


def getRandomGausNumber(standardDeviation):
    '''
    Returns a random number around zero with the given standard deviation.
    :param standardDeviation:
    :return:
    '''
    return np.random.normal(0.0, standardDeviation)
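
A short sketch of the two central helpers (not part of this commit; assumes related/EP/src as the working directory):

import numpy as np
import Functions

# MSE is the mean of the squared componentwise differences
a = np.array([1., 2., 3.])
b = np.array([1., 2., 4.])
print(Functions.calcMeanSquaredError(a, b))  # (0 + 0 + 1) / 3 = 0.333...

# getRandomLayer returns [kernel, bias] in the Keras layout: a 2x3 nested
# list of Gaussian weights plus a zero bias of length 3
layer = Functions.getRandomLayer((2, 3), standardDeviation=0.01)
print(np.shape(layer[0]), layer[1])  # (2, 3) [0.0, 0.0, 0.0]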
11 related/EP/src/LossHistory.py Normal file
@@ -0,0 +1,11 @@
import keras


class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))

    def addLoss(self, loss):
        self.losses.append(loss)
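
The callback is fed in two ways in this code base: model.fit() calls on_batch_end() per batch, while the hill-climber methods below record losses via addLoss(). A minimal sketch (not part of this commit):

from LossHistory import LossHistory

history = LossHistory()
history.on_train_begin()                       # resets the list, as Keras does at the start of fit()
history.addLoss(0.25)                          # manual recording, as used by the hill climber
history.on_batch_end(0, logs={'loss': 0.125})  # per-batch recording, as used by model.fit()
print(history.losses)                          # [0.25, 0.125]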
364 related/EP/src/NeuralNetwork.py Normal file
@@ -0,0 +1,364 @@
import matplotlib
matplotlib.use('Agg')
import datetime
from keras import backend
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import Adadelta
from keras.models import load_model
from keras import utils
import copy
import numpy as np
import collections
from operator import add

try:
    from src import Functions
    from src.PltData import PltData
    from src.FeatureReduction import FeatureReduction
    from src.LossHistory import LossHistory
except ImportError:
    import Functions
    from PltData import PltData
    from FeatureReduction import FeatureReduction
    from LossHistory import LossHistory


class NeuralNetwork:

    def __init__(self, numberOfNeurons, activationFunctions, featureReduction,
                 numberLoops, loss='mean_squared_error', printVectors=False, path="../images/",
                 fitByHillClimber=False, standardDeviation=0.01, numberOtRandomShots=20, checkNewWeightsIsReallyBetter=False):
        '''
        :param numberOfNeurons: array of integers giving the number of neurons per layer
        :param activationFunctions: array of strings naming the activation function of each layer
        :param featureReduction: string selecting the feature reduction function
        :param numberLoops: the number of loop iterations
        :param loss: loss function
        :param printVectors: boolean indicating whether the weights and the vector produced from
        them by the feature reduction function should be written out
        :param path: the path for the results
        '''
        backend.clear_session()
        self.model = Sequential()
        self.optimzier = Adadelta()
        self.epochs = 1
        self.fitByHillClimber = fitByHillClimber
        self.checkNewWeightsIsReallyBetter = checkNewWeightsIsReallyBetter
        self.numberOtRandomShots = numberOtRandomShots
        self.standardDeviation = standardDeviation
        self.numberOfNeurons = numberOfNeurons
        self.activationFunctions = activationFunctions
        self.featureReduction = featureReduction
        self.featureReductionFunction = FeatureReduction(self.featureReduction)
        self.numberLoops = numberLoops
        self.loss = loss
        self.addedLayers = "No_Layers_added"
        self.result = []
        self.fileForVectors = None
        self.printVectors = printVectors
        self.path = path
        self.beginGrowing = 0
        self.stopGrowing = 0
        self.LM = 0
        self.dataHistory = []
        self.minFailure = 100
        self.minFailureLoop = None

    def addLayers(self):
        i = 2
        self.addedLayers = "inputDim_" + str(self.numberOfNeurons[0]) + "_" + str(self.activationFunctions[0]) + \
                           "_" + str(self.numberOfNeurons[1])
        self.model.add(Dense(self.numberOfNeurons[1], kernel_initializer="uniform", input_dim=self.numberOfNeurons[0],
                             activation=self.activationFunctions[0]))
        while i < len(self.numberOfNeurons):
            self.addLayer(self.activationFunctions[i-1], self.numberOfNeurons[i])
            i += 1

    def addLayer(self, activationFunction, numberOfNeurons):
        self.model.add(Dense(numberOfNeurons, kernel_initializer="uniform"))
        self.model.add((Activation(activationFunction)))
        self.addedLayers += "_" + activationFunction + "_" + str(numberOfNeurons)

    def fitByStochasticHillClimberV3(self, inputD, outputD, callbacks=None):
        '''
        This version of the stochastic hill climber checks the error only against the new weights.
        :param inputD:
        :param outputD:
        :param callbacks:
        :return:
        '''
        weights = self.model.get_weights()
        aktWeights = self.model.get_weights()
        memDict = {}
        i = 0
        if callbacks != None:
            for f in callbacks:
                f.on_train_begin()
        while i <= self.numberOtRandomShots:
            i += 1
            loss = Functions.calcMeanSquaredError(self.model.predict(inputD, batch_size=1), outputD)
            if i == 1:
                if callbacks != None:
                    for f in callbacks:
                        f.addLoss(loss)
            memDict[loss] = weights
            weights = self.joinWeights(self.getRandomWeights(), weights)
            self.model.set_weights(weights)
            inputD = np.array([self.featureReductionFunction.calc(self.model.get_weights(), self.numberOfNeurons[0])])
            outputD = inputD

        od = collections.OrderedDict(sorted(memDict.items()))
        od = list(od.items())
        self.model.set_weights(od[0][1])
        return

    def fitByStochasticHillClimber(self, inputD, outputD, callbacks=None):
        '''
        The first two versions of the hill climber.
        V1 runs when self.checkNewWeightsIsReallyBetter is not True. In that case the check is made
        only against the old weights and their representation.
        V2 runs when self.checkNewWeightsIsReallyBetter is True. In that case a second check against
        the new weights is performed. The new weights are only adopted if both checks yield a better
        result.
        The better variant is fitByStochasticHillClimberV3, which checks only against the new weights.
        :param inputD:
        :param outputD:
        :param callbacks:
        :return:
        '''
        weights = self.model.get_weights()
        aktWeights = self.model.get_weights()
        memDict = {}
        i = 0
        if callbacks != None:
            for f in callbacks:
                f.on_train_begin()
        while i <= self.numberOtRandomShots:
            i += 1
            loss = Functions.calcMeanSquaredError(self.model.predict(inputD), outputD)
            if i == 1:
                if callbacks != None:
                    for f in callbacks:
                        f.addLoss(loss)
            memDict[loss] = weights
            weights = self.joinWeights(self.getRandomWeights(), weights)
            self.model.set_weights(weights)
        od = collections.OrderedDict(sorted(memDict.items()))
        od = list(od.items())
        if self.checkNewWeightsIsReallyBetter:
            self.model.set_weights(od[0][1])
            iData = np.array([self.featureReductionFunction.calc(self.model.get_weights(), self.numberOfNeurons[0])])
            errorWithNewWeights = Functions.calcMeanSquaredError(self.model.predict(iData, batch_size=1), iData)
            self.model.set_weights(aktWeights)
            errorWithOldWeights = Functions.calcMeanSquaredError(self.model.predict(iData, batch_size=1), iData)
            if errorWithNewWeights < errorWithOldWeights:
                self.model.set_weights(od[0][1])
        else:
            self.model.set_weights(od[0][1])
            #print(Functions.calcMeanSquaredError(self.model.predict(inputD), outputD), od[0][0])
        return

    def removeAFOutputFromWeightsArray(self, weights):
        '''
        The output of model.get_weights() delivers not only the weights of the network but also the
        current output of the neurons. Sometimes this output has to be removed for further
        computations.
        :param weights:
        :return:
        '''
        newWeights = []
        for value in weights:
            if isinstance(value[0], list) or isinstance(value[0], np.ndarray):
                newWeights.append(value)
        return newWeights

    def joinWeights(self, first, second):
        '''
        Adds two arrays representing weights.
        :param first:
        :param second:
        :return:
        '''
        newWeights = copy.deepcopy(first)
        x = 0
        for myList in first:
            if isinstance(myList[0], list):
                # add the weights
                newWeights[x] = self.joinArrays(myList, second[x])
            x += 1
        return newWeights

    def joinArrays(self, first, second):
        x = 0
        for value in first:
            if isinstance(value, list):
                first[x] = self.joinArrays(first[x], second[x])
            else:
                first[x] += second[x]
            x += 1
        return first

    def getRandomWeights(self):
        '''
        Returns randomly generated weights for the current network configuration.
        :return:
        '''
        i = 0
        while i+1 < len(self.numberOfNeurons):
            tuple = (self.numberOfNeurons[i], self.numberOfNeurons[i+1])
            layer = Functions.getRandomLayer(tuple)
            if i == 0:
                weights = layer
            else:
                for list in layer:
                    weights.append(list)
            i += 1
        return weights


    def fit(self, stepWise=False, checkLM=False, searchForThreshold=False, checkScale=False):
        numberLoops = self.numberLoops
        self.model.compile(loss=self.loss, optimizer=self.optimzier)
        history = LossHistory()
        i = 0
        iamHere = False
        while i < numberLoops:
            weights = self.model.get_weights()
            data = np.array([self.featureReductionFunction.calc(weights, self.numberOfNeurons[0])])
            self.dataHistory.append(data)
            if self.printVectors:
                self.printVec(i, self.featureReductionFunction.VecFromWeigths, data)
            if not self.fitByHillClimber:
                self.model.fit(data, data, epochs=1, callbacks=[history], verbose=0)
            else:
                self.fitByStochasticHillClimberV3(data, data, callbacks=[history])
            if history.losses[-1] < self.minFailure:
                self.minFailure = history.losses[-1]
                self.minFailureLoop = i
            self.result.append(history.losses[-1])

            i += 1
            if checkScale:
                dd = np.sum(np.array(self.result[-1000:]))
                if self.checkGrowing(self.result, 10) or dd == 0. or i > 2500:
                    break
            if searchForThreshold:
                if self.checkGrowing(self.result, 100):
                    return self.result[0], True
                if i > 1000:
                    return self.result[0], False
            if checkLM:
                if len(self.result) > 1000:
                    dd = np.sum(np.array(self.result[-1000:]))
                    if dd == 0.:
                        # if the sum of the last 1000 errors is exactly zero, a fixed point must have been found
                        self.beginGrowing = 0
                        break
                if self.checkGrowing(self.result, 10) and self.beginGrowing == 0:
                    # the error is growing again
                    self.beginGrowing = i
                    if stepWise:
                        self.numberLoops = i
                        self.printEvaluation()
                if self.beginGrowing > 0:
                    '''
                    if len(self.result) > 1000:
                        if (i > 10000 and self.checkGrowing(self.result, 100)):
                            if stepWise:
                                self.numberLoops = i
                                self.printEvaluation()
                            #print("BREAK 1", round(dd,6), dd)
                            break
                    '''
                    if not self.checkGrowing(self.result, 10, checkSame=False) and i-self.beginGrowing > 500 and not iamHere:
                        # In some cases the growth is very slow, hence checkSame = False;
                        # after beginGrowing the growth occasionally pauses briefly, which is why
                        # there should be 500 steps between beginGrowing and endGrowing
                        self.stopGrowing = i
                        self.LM = self.result[len(self.result) - 1]
                        if stepWise:
                            self.numberLoops = i
                            self.printEvaluation()
                            iamHere = True
                        else:
                            break

        pl = PltData(np.array(self.result))
        pl.linePlot(self.getFileName(i), width=1600, text=self.getDescription())

    def printEvaluation(self):
        start = -100000
        stop = 100000
        step = 1
        data = np.arange(start, stop, step)
        self.evaluate(data, str(start) + "_" + str(stop) + "_" + str(step),
                      text=self.getDescription() + "\nStart: " + str(start) + "\nStop " + str(stop) + "\nStep " + str(step))

    def checkGrowing(self, mArray, range, checkSame=True):
        if len(mArray) < range*2:
            return False
        values = np.array(mArray[-1*range*2:])
        values = values.reshape(-1, int(len(values)/2))
        if np.sum(values[0]) == np.sum(values[1]) and checkSame:
            return False
        if np.sum(values[0]) > np.sum(values[1]):
            return False
        else:
            return True

    def evaluate(self, inputData, filename, text=""):
        pD = self.model.predict(inputData, batch_size=1)
        pD = np.reshape(pD, (1, len(pD)))[0]
        pl = PltData(pD)
        pl.linePlot(self.path + self.getFileName() + "_" + filename, width=1024, text=text, xLegend="X", yLegend="Y", x=inputData, yTextPos=-0.0015, xTextPos=-10000)

    def loadModel(self):
        file = "../nnModels/" + self.getFileName() + ".h5"
        if not Functions.checkFileExists(file):
            print("no model found in " + str(file))
            return
        self.model = load_model(file)
        print("Model loaded from " + str(file))

    def saveModel(self):
        self.model.save("../nnModels/" + str(self.getFileName()) + ".h5")

    def printVec(self, loop, weight, input):
        if self.fileForVectors == None:
            self.fileForVectors = open(self.path + self.getFileName() + ".txt", "w")
            self.fileForVectors.write("numberOfLoop \t weights \t " + self.featureReduction + " result\n")
        self.fileForVectors.write(str(loop) + " \t " + repr(weight) + " \t " + repr(input) + " \n")


    def getFileName(self, numberLoops=None):
        if numberLoops is None:
            numberLoops = self.numberLoops
        fileName = "nOL_" + str(len(self.activationFunctions)) + \
                   self.addedLayers + "_nLoops_" + str(numberLoops) + "_fR_" + self.featureReduction
        if self.fitByHillClimber:
            fileName += "_standardDeviation_" + str(self.standardDeviation) + \
                        "_numberOtRandomShots_" + str(self.numberOtRandomShots)
            if self.checkNewWeightsIsReallyBetter:
                fileName += "_checkNewWeightsIsReallyBetter"
        return fileName

    def getDescription(self):
        text = "Loops: " + str(self.numberLoops) + \
               "\nLayers:" + self.getLayerText() + \
               "\nOptimizer: Adadelta" + \
               "\nFeature Reduction: " + self.featureReduction
        if self.fitByHillClimber:
            text += "\nStandard Deviation: " + str(self.standardDeviation) + \
                    "\nNumber Of Random Shots: " + str(self.numberOtRandomShots) + \
                    "\nMinimal error: " + str(self.minFailure) + " at loop: " + str(self.minFailureLoop) + \
                    "\ncheckNewWeightsIsReallyBetter:" + str(self.checkNewWeightsIsReallyBetter)

        return text

    def getLayerText(self):
        text = ""
        i = 0
        for l in self.activationFunctions:
            text += "\n" + str(i+1) + " Layer " + l + " " + str(self.numberOfNeurons[i+1]) + " neurons"
            i += 1
        return text
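
The core of fit() is a self-replication loop: each iteration reduces the current weights to a vector of input size and trains the network for one epoch to reproduce exactly that vector (input = target). A compressed sketch of this step using the class's own components (not part of this commit; hypothetical layer sizes, and it assumes the old-style Keras API imported above is available):

import numpy as np
from NeuralNetwork import NeuralNetwork

nn = NeuralNetwork(numberOfNeurons=[1, 2, 1], activationFunctions=["sigmoid", "linear"],
                   featureReduction='mean', numberLoops=10)
nn.addLayers()
nn.model.compile(loss=nn.loss, optimizer=nn.optimzier)
for _ in range(3):
    # reduce the 1x2 and 2x1 kernels (4 weights) to a single input value
    data = np.array([nn.featureReductionFunction.calc(nn.model.get_weights(), nn.numberOfNeurons[0])])
    nn.model.fit(data, data, epochs=1, verbose=0)  # learn to output the own, reduced weights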
175 related/EP/src/PltData.py Normal file
@@ -0,0 +1,175 @@
import matplotlib
matplotlib.use('Agg')

import numpy as np
import matplotlib.pyplot as plt
import networkx as nx


class PltData:

    def __init__(self, data):
        self.data = data


    def linePlot(self, fileName, x=None, legend=[], text="", yLegend="Mean squared error", xLegend="Number of loops", plotText=True, dest="images",
                 height=800, multiFigs=False, pltDiff=True, width=None, xTextPos=0, yTextPos=0):
        myDbi = 96
        dim = len(self.data.shape)
        if width == None:
            try:
                size = len(self.data[0]) * 4.5 / myDbi
            except TypeError:
                size = len(self.data) * 4.5 / myDbi
        else:
            size = width / myDbi
        if size > 2**16:
            size = 2**16

        height = height / myDbi
        plt.figure(figsize=(size, height))

        if dim == 2:
            i = 0
            for row in self.data:
                if len(legend) > 0:
                    label = legend[i]
                else:
                    label = ""
                if multiFigs:
                    if i == 0:
                        f, axarr = plt.subplots(len(self.data), sharex=True, sharey=True)
                        f.set_figheight(height)
                        f.set_figwidth(size)
                        f.text(0.04, 0.5, yLegend, va='center', rotation='vertical')
                    axarr[i].plot(row)
                    axarr[i].grid(True)
                    axarr[i].set_ylabel(label)

                else:
                    if x is None:
                        plt.plot(row, label=label)
                    else:
                        plt.plot(x, row, label=label)
                i += 1
            if pltDiff:
                plt.plot(np.subtract(self.data[0].astype(float), self.data[1].astype(float)), label="Difference")
        else:
            if x is None:
                plt.plot(self.data)
            else:
                plt.plot(x, self.data)

        plt.legend()
        if not multiFigs:
            plt.ylabel(yLegend)
            plt.xlabel(xLegend)
        if plotText:
            plt.text(0 + xTextPos, np.amax(self.data) + yTextPos, text)
        plt.grid(True)
        plt.savefig("../" + dest + "/" + fileName + ".png", bbox_inches='tight')
        plt.close()

    def plotNNModel(self, data, fileName, dest="images"):
        data, pos = self.getModelData(data)
        Gp = nx.Graph()
        Gp.add_weighted_edges_from(data)
        plt.figure(figsize=(15, 15))
        nx.draw(Gp, pos=pos)
        nx.draw_networkx_labels(Gp, pos=pos)
        nx.draw_networkx_edge_labels(Gp, pos=pos)
        plt.savefig("../" + dest + "/" + fileName, bbox_inches='tight')
        plt.close()

    def plotPoints(self, data, labels, filename, xlabel=""):
        plt.figure(figsize=(1600/96, 5))
        i = 0
        dots = ["ro", "go", "bo", "yo"]
        for row in data:
            plt.plot(row, [i] * (len(row)), dots[i], label=labels[i])
            i += 1
        plt.legend()
        plt.xlabel(xlabel)
        plt.grid(True)

        plt.savefig(filename, bbox_inches='tight')
        plt.close()

    def getModelData(self, data):
        pos = {}
        xRange, yRange = self.getPositionRanges(data)
        layer = 1000
        layerSteps = 1000
        modelData = []
        firstLayer = True
        z = 0
        r = 0
        while z < len(data)-1:
            x = data[z]
            if firstLayer: nextNodeNumber = 0
            nodeNumber = 0
            if isinstance(x[0], np.ndarray):
                xPos = xRange[r]
                xPosNext = xRange[r+1]
                r += 1
                if firstLayer:
                    yKor = int(self.getLenOfFirstLayer(x) % len(yRange)/2)
                else:
                    yKor = int((len(data[z+1]) % len(yRange))/2)
                if len(data) > z+3:
                    yKorNext = int(len(yRange)/(len(data[z + 3])) / 2)
                for array in x:
                    if not firstLayer: nextNodeNumber = 0
                    for value in array:
                        modelData.append((layer+nodeNumber, layer+layerSteps+nextNodeNumber, value))
                        try:
                            yPos = yRange[nodeNumber + yKor]
                        except IndexError:
                            yPos = yRange[nodeNumber]
                        if layer+nodeNumber not in pos:
                            pos[layer+nodeNumber] = np.array([xPos, yPos])
                        if layer+layerSteps+nextNodeNumber not in pos:
                            pos[layer+layerSteps+nextNodeNumber] = np.array([xPosNext, yRange[nextNodeNumber * yKorNext]])
                        if firstLayer:
                            nodeNumber += 1
                        else:
                            nextNodeNumber += 1
                    if not firstLayer:
                        nodeNumber += 1
                    else:
                        nextNodeNumber += 1

                layer += layerSteps
            z += 1
            firstLayer = False
        return modelData, pos

    def getPositionRanges(self, data):
        nOLayers = len(data)/2+1
        myMax = self.getLenOfFirstLayer(data[0])
        for x in data:
            if isinstance(x[0], np.float32):
                if len(x) > myMax:
                    myMax = len(x)
        xRange = np.arange(-1, 1.1, (2 / (nOLayers - 1)))
        yRange = np.arange(-1, 1.1, (2 / (myMax - 1)))
        return xRange, yRange

    def getLenOfFirstLayer(self, data):
        y = 0
        for x in data:
            y += len(x)
        return y
    '''
    for x in data:
        nextNodeNumber = 0
        nodeNumber = 0
        if isinstance(x[0], np.ndarray):
            for array in x:
                for value in array:
                    modelData.append((layer+nodeNumber, layer+layerSteps+ nextNodeNumber, value))

                nodeNumber += 1

            nextNodeNumber += 1
        layer += layerSteps
    '''
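
A minimal linePlot sketch (not part of this commit; savefig writes relative to the working directory, so an images/ directory one level up is assumed to exist):

import numpy as np
from PltData import PltData

losses = np.abs(np.random.normal(0.0, 1.0, 200))
pl = PltData(losses)
# width is given in pixels and divided by the assumed 96 dpi; writes ../images/demo.png
pl.linePlot("demo", width=800, text="demo run")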
65 related/EP/src/evalSomething.py Normal file
@@ -0,0 +1,65 @@
import matplotlib
matplotlib.use('Agg')
import datetime
from keras.utils import plot_model

import copy
import numpy as np
import decimal

try:
    from src.PltData import PltData
    from src import Functions
    from src.NeuralNetwork import NeuralNetwork
    from src.FeatureReduction import FeatureReduction
except ImportError:
    from PltData import PltData
    from NeuralNetwork import NeuralNetwork
    from FeatureReduction import FeatureReduction
    import Functions


def getRangeAroundNumber(myNumber, rangeWidth):
    '''
    Returns a range of numbers around myNumber.
    :param myNumber:
    :param rangeWidth:
    :return:
    '''
    try:
        myNumber = myNumber.real
    except TypeError:
        myNumber = myNumber
    d = decimal.Decimal(myNumber.real)
    ex = 3
    print(myNumber*(10**ex))
    start = myNumber*(10**ex) - rangeWidth
    stop = myNumber * (10**ex) + rangeWidth
    data = np.arange(start, stop, 1)
    data = data / (10**ex)
    return data


def evalSomething(numberOfNeurons, activationFunctions, featureReduction,
                  numberLoops, loss):

    nn = NeuralNetwork(numberOfNeurons, activationFunctions, featureReduction,
                       numberLoops, loss, printVectors=False)
    nn.addLayers()
    nn.loadModel()
    weights = nn.model.get_weights()
    data = np.array([nn.featureReductionFunction.calc(weights, nn.numberOfNeurons[0])])
    fp = data[0][0]
    #data = getRangeAroundNumber(fp, 30)
    data = np.arange(-10000, 10000, 1)
    start = min(data)
    stop = max(data)
    step = abs((start-stop)/len(data))
    text = nn.getDescription() + "\nFixed point: " + str(fp)
    nn.evaluate(data, str(start) + "_" + str(stop) + "_" + str(step), text=text + "\nStart: " + str(start) + "\nStop " + str(stop) + "\nStep " + str(step))


v = np.array([1, 2, 3, 24])
for i in v:
    evalSomething(numberOfNeurons=[1, i, 1], activationFunctions=["sigmoid", "linear"], featureReduction='rfft',
                  numberLoops=100000, loss='mean_squared_error')
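
getRangeAroundNumber scales myNumber by 10**ex (ex = 3) and spans rangeWidth integer steps on both sides before scaling back, so it yields a grid with step size 0.001 centred on the number. A worked check with hypothetical values:

import numpy as np

myNumber, rangeWidth, ex = 0.5, 3, 3
data = np.arange(myNumber * 10**ex - rangeWidth, myNumber * 10**ex + rangeWidth, 1) / 10**ex
print(data)  # [0.497 0.498 0.499 0.5   0.501 0.502]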
3089 related/EP/src/testSomething.py Normal file
File diff suppressed because one or more lines are too long
BIN related/EP/src/threshold.png Normal file
Binary file not shown.
After Width: | Height: | Size: 21 KiB |
49 related/EP/test/TestFeatureReduction.py Normal file
@@ -0,0 +1,49 @@
import unittest
import numpy as np

from src.FeatureReduction import FeatureReduction


class TestFeatureReduction(unittest.TestCase):

    def testfft(self):
        data = np.array([1, 2, 3, 4])
        d = FeatureReduction("mean").mean(FeatureReduction('mean').shuffelVec(data, 4), 2)
        print(d)

    def testVecMean(self):
        data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])
        d = FeatureReduction("mean").mean(data, 1)
        self.assertEqual(np.array([45/9]), d)

        d = FeatureReduction("mean").mean(data, 2)
        np.testing.assert_array_equal(np.array([round(12.5/4.5, 6), round(32.5/4.5, 6)]), d)

        d = FeatureReduction("mean").mean(data, 3)
        np.testing.assert_array_equal(np.array([2, 5, 8]), d)

        d = FeatureReduction("mean").mean(data, 4)
        np.testing.assert_array_equal(np.array([round(3.75/2.25, 6), round(8.75/2.25, 6), round(13.75/2.25, 6), round(18.75/2.25, 6)]), d)

        d = FeatureReduction("mean").mean(data, 5)
        np.testing.assert_array_equal(np.array([round(2.6/1.8, 6), round(5.8/1.8, 6), round(9/1.8, 6),
                                                round(12.2/1.8, 6), round(15.4/1.8, 6)]), d)

        d = FeatureReduction("mean").mean(data, 6)
        np.testing.assert_array_equal(np.array([round(2/1.5, 6), round(4/1.5, 6), round(6.5/1.5, 6),
                                                round(8.5/1.5, 6), round(11/1.5, 6), round(13/1.5, 6)]), d)

        d = FeatureReduction("mean").mean(data, 9)
        np.testing.assert_array_equal(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9]), d)

    def testWeigthsToVec(self):
        test = np.array([[0.04457645, -0.03319572]], dtype=np.float32), np.array([0., 0.], dtype=np.float32), np.array([[-0.03747094],
               [0.01189486]], dtype=np.float32), np.array([0.], dtype=np.float32)
        FeatureReduction("mean").calc(test, 1)

    def testShuffelVec(self):
        vec = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        print(FeatureReduction('mean').shuffelVec(vec, 2))

    def testPP(self):
        vec = np.array([1., 5., 3.])
        print(FeatureReduction('mean').calc(vec, 1))
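
shuffelVec, exercised in testShuffelVec above, takes every mod-th element first and recurses on the remainder; a worked check of the resulting order (not part of this commit; assumes related/EP as the project root):

import numpy as np
from src.FeatureReduction import FeatureReduction

vec = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
# indices 0, 2, 4, ... first -> [1, 3, 5, 7, 9], then recurse on [2, 4, 6, 8, 10]
print(FeatureReduction('mean').shuffelVec(vec, 2))
# [ 1.  3.  5.  7.  9.  2.  6. 10.  4.  8.]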
29 related/EP/test/TestFunctions.py Normal file
@@ -0,0 +1,29 @@
import unittest
import numpy as np

import src.Functions


class TestFunctions(unittest.TestCase):

    def testcalcMeanSquaredError(self):
        a = np.array([1, 2, 3, 4, 5])
        b = np.array([1.1, 2.05, 2.95, 4.01, 4.5])
        # mean of the squared differences: (0.01 + 0.0025 + 0.0025 + 0.0001 + 0.25) / 5 = 0.05302
        self.assertAlmostEqual(0.05302, src.Functions.calcMeanSquaredError(a, b))

        a = np.array(['1', '2', '3', '4', '5'])
        b = np.array(['1.1', '2.05', '2.95', '4.01', '4.5'])
        self.assertAlmostEqual(0.05302, src.Functions.calcMeanSquaredError(a, b))

    def testGetRandomLayer(self):
        # getRandomLayer returns [kernel, bias]; the kernel must have the requested shape
        layer = (1, 3)
        self.assertEqual(layer, np.shape(src.Functions.getRandomLayer(layer)[0]))
        layer = (3, 1)
        self.assertEqual(layer, np.shape(src.Functions.getRandomLayer(layer)[0]))
        layer = (8, 2)
        self.assertEqual(layer, np.shape(src.Functions.getRandomLayer(layer)[0]))
        layer = (100, 1)
        self.assertEqual(layer, np.shape(src.Functions.getRandomLayer(layer)[0]))
        layer = (1, 1)
        self.assertEqual(layer, np.shape(src.Functions.getRandomLayer(layer)[0]))
        layer = (4, 50)
        self.assertEqual(layer, np.shape(src.Functions.getRandomLayer(layer)[0]))
41 related/EP/test/testPlotData.py Normal file
@@ -0,0 +1,41 @@
import unittest
import numpy as np

from src.PltData import PltData


class TestPlotData(unittest.TestCase):

    def testPlotNNModel(self):
        # [2, 3, 5] network
        nn = np.array([[-0.00862074, -0.00609563], [0.03935056, 0.0159397]], dtype=np.float32), \
             np.array([0., 0.], dtype=np.float32), \
             np.array([[0.01351449, 0.04824072, 0.04954299], [0.04268739, -0.04188565, 0.03875775]], dtype=np.float32), \
             np.array([0., 0., 0.], dtype=np.float32), \
             np.array([[0.01074128, -0.00355459, 0.00787288, -0.02870593, -0.0204265], [0.01399798, -0.0096233, 0.03152497, 0.03874204, -0.0466414], [0.04445429, -0.02976017, 0.00065653, -0.04210887, -0.02864893]], dtype=np.float32), \
             np.array([0., 0., 0., 0., 0.], dtype=np.float32)

        # [2, 1, 2] network
        nn2 = np.array([[0.01390548, -0.01149112], [0.02786468, -0.02605006]], dtype=np.float32), \
              np.array([0., 0.], dtype=np.float32), \
              np.array([[-0.03265964], [0.013609]], dtype=np.float32), \
              np.array([0.], dtype=np.float32), \
              np.array([[0.02287653, 0.02650055]], dtype=np.float32), \
              np.array([0., 0.], dtype=np.float32)

        # [4, 2, 2] network
        nn3 = np.array([[0.03519103, -0.04059422, 0.04508766, -0.04067679], [0.01457861, 0.01178179, -0.01784203, 0.00051603], [-0.00807861, 0.01152407, 0.0136507, 0.02639047], [0.04526602, -0.01604335, 0.00661949, 0.0434478]], dtype=np.float32), \
              np.array([0., 0., 0., 0.], dtype=np.float32), \
              np.array([[0.03728329, -0.01507163], [0.00789828, 0.0494065], [-0.00945786, -0.04301547], [-0.01999701, -0.01306728]], dtype=np.float32), \
              np.array([0., 0.], dtype=np.float32), \
              np.array([[-0.03051615, -0.03279487], [0.01100482, -0.02652025]], dtype=np.float32), \
              np.array([0., 0.], dtype=np.float32)

        # [1, 1, 2] network
        nn4 = np.array([[0.01390548]], dtype=np.float32), \
              np.array([0.], dtype=np.float32), \
              np.array([[-0.03265964]], dtype=np.float32), \
              np.array([0.], dtype=np.float32), \
              np.array([[0.02287653, 0.02650055]], dtype=np.float32), \
              np.array([0., 0.], dtype=np.float32)

        PltData(None).plotNNModel(nn3, "test.png")