Function for loading colored routes from a file. This way, users can define their own paths without manual coordinate input.
91 lines
4.0 KiB
Python
from keras.layers import TimeDistributed, Dense, LSTM, MaxPooling1D, Convolution1D, UpSampling2D, RepeatVector, \
|
|
MaxPooling2D, Convolution2D, Deconvolution2D, ConvLSTM2D, AveragePooling3D, Flatten, Reshape, ZeroPadding2D, \
|
|
ZeroPadding1D, Dropout
|
|
from keras.models import Sequential
|
|
|
|
from keras.callbacks import TensorBoard
|
|
from keras import backend as K
|
|
from temporalPooling import TemporalMaxPooling
|
|
from keras.utils import plot_model
|
|
|
|
|
|
import time
|
|
import numpy as np
|
|
import pickle
|
|
from math import sqrt
|
|
|
|
# from tools import TrackCollection
|
|
|
|
|
|
def get_batch(X, size):
    """Draw a random mini-batch of `size` rows from `X`.

    Sampling is uniform and without replacement, so the same row never
    appears twice within one batch.
    """
    picked_rows = np.random.choice(len(X), size, replace=False)
    return X[picked_rows]
|
|
|
|
|
|
def load_preprocesseed_data(filename):
    """Unpickle a preprocessed data object from `filename`.

    The file must carry the '.pik' suffix; anything else raises TypeError
    before the file is touched.

    NOTE(review): pickle.load executes arbitrary code on load — only feed
    this function files from a trusted source.
    """
    if not filename.endswith('.pik'):
        raise TypeError('input File needs to be a Pickle object ".pik"!')
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
|
|
|
|
# Autoencoder From:
|
|
# https://github.com/BinRoot/TensorFlow-Book/blob/master/ch07_autoencoder/Concept01_autoencoder.ipynb
|
|
|
|
|
|
if __name__ == '__main__':
    '''HERE IS THE TRAINING!!!!!'''
    # Load the pickled track collection and slice it into 5-frame samples;
    # values are cast to int (presumably binary occupancy grids — the
    # binary_crossentropy loss below suggests 0/1 data; TODO confirm).
    trackCollection = load_preprocesseed_data('test_track.pik')
    dataArray = trackCollection.as_n_sample_4D(5).astype(int)
    # [nb_samples, nb_frames, width, height, channels] # if using dim_ordering = 'tensorflow'
    # [nb_samples, nb_frames, channels, width, height] # if using dim_ordering = 'theano'
    # Append a trailing singleton channel axis -> (samples, time, w, h, 1).
    dataArray = dataArray[..., None]
    samples, timestep, w, h, c = dataArray.shape
    # `classes` doubles as the conv filter count and the width of the
    # softmax bottleneck in the middle of the autoencoder.
    classes = 10
    # Legacy Keras API: force TensorFlow (channels-last) dim ordering.
    # NOTE(review): removed in modern Keras — this script targets old Keras 1.x/2.x.
    K.set_image_dim_ordering('tf')

    # --- Encoder: per-frame Conv/MaxPool stacks via TimeDistributed ---
    seq = Sequential()
    seq.add(TimeDistributed(Convolution2D(activation='relu', filters=classes, kernel_size=(3, 3), strides=1), input_shape=(timestep, w, h, c)))
    seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2)))
    seq.add(TimeDistributed(Convolution2D(activation='relu', filters=classes, kernel_size=(3, 3), strides=1)))
    seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2)))
    seq.summary()
    # sleep so the summary is fully printed before training output starts
    time.sleep(1)
    seq.add(TimeDistributed(Convolution2D(activation='relu', filters=classes, kernel_size=(3, 3), strides=1)))
    # seq.add(TimeDistributed(Dropout(0.2)))
    seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2)))
    seq.add(TimeDistributed(Flatten()))
    # Collapse the time dimension into a single state vector; LSTM width is
    # taken from the flattened per-frame feature size of the previous layer.
    seq.add(LSTM(int(seq.layers[-1].output_shape[-1]), return_sequences=False)) # ,recurrent_dropout=0.2, dropout=0.2))
    seq.add(Dense(int(seq.layers[-1].output_shape[-1]), activation=None))
    # Bottleneck: `classes`-wide softmax code in the middle of the autoencoder.
    seq.add(Dense(classes, activation='softmax'))
    # Expand back to the pre-bottleneck width (layers[-2] is the Dense
    # added two statements above — this lookup depends on add order).
    seq.add(Dense(int(seq.layers[-2].output_shape[-1]), activation=None))
    # seq.summary()
    # time.sleep(1)

    # --- Decoder: repeat the code per timestep, LSTM, then upsample back ---
    seq.add(RepeatVector(timestep))
    seq.add(LSTM(seq.layers[-1].output_shape[-1], return_sequences=True)) # , recurrent_dropout=0.2, dropout=0.2))
    # Recover a square spatial map: LSTM output is reshaped to
    # (reValue, reValue, classes). Assumes the feature count is a perfect
    # square times `classes` — TODO confirm for arbitrary input sizes.
    reValue = int(sqrt(seq.layers[-1].output_shape[-1]//classes))
    seq.add(TimeDistributed(Reshape((reValue, reValue, classes))))
    seq.add(TimeDistributed(UpSampling2D(2)))
    # seq.add(TimeDistributed(Dropout(0.2)))
    seq.add(TimeDistributed(Deconvolution2D(activation='relu', filters=classes, kernel_size=(3,3), strides=1)))
    seq.add(TimeDistributed(UpSampling2D(2)))
    seq.add(TimeDistributed(Deconvolution2D(activation='relu', filters=classes//2, kernel_size=(3,3), strides=1)))
    seq.add(TimeDistributed(UpSampling2D(2)))
    # Final deconv maps back to a single channel to match the input.
    seq.add(TimeDistributed(Deconvolution2D(activation='relu', filters=1, kernel_size=(3,3), strides=1)))

    seq.compile(loss='binary_crossentropy', optimizer='adagrad') # adadelta
    seq.summary()
    time.sleep(1)

    # NOTE(review): histogram_freq > 0 requires validation data in Keras;
    # fit() below passes none — confirm this does not error on this version.
    bCallBack = TensorBoard(log_dir='./logdir', histogram_freq=10, write_graph=True, write_images=True)
    # Autoencoder training: input == target.
    seq.fit(dataArray, dataArray, batch_size=500, epochs=100, callbacks=[bCallBack])

    # reconstructed = seq.predict(dataArray)
    # seq.save('TDConv_LSTM_D_LSTM_TDConv')
    # tc2 = load_preprocesseed_data('test_track.pik')
    # testArrays = tc2.as_n_sample_4D(5)
    # testArrays = testArrays[..., None]

    # get_3rd_layer_output = K.function([seq.layers[0].input],
    #                                   [seq.layers[3].output])
    # layer_output = get_3rd_layer_output([])[0]