Automatic Route Importer
Adds a function for loading colored routes from a bitmap file, so that a user can define their own paths without entering coordinates manually.
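Rough usage sketch, assembled from the calls that appear in the diff below (the bitmap path, tile colors and range limit are simply the values used in this commit, not a fixed API):

    import numpy as np
    from PIL import Image
    from tools import Worker, TrackCollection, IndoorToolset

    walkableTiles = 255                                  # color of walkable floor tiles
    startColor, trackColor, endColor = 210, 103, 79      # colors marking route start / body / end

    with Image.open('maps\\x.bmp') as f:                 # bitmap with hand-drawn colored routes
        baseToolset = IndoorToolset(np.array(f), walkableTiles, worker=Worker())

    pCol = TrackCollection(baseToolset)
    pCol.read_from_basemap(startColor, trackColor, endColor)   # the new importer
    baseToolset.isovists.set_rangeLimit(30)
    baseToolset.isovists.add_for_trackCollection(pCol)
    pCol.save_to_disc('x')                                     # pickle for the training scripts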
basicTraining.py (new file, 90 lines)
@@ -0,0 +1,90 @@
from keras.layers import TimeDistributed, Dense, LSTM, MaxPooling1D, Convolution1D, UpSampling2D, RepeatVector, \
    MaxPooling2D, Convolution2D, Deconvolution2D, ConvLSTM2D, AveragePooling3D, Flatten, Reshape, ZeroPadding2D, \
    ZeroPadding1D, Dropout
from keras.models import Sequential

from keras.callbacks import TensorBoard
from keras import backend as K
from temporalPooling import TemporalMaxPooling
from keras.utils import plot_model


import time
import numpy as np
import pickle
from math import sqrt

# from tools import TrackCollection


def get_batch(X, size):
    a = np.random.choice(len(X), size, replace=False)
    return X[a]


def load_preprocesseed_data(filename):
    if not filename.endswith('.pik'):
        raise TypeError('input File needs to be a Pickle object ".pik"!')
    with open(filename, 'rb') as f:
        data = pickle.load(f)
    return data


# Autoencoder From:
# https://github.com/BinRoot/TensorFlow-Book/blob/master/ch07_autoencoder/Concept01_autoencoder.ipynb


if __name__ == '__main__':
    '''HERE IS THE TRAINING!!!!!'''
    trackCollection = load_preprocesseed_data('test_track.pik')
    dataArray = trackCollection.as_n_sample_4D(5).astype(int)
    # [nb_samples, nb_frames, width, height, channels]  # if using dim_ordering = 'tensorflow'
    # [nb_samples, nb_frames, channels, width, height]  # if using dim_ordering = 'theano'
    dataArray = dataArray[..., None]
    samples, timestep, w, h, c = dataArray.shape
    classes = 10
    K.set_image_dim_ordering('tf')

    seq = Sequential()
    seq.add(TimeDistributed(Convolution2D(activation='relu', filters=classes, kernel_size=(3, 3), strides=1), input_shape=(timestep, w, h, c)))
    seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2)))
    seq.add(TimeDistributed(Convolution2D(activation='relu', filters=classes, kernel_size=(3, 3), strides=1)))
    seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2)))
    seq.summary()
    time.sleep(1)
    seq.add(TimeDistributed(Convolution2D(activation='relu', filters=classes, kernel_size=(3, 3), strides=1)))
    # seq.add(TimeDistributed(Dropout(0.2)))
    seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2)))
    seq.add(TimeDistributed(Flatten()))
    seq.add(LSTM(int(seq.layers[-1].output_shape[-1]), return_sequences=False))  # , recurrent_dropout=0.2, dropout=0.2))
    seq.add(Dense(int(seq.layers[-1].output_shape[-1]), activation=None))
    seq.add(Dense(classes, activation='softmax'))
    seq.add(Dense(int(seq.layers[-2].output_shape[-1]), activation=None))
    # seq.summary()
    # time.sleep(1)
    seq.add(RepeatVector(timestep))
    seq.add(LSTM(seq.layers[-1].output_shape[-1], return_sequences=True))  # , recurrent_dropout=0.2, dropout=0.2))
    reValue = int(sqrt(seq.layers[-1].output_shape[-1] // classes))
    seq.add(TimeDistributed(Reshape((reValue, reValue, classes))))
    seq.add(TimeDistributed(UpSampling2D(2)))
    # seq.add(TimeDistributed(Dropout(0.2)))
    seq.add(TimeDistributed(Deconvolution2D(activation='relu', filters=classes, kernel_size=(3, 3), strides=1)))
    seq.add(TimeDistributed(UpSampling2D(2)))
    seq.add(TimeDistributed(Deconvolution2D(activation='relu', filters=classes // 2, kernel_size=(3, 3), strides=1)))
    seq.add(TimeDistributed(UpSampling2D(2)))
    seq.add(TimeDistributed(Deconvolution2D(activation='relu', filters=1, kernel_size=(3, 3), strides=1)))

    seq.compile(loss='binary_crossentropy', optimizer='adagrad')  # adadelta
    seq.summary()
    time.sleep(1)

    bCallBack = TensorBoard(log_dir='./logdir', histogram_freq=10, write_graph=True, write_images=True)
    seq.fit(dataArray, dataArray, batch_size=500, epochs=100, callbacks=[bCallBack])

    # reconstructed = seq.predict(dataArray)
    # seq.save('TDConv_LSTM_D_LSTM_TDConv')
    # tc2 = load_preprocesseed_data('test_track.pik')
    # testArrays = tc2.as_n_sample_4D(5)
    # testArrays = testArrays[..., None]

    # get_3rd_layer_output = K.function([seq.layers[0].input],
    #                                   [seq.layers[3].output])
    # layer_output = get_3rd_layer_output([])[0]
gumble10.py (new file, 136 lines)
@@ -0,0 +1,136 @@
from keras.layers import (Input, TimeDistributed, Dense, LSTM, UpSampling2D, RepeatVector, MaxPooling2D,
                          Convolution2D, Deconvolution2D, Flatten, Reshape, Lambda)

from keras.models import Model, Sequential

from keras import backend as K

from keras.metrics import binary_crossentropy
from keras.activations import softmax


import numpy as np
import pickle
from math import sqrt

from Trainer import Trainer


def get_batch(X, size):
    a = np.random.choice(len(X), size, replace=False)
    return X[a]


def load_preprocesseed_data(filename):
    if not filename.endswith('.pik'):
        raise TypeError('input File needs to be a Pickle object ".pik"!')
    with open(filename, 'rb') as f:
        data = pickle.load(f)
    return data


if __name__ == '__main__':
    K.set_image_dim_ordering('tf')
    '''HERE IS THE TRAINING!!!!!'''
    # Paper from https://github.com/nzw0301/keras-examples/blob/master/gumbel_softmax_vae_MNIST.ipynb
    # https://arxiv.org/pdf/1611.01144.pdf

    # Data preprocessing; keep the batch size small because of limited memory, 500 should do, then rerun the fitting!
    trackCollection = load_preprocesseed_data('test_track.pik')
    T = Trainer('gumble', trackCollection, 2, 5)

    # PreStage 1: Encoder Input
    enc_input = Input(shape=(T.timesteps, T.width, T.height, 1), name='main_input')

    # Stage 1: Encoding
    enc_seq = Sequential(name='Encoder')
    enc_seq.add(TimeDistributed(Convolution2D(activation='relu', filters=T.filters,
                                              kernel_size=(3, 3), strides=1), name='Conv1',
                                input_shape=(T.timesteps, T.width, T.height, 1)))
    enc_seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2), name='MaxPool1'))

    enc_seq.add(TimeDistributed(Convolution2D(activation='relu', filters=T.filters,
                                              kernel_size=(5, 5), strides=1),
                                name='Conv2'))
    enc_seq.add(TimeDistributed(MaxPooling2D(pool_size=2, strides=2), name='MaxPool2'))

    enc_seq.add(TimeDistributed(Flatten(), name='Flatten'))
    enc_seq.add(LSTM(int(enc_seq.layers[-1].output_shape[-1]), return_sequences=False, name='LSTM_Encode'))

    encoding = enc_seq(enc_input)

    # Stage 2: Bottleneck
    logits_y = Dense(T.classes * T.cD)(encoding)  # activation='softmax'? I think not

    # Sampling Function
    def sampling(logits):
        U = K.random_uniform(K.shape(logits), 0, 1)
        y = logits - K.log(-K.log(U + 1e-20) + 1e-20)  # logits + gumbel noise
        y = softmax(K.reshape(y, (-1, T.cD, T.classes)) / T.tau)
        y = K.reshape(y, (-1, T.cD * T.classes))
        return y

    z = Lambda(sampling)(logits_y)

    # Stage 3: Decoding
    dec_seq = Sequential(name='Decoder')

    dec_seq.add(RepeatVector(T.timesteps, name='TimeRepeater', input_shape=(T.classes * T.cD,)))
    dec_seq.add(LSTM(enc_seq.layers[-1].output_shape[-1], return_sequences=True, name='LSTM_Decode'))

    reValue = int(sqrt(dec_seq.layers[-1].output_shape[-1] // T.filters))

    dec_seq.add(TimeDistributed(Reshape((reValue, reValue, T.filters)), name='ReShape'))

    dec_seq.add(TimeDistributed(UpSampling2D(2), name='Up1'))
    dec_seq.add(TimeDistributed(Deconvolution2D(activation='relu', filters=T.filters,
                                                kernel_size=(4, 4), strides=1), name='DeConv1'))
    dec_seq.add(TimeDistributed(UpSampling2D(2), name='Up2'))
    dec_seq.add(TimeDistributed(Deconvolution2D(activation='hard_sigmoid', filters=1, kernel_size=(5, 5), strides=1),
                                name='DeConv2'))

    dec_output = dec_seq(z)

    # Gumbel Loss Function
    def gumbel_loss(x, x_hat):
        # N = T.cD; M = T.classes
        q_y = K.reshape(logits_y, (-1, T.cD, T.classes))
        q_y = softmax(q_y)
        log_q_y = K.log(q_y + 1e-20)
        kl_tmp = q_y * (log_q_y - K.log(1.0 / T.classes))
        KL = K.sum(kl_tmp, axis=(1, 2))
        x = K.reshape(x, (-1, T.original_dim))  # !
        x_hat = K.reshape(x_hat, (-1, T.original_dim))  # !
        elbo = T.original_dim * binary_crossentropy(x, x_hat) - KL
        return elbo

    T.set_model(Model(inputs=enc_input, outputs=dec_output), gumbel_loss, optimizer='adagrad')

    # Generator from latent to input space
    decoder_input = Input(shape=(T.classes * T.cD,))
    decoder_output = dec_seq(decoder_input)
    T.set_generator(Model(inputs=decoder_input, outputs=decoder_output))

    # Separate encoder from input to latent space
    argmax_y = K.max(K.reshape(logits_y, (-1, T.cD, T.classes)), axis=-1, keepdims=True)
    argmax_y = K.equal(K.reshape(logits_y, (-1, T.cD, T.classes)), argmax_y)
    encoder = K.function([enc_input], [argmax_y])
    T.set_encoder(encoder)

    if True:
        T.load_weights('Gumble10Weights')
        T.train('Gumble10Weights')
        T.save_weights('Gumble10Weights')
    if False:
        T.load_weights('Gumble10Weights')
    if False:
        T.plot_model('Gumble10.png', show_shapes=True, show_layer_names=True)
    if False:
        # T.color_track(trackCollection[list(trackCollection.keys())[2200]], nClusters=4)  # 2600
        T.color_random_track(completeSequence=False, nClusters=4)
    if True:
        T.show_prediction(200)
    if False:
        T.sample_latent(200)
    if True:
        T.multi_path_coloring(10)
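For reference, the `sampling` Lambda in gumble10.py is the Gumbel-Softmax reparameterization from the paper linked in the script (arXiv:1611.01144). A standalone numpy sketch of the same computation (function name and the toy shapes are illustrative, not part of the commit):

    import numpy as np

    def gumbel_softmax_sample(logits, cD, classes, tau=1.0):
        # logits: (batch, cD * classes) unnormalized log-probabilities for cD categorical variables
        U = np.random.uniform(0.0, 1.0, size=logits.shape)
        y = logits - np.log(-np.log(U + 1e-20) + 1e-20)      # add Gumbel(0, 1) noise
        y = y.reshape(-1, cD, classes) / tau                  # temperature-scaled
        y = np.exp(y - y.max(axis=-1, keepdims=True))
        y = y / y.sum(axis=-1, keepdims=True)                 # softmax over each categorical
        return y.reshape(-1, cD * classes)

    sample = gumbel_softmax_sample(np.random.randn(4, 2 * 10), cD=2, classes=10, tau=1.0)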
@@ -12,6 +12,9 @@ from tools import Worker, Isovist, TrackCollection, IndoorToolset  # , Track, Is
 if __name__ == '__main__':
     walkableTiles = 255
+    trackColor = 103
+    startColor = 210
+    endColor = 79
     w = Worker()
     # file = 'maps\Tate.bmp'
     # file = 'maps\Map.bmp'
@@ -20,9 +23,9 @@ if __name__ == '__main__':
     # file = 'maps\doom.bmp'
     # file = 'maps\priz.bmp'
     # file = 'maps\\tum.bmp'
-    file = 'maps\crossing.bmp'
+    file = 'maps\\x.bmp'
     with Image.open(file) as f:
-        baseToolset = IndoorToolset(np.array(f), walkableTiles, worker=w, isoVistSize=30)
+        baseToolset = IndoorToolset(np.array(f), walkableTiles, worker=w)

     baseToolset.refresh_random_clock()

@@ -87,21 +90,31 @@ if __name__ == '__main__':

     """Synthesyse n-bunch Tracks/ minimum length / storage for TensorFlow"""
     # Some Explenation text here
-    if True:
+    if False:
         pCol = TrackCollection(baseToolset)
         pCol.add_single_track((67, 103), (67, 64), qhull=False)
         pCol.add_single_track((47, 82), (88, 82), qhull=False)
-        #pCol.add_n_bunch_random(
+        # pCol.add_n_bunch_random(
         # 1000, penalty=None, safe=False,
         # minLen=50
         # # minLen=int(sqrt(baseToolset.width * baseToolset.height) / 4)
-        #)
+        # )

         pCol.save_to_disc('crossing')

         # pCol3 = TrackCollection(baseToolset)
         # pCol3.recover_from_disc('synthetic_tracks_sizeDIV4')

+    """Read Tracks from colored Bitmap"""
+    if True:
+        pCol = TrackCollection(baseToolset)
+        pCol.read_from_basemap(startColor, trackColor, endColor)
+        baseToolset.isovists.set_rangeLimit(30)
+        baseToolset.isovists.add_for_trackCollection(pCol)
+        print(len(pCol))
+        pCol.save_to_disc('x')
+
+        pass
     print('Success!')
     pass
     pass
tools.py (94 lines changed)
@@ -19,7 +19,8 @@ from PCHA import PCHA
 from operator import itemgetter
 from dtw import dtw

-workercount = 4
+workercount = 1
+

 class Worker(object):
     def __init__(self, n=workercount):
@@ -61,7 +62,7 @@ class Worker(object):


 class IsovistCollection(UserDict):
-    def __init__(self, walkable, rangeLimit, tileArray, worker=None):
+    def __init__(self, walkable, rangeLimit, tileArray, worker=None, single_threaded=False):
         super(IsovistCollection, self).__init__()
         if not isinstance(worker, Worker):
             raise TypeError
@@ -70,16 +71,18 @@ class IsovistCollection(UserDict):
         self.tileArray = tileArray
         self.rangeLimit = rangeLimit
         self.lfr = None
-        if isinstance(self.tileArray, np.ndarray):
-            workerResult = worker.init_many(
-                Isovist, [(*npIdx, self.tileArray, self.walkable, self.rangeLimit)
-                          for npIdx, value in np.ndenumerate(self.tileArray) if value == self.walkable])
-            self.data = {isovist.vertex: isovist for isovist in workerResult}
+        if rangeLimit:
+            if not single_threaded:
+                workerResult = worker.init_many(
+                    Isovist, [(*npIdx, self.tileArray, self.walkable, self.rangeLimit)
+                              for npIdx, value in np.ndenumerate(self.tileArray) if value == self.walkable])
+                self.data = {isovist.vertex: isovist for isovist in workerResult}
             # The following would be a non multithreaded approach, maybe activate it for smaller blueprints later
             # TODO: Activate this for smaller Blueprints, when multithreading would lead to overhead
-            # for ndIndex, value in np.ndenumerate(self.tileArray):
-            #     if value == self.walkable:
-            #         self.addIsovist(*ndIndex)
+            else:
+                for ndIndex, value in np.ndenumerate(self.tileArray):
+                    if value == self.walkable or value > 0:
+                        self.add_isovist(*ndIndex)
         else:
             pass

@@ -191,6 +194,21 @@ class IsovistCollection(UserDict):

     # [nb_samples, nb_frames, width, height, channels]

+    def set_rangeLimit(self, n):
+        if isinstance(n, int):
+            self.rangeLimit = n
+        else:
+            raise TypeError('n needs to be an integer!')
+
+    def add_for_trackCollection(self, trackCollection):
+        if isinstance(self.tileArray, np.ndarray) and self.rangeLimit:
+            for key in trackCollection.keys():
+                for i in range(len(trackCollection[key])):
+                    self.add_isovist(*trackCollection[key][i])
+        else:
+            raise ValueError('Please provide a valid basemap array and a rangeLimit >= 1)')
+        return True
+

 class Isovist(object):
     def __init__(self, x, y, array, walkable, rangeLimit):
@@ -218,7 +236,8 @@ class Isovist(object):
         self.x = x
         self.y = y
         self.vertex = (self.x, self.y)
-        self.rangeLimit = rangeLimit if rangeLimit else int(sqrt(array.shape[0] * array.shape[1]))
+        if isinstance(array, np.ndarray):
+            self.rangeLimit = rangeLimit if rangeLimit else int(sqrt(array.shape[0] * array.shape[1]))
         self.visArray = np.zeros(array.shape, dtype=bool)

         for octant in range(8):
@@ -246,7 +265,7 @@ class Isovist(object):
         if x < 0 or y < 0:
             return True
         try:
-            return False if array[x, y] == walkable else True
+            return False if array[x, y] == walkable or array[x, y] > 0 else True
         except IndexError:
             return True

@@ -385,7 +404,7 @@ class TrackCollection(UserDict):
             key = self.__find_list_middle(track)
             track.vertex = key
             self[key] = track
-            n = i
+            n = i + 1

         else:
             singleSourceDij_S = nx.single_source_dijkstra_path(self.map.graph, start, weight='weight')
@@ -650,6 +669,48 @@ class TrackCollection(UserDict):
         saveIMG = saveIMG if saveIMG.endswith('.tif') else '%s.tif' % saveIMG
         savefig(saveIMG)

+    def read_from_basemap(self, startColor, trackColor, endColor):
+
+        def find_next_candidates(p):
+            positions = [(p[0]-1, p[1]-1),
+                         (p[0], p[1]-1),
+                         (p[0]+1, p[1]-1),
+                         (p[0]-1, p[1]),
+                         (p[0]+1, p[1]),
+                         (p[0]-1, p[1]+1),
+                         (p[0], p[1]+1),
+                         (p[0]+1, p[1]+1)]
+            return [point for point in positions if self.map.imgArray[point] in [endColor, trackColor]]
+
+        startTiles, tracks = list(), list()
+        for idx, value in np.ndenumerate(self.map.imgArray):
+            if value == startColor:
+                startTiles.append(idx)
+
+        for startTile in startTiles:
+            currentTrack = list()
+            position = startTile
+            while self.map.imgArray[position] != endColor:
+                currentTrack.append(position)
+                c = find_next_candidates(position)
+                if len(c) == 1:
+                    position = c[0]
+                elif len(c) == 2:
+                    position = c[0] if c[0] not in currentTrack else c[1]
+                else:
+                    raise ValueError('Something went wrong here, maybe no stop position?')
+            tracks.append(Track(currentTrack, self.map.walkableTile, qhull=False))
+        self.add_n_bunch_tracks(0, 0, 0, tracks)
+
+
+        print('pass')
+        pass
+

 class Track(UserList):
     def __init__(self, NodeList, walkableTile, qhull=True):
@@ -751,7 +812,7 @@ class Track(UserList):


 class IndoorToolset(object):
-    def __init__(self, imageArray, walkableTile, graph=None, worker=None, isoVistSize=25):
+    def __init__(self, imageArray, walkableTile, graph=None, worker=None, isoVistSize=0):
         """
         :param graph: An optional Graph
         :type graph: nx.Graph
@@ -771,6 +832,7 @@ class IndoorToolset(object):
         self.__rand = random.Random()
         self.isovists = IsovistCollection(self.walkableTile, isoVistSize, self.imgArray, worker=worker)
+

     def refresh_random_clock(self):
         self.__rand.seed(time.clock())

@@ -851,7 +913,7 @@ class IndoorToolset(object):
     def translate_to_graph(self):
         graph = nx.Graph()
         for idx, value in np.ndenumerate(self.imgArray):
-            if value == self.walkableTile:
+            if value > 0 or value == self.walkableTile:
                 x, y = idx
                 graph.add_node((x, y), count=0)

@@ -940,6 +1002,8 @@ class IndoorToolset(object):
         return self.calculate_path(p, p2, penalty=penalty)

+
+
 # Extraction Function - had to be static because of multiprocessing
 def extract_arch_attributes(track, shortestT, hClassList):
     attributes = list()