added type detection pipeline
parent 821b2d1961
commit 1f7bfe7765

main_pipeline.py (new file, 94 lines)
@@ -0,0 +1,94 @@
from pathlib import Path

import torch
from torch_geometric.data import Data
from tqdm import tqdm

import polyscope as ps
import numpy as np

from torch.utils.data import DataLoader

# Dataset and Dataloaders
# =============================================================================

# Transforms
from ml_lib.point_toolset.point_io import BatchToData
from ml_lib.utils.model_io import SavedLightningModels


# Datasets
from datasets.full_pointclouds import FullCloudsDataset
from utils.pointcloud import read_pointcloud, normalize_pointcloud, cluster_cubes, append_onehotencoded_type, \
    label2color, polytopes_to_planes
from utils.project_config import GlobalVar, ThisConfig


def prepare_dataloader(config_obj):
    dataset = FullCloudsDataset(config_obj.data.root, split=GlobalVar.data_split.test,
                                setting=GlobalVar.settings[config_obj.model.type])
    # noinspection PyTypeChecker
    return DataLoader(dataset, batch_size=config_obj.train.batch_size,
                      num_workers=config_obj.data.worker, shuffle=False)


def restore_logger_and_model(log_dir):
    model = SavedLightningModels.load_checkpoint(models_root_path=log_dir, n=-1)
    model = model.restore()
    if torch.cuda.is_available():
        model.cuda()
    else:
        model.cpu()
    return model


def predict_prim_type(input_pc, model):
    # Normals (cols 3:6), positions (cols 0:3) and an all-zero per-point
    # batch index (a single cloud).
    input_data = (
        torch.tensor(np.array([input_pc[:, 3:6]], float)),
        torch.tensor(input_pc[:, 0:3]),
        np.zeros(input_pc.shape[0])
    )

    batch_to_data = BatchToData()

    data = batch_to_data(input_data[0], input_data[1], input_data[2])
    y = model(data.to(device='cuda' if torch.cuda.is_available() else 'cpu'))
    y_primary = torch.argmax(y.main_out, dim=-1).squeeze().cpu().numpy()

    # Append the predicted primitive type as an additional column.
    return np.concatenate((input_pc, y_primary.reshape(-1, 1)), axis=1)


if __name__ == '__main__':

    input_pc_path = 'data/pc/pc.txt'

    model_path = Path('trained_models/version_1')
    config_filename = 'config.ini'
    config = ThisConfig()
    config.read_file((Path(model_path) / config_filename).open('r'))
    loaded_model = restore_logger_and_model(model_path)
    loaded_model.eval()

    input_pc = read_pointcloud(input_pc_path, ' ', False)

    input_pc = normalize_pointcloud(input_pc)

    grid_clusters = cluster_cubes(input_pc, [1, 1, 1], 2048)

    ps.init()

    for i, grid_cluster_pc in enumerate(grid_clusters):

        print("Cluster pointcloud size: {}".format(grid_cluster_pc.shape[0]))

        pc_with_prim_type = predict_prim_type(grid_cluster_pc, loaded_model)

        # pc_with_prim_type = polytopes_to_planes(pc_with_prim_type)

        pc_with_prim_type = append_onehotencoded_type(pc_with_prim_type)

        pc = ps.register_point_cloud("points_" + str(i), pc_with_prim_type[:, :3], radius=0.01)
        pc.add_color_quantity("prim types", label2color(pc_with_prim_type[:, 6].astype(np.int64)), True)

    ps.show()

    print('Done')
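Note: read_pointcloud(input_pc_path, ' ', False) above expects a plain text file with six space-separated floats per point and no header (positions followed by normals, per the column comments in utils/pointcloud.py). Below is a minimal, self-contained sketch of that input format and of the loading/normalization steps; the file name pc_demo.txt and the random data are purely illustrative.

import numpy as np

from utils.pointcloud import cluster_cubes, normalize_pointcloud, read_pointcloud

rng = np.random.default_rng(0)
points = rng.uniform(-1.0, 1.0, size=(4096, 3))   # x y z
normals = rng.normal(size=(4096, 3))              # nx ny nz (not yet unit length)
np.savetxt('pc_demo.txt', np.hstack([points, normals]), fmt='%.6f', delimiter=' ')

pc = read_pointcloud('pc_demo.txt', ' ', False)     # -> (4096, 6) array
pc = normalize_pointcloud(pc)                       # rescale positions, unit-length normals
grid_clusters = cluster_cubes(pc, [1, 1, 1], 2048)  # single cell, farthest-point sampled to 2048 points
print(len(grid_clusters), grid_clusters[0].shape)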
requirements.txt (new file, 91 lines)
@@ -0,0 +1,91 @@
absl-py==0.9.0
attrs==19.3.0
bravado==10.6.2
bravado-core==5.17.0
cachetools==4.1.0
certifi==2020.4.5.2
chardet==3.0.4
click==7.1.2
combo==0.1.0
cycler==0.10.0
decorator==4.4.2
future==0.18.2
gitdb==4.0.5
GitPython==3.1.3
google-auth==1.16.1
google-auth-oauthlib==0.4.1
googledrivedownloader==0.4
grpcio==1.29.0
h5py==2.10.0
idna==2.9
imageio==2.8.0
importlib-metadata==1.6.1
isodate==0.6.0
joblib==0.15.1
jsonpointer==2.0
jsonref==0.2
jsonschema==3.2.0
kiwisolver==1.2.0
llvmlite==0.32.1
Markdown==3.2.2
matplotlib==3.2.1
monotonic==1.5
msgpack==1.0.0
msgpack-python==0.5.6
natsort==7.0.1
neptune-client==0.4.109
networkx==2.4
numba==0.49.1
numpy==1.18.5
oauthlib==3.1.0
pandas==1.0.4
Pillow==7.1.2
plyfile==0.7.2
polyscope==0.1.2
protobuf==3.12.2
py3nvml==0.2.6
pyasn1==0.4.8
pyasn1-modules==0.2.8
PyJWT==1.7.1
pyod==0.8.0
pyparsing==2.4.7
pyrsistent==0.16.0
python-dateutil==2.8.1
pytorch-lightning==0.7.6
pytz==2020.1
PyWavelets==1.1.1
PyYAML==5.3.1
rdflib==5.0.0
requests==2.23.0
requests-oauthlib==1.3.0
rfc3987==1.3.8
rsa==4.0
scikit-image==0.17.2
scikit-learn==0.23.1
scipy==1.4.1
simplejson==3.17.0
six==1.15.0
smmap==3.0.4
strict-rfc3339==0.7
suod==0.0.4
swagger-spec-validator==2.7.0
tensorboard==2.2.2
tensorboard-plugin-wit==1.6.0.post3
test-tube==0.7.5
threadpoolctl==2.1.0
tifffile==2020.6.3
torch==1.4.0+cpu
torch-cluster==1.5.4
torch-geometric==1.4.3
torch-scatter==2.0.4
torch-sparse==0.6.1
torchcontrib==0.0.2
torchvision==0.5.0
tqdm==4.45.0
typing-extensions==3.7.4.2
urllib3==1.25.9
webcolors==1.11.1
websocket-client==0.57.0
Werkzeug==1.0.1
xmltodict==0.12.0
zipp==3.1.0
utils/pointcloud.py (new file, 282 lines)
@@ -0,0 +1,282 @@
import numpy as np
from sklearn.cluster import DBSCAN

# open3d is used by the draw_clusters/draw_sample_data helpers below
# (legacy < 0.8 API names); note that it is not pinned in requirements.txt.
import open3d as o3d

from pyod.models.knn import KNN
from pyod.models.sod import SOD
from pyod.models.abod import ABOD
from pyod.models.sos import SOS
from pyod.models.pca import PCA
from pyod.models.ocsvm import OCSVM
from pyod.models.mcd import MCD
from pyod.models.lof import LOF
from pyod.models.cof import COF
from pyod.models.cblof import CBLOF
from pyod.models.loci import LOCI
from pyod.models.hbos import HBOS
from pyod.models.lscp import LSCP
from pyod.models.feature_bagging import FeatureBagging

from utils.project_config import Classes


def polytopes_to_planes(pc):
    # Relabel box and polytope points (type column 6) as plane points.
    pc[(pc[:, 6] == float(Classes.Box)) | (pc[:, 6] == float(Classes.Polytope)), 6] = float(Classes.Plane)
    return pc


def mini_color_table(index, norm=True):
    colors = [
        [0., 0., 0.],
        [0.5000, 0.5400, 0.5300], [0.8900, 0.1500, 0.2100], [0.6400, 0.5800, 0.5000],
        [1.0000, 0.3800, 0.0100], [1.0000, 0.6600, 0.1400], [0.4980, 1.0000, 0.0000],
        [0.4980, 1.0000, 0.8314], [0.9412, 0.9725, 1.0000], [0.5412, 0.1686, 0.8863],
        [0.5765, 0.4392, 0.8588], [0.3600, 0.1400, 0.4300], [0.5600, 0.3700, 0.6000],
    ]

    color = colors[index % len(colors)]

    if not norm:
        color[0] *= 255
        color[1] *= 255
        color[2] *= 255

    return color


def cluster2Color(cluster, cluster_idx):
    colors = np.zeros(shape=(len(cluster), 3))
    point_idx = 0
    for point in cluster:
        colors[point_idx, :] = mini_color_table(cluster_idx)
        point_idx += 1

    return colors


def label2color(labels):
    '''
    labels: np.ndarray with shape (n, )
    colors(return): np.ndarray with shape (n, 3)
    '''
    num = labels.shape[0]
    colors = np.zeros((num, 3))

    minl, maxl = np.min(labels), np.max(labels)
    for l in range(minl, maxl + 1):
        colors[labels == l, :] = mini_color_table(l)

    return colors


def read_pointcloud(path, delimiter=' ', hasHeader=True):
    with open(path, 'r') as f:
        if hasHeader:
            # Get rid of the header
            _ = f.readline()
        # This iterates over all lines, splits them and converts values to floats. Will fail on wrong values.
        # line.strip() skips blank lines (lines read from a file always contain the newline, so
        # comparing against '' would never filter them out).
        pc = [[float(x) for x in line.rstrip().split(delimiter)] for line in f if line.strip()]

    return np.asarray(pc)[:, :6]


def write_pointcloud(file, pc, numCols=6):
    np.savetxt(file, pc[:, :numCols], header=str(len(pc)) + ' ' + str(numCols), comments='')


def farthest_point_sampling(pts, K):
    if pts.shape[0] < K:
        return pts

    def calc_distances(p0, points):
        return ((p0[:3] - points[:, :3]) ** 2).sum(axis=1)

    farthest_pts = np.zeros((K, pts.shape[1]))
    farthest_pts[0] = pts[np.random.randint(len(pts))]
    distances = calc_distances(farthest_pts[0], pts)
    for i in range(1, K):
        farthest_pts[i] = pts[np.argmax(distances)]
        distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))

    return farthest_pts


def cluster_per_column(pc, column):
    clusters = []
    # + 1 so that the highest index present in the column gets a cluster as well
    for i in range(0, int(np.max(pc[:, column])) + 1):
        cluster_pc = pc[pc[:, column] == i, :]
        clusters.append(cluster_pc)

    return clusters


def cluster_cubes(data, cluster_dims, max_points_per_cluster=-1, min_points_per_cluster=-1):
    if cluster_dims[0] == 1 and cluster_dims[1] == 1 and cluster_dims[2] == 1:
        print("no need to cluster.")
        return [farthest_point_sampling(data, max_points_per_cluster)]

    max = data[:, :3].max(axis=0)
    max += max * 0.01

    min = data[:, :3].min(axis=0)
    min -= min * 0.01

    size = (max - min)

    clusters = {}

    cluster_size = size / np.array(cluster_dims, dtype=np.float32)

    print('Min: ' + str(min) + ' Max: ' + str(max))
    print('Cluster Size: ' + str(cluster_size))

    for row in data:
        # print('Row: ' + str(row))

        cluster_pos = ((row[:3] - min) / cluster_size).astype(int)
        cluster_idx = cluster_dims[0] * cluster_dims[2] * cluster_pos[1] + cluster_dims[0] * cluster_pos[2] + \
                      cluster_pos[0]
        clusters.setdefault(cluster_idx, []).append(row)

    # Apply farthest point sampling to each cluster
    final_clusters = []
    for key, cluster in clusters.items():
        c = np.vstack(cluster)
        if c.shape[0] < min_points_per_cluster and min_points_per_cluster != -1:
            continue

        if max_points_per_cluster != -1:
            final_clusters.append(farthest_point_sampling(c, max_points_per_cluster))
        else:
            final_clusters.append(c)

    return final_clusters


def cluster_dbscan(data, selected_indices, eps, min_samples=5, metric='euclidean', algo='auto'):
    # print('Clustering. Min Samples: ' + str(min_samples) + ' EPS: ' + str(eps) + " Selected Indices: " + str(selected_indices))

    # 0,1,2 : pos
    # 3,4,5 : normal
    # 6: type index
    # 7,8,9,10: type index one hot encoded
    # 11,12: normal as angles

    db_res = DBSCAN(eps=eps, metric=metric, n_jobs=-1, min_samples=min_samples, algorithm=algo).fit(
        data[:, selected_indices])

    labels = db_res.labels_
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise = list(labels).count(-1)
    # print("Noise: " + str(n_noise) + " Clusters: " + str(n_clusters))

    clusters = {}
    for idx, l in enumerate(labels):
        if l == -1:  # skip noise points
            continue
        clusters.setdefault(str(l), []).append(data[idx, :])

    npClusters = []
    for cluster in clusters.values():
        npClusters.append(np.array(cluster))

    return npClusters


def draw_clusters(clusters):
    clouds = []

    for cluster_idx, cluster in enumerate(clusters):
        cloud = o3d.PointCloud()
        cloud.points = o3d.Vector3dVector(cluster[:, :3])
        cloud.colors = o3d.Vector3dVector(cluster2Color(cluster, cluster_idx))
        clouds.append(cloud)

    o3d.draw_geometries(clouds)


def write_clusters(path, clusters, type_column=6):
    with open(path, "w") as file:
        file.write(str(len(clusters)) + "\n")

        for cluster in clusters:
            # print("Types: ", cluster[:, type_column])

            types = np.unique(cluster[:, type_column], axis=0).astype(int)

            def type_mapping(t):
                if t == 0:
                    return 2
                elif t == 1:
                    return 1
                elif t == 3:
                    return 4
                return t

            types = np.array([type_mapping(t) for t in types])
            print("Types: {}, Points: {}".format(types, cluster.shape[0]))
            # draw_sample_data(cluster)

            np.savetxt(file, types.reshape(1, types.shape[0]), delimiter=';', header='', comments='', fmt='%i')
            np.savetxt(file, cluster[:, :6], header=str(len(cluster)) + ' ' + str(6), comments='')


def draw_sample_data(sample_data, colored_normals=False):
    cloud = o3d.PointCloud()
    cloud.points = o3d.Vector3dVector(sample_data[:, :3])
    cloud.colors = \
        o3d.Vector3dVector(label2color(sample_data[:, 6].astype(int)) if not colored_normals else sample_data[:, 3:6])

    o3d.draw_geometries([cloud])


def normalize_pointcloud(pc, factor=1.0):
    max = pc.max(axis=0)
    min = pc.min(axis=0)

    f = np.max([abs(max[0] - min[0]), abs(max[1] - min[1]), abs(max[2] - min[2])])

    pc[:, 0:3] /= (f * factor)
    pc[:, 3:6] /= (np.linalg.norm(pc[:, 3:6], ord=2, axis=1, keepdims=True))

    return pc


def hierarchical_clustering(data, selected_indices_0, selected_indices_1, eps, min_samples=5, metric='euclidean',
                            algo='auto'):
    total_clusters = []

    clusters = cluster_dbscan(data, selected_indices_0, eps, min_samples, metric=metric, algo=algo)

    for cluster in clusters:
        # cluster = normalize_pointcloud(cluster)
        sub_clusters = cluster_dbscan(cluster, selected_indices_1, eps, min_samples, metric=metric, algo=algo)
        total_clusters.extend(sub_clusters)

    return total_clusters


def filter_clusters(clusters, filter):
    filtered_clusters = []

    for c in clusters:
        if filter(c):
            filtered_clusters.append(c)

    return filtered_clusters


def split_outliers(pc, columns):
    clf = LOF()  # FeatureBagging()  # detector_list=[LOF(), KNN()]
    clf.fit(pc[:, columns])

    # LOF, kNN

    return pc[clf.labels_ == 0], pc[clf.labels_ == 1]


def append_onehotencoded_type(data, factor=1.0):
    types = data[:, 6].astype(int)
    res = np.zeros((len(types), 8))
    res[np.arange(len(types)), types] = factor

    return np.column_stack((data, res))
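Note: a minimal sketch of how these helpers compose, on synthetic data. The column layout (0-2 position, 3-5 normal, 6 primitive-type index) follows the comments in cluster_dbscan; the array sizes, grid dimensions and DBSCAN parameters below are illustrative assumptions, not values from this commit.

import numpy as np

from utils.pointcloud import append_onehotencoded_type, cluster_cubes, cluster_dbscan, label2color

rng = np.random.default_rng(0)
pc = np.hstack([
    rng.uniform(-1.0, 1.0, size=(2000, 3)),             # positions (cols 0-2)
    rng.normal(size=(2000, 3)),                         # normals (cols 3-5)
    rng.integers(0, 4, size=(2000, 1)).astype(float),   # primitive-type index (col 6)
])

# Split the bounding box into a 2x2x2 grid, capping each cell at 256 points.
cells = cluster_cubes(pc, [2, 2, 2], max_points_per_cluster=256)
print(len(cells), [c.shape for c in cells])

# One-hot encode the type column (appends 8 columns) and density-cluster
# on the position columns only.
pc_oh = append_onehotencoded_type(pc)
clusters = cluster_dbscan(pc_oh, [0, 1, 2], eps=0.3, min_samples=10)
print(len(clusters))

# Per-point colors for visualization, derived from the type column.
colors = label2color(pc[:, 6].astype(int))
print(colors.shape)  # (2000, 3)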