added full pipeline

This commit is contained in:
Markus Friedrich 2020-06-26 16:29:24 +02:00
parent 2a7a236b89
commit fc88c687ae
2 changed files with 77 additions and 41 deletions

View File

@@ -20,10 +20,16 @@ from ml_lib.utils.model_io import SavedLightningModels
# Datasets
from datasets.shapenet import ShapeNetPartSegDataset
from models import PointNet2
from utils.pointcloud import cluster_cubes, append_onehotencoded_type, label2color
from utils.project_settings import GlobalVar
from utils.pointcloud import cluster_cubes, append_onehotencoded_type, label2color, hierarchical_clustering, \
write_clusters, cluster2Color, cluster_dbscan
from utils.project_settings import GlobalVar, DataClass
class DisplayMode(DataClass):
    """Selects what the final polyscope visualization colours by.

    NOTE(review): the original wrote ``Clusters = 0,`` and ``Types = 1,`` —
    the trailing commas made those members one-element tuples ``(0,)`` and
    ``(1,)`` while ``Nothing`` stayed a plain int. Equality checks against
    the class attributes themselves still worked, but the values were
    inconsistent; the commas are dropped so all members are plain ints.
    """
    Clusters = 0  # colour points by result-cluster membership
    Types = 1     # colour points by predicted primitive type
    Nothing = 2   # no result colouring
def restore_logger_and_model(log_dir):
model = SavedLightningModels.load_checkpoint(models_root_path=log_dir, model=PointNet2, n=-1)
model = model.restore()
@@ -54,39 +60,89 @@ def predict_prim_type(input_pc, model):
# NOTE(review): this span is rendered from a commit diff with the +/- markers
# and indentation stripped, so it contains BOTH the pre- and post-image of
# changed lines (e.g. the two model_path assignments and the two cluster_cubes
# calls below). Duplicated statements are diff residue, not intentional logic;
# in each pair the later line is the post-commit version.
# Pipeline: grid-split the point cloud -> per-cell primitive-type prediction
# -> merge -> two-level DBSCAN clustering -> write clusters -> visualize.
if __name__ == '__main__':
# input_pc_path = Path('data') / 'pc' / 'test.xyz'
display_mode = DisplayMode.Clusters  # what the final visualization colours by
grid_cluster_max_pts = 3000#8192
grid_clusters = [3,3,3]  # grid cells per axis for the coarse spatial split
type_cluster_eps = 0.1  # DBSCAN neighbourhood radius for the type clustering
type_cluster_min_pts = 50  # DBSCAN min_samples; also the minimum cluster size kept
model_path = Path('output') / 'PN2' / 'PN_14628b734c5b651b013ad9e36c406934' / 'version_0'
# config_filename = 'config.ini'
# config = ThisConfig()
# config.read_file((Path(model_path) / config_filename).open('r'))
# NOTE(review): post-commit path — overrides the assignment a few lines up.
model_path = Path('output') / 'PN2' / 'PN_9843bf499399786cfd58fe79fa1b3db8' / 'version_0'
loaded_model = restore_logger_and_model(model_path)
loaded_model.eval()  # inference mode (disables dropout / batch-norm updates)
#input_pc = read_pointcloud(input_pc_path, ' ', False)
# input_pc = normalize_pointcloud(input_pc)
# TEST DATASET
transforms = Compose([NormalizeScale(), ])
test_dataset = ShapeNetPartSegDataset('data', mode=GlobalVar.data_split.predict, collate_per_segment=False,
refresh=True, transform=transforms)
# NOTE(review): old hard-coded (1x1x1, 8192) call left in by the diff; the
# parameterised call on the next line is the post-commit version.
grid_clusters = cluster_cubes(test_dataset[0], [1, 1, 1], max_points_per_cluster=8192)
grid_clusters = cluster_cubes(test_dataset[0], grid_clusters, max_points_per_cluster=grid_cluster_max_pts)
ps.init()
# ========================== Grid Clustering ==========================
# Predict a per-point primitive type for every grid cell, then merge them.
grid_cluster_pcs = []
for i, grid_cluster_pc in enumerate(grid_clusters):
print("Cluster pointcloud size: {}".format(grid_cluster_pc.shape[0]))
pc_with_prim_type = predict_prim_type(grid_cluster_pc, loaded_model)
# pc_with_prim_type = polytopes_to_planes(pc_with_prim_type)
# Re-Map Primitive type for 1-hot-encoding.
# NOTE(review): the first two remaps are identity no-ops kept as
# documentation; box/polytope/plane all collapse into class 2, giving
# 3 classes total (matches the 3-wide one-hot in append_onehotencoded_type).
pc_with_prim_type[:, 6][pc_with_prim_type[:, 6] == 0.0] = 0.0 # Sphere
pc_with_prim_type[:, 6][pc_with_prim_type[:, 6] == 1.0] = 1.0 # Cylinder
pc_with_prim_type[:, 6][pc_with_prim_type[:, 6] == 3.0] = 2.0 # Box
pc_with_prim_type[:, 6][pc_with_prim_type[:, 6] == 4.0] = 2.0 # Polytope
pc_with_prim_type[:, 6][pc_with_prim_type[:, 6] == 6.0] = 2.0 # Plane
pc_with_prim_type = append_onehotencoded_type(pc_with_prim_type)
pc = ps.register_point_cloud("points_" + str(i), pc_with_prim_type[:, :3], radius=0.01)
pc.add_color_quantity("prim types", label2color(pc_with_prim_type[:, 6].astype(np.int64)), True)
grid_cluster_pcs.append(pc_with_prim_type)
# Merge grid cluster pcs together
final_pc = np.concatenate(grid_cluster_pcs)
# ========================== DBSCAN Clustering ==========================
print("DB Scan on point cloud " + str(final_pc.shape))
total_clusters = []
# First pass clusters on columns 0..5 (xyz plus, presumably, the normals —
# TODO confirm the column layout against the dataset loader).
clusters = cluster_dbscan(final_pc, [0, 1, 2, 3, 4, 5], eps=type_cluster_eps,
min_samples=type_cluster_min_pts)
print("Pre-clustering done. Clusters: ", len(clusters))
for cluster in clusters:
print("2nd level clustering ..")
prim_types_in_cluster = len(np.unique(cluster[:, 6], axis=0))
if prim_types_in_cluster == 1:
print("No need for 2nd level clustering since there is only a single primitive type in the cluster.")
total_clusters.append(cluster)
else:
# Mixed types: re-cluster on xyz plus the one-hot type columns (7..9)
# so points of different primitive types get pulled apart.
sub_clusters = cluster_dbscan(cluster, [0, 1, 2, 7, 8, 9], eps=type_cluster_eps,
min_samples=type_cluster_min_pts)
print("Sub clusters: ", len(sub_clusters))
total_clusters.extend(sub_clusters)
# Keep only clusters large enough to be a reliable primitive.
result_clusters = list(filter(lambda c: c.shape[0] > type_cluster_min_pts, total_clusters))
for cluster in result_clusters:
print("Cluster: ", cluster.shape[0])
write_clusters('clusters.txt', result_clusters, 6)
# ========================== Result visualization ==========================
if display_mode == DisplayMode.Types:
# NOTE(review): `i` here is the leftover index from the grid-cluster loop
# above — the name is reused, only one cloud is registered in this branch.
pc = ps.register_point_cloud("points_" + str(i), final_pc[:, :3], radius=0.01)
pc.add_color_quantity("prim types", label2color(final_pc[:, 6].astype(np.int64)), True)
elif display_mode == DisplayMode.Clusters:
for i, result_cluster in enumerate(result_clusters):
pc = ps.register_point_cloud("points_" + str(i), result_cluster[:, :3], radius=0.01)
pc.add_color_quantity("prim types", cluster2Color(result_cluster,i), True)
ps.show()

View File

@@ -1,7 +1,7 @@
import numpy as np
from sklearn.cluster import DBSCAN
import open3d as o3d
#import open3d as o3d
from pyod.models.lof import LOF
@@ -52,6 +52,8 @@ def label2color(labels):
num = labels.shape[0]
colors = np.zeros((num, 3))
print(labels)
minl, maxl = np.min(labels), np.max(labels)
for l in range(minl, maxl + 1):
colors[labels == l, :] = mini_color_table(l)
@@ -189,19 +191,6 @@ def cluster_dbscan(data, selected_indices, eps, min_samples=5, metric='euclidean
return npClusters
def draw_clusters(clusters):
    """Render every cluster as its own coloured Open3D point cloud.

    Each cluster contributes its first three columns as xyz points and a
    per-cluster colour (via cluster2Color) so clusters are visually distinct.
    """
    def _as_cloud(idx, pts):
        # Build one coloured Open3D cloud from a single cluster array.
        cloud = o3d.PointCloud()
        cloud.points = o3d.Vector3dVector(pts[:, :3])
        cloud.colors = o3d.Vector3dVector(cluster2Color(pts, idx))
        return cloud

    geometries = [_as_cloud(idx, pts) for idx, pts in enumerate(clusters)]
    o3d.draw_geometries(geometries)
def write_clusters(path, clusters, type_column=6):
file = open(path, "w")
file.write(str(len(clusters)) + "\n")
@@ -216,7 +205,7 @@ def write_clusters(path, clusters, type_column=6):
return 2
elif t == 1:
return 1
elif t == 3:
elif t == 2:
return 4
return t
@@ -228,15 +217,6 @@ def write_clusters(path, clusters, type_column=6):
np.savetxt(file, cluster[:, :6], header=str(len(cluster)) + ' ' + str(6), comments='')
def draw_sample_data(sample_data, colored_normals=False):
    """Visualise a single sample point cloud with Open3D.

    Points come from columns 0..2. Colours come either from the label in
    column 6 (mapped through label2color) or, when colored_normals is True,
    directly from columns 3..5.
    """
    if colored_normals:
        colors = sample_data[:, 3:6]
    else:
        colors = label2color(sample_data[:, 6].astype(int))

    cloud = o3d.PointCloud()
    cloud.points = o3d.Vector3dVector(sample_data[:, :3])
    cloud.colors = o3d.Vector3dVector(colors)
    o3d.draw_geometries([cloud])
def normalize_pointcloud(pc, factor=1.0):
max = pc.max(axis=0)
min = pc.min(axis=0)
@@ -284,7 +264,7 @@ def split_outliers(pc, columns):
def append_onehotencoded_type(data, factor=1.0):
    """Append a one-hot encoding of the primitive-type column to ``data``.

    Column 6 of ``data`` holds the (remapped) primitive-type label, expected
    to be an integer in {0, 1, 2}. Three new columns are appended; for each
    row the column matching its type is set to ``factor``, the rest stay 0.

    NOTE(review): the original body assigned ``res`` twice — an 8-wide zeros
    array immediately overwritten by the 3-wide one (diff residue of the old
    8-class encoding). The dead store is removed here.

    :param data: (N, >=7) array whose column 6 is the integer type label.
    :param factor: value written into the "hot" column (default 1.0).
    :return: (N, cols+3) array: ``data`` with the one-hot columns appended.
    """
    types = data[:, 6].astype(int)
    res = np.zeros((len(types), 3))
    res[np.arange(len(types)), types] = factor
    return np.column_stack((data, res))