Markus Friedrich 2019-08-09 17:28:04 +02:00
parent f2cc070d04
commit e4cc447f68
2 changed files with 41 additions and 96 deletions

View File

@@ -207,14 +207,15 @@ def normalize_pointcloud(pc, factor=1.0):
     return pc
-def hierarchical_clustering(data, selected_indices, eps, min_samples=5, metric='euclidean', algo='auto'):
+def hierarchical_clustering(data, selected_indices_0, selected_indices_1, eps, min_samples=5, metric='euclidean', algo='auto'):
     total_clusters = []
-    clusters = cluster_dbscan(data, selected_indices, eps, min_samples, metric=metric, algo=algo)
+    clusters = cluster_dbscan(data, selected_indices_0, eps, min_samples, metric=metric, algo=algo)
     for cluster in clusters:
-        sub_clusters = cluster_dbscan(cluster, selected_indices, eps, min_samples, metric=metric, algo=algo)
+        # cluster = normalize_pointcloud(cluster)
+        sub_clusters = cluster_dbscan(cluster, selected_indices_1, eps, min_samples, metric=metric, algo=algo)
         total_clusters.extend(sub_clusters)
     return total_clusters
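The split into selected_indices_0 and selected_indices_1 lets the coarse pass and the refinement pass cluster on different feature columns. A minimal sketch of the two-stage idea, assuming cluster_dbscan is roughly a wrapper around scikit-learn's DBSCAN over the selected columns (the wrapper below is hypothetical, not the repository's implementation):

import numpy as np
from sklearn.cluster import DBSCAN

def dbscan_on_columns(data, columns, eps, min_samples):
    # Cluster full rows, but measure distance only on the selected columns.
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(data[:, columns])
    # Label -1 is DBSCAN noise; every other label becomes one cluster array.
    return [data[labels == l] for l in np.unique(labels) if l != -1]

def two_stage_clustering(data, cols_coarse, cols_fine, eps, min_samples=5):
    total = []
    for cluster in dbscan_on_columns(data, cols_coarse, eps, min_samples):
        # Refine each coarse cluster with a second DBSCAN pass.
        total.extend(dbscan_on_columns(cluster, cols_fine, eps, min_samples))
    return total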

View File

@@ -86,18 +86,6 @@ def clusterToColor(cluster, cluster_idx):
     return colors
-def normalize_pointcloud(pc):
-    max = pc.max(axis=0)
-    min = pc.min(axis=0)
-    f = np.max([abs(max[0] - min[0]), abs(max[1] - min[1]), abs(max[2] - min[2])])
-    pc[:, 0:3] /= f
-    pc[:, 3:6] /= (np.linalg.norm(pc[:, 3:6], ord=2, axis=1, keepdims=True))
-    return pc
 def farthest_point_sampling(pts, K):
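The local normalize_pointcloud (scale xyz by the largest bounding-box extent, re-normalize the normal columns) is dropped in favor of the module-level pc.normalize_pointcloud(pc, factor). The body of farthest_point_sampling is not part of this diff; for reference, a standard greedy O(N*K) formulation looks roughly like this (an assumed sketch, not necessarily the repository's version):

import numpy as np

def farthest_point_sampling_sketch(pts, K):
    # Greedy farthest point sampling on the xyz columns.
    n = pts.shape[0]
    if n <= K:
        return pts
    xyz = pts[:, :3]
    selected = np.zeros(K, dtype=int)
    # Distance of every point to the nearest already-selected point.
    dists = np.full(n, np.inf)
    selected[0] = np.random.randint(n)
    for i in range(1, K):
        d = np.linalg.norm(xyz - xyz[selected[i - 1]], axis=1)
        dists = np.minimum(dists, d)
        selected[i] = np.argmax(dists)
    return pts[selected]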
@@ -140,43 +128,6 @@ def append_normal_angles(data):
     return np.column_stack((data, res))
-def extract_cube_clusters(data, cluster_dims, max_points_per_cluster, min_points_per_cluster):
-    max = data[:,:3].max(axis=0)
-    max += max * 0.01
-    min = data[:,:3].min(axis=0)
-    min -= min * 0.01
-    size = (max - min)
-    clusters = {}
-    cluster_size = size / np.array(cluster_dims, dtype=np.float32)
-    print('Min: ' + str(min) + ' Max: ' + str(max))
-    print('Cluster Size: ' + str(cluster_size))
-    for row in data:
-        # print('Row: ' + str(row))
-        cluster_pos = ((row[:3] - min) / cluster_size).astype(int)
-        cluster_idx = cluster_dims[0] * cluster_dims[2] * cluster_pos[1] + cluster_dims[0] * cluster_pos[2] + cluster_pos[0]
-        clusters.setdefault(cluster_idx, []).append(row)
-    # Apply farthest point sampling to each cluster
-    final_clusters = []
-    for key, cluster in clusters.items():
-        c = np.vstack(cluster)
-        if c.shape[0] < min_points_per_cluster:
-            continue
-        final_clusters.append(farthest_point_sampling(c, max_points_per_cluster))
-    return final_clusters
 def extract_clusters(data, selected_indices, eps, min_samples, metric='euclidean', algo='auto'):
     min_samples = min_samples * len(data)
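The removed extract_cube_clusters binned each point into an axis-aligned voxel grid (the one-percent margin keeps boundary points inside the grid), linearized the cell coordinate with y as the slowest axis and x as the fastest, then kept only cells holding at least min_points_per_cluster points and downsampled each to max_points_per_cluster via farthest point sampling. A self-contained check of the index math (grid size chosen for illustration):

import numpy as np

cluster_dims = [4, 4, 4]  # grid resolution in x, y, z

def voxel_index(cluster_pos, dims):
    # Mirrors cluster_dims[0] * cluster_dims[2] * y + cluster_dims[0] * z + x
    # from the removed function: y slowest, then z, then x.
    x, y, z = cluster_pos
    return dims[0] * dims[2] * y + dims[0] * z + x

# Every cell of a 4x4x4 grid gets a unique index in [0, 63].
idx = {voxel_index((x, y, z), cluster_dims)
       for x in range(4) for y in range(4) for z in range(4)}
assert idx == set(range(64))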
@@ -262,37 +213,44 @@ if __name__ == '__main__':
     print('Create data set ..')
     dataset_folder = './data/raw/predict/'
-    pointcloud_file = './pointclouds/1_pc.xyz'
+    pointcloud_file = './pointclouds/0_0.xyz'
     # Load and pre-process point cloud
     pcloud = pc.read_pointcloud(pointcloud_file)
-    pcloud = normalize_pointcloud(pcloud)
     # pcloud = append_normal_angles(pcloud)
     # pcloud = farthest_point_sampling(pcloud, opt.npoints)
+    pcloud = pc.normalize_pointcloud(pcloud, 1)
     # Test: Pre-predict clustering
     print("point cloud size: ", pcloud.shape)
     clusters = extract_clusters(pcloud, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
                                 metric='euclidean', algo='auto')
     #draw_clusters(clusters)
+    #a, b = pc.split_outliers(pcloud, [3, 4, 5])
+    #draw_sample_data(a, True)
+    #draw_sample_data(b, True)
+    #pcloud = a
     # pc = StandardScaler().fit_transform(pc)
+    # for 0_0.xyz: pc.hierarchical_clustering(pcloud, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=5)
     #pc_clusters = pc.cluster_dbscan(pcloud, [0, 1, 2, 3,4,5], eps=0.5, min_samples=5)
     #pc_clusters = pc.filter_clusters(pc_clusters, 100)
     #pc_clusters = [pcloud]
     #print("NUM CLUSTERS: ", len(pc_clusters))
     #draw_clusters(pc_clusters)
     #for c in pc_clusters:
     #    draw_sample_data(c, True)
     #    print("Cluster Size: ", len(c))
     # draw_sample_data(pcloud)
+    pc_clusters = pc.hierarchical_clustering(pcloud, selected_indices_0=[0, 1, 2, 3, 4, 5],
+                                             selected_indices_1=[0, 1, 2, 3, 4, 5], eps=0.1, min_samples=5)
+    # pc.cluster_cubes(pcloud, [4, 4, 4])
     recreate_folder(dataset_folder)
     # Add full point cloud to prediction folder.
     # recreate_folder(dataset_folder + '0_0' + '/')
     # pc_fps = farthest_point_sampling(pcloud, opt.npoints)
     # pc.write_pointcloud(dataset_folder + '0_0' + '/pc.xyz', pc_fps)
     # Add cluster point clouds to prediction folder.
-    pc_clusters = extract_cube_clusters(pcloud, [4, 4, 4], 2048, 100)
     # pc_clusters = extract_clusters(pc, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
     draw_clusters(pc_clusters)
     for idx, pcc in enumerate(pc_clusters):
         print("Cluster shape: ", pcc.shape)
         pcc = farthest_point_sampling(pcc, opt.npoints)
         recreate_folder(dataset_folder + str(idx) + '/')
         pc.write_pointcloud(dataset_folder + str(idx) + '/pc.xyz', pcc)
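The switch to pc.normalize_pointcloud(pcloud, 1) matters for the DBSCAN radius: once xyz is scaled by the largest bounding-box extent, eps=0.1 means a tenth of the model's overall size. A sketch of that scaling step, following the deleted local helper (the factor handling is assumed from the pc.normalize_pointcloud(pc, factor=1.0) signature above):

import numpy as np

def normalize_pointcloud_sketch(pc_arr, factor=1.0):
    # Scale xyz so the largest bounding-box edge has length `factor`.
    mins = pc_arr[:, :3].min(axis=0)
    maxs = pc_arr[:, :3].max(axis=0)
    pc_arr = pc_arr.copy()
    pc_arr[:, :3] /= (maxs - mins).max() / factor
    # Re-normalize the normal vectors in columns 3:6 to unit length.
    norms = np.linalg.norm(pc_arr[:, 3:6], axis=1, keepdims=True)
    pc_arr[:, 3:6] /= norms
    return pc_arr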
@@ -328,7 +286,7 @@ if __name__ == '__main__':
     net.eval()
     labeled_dataset = None
     result_clusters = []
     # Iterate over all the samples and predict
     for sample in test_dataset:
@@ -340,7 +298,7 @@ if __name__ == '__main__':
         else:
             sample_data = np.column_stack((sample["points"].numpy(), sample["normals"], pred_label.numpy()))
-        draw_sample_data(sample_data, False)
+        # draw_sample_data(sample_data, False)
         #print("Sample Datat: ", sample_data[:5, :])
         #print('Eval done.')
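sample_data packs coordinates, normals, and the predicted per-point label into one array; with the draw call commented out, the prediction loop now runs headless. The assumed column layout, with illustrative shapes:

import numpy as np

# Illustrative stand-ins: N points with xyz, a normal, and one predicted label.
points = np.zeros((4, 3))
normals = np.ones((4, 3))
pred_label = np.arange(4)

sample_data = np.column_stack((points, normals, pred_label))
assert sample_data.shape == (4, 7)  # cols 0-2 xyz, 3-5 normals, 6 label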
@@ -360,14 +318,14 @@ if __name__ == '__main__':
         # draw_sample_data(sample_data, False)
         # result_clusters.extend(clusters)
-        # result_clusters.append(sample_data)
+        result_clusters.append(sample_data)
         if labeled_dataset is None:
             labeled_dataset = sample_data
         else:
             labeled_dataset = np.vstack((labeled_dataset, sample_data))
     #draw_clusters(result_clusters)
     print("prediction done")
     draw_sample_data(labeled_dataset, False)
     print("point cloud size: ", labeled_dataset.shape)
@@ -376,22 +334,8 @@ if __name__ == '__main__':
     print("Max: ", np.max(labeled_dataset[:, :3]))
     print("Min: ", np.min(pcloud[:, :3]))
     print("Max: ", np.max(pcloud[:, :3]))
-    #print("Data Set: ", labeled_dataset[:5, :])
-    labeled_dataset = normalize_pointcloud(labeled_dataset)
-    labeled_dataset = append_normal_angles(labeled_dataset)
-    #labeled_dataset = farthest_point_sampling(labeled_dataset, opt.npoints)
-    labeled_dataset = append_onehotencoded_type(labeled_dataset, 1.0)
-    clusters = extract_clusters(labeled_dataset, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
-                                metric='euclidean', algo='auto')
-    # TODO: Take result clusters and cluster them by primitive type.
-    #total_clusters = []
-    #for cluster in clusters:
-    #    sub_clusters = extract_clusters(cluster, [7,8,9], eps=0.10, min_samples=0.05,
-    #                                    metric='euclidean', algo='auto')
-    #    total_clusters.extend(sub_clusters)
-    draw_clusters(clusters)
-    pc.write_clusters("clusters.txt", clusters)
+    pc.write_clusters("clusters.txt", result_clusters)