Markus Friedrich 2019-08-09 14:54:08 +02:00
parent 4e1fcdfd43
commit 851b0ce01e
2 changed files with 109 additions and 123 deletions

View File

@@ -3,6 +3,22 @@ import open3d as o3d
from sklearn.cluster import DBSCAN
from pyod.models.knn import KNN
from pyod.models.sod import SOD
from pyod.models.abod import ABOD
from pyod.models.sos import SOS
from pyod.models.pca import PCA
from pyod.models.ocsvm import OCSVM
from pyod.models.mcd import MCD
from pyod.models.lof import LOF
from pyod.models.cof import COF
from pyod.models.cblof import CBLOF
from pyod.models.loci import LOCI
from pyod.models.hbos import HBOS
from pyod.models.lscp import LSCP
from pyod.models.feature_bagging import FeatureBagging
def mini_color_table(index, norm=True):
colors = [
[0.5000, 0.5400, 0.5300], [0.8900, 0.1500, 0.2100], [0.6400, 0.5800, 0.5000],
@@ -75,7 +91,11 @@ def cluster_per_column(pc, column):
return clusters
def cluster_cubes(data, cluster_dims):
def cluster_cubes(data, cluster_dims, max_points_per_cluster=-1, min_points_per_cluster=-1):
if cluster_dims[0] == 1 and cluster_dims[1] == 1 and cluster_dims[2] == 1:
print("no need to cluster.")
return [data]
max = data[:,:3].max(axis=0)
max += max * 0.01
@@ -101,18 +121,23 @@ def cluster_cubes(data, cluster_dims):
clusters.setdefault(cluster_idx, []).append(row)
# Apply farthest point sampling to each cluster
final_clusters = []
for key, cluster in clusters.items():
c = np.vstack(cluster)
clusters[key] = c # farthest_point_sampling(c, max_points_per_cluster)
if c.shape[0] < min_points_per_cluster and min_points_per_cluster != -1:
continue
return clusters.values()
if max_points_per_cluster != -1:
final_clusters.append(farthest_point_sampling(c, max_points_per_cluster))
else:
final_clusters.append(c)
return final_clusters
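A minimal usage sketch of the new cluster_cubes signature (not part of the commit; the grid size and point limits mirror the values used in the prediction script, and the random demo array only stands in for a real point cloud):

import numpy as np

# illustrative data: 10k points, positions in columns 0:3, unit normals in columns 3:6
rng = np.random.default_rng(0)
pts = rng.random((10000, 3))
normals = rng.normal(size=(10000, 3))
normals /= np.linalg.norm(normals, axis=1, keepdims=True)
demo = np.column_stack((pts, normals))

# 4x4x4 grid; cells with fewer than 100 points are dropped; passing e.g.
# max_points_per_cluster=2048 would additionally cap each cell via farthest point sampling
cells = cluster_cubes(demo, [4, 4, 4], max_points_per_cluster=-1, min_points_per_cluster=100)
print(len(cells), 'cells kept')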
def cluster_dbscan(data, selected_indices, eps, min_samples, metric='euclidean', algo='auto'):
def cluster_dbscan(data, selected_indices, eps, min_samples=5, metric='euclidean', algo='auto'):
# min_samples is scaled by the number of points in the data set
min_samples = min_samples * len(data)
print('Clustering. Min Samples: ' + str(min_samples) + ' EPS: ' + str(eps) + "Selected Indices: " + str(selected_indices))
# print('Clustering. Min Samples: ' + str(min_samples) + ' EPS: ' + str(eps) + "Selected Indices: " + str(selected_indices))
# 0,1,2 : pos
# 3,4,5 : normal
@@ -120,13 +145,13 @@ def cluster_dbscan(data, selected_indices, eps, min_samples, metric='euclidean',
# 7,8,9,10: type index one hot encoded
# 11,12: normal as angles
db_res = DBSCAN(eps=eps, metric=metric, n_jobs=-1, algorithm=algo, min_samples=min_samples).fit(data[:, selected_indices])
db_res = DBSCAN(eps=eps, metric=metric, n_jobs=-1, min_samples=min_samples, algorithm=algo).fit(data[:, selected_indices])
labels = db_res.labels_
# DBSCAN assigns the label -1 to noise points
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
n_noise = list(labels).count(-1)
print("Noise: " + str(n_noise) + " Clusters: " + str(n_clusters))
# print("Noise: " + str(n_noise) + " Clusters: " + str(n_clusters))
clusters = {}
for idx, l in enumerate(labels):
@@ -167,3 +192,49 @@ def write_clusters(path, clusters, type_column=6):
np.savetxt(file, types, header='', comments='')
np.savetxt(file, cluster[:, :6], header=str(len(cluster)) + ' ' + str(6), comments='')
def normalize_pointcloud(pc, factor=1.0):
max = pc.max(axis=0)
min = pc.min(axis=0)
f = np.max([abs(max[0] - min[0]), abs(max[1] - min[1]), abs(max[2] - min[2])])
pc[:, 0:3] /= (f * factor)
pc[:, 3:6] /= (np.linalg.norm(pc[:, 3:6], ord=2, axis=1, keepdims=True))
return pc
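For reference, a small worked example of normalize_pointcloud (illustrative values, not from the commit): positions are divided by the largest axis extent times factor, and the normal columns are rescaled to unit length per row.

import numpy as np

demo = np.array([[0., 0., 0., 0., 0., 2.],
                 [4., 2., 0., 3., 4., 0.]])
demo = normalize_pointcloud(demo, factor=1.0)
# positions become [[0, 0, 0], [1, 0.5, 0]]   (largest axis extent is 4.0)
# normals become   [[0, 0, 1], [0.6, 0.8, 0]] (unit length per row)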
def hierarchical_clustering(data, selected_indices, eps, min_samples=5, metric='euclidean', algo='auto'):
total_clusters = []
clusters = cluster_dbscan(data, selected_indices, eps, min_samples, metric=metric, algo=algo)
for cluster in clusters:
sub_clusters = cluster_dbscan(cluster, selected_indices, eps, min_samples, metric=metric, algo=algo)
total_clusters.extend(sub_clusters)
return total_clusters
def filter_clusters(clusters, min_size):
filtered_clusters = []
for c in clusters:
if len(c) >= min_size:
filtered_clusters.append(c)
return filtered_clusters
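A minimal sketch of chaining the two helpers above, mirroring the commented-out calls in the prediction script (eps, min_samples and the size threshold are the values used there; data is assumed to be a normalized (N, >= 6) array):

clusters = hierarchical_clustering(data, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=5)
clusters = filter_clusters(clusters, min_size=100)
print('clusters after filtering:', len(clusters))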
def split_outliers(pc, columns):
# Fit an outlier detector on the selected columns and split the cloud into (inliers, outliers).
clf = KNN()  # alternatives: FeatureBagging(), LOF(), ...  # detector_list=[LOF(), KNN()]
clf.fit(pc[:, columns])
return pc[clf.labels_ == 0], pc[clf.labels_ == 1]
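A minimal usage sketch of split_outliers, mirroring the commented-out call in the prediction script (columns 3:6 hold the normal components; pcloud is assumed to be a normalized point cloud array):

inliers, outliers = split_outliers(pcloud, [3, 4, 5])
print('inliers:', inliers.shape[0], 'outliers:', outliers.shape[0])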

View File

@@ -85,18 +85,6 @@ def clusterToColor(cluster, cluster_idx):
return colors
def normalize_pointcloud(pc):
max = pc.max(axis=0)
min = pc.min(axis=0)
f = np.max([abs(max[0] - min[0]), abs(max[1] - min[1]), abs(max[2] - min[2])])
pc[:, 0:3] /= f
pc[:, 3:6] /= (np.linalg.norm(pc[:, 3:6], ord=2, axis=1, keepdims=True))
return pc
def farthest_point_sampling(pts, K):
@@ -139,43 +127,6 @@ def append_normal_angles(data):
return np.column_stack((data, res))
def extract_cube_clusters(data, cluster_dims, max_points_per_cluster, min_points_per_cluster):
max = data[:,:3].max(axis=0)
max += max * 0.01
min = data[:,:3].min(axis=0)
min -= min * 0.01
size = (max - min)
clusters = {}
cluster_size = size / np.array(cluster_dims, dtype=np.float32)
print('Min: ' + str(min) + ' Max: ' + str(max))
print('Cluster Size: ' + str(cluster_size))
for row in data:
# print('Row: ' + str(row))
cluster_pos = ((row[:3] - min) / cluster_size).astype(int)
cluster_idx = cluster_dims[0] * cluster_dims[2] * cluster_pos[1] + cluster_dims[0] * cluster_pos[2] + cluster_pos[0]
clusters.setdefault(cluster_idx, []).append(row)
# Apply farthest point sampling to each cluster
final_clusters = []
for key, cluster in clusters.items():
c = np.vstack(cluster)
if c.shape[0] < min_points_per_cluster:
continue
final_clusters.append(farthest_point_sampling(c, max_points_per_cluster))
return final_clusters
def extract_clusters(data, selected_indices, eps, min_samples, metric='euclidean', algo='auto'):
min_samples = min_samples * len(data)
@@ -243,7 +194,7 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../') # add proj
parser = argparse.ArgumentParser()
parser.add_argument('--npoints', type=int, default=2048, help='resample points number')
parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_30.pth', help='model path')
parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_0.pth', help='model path')
parser.add_argument('--sample_idx', type=int, default=0, help='select a sample to segment and view result')
parser.add_argument('--headers', type=strtobool, default=True, help='if raw files come with headers')
parser.add_argument('--collate_per_segment', type=strtobool, default=True, help='whether to look at pointclouds or sub')
@@ -260,41 +211,46 @@ if __name__ == '__main__':
print('Create data set ..')
dataset_folder = './data/raw/predict/'
pointcloud_file = './pointclouds/0_pc.xyz'
pointcloud_file = './pointclouds/0_0.xyz'
# Load and pre-process point cloud
pcloud = pc.read_pointcloud(pointcloud_file)
pcloud = normalize_pointcloud(pcloud)
# pcloud = append_normal_angles(pcloud)
# pcloud = farthest_point_sampling(pcloud, opt.npoints)
pcloud = pc.normalize_pointcloud(pcloud, 1)
# Test: Pre-predict clustering
print("point cloud size: ", pcloud.shape)
clusters = extract_clusters(pcloud, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
metric='euclidean', algo='auto')
#draw_clusters(clusters)
#a, b = pc.split_outliers(pcloud, [3, 4, 5])
#draw_sample_data(a, True)
#draw_sample_data(b, True)
#pcloud = a
# pc = StandardScaler().fit_transform(pc)
recreate_folder(dataset_folder)
# for 0_0.xyz: pc.hierarchical_clustering(pcloud, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=5)
# Add full point cloud to prediction folder.
# recreate_folder(dataset_folder + '0_0' + '/')
# pc_fps = farthest_point_sampling(pcloud, opt.npoints)
# pc.write_pointcloud(dataset_folder + '0_0' + '/pc.xyz', pc_fps)
# Add cluster point clouds to prediction folder.
pc_clusters = extract_cube_clusters(pcloud, [4, 4, 4], 2048, 100)
# pc_clusters = extract_clusters(pc, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
# pc_clusters = pc.hierarchical_clustering(pcloud, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=5)
# pc_clusters = pc.filter_clusters(pc_clusters, 100)
pc_clusters = [pcloud]
print("NUM CLUSTERS: ", len(pc_clusters))
draw_clusters(pc_clusters)
for c in pc_clusters:
draw_sample_data(c, True)
print("Cluster Size: ", len(c))
# draw_sample_data(pcloud)
pc_clusters = pc.cluster_cubes(pcloud, [1, 1, 1])
recreate_folder(dataset_folder)
for idx, pcc in enumerate(pc_clusters):
print("Cluster shape: ", pcc.shape)
pcc = farthest_point_sampling(pcc, opt.npoints)
recreate_folder(dataset_folder + str(idx) + '/')
pc.write_pointcloud(dataset_folder + str(idx) + '/pc.xyz', pcc)
#draw_sample_data(pcc, False)
# draw_sample_data(pcc, False)
# Load dataset
print('load dataset ..')
@@ -317,7 +273,6 @@ if __name__ == '__main__':
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
dtype = torch.float
# net = PointNetPartSegmentNet(num_classes)
net = PointNet2PartSegmentNet(num_classes)
net.load_state_dict(torch.load(opt.model, map_location=device.type))
@@ -334,58 +289,18 @@ if __name__ == '__main__':
pred_label, gt_label = eval_sample(net, sample)
sample_data = np.column_stack((sample["points"].numpy(), sample["normals"].numpy(), pred_label.numpy()))
draw_sample_data(sample_data, False)
#print("Sample Datat: ", sample_data[:5, :])
#print('Eval done.')
print("PRED LABEL: ", pred_label)
#sample_data = normalize_pointcloud(sample_data)
#sample_data = append_onehotencoded_type(sample_data, 1.0)
#sample_data = append_normal_angles(sample_data)
# print('Clustering ..')
# print('Shape: ' + str(sample_data.shape))
# clusters = extract_clusters(sample_data, [0, 1, 2, 3, 4, 5, 7, 8, 9, 10], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
# print('Clustering done. ' + str(len(clusters)) + " Clusters.")
# print(sample_data[:, 6])
# draw_sample_data(sample_data, False)
# result_clusters.extend(clusters)
# result_clusters.append(sample_data)
if labeled_dataset is None:
labeled_dataset = sample_data
else:
labeled_dataset = np.vstack((labeled_dataset, sample_data))
#draw_clusters(result_clusters)
print("prediction done")
draw_sample_data(labeled_dataset, False)
print("point cloud size: ", labeled_dataset.shape)
print("Min: ", np.min(labeled_dataset[:, :3]))
print("Max: ", np.max(labeled_dataset[:, :3]))
print("Min: ", np.min(pcloud[:, :3]))
print("Max: ", np.max(pcloud[:, :3]))
#print("Data Set: ", labeled_dataset[:5, :])
labeled_dataset = normalize_pointcloud(labeled_dataset)
labeled_dataset = append_normal_angles(labeled_dataset)
#labeled_dataset = farthest_point_sampling(labeled_dataset, opt.npoints)
labeled_dataset = append_onehotencoded_type(labeled_dataset, 1.0)
clusters = extract_clusters(labeled_dataset, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
metric='euclidean', algo='auto')
#total_clusters = []
#for cluster in clusters:
# sub_clusters = extract_clusters(cluster, [7,8,9], eps=0.10, min_samples=0.05,
# metric='euclidean', algo='auto')
# total_clusters.extend(sub_clusters)
draw_clusters(clusters)
pc.write_clusters("clusters.txt", clusters)