From 76870a7bf84c878f9ab7f19f8c979f103b36844a Mon Sep 17 00:00:00 2001
From: Markus Friedrich
Date: Thu, 19 Sep 2019 18:19:38 +0200
Subject: [PATCH] small changes

---
 pointcloud.py      |  5 +++--
 predict/predict.py | 35 ++++++++++++++++++++++++-----------
 2 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/pointcloud.py b/pointcloud.py
index 7b5f9c4..5ac0bfc 100644
--- a/pointcloud.py
+++ b/pointcloud.py
@@ -68,7 +68,7 @@ def read_pointcloud(path, delimiter=' ', hasHeader=True):
         if hasHeader:
             # Get rid of the Header
            _ = f.readline()
-        # This itrates over all lines, splits them anc converts values to floats. Will fail on wrong values.
+        # This iterates over all lines, splits them and converts values to floats. Will fail on wrong values.
         pc = [[float(x) for x in line.rstrip().split(delimiter)] for line in f if line != '']
 
     return np.asarray(pc)
@@ -215,7 +215,8 @@ def write_clusters(path, clusters, type_column=6):
                 return t
 
             types = np.array([type_mapping(t) for t in types])
-            print("Types: ", types)
+            print("Types: {}, Points: {}".format(types, cluster.shape[0]))
+            # draw_sample_data(cluster)
 
             np.savetxt(file, types.reshape(1, types.shape[0]),delimiter=';', header='', comments='', fmt='%i')
             np.savetxt(file, cluster[:, :6], header=str(len(cluster)) + ' ' + str(6), comments='')
diff --git a/predict/predict.py b/predict/predict.py
index 6ad0352..59e18ba 100644
--- a/predict/predict.py
+++ b/predict/predict.py
@@ -63,7 +63,7 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../') # add proj
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--npoints', type=int, default=2048, help='resample points number')
-parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_131.pth', help='model path')
+parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_0.pth', help='model path')
 parser.add_argument('--sample_idx', type=int, default=0, help='select a sample to segment and view result')
 parser.add_argument('--headers', type=strtobool, default=True, help='if raw files come with headers')
 parser.add_argument('--with_normals', type=strtobool, default=True, help='if training will include normals')
@@ -84,7 +84,19 @@ if __name__ == '__main__':
 
     print('Create data set ..')
     dataset_folder = './data/raw/predict/'
-    pointcloud_file = './pointclouds/m3.xyz'
+
+    # pointcloud_file = './pointclouds/0_0.xyz'
+
+    # pointcloud_file = 'C:/Projekte/csg-fitter/csg-fitter/models/0/0.xyz'
+
+    param_set = {"test1": {"cells": 2, "eps": 0.15, "min_samples": 100}}
+
+    file = "test1"
+
+    used_params = param_set[file]
+
+    pointcloud_file = 'C:/Projekte/visigrapp2020/data/' + file + '/pc.txt'
+
     # Load and pre-process point cloud
     pcloud = pc.read_pointcloud(pointcloud_file)
 
@@ -93,7 +105,8 @@ if __name__ == '__main__':
 
     #pc_clusters = pc.hierarchical_clustering(pcloud, selected_indices_0=[0, 1, 2, 3, 4, 5],
     #                                         selected_indices_1=[0, 1, 2, 3, 4, 5], eps=0.7, min_samples=5)
-    pc_clusters = pc.cluster_cubes(pcloud, [4, 4, 4])
+    n_cells = used_params["cells"]
+    pc_clusters = pc.cluster_cubes(pcloud, [n_cells, n_cells, n_cells])
 
     print("Pre-Processing: Clustering")
     pc.draw_clusters(pc_clusters)
@@ -196,12 +209,12 @@ if __name__ == '__main__':
 
     # Clustering that results in per-primitive type clusters
     # ------------------------------------------------------------------------------------------------------------------
-    # labeled_dataset = np.loadtxt('labeled_dataset.txt')
+    #labeled_dataset = np.loadtxt('labeled_dataset.txt')
 
     pc.draw_sample_data(labeled_dataset)
 
     # Try to get rid of outliers.
-    labeled_dataset,outliers = pc.split_outliers(labeled_dataset, columns=[0,1,2,3,4,5])
-    pc.draw_sample_data(outliers, False)
+    # labeled_dataset,outliers = pc.split_outliers(labeled_dataset, columns=[0,1,2,3,4,5])
+    # pc.draw_sample_data(outliers, False)
 
     print("Final clustering..")
@@ -211,9 +224,9 @@ if __name__ == '__main__':
 
     total_clusters = []
 
-    clusters = pc.cluster_dbscan(labeled_dataset, [0,1,2,3,4,5], eps=0.1, min_samples=100)
+    clusters = pc.cluster_dbscan(labeled_dataset, [0,1,2,3,4,5], eps=used_params["eps"], min_samples=used_params["min_samples"])
     print("Pre-clustering done. Clusters: ", len(clusters))
-    pc.draw_clusters(clusters)
+    #pc.draw_clusters(clusters)
 
     for cluster in clusters:
         #cluster = pc.normalize_pointcloud(cluster)
@@ -225,11 +238,11 @@ if __name__ == '__main__':
             print("No need for 2nd level clustering since there is only a single primitive type in the cluster.")
             total_clusters.append(cluster)
         else:
-            sub_clusters = pc.cluster_dbscan(cluster, [0,1,2,7,8,9,10], eps=0.1, min_samples=100)
+            sub_clusters = pc.cluster_dbscan(cluster, [0,1,2,7,8,9,10], eps=used_params["eps"], min_samples=used_params["min_samples"])
             print("Sub clusters: ", len(sub_clusters))
             total_clusters.extend(sub_clusters)
-
-    result_clusters = list(filter(lambda c: c.shape[0] > 100, total_clusters))
+
+    result_clusters = list(filter(lambda c: c.shape[0] > used_params["min_samples"], total_clusters))
 
     for cluster in result_clusters:
         print("Cluster: ", cluster.shape[0])
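
Note on the parameter handling introduced by this patch: instead of hard-coding eps=0.1, min_samples=100 and a fixed 4x4x4 grid, predict.py now looks the values up per input model via param_set[file] and threads them through cluster_cubes, both cluster_dbscan calls, and the final cluster-size filter. The snippet below is a minimal, self-contained sketch of that lookup pattern; it uses scikit-learn's DBSCAN and random points as stand-ins for pc.cluster_dbscan and pc.read_pointcloud, so it illustrates the flow rather than the project's actual implementation.

    # Sketch (not part of the patch) of the parameter-driven clustering flow.
    # sklearn's DBSCAN stands in for pc.cluster_dbscan; random points stand in
    # for pc.read_pointcloud(pointcloud_file).
    import numpy as np
    from sklearn.cluster import DBSCAN

    # Per-model settings, looked up by key exactly as in the patch.
    param_set = {"test1": {"cells": 2, "eps": 0.15, "min_samples": 100}}
    file = "test1"
    used_params = param_set[file]

    points = np.random.rand(5000, 3)  # stand-in point cloud (x, y, z)

    # Density-based clustering with the looked-up parameters.
    labels = DBSCAN(eps=used_params["eps"],
                    min_samples=used_params["min_samples"]).fit_predict(points)
    clusters = [points[labels == label] for label in set(labels) if label != -1]

    # Same size filter as the patched result_clusters line.
    result_clusters = [c for c in clusters if c.shape[0] > used_params["min_samples"]]
    print("Clusters:", len(result_clusters))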