stuff
This commit is contained in:
parent ce7ff0ae7c
commit 4e1fcdfd43
@@ -152,4 +152,18 @@ def draw_clusters(clusters):
         cloud.colors = o3d.Vector3dVector(clusterToColor(cluster, cluster_idx))
         clouds.append(cloud)
 
-    o3d.draw_geometries(clouds)
+    o3d.draw_geometries(clouds)
+
+
+def write_clusters(path, clusters, type_column=6):
+
+    file = open(path, "w")
+    file.write(str(len(clusters)) + "\n")
+
+    for cluster in clusters:
+        print("Types: ", cluster[:, type_column])
+
+        types = np.unique(cluster[:, type_column], axis=0)
+
+        np.savetxt(file, types, header='', comments='')
+        np.savetxt(file, cluster[:, :6], header=str(len(cluster)) + ' ' + str(6), comments='')
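A minimal usage sketch for the new write_clusters helper (illustrative only, not part of the commit; it assumes numpy is imported as np, as elsewhere in this file, and that column 6 of each cluster row holds the per-point type label, matching the type_column default):

    import numpy as np

    # two toy clusters: six geometry columns plus a type column at index 6
    toy_clusters = [
        np.hstack((np.random.rand(10, 6), np.zeros((10, 1)))),  # 10 points, all type 0.0
        np.hstack((np.random.rand(5, 6), np.ones((5, 1)))),     # 5 points, all type 1.0
    ]
    write_clusters("toy_clusters.txt", toy_clusters)
    # the file then contains the cluster count, and per cluster the unique type
    # values followed by an "N 6" header line and the first six columns of each point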
@@ -139,7 +139,7 @@ def append_normal_angles(data):
     return np.column_stack((data, res))
 
 
-def extract_cube_clusters(data, cluster_dims, max_points_per_cluster):
+def extract_cube_clusters(data, cluster_dims, max_points_per_cluster, min_points_per_cluster):
 
     max = data[:,:3].max(axis=0)
     max += max * 0.01
@@ -165,11 +165,15 @@ def extract_cube_clusters(data, cluster_dims, max_points_per_cluster):
         clusters.setdefault(cluster_idx, []).append(row)
 
     # Apply farthest point sampling to each cluster
+    final_clusters = []
     for key, cluster in clusters.items():
         c = np.vstack(cluster)
-        clusters[key] = farthest_point_sampling(c, max_points_per_cluster)
+        if c.shape[0] < min_points_per_cluster:
+            continue
 
-    return clusters.values()
+        final_clusters.append(farthest_point_sampling(c, max_points_per_cluster))
+
+    return final_clusters
 
 
 def extract_clusters(data, selected_indices, eps, min_samples, metric='euclidean', algo='auto'):
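For reference, a usage sketch of the changed extract_cube_clusters signature (illustrative only; the grid size and thresholds mirror the call added further down in this commit, and pcloud is assumed to be a normalized (N, 6+) numpy point cloud):

    # split the cloud into a 4x4x4 cell grid, drop cells with fewer than 100 points,
    # and apply farthest point sampling with a 2048-point budget to each remaining cell
    cells = extract_cube_clusters(pcloud, [4, 4, 4],
                                  max_points_per_cluster=2048,
                                  min_points_per_cluster=100)
    print(len(cells), "cells kept after the minimum-point filter")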
@@ -186,7 +190,6 @@ def extract_clusters(data, selected_indices, eps, min_samples, metric='euclidean
 
     db_res = DBSCAN(eps=eps, metric=metric, n_jobs=-1, algorithm=algo, min_samples=min_samples).fit(data[:, selected_indices])
 
-
     labels = db_res.labels_
     n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
     n_noise = list(labels).count(-1)
@@ -198,7 +201,6 @@ def extract_clusters(data, selected_indices, eps, min_samples, metric='euclidean
             continue
         clusters.setdefault(str(l), []).append(data[idx, :])
 
-
     npClusters = []
     for cluster in clusters.values():
         npClusters.append(np.array(cluster))
@@ -241,7 +243,7 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../') # add proj
 
 parser = argparse.ArgumentParser()
 parser.add_argument('--npoints', type=int, default=2048, help='resample points number')
-parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_3.pth', help='model path')
+parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_30.pth', help='model path')
 parser.add_argument('--sample_idx', type=int, default=0, help='select a sample to segment and view result')
 parser.add_argument('--headers', type=strtobool, default=True, help='if raw files come with headers')
 parser.add_argument('--collate_per_segment', type=strtobool, default=True, help='whether to look at pointclouds or sub')
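A parsing sketch for the flags above (illustrative only, not part of the commit; it reuses the parser defined in this file and passes an explicit argv instead of reading sys.argv):

    # strtobool maps "true"/"false" style strings to 1/0, so boolean flags are passed as text
    opt = parser.parse_args(['--model', './checkpoint/seg_model_custom_30.pth',
                             '--npoints', '2048',
                             '--headers', 'false'])
    print(opt.model, opt.npoints, opt.headers)  # ./checkpoint/seg_model_custom_30.pth 2048 0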
@@ -260,31 +262,40 @@ if __name__ == '__main__':
     dataset_folder = './data/raw/predict/'
     pointcloud_file = './pointclouds/0_pc.xyz'
 
     # Load and pre-process point cloud
     pcloud = pc.read_pointcloud(pointcloud_file)
     pcloud = normalize_pointcloud(pcloud)
-    pcloud = append_normal_angles(pcloud)
+    # pcloud = append_normal_angles(pcloud)
+    # pcloud = farthest_point_sampling(pcloud, opt.npoints)
 
     # Test: Pre-predict clustering
     print("point cloud size: ", pcloud.shape)
     clusters = extract_clusters(pcloud, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
                                 metric='euclidean', algo='auto')
     #draw_clusters(clusters)
 
     # pc = StandardScaler().fit_transform(pc)
 
     recreate_folder(dataset_folder)
 
     # Add full point cloud to prediction folder.
-    recreate_folder(dataset_folder + '0_0' + '/')
-    pc_fps = farthest_point_sampling(pcloud, opt.npoints)
-    pc.write_pointcloud(dataset_folder + '0_0' + '/pc.xyz', pc_fps)
+    # recreate_folder(dataset_folder + '0_0' + '/')
+    # pc_fps = farthest_point_sampling(pcloud, opt.npoints)
+    # pc.write_pointcloud(dataset_folder + '0_0' + '/pc.xyz', pc_fps)
 
-    pc_clusters = extract_cube_clusters(pcloud, [4,4,4], 1024)
-    #pc_clusters = extract_clusters(pc, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
-    # Add cluster point clouds to prediction folder.
-    for idx, pcc in enumerate(pc_clusters):
+    pc_clusters = extract_cube_clusters(pcloud, [4, 4, 4], 2048, 100)
+    # pc_clusters = extract_clusters(pc, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
+
+    draw_clusters(pc_clusters)
+
+    for idx, pcc in enumerate(pc_clusters):
+        print("Cluster shape: ", pcc.shape)
+        pcc = farthest_point_sampling(pcc, opt.npoints)
+        recreate_folder(dataset_folder + str(idx) + '/')
+        pc.write_pointcloud(dataset_folder + str(idx) + '/pc.xyz', pcc)
+        #draw_sample_data(pcc, False)
+
-    draw_clusters(pc_clusters)
+
     # Load dataset
     print('load dataset ..')
     test_transform = GT.Compose([GT.NormalizeScale(), ])
@@ -293,7 +304,7 @@ if __name__ == '__main__':
         mode='predict',
         root_dir='data',
         npoints=opt.npoints,
-        refresh=True,
+        refresh=False,
         collate_per_segment=opt.collate_per_segment,
         has_variations=opt.has_variations,
         headers=opt.headers
@@ -301,8 +312,6 @@ if __name__ == '__main__':
 
     num_classes = test_dataset.num_classes()
-
     print('test dataset size: ', len(test_dataset))
-
     # Load model
     print('Construct model ..')
     device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
@@ -315,33 +324,68 @@ if __name__ == '__main__':
     net = net.to(device, dtype)
     net.eval()
 
     result_clusters = []
+    labeled_dataset = None
 
-    # Iterate over all the samples
+    # Iterate over all the samples and predict
     for sample in test_dataset:
 
         print('Eval test sample ..')
         # Predict
 
         pred_label, gt_label = eval_sample(net, sample)
         sample_data = np.column_stack((sample["points"].numpy(), sample["normals"].numpy(), pred_label.numpy()))
-        print('Eval done.')
 
-        sample_data = normalize_pointcloud(sample_data)
-
-        sample_data = append_onehotencoded_type(sample_data, 1.0)
-        sample_data = append_normal_angles(sample_data)
-
-        print('Clustering ..')
-        print('Shape: ' + str(sample_data.shape))
-
-        clusters = extract_clusters(sample_data, [0, 1, 2, 3, 4, 5, 7, 8, 9, 10], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
-
-        print('Clustering done. ' + str(len(clusters)) + " Clusters.")
-        print(sample_data[:, 6])
-
-        draw_sample_data(sample_data, False)
-
-        result_clusters.extend(clusters)
+        #print("Sample Datat: ", sample_data[:5, :])
+        #print('Eval done.')
+        print("PRED LABEL: ", pred_label)
+
+        #sample_data = normalize_pointcloud(sample_data)
+        #sample_data = append_onehotencoded_type(sample_data, 1.0)
+        #sample_data = append_normal_angles(sample_data)
+
+        # print('Clustering ..')
+        # print('Shape: ' + str(sample_data.shape))
+
+        # clusters = extract_clusters(sample_data, [0, 1, 2, 3, 4, 5, 7, 8, 9, 10], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
+        # print('Clustering done. ' + str(len(clusters)) + " Clusters.")
+        # print(sample_data[:, 6])
+
+        # draw_sample_data(sample_data, False)
+
+        # result_clusters.extend(clusters)
+        # result_clusters.append(sample_data)
+
+        if labeled_dataset is None:
+            labeled_dataset = sample_data
+        else:
+            labeled_dataset = np.vstack((labeled_dataset, sample_data))
 
     #draw_clusters(result_clusters)
+
+    draw_sample_data(labeled_dataset, False)
+    print("point cloud size: ", labeled_dataset.shape)
+
+    print("Min: ", np.min(labeled_dataset[:, :3]))
+    print("Max: ", np.max(labeled_dataset[:, :3]))
+    print("Min: ", np.min(pcloud[:, :3]))
+    print("Max: ", np.max(pcloud[:, :3]))
+    #print("Data Set: ", labeled_dataset[:5, :])
+    labeled_dataset = normalize_pointcloud(labeled_dataset)
+    labeled_dataset = append_normal_angles(labeled_dataset)
+    #labeled_dataset = farthest_point_sampling(labeled_dataset, opt.npoints)
+
+    labeled_dataset = append_onehotencoded_type(labeled_dataset, 1.0)
+
+    clusters = extract_clusters(labeled_dataset, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
+                                metric='euclidean', algo='auto')
+
+    #total_clusters = []
+    #for cluster in clusters:
+    #    sub_clusters = extract_clusters(cluster, [7,8,9], eps=0.10, min_samples=0.05,
+    #                                    metric='euclidean', algo='auto')
+    #    total_clusters.extend(sub_clusters)
+
+    draw_clusters(clusters)
+
+    pc.write_clusters("clusters.txt", clusters)