Merge remote-tracking branch 'origin/master'

# Conflicts:
#	predict/predict.py
Markus Friedrich 2019-08-09 15:42:01 +02:00
commit f2cc070d04
2 changed files with 130 additions and 37 deletions


@@ -148,6 +148,8 @@ class CustomShapeNet(InMemoryDataset):
####################################
# This is where you define the keys
attr_dict = dict(y=y, pos=points[:, :3 if not self.with_normals else 6])
if not self.with_normals:
    attr_dict.update(normals=points[:, 3:6])
####################################
if self.collate_per_element:
    data = Data(**attr_dict)
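
For orientation, a rough sketch of the two attribute layouts, assuming Data here is torch_geometric's Data (which accepts arbitrary keyword tensors); the tensors and shapes below are purely illustrative, not the dataset's real values:

import torch
from torch_geometric.data import Data

points = torch.rand(1024, 6)              # per-point xyz (cols 0-2) + normals (cols 3-5)
y = torch.zeros(1024, dtype=torch.long)   # per-point labels (illustrative)

# with_normals=True: pos carries xyz and normals together (6 columns)
data_with = Data(y=y, pos=points[:, :6])

# with_normals=False: pos is xyz only, normals become a separate attribute
data_without = Data(y=y, pos=points[:, :3], normals=points[:, 3:6])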

predict/predict.py

@@ -28,8 +28,9 @@ def eval_sample(net, sample):
# points: (n, 3)
points, gt_label = sample['points'], sample['labels']
n = points.shape[0]
f = points.shape[1]
points = points.view(1, n, 3) # make a batch
points = points.view(1, n, f) # make a batch
points = points.transpose(1, 2).contiguous()
points = points.to(device, dtype)
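
As a side note, the change keeps the per-point feature width f (3 without normals, 6 with normals) instead of hard-coding 3; a tiny illustrative torch snippet of the resulting batching, with made-up shapes:

import torch

pts = torch.rand(2048, 6)   # n points with f = 6 features (xyz + normals); illustrative only
n, f = pts.shape

batch = pts.view(1, n, f)                     # add a batch dimension -> (1, n, f)
batch = batch.transpose(1, 2).contiguous()    # channels-first -> (1, f, n), matching the transpose above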
@@ -85,6 +86,18 @@ def clusterToColor(cluster, cluster_idx):
return colors
def normalize_pointcloud(pc):
    # Scale xyz (columns 0-2) by the largest bounding-box extent ...
    max = pc.max(axis=0)
    min = pc.min(axis=0)
    f = np.max([abs(max[0] - min[0]), abs(max[1] - min[1]), abs(max[2] - min[2])])
    pc[:, 0:3] /= f
    # ... and rescale the normals (columns 3-5) to unit length.
    pc[:, 3:6] /= (np.linalg.norm(pc[:, 3:6], ord=2, axis=1, keepdims=True))
    return pc
def farthest_point_sampling(pts, K):
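
A minimal sketch of how the normalize_pointcloud helper added above behaves on a toy array (values invented for illustration; xyz in columns 0-2, normals in 3-5):

import numpy as np

toy = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 2.0],
                [4.0, 2.0, 0.0, 3.0, 4.0, 0.0]])
toy = normalize_pointcloud(toy)
print(toy[:, :3].max())                      # -> 1.0, xyz divided by the largest extent (4.0)
print(np.linalg.norm(toy[:, 3:6], axis=1))   # -> [1. 1.], normals rescaled to unit length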
@@ -127,6 +140,43 @@ def append_normal_angles(data):
return np.column_stack((data, res))
def extract_cube_clusters(data, cluster_dims, max_points_per_cluster, min_points_per_cluster):

    # Bounding box of the xyz columns, padded by 1% of the coordinate values.
    max = data[:, :3].max(axis=0)
    max += max * 0.01
    min = data[:, :3].min(axis=0)
    min -= min * 0.01
    size = (max - min)

    clusters = {}

    # Edge length of one grid cell per axis.
    cluster_size = size / np.array(cluster_dims, dtype=np.float32)

    print('Min: ' + str(min) + ' Max: ' + str(max))
    print('Cluster Size: ' + str(cluster_size))

    for row in data:
        # print('Row: ' + str(row))
        # Grid cell of this point and its linearised cell index.
        cluster_pos = ((row[:3] - min) / cluster_size).astype(int)
        cluster_idx = cluster_dims[0] * cluster_dims[2] * cluster_pos[1] + cluster_dims[0] * cluster_pos[2] + cluster_pos[0]
        clusters.setdefault(cluster_idx, []).append(row)

    # Apply farthest point sampling to each cluster
    final_clusters = []
    for key, cluster in clusters.items():
        c = np.vstack(cluster)
        if c.shape[0] < min_points_per_cluster:
            continue
        final_clusters.append(farthest_point_sampling(c, max_points_per_cluster))

    return final_clusters
def extract_clusters(data, selected_indices, eps, min_samples, metric='euclidean', algo='auto'):

    min_samples = min_samples * len(data)
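
For orientation, a rough usage sketch of the extract_cube_clusters helper added above; the input cloud and grid sizes are illustrative, not taken from the script:

import numpy as np

cloud = np.random.rand(100000, 3).astype(np.float32)   # toy cloud, xyz in columns 0-2

# Partition the bounding box into a 2x2x2 grid, downsample each occupied cell to
# 2048 points via farthest point sampling, and drop cells with fewer than 100 points.
cubes = extract_cube_clusters(cloud, [2, 2, 2], max_points_per_cluster=2048, min_points_per_cluster=100)
print(len(cubes), cubes[0].shape)   # e.g. 8 cells, each roughly (2048, 3)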
@@ -178,7 +228,7 @@ def draw_clusters(clusters):
def draw_sample_data(sample_data, colored_normals = False):

    cloud = o3d.PointCloud()
    cloud.points = o3d.Vector3dVector(sample_data[:,:3])
    cloud.points = o3d.Vector3dVector(sample_data[:, :3])
    cloud.colors = \
        o3d.Vector3dVector(label2color(sample_data[:, 6].astype(int)) if not colored_normals else sample_data[:, 3:6])
@@ -194,9 +244,10 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../') # add proj
parser = argparse.ArgumentParser()
parser.add_argument('--npoints', type=int, default=2048, help='resample points number')
parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_0.pth', help='model path')
parser.add_argument('--model', type=str, default='./checkpoint/seg_model_custom_3.pth', help='model path')
parser.add_argument('--sample_idx', type=int, default=0, help='select a sample to segment and view result')
parser.add_argument('--headers', type=strtobool, default=True, help='if raw files come with headers')
parser.add_argument('--with_normals', type=strtobool, default=True, help='if training will include normals')
parser.add_argument('--collate_per_segment', type=strtobool, default=True, help='whether to look at pointclouds or sub')
parser.add_argument('--has_variations', type=strtobool, default=False,
                    help='whether a single pointcloud has variations '
@@ -211,46 +262,41 @@ if __name__ == '__main__':
print('Create data set ..')
dataset_folder = './data/raw/predict/'
pointcloud_file = './pointclouds/0_0.xyz'
pointcloud_file = './pointclouds/1_pc.xyz'
# Load and pre-process point cloud
pcloud = pc.read_pointcloud(pointcloud_file)
pcloud = pc.normalize_pointcloud(pcloud, 1)
pcloud = normalize_pointcloud(pcloud)
# pcloud = append_normal_angles(pcloud)
# pcloud = farthest_point_sampling(pcloud, opt.npoints)
#a, b = pc.split_outliers(pcloud, [3, 4, 5])
#draw_sample_data(a, True)
#draw_sample_data(b, True)
#pcloud = a
# Test: Pre-predict clustering
print("point cloud size: ", pcloud.shape)
clusters = extract_clusters(pcloud, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
                            metric='euclidean', algo='auto')
#draw_clusters(clusters)
# for 0_0.xyz: pc.hierarchical_clustering(pcloud, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=5)
# pc_clusters = pc.hierarchical_clustering(pcloud, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=5)
# pc_clusters = pc.filter_clusters(pc_clusters, 100)
pc_clusters = [pcloud]
print("NUM CLUSTERS: ", len(pc_clusters))
draw_clusters(pc_clusters)
for c in pc_clusters:
    draw_sample_data(c, True)
    print("Cluster Size: ", len(c))
# draw_sample_data(pcloud)
pc_clusters = pc.cluster_cubes(pcloud, [1, 1, 1])
# pc = StandardScaler().fit_transform(pc)
recreate_folder(dataset_folder)
for idx, pcc in enumerate(pc_clusters):
# Add full point cloud to prediction folder.
# recreate_folder(dataset_folder + '0_0' + '/')
# pc_fps = farthest_point_sampling(pcloud, opt.npoints)
# pc.write_pointcloud(dataset_folder + '0_0' + '/pc.xyz', pc_fps)
# Add cluster point clouds to prediction folder.
pc_clusters = extract_cube_clusters(pcloud, [4, 4, 4], 2048, 100)
# pc_clusters = extract_clusters(pc, [0, 1, 2, 3, 4, 5], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
draw_clusters(pc_clusters)
for idx, pcc in enumerate(pc_clusters):
    print("Cluster shape: ", pcc.shape)
    pcc = farthest_point_sampling(pcc, opt.npoints)
    recreate_folder(dataset_folder + str(idx) + '/')
    pc.write_pointcloud(dataset_folder + str(idx) + '/pc.xyz', pcc)
    # draw_sample_data(pcc, False)
    #draw_sample_data(pcc, False)
# Load dataset
print('load dataset ..')
@@ -259,8 +305,9 @@ if __name__ == '__main__':
test_dataset = ShapeNetPartSegDataset(
    mode='predict',
    root_dir='data',
    with_normals=opt.with_normals,
    npoints=opt.npoints,
    refresh=False,
    refresh=True,
    collate_per_segment=opt.collate_per_segment,
    has_variations=opt.has_variations,
    headers=opt.headers
@@ -273,7 +320,8 @@ if __name__ == '__main__':
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
dtype = torch.float
net = PointNet2PartSegmentNet(num_classes)
# net = PointNetPartSegmentNet(num_classes)
net = PointNet2PartSegmentNet(num_classes, with_normals=opt.with_normals)
net.load_state_dict(torch.load(opt.model, map_location=device.type))
net = net.to(device, dtype)
@@ -287,20 +335,63 @@ if __name__ == '__main__':
# Predict
pred_label, gt_label = eval_sample(net, sample)
sample_data = np.column_stack((sample["points"].numpy(), sample["normals"].numpy(), pred_label.numpy()))
if opt.with_normals:
    sample_data = np.column_stack((sample["points"].numpy(), pred_label.numpy()))
else:
    sample_data = np.column_stack((sample["points"].numpy(), sample["normals"], pred_label.numpy()))
draw_sample_data(sample_data, False)
#print("Sample Datat: ", sample_data[:5, :])
#print('Eval done.')
print("PRED LABEL: ", pred_label)
#sample_data = normalize_pointcloud(sample_data)
#sample_data = append_onehotencoded_type(sample_data, 1.0)
#sample_data = append_normal_angles(sample_data)
# print('Clustering ..')
# print('Shape: ' + str(sample_data.shape))
# clusters = extract_clusters(sample_data, [0, 1, 2, 3, 4, 5, 7, 8, 9, 10], eps=0.1, min_samples=0.0001, metric='euclidean', algo='auto')
# print('Clustering done. ' + str(len(clusters)) + " Clusters.")
# print(sample_data[:, 6])
# draw_sample_data(sample_data, False)
# result_clusters.extend(clusters)
# result_clusters.append(sample_data)
if labeled_dataset is None:
    labeled_dataset = sample_data
else:
    labeled_dataset = np.vstack((labeled_dataset, sample_data))
print("prediction done")
#draw_clusters(result_clusters)
draw_sample_data(labeled_dataset, False)
print("point cloud size: ", labeled_dataset.shape)
print("Min: ", np.min(labeled_dataset[:, :3]))
print("Max: ", np.max(labeled_dataset[:, :3]))
print("Min: ", np.min(pcloud[:, :3]))
print("Max: ", np.max(pcloud[:, :3]))
#print("Data Set: ", labeled_dataset[:5, :])
labeled_dataset = normalize_pointcloud(labeled_dataset)
labeled_dataset = append_normal_angles(labeled_dataset)
#labeled_dataset = farthest_point_sampling(labeled_dataset, opt.npoints)
labeled_dataset = append_onehotencoded_type(labeled_dataset, 1.0)
clusters = extract_clusters(labeled_dataset, [0, 1, 2, 3, 4, 5], eps=0.10, min_samples=0.005,
                            metric='euclidean', algo='auto')
#total_clusters = []
#for cluster in clusters:
#    sub_clusters = extract_clusters(cluster, [7,8,9], eps=0.10, min_samples=0.05,
#                                    metric='euclidean', algo='auto')
#    total_clusters.extend(sub_clusters)
draw_clusters(clusters)
pc.write_clusters("clusters.txt", clusters)